//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
#define LLVM_LIB_TARGET_X86_X86ISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"

namespace llvm {
  class X86Subtarget;
  class X86TargetMachine;
  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType : unsigned {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,
      /// Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.

      /// Bitwise logical AND of floating point values. This corresponds
      /// to X86::ANDPS or X86::ANDPD.

      /// Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.

      /// Bitwise logical XOR of floating point values. This corresponds
      /// to X86::XORPS or X86::XORPD.

      /// Bitwise logical ANDNOT of floating point values. This
      /// corresponds to X86::ANDNPS or X86::ANDNPD.
      /// These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular the
      /// operands of these nodes are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
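      ///
      /// For illustration only (a sketch, not part of the original header),
      /// building such a node mirrors the operand list above:
      ///
      /// \code
      ///   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
      ///   SDValue Ops[] = { Chain, Callee /*, argument register copies... */ };
      ///   Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
      /// \endcode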
      /// This operation implements the lowering for readcyclecounter.

      /// X86 Read Time-Stamp Counter and Processor ID.

      /// X86 Read Performance Monitoring Counters.

      /// X86 compare and logical compare instructions.

      /// X86 bit-test instructions.

      /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
      /// operand, usually produced by a CMP instruction.
      // Same as SETCC except it's materialized with an sbb and the value is all
      // ones or all zeros.
      SETCC_CARRY, // R = carry_bit ? ~0 : 0

      /// X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
      /// Operands are two FP values to compare; result is a mask of
      /// 0s or 1s. Generally DTRT for C/C++ with NaNs.
      /// X86 FP SETCC, similar to above, but with output as an i1 mask and
      /// with optional rounding mode.
      FSETCCM, FSETCCM_RND,

      /// X86 conditional moves. Operand 0 and operand 1 are the two values
      /// to select from. Operand 2 is the condition code, and operand 3 is the
      /// flag operand produced by a CMP or TEST instruction. It also writes a
      /// flag result.
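      ///
      /// For illustration only (a sketch, not part of the original header),
      /// mirroring the operand order described above:
      ///
      /// \code
      ///   SDValue CC = DAG.getConstant(X86::COND_E, dl, MVT::i8);
      ///   SDValue CMov = DAG.getNode(X86ISD::CMOV, dl, VT,
      ///                              Val0, Val1, CC, EFLAGS);
      /// \endcode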
      /// X86 conditional branches. Operand 0 is the chain operand, operand 1
      /// is the block to branch to if the condition is true, operand 2 is the
      /// condition code, and operand 3 is the flag operand produced by a CMP
      /// or TEST instruction.

      /// Return with a flag operand. Operand 0 is the chain operand, operand
      /// 1 is the number of bytes of stack to pop.

      /// Return from interrupt. Operand 0 is the number of bytes to pop.

      /// Repeat fill, corresponds to X86::REP_STOSx.

      /// Repeat move, corresponds to X86::REP_MOVSx.

      /// On Darwin, this node represents the result of the popl
      /// at function entry, used for PIC code.

      /// A wrapper node for TargetConstantPool, TargetJumpTable,
      /// TargetExternalSymbol, TargetGlobalAddress, TargetGlobalTLSAddress,
      /// MCSymbol and TargetBlockAddress.

      /// Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      /// Copies a 64-bit value from the low word of an XMM vector
      /// to an MMX vector.

      /// Copies a 32-bit value from the low word of an MMX
      /// vector to a GPR.

      /// Copies a GPR into the low 32-bit word of an MMX vector
      /// and zeros out the high word.

      /// Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.

      /// Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.

      /// Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.

      /// Insert the lower 8 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRB.

      /// Insert the lower 16 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRW.
      /// Shuffle 16 8-bit values within a vector.

      /// Compute Sum of Absolute Differences.

      /// Compute Double Block Packed Sum-Absolute-Differences.

      /// Bitwise Logical AND NOT of Packed FP values.

      /// Blend where the selector is an immediate.

      /// Dynamic (non-constant condition) vector blend where only the sign bits
      /// of the condition elements are used. This is used to enforce that the
      /// condition mask is not valid for generic VSELECT optimizations.

      /// Combined add and sub on an FP vector.
      // FP vector ops with rounding mode.
      FSQRT_RND, FSQRTS_RND,

      // FP vector get exponent.
      FGETEXP_RND, FGETEXPS_RND,

      // Extract Normalized Mantissas.

      // Integer add/sub with unsigned saturation.

      // Integer add/sub with signed saturation.

      // Unsigned Integer average.

      /// Integer horizontal add/sub.

      /// Floating point horizontal add/sub.

      // Detect Conflicts Within a Vector

      /// Floating point max and min.

      /// Commutative FMIN and FMAX.

      /// Scalar intrinsic floating point max and min.

      /// Floating point reciprocal-sqrt and reciprocal approximation.
      /// Note that these typically require refinement
      /// in order to obtain suitable precision.
      // Thread Local Storage.

      // Thread Local Storage. A call to get the start address
      // of the TLS block for the current module.

      // Thread Local Storage. When calling to an OS provided
      // thunk at the address from an earlier relocation.

      // Exception Handling helpers.

      // SjLj exception handling setjmp.

      // SjLj exception handling longjmp.

      // SjLj exception handling dispatch.
      EH_SJLJ_SETUP_DISPATCH,

      /// Tail call return. See X86TargetLowering::LowerCall for
      /// the list of operands.
      // Vector move to low scalar and zero higher vector elements.

      // Vector integer zero-extend.

      // Vector integer signed-extend.

      // Vector integer truncate.

      // Vector integer truncate with unsigned/signed saturation.

      // Vector FP extend.
      VFPEXT, VFPEXT_RND, VFPEXTS_RND,

      // Vector FP round.
      VFPROUND, VFPROUND_RND, VFPROUNDS_RND,

      // Convert a vector to mask, set bits based on MSB.

      // 128-bit vector logical left / right shift.

      // Vector shift elements.

      // Vector variable shift right arithmetic.
      // Unlike ISD::SRA, if the shift count is greater than the element size,
      // the sign bit is used to fill the destination data element.
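      //
      // For illustration (a sketch): per-element semantics for i32 lanes,
      // where the equivalent ISD::SRA would be undefined:
      //
      //   Res[i] = (Cnt[i] > 31) ? (Src[i] >> 31)       // all sign bits
      //                          : (Src[i] >> Cnt[i]);  // arithmetic shift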
      // Vector shift elements by immediate.

      // Shifts of mask registers.

      // Bit rotate by immediate.

      // Vector packed double/float comparison.

      // Vector integer comparisons.

      // Vector integer comparisons, the result is in a mask vector.

      /// Vector comparison generating mask bits for fp and
      /// integer signed and unsigned data types.

      // Vector comparison with rounding mode for FP values.

      // Arithmetic operations with FLAGS results.
      ADD, SUB, ADC, SBB, SMUL,
      INC, DEC, OR, XOR, AND,

      // Bit field extract.

      // LOW, HI, FLAGS = umul LHS, RHS.

      // 8-bit SMUL/UMUL - AX, FLAGS = smul8/umul8 AL, RHS.

      // 8-bit divrem that zero-extends the high result (AH).

      // X86-specific multiply by immediate.

      // Vector sign bit extraction.

      // Vector bitwise comparisons.

      // Vector packed fp sign bitwise comparisons.

      // Vector "test" in AVX-512, the result is in a mask vector.

      // OR/AND test for masks.

      // Several flavors of instructions with vector shuffle behaviors.
      // Saturated signed/unsigned packing.

      // Intra-lane alignr.

      // AVX512 inter-lane alignr.

      // Shuffle Packed Values at 128-bit granularity.
      // Variable Permute (VPERM).
      // Res = VPERMV MaskV, V0

      // 3-op Variable Permute (VPERMT2).
      // Res = VPERMV3 V0, MaskV, V1

      // 3-op Variable Permute overwriting the index (VPERMI2).
      // Res = VPERMIV3 V0, MaskV, V1
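      //
      // For illustration (a sketch, v4i32 case): the index selects from the
      // 8-element concatenation of both sources, so bit 2 picks the source:
      //
      //   Res[i] = (Mask[i] & 4) ? V1[Mask[i] & 3] : V0[Mask[i] & 3];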
      // Bitwise ternary logic.

      // Fix Up Special Packed Float32/64 values.

      // Range Restriction Calculation For Packed Pairs of Float32/64 values.

      // Reduce - Perform Reduction Transformation on scalar/packed FP.

      // RndScale - Round FP Values To Include A Given Number Of Fraction Bits.
      VRNDSCALE, VRNDSCALES,

      // Tests types of FP values for packed types.

      // Tests types of FP values for scalar types.

      // Broadcast scalar to vector.

      // Broadcast mask to vector.

      // Broadcast subvector to vector.

      // Extract vector element.

      /// SSE4A Extraction and Insertion.

      // XOP variable/immediate rotations.

      // XOP arithmetic/logical shifts.

      // XOP signed/unsigned integer comparisons.

      // XOP packed permute bytes.

      // XOP two source permutation.

      // Vector multiply packed unsigned doubleword integers.

      // Vector multiply packed signed doubleword integers.

      // Vector Multiply Packed Unsigned Integers with Round and Scale.

      // Multiply and Add Packed Integers.
      VPMADDUBSW, VPMADDWD,
      VPMADD52L, VPMADD52H,
      // FMA with rounding mode.

      // Scalar intrinsic FMA with rounding mode.
      // Two versions, passthru bits on op1 or op3.
      FMADDS1_RND, FMADDS3_RND,
      FNMADDS1_RND, FNMADDS3_RND,
      FMSUBS1_RND, FMSUBS3_RND,
      FNMSUBS1_RND, FNMSUBS3_RND,

      // Compress and expand.

      // Convert Signed/Unsigned Integer to Floating-Point Value with
      // rounding mode.
      SINT_TO_FP_RND, UINT_TO_FP_RND,
      SCALAR_SINT_TO_FP_RND, SCALAR_UINT_TO_FP_RND,

      // Vector float/double to signed/unsigned integer.
      CVTP2SI, CVTP2UI, CVTP2SI_RND, CVTP2UI_RND,
      // Scalar float/double to signed/unsigned integer.
      CVTS2SI_RND, CVTS2UI_RND,

      // Vector float/double to signed/unsigned integer with truncation.
      CVTTP2SI, CVTTP2UI, CVTTP2SI_RND, CVTTP2UI_RND,
      // Scalar float/double to signed/unsigned integer with truncation.
      CVTTS2SI_RND, CVTTS2UI_RND,

      // Vector signed/unsigned integer to float/double.

      // Save xmm argument registers to the stack, according to %al. An operator
      // is needed so that this can be expanded with control flow.
      VASTART_SAVE_XMM_REGS,
      // Windows's _chkstk call to do stack probing.

      // For allocating variable amounts of stack space when using
      // segmented stacks. Checks if the current stacklet has enough space, and
      // falls back to heap allocation if not.

      // Store FP status word into i16 register.

      // Store contents of %ah into %eflags.

      // Get a random integer and indicate whether it is valid in CF.

      // Get a NIST SP800-90B & C compliant random integer and
      // indicate whether it is valid in CF.

      // SSE42 string comparisons.

      // Test if in transactional execution.

      // ERI instructions.
      RSQRT28, RSQRT28S, RCP28, RCP28S, EXP2,

      // Conversions between float and half-float.

      // LWP insert record.
      // Compare and swap.
      LCMPXCHG_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
      LCMPXCHG8_SAVE_EBX_DAG,
      LCMPXCHG16_SAVE_RBX_DAG,

      /// LOCK-prefixed arithmetic read-modify-write instructions.
      /// EFLAGS, OUTCHAIN = LADD(INCHAIN, PTR, RHS)
      LADD, LSUB, LOR, LXOR, LAND,

      // Load, scalar_to_vector, and zero extend.

      // Store FP control word into i16 memory.

      /// This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source. This corresponds
      /// to the X86::FIST*m instructions and the rounding mode change stuff. It
      /// has two inputs (token chain and address) and two outputs (int value
      /// and token chain).

      /// This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result. This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain). FILD_FLAG
      /// also produces a flag.

      /// This instruction implements an extending load to FP stack slots.
      /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
      /// operand, ptr to load from, and a ValueType node indicating the type
      /// to load.

      /// This instruction implements a truncating store to FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
      /// chain operand, value to store, address, and a ValueType to store it
      /// as.

      /// This instruction grabs the address of the next argument
      /// from a va_list. (reads and modifies the va_list in memory)

      // Vector truncating store with unsigned/signed saturation.
      VTRUNCSTOREUS, VTRUNCSTORES,
      // Vector truncating masked store with unsigned/signed saturation.
      VMTRUNCSTOREUS, VMTRUNCSTORES,

      // X86 specific gather.

      // WARNING: Do not add anything at the end unless you want the node to
      // have memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
      // opcodes will be treated as target memory ops!
    };
  } // end namespace X86ISD

  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF128, VEXTRACTI128 instructions.
    bool isVEXTRACT128Index(SDNode *N);

    /// Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF128, VINSERTI128 instructions.
    bool isVINSERT128Index(SDNode *N);

    /// Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF64X4, VEXTRACTI64X4 instructions.
    bool isVEXTRACT256Index(SDNode *N);

    /// Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF64X4, VINSERTI64X4 instructions.
    bool isVINSERT256Index(SDNode *N);

    /// Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF128, VEXTRACTI128 instructions.
    unsigned getExtractVEXTRACT128Immediate(SDNode *N);

    /// Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF128, VINSERTI128 instructions.
    unsigned getInsertVINSERT128Immediate(SDNode *N);

    /// Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF64X4, VEXTRACTI64X4 instructions.
    unsigned getExtractVEXTRACT256Immediate(SDNode *N);

    /// Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF64X4, VINSERTI64X4 instructions.
    unsigned getInsertVINSERT256Immediate(SDNode *N);
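    /// For illustration only (a sketch, not part of the original header): the
    /// immediate is the subvector index rescaled to 128-bit (or 256-bit)
    /// lanes, e.g. for the 128-bit forms:
    ///
    /// \code
    ///   unsigned EltsPerLane = 128 / VecVT.getScalarSizeInBits();
    ///   unsigned Imm = Index / EltsPerLane; // which 128-bit lane to use
    /// \endcode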
    /// Returns true if Elt is a constant zero or floating point constant +0.0.
    bool isZeroNode(SDValue Elt);

    /// Returns true if the given offset can fit into the
    /// displacement field of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement = true);

    /// Determines whether the callee is required to pop its
    /// own arguments. Callee pop is necessary to support tail calls.
    bool isCalleePop(CallingConv::ID CallingConv,
                     bool is64Bit, bool IsVarArg, bool GuaranteeTCO);

  } // end namespace X86
  //===--------------------------------------------------------------------===//
  //  X86 Implementation of the TargetLowering interface
  class X86TargetLowering final : public TargetLowering {
  public:
    explicit X86TargetLowering(const X86TargetMachine &TM,
                               const X86Subtarget &STI);
    unsigned getJumpTableEncoding() const override;
    bool useSoftFloat() const override;

    void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                               ArgListTy &Args) const override;

    MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
      return MVT::i8;
    }

    const MCExpr *
    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                              const MachineBasicBlock *MBB, unsigned uid,
                              MCContext &Ctx) const override;

    /// Returns relocation base for the given PIC jumptable.
    SDValue getPICJumpTableRelocBase(SDValue Table,
                                     SelectionDAG &DAG) const override;
    const MCExpr *
    getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                 unsigned JTI, MCContext &Ctx) const override;
    /// Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. For X86, aggregates
    /// that contain SSE vectors are placed at 16-byte boundaries while the
    /// rest are at 4-byte boundaries.
    unsigned getByValTypeAlignment(Type *Ty,
                                   const DataLayout &DL) const override;
    /// Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, the destination alignment can satisfy
    /// any constraint. Similarly, if SrcAlign is zero it means there is no
    /// need to check it against the alignment requirement, probably because
    /// the source does not need to be loaded. If 'IsMemset' is true, it is
    /// expanding a memset. If 'ZeroMemset' is true, it is a memset of zero.
    /// 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
    /// does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                            bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                            MachineFunction &MF) const override;
    /// Returns true if it's safe to use load / store of the
    /// specified type to expand memcpy / memset inline. This is mostly true
    /// for all types except for some special cases. For example, on X86
    /// targets without SSE2, f64 load / store are done with fldl / fstpl, which
    /// also do a type conversion. Note the specified type doesn't have to be
    /// legal as the hook is used before type legalization.
    bool isSafeMemOpType(MVT VT) const override;

    /// Returns true if the target allows unaligned memory accesses of the
    /// specified type. Returns whether it is "fast" in the last argument.
    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align,
                                        bool *Fast) const override;
    /// Provide custom lowering hooks for some operations.
    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

    /// Places new result values for the node in Results (their number
    /// and types must exactly match those of the original return values of
    /// the node), or leaves Results empty, which indicates that the node is not
    /// to be custom lowered after all.
    void LowerOperationWrapper(SDNode *N,
                               SmallVectorImpl<SDValue> &Results,
                               SelectionDAG &DAG) const override;

    /// Replace the results of a node with an illegal result
    /// type with new values built out of custom code.
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;

    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
    // Return true if it is profitable to combine a BUILD_VECTOR to a TRUNCATE
    // for given operand and result types.
    // Example of such a combine:
    // v4i32 build_vector((extract_elt V, 0),
    //                    (extract_elt V, 2),
    //                    (extract_elt V, 4),
    //                    (extract_elt V, 6))
    //  -->
    // v4i32 truncate (bitcast V to v4i64)
    bool isDesirableToCombineBuildVectorToTruncate() const override {
      return true;
    }
    /// Return true if the target has native support for
    /// the specified value type and it is 'desirable' to use the type for the
    /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
    /// instruction encodings are longer and some i16 instructions are slow.
    bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;

    /// Return true if the target has native support for the
    /// specified value type and it is 'desirable' to use the type. e.g. On x86
    /// i16 is legal, but undesirable since i16 instruction encodings are longer
    /// and some i16 instructions are slow.
    bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const override;

    MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr &MI,
                                MachineBasicBlock *MBB) const override;
    /// This method returns the name of a target specific DAG node.
    const char *getTargetNodeName(unsigned Opcode) const override;

    bool isCheapToSpeculateCttz() const override;

    bool isCheapToSpeculateCtlz() const override;

    bool isCtlzFast() const override;

    bool hasBitPreservingFPLogic(EVT VT) const override {
      return VT == MVT::f32 || VT == MVT::f64 || VT.isVector();
    }
    bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const override {
      // If the pair to store is a mixture of float and int values, we will
      // save two bitwise instructions and one float-to-int instruction and
      // increase one store instruction. There is potentially a more
      // significant benefit because it avoids the float->int domain switch
      // for the input value. So it is more likely a win.
      if ((LTy.isFloatingPoint() && HTy.isInteger()) ||
          (LTy.isInteger() && HTy.isFloatingPoint()))
        return true;
      // If the pair only contains int values, we will save two bitwise
      // instructions and increase one store instruction (costing one more
      // store buffer). Since the benefit is less clear, we leave such pairs
      // out until we have a test case proving it is a win.
      return false;
    }
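    // For illustration (a sketch, with a hypothetical FloatBits() helper
    // returning the raw f32 bits): merging a {float, int} pair into a single
    // i64 store would need a domain-crossing move plus shifts, e.g.
    //
    //   uint64_t Merged = (uint64_t)FloatBits(F) | ((uint64_t)I << 32);
    //
    // while two independent 32-bit stores avoid the float->int switch.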
    bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

    bool hasAndNotCompare(SDValue Y) const override;

    bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
      return VT.isScalarInteger();
    }

    /// Vector-sized comparisons are fast using PCMPEQ + PMOVMSK or PTEST.
    MVT hasFastEqualityCompare(unsigned NumBits) const override;
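    /// For illustration only (an assembly sketch, not from the original
    /// header), a 128-bit equality test can be selected roughly as:
    ///
    /// \code
    ///   pcmpeqb  %xmm1, %xmm0   ; bytewise compare, all-ones where equal
    ///   pmovmskb %xmm0, %eax    ; collect one bit per byte
    ///   cmp      $0xffff, %eax  ; equal iff all 16 bits are set
    /// \endcode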
    /// Return the value type to use for ISD::SETCC.
    EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                           EVT VT) const override;

    /// Determine which of the bits specified in Mask are known to be either
    /// zero or one and return them in the KnownZero/KnownOne bitsets.
    void computeKnownBitsForTargetNode(const SDValue Op,
                                       KnownBits &Known,
                                       const APInt &DemandedElts,
                                       const SelectionDAG &DAG,
                                       unsigned Depth = 0) const override;

    /// Determine the number of bits in the operation that are sign bits.
    unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                             const APInt &DemandedElts,
                                             const SelectionDAG &DAG,
                                             unsigned Depth) const override;
    bool isGAPlusOffset(SDNode *N, const GlobalValue* &GA,
                        int64_t &Offset) const override;

    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;

    bool ExpandInlineAsm(CallInst *CI) const override;

    ConstraintType getConstraintType(StringRef Constraint) const override;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight
    getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                   const char *constraint) const override;

    const char *LowerXConstraint(EVT ConstraintVT) const override;
    /// Lower the specified operand into the Ops vector. If it is invalid, don't
    /// add anything to Ops. If hasMemory is true it means one of the asm
    /// constraints of the inline asm instruction being processed is 'm'.
    void LowerAsmOperandForConstraint(SDValue Op,
                                      std::string &Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

    unsigned
    getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
      if (ConstraintCode == "i")
        return InlineAsm::Constraint_i;
      else if (ConstraintCode == "o")
        return InlineAsm::Constraint_o;
      else if (ConstraintCode == "v")
        return InlineAsm::Constraint_v;
      else if (ConstraintCode == "X")
        return InlineAsm::Constraint_X;
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }
    /// Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for the
    /// register. This should only be used for C_Register constraints. On
    /// error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 StringRef Constraint, MVT VT) const override;

    /// Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                               Type *Ty, unsigned AS) const override;

    /// Return true if the specified immediate is a legal
    /// icmp immediate, that is, the target has icmp instructions which can
    /// compare a register against the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    /// Return true if the specified immediate is a legal
    /// add immediate, that is, the target has add instructions which can
    /// add a register and the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalAddImmediate(int64_t Imm) const override;

    /// \brief Return the cost of the scaling factor used in the addressing
    /// mode represented by AM for this target, for a load/store
    /// of the specified type.
    /// If the AM is supported, the return value must be >= 0.
    /// If the AM is not supported, it returns a negative value.
    int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS) const override;
    bool isVectorShiftByScalarCheap(Type *Ty) const override;

    /// Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
    /// register EAX to i16 by referencing its sub-register AX.
    bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
    bool isTruncateFree(EVT VT1, EVT VT2) const override;

    bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

    /// Return true if any actual instruction that defines a
    /// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
    /// register. This does not necessarily include registers defined in
    /// unknown ways, such as incoming arguments, or copies from unknown
    /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
    /// does not necessarily apply to truncate instructions. e.g. on x86-64,
    /// all instructions that define 32-bit values implicitly zero-extend the
    /// result out to 64 bits.
    bool isZExtFree(Type *Ty1, Type *Ty2) const override;
    bool isZExtFree(EVT VT1, EVT VT2) const override;
    bool isZExtFree(SDValue Val, EVT VT2) const override;

    /// Return true if folding a vector load into ExtVal (a sign, zero, or any
    /// extend node) is profitable.
    bool isVectorLoadExtDesirable(SDValue) const override;
    /// Return true if an FMA operation is faster than a pair of fmul and fadd
    /// instructions. fmuladd intrinsics will be expanded to FMAs when this
    /// method returns true, otherwise fmuladd is expanded to fmul + fadd.
    bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

    /// Return true if it's profitable to narrow
    /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
    /// from i32 to i8 but not from i32 to i16.
    bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;

    /// Given an intrinsic, checks whether, on this target, the intrinsic will
    /// need to map to a MemIntrinsicNode (touches memory). If this is the case,
    /// it returns true and stores the intrinsic information into the
    /// IntrinsicInfo that was passed to the function.
    bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                            unsigned Intrinsic) const override;
    /// Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

    /// Targets can use this to indicate that they only support *some*
    /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
    /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to
    /// be legal.
    bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
                            EVT VT) const override;

    /// Similar to isShuffleMaskLegal. Targets can use this to indicate
    /// whether there is a suitable VECTOR_SHUFFLE that can be used to
    /// replace a VAND with a constant pool entry.
    bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                EVT VT) const override;
    /// If true, then instruction selection should
    /// seek to shrink the FP constant of the specified type to a smaller type
    /// in order to save space and / or reduce runtime.
    bool ShouldShrinkFPConstant(EVT VT) const override {
      // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
      // expensive than a straight movsd. On the other hand, it's important to
      // shrink long double fp constants since fldt is very slow.
      return !X86ScalarSSEf64 || VT == MVT::f80;
    }
    /// Return true if we believe it is correct and profitable to reduce the
    /// load node to a smaller type.
    bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                               EVT NewVT) const override;

    /// Return true if the specified scalar FP type is computed in an SSE
    /// register, not on the X87 floating point stack.
    bool isScalarFPTypeInSSEReg(EVT VT) const {
      return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 needs SSE2
             (VT == MVT::f32 && X86ScalarSSEf32);   // f32 needs SSE1
    }
    /// \brief Returns true if it is beneficial to convert a load of a constant
    /// to just the constant itself.
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    bool convertSelectOfConstantsToMath() const override {
      return true;
    }

    /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
    /// with this index.
    bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const override;

    /// Intel processors have a unified instruction and data cache.
    const char * getClearCacheBuiltinName() const override {
      return nullptr; // nothing to do, move along.
    }
    unsigned getRegisterByName(const char* RegName, EVT VT,
                               SelectionDAG &DAG) const override;

    /// If a physical register, this returns the register that receives the
    /// exception address on entry to an EH pad.
    unsigned
    getExceptionPointerRegister(const Constant *PersonalityFn) const override;

    /// If a physical register, this returns the register that receives the
    /// exception typeid on entry to a landing pad.
    unsigned
    getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

    bool needsFixedCatchObjects() const override;
    /// This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo) const override;

    /// If the target has a standard location for the stack protector cookie,
    /// returns the address of that location. Otherwise, returns nullptr.
    Value *getIRStackGuard(IRBuilder<> &IRB) const override;

    bool useLoadStackGuardNode() const override;
    void insertSSPDeclarations(Module &M) const override;
    Value *getSDagStackGuard(const Module &M) const override;
    Value *getSSPStackGuardCheck(const Module &M) const override;

    /// Return true if the target stores SafeStack pointer at a fixed offset in
    /// some non-standard address space, and populates the address space and
    /// offset as appropriate.
    Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

    SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                      SelectionDAG &DAG) const;
    bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;

    /// \brief Customize the preferred legalization strategy for certain types.
    LegalizeTypeAction getPreferredVectorAction(EVT VT) const override;

    bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

    bool supportSwiftError() const override;

    StringRef getStackProbeSymbolName(MachineFunction &MF) const override;

    unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

    /// \brief Lower interleaved load(s) into target specific
    /// instructions/intrinsics.
    bool lowerInterleavedLoad(LoadInst *LI,
                              ArrayRef<ShuffleVectorInst *> Shuffles,
                              ArrayRef<unsigned> Indices,
                              unsigned Factor) const override;

    /// \brief Lower interleaved store(s) into target specific
    /// instructions/intrinsics.
    bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                               unsigned Factor) const override;
    void finalizeLowering(MachineFunction &MF) const override;

  protected:
    std::pair<const TargetRegisterClass *, uint8_t>
    findRepresentativeClass(const TargetRegisterInfo *TRI,
                            MVT VT) const override;

  private:
    /// Keep a reference to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget &Subtarget;
    /// Select between SSE or x87 floating point ops.
    /// When SSE is available, use it for f32 operations.
    /// When SSE2 is available, use it for f64 operations.
    bool X86ScalarSSEf32;
    bool X86ScalarSSEf64;

    /// A list of legal FP immediates.
    std::vector<APFloat> LegalFPImmediates;

    /// Indicate that this x86 target can instruction
    /// select the specified FP immediate natively.
    void addLegalFPImmediate(const APFloat& Imm) {
      LegalFPImmediates.push_back(Imm);
    }
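    // For illustration only (a sketch, not part of the original header): the
    // constructor would typically register immediates that have cheap
    // materialization patterns, e.g.
    //
    //   addLegalFPImmediate(APFloat(+0.0f)); // materialized with xorps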
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            const SDLoc &dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals,
                            uint32_t *RegMask) const;
    SDValue LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
                             const SmallVectorImpl<ISD::InputArg> &ArgInfo,
                             const SDLoc &dl, SelectionDAG &DAG,
                             const CCValAssign &VA, MachineFrameInfo &MFI,
                             unsigned i) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             const SDLoc &dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    // Call lowering helpers.

    /// Check whether the call is eligible for tail call optimization. Targets
    /// that want to do tail call optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                           Type *RetTy,
                                           const SmallVectorImpl<ISD::OutputArg> &Outs,
                                           const SmallVectorImpl<SDValue> &OutVals,
                                           const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall,
                                    bool Is64Bit, int FPDiff,
                                    const SDLoc &dl) const;

    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                         SelectionDAG &DAG) const;
    unsigned getAddressSpace(void) const;

    std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                               bool IsSigned,
                                               bool isReplace) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const;
    SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;

    unsigned getGlobalWrapperKind(const GlobalValue *GV = nullptr) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(const GlobalValue *GV, const SDLoc &dl,
                               int64_t Offset, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToBT(SDValue And, ISD::CondCode CC, const SDLoc &dl,
                      SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGC_TRANSITION_START(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGC_TRANSITION_END(SDValue Op, SelectionDAG &DAG) const;
    SDValue
    LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         const SDLoc &dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const override;
    SDValue LowerCall(CallLoweringInfo &CLI,
                      SmallVectorImpl<SDValue> &InVals) const override;

    SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
                        const SDLoc &dl, SelectionDAG &DAG) const override;
    bool supportSplitCSR(MachineFunction *MF) const override {
      return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
             MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
    }
    void initializeSplitCSR(MachineBasicBlock *Entry) const override;
    void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
    bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

    bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

    EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                            ISD::NodeType ExtendKind) const override;

    bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                        bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        LLVMContext &Context) const override;

    const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

    TargetLoweringBase::AtomicExpansionKind
    shouldExpandAtomicLoadInIR(LoadInst *SI) const override;
    bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
    TargetLoweringBase::AtomicExpansionKind
    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

    LoadInst *
    lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;

    bool needsCmpXchgNb(Type *MemType) const;

    void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;
    // Utility function to emit the low-level va_arg code for X86-64.
    MachineBasicBlock *
    EmitVAARG64WithCustomInserter(MachineInstr &MI,
                                  MachineBasicBlock *MBB) const;

    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *
    EmitVAStartSaveXMMRegsWithCustomInserter(MachineInstr &BInstr,
                                             MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSelect(MachineInstr &I,
                                         MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredAtomicFP(MachineInstr &I,
                                           MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                           MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
                                           MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr &MI,
                                            MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredTLSAddr(MachineInstr &MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr &MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
                                        MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitFMA3Instr(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const;

    MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr &MI,
                                             MachineBasicBlock *MBB) const;
    /// Emit nodes that will be selected as "test Op0,Op0", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, const SDLoc &dl,
                     SelectionDAG &DAG) const;

    /// Emit nodes that will be selected as "cmp Op0,Op1", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, const SDLoc &dl,
                    SelectionDAG &DAG) const;

    /// Convert a comparison if required by the subtarget.
    SDValue ConvertCmpIfNecessary(SDValue Cmp, SelectionDAG &DAG) const;
    /// Check if replacement of SQRT with RSQRT should be disabled.
    bool isFsqrtCheap(SDValue Operand, SelectionDAG &DAG) const override;

    /// Use rsqrt* to speed up sqrt calculations.
    SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                            int &RefinementSteps, bool &UseOneConstNR,
                            bool Reciprocal) const override;
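    /// For illustration only (not part of the original header): each
    /// Newton-Raphson refinement step for 1/sqrt(d) computes
    ///
    /// \code
    ///   x1 = x0 * (1.5 - 0.5 * d * x0 * x0);
    /// \endcode
    ///
    /// roughly doubling the number of correct bits per step.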
    /// Use rcp* to speed up fdiv calculations.
    SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                             int &RefinementSteps) const override;

    /// Reassociate floating point divisions into multiply by reciprocal.
    unsigned combineRepeatedFPDivisors() const override;
  };

  namespace X86 {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  } // end namespace X86
  // Base class for all X86 non-masked store operations.
  class X86StoreSDNode : public MemSDNode {
  public:
    X86StoreSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
                   SDVTList VTs, EVT MemVT,
                   MachineMemOperand *MMO)
      : MemSDNode(Opcode, Order, dl, VTs, MemVT, MMO) {}
    const SDValue &getValue() const { return getOperand(1); }
    const SDValue &getBasePtr() const { return getOperand(2); }

    static bool classof(const SDNode *N) {
      return N->getOpcode() == X86ISD::VTRUNCSTORES ||
             N->getOpcode() == X86ISD::VTRUNCSTOREUS;
    }
  };
  // Base class for all X86 masked store operations.
  // The class has the same order of operands as MaskedStoreSDNode for
  // convenience.
  class X86MaskedStoreSDNode : public MemSDNode {
  public:
    X86MaskedStoreSDNode(unsigned Opcode, unsigned Order,
                         const DebugLoc &dl, SDVTList VTs, EVT MemVT,
                         MachineMemOperand *MMO)
      : MemSDNode(Opcode, Order, dl, VTs, MemVT, MMO) {}

    const SDValue &getBasePtr() const { return getOperand(1); }
    const SDValue &getMask() const { return getOperand(2); }
    const SDValue &getValue() const { return getOperand(3); }

    static bool classof(const SDNode *N) {
      return N->getOpcode() == X86ISD::VMTRUNCSTORES ||
             N->getOpcode() == X86ISD::VMTRUNCSTOREUS;
    }
  };
  // X86 Truncating Store with Signed saturation.
  class TruncSStoreSDNode : public X86StoreSDNode {
  public:
    TruncSStoreSDNode(unsigned Order, const DebugLoc &dl,
                      SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
      : X86StoreSDNode(X86ISD::VTRUNCSTORES, Order, dl, VTs, MemVT, MMO) {}

    static bool classof(const SDNode *N) {
      return N->getOpcode() == X86ISD::VTRUNCSTORES;
    }
  };
  // X86 Truncating Store with Unsigned saturation.
  class TruncUSStoreSDNode : public X86StoreSDNode {
  public:
    TruncUSStoreSDNode(unsigned Order, const DebugLoc &dl,
                       SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
      : X86StoreSDNode(X86ISD::VTRUNCSTOREUS, Order, dl, VTs, MemVT, MMO) {}

    static bool classof(const SDNode *N) {
      return N->getOpcode() == X86ISD::VTRUNCSTOREUS;
    }
  };
  // X86 Truncating Masked Store with Signed saturation.
  class MaskedTruncSStoreSDNode : public X86MaskedStoreSDNode {
  public:
    MaskedTruncSStoreSDNode(unsigned Order,
                            const DebugLoc &dl, SDVTList VTs, EVT MemVT,
                            MachineMemOperand *MMO)
      : X86MaskedStoreSDNode(X86ISD::VMTRUNCSTORES, Order, dl, VTs, MemVT, MMO) {}

    static bool classof(const SDNode *N) {
      return N->getOpcode() == X86ISD::VMTRUNCSTORES;
    }
  };
  // X86 Truncating Masked Store with Unsigned saturation.
  class MaskedTruncUSStoreSDNode : public X86MaskedStoreSDNode {
  public:
    MaskedTruncUSStoreSDNode(unsigned Order,
                             const DebugLoc &dl, SDVTList VTs, EVT MemVT,
                             MachineMemOperand *MMO)
      : X86MaskedStoreSDNode(X86ISD::VMTRUNCSTOREUS, Order, dl, VTs, MemVT, MMO) {}

    static bool classof(const SDNode *N) {
      return N->getOpcode() == X86ISD::VMTRUNCSTOREUS;
    }
  };
  // X86 specific Gather node.
  class X86MaskedGatherSDNode : public MaskedGatherScatterSDNode {
  public:
    X86MaskedGatherSDNode(unsigned Order,
                          const DebugLoc &dl, SDVTList VTs, EVT MemVT,
                          MachineMemOperand *MMO)
      : MaskedGatherScatterSDNode(X86ISD::MGATHER, Order, dl, VTs, MemVT, MMO)
    {}

    static bool classof(const SDNode *N) {
      return N->getOpcode() == X86ISD::MGATHER;
    }
  };
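  // For illustration only (a sketch, not part of the original header): the
  // classof hooks above plug these nodes into LLVM's RTTI, so callers can
  // write, e.g. (use(...) is a placeholder):
  //
  //   if (auto *St = dyn_cast<TruncSStoreSDNode>(N))
  //     use(St->getValue(), St->getBasePtr());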
} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_X86ISELLOWERING_H