1 //===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This file describes how to lower LLVM code to machine code. This has two
14 /// 1. Which ValueTypes are natively supported by the target.
15 /// 2. Which operations are supported for supported ValueTypes.
16 /// 3. Cost thresholds for alternative implementations of certain operations.
18 /// In addition it has a few other components, like information about FP
21 //===----------------------------------------------------------------------===//
23 #ifndef LLVM_TARGET_TARGETLOWERING_H
24 #define LLVM_TARGET_TARGETLOWERING_H
26 #include "llvm/ADT/DenseMap.h"
27 #include "llvm/CodeGen/DAGCombine.h"
28 #include "llvm/CodeGen/RuntimeLibcalls.h"
29 #include "llvm/CodeGen/SelectionDAGNodes.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/CallSite.h"
32 #include "llvm/IR/CallingConv.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/InlineAsm.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/MC/MCRegisterInfo.h"
37 #include "llvm/Target/TargetCallingConv.h"
38 #include "llvm/Target/TargetMachine.h"
44 class BranchProbability;
49 class FunctionLoweringInfo;
50 class ImmutableCallSite;
52 class MachineBasicBlock;
53 class MachineFunction;
55 class MachineJumpTableInfo;
57 class MachineRegisterInfo;
62 template<typename T> class SmallVectorImpl;
64 class TargetRegisterClass;
65 class TargetLibraryInfo;
66 class TargetLoweringObjectFile;
71 None, // No preference
72 Source, // Follow source order.
73 RegPressure, // Scheduling for lowest register pressure.
74 Hybrid, // Scheduling for both latency and register pressure.
75 ILP, // Scheduling for ILP in low register pressure mode.
76 VLIW // Scheduling for VLIW targets.
80 /// This base class for TargetLowering contains the SelectionDAG-independent
81 /// parts that can be used from the rest of CodeGen.
82 class TargetLoweringBase {
83 TargetLoweringBase(const TargetLoweringBase&) = delete;
84 void operator=(const TargetLoweringBase&) = delete;
87 /// This enum indicates whether operations are valid for a target, and if not,
88 /// what action should be used to make them valid.
89 enum LegalizeAction : uint8_t {
90 Legal, // The target natively supports this operation.
91 Promote, // This operation should be executed in a larger type.
92 Expand, // Try to expand this to other ops, otherwise use a libcall.
93 LibCall, // Don't try to expand this to other ops, always use a libcall.
94 Custom // Use the LowerOperation hook to implement custom lowering.
97 /// This enum indicates whether a types are legal for a target, and if not,
98 /// what action should be used to make them valid.
99 enum LegalizeTypeAction : uint8_t {
100 TypeLegal, // The target natively supports this type.
101 TypePromoteInteger, // Replace this integer with a larger one.
102 TypeExpandInteger, // Split this integer into two of half the size.
103 TypeSoftenFloat, // Convert this float to a same size integer type,
104 // if an operation is not supported in target HW.
105 TypeExpandFloat, // Split this float into two of half the size.
106 TypeScalarizeVector, // Replace this one-element vector with its element.
107 TypeSplitVector, // Split this vector into two of half the size.
108 TypeWidenVector, // This vector should be widened into a larger vector.
109 TypePromoteFloat // Replace this float with a larger one.
112 /// LegalizeKind holds the legalization kind that needs to happen to EVT
113 /// in order to type-legalize it.
114 typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
116 /// Enum that describes how the target represents true/false values.
117 enum BooleanContent {
118 UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
119 ZeroOrOneBooleanContent, // All bits zero except for bit 0.
120 ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
123 /// Enum that describes what type of support for selects the target has.
124 enum SelectSupportKind {
125 ScalarValSelect, // The target supports scalar selects (ex: cmov).
126 ScalarCondVectorVal, // The target supports selects with a scalar condition
127 // and vector values (ex: cmov).
128 VectorMaskSelect // The target supports vector selects with a vector
129 // mask (ex: x86 blends).
132 /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
133 /// to, if at all. Exists because different targets have different levels of
134 /// support for these atomic instructions, and also have different options
135 /// w.r.t. what they should expand to.
136 enum class AtomicExpansionKind {
137 None, // Don't expand the instruction.
138 LLSC, // Expand the instruction into loadlinked/storeconditional; used
140 LLOnly, // Expand the (load) instruction into just a load-linked, which has
141 // greater atomic guarantees than a normal load.
142 CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
145 /// Enum that specifies when a multiplication should be expanded.
146 enum class MulExpansionKind {
147 Always, // Always expand the instruction.
148 OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
152 static ISD::NodeType getExtendForContent(BooleanContent Content) {
154 case UndefinedBooleanContent:
155 // Extend by adding rubbish bits.
156 return ISD::ANY_EXTEND;
157 case ZeroOrOneBooleanContent:
158 // Extend by adding zero bits.
159 return ISD::ZERO_EXTEND;
160 case ZeroOrNegativeOneBooleanContent:
161 // Extend by copying the sign bit.
162 return ISD::SIGN_EXTEND;
164 llvm_unreachable("Invalid content kind");
167 /// NOTE: The TargetMachine owns TLOF.
168 explicit TargetLoweringBase(const TargetMachine &TM);
169 virtual ~TargetLoweringBase() {}
172 /// \brief Initialize all of the actions to default values.
176 const TargetMachine &getTargetMachine() const { return TM; }
178 virtual bool useSoftFloat() const { return false; }
180 /// Return the pointer type for the given address space, defaults to
181 /// the pointer type from the data layout.
182 /// FIXME: The default needs to be removed once all the code is updated.
183 MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
184 return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
187 /// EVT is not used in-tree, but is used by out-of-tree target.
188 /// A documentation for this function would be nice...
189 virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
191 EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const;
193 /// Returns the type to be used for the index operand of:
194 /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
195 /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
196 virtual MVT getVectorIdxTy(const DataLayout &DL) const {
197 return getPointerTy(DL);
200 virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
204 /// Return true if multiple condition registers are available.
205 bool hasMultipleConditionRegisters() const {
206 return HasMultipleConditionRegisters;
209 /// Return true if the target has BitExtract instructions.
210 bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
212 /// Return the preferred vector type legalization action.
213 virtual TargetLoweringBase::LegalizeTypeAction
214 getPreferredVectorAction(EVT VT) const {
215 // The default action for one element vectors is to scalarize
216 if (VT.getVectorNumElements() == 1)
217 return TypeScalarizeVector;
218 // The default action for other vectors is to promote
219 return TypePromoteInteger;
222 // There are two general methods for expanding a BUILD_VECTOR node:
223 // 1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
225 // 2. Build the vector on the stack and then load it.
226 // If this function returns true, then method (1) will be used, subject to
227 // the constraint that all of the necessary shuffles are legal (as determined
228 // by isShuffleMaskLegal). If this function returns false, then method (2) is
229 // always used. The vector type, and the number of defined values, are
232 shouldExpandBuildVectorWithShuffles(EVT /* VT */,
233 unsigned DefinedValues) const {
234 return DefinedValues < 3;
237 /// Return true if integer divide is usually cheaper than a sequence of
238 /// several shifts, adds, and multiplies for this target.
239 /// The definition of "cheaper" may depend on whether we're optimizing
240 /// for speed or for size.
241 virtual bool isIntDivCheap(EVT VT, AttributeSet Attr) const {
245 /// Return true if the target can handle a standalone remainder operation.
246 virtual bool hasStandaloneRem(EVT VT) const {
250 /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
251 virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
252 // Default behavior is to replace SQRT(X) with X*RSQRT(X).
256 /// Reciprocal estimate status values used by the functions below.
257 enum ReciprocalEstimate : int {
263 /// Return a ReciprocalEstimate enum value for a square root of the given type
264 /// based on the function's attributes. If the operation is not overridden by
265 /// the function's attributes, "Unspecified" is returned and target defaults
266 /// are expected to be used for instruction selection.
267 int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;
269 /// Return a ReciprocalEstimate enum value for a division of the given type
270 /// based on the function's attributes. If the operation is not overridden by
271 /// the function's attributes, "Unspecified" is returned and target defaults
272 /// are expected to be used for instruction selection.
273 int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;
275 /// Return the refinement step count for a square root of the given type based
276 /// on the function's attributes. If the operation is not overridden by
277 /// the function's attributes, "Unspecified" is returned and target defaults
278 /// are expected to be used for instruction selection.
279 int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;
281 /// Return the refinement step count for a division of the given type based
282 /// on the function's attributes. If the operation is not overridden by
283 /// the function's attributes, "Unspecified" is returned and target defaults
284 /// are expected to be used for instruction selection.
285 int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
287 /// Returns true if target has indicated at least one type should be bypassed.
288 bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
290 /// Returns map of slow types for division or remainder with corresponding
292 const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
293 return BypassSlowDivWidths;
296 /// Return true if Flow Control is an expensive operation that should be
298 bool isJumpExpensive() const { return JumpIsExpensive; }
300 /// Return true if selects are only cheaper than branches if the branch is
301 /// unlikely to be predicted right.
302 bool isPredictableSelectExpensive() const {
303 return PredictableSelectIsExpensive;
306 /// If a branch or a select condition is skewed in one direction by more than
307 /// this factor, it is very likely to be predicted correctly.
308 virtual BranchProbability getPredictableBranchThreshold() const;
310 /// Return true if the following transform is beneficial:
311 /// fold (conv (load x)) -> (load (conv*)x)
312 /// On architectures that don't natively support some vector loads
313 /// efficiently, casting the load to a smaller vector of larger types and
314 /// loading is more efficient, however, this can be undone by optimizations in
316 virtual bool isLoadBitCastBeneficial(EVT LoadVT,
317 EVT BitcastVT) const {
318 // Don't do if we could do an indexed load on the original type, but not on
320 if (!LoadVT.isSimple() || !BitcastVT.isSimple())
323 MVT LoadMVT = LoadVT.getSimpleVT();
325 // Don't bother doing this if it's just going to be promoted again later, as
326 // doing so might interfere with other combines.
327 if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
328 getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
334 /// Return true if the following transform is beneficial:
335 /// (store (y (conv x)), y*)) -> (store x, (x*))
336 virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT) const {
337 // Default to the same logic as loads.
338 return isLoadBitCastBeneficial(StoreVT, BitcastVT);
341 /// Return true if it is expected to be cheaper to do a store of a non-zero
342 /// vector constant with the given size and type for the address space than to
343 /// store the individual scalar element constants.
344 virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
346 unsigned AddrSpace) const {
350 /// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
351 virtual bool isCheapToSpeculateCttz() const {
355 /// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
356 virtual bool isCheapToSpeculateCtlz() const {
360 /// \brief Return true if ctlz instruction is fast.
361 virtual bool isCtlzFast() const {
365 /// Return true if it is safe to transform an integer-domain bitwise operation
366 /// into the equivalent floating-point operation. This should be set to true
367 /// if the target has IEEE-754-compliant fabs/fneg operations for the input
369 virtual bool hasBitPreservingFPLogic(EVT VT) const {
373 /// \brief Return true if it is cheaper to split the store of a merged int val
374 /// from a pair of smaller values into multiple stores.
375 virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
379 /// \brief Return if the target supports combining a
382 /// %andResult = and %val1, #imm-with-one-bit-set;
383 /// %icmpResult = icmp %andResult, 0
384 /// br i1 %icmpResult, label %dest1, label %dest2
386 /// into a single machine instruction of a form like:
388 /// brOnBitSet %register, #bitNumber, dest
390 bool isMaskAndBranchFoldingLegal() const {
391 return MaskAndBranchFoldingIsLegal;
394 /// Return true if the target should transform:
395 /// (X & Y) == Y ---> (~X & Y) == 0
396 /// (X & Y) != Y ---> (~X & Y) != 0
398 /// This may be profitable if the target has a bitwise and-not operation that
399 /// sets comparison flags. A target may want to limit the transformation based
400 /// on the type of Y or if Y is a constant.
402 /// Note that the transform will not occur if Y is known to be a power-of-2
403 /// because a mask and compare of a single bit can be handled by inverting the
404 /// predicate, for example:
405 /// (X & 8) == 8 ---> (X & 8) != 0
406 virtual bool hasAndNotCompare(SDValue Y) const {
410 /// Return true if the target has a bitwise and-not operation:
412 /// This can be used to simplify select or other instructions.
413 virtual bool hasAndNot(SDValue X) const {
414 // If the target has the more complex version of this operation, assume that
415 // it has this operation too.
416 return hasAndNotCompare(X);
419 /// \brief Return true if the target wants to use the optimization that
420 /// turns ext(promotableInst1(...(promotableInstN(load)))) into
421 /// promotedInst1(...(promotedInstN(ext(load)))).
422 bool enableExtLdPromotion() const { return EnableExtLdPromotion; }
424 /// Return true if the target can combine store(extractelement VectorTy,
426 /// \p Cost[out] gives the cost of that transformation when this is true.
427 virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
428 unsigned &Cost) const {
432 /// Return true if target supports floating point exceptions.
433 bool hasFloatingPointExceptions() const {
434 return HasFloatingPointExceptions;
437 /// Return true if target always beneficiates from combining into FMA for a
438 /// given value type. This must typically return false on targets where FMA
439 /// takes more cycles to execute than FADD.
440 virtual bool enableAggressiveFMAFusion(EVT VT) const {
444 /// Return the ValueType of the result of SETCC operations.
445 virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
448 /// Return the ValueType for comparison libcalls. Comparions libcalls include
449 /// floating point comparion calls, and Ordered/Unordered check calls on
450 /// floating point numbers.
452 MVT::SimpleValueType getCmpLibcallReturnType() const;
454 /// For targets without i1 registers, this gives the nature of the high-bits
455 /// of boolean values held in types wider than i1.
457 /// "Boolean values" are special true/false values produced by nodes like
458 /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
459 /// Not to be confused with general values promoted from i1. Some cpus
460 /// distinguish between vectors of boolean and scalars; the isVec parameter
461 /// selects between the two kinds. For example on X86 a scalar boolean should
462 /// be zero extended from i1, while the elements of a vector of booleans
463 /// should be sign extended from i1.
465 /// Some cpus also treat floating point types the same way as they treat
466 /// vectors instead of the way they treat scalars.
467 BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
469 return BooleanVectorContents;
470 return isFloat ? BooleanFloatContents : BooleanContents;
473 BooleanContent getBooleanContents(EVT Type) const {
474 return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
477 /// Return target scheduling preference.
478 Sched::Preference getSchedulingPreference() const {
479 return SchedPreferenceInfo;
482 /// Some scheduler, e.g. hybrid, can switch to different scheduling heuristics
483 /// for different nodes. This function returns the preference (or none) for
485 virtual Sched::Preference getSchedulingPreference(SDNode *) const {
489 /// Return the register class that should be used for the specified value
491 virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
492 const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
493 assert(RC && "This value type is not natively supported!");
497 /// Return the 'representative' register class for the specified value
500 /// The 'representative' register class is the largest legal super-reg
501 /// register class for the register class of the value type. For example, on
502 /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
503 /// register class is GR64 on x86_64.
504 virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
505 const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
509 /// Return the cost of the 'representative' register class for the specified
511 virtual uint8_t getRepRegClassCostFor(MVT VT) const {
512 return RepRegClassCostForVT[VT.SimpleTy];
515 /// Return true if the target has native support for the specified value type.
516 /// This means that it has a register that directly holds it without
517 /// promotions or expansions.
518 bool isTypeLegal(EVT VT) const {
519 assert(!VT.isSimple() ||
520 (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
521 return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
524 class ValueTypeActionImpl {
525 /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
526 /// that indicates how instruction selection should deal with the type.
527 LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];
530 ValueTypeActionImpl() {
531 std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
535 LegalizeTypeAction getTypeAction(MVT VT) const {
536 return ValueTypeActions[VT.SimpleTy];
539 void setTypeAction(MVT VT, LegalizeTypeAction Action) {
540 ValueTypeActions[VT.SimpleTy] = Action;
544 const ValueTypeActionImpl &getValueTypeActions() const {
545 return ValueTypeActions;
548 /// Return how we should legalize values of this type, either it is already
549 /// legal (return 'Legal') or we need to promote it to a larger type (return
550 /// 'Promote'), or we need to expand it into multiple registers of smaller
551 /// integer type (return 'Expand'). 'Custom' is not an option.
552 LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
553 return getTypeConversion(Context, VT).first;
555 LegalizeTypeAction getTypeAction(MVT VT) const {
556 return ValueTypeActions.getTypeAction(VT);
559 /// For types supported by the target, this is an identity function. For
560 /// types that must be promoted to larger types, this returns the larger type
561 /// to promote to. For integer types that are larger than the largest integer
562 /// register, this contains one step in the expansion to get to the smaller
563 /// register. For illegal floating point types, this returns the integer type
565 EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
566 return getTypeConversion(Context, VT).second;
569 /// For types supported by the target, this is an identity function. For
570 /// types that must be expanded (i.e. integer types that are larger than the
571 /// largest integer register or illegal floating point types), this returns
572 /// the largest legal type it will be expanded to.
573 EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
574 assert(!VT.isVector());
576 switch (getTypeAction(Context, VT)) {
579 case TypeExpandInteger:
580 VT = getTypeToTransformTo(Context, VT);
583 llvm_unreachable("Type is not legal nor is it to be expanded!");
588 /// Vector types are broken down into some number of legal first class types.
589 /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
590 /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
591 /// turns into 4 EVT::i32 values with both PPC and X86.
593 /// This method returns the number of registers needed, and the VT for each
594 /// register. It also returns the VT and quantity of the intermediate values
595 /// before they are promoted/expanded.
596 unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
598 unsigned &NumIntermediates,
599 MVT &RegisterVT) const;
601 struct IntrinsicInfo {
602 unsigned opc; // target opcode
603 EVT memVT; // memory VT
604 const Value* ptrVal; // value representing memory location
605 int offset; // offset off of ptrVal
606 unsigned size; // the size of the memory location
607 // (taken from memVT if zero)
608 unsigned align; // alignment
609 bool vol; // is volatile?
610 bool readMem; // reads memory?
611 bool writeMem; // writes memory?
613 IntrinsicInfo() : opc(0), ptrVal(nullptr), offset(0), size(0), align(1),
614 vol(false), readMem(false), writeMem(false) {}
617 /// Given an intrinsic, checks if on the target the intrinsic will need to map
618 /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
619 /// true and store the intrinsic information into the IntrinsicInfo that was
620 /// passed to the function.
621 virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
622 unsigned /*Intrinsic*/) const {
626 /// Returns true if the target can instruction select the specified FP
627 /// immediate natively. If false, the legalizer will materialize the FP
628 /// immediate as a load from a constant pool.
629 virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
633 /// Targets can use this to indicate that they only support *some*
634 /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
635 /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
637 virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
642 /// Returns true if the operation can trap for the value type.
644 /// VT must be a legal type. By default, we optimistically assume most
645 /// operations don't trap except for integer divide and remainder.
646 virtual bool canOpTrap(unsigned Op, EVT VT) const;
648 /// Similar to isShuffleMaskLegal. This is used by Targets can use this to
649 /// indicate if there is a suitable VECTOR_SHUFFLE that can be used to replace
650 /// a VAND with a constant pool entry.
651 virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
656 /// Return how this operation should be treated: either it is legal, needs to
657 /// be promoted to a larger size, needs to be expanded to some other code
658 /// sequence, or the target has a custom expander for it.
659 LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
660 if (VT.isExtended()) return Expand;
661 // If a target-specific SDNode requires legalization, require the target
662 // to provide custom legalization for it.
663 if (Op > array_lengthof(OpActions[0])) return Custom;
664 return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
667 /// Return true if the specified operation is legal on this target or can be
668 /// made legal with custom lowering. This is used to help guide high-level
669 /// lowering decisions.
670 bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
671 return (VT == MVT::Other || isTypeLegal(VT)) &&
672 (getOperationAction(Op, VT) == Legal ||
673 getOperationAction(Op, VT) == Custom);
676 /// Return true if the specified operation is legal on this target or can be
677 /// made legal using promotion. This is used to help guide high-level lowering
679 bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
680 return (VT == MVT::Other || isTypeLegal(VT)) &&
681 (getOperationAction(Op, VT) == Legal ||
682 getOperationAction(Op, VT) == Promote);
685 /// Return true if the specified operation is legal on this target or can be
686 /// made legal with custom lowering or using promotion. This is used to help
687 /// guide high-level lowering decisions.
688 bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT) const {
689 return (VT == MVT::Other || isTypeLegal(VT)) &&
690 (getOperationAction(Op, VT) == Legal ||
691 getOperationAction(Op, VT) == Custom ||
692 getOperationAction(Op, VT) == Promote);
695 /// Return true if the specified operation is illegal but has a custom lowering
696 /// on that type. This is used to help guide high-level lowering
698 bool isOperationCustom(unsigned Op, EVT VT) const {
699 return (!isTypeLegal(VT) && getOperationAction(Op, VT) == Custom);
702 /// Return true if the specified operation is illegal on this target or
703 /// unlikely to be made legal with custom lowering. This is used to help guide
704 /// high-level lowering decisions.
705 bool isOperationExpand(unsigned Op, EVT VT) const {
706 return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
709 /// Return true if the specified operation is legal on this target.
710 bool isOperationLegal(unsigned Op, EVT VT) const {
711 return (VT == MVT::Other || isTypeLegal(VT)) &&
712 getOperationAction(Op, VT) == Legal;
715 /// Return how this load with extension should be treated: either it is legal,
716 /// needs to be promoted to a larger size, needs to be expanded to some other
717 /// code sequence, or the target has a custom expander for it.
718 LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
720 if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
721 unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
722 unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
723 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
724 MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
725 unsigned Shift = 4 * ExtType;
726 return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
729 /// Return true if the specified load with extension is legal on this target.
730 bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
731 return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
734 /// Return true if the specified load with extension is legal or custom
736 bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
737 return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
738 getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
741 /// Return how this store with truncation should be treated: either it is
742 /// legal, needs to be promoted to a larger size, needs to be expanded to some
743 /// other code sequence, or the target has a custom expander for it.
744 LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
745 if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
746 unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
747 unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
748 assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
749 "Table isn't big enough!");
750 return TruncStoreActions[ValI][MemI];
753 /// Return true if the specified store with truncation is legal on this
755 bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
756 return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
759 /// Return true if the specified store with truncation has solution on this
761 bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
762 return isTypeLegal(ValVT) &&
763 (getTruncStoreAction(ValVT, MemVT) == Legal ||
764 getTruncStoreAction(ValVT, MemVT) == Custom);
767 /// Return how the indexed load should be treated: either it is legal, needs
768 /// to be promoted to a larger size, needs to be expanded to some other code
769 /// sequence, or the target has a custom expander for it.
771 getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
772 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
773 "Table isn't big enough!");
774 unsigned Ty = (unsigned)VT.SimpleTy;
775 return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
778 /// Return true if the specified indexed load is legal on this target.
779 bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
780 return VT.isSimple() &&
781 (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
782 getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
785 /// Return how the indexed store should be treated: either it is legal, needs
786 /// to be promoted to a larger size, needs to be expanded to some other code
787 /// sequence, or the target has a custom expander for it.
789 getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
790 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
791 "Table isn't big enough!");
792 unsigned Ty = (unsigned)VT.SimpleTy;
793 return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
796 /// Return true if the specified indexed load is legal on this target.
797 bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
798 return VT.isSimple() &&
799 (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
800 getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
803 /// Return how the condition code should be treated: either it is legal, needs
804 /// to be expanded to some other code sequence, or the target has a custom
807 getCondCodeAction(ISD::CondCode CC, MVT VT) const {
808 assert((unsigned)CC < array_lengthof(CondCodeActions) &&
809 ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
810 "Table isn't big enough!");
811 // See setCondCodeAction for how this is encoded.
812 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
813 uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
814 LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
815 assert(Action != Promote && "Can't promote condition code!");
819 /// Return true if the specified condition code is legal on this target.
820 bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
822 getCondCodeAction(CC, VT) == Legal ||
823 getCondCodeAction(CC, VT) == Custom;
827 /// If the action for this operation is to promote, this method returns the
828 /// ValueType to promote to.
829 MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
830 assert(getOperationAction(Op, VT) == Promote &&
831 "This operation isn't promoted!");
833 // See if this has an explicit type specified.
834 std::map<std::pair<unsigned, MVT::SimpleValueType>,
835 MVT::SimpleValueType>::const_iterator PTTI =
836 PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
837 if (PTTI != PromoteToType.end()) return PTTI->second;
839 assert((VT.isInteger() || VT.isFloatingPoint()) &&
840 "Cannot autopromote this type, add it with AddPromotedToType.");
844 NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
845 assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
846 "Didn't find type to promote to!");
847 } while (!isTypeLegal(NVT) ||
848 getOperationAction(Op, NVT) == Promote);
852 /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
853 /// operations except for the pointer size. If AllowUnknown is true, this
854 /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
855 /// otherwise it will assert.
856 EVT getValueType(const DataLayout &DL, Type *Ty,
857 bool AllowUnknown = false) const {
858 // Lower scalar pointers to native pointer types.
859 if (PointerType *PTy = dyn_cast<PointerType>(Ty))
860 return getPointerTy(DL, PTy->getAddressSpace());
862 if (Ty->isVectorTy()) {
863 VectorType *VTy = cast<VectorType>(Ty);
864 Type *Elm = VTy->getElementType();
865 // Lower vectors of pointers to native pointer types.
866 if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
867 EVT PointerTy(getPointerTy(DL, PT->getAddressSpace()));
868 Elm = PointerTy.getTypeForEVT(Ty->getContext());
871 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
872 VTy->getNumElements());
874 return EVT::getEVT(Ty, AllowUnknown);
877 /// Return the MVT corresponding to this LLVM type. See getValueType.
878 MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
879 bool AllowUnknown = false) const {
880 return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
883 /// Return the desired alignment for ByVal or InAlloca aggregate function
884 /// arguments in the caller parameter area. This is the actual alignment, not
886 virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
888 /// Return the type of registers that this ValueType will eventually require.
889 MVT getRegisterType(MVT VT) const {
890 assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
891 return RegisterTypeForVT[VT.SimpleTy];
894 /// Return the type of registers that this ValueType will eventually require.
895 MVT getRegisterType(LLVMContext &Context, EVT VT) const {
897 assert((unsigned)VT.getSimpleVT().SimpleTy <
898 array_lengthof(RegisterTypeForVT));
899 return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
904 unsigned NumIntermediates;
905 (void)getVectorTypeBreakdown(Context, VT, VT1,
906 NumIntermediates, RegisterVT);
909 if (VT.isInteger()) {
910 return getRegisterType(Context, getTypeToTransformTo(Context, VT));
912 llvm_unreachable("Unsupported extended type!");
915 /// Return the number of registers that this ValueType will eventually
918 /// This is one for any types promoted to live in larger registers, but may be
919 /// more than one for types (like i64) that are split into pieces. For types
920 /// like i140, which are first promoted then expanded, it is the number of
921 /// registers needed to hold all the bits of the original type. For an i140
922 /// on a 32 bit machine this means 5 registers.
923 unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
925 assert((unsigned)VT.getSimpleVT().SimpleTy <
926 array_lengthof(NumRegistersForVT));
927 return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
932 unsigned NumIntermediates;
933 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
935 if (VT.isInteger()) {
936 unsigned BitWidth = VT.getSizeInBits();
937 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
938 return (BitWidth + RegWidth - 1) / RegWidth;
940 llvm_unreachable("Unsupported extended type!");
943 /// If true, then instruction selection should seek to shrink the FP constant
944 /// of the specified type to a smaller type in order to save space and / or
946 virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
948 // Return true if it is profitable to reduce the given load node to a smaller
951 // e.g. (i16 (trunc (i32 (load x))) -> i16 load x should be performed
952 virtual bool shouldReduceLoadWidth(SDNode *Load,
953 ISD::LoadExtType ExtTy,
958 /// When splitting a value of the specified type into parts, does the Lo
959 /// or Hi part come first? This usually follows the endianness, except
960 /// for ppcf128, where the Hi part always comes first.
961 bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
962 return DL.isBigEndian() || VT == MVT::ppcf128;
965 /// If true, the target has custom DAG combine transformations that it can
966 /// perform for the specified node.
967 bool hasTargetDAGCombine(ISD::NodeType NT) const {
968 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
969 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
972 unsigned getGatherAllAliasesMaxDepth() const {
973 return GatherAllAliasesMaxDepth;
976 /// \brief Get maximum # of store operations permitted for llvm.memset
978 /// This function returns the maximum number of store operations permitted
979 /// to replace a call to llvm.memset. The value is set by the target at the
980 /// performance threshold for such a replacement. If OptSize is true,
981 /// return the limit for functions that have OptSize attribute.
982 unsigned getMaxStoresPerMemset(bool OptSize) const {
983 return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
986 /// \brief Get maximum # of store operations permitted for llvm.memcpy
988 /// This function returns the maximum number of store operations permitted
989 /// to replace a call to llvm.memcpy. The value is set by the target at the
990 /// performance threshold for such a replacement. If OptSize is true,
991 /// return the limit for functions that have OptSize attribute.
992 unsigned getMaxStoresPerMemcpy(bool OptSize) const {
993 return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
996 /// \brief Get maximum # of store operations permitted for llvm.memmove
998 /// This function returns the maximum number of store operations permitted
999 /// to replace a call to llvm.memmove. The value is set by the target at the
1000 /// performance threshold for such a replacement. If OptSize is true,
1001 /// return the limit for functions that have OptSize attribute.
1002 unsigned getMaxStoresPerMemmove(bool OptSize) const {
1003 return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
1006 /// \brief Determine if the target supports unaligned memory accesses.
1008 /// This function returns true if the target allows unaligned memory accesses
1009 /// of the specified type in the given address space. If true, it also returns
1010 /// whether the unaligned memory access is "fast" in the last argument by
1011 /// reference. This is used, for example, in situations where an array
1012 /// copy/move/set is converted to a sequence of store operations. Its use
1013 /// helps to ensure that such replacements don't generate code that causes an
1014 /// alignment error (trap) on the target machine.
1015 virtual bool allowsMisalignedMemoryAccesses(EVT,
1016 unsigned AddrSpace = 0,
1018 bool * /*Fast*/ = nullptr) const {
1022 /// Return true if the target supports a memory access of this type for the
1023 /// given address space and alignment. If the access is allowed, the optional
1024 /// final parameter returns if the access is also fast (as defined by the
1026 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1027 unsigned AddrSpace = 0, unsigned Alignment = 1,
1028 bool *Fast = nullptr) const;
1030 /// Returns the target specific optimal type for load and store operations as
1031 /// a result of memset, memcpy, and memmove lowering.
1033 /// If DstAlign is zero that means it's safe to destination alignment can
1034 /// satisfy any constraint. Similarly if SrcAlign is zero it means there isn't
1035 /// a need to check it against alignment requirement, probably because the
1036 /// source does not need to be loaded. If 'IsMemset' is true, that means it's
1037 /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
1038 /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
1039 /// does not need to be loaded. It returns EVT::Other if the type should be
1040 /// determined using generic target-independent logic.
1041 virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
1042 unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
1044 bool /*ZeroMemset*/,
1045 bool /*MemcpyStrSrc*/,
1046 MachineFunction &/*MF*/) const {
1050 /// Returns true if it's safe to use load / store of the specified type to
1051 /// expand memcpy / memset inline.
1053 /// This is mostly true for all types except for some special cases. For
1054 /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
1055 /// fstpl which also does type conversion. Note the specified type doesn't
1056 /// have to be legal as the hook is used before type legalization.
1057 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
1059 /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
1060 bool usesUnderscoreSetJmp() const {
1061 return UseUnderscoreSetJmp;
1064 /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
1065 bool usesUnderscoreLongJmp() const {
1066 return UseUnderscoreLongJmp;
1069 /// Return lower limit for number of blocks in a jump table.
1070 unsigned getMinimumJumpTableEntries() const;
1072 /// Return upper limit for number of entries in a jump table.
1073 /// Zero if no limit.
1074 unsigned getMaximumJumpTableSize() const;
1076 virtual bool isJumpTableRelative() const {
1077 return TM.isPositionIndependent();
1080 /// If a physical register, this specifies the register that
1081 /// llvm.savestack/llvm.restorestack should save and restore.
1082 unsigned getStackPointerRegisterToSaveRestore() const {
1083 return StackPointerRegisterToSaveRestore;
1086 /// If a physical register, this returns the register that receives the
1087 /// exception address on entry to an EH pad.
1089 getExceptionPointerRegister(const Constant *PersonalityFn) const {
1090 // 0 is guaranteed to be the NoRegister value on all targets
1094 /// If a physical register, this returns the register that receives the
1095 /// exception typeid on entry to a landing pad.
1097 getExceptionSelectorRegister(const Constant *PersonalityFn) const {
1098 // 0 is guaranteed to be the NoRegister value on all targets
1102 virtual bool needsFixedCatchObjects() const {
1103 report_fatal_error("Funclet EH is not implemented for this target");
1106 /// Returns the target's jmp_buf size in bytes (if never set, the default is
1108 unsigned getJumpBufSize() const {
1112 /// Returns the target's jmp_buf alignment in bytes (if never set, the default
1114 unsigned getJumpBufAlignment() const {
1115 return JumpBufAlignment;
1118 /// Return the minimum stack alignment of an argument.
1119 unsigned getMinStackArgumentAlignment() const {
1120 return MinStackArgumentAlignment;
1123 /// Return the minimum function alignment.
1124 unsigned getMinFunctionAlignment() const {
1125 return MinFunctionAlignment;
1128 /// Return the preferred function alignment.
1129 unsigned getPrefFunctionAlignment() const {
1130 return PrefFunctionAlignment;
1133 /// Return the preferred loop alignment.
1134 virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
1135 return PrefLoopAlignment;
1138 /// If the target has a standard location for the stack protector guard,
1139 /// returns the address of that location. Otherwise, returns nullptr.
1140 /// DEPRECATED: please override useLoadStackGuardNode and customize
1141 /// LOAD_STACK_GUARD, or customize @llvm.stackguard().
1142 virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;
1144 /// Inserts necessary declarations for SSP (stack protection) purpose.
1145 /// Should be used only when getIRStackGuard returns nullptr.
1146 virtual void insertSSPDeclarations(Module &M) const;
1148 /// Return the variable that's previously inserted by insertSSPDeclarations,
1149 /// if any, otherwise return nullptr. Should be used only when
1150 /// getIRStackGuard returns nullptr.
1151 virtual Value *getSDagStackGuard(const Module &M) const;
1153 /// If the target has a standard stack protection check function that
1154 /// performs validation and error handling, returns the function. Otherwise,
1155 /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
1156 /// Should be used only when getIRStackGuard returns nullptr.
1157 virtual Value *getSSPStackGuardCheck(const Module &M) const;
1160 Value *getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
1164 /// Returns the target-specific address of the unsafe stack pointer.
1165 virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;
1167 /// Returns true if a cast between SrcAS and DestAS is a noop.
1168 virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
1172 /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
1173 /// are happy to sink it into basic blocks.
1174 virtual bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
1175 return isNoopAddrSpaceCast(SrcAS, DestAS);
1178 /// Return true if the pointer arguments to CI should be aligned by aligning
1179 /// the object whose address is being passed. If so then MinSize is set to the
1180 /// minimum size the object must be to be aligned and PrefAlign is set to the
1181 /// preferred alignment.
1182 virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
1183 unsigned & /*PrefAlign*/) const {
1187 //===--------------------------------------------------------------------===//
1188 /// \name Helpers for TargetTransformInfo implementations
1191 /// Get the ISD node that corresponds to the Instruction class opcode.
1192 int InstructionOpcodeToISD(unsigned Opcode) const;
1194 /// Estimate the cost of type-legalization and the legalized type.
1195 std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
1200 //===--------------------------------------------------------------------===//
1201 /// \name Helpers for atomic expansion.
1204 /// Returns the maximum atomic operation size (in bits) supported by
1205 /// the backend. Atomic operations greater than this size (as well
1206 /// as ones that are not naturally aligned), will be expanded by
1207 /// AtomicExpandPass into an __atomic_* library call.
1208 unsigned getMaxAtomicSizeInBitsSupported() const {
1209 return MaxAtomicSizeInBitsSupported;
1212 /// Returns the size of the smallest cmpxchg or ll/sc instruction
1213 /// the backend supports. Any smaller operations are widened in
1214 /// AtomicExpandPass.
1216 /// Note that *unlike* operations above the maximum size, atomic ops
1217 /// are still natively supported below the minimum; they just
1218 /// require a more complex expansion.
1219 unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
1221 /// Whether AtomicExpandPass should automatically insert fences and reduce
1222 /// ordering for this atomic. This should be true for most architectures with
1223 /// weak memory ordering. Defaults to false.
1224 virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
1228 /// Perform a load-linked operation on Addr, returning a "Value *" with the
1229 /// corresponding pointee type. This may entail some non-trivial operations to
1230 /// truncate or reconstruct types that will be illegal in the backend. See
1231 /// ARMISelLowering for an example implementation.
1232 virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
1233 AtomicOrdering Ord) const {
1234 llvm_unreachable("Load linked unimplemented on this target");
1237 /// Perform a store-conditional operation to Addr. Return the status of the
1238 /// store. This should be 0 if the store succeeded, non-zero otherwise.
1239 virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
1240 Value *Addr, AtomicOrdering Ord) const {
1241 llvm_unreachable("Store conditional unimplemented on this target");
1244 /// Inserts in the IR a target-specific intrinsic specifying a fence.
1245 /// It is called by AtomicExpandPass before expanding an
1246 /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
1247 /// if shouldInsertFencesForAtomic returns true.
1248 /// RMW and CmpXchg set both IsStore and IsLoad to true.
1249 /// This function should either return a nullptr, or a pointer to an IR-level
1250 /// Instruction*. Even complex fence sequences can be represented by a
1251 /// single Instruction* through an intrinsic to be lowered later.
1252 /// Backends should override this method to produce target-specific intrinsic
1253 /// for their fences.
1254 /// FIXME: Please note that the default implementation here in terms of
1255 /// IR-level fences exists for historical/compatibility reasons and is
1256 /// *unsound* ! Fences cannot, in general, be used to restore sequential
1257 /// consistency. For example, consider the following example:
1258 /// atomic<int> x = y = 0;
1259 /// int r1, r2, r3, r4;
1270 /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
1271 /// seq_cst. But if they are lowered to monotonic accesses, no amount of
1272 /// IR-level fences can prevent it.
1274 virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
1275 AtomicOrdering Ord, bool IsStore,
1276 bool IsLoad) const {
1277 if (isReleaseOrStronger(Ord) && IsStore)
1278 return Builder.CreateFence(Ord);
1283 virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
1284 AtomicOrdering Ord, bool IsStore,
1285 bool IsLoad) const {
1286 if (isAcquireOrStronger(Ord))
1287 return Builder.CreateFence(Ord);
1293 // Emits code that executes when the comparison result in the ll/sc
1294 // expansion of a cmpxchg instruction is such that the store-conditional will
1295 // not execute. This makes it possible to balance out the load-linked with
1296 // a dedicated instruction, if desired.
1297 // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
1298 // be unnecessarily held, except if clrex, inserted by this hook, is executed.
1299 virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}
1301 /// Returns true if the given (atomic) store should be expanded by the
1302 /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
1303 virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
1307 /// Returns true if arguments should be sign-extended in lib calls.
1308 virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
1312 /// Returns how the given (atomic) load should be expanded by the
1313 /// IR-level AtomicExpand pass.
1314 virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
1315 return AtomicExpansionKind::None;
1318 /// Returns true if the given atomic cmpxchg should be expanded by the
1319 /// IR-level AtomicExpand pass into a load-linked/store-conditional sequence
1320 /// (through emitLoadLinked() and emitStoreConditional()).
1321 virtual bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
1325 /// Returns how the IR-level AtomicExpand pass should expand the given
1326 /// AtomicRMW, if at all. Default is to never expand.
1327 virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const {
1328 return AtomicExpansionKind::None;
1331 /// On some platforms, an AtomicRMW that never actually modifies the value
1332 /// (such as fetch_add of 0) can be turned into a fence followed by an
1333 /// atomic load. This may sound useless, but it makes it possible for the
1334 /// processor to keep the cacheline shared, dramatically improving
1335 /// performance. And such idempotent RMWs are useful for implementing some
1336 /// kinds of locks, see for example (justification + benchmarks):
1337 /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
1338 /// This method tries doing that transformation, returning the atomic load if
1339 /// it succeeds, and nullptr otherwise.
1340 /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
1341 /// another round of expansion.
1343 lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
1347 /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
1348 /// SIGN_EXTEND, or ANY_EXTEND).
1349 virtual ISD::NodeType getExtendForAtomicOps() const {
1350 return ISD::ZERO_EXTEND;
1355 /// Returns true if we should normalize
1356 /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
1357 /// select(N0|N1, X, Y) => select(N0, select(N1, X, Y, Y)) if it is likely
1358 /// that it saves us from materializing N0 and N1 in an integer register.
1359 /// Targets that are able to perform and/or on flags should return false here.
1360 virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
1362 // If a target has multiple condition registers, then it likely has logical
1363 // operations on those registers.
1364 if (hasMultipleConditionRegisters())
1366 // Only do the transform if the value won't be split into multiple
1368 LegalizeTypeAction Action = getTypeAction(Context, VT);
1369 return Action != TypeExpandInteger && Action != TypeExpandFloat &&
1370 Action != TypeSplitVector;
1373 //===--------------------------------------------------------------------===//
1374 // TargetLowering Configuration Methods - These methods should be invoked by
1375 // the derived class constructor to configure this object for the target.
1378 /// Specify how the target extends the result of integer and floating point
1379 /// boolean values from i1 to a wider type. See getBooleanContents.
1380 void setBooleanContents(BooleanContent Ty) {
1381 BooleanContents = Ty;
1382 BooleanFloatContents = Ty;
1385 /// Specify how the target extends the result of integer and floating point
1386 /// boolean values from i1 to a wider type. See getBooleanContents.
1387 void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
1388 BooleanContents = IntTy;
1389 BooleanFloatContents = FloatTy;
1392 /// Specify how the target extends the result of a vector boolean value from a
1393 /// vector of i1 to a wider type. See getBooleanContents.
1394 void setBooleanVectorContents(BooleanContent Ty) {
1395 BooleanVectorContents = Ty;
1398 /// Specify the target scheduling preference.
1399 void setSchedulingPreference(Sched::Preference Pref) {
1400 SchedPreferenceInfo = Pref;
1403 /// Indicate whether this target prefers to use _setjmp to implement
1404 /// llvm.setjmp or the version without _. Defaults to false.
1405 void setUseUnderscoreSetJmp(bool Val) {
1406 UseUnderscoreSetJmp = Val;
1409 /// Indicate whether this target prefers to use _longjmp to implement
1410 /// llvm.longjmp or the version without _. Defaults to false.
1411 void setUseUnderscoreLongJmp(bool Val) {
1412 UseUnderscoreLongJmp = Val;
1415 /// Indicate the minimum number of blocks to generate jump tables.
1416 void setMinimumJumpTableEntries(unsigned Val);
1418 /// Indicate the maximum number of entries in jump tables.
1419 /// Set to zero to generate unlimited jump tables.
1420 void setMaximumJumpTableSize(unsigned);
1422 /// If set to a physical register, this specifies the register that
1423 /// llvm.savestack/llvm.restorestack should save and restore.
1424 void setStackPointerRegisterToSaveRestore(unsigned R) {
1425 StackPointerRegisterToSaveRestore = R;
1428 /// Tells the code generator that the target has multiple (allocatable)
1429 /// condition registers that can be used to store the results of comparisons
1430 /// for use by selects and conditional branches. With multiple condition
1431 /// registers, the code generator will not aggressively sink comparisons into
1432 /// the blocks of their users.
1433 void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
1434 HasMultipleConditionRegisters = hasManyRegs;
1437 /// Tells the code generator that the target has BitExtract instructions.
1438 /// The code generator will aggressively sink "shift"s into the blocks of
1439 /// their users if the users will generate "and" instructions which can be
1440 /// combined with "shift" to BitExtract instructions.
1441 void setHasExtractBitsInsn(bool hasExtractInsn = true) {
1442 HasExtractBitsInsn = hasExtractInsn;
1445 /// Tells the code generator not to expand logic operations on comparison
1446 /// predicates into separate sequences that increase the amount of flow
1448 void setJumpIsExpensive(bool isExpensive = true);
1450 /// Tells the code generator that this target supports floating point
1451 /// exceptions and cares about preserving floating point exception behavior.
1452 void setHasFloatingPointExceptions(bool FPExceptions = true) {
1453 HasFloatingPointExceptions = FPExceptions;
1456 /// Tells the code generator which bitwidths to bypass.
1457 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
1458 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
1461 /// Add the specified register class as an available regclass for the
1462 /// specified value type. This indicates the selector can handle values of
1463 /// that class natively.
1464 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
1465 assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
1466 RegClassForVT[VT.SimpleTy] = RC;
1469 /// Return the largest legal super-reg register class of the register class
1470 /// for the specified type and its associated "cost".
1471 virtual std::pair<const TargetRegisterClass *, uint8_t>
1472 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
1474 /// Once all of the register classes are added, this allows us to compute
1475 /// derived properties we expose.
1476 void computeRegisterProperties(const TargetRegisterInfo *TRI);
1478 /// Indicate that the specified operation does not work with the specified
1479 /// type and indicate what to do about it.
1480 void setOperationAction(unsigned Op, MVT VT,
1481 LegalizeAction Action) {
1482 assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
1483 OpActions[(unsigned)VT.SimpleTy][Op] = Action;
1486 /// Indicate that the specified load with extension does not work with the
1487 /// specified type and indicate what to do about it.
1488 void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
1489 LegalizeAction Action) {
1490 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
1491 MemVT.isValid() && "Table isn't big enough!");
1492 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
1493 unsigned Shift = 4 * ExtType;
1494 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
1495 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
1498 /// Indicate that the specified truncating store does not work with the
1499 /// specified type and indicate what to do about it.
1500 void setTruncStoreAction(MVT ValVT, MVT MemVT,
1501 LegalizeAction Action) {
1502 assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
1503 TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
1506 /// Indicate that the specified indexed load does or does not work with the
1507 /// specified type and indicate what to do abort it.
1509 /// NOTE: All indexed mode loads are initialized to Expand in
1510 /// TargetLowering.cpp
1511 void setIndexedLoadAction(unsigned IdxMode, MVT VT,
1512 LegalizeAction Action) {
1513 assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
1514 (unsigned)Action < 0xf && "Table isn't big enough!");
1515 // Load action are kept in the upper half.
1516 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
1517 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
1520 /// Indicate that the specified indexed store does or does not work with the
1521 /// specified type and indicate what to do about it.
1523 /// NOTE: All indexed mode stores are initialized to Expand in
1524 /// TargetLowering.cpp
1525 void setIndexedStoreAction(unsigned IdxMode, MVT VT,
1526 LegalizeAction Action) {
1527 assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
1528 (unsigned)Action < 0xf && "Table isn't big enough!");
1529 // Store action are kept in the lower half.
1530 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
1531 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
1534 /// Indicate that the specified condition code is or isn't supported on the
1535 /// target and indicate what to do about it.
1536 void setCondCodeAction(ISD::CondCode CC, MVT VT,
1537 LegalizeAction Action) {
1538 assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
1539 "Table isn't big enough!");
1540 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
1541 /// The lower 3 bits of the SimpleTy index into Nth 4bit set from the 32-bit
1542 /// value and the upper 29 bits index into the second dimension of the array
1543 /// to select what 32-bit value to use.
1544 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
1545 CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
1546 CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
1549 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
1550 /// to trying a larger integer/fp until it can find one that works. If that
1551 /// default is insufficient, this method can be used by the target to override
1553 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
1554 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
1557 /// Convenience method to set an operation to Promote and specify the type
1558 /// in a single call.
1559 void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
1560 setOperationAction(Opc, OrigVT, Promote);
1561 AddPromotedToType(Opc, OrigVT, DestVT);
1564 /// Targets should invoke this method for each target independent node that
1565 /// they want to provide a custom DAG combiner for by implementing the
1566 /// PerformDAGCombine virtual method.
1567 void setTargetDAGCombine(ISD::NodeType NT) {
1568 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
1569 TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
1572 /// Set the target's required jmp_buf buffer size (in bytes); default is 200
1573 void setJumpBufSize(unsigned Size) {
1577 /// Set the target's required jmp_buf buffer alignment (in bytes); default is
1579 void setJumpBufAlignment(unsigned Align) {
1580 JumpBufAlignment = Align;
1583 /// Set the target's minimum function alignment (in log2(bytes))
1584 void setMinFunctionAlignment(unsigned Align) {
1585 MinFunctionAlignment = Align;
1588 /// Set the target's preferred function alignment. This should be set if
1589 /// there is a performance benefit to higher-than-minimum alignment (in
1591 void setPrefFunctionAlignment(unsigned Align) {
1592 PrefFunctionAlignment = Align;
1595 /// Set the target's preferred loop alignment. Default alignment is zero, it
1596 /// means the target does not care about loop alignment. The alignment is
1597 /// specified in log2(bytes). The target may also override
1598 /// getPrefLoopAlignment to provide per-loop values.
1599 void setPrefLoopAlignment(unsigned Align) {
1600 PrefLoopAlignment = Align;
1603 /// Set the minimum stack alignment of an argument (in log2(bytes)).
1604 void setMinStackArgumentAlignment(unsigned Align) {
1605 MinStackArgumentAlignment = Align;
1608 /// Set the maximum atomic operation size supported by the
1609 /// backend. Atomic operations greater than this size (as well as
1610 /// ones that are not naturally aligned), will be expanded by
1611 /// AtomicExpandPass into an __atomic_* library call.
1612 void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
1613 MaxAtomicSizeInBitsSupported = SizeInBits;
1616 // Sets the minimum cmpxchg or ll/sc size supported by the backend.
1617 void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
1618 MinCmpXchgSizeInBits = SizeInBits;
1622 //===--------------------------------------------------------------------===//
1623 // Addressing mode description hooks (used by LSR etc).
1626 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
1627 /// instructions reading the address. This allows as much computation as
1628 /// possible to be done in the address mode for that operand. This hook lets
1629 /// targets also pass back when this should be done on intrinsics which
1631 virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
1632 SmallVectorImpl<Value*> &/*Ops*/,
1633 Type *&/*AccessTy*/,
1634 unsigned AddrSpace = 0) const {
1638 /// This represents an addressing mode of:
1639 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
1640 /// If BaseGV is null, there is no BaseGV.
1641 /// If BaseOffs is zero, there is no base offset.
1642 /// If HasBaseReg is false, there is no base register.
1643 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
1646 GlobalValue *BaseGV;
1650 AddrMode() : BaseGV(nullptr), BaseOffs(0), HasBaseReg(false), Scale(0) {}
1653 /// Return true if the addressing mode represented by AM is legal for this
1654 /// target, for a load/store of the specified type.
1656 /// The type may be VoidTy, in which case only return true if the addressing
1657 /// mode is legal for a load/store of any legal type. TODO: Handle
1658 /// pre/postinc as well.
1660 /// If the address space cannot be determined, it will be -1.
1662 /// TODO: Remove default argument
1663 virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
1664 Type *Ty, unsigned AddrSpace) const;
1666 /// \brief Return the cost of the scaling factor used in the addressing mode
1667 /// represented by AM for this target, for a load/store of the specified type.
1669 /// If the AM is supported, the return value must be >= 0.
1670 /// If the AM is not supported, it returns a negative value.
1671 /// TODO: Handle pre/postinc as well.
1672 /// TODO: Remove default argument
1673 virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
1674 Type *Ty, unsigned AS = 0) const {
1675 // Default: assume that any scaling factor used in a legal AM is free.
1676 if (isLegalAddressingMode(DL, AM, Ty, AS))
1681 virtual bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) const {
1685 /// Return true if the specified immediate is legal icmp immediate, that is
1686 /// the target has icmp instructions which can compare a register against the
1687 /// immediate without having to materialize the immediate into a register.
1688 virtual bool isLegalICmpImmediate(int64_t) const {
1692 /// Return true if the specified immediate is legal add immediate, that is the
1693 /// target has add instructions which can add a register with the immediate
1694 /// without having to materialize the immediate into a register.
1695 virtual bool isLegalAddImmediate(int64_t) const {
1699 /// Return true if it's significantly cheaper to shift a vector by a uniform
1700 /// scalar than by an amount which will vary across each lane. On x86, for
1701 /// example, there is a "psllw" instruction for the former case, but no simple
1702 /// instruction for a general "a << b" operation on vectors.
1703 virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
1707 /// Return true if it's free to truncate a value of type FromTy to type
1708 /// ToTy. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
1709 /// by referencing its sub-register AX.
1710 /// Targets must return false when FromTy <= ToTy.
1711 virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
1715 /// Return true if a truncation from FromTy to ToTy is permitted when deciding
1716 /// whether a call is in tail position. Typically this means that both results
1717 /// would be assigned to the same register or stack slot, but it could mean
1718 /// the target performs adequate checks of its own before proceeding with the
1719 /// tail call. Targets must return false when FromTy <= ToTy.
1720 virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
1724 virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
1728 virtual bool isProfitableToHoist(Instruction *I) const { return true; }
1730 /// Return true if the extension represented by \p I is free.
1731 /// Unlikely the is[Z|FP]ExtFree family which is based on types,
1732 /// this method can use the context provided by \p I to decide
1733 /// whether or not \p I is free.
1734 /// This method extends the behavior of the is[Z|FP]ExtFree family.
1735 /// In other words, if is[Z|FP]Free returns true, then this method
1736 /// returns true as well. The converse is not true.
1737 /// The target can perform the adequate checks by overriding isExtFreeImpl.
1738 /// \pre \p I must be a sign, zero, or fp extension.
1739 bool isExtFree(const Instruction *I) const {
1740 switch (I->getOpcode()) {
1741 case Instruction::FPExt:
1742 if (isFPExtFree(EVT::getEVT(I->getType())))
1745 case Instruction::ZExt:
1746 if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
1749 case Instruction::SExt:
1752 llvm_unreachable("Instruction is not an extension");
1754 return isExtFreeImpl(I);
1757 /// Return true if any actual instruction that defines a value of type FromTy
1758 /// implicitly zero-extends the value to ToTy in the result register.
1760 /// The function should return true when it is likely that the truncate can
1761 /// be freely folded with an instruction defining a value of FromTy. If
1762 /// the defining instruction is unknown (because you're looking at a
1763 /// function argument, PHI, etc.) then the target may require an
1764 /// explicit truncate, which is not necessarily free, but this function
1765 /// does not deal with those cases.
1766 /// Targets must return false when FromTy >= ToTy.
1767 virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
1771 virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
1775 /// Return true if the target supplies and combines to a paired load
1776 /// two loaded values of type LoadedType next to each other in memory.
1777 /// RequiredAlignment gives the minimal alignment constraints that must be met
1778 /// to be able to select this paired load.
1780 /// This information is *not* used to generate actual paired loads, but it is
1781 /// used to generate a sequence of loads that is easier to combine into a
1783 /// For instance, something like this:
1784 /// a = load i64* addr
1785 /// b = trunc i64 a to i32
1786 /// c = lshr i64 a, 32
1787 /// d = trunc i64 c to i32
1788 /// will be optimized into:
1789 /// b = load i32* addr1
1790 /// d = load i32* addr2
1791 /// Where addr1 = addr2 +/- sizeof(i32).
1793 /// In other words, unless the target performs a post-isel load combining,
1794 /// this information should not be provided because it will generate more
1796 virtual bool hasPairedLoad(EVT /*LoadedType*/,
1797 unsigned & /*RequiredAligment*/) const {
1801 /// \brief Get the maximum supported factor for interleaved memory accesses.
1802 /// Default to be the minimum interleave factor: 2.
1803 virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
1805 /// \brief Lower an interleaved load to target specific intrinsics. Return
1806 /// true on success.
1808 /// \p LI is the vector load instruction.
1809 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
1810 /// \p Indices is the corresponding indices for each shufflevector.
1811 /// \p Factor is the interleave factor.
1812 virtual bool lowerInterleavedLoad(LoadInst *LI,
1813 ArrayRef<ShuffleVectorInst *> Shuffles,
1814 ArrayRef<unsigned> Indices,
1815 unsigned Factor) const {
1819 /// \brief Lower an interleaved store to target specific intrinsics. Return
1820 /// true on success.
1822 /// \p SI is the vector store instruction.
1823 /// \p SVI is the shufflevector to RE-interleave the stored vector.
1824 /// \p Factor is the interleave factor.
1825 virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
1826 unsigned Factor) const {
1830 /// Return true if zero-extending the specific node Val to type VT2 is free
1831 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
1832 /// because it's folded such as X86 zero-extending loads).
1833 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
1834 return isZExtFree(Val.getValueType(), VT2);
1837 /// Return true if an fpext operation is free (for instance, because
1838 /// single-precision floating-point numbers are implicitly extended to
1839 /// double-precision).
1840 virtual bool isFPExtFree(EVT VT) const {
1841 assert(VT.isFloatingPoint());
1845 /// Return true if folding a vector load into ExtVal (a sign, zero, or any
1846 /// extend node) is profitable.
1847 virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
1849 /// Return true if an fneg operation is free to the point where it is never
1850 /// worthwhile to replace it with a bitwise operation.
1851 virtual bool isFNegFree(EVT VT) const {
1852 assert(VT.isFloatingPoint());
1856 /// Return true if an fabs operation is free to the point where it is never
1857 /// worthwhile to replace it with a bitwise operation.
1858 virtual bool isFAbsFree(EVT VT) const {
1859 assert(VT.isFloatingPoint());
1863 /// Return true if an FMA operation is faster than a pair of fmul and fadd
1864 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
1865 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
1867 /// NOTE: This may be called before legalization on types for which FMAs are
1868 /// not legal, but should return true if those types will eventually legalize
1869 /// to types that support FMAs. After legalization, it will only be called on
1870 /// types that support FMAs (via Legal or Custom actions)
1871 virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
1875 /// Return true if it's profitable to narrow operations of type VT1 to
1876 /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
1878 virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
1882 /// \brief Return true if it is beneficial to convert a load of a constant to
1883 /// just the constant itself.
1884 /// On some targets it might be more efficient to use a combination of
1885 /// arithmetic instructions to materialize the constant instead of loading it
1886 /// from a constant pool.
1887 virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
1892 /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
1893 /// with this index. This is needed because EXTRACT_SUBVECTOR usually
1894 /// has custom lowering that depends on the index of the first element,
1895 /// and only the target knows which lowering is cheap.
1896 virtual bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const {
1900 // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
1901 // even if the vector itself has multiple uses.
1902 virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
1906 //===--------------------------------------------------------------------===//
1907 // Runtime Library hooks
1910 /// Rename the default libcall routine name for the specified libcall.
1911 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
1912 LibcallRoutineNames[Call] = Name;
1915 /// Get the libcall routine name for the specified libcall.
1916 const char *getLibcallName(RTLIB::Libcall Call) const {
1917 return LibcallRoutineNames[Call];
1920 /// Override the default CondCode to be used to test the result of the
1921 /// comparison libcall against zero.
1922 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
1923 CmpLibcallCCs[Call] = CC;
1926 /// Get the CondCode that's to be used to test the result of the comparison
1927 /// libcall against zero.
1928 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
1929 return CmpLibcallCCs[Call];
1932 /// Set the CallingConv that should be used for the specified libcall.
1933 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
1934 LibcallCallingConvs[Call] = CC;
1937 /// Get the CallingConv that should be used for the specified libcall.
1938 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
1939 return LibcallCallingConvs[Call];
1943 const TargetMachine &TM;
1945 /// Tells the code generator that the target has multiple (allocatable)
1946 /// condition registers that can be used to store the results of comparisons
1947 /// for use by selects and conditional branches. With multiple condition
1948 /// registers, the code generator will not aggressively sink comparisons into
1949 /// the blocks of their users.
1950 bool HasMultipleConditionRegisters;
1952 /// Tells the code generator that the target has BitExtract instructions.
1953 /// The code generator will aggressively sink "shift"s into the blocks of
1954 /// their users if the users will generate "and" instructions which can be
1955 /// combined with "shift" to BitExtract instructions.
1956 bool HasExtractBitsInsn;
1958 /// Tells the code generator to bypass slow divide or remainder
1959 /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
1960 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
1961 /// div/rem when the operands are positive and less than 256.
1962 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
1964 /// Tells the code generator that it shouldn't generate extra flow control
1965 /// instructions and should attempt to combine flow control instructions via
1967 bool JumpIsExpensive;
1969 /// Whether the target supports or cares about preserving floating point
1970 /// exception behavior.
1971 bool HasFloatingPointExceptions;
1973 /// This target prefers to use _setjmp to implement llvm.setjmp.
1975 /// Defaults to false.
1976 bool UseUnderscoreSetJmp;
1978 /// This target prefers to use _longjmp to implement llvm.longjmp.
1980 /// Defaults to false.
1981 bool UseUnderscoreLongJmp;
1983 /// Information about the contents of the high-bits in boolean values held in
1984 /// a type wider than i1. See getBooleanContents.
1985 BooleanContent BooleanContents;
1987 /// Information about the contents of the high-bits in boolean values held in
1988 /// a type wider than i1. See getBooleanContents.
1989 BooleanContent BooleanFloatContents;
1991 /// Information about the contents of the high-bits in boolean vector values
1992 /// when the element type is wider than i1. See getBooleanContents.
1993 BooleanContent BooleanVectorContents;
1995 /// The target scheduling preference: shortest possible total cycles or lowest
1997 Sched::Preference SchedPreferenceInfo;
1999 /// The size, in bytes, of the target's jmp_buf buffers
2000 unsigned JumpBufSize;
2002 /// The alignment, in bytes, of the target's jmp_buf buffers
2003 unsigned JumpBufAlignment;
2005 /// The minimum alignment that any argument on the stack needs to have.
2006 unsigned MinStackArgumentAlignment;
2008 /// The minimum function alignment (used when optimizing for size, and to
2009 /// prevent explicitly provided alignment from leading to incorrect code).
2010 unsigned MinFunctionAlignment;
2012 /// The preferred function alignment (used when alignment unspecified and
2013 /// optimizing for speed).
2014 unsigned PrefFunctionAlignment;
2016 /// The preferred loop alignment.
2017 unsigned PrefLoopAlignment;
2019 /// Size in bits of the maximum atomics size the backend supports.
2020 /// Accesses larger than this will be expanded by AtomicExpandPass.
2021 unsigned MaxAtomicSizeInBitsSupported;
2023 /// Size in bits of the minimum cmpxchg or ll/sc operation the
2024 /// backend supports.
2025 unsigned MinCmpXchgSizeInBits;
2027 /// If set to a physical register, this specifies the register that
2028 /// llvm.savestack/llvm.restorestack should save and restore.
2029 unsigned StackPointerRegisterToSaveRestore;
2031 /// This indicates the default register class to use for each ValueType the
2032 /// target supports natively.
2033 const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
2034 unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
2035 MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
2037 /// This indicates the "representative" register class to use for each
2038 /// ValueType the target supports natively. This information is used by the
2039 /// scheduler to track register pressure. By default, the representative
2040 /// register class is the largest legal super-reg register class of the
2041 /// register class of the specified type. e.g. On x86, i8, i16, and i32's
2042 /// representative class would be GR32.
2043 const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
2045 /// This indicates the "cost" of the "representative" register class for each
2046 /// ValueType. The cost is used by the scheduler to approximate register
2048 uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
2050 /// For any value types we are promoting or expanding, this contains the value
2051 /// type that we are changing to. For Expanded types, this contains one step
2052 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
2053 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
2054 /// the same type (e.g. i32 -> i32).
2055 MVT TransformToType[MVT::LAST_VALUETYPE];
2057 /// For each operation and each value type, keep a LegalizeAction that
2058 /// indicates how instruction selection should deal with the operation. Most
2059 /// operations are Legal (aka, supported natively by the target), but
2060 /// operations that are not should be described. Note that operations on
2061 /// non-legal value types are not described here.
2062 LegalizeAction OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
2064 /// For each load extension type and each value type, keep a LegalizeAction
2065 /// that indicates how instruction selection should deal with a load of a
2066 /// specific value type and extension type. Uses 4-bits to store the action
2067 /// for each of the 4 load ext types.
2068 uint16_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2070 /// For each value type pair keep a LegalizeAction that indicates whether a
2071 /// truncating store of a specific value type and truncating type is legal.
2072 LegalizeAction TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2074 /// For each indexed mode and each value type, keep a pair of LegalizeAction
2075 /// that indicates how instruction selection should deal with the load /
2078 /// The first dimension is the value_type for the reference. The second
2079 /// dimension represents the various modes for load store.
2080 uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
2082 /// For each condition code (ISD::CondCode) keep a LegalizeAction that
2083 /// indicates how instruction selection should deal with the condition code.
2085 /// Because each CC action takes up 4 bits, we need to have the array size be
2086 /// large enough to fit all of the value types. This can be done by rounding
2087 /// up the MVT::LAST_VALUETYPE value to the next multiple of 8.
2088 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 7) / 8];
2091 ValueTypeActionImpl ValueTypeActions;
2094 LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
2098 /// Targets can specify ISD nodes that they would like PerformDAGCombine
2099 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
2102 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
2104 /// For operations that must be promoted to a specific type, this holds the
2105 /// destination type. This map should be sparse, so don't hold it as an
2108 /// Targets add entries to this map with AddPromotedToType(..), clients access
2109 /// this with getTypeToPromoteTo(..).
2110 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
2113 /// Stores the name each libcall.
2114 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
2116 /// The ISD::CondCode that should be used to test the result of each of the
2117 /// comparison libcall against zero.
2118 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
2120 /// Stores the CallingConv that should be used for each libcall.
2121 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
2124 /// Return true if the extension represented by \p I is free.
2125 /// \pre \p I is a sign, zero, or fp extension and
2126 /// is[Z|FP]ExtFree of the related types is not true.
2127 virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
2129 /// Depth that GatherAllAliases should should continue looking for chain
2130 /// dependencies when trying to find a more preferable chain. As an
2131 /// approximation, this should be more than the number of consecutive stores
2132 /// expected to be merged.
2133 unsigned GatherAllAliasesMaxDepth;
2135 /// \brief Specify maximum number of store instructions per memset call.
2137 /// When lowering \@llvm.memset this field specifies the maximum number of
2138 /// store operations that may be substituted for the call to memset. Targets
2139 /// must set this value based on the cost threshold for that target. Targets
2140 /// should assume that the memset will be done using as many of the largest
2141 /// store operations first, followed by smaller ones, if necessary, per
2142 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
2143 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
2144 /// store. This only applies to setting a constant array of a constant size.
2145 unsigned MaxStoresPerMemset;
2147 /// Maximum number of stores operations that may be substituted for the call
2148 /// to memset, used for functions with OptSize attribute.
2149 unsigned MaxStoresPerMemsetOptSize;
2151 /// \brief Specify maximum bytes of store instructions per memcpy call.
2153 /// When lowering \@llvm.memcpy this field specifies the maximum number of
2154 /// store operations that may be substituted for a call to memcpy. Targets
2155 /// must set this value based on the cost threshold for that target. Targets
2156 /// should assume that the memcpy will be done using as many of the largest
2157 /// store operations first, followed by smaller ones, if necessary, per
2158 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
2159 /// with 32-bit alignment would result in one 4-byte store, a one 2-byte store
2160 /// and one 1-byte store. This only applies to copying a constant array of
2162 unsigned MaxStoresPerMemcpy;
2164 /// Maximum number of store operations that may be substituted for a call to
2165 /// memcpy, used for functions with OptSize attribute.
2166 unsigned MaxStoresPerMemcpyOptSize;
2168 /// \brief Specify maximum bytes of store instructions per memmove call.
2170 /// When lowering \@llvm.memmove this field specifies the maximum number of
2171 /// store instructions that may be substituted for a call to memmove. Targets
2172 /// must set this value based on the cost threshold for that target. Targets
2173 /// should assume that the memmove will be done using as many of the largest
2174 /// store operations first, followed by smaller ones, if necessary, per
2175 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
2176 /// with 8-bit alignment would result in nine 1-byte stores. This only
2177 /// applies to copying a constant array of constant size.
2178 unsigned MaxStoresPerMemmove;
2180 /// Maximum number of store instructions that may be substituted for a call to
2181 /// memmove, used for functions with OptSize attribute.
2182 unsigned MaxStoresPerMemmoveOptSize;
2184 /// Tells the code generator that select is more expensive than a branch if
2185 /// the branch is usually predicted right.
2186 bool PredictableSelectIsExpensive;
2188 /// MaskAndBranchFoldingIsLegal - Indicates if the target supports folding
2189 /// a mask of a single bit, a compare, and a branch into a single instruction.
2190 bool MaskAndBranchFoldingIsLegal;
2192 /// \see enableExtLdPromotion.
2193 bool EnableExtLdPromotion;
2196 /// Return true if the value types that can be represented by the specified
2197 /// register class are all legal.
2198 bool isLegalRC(const TargetRegisterClass *RC) const;
2200 /// Replace/modify any TargetFrameIndex operands with a targte-dependent
2201 /// sequence of memory operands that is recognized by PrologEpilogInserter.
2202 MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
2203 MachineBasicBlock *MBB) const;
2206 /// This class defines information used to lower LLVM code to legal SelectionDAG
2207 /// operators that the target instruction selector can accept natively.
2209 /// This class also defines callbacks that targets must implement to lower
2210 /// target-specific constructs to SelectionDAG operators.
2211 class TargetLowering : public TargetLoweringBase {
2212 TargetLowering(const TargetLowering&) = delete;
2213 void operator=(const TargetLowering&) = delete;
2216 struct DAGCombinerInfo;
2218 /// NOTE: The TargetMachine owns TLOF.
2219 explicit TargetLowering(const TargetMachine &TM);
2221 bool isPositionIndependent() const;
2223 /// Returns true by value, base pointer and offset pointer and addressing mode
2224 /// by reference if the node's address can be legally represented as
2225 /// pre-indexed load / store address.
2226 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
2227 SDValue &/*Offset*/,
2228 ISD::MemIndexedMode &/*AM*/,
2229 SelectionDAG &/*DAG*/) const {
2233 /// Returns true by value, base pointer and offset pointer and addressing mode
2234 /// by reference if this node can be combined with a load / store to form a
2235 /// post-indexed load / store.
2236 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
2238 SDValue &/*Offset*/,
2239 ISD::MemIndexedMode &/*AM*/,
2240 SelectionDAG &/*DAG*/) const {
2244 /// Return the entry encoding for a jump table in the current function. The
2245 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
2246 virtual unsigned getJumpTableEncoding() const;
2248 virtual const MCExpr *
2249 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
2250 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
2251 MCContext &/*Ctx*/) const {
2252 llvm_unreachable("Need to implement this hook if target has custom JTIs");
2255 /// Returns relocation base for the given PIC jumptable.
2256 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
2257 SelectionDAG &DAG) const;
2259 /// This returns the relocation base for the given PIC jumptable, the same as
2260 /// getPICJumpTableRelocBase, but as an MCExpr.
2261 virtual const MCExpr *
2262 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2263 unsigned JTI, MCContext &Ctx) const;
2265 /// Return true if folding a constant offset with the given GlobalAddress is
2266 /// legal. It is frequently not legal in PIC relocation models.
2267 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
2269 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
2270 SDValue &Chain) const;
2272 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
2273 SDValue &NewRHS, ISD::CondCode &CCCode,
2274 const SDLoc &DL) const;
2276 /// Returns a pair of (return value, chain).
2277 /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
2278 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
2279 EVT RetVT, ArrayRef<SDValue> Ops,
2280 bool isSigned, const SDLoc &dl,
2281 bool doesNotReturn = false,
2282 bool isReturnValueUsed = true) const;
2284 /// Check whether parameters to a call that are passed in callee saved
2285 /// registers are the same as from the calling function. This needs to be
2286 /// checked for tail call eligibility.
2287 bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
2288 const uint32_t *CallerPreservedMask,
2289 const SmallVectorImpl<CCValAssign> &ArgLocs,
2290 const SmallVectorImpl<SDValue> &OutVals) const;
2292 //===--------------------------------------------------------------------===//
2293 // TargetLowering Optimization Methods
2296 /// A convenience struct that encapsulates a DAG, and two SDValues for
2297 /// returning information from TargetLowering to its clients that want to
2299 struct TargetLoweringOpt {
2306 explicit TargetLoweringOpt(SelectionDAG &InDAG,
2308 DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
2310 bool LegalTypes() const { return LegalTys; }
2311 bool LegalOperations() const { return LegalOps; }
2313 bool CombineTo(SDValue O, SDValue N) {
2319 /// Check to see if the specified operand of the specified instruction is a
2320 /// constant integer. If so, check to see if there are any bits set in the
2321 /// constant that are not demanded. If so, shrink the constant and return
2323 bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
2325 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
2326 /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
2327 /// generalized for targets with other types of implicit widening casts.
2328 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
2331 /// Helper for SimplifyDemandedBits that can simplify an operation with
2332 /// multiple uses. This function uses TLI.SimplifyDemandedBits to
2333 /// simplify Operand \p OpIdx of \p User and then updated \p User with
2334 /// the simplified version. No other uses of \p OpIdx are updated.
2335 /// If \p User is the only user of \p OpIdx, this function behaves exactly
2336 /// like TLI.SimplifyDemandedBits except that it also updates the DAG by
2337 /// calling DCI.CommitTargetLoweringOpt.
2338 bool SimplifyDemandedBits(SDNode *User, unsigned OpIdx,
2339 const APInt &Demanded, DAGCombinerInfo &DCI);
2342 /// Look at Op. At this point, we know that only the DemandedMask bits of the
2343 /// result of Op are ever used downstream. If we can use this information to
2344 /// simplify Op, create a new simplified DAG node and return true, returning
2345 /// the original and new nodes in Old and New. Otherwise, analyze the
2346 /// expression and return a mask of KnownOne and KnownZero bits for the
2347 /// expression (used to simplify the caller). The KnownZero/One bits may only
2348 /// be accurate for those bits in the DemandedMask.
2349 /// \p AssumeSingleUse When this paramater is true, this function will
2350 /// attempt to simplify \p Op even if there are multiple uses.
2351 /// Callers are responsible for correctly updating the DAG based on the
2352 /// results of this function, because simply replacing replacing TLO.Old
2353 /// with TLO.New will be incorrect when this paramater is true and TLO.Old
2354 /// has multiple uses.
2355 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
2356 APInt &KnownZero, APInt &KnownOne,
2357 TargetLoweringOpt &TLO,
2359 bool AssumeSingleUse = false) const;
2361 /// Determine which of the bits specified in Mask are known to be either zero
2362 /// or one and return them in the KnownZero/KnownOne bitsets.
2363 virtual void computeKnownBitsForTargetNode(const SDValue Op,
2366 const SelectionDAG &DAG,
2367 unsigned Depth = 0) const;
2369 /// This method can be implemented by targets that want to expose additional
2370 /// information about sign bits to the DAG Combiner.
2371 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
2372 const SelectionDAG &DAG,
2373 unsigned Depth = 0) const;
2375 struct DAGCombinerInfo {
2376 void *DC; // The DAG Combiner object.
2378 bool CalledByLegalizer;
2382 DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
2383 : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
2385 bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
2386 bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
2387 bool isAfterLegalizeVectorOps() const {
2388 return Level == AfterLegalizeDAG;
2390 CombineLevel getDAGCombineLevel() { return Level; }
2391 bool isCalledByLegalizer() const { return CalledByLegalizer; }
2393 void AddToWorklist(SDNode *N);
2394 SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
2395 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
2396 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
2398 void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
2401 /// Return if the N is a constant or constant vector equal to the true value
2402 /// from getBooleanContents().
2403 bool isConstTrueVal(const SDNode *N) const;
2405 /// Return if the N is a constant or constant vector equal to the false value
2406 /// from getBooleanContents().
2407 bool isConstFalseVal(const SDNode *N) const;
2409 /// Return a constant of type VT that contains a true value that respects
2410 /// getBooleanContents()
2411 SDValue getConstTrueVal(SelectionDAG &DAG, EVT VT, const SDLoc &DL) const;
2413 /// Return if \p N is a True value when extended to \p VT.
2414 bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool Signed) const;
2416 /// Try to simplify a setcc built with the specified operands and cc. If it is
2417 /// unable to simplify it, return a null SDValue.
2418 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
2419 bool foldBooleans, DAGCombinerInfo &DCI,
2420 const SDLoc &dl) const;
2422 /// Returns true (and the GlobalValue and the offset) if the node is a
2423 /// GlobalAddress + offset.
2425 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
2427 /// This method will be invoked for all target nodes and for any
2428 /// target-independent nodes that the target has registered with invoke it
2431 /// The semantics are as follows:
2433 /// SDValue.Val == 0 - No change was made
2434 /// SDValue.Val == N - N was replaced, is dead, and is already handled.
2435 /// otherwise - N should be replaced by the returned Operand.
2437 /// In addition, methods provided by DAGCombinerInfo may be used to perform
2438 /// more complex transformations.
2440 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
2442 /// Return true if it is profitable to move a following shift through this
2443 // node, adjusting any immediate operands as necessary to preserve semantics.
2444 // This transformation may not be desirable if it disrupts a particularly
2445 // auspicious target-specific tree (e.g. bitfield extraction in AArch64).
2446 // By default, it returns true.
2447 virtual bool isDesirableToCommuteWithShift(const SDNode *N /*Op*/) const {
2451 /// Return true if the target has native support for the specified value type
2452 /// and it is 'desirable' to use the type for the given node type. e.g. On x86
2453 /// i16 is legal, but undesirable since i16 instruction encodings are longer
2454 /// and some i16 instructions are slow.
2455 virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
2456 // By default, assume all legal types are desirable.
2457 return isTypeLegal(VT);
2460 /// Return true if it is profitable for dag combiner to transform a floating
2461 /// point op of specified opcode to a equivalent op of an integer
2462 /// type. e.g. f32 load -> i32 load can be profitable on ARM.
2463 virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
2468 /// This method query the target whether it is beneficial for dag combiner to
2469 /// promote the specified node. If true, it should return the desired
2470 /// promotion type by reference.
2471 virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
2475 /// Return true if the target supports swifterror attribute. It optimizes
2476 /// loads and stores to reading and writing a specific register.
2477 virtual bool supportSwiftError() const {
2481 /// Return true if the target supports that a subset of CSRs for the given
2482 /// machine function is handled explicitly via copies.
2483 virtual bool supportSplitCSR(MachineFunction *MF) const {
2487 /// Return true if the MachineFunction contains a COPY which would imply
2488 /// HasCopyImplyingStackAdjustment.
2489 virtual bool hasCopyImplyingStackAdjustment(MachineFunction *MF) const {
2493 /// Perform necessary initialization to handle a subset of CSRs explicitly
2494 /// via copies. This function is called at the beginning of instruction
2496 virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
2497 llvm_unreachable("Not Implemented");
2500 /// Insert explicit copies in entry and exit blocks. We copy a subset of
2501 /// CSRs to virtual registers in the entry block, and copy them back to
2502 /// physical registers in the exit blocks. This function is called at the end
2503 /// of instruction selection.
2504 virtual void insertCopiesSplitCSR(
2505 MachineBasicBlock *Entry,
2506 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
2507 llvm_unreachable("Not Implemented");
2510 //===--------------------------------------------------------------------===//
2511 // Lowering methods - These methods must be implemented by targets so that
2512 // the SelectionDAGBuilder code knows how to lower these.
2515 /// This hook must be implemented to lower the incoming (formal) arguments,
2516 /// described by the Ins array, into the specified DAG. The implementation
2517 /// should fill in the InVals array with legal-type argument values, and
2518 /// return the resulting token chain value.
2520 virtual SDValue LowerFormalArguments(
2521 SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
2522 const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
2523 SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
2524 llvm_unreachable("Not Implemented");
2527 struct ArgListEntry {
2536 bool isInAlloca : 1;
2537 bool isReturned : 1;
2538 bool isSwiftSelf : 1;
2539 bool isSwiftError : 1;
2542 ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
2543 isSRet(false), isNest(false), isByVal(false), isInAlloca(false),
2544 isReturned(false), isSwiftSelf(false), isSwiftError(false),
2547 void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
2549 typedef std::vector<ArgListEntry> ArgListTy;
2551 /// This structure contains all information that is necessary for lowering
2552 /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
2553 /// needs to lower a call, and targets will see this struct in their LowerCall
2555 struct CallLoweringInfo {
2562 bool DoesNotReturn : 1;
2563 bool IsReturnValueUsed : 1;
2564 bool IsConvergent : 1;
2566 // IsTailCall should be modified by implementations of
2567 // TargetLowering::LowerCall that perform tail call conversions.
2570 unsigned NumFixedArgs;
2571 CallingConv::ID CallConv;
2576 ImmutableCallSite *CS;
2578 SmallVector<ISD::OutputArg, 32> Outs;
2579 SmallVector<SDValue, 32> OutVals;
2580 SmallVector<ISD::InputArg, 32> Ins;
2581 SmallVector<SDValue, 4> InVals;
2583 CallLoweringInfo(SelectionDAG &DAG)
2584 : RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
2585 IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true),
2586 IsConvergent(false), IsTailCall(false), NumFixedArgs(-1),
2587 CallConv(CallingConv::C), DAG(DAG), CS(nullptr), IsPatchPoint(false) {
2590 CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
2595 CallLoweringInfo &setChain(SDValue InChain) {
2600 CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
2601 SDValue Target, ArgListTy &&ArgsList) {
2605 NumFixedArgs = Args.size();
2606 Args = std::move(ArgsList);
2610 CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
2611 SDValue Target, ArgListTy &&ArgsList,
2612 ImmutableCallSite &Call) {
2615 IsInReg = Call.paramHasAttr(0, Attribute::InReg);
2617 Call.doesNotReturn() ||
2618 (!Call.isInvoke() &&
2619 isa<UnreachableInst>(Call.getInstruction()->getNextNode()));
2620 IsVarArg = FTy->isVarArg();
2621 IsReturnValueUsed = !Call.getInstruction()->use_empty();
2622 RetSExt = Call.paramHasAttr(0, Attribute::SExt);
2623 RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
2627 CallConv = Call.getCallingConv();
2628 NumFixedArgs = FTy->getNumParams();
2629 Args = std::move(ArgsList);
2636 CallLoweringInfo &setInRegister(bool Value = true) {
2641 CallLoweringInfo &setNoReturn(bool Value = true) {
2642 DoesNotReturn = Value;
2646 CallLoweringInfo &setVarArg(bool Value = true) {
2651 CallLoweringInfo &setTailCall(bool Value = true) {
2656 CallLoweringInfo &setDiscardResult(bool Value = true) {
2657 IsReturnValueUsed = !Value;
2661 CallLoweringInfo &setConvergent(bool Value = true) {
2662 IsConvergent = Value;
2666 CallLoweringInfo &setSExtResult(bool Value = true) {
2671 CallLoweringInfo &setZExtResult(bool Value = true) {
2676 CallLoweringInfo &setIsPatchPoint(bool Value = true) {
2677 IsPatchPoint = Value;
2681 ArgListTy &getArgs() {
2687 /// This function lowers an abstract call to a function into an actual call.
2688 /// This returns a pair of operands. The first element is the return value
2689 /// for the function (if RetTy is not VoidTy). The second element is the
2690 /// outgoing token chain. It calls LowerCall to do the actual lowering.
2691 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
2693 /// This hook must be implemented to lower calls into the specified
2694 /// DAG. The outgoing arguments to the call are described by the Outs array,
2695 /// and the values to be returned by the call are described by the Ins
2696 /// array. The implementation should fill in the InVals array with legal-type
2697 /// return values from the call, and return the resulting token chain value.
2699 LowerCall(CallLoweringInfo &/*CLI*/,
2700 SmallVectorImpl<SDValue> &/*InVals*/) const {
2701 llvm_unreachable("Not Implemented");
2704 /// Target-specific cleanup for formal ByVal parameters.
2705 virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
2707 /// This hook should be implemented to check whether the return values
2708 /// described by the Outs array can fit into the return registers. If false
2709 /// is returned, an sret-demotion is performed.
2710 virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
2711 MachineFunction &/*MF*/, bool /*isVarArg*/,
2712 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2713 LLVMContext &/*Context*/) const
2715 // Return true by default to get preexisting behavior.
2719 /// This hook must be implemented to lower outgoing return values, described
2720 /// by the Outs array, into the specified DAG. The implementation should
2721 /// return the resulting token chain value.
2722 virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
2724 const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
2725 const SmallVectorImpl<SDValue> & /*OutVals*/,
2726 const SDLoc & /*dl*/,
2727 SelectionDAG & /*DAG*/) const {
2728 llvm_unreachable("Not Implemented");
2731 /// Return true if result of the specified node is used by a return node
2732 /// only. It also compute and return the input chain for the tail call.
2734 /// This is used to determine whether it is possible to codegen a libcall as
2735 /// tail call at legalization time.
2736 virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
2740 /// Return true if the target may be able emit the call instruction as a tail
2741 /// call. This is used by optimization passes to determine if it's profitable
2742 /// to duplicate return instructions to enable tailcall optimization.
2743 virtual bool mayBeEmittedAsTailCall(CallInst *) const {
2747 /// Return the builtin name for the __builtin___clear_cache intrinsic
2748 /// Default is to invoke the clear cache library call
2749 virtual const char * getClearCacheBuiltinName() const {
2750 return "__clear_cache";
2753 /// Return the register ID of the name passed in. Used by named register
2754 /// global variables extension. There is no target-independent behaviour
2755 /// so the default action is to bail.
2756 virtual unsigned getRegisterByName(const char* RegName, EVT VT,
2757 SelectionDAG &DAG) const {
2758 report_fatal_error("Named registers not implemented for this target");
2761 /// Return the type that should be used to zero or sign extend a
2762 /// zeroext/signext integer return value. FIXME: Some C calling conventions
2763 /// require the return type to be promoted, but this is not true all the time,
2764 /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
2765 /// conventions. The frontend should handle this and include all of the
2766 /// necessary information.
2767 virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
2768 ISD::NodeType /*ExtendKind*/) const {
2769 EVT MinVT = getRegisterType(Context, MVT::i32);
2770 return VT.bitsLT(MinVT) ? MinVT : VT;
2773 /// For some targets, an LLVM struct type must be broken down into multiple
2774 /// simple types, but the calling convention specifies that the entire struct
2775 /// must be passed in a block of consecutive registers.
2777 functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
2778 bool isVarArg) const {
2782 /// Returns a 0 terminated array of registers that can be safely used as
2783 /// scratch registers.
2784 virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
2788 /// This callback is used to prepare for a volatile or atomic load.
2789 /// It takes a chain node as input and returns the chain for the load itself.
2791 /// Having a callback like this is necessary for targets like SystemZ,
2792 /// which allows a CPU to reuse the result of a previous load indefinitely,
2793 /// even if a cache-coherent store is performed by another CPU. The default
2794 /// implementation does nothing.
2795 virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
2796 SelectionDAG &DAG) const {
2800 /// This callback is invoked by the type legalizer to legalize nodes with an
2801 /// illegal operand type but legal result types. It replaces the
2802 /// LowerOperation callback in the type Legalizer. The reason we can not do
2803 /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
2804 /// use this callback.
2806 /// TODO: Consider merging with ReplaceNodeResults.
2808 /// The target places new result values for the node in Results (their number
2809 /// and types must exactly match those of the original return values of
2810 /// the node), or leaves Results empty, which indicates that the node is not
2811 /// to be custom lowered after all.
2812 /// The default implementation calls LowerOperation.
2813 virtual void LowerOperationWrapper(SDNode *N,
2814 SmallVectorImpl<SDValue> &Results,
2815 SelectionDAG &DAG) const;
2817 /// This callback is invoked for operations that are unsupported by the
2818 /// target, which are registered to use 'custom' lowering, and whose defined
2819 /// values are all legal. If the target has no operations that require custom
2820 /// lowering, it need not implement this. The default implementation of this
2822 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
2824 /// This callback is invoked when a node result type is illegal for the
2825 /// target, and the operation was registered to use 'custom' lowering for that
2826 /// result type. The target places new result values for the node in Results
2827 /// (their number and types must exactly match those of the original return
2828 /// values of the node), or leaves Results empty, which indicates that the
2829 /// node is not to be custom lowered after all.
2831 /// If the target has no operations that require custom lowering, it need not
2832 /// implement this. The default implementation aborts.
2833 virtual void ReplaceNodeResults(SDNode * /*N*/,
2834 SmallVectorImpl<SDValue> &/*Results*/,
2835 SelectionDAG &/*DAG*/) const {
2836 llvm_unreachable("ReplaceNodeResults not implemented for this target!");
2839 /// This method returns the name of a target specific DAG node.
2840 virtual const char *getTargetNodeName(unsigned Opcode) const;
2842 /// This method returns a target specific FastISel object, or null if the
2843 /// target does not support "fast" ISel.
2844 virtual FastISel *createFastISel(FunctionLoweringInfo &,
2845 const TargetLibraryInfo *) const {
2850 bool verifyReturnAddressArgumentIsConstant(SDValue Op,
2851 SelectionDAG &DAG) const;
2853 //===--------------------------------------------------------------------===//
2854 // Inline Asm Support hooks
2857 /// This hook allows the target to expand an inline asm call to be explicit
2858 /// llvm code if it wants to. This is useful for turning simple inline asms
2859 /// into LLVM intrinsics, which gives the compiler more information about the
2860 /// behavior of the code.
2861 virtual bool ExpandInlineAsm(CallInst *) const {
2865 enum ConstraintType {
2866 C_Register, // Constraint represents specific register(s).
2867 C_RegisterClass, // Constraint represents any of register(s) in class.
2868 C_Memory, // Memory constraint.
2869 C_Other, // Something else.
2870 C_Unknown // Unsupported constraint.
2873 enum ConstraintWeight {
2875 CW_Invalid = -1, // No match.
2876 CW_Okay = 0, // Acceptable.
2877 CW_Good = 1, // Good weight.
2878 CW_Better = 2, // Better weight.
2879 CW_Best = 3, // Best weight.
2881 // Well-known weights.
2882 CW_SpecificReg = CW_Okay, // Specific register operands.
2883 CW_Register = CW_Good, // Register operands.
2884 CW_Memory = CW_Better, // Memory operands.
2885 CW_Constant = CW_Best, // Constant operand.
2886 CW_Default = CW_Okay // Default or don't know type.
2889 /// This contains information for each constraint that we are lowering.
2890 struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
2891 /// This contains the actual string for the code, like "m". TargetLowering
2892 /// picks the 'best' code from ConstraintInfo::Codes that most closely
2893 /// matches the operand.
2894 std::string ConstraintCode;
2896 /// Information about the constraint code, e.g. Register, RegisterClass,
2897 /// Memory, Other, Unknown.
2898 TargetLowering::ConstraintType ConstraintType;
2900 /// If this is the result output operand or a clobber, this is null,
2901 /// otherwise it is the incoming operand to the CallInst. This gets
2902 /// modified as the asm is processed.
2903 Value *CallOperandVal;
2905 /// The ValueType for the operand value.
2908 /// Return true of this is an input operand that is a matching constraint
2910 bool isMatchingInputConstraint() const;
2912 /// If this is an input matching constraint, this method returns the output
2913 /// operand it matches.
2914 unsigned getMatchedOperand() const;
2916 /// Copy constructor for copying from a ConstraintInfo.
2917 AsmOperandInfo(InlineAsm::ConstraintInfo Info)
2918 : InlineAsm::ConstraintInfo(std::move(Info)),
2919 ConstraintType(TargetLowering::C_Unknown), CallOperandVal(nullptr),
2920 ConstraintVT(MVT::Other) {}
2923 typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
2925 /// Split up the constraint string from the inline assembly value into the
2926 /// specific constraints and their prefixes, and also tie in the associated
2927 /// operand values. If this returns an empty vector, and if the constraint
2928 /// string itself isn't empty, there was an error parsing.
2929 virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
2930 const TargetRegisterInfo *TRI,
2931 ImmutableCallSite CS) const;
2933 /// Examine constraint type and operand type and determine a weight value.
2934 /// The operand object must already have been set up with the operand type.
2935 virtual ConstraintWeight getMultipleConstraintMatchWeight(
2936 AsmOperandInfo &info, int maIndex) const;
2938 /// Examine constraint string and operand type and determine a weight value.
2939 /// The operand object must already have been set up with the operand type.
2940 virtual ConstraintWeight getSingleConstraintMatchWeight(
2941 AsmOperandInfo &info, const char *constraint) const;
2943 /// Determines the constraint code and constraint type to use for the specific
2944 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
2945 /// If the actual operand being passed in is available, it can be passed in as
2946 /// Op, otherwise an empty SDValue can be passed.
2947 virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
2949 SelectionDAG *DAG = nullptr) const;
2951 /// Given a constraint, return the type of constraint it is for this target.
2952 virtual ConstraintType getConstraintType(StringRef Constraint) const;
2954 /// Given a physical register constraint (e.g. {edx}), return the register
2955 /// number and the register class for the register.
2957 /// Given a register class constraint, like 'r', if this corresponds directly
2958 /// to an LLVM register class, return a register of 0 and the register class
2961 /// This should only be used for C_Register constraints. On error, this
2962 /// returns a register number of 0 and a null register class pointer.
2963 virtual std::pair<unsigned, const TargetRegisterClass *>
2964 getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
2965 StringRef Constraint, MVT VT) const;
2967 virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
2968 if (ConstraintCode == "i")
2969 return InlineAsm::Constraint_i;
2970 else if (ConstraintCode == "m")
2971 return InlineAsm::Constraint_m;
2972 return InlineAsm::Constraint_Unknown;
2975 /// Try to replace an X constraint, which matches anything, with another that
2976 /// has more specific requirements based on the type of the corresponding
2977 /// operand. This returns null if there is no replacement to make.
2978 virtual const char *LowerXConstraint(EVT ConstraintVT) const;
2980 /// Lower the specified operand into the Ops vector. If it is invalid, don't
2981 /// add anything to Ops.
2982 virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
2983 std::vector<SDValue> &Ops,
2984 SelectionDAG &DAG) const;
2986 //===--------------------------------------------------------------------===//
2987 // Div utility functions
2989 SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
2990 bool IsAfterLegalization,
2991 std::vector<SDNode *> *Created) const;
2992 SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
2993 bool IsAfterLegalization,
2994 std::vector<SDNode *> *Created) const;
2996 /// Targets may override this function to provide custom SDIV lowering for
2997 /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
2998 /// assumes SDIV is expensive and replaces it with a series of other integer
3000 virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
3002 std::vector<SDNode *> *Created) const;
3004 /// Indicate whether this target prefers to combine FDIVs with the same
3005 /// divisor. If the transform should never be done, return zero. If the
3006 /// transform should be done, return the minimum number of divisor uses
3007 /// that must exist.
3008 virtual unsigned combineRepeatedFPDivisors() const {
3012 /// Hooks for building estimates in place of slower divisions and square
3015 /// Return either a square root or its reciprocal estimate value for the input
3017 /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
3018 /// 'Enabled' as set by a potential default override attribute.
3019 /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
3020 /// refinement iterations required to generate a sufficient (though not
3021 /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
3022 /// The boolean UseOneConstNR output is used to select a Newton-Raphson
3023 /// algorithm implementation that uses either one or two constants.
3024 /// The boolean Reciprocal is used to select whether the estimate is for the
3025 /// square root of the input operand or the reciprocal of its square root.
3026 /// A target may choose to implement its own refinement within this function.
3027 /// If that's true, then return '0' as the number of RefinementSteps to avoid
3028 /// any further refinement of the estimate.
3029 /// An empty SDValue return means no estimate sequence can be created.
3030 virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
3031 int Enabled, int &RefinementSteps,
3032 bool &UseOneConstNR, bool Reciprocal) const {
3036 /// Return a reciprocal estimate value for the input operand.
3037 /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
3038 /// 'Enabled' as set by a potential default override attribute.
3039 /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
3040 /// refinement iterations required to generate a sufficient (though not
3041 /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
3042 /// A target may choose to implement its own refinement within this function.
3043 /// If that's true, then return '0' as the number of RefinementSteps to avoid
3044 /// any further refinement of the estimate.
3045 /// An empty SDValue return means no estimate sequence can be created.
3046 virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
3047 int Enabled, int &RefinementSteps) const {
3051 //===--------------------------------------------------------------------===//
3052 // Legalization utility functions
3055 /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
3056 /// respectively, each computing an n/2-bit part of the result.
3057 /// \param Result A vector that will be filled with the parts of the result
3058 /// in little-endian order.
3059 /// \param LL Low bits of the LHS of the MUL. You can use this parameter
3060 /// if you want to control how low bits are extracted from the LHS.
3061 /// \param LH High bits of the LHS of the MUL. See LL for meaning.
3062 /// \param RL Low bits of the RHS of the MUL. See LL for meaning
3063 /// \param RH High bits of the RHS of the MUL. See LL for meaning.
3064 /// \returns true if the node has been expanded, false if it has not
3065 bool expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl, SDValue LHS,
3066 SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
3067 SelectionDAG &DAG, MulExpansionKind Kind,
3068 SDValue LL = SDValue(), SDValue LH = SDValue(),
3069 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
3071 /// Expand a MUL into two nodes. One that computes the high bits of
3072 /// the result and one that computes the low bits.
3073 /// \param HiLoVT The value type to use for the Lo and Hi nodes.
3074 /// \param LL Low bits of the LHS of the MUL. You can use this parameter
3075 /// if you want to control how low bits are extracted from the LHS.
3076 /// \param LH High bits of the LHS of the MUL. See LL for meaning.
3077 /// \param RL Low bits of the RHS of the MUL. See LL for meaning
3078 /// \param RH High bits of the RHS of the MUL. See LL for meaning.
3079 /// \returns true if the node has been expanded. false if it has not
3080 bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
3081 SelectionDAG &DAG, MulExpansionKind Kind,
3082 SDValue LL = SDValue(), SDValue LH = SDValue(),
3083 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
3085 /// Expand float(f32) to SINT(i64) conversion
3086 /// \param N Node to expand
3087 /// \param Result output after conversion
3088 /// \returns True, if the expansion was successful, false otherwise
3089 bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
3091 /// Turn load of vector type into a load of the individual elements.
3092 /// \param LD load to expand
3093 /// \returns MERGE_VALUEs of the scalar loads with their chains.
3094 SDValue scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const;
3096 // Turn a store of a vector type into stores of the individual elements.
3097 /// \param ST Store with a vector value type
3098 /// \returns MERGE_VALUs of the individual store chains.
3099 SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;
3101 /// Expands an unaligned load to 2 half-size loads for an integer, and
3102 /// possibly more for vectors.
3103 std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
3104 SelectionDAG &DAG) const;
3106 /// Expands an unaligned store to 2 half-size stores for integer values, and
3107 /// possibly more for vectors.
3108 SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;
3110 /// Increments memory address \p Addr according to the type of the value
3111 /// \p DataVT that should be stored. If the data is stored in compressed
3112 /// form, the memory address should be incremented according to the number of
3113 /// the stored elements. This number is equal to the number of '1's bits
3115 /// \p DataVT is a vector type. \p Mask is a vector value.
3116 /// \p DataVT and \p Mask have the same number of vector elements.
3117 SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
3118 EVT DataVT, SelectionDAG &DAG,
3119 bool IsCompressedMemory) const;
3121 //===--------------------------------------------------------------------===//
3122 // Instruction Emitting Hooks
3125 /// This method should be implemented by targets that mark instructions with
3126 /// the 'usesCustomInserter' flag. These instructions are special in various
3127 /// ways, which require special support to insert. The specified MachineInstr
3128 /// is created but not inserted into any basic blocks, and this method is
3129 /// called to expand it into a sequence of instructions, potentially also
3130 /// creating new basic blocks and control flow.
3131 /// As long as the returned basic block is different (i.e., we created a new
3132 /// one), the custom inserter is free to modify the rest of \p MBB.
3133 virtual MachineBasicBlock *
3134 EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
3136 /// This method should be implemented by targets that mark instructions with
3137 /// the 'hasPostISelHook' flag. These instructions must be adjusted after
3138 /// instruction selection by target hooks. e.g. To fill in optional defs for
3139 /// ARM 's' setting instructions.
3140 virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
3141 SDNode *Node) const;
3143 /// If this function returns true, SelectionDAGBuilder emits a
3144 /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
3145 virtual bool useLoadStackGuardNode() const {
3149 /// Lower TLS global address SDNode for target independent emulated TLS model.
3150 virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
3151 SelectionDAG &DAG) const;
3153 // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
3154 // If we're comparing for equality to zero and isCtlzFast is true, expose the
3155 // fact that this can be implemented as a ctlz/srl pair, so that the dag
3156 // combiner can fold the new nodes.
3157 SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
3160 SDValue simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
3161 ISD::CondCode Cond, DAGCombinerInfo &DCI,
3162 const SDLoc &DL) const;
3165 /// Given an LLVM IR type and return type attributes, compute the return value
3166 /// EVTs and flags, and optionally also the offsets, if the return value is
3167 /// being lowered to memory.
3168 void GetReturnInfo(Type *ReturnType, AttributeSet attr,
3169 SmallVectorImpl<ISD::OutputArg> &Outs,
3170 const TargetLowering &TLI, const DataLayout &DL);
3172 } // end llvm namespace