1 //===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This file describes how to lower LLVM code to machine code. This has two
14 /// 1. Which ValueTypes are natively supported by the target.
15 /// 2. Which operations are supported for supported ValueTypes.
16 /// 3. Cost thresholds for alternative implementations of certain operations.
18 /// In addition it has a few other components, like information about FP
21 //===----------------------------------------------------------------------===//
23 #ifndef LLVM_CODEGEN_TARGETLOWERING_H
24 #define LLVM_CODEGEN_TARGETLOWERING_H
26 #include "llvm/ADT/APInt.h"
27 #include "llvm/ADT/ArrayRef.h"
28 #include "llvm/ADT/DenseMap.h"
29 #include "llvm/ADT/STLExtras.h"
30 #include "llvm/ADT/SmallVector.h"
31 #include "llvm/ADT/StringRef.h"
32 #include "llvm/CodeGen/DAGCombine.h"
33 #include "llvm/CodeGen/ISDOpcodes.h"
34 #include "llvm/CodeGen/MachineValueType.h"
35 #include "llvm/CodeGen/RuntimeLibcalls.h"
36 #include "llvm/CodeGen/SelectionDAG.h"
37 #include "llvm/CodeGen/SelectionDAGNodes.h"
38 #include "llvm/CodeGen/TargetCallingConv.h"
39 #include "llvm/CodeGen/ValueTypes.h"
40 #include "llvm/IR/Attributes.h"
41 #include "llvm/IR/CallSite.h"
42 #include "llvm/IR/CallingConv.h"
43 #include "llvm/IR/DataLayout.h"
44 #include "llvm/IR/DerivedTypes.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/IRBuilder.h"
47 #include "llvm/IR/InlineAsm.h"
48 #include "llvm/IR/Instruction.h"
49 #include "llvm/IR/Instructions.h"
50 #include "llvm/IR/Type.h"
51 #include "llvm/MC/MCRegisterInfo.h"
52 #include "llvm/Support/AtomicOrdering.h"
53 #include "llvm/Support/Casting.h"
54 #include "llvm/Support/ErrorHandling.h"
55 #include "llvm/Target/TargetMachine.h"
68 class BranchProbability;
73 class FunctionLoweringInfo;
78 class MachineBasicBlock;
79 class MachineFunction;
81 class MachineJumpTableInfo;
83 class MachineRegisterInfo;
87 class TargetRegisterClass;
88 class TargetLibraryInfo;
89 class TargetRegisterInfo;
95 None, // No preference
96 Source, // Follow source order.
97 RegPressure, // Scheduling for lowest register pressure.
98 Hybrid, // Scheduling for both latency and register pressure.
99 ILP, // Scheduling for ILP in low register pressure mode.
100 VLIW // Scheduling for VLIW targets.
103 } // end namespace Sched
105 /// This base class for TargetLowering contains the SelectionDAG-independent
106 /// parts that can be used from the rest of CodeGen.
107 class TargetLoweringBase {
109 /// This enum indicates whether operations are valid for a target, and if not,
110 /// what action should be used to make them valid.
111 enum LegalizeAction : uint8_t {
112 Legal, // The target natively supports this operation.
113 Promote, // This operation should be executed in a larger type.
114 Expand, // Try to expand this to other ops, otherwise use a libcall.
115 LibCall, // Don't try to expand this to other ops, always use a libcall.
116 Custom // Use the LowerOperation hook to implement custom lowering.
119 /// This enum indicates whether a types are legal for a target, and if not,
120 /// what action should be used to make them valid.
121 enum LegalizeTypeAction : uint8_t {
122 TypeLegal, // The target natively supports this type.
123 TypePromoteInteger, // Replace this integer with a larger one.
124 TypeExpandInteger, // Split this integer into two of half the size.
125 TypeSoftenFloat, // Convert this float to a same size integer type,
126 // if an operation is not supported in target HW.
127 TypeExpandFloat, // Split this float into two of half the size.
128 TypeScalarizeVector, // Replace this one-element vector with its element.
129 TypeSplitVector, // Split this vector into two of half the size.
130 TypeWidenVector, // This vector should be widened into a larger vector.
131 TypePromoteFloat // Replace this float with a larger one.
134 /// LegalizeKind holds the legalization kind that needs to happen to EVT
135 /// in order to type-legalize it.
136 using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;
138 /// Enum that describes how the target represents true/false values.
139 enum BooleanContent {
140 UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
141 ZeroOrOneBooleanContent, // All bits zero except for bit 0.
142 ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
145 /// Enum that describes what type of support for selects the target has.
146 enum SelectSupportKind {
147 ScalarValSelect, // The target supports scalar selects (ex: cmov).
148 ScalarCondVectorVal, // The target supports selects with a scalar condition
149 // and vector values (ex: cmov).
150 VectorMaskSelect // The target supports vector selects with a vector
151 // mask (ex: x86 blends).
154 /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
155 /// to, if at all. Exists because different targets have different levels of
156 /// support for these atomic instructions, and also have different options
157 /// w.r.t. what they should expand to.
158 enum class AtomicExpansionKind {
159 None, // Don't expand the instruction.
160 LLSC, // Expand the instruction into loadlinked/storeconditional; used
162 LLOnly, // Expand the (load) instruction into just a load-linked, which has
163 // greater atomic guarantees than a normal load.
164 CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
167 /// Enum that specifies when a multiplication should be expanded.
168 enum class MulExpansionKind {
169 Always, // Always expand the instruction.
170 OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
176 Value *Val = nullptr;
177 SDValue Node = SDValue();
187 bool IsSwiftSelf : 1;
188 bool IsSwiftError : 1;
189 uint16_t Alignment = 0;
192 : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
193 IsNest(false), IsByVal(false), IsInAlloca(false), IsReturned(false),
194 IsSwiftSelf(false), IsSwiftError(false) {}
196 void setAttributes(ImmutableCallSite *CS, unsigned ArgIdx);
198 using ArgListTy = std::vector<ArgListEntry>;
200 virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
201 ArgListTy &Args) const {};
203 static ISD::NodeType getExtendForContent(BooleanContent Content) {
205 case UndefinedBooleanContent:
206 // Extend by adding rubbish bits.
207 return ISD::ANY_EXTEND;
208 case ZeroOrOneBooleanContent:
209 // Extend by adding zero bits.
210 return ISD::ZERO_EXTEND;
211 case ZeroOrNegativeOneBooleanContent:
212 // Extend by copying the sign bit.
213 return ISD::SIGN_EXTEND;
215 llvm_unreachable("Invalid content kind");
218 /// NOTE: The TargetMachine owns TLOF.
219 explicit TargetLoweringBase(const TargetMachine &TM);
220 TargetLoweringBase(const TargetLoweringBase &) = delete;
221 TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
222 virtual ~TargetLoweringBase() = default;
225 /// \brief Initialize all of the actions to default values.
229 const TargetMachine &getTargetMachine() const { return TM; }
231 virtual bool useSoftFloat() const { return false; }
233 /// Return the pointer type for the given address space, defaults to
234 /// the pointer type from the data layout.
235 /// FIXME: The default needs to be removed once all the code is updated.
236 MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
237 return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
240 /// Return the type for frame index, which is determined by
241 /// the alloca address space specified through the data layout.
242 MVT getFrameIndexTy(const DataLayout &DL) const {
243 return getPointerTy(DL, DL.getAllocaAddrSpace());
246 /// Return the type for operands of fence.
247 /// TODO: Let fence operands be of i32 type and remove this.
248 virtual MVT getFenceOperandTy(const DataLayout &DL) const {
249 return getPointerTy(DL);
252 /// EVT is not used in-tree, but is used by out-of-tree target.
253 /// A documentation for this function would be nice...
254 virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
256 EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const;
258 /// Returns the type to be used for the index operand of:
259 /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
260 /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
261 virtual MVT getVectorIdxTy(const DataLayout &DL) const {
262 return getPointerTy(DL);
265 virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
269 /// Return true if multiple condition registers are available.
270 bool hasMultipleConditionRegisters() const {
271 return HasMultipleConditionRegisters;
274 /// Return true if the target has BitExtract instructions.
275 bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
277 /// Return the preferred vector type legalization action.
278 virtual TargetLoweringBase::LegalizeTypeAction
279 getPreferredVectorAction(EVT VT) const {
280 // The default action for one element vectors is to scalarize
281 if (VT.getVectorNumElements() == 1)
282 return TypeScalarizeVector;
283 // The default action for other vectors is to promote
284 return TypePromoteInteger;
287 // There are two general methods for expanding a BUILD_VECTOR node:
288 // 1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
290 // 2. Build the vector on the stack and then load it.
291 // If this function returns true, then method (1) will be used, subject to
292 // the constraint that all of the necessary shuffles are legal (as determined
293 // by isShuffleMaskLegal). If this function returns false, then method (2) is
294 // always used. The vector type, and the number of defined values, are
297 shouldExpandBuildVectorWithShuffles(EVT /* VT */,
298 unsigned DefinedValues) const {
299 return DefinedValues < 3;
302 /// Return true if integer divide is usually cheaper than a sequence of
303 /// several shifts, adds, and multiplies for this target.
304 /// The definition of "cheaper" may depend on whether we're optimizing
305 /// for speed or for size.
306 virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }
308 /// Return true if the target can handle a standalone remainder operation.
309 virtual bool hasStandaloneRem(EVT VT) const {
313 /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
314 virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
315 // Default behavior is to replace SQRT(X) with X*RSQRT(X).
319 /// Reciprocal estimate status values used by the functions below.
320 enum ReciprocalEstimate : int {
326 /// Return a ReciprocalEstimate enum value for a square root of the given type
327 /// based on the function's attributes. If the operation is not overridden by
328 /// the function's attributes, "Unspecified" is returned and target defaults
329 /// are expected to be used for instruction selection.
330 int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;
332 /// Return a ReciprocalEstimate enum value for a division of the given type
333 /// based on the function's attributes. If the operation is not overridden by
334 /// the function's attributes, "Unspecified" is returned and target defaults
335 /// are expected to be used for instruction selection.
336 int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;
338 /// Return the refinement step count for a square root of the given type based
339 /// on the function's attributes. If the operation is not overridden by
340 /// the function's attributes, "Unspecified" is returned and target defaults
341 /// are expected to be used for instruction selection.
342 int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;
344 /// Return the refinement step count for a division of the given type based
345 /// on the function's attributes. If the operation is not overridden by
346 /// the function's attributes, "Unspecified" is returned and target defaults
347 /// are expected to be used for instruction selection.
348 int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
350 /// Returns true if target has indicated at least one type should be bypassed.
351 bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
353 /// Returns map of slow types for division or remainder with corresponding
355 const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
356 return BypassSlowDivWidths;
359 /// Return true if Flow Control is an expensive operation that should be
361 bool isJumpExpensive() const { return JumpIsExpensive; }
363 /// Return true if selects are only cheaper than branches if the branch is
364 /// unlikely to be predicted right.
365 bool isPredictableSelectExpensive() const {
366 return PredictableSelectIsExpensive;
369 /// If a branch or a select condition is skewed in one direction by more than
370 /// this factor, it is very likely to be predicted correctly.
371 virtual BranchProbability getPredictableBranchThreshold() const;
373 /// Return true if the following transform is beneficial:
374 /// fold (conv (load x)) -> (load (conv*)x)
375 /// On architectures that don't natively support some vector loads
376 /// efficiently, casting the load to a smaller vector of larger types and
377 /// loading is more efficient, however, this can be undone by optimizations in
379 virtual bool isLoadBitCastBeneficial(EVT LoadVT,
380 EVT BitcastVT) const {
381 // Don't do if we could do an indexed load on the original type, but not on
383 if (!LoadVT.isSimple() || !BitcastVT.isSimple())
386 MVT LoadMVT = LoadVT.getSimpleVT();
388 // Don't bother doing this if it's just going to be promoted again later, as
389 // doing so might interfere with other combines.
390 if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
391 getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
397 /// Return true if the following transform is beneficial:
398 /// (store (y (conv x)), y*)) -> (store x, (x*))
399 virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT) const {
400 // Default to the same logic as loads.
401 return isLoadBitCastBeneficial(StoreVT, BitcastVT);
404 /// Return true if it is expected to be cheaper to do a store of a non-zero
405 /// vector constant with the given size and type for the address space than to
406 /// store the individual scalar element constants.
407 virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
409 unsigned AddrSpace) const {
413 /// Allow store merging after legalization in addition to before legalization.
414 /// This may catch stores that do not exist earlier (eg, stores created from
416 virtual bool mergeStoresAfterLegalization() const { return true; }
418 /// Returns if it's reasonable to merge stores to MemVT size.
419 virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
420 const SelectionDAG &DAG) const {
424 /// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
425 virtual bool isCheapToSpeculateCttz() const {
429 /// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
430 virtual bool isCheapToSpeculateCtlz() const {
434 /// \brief Return true if ctlz instruction is fast.
435 virtual bool isCtlzFast() const {
439 /// Return true if it is safe to transform an integer-domain bitwise operation
440 /// into the equivalent floating-point operation. This should be set to true
441 /// if the target has IEEE-754-compliant fabs/fneg operations for the input
443 virtual bool hasBitPreservingFPLogic(EVT VT) const {
447 /// \brief Return true if it is cheaper to split the store of a merged int val
448 /// from a pair of smaller values into multiple stores.
449 virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
453 /// \brief Return if the target supports combining a
456 /// %andResult = and %val1, #mask
457 /// %icmpResult = icmp %andResult, 0
459 /// into a single machine instruction of a form like:
461 /// cc = test %register, #mask
463 virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
467 /// Use bitwise logic to make pairs of compares more efficient. For example:
468 /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
469 /// This should be true when it takes more than one instruction to lower
470 /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
471 /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
472 virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
476 /// Return the preferred operand type if the target has a quick way to compare
477 /// integer values of the given size. Assume that any legal integer type can
478 /// be compared efficiently. Targets may override this to allow illegal wide
479 /// types to return a vector type if there is support to compare that type.
480 virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
481 MVT VT = MVT::getIntegerVT(NumBits);
482 return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
485 /// Return true if the target should transform:
486 /// (X & Y) == Y ---> (~X & Y) == 0
487 /// (X & Y) != Y ---> (~X & Y) != 0
489 /// This may be profitable if the target has a bitwise and-not operation that
490 /// sets comparison flags. A target may want to limit the transformation based
491 /// on the type of Y or if Y is a constant.
493 /// Note that the transform will not occur if Y is known to be a power-of-2
494 /// because a mask and compare of a single bit can be handled by inverting the
495 /// predicate, for example:
496 /// (X & 8) == 8 ---> (X & 8) != 0
497 virtual bool hasAndNotCompare(SDValue Y) const {
501 /// Return true if the target has a bitwise and-not operation:
503 /// This can be used to simplify select or other instructions.
504 virtual bool hasAndNot(SDValue X) const {
505 // If the target has the more complex version of this operation, assume that
506 // it has this operation too.
507 return hasAndNotCompare(X);
510 /// \brief Return true if the target wants to use the optimization that
511 /// turns ext(promotableInst1(...(promotableInstN(load)))) into
512 /// promotedInst1(...(promotedInstN(ext(load)))).
513 bool enableExtLdPromotion() const { return EnableExtLdPromotion; }
515 /// Return true if the target can combine store(extractelement VectorTy,
517 /// \p Cost[out] gives the cost of that transformation when this is true.
518 virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
519 unsigned &Cost) const {
523 /// Return true if target supports floating point exceptions.
524 bool hasFloatingPointExceptions() const {
525 return HasFloatingPointExceptions;
528 /// Return true if target always beneficiates from combining into FMA for a
529 /// given value type. This must typically return false on targets where FMA
530 /// takes more cycles to execute than FADD.
531 virtual bool enableAggressiveFMAFusion(EVT VT) const {
535 /// Return the ValueType of the result of SETCC operations.
536 virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
539 /// Return the ValueType for comparison libcalls. Comparions libcalls include
540 /// floating point comparion calls, and Ordered/Unordered check calls on
541 /// floating point numbers.
543 MVT::SimpleValueType getCmpLibcallReturnType() const;
545 /// For targets without i1 registers, this gives the nature of the high-bits
546 /// of boolean values held in types wider than i1.
548 /// "Boolean values" are special true/false values produced by nodes like
549 /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
550 /// Not to be confused with general values promoted from i1. Some cpus
551 /// distinguish between vectors of boolean and scalars; the isVec parameter
552 /// selects between the two kinds. For example on X86 a scalar boolean should
553 /// be zero extended from i1, while the elements of a vector of booleans
554 /// should be sign extended from i1.
556 /// Some cpus also treat floating point types the same way as they treat
557 /// vectors instead of the way they treat scalars.
558 BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
560 return BooleanVectorContents;
561 return isFloat ? BooleanFloatContents : BooleanContents;
564 BooleanContent getBooleanContents(EVT Type) const {
565 return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
568 /// Return target scheduling preference.
569 Sched::Preference getSchedulingPreference() const {
570 return SchedPreferenceInfo;
573 /// Some scheduler, e.g. hybrid, can switch to different scheduling heuristics
574 /// for different nodes. This function returns the preference (or none) for
576 virtual Sched::Preference getSchedulingPreference(SDNode *) const {
580 /// Return the register class that should be used for the specified value
582 virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
583 const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
584 assert(RC && "This value type is not natively supported!");
588 /// Return the 'representative' register class for the specified value
591 /// The 'representative' register class is the largest legal super-reg
592 /// register class for the register class of the value type. For example, on
593 /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
594 /// register class is GR64 on x86_64.
595 virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
596 const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
600 /// Return the cost of the 'representative' register class for the specified
602 virtual uint8_t getRepRegClassCostFor(MVT VT) const {
603 return RepRegClassCostForVT[VT.SimpleTy];
606 /// Return true if the target has native support for the specified value type.
607 /// This means that it has a register that directly holds it without
608 /// promotions or expansions.
609 bool isTypeLegal(EVT VT) const {
610 assert(!VT.isSimple() ||
611 (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
612 return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
615 class ValueTypeActionImpl {
616 /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
617 /// that indicates how instruction selection should deal with the type.
618 LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];
621 ValueTypeActionImpl() {
622 std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
626 LegalizeTypeAction getTypeAction(MVT VT) const {
627 return ValueTypeActions[VT.SimpleTy];
630 void setTypeAction(MVT VT, LegalizeTypeAction Action) {
631 ValueTypeActions[VT.SimpleTy] = Action;
635 const ValueTypeActionImpl &getValueTypeActions() const {
636 return ValueTypeActions;
639 /// Return how we should legalize values of this type, either it is already
640 /// legal (return 'Legal') or we need to promote it to a larger type (return
641 /// 'Promote'), or we need to expand it into multiple registers of smaller
642 /// integer type (return 'Expand'). 'Custom' is not an option.
643 LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
644 return getTypeConversion(Context, VT).first;
646 LegalizeTypeAction getTypeAction(MVT VT) const {
647 return ValueTypeActions.getTypeAction(VT);
650 /// For types supported by the target, this is an identity function. For
651 /// types that must be promoted to larger types, this returns the larger type
652 /// to promote to. For integer types that are larger than the largest integer
653 /// register, this contains one step in the expansion to get to the smaller
654 /// register. For illegal floating point types, this returns the integer type
656 EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
657 return getTypeConversion(Context, VT).second;
660 /// For types supported by the target, this is an identity function. For
661 /// types that must be expanded (i.e. integer types that are larger than the
662 /// largest integer register or illegal floating point types), this returns
663 /// the largest legal type it will be expanded to.
664 EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
665 assert(!VT.isVector());
667 switch (getTypeAction(Context, VT)) {
670 case TypeExpandInteger:
671 VT = getTypeToTransformTo(Context, VT);
674 llvm_unreachable("Type is not legal nor is it to be expanded!");
679 /// Vector types are broken down into some number of legal first class types.
680 /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
681 /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
682 /// turns into 4 EVT::i32 values with both PPC and X86.
684 /// This method returns the number of registers needed, and the VT for each
685 /// register. It also returns the VT and quantity of the intermediate values
686 /// before they are promoted/expanded.
687 unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
689 unsigned &NumIntermediates,
690 MVT &RegisterVT) const;
692 /// Certain targets such as MIPS require that some types such as vectors are
693 /// always broken down into scalars in some contexts. This occurs even if the
694 /// vector type is legal.
695 virtual unsigned getVectorTypeBreakdownForCallingConv(
696 LLVMContext &Context, EVT VT, EVT &IntermediateVT,
697 unsigned &NumIntermediates, MVT &RegisterVT) const {
698 return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
702 struct IntrinsicInfo {
703 unsigned opc = 0; // target opcode
704 EVT memVT; // memory VT
706 // value representing memory location
707 PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;
709 int offset = 0; // offset off of ptrVal
710 unsigned size = 0; // the size of the memory location
711 // (taken from memVT if zero)
712 unsigned align = 1; // alignment
714 MachineMemOperand::Flags flags = MachineMemOperand::MONone;
715 IntrinsicInfo() = default;
718 /// Given an intrinsic, checks if on the target the intrinsic will need to map
719 /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
720 /// true and store the intrinsic information into the IntrinsicInfo that was
721 /// passed to the function.
722 virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
724 unsigned /*Intrinsic*/) const {
728 /// Returns true if the target can instruction select the specified FP
729 /// immediate natively. If false, the legalizer will materialize the FP
730 /// immediate as a load from a constant pool.
731 virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
735 /// Targets can use this to indicate that they only support *some*
736 /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
737 /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
739 virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
743 /// Returns true if the operation can trap for the value type.
745 /// VT must be a legal type. By default, we optimistically assume most
746 /// operations don't trap except for integer divide and remainder.
747 virtual bool canOpTrap(unsigned Op, EVT VT) const;
749 /// Similar to isShuffleMaskLegal. This is used by Targets can use this to
750 /// indicate if there is a suitable VECTOR_SHUFFLE that can be used to replace
751 /// a VAND with a constant pool entry.
752 virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
757 /// Return how this operation should be treated: either it is legal, needs to
758 /// be promoted to a larger size, needs to be expanded to some other code
759 /// sequence, or the target has a custom expander for it.
760 LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
761 if (VT.isExtended()) return Expand;
762 // If a target-specific SDNode requires legalization, require the target
763 // to provide custom legalization for it.
764 if (Op >= array_lengthof(OpActions[0])) return Custom;
765 return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
768 /// Return true if the specified operation is legal on this target or can be
769 /// made legal with custom lowering. This is used to help guide high-level
770 /// lowering decisions.
771 bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
772 return (VT == MVT::Other || isTypeLegal(VT)) &&
773 (getOperationAction(Op, VT) == Legal ||
774 getOperationAction(Op, VT) == Custom);
777 /// Return true if the specified operation is legal on this target or can be
778 /// made legal using promotion. This is used to help guide high-level lowering
780 bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
781 return (VT == MVT::Other || isTypeLegal(VT)) &&
782 (getOperationAction(Op, VT) == Legal ||
783 getOperationAction(Op, VT) == Promote);
786 /// Return true if the specified operation is legal on this target or can be
787 /// made legal with custom lowering or using promotion. This is used to help
788 /// guide high-level lowering decisions.
789 bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT) const {
790 return (VT == MVT::Other || isTypeLegal(VT)) &&
791 (getOperationAction(Op, VT) == Legal ||
792 getOperationAction(Op, VT) == Custom ||
793 getOperationAction(Op, VT) == Promote);
796 /// Return true if the operation uses custom lowering, regardless of whether
797 /// the type is legal or not.
798 bool isOperationCustom(unsigned Op, EVT VT) const {
799 return getOperationAction(Op, VT) == Custom;
802 /// Return true if lowering to a jump table is allowed.
803 bool areJTsAllowed(const Function *Fn) const {
804 if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
807 return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
808 isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
811 /// Check whether the range [Low,High] fits in a machine word.
812 bool rangeFitsInWord(const APInt &Low, const APInt &High,
813 const DataLayout &DL) const {
814 // FIXME: Using the pointer type doesn't seem ideal.
815 uint64_t BW = DL.getPointerSizeInBits();
816 uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
820 /// Return true if lowering to a jump table is suitable for a set of case
821 /// clusters which may contain \p NumCases cases, \p Range range of values.
822 /// FIXME: This function check the maximum table size and density, but the
823 /// minimum size is not checked. It would be nice if the the minimum size is
824 /// also combined within this function. Currently, the minimum size check is
825 /// performed in findJumpTable() in SelectionDAGBuiler and
826 /// getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
827 virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
828 uint64_t Range) const {
829 const bool OptForSize = SI->getParent()->getParent()->optForSize();
830 const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
831 const unsigned MaxJumpTableSize =
832 OptForSize || getMaximumJumpTableSize() == 0
834 : getMaximumJumpTableSize();
835 // Check whether a range of clusters is dense enough for a jump table.
836 if (Range <= MaxJumpTableSize &&
837 (NumCases * 100 >= Range * MinDensity)) {
843 /// Return true if lowering to a bit test is suitable for a set of case
844 /// clusters which contains \p NumDests unique destinations, \p Low and
845 /// \p High as its lowest and highest case values, and expects \p NumCmps
846 /// case value comparisons. Check if the number of destinations, comparison
847 /// metric, and range are all suitable.
848 bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
849 const APInt &Low, const APInt &High,
850 const DataLayout &DL) const {
851 // FIXME: I don't think NumCmps is the correct metric: a single case and a
852 // range of cases both require only one branch to lower. Just looking at the
853 // number of clusters and destinations should be enough to decide whether to
856 // To lower a range with bit tests, the range must fit the bitwidth of a
858 if (!rangeFitsInWord(Low, High, DL))
861 // Decide whether it's profitable to lower this range with bit tests. Each
862 // destination requires a bit test and branch, and there is an overall range
863 // check branch. For a small number of clusters, separate comparisons might
864 // be cheaper, and for many destinations, splitting the range might be
866 return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
867 (NumDests == 3 && NumCmps >= 6);
870 /// Return true if the specified operation is illegal on this target or
871 /// unlikely to be made legal with custom lowering. This is used to help guide
872 /// high-level lowering decisions.
873 bool isOperationExpand(unsigned Op, EVT VT) const {
874 return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
877 /// Return true if the specified operation is legal on this target.
878 bool isOperationLegal(unsigned Op, EVT VT) const {
879 return (VT == MVT::Other || isTypeLegal(VT)) &&
880 getOperationAction(Op, VT) == Legal;
883 /// Return how this load with extension should be treated: either it is legal,
884 /// needs to be promoted to a larger size, needs to be expanded to some other
885 /// code sequence, or the target has a custom expander for it.
886 LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
888 if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
889 unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
890 unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
891 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
892 MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
893 unsigned Shift = 4 * ExtType;
894 return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
897 /// Return true if the specified load with extension is legal on this target.
898 bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
899 return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
902 /// Return true if the specified load with extension is legal or custom
904 bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
905 return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
906 getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
909 /// Return how this store with truncation should be treated: either it is
910 /// legal, needs to be promoted to a larger size, needs to be expanded to some
911 /// other code sequence, or the target has a custom expander for it.
912 LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
913 if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
914 unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
915 unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
916 assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
917 "Table isn't big enough!");
918 return TruncStoreActions[ValI][MemI];
921 /// Return true if the specified store with truncation is legal on this
923 bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
924 return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
927 /// Return true if the specified store with truncation has solution on this
929 bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
930 return isTypeLegal(ValVT) &&
931 (getTruncStoreAction(ValVT, MemVT) == Legal ||
932 getTruncStoreAction(ValVT, MemVT) == Custom);
935 /// Return how the indexed load should be treated: either it is legal, needs
936 /// to be promoted to a larger size, needs to be expanded to some other code
937 /// sequence, or the target has a custom expander for it.
939 getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
940 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
941 "Table isn't big enough!");
942 unsigned Ty = (unsigned)VT.SimpleTy;
943 return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
946 /// Return true if the specified indexed load is legal on this target.
947 bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
948 return VT.isSimple() &&
949 (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
950 getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
953 /// Return how the indexed store should be treated: either it is legal, needs
954 /// to be promoted to a larger size, needs to be expanded to some other code
955 /// sequence, or the target has a custom expander for it.
957 getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
958 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
959 "Table isn't big enough!");
960 unsigned Ty = (unsigned)VT.SimpleTy;
961 return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
964 /// Return true if the specified indexed load is legal on this target.
965 bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
966 return VT.isSimple() &&
967 (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
968 getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
971 /// Return how the condition code should be treated: either it is legal, needs
972 /// to be expanded to some other code sequence, or the target has a custom
975 getCondCodeAction(ISD::CondCode CC, MVT VT) const {
976 assert((unsigned)CC < array_lengthof(CondCodeActions) &&
977 ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
978 "Table isn't big enough!");
979 // See setCondCodeAction for how this is encoded.
980 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
981 uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
982 LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
983 assert(Action != Promote && "Can't promote condition code!");
987 /// Return true if the specified condition code is legal on this target.
988 bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
990 getCondCodeAction(CC, VT) == Legal ||
991 getCondCodeAction(CC, VT) == Custom;
994 /// If the action for this operation is to promote, this method returns the
995 /// ValueType to promote to.
996 MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
997 assert(getOperationAction(Op, VT) == Promote &&
998 "This operation isn't promoted!");
1000 // See if this has an explicit type specified.
1001 std::map<std::pair<unsigned, MVT::SimpleValueType>,
1002 MVT::SimpleValueType>::const_iterator PTTI =
1003 PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
1004 if (PTTI != PromoteToType.end()) return PTTI->second;
1006 assert((VT.isInteger() || VT.isFloatingPoint()) &&
1007 "Cannot autopromote this type, add it with AddPromotedToType.");
1011 NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
1012 assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
1013 "Didn't find type to promote to!");
1014 } while (!isTypeLegal(NVT) ||
1015 getOperationAction(Op, NVT) == Promote);
1019 /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
1020 /// operations except for the pointer size. If AllowUnknown is true, this
1021 /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
1022 /// otherwise it will assert.
1023 EVT getValueType(const DataLayout &DL, Type *Ty,
1024 bool AllowUnknown = false) const {
1025 // Lower scalar pointers to native pointer types.
1026 if (PointerType *PTy = dyn_cast<PointerType>(Ty))
1027 return getPointerTy(DL, PTy->getAddressSpace());
1029 if (Ty->isVectorTy()) {
1030 VectorType *VTy = cast<VectorType>(Ty);
1031 Type *Elm = VTy->getElementType();
1032 // Lower vectors of pointers to native pointer types.
1033 if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
1034 EVT PointerTy(getPointerTy(DL, PT->getAddressSpace()));
1035 Elm = PointerTy.getTypeForEVT(Ty->getContext());
1038 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
1039 VTy->getNumElements());
1041 return EVT::getEVT(Ty, AllowUnknown);
1044 /// Return the MVT corresponding to this LLVM type. See getValueType.
1045 MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
1046 bool AllowUnknown = false) const {
1047 return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
1050 /// Return the desired alignment for ByVal or InAlloca aggregate function
1051 /// arguments in the caller parameter area. This is the actual alignment, not
1053 virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
1055 /// Return the type of registers that this ValueType will eventually require.
1056 MVT getRegisterType(MVT VT) const {
1057 assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
1058 return RegisterTypeForVT[VT.SimpleTy];
1061 /// Return the type of registers that this ValueType will eventually require.
1062 MVT getRegisterType(LLVMContext &Context, EVT VT) const {
1063 if (VT.isSimple()) {
1064 assert((unsigned)VT.getSimpleVT().SimpleTy <
1065 array_lengthof(RegisterTypeForVT));
1066 return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
1068 if (VT.isVector()) {
1071 unsigned NumIntermediates;
1072 (void)getVectorTypeBreakdown(Context, VT, VT1,
1073 NumIntermediates, RegisterVT);
1076 if (VT.isInteger()) {
1077 return getRegisterType(Context, getTypeToTransformTo(Context, VT));
1079 llvm_unreachable("Unsupported extended type!");
1082 /// Return the number of registers that this ValueType will eventually
1085 /// This is one for any types promoted to live in larger registers, but may be
1086 /// more than one for types (like i64) that are split into pieces. For types
1087 /// like i140, which are first promoted then expanded, it is the number of
1088 /// registers needed to hold all the bits of the original type. For an i140
1089 /// on a 32 bit machine this means 5 registers.
1090 unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
1091 if (VT.isSimple()) {
1092 assert((unsigned)VT.getSimpleVT().SimpleTy <
1093 array_lengthof(NumRegistersForVT));
1094 return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
1096 if (VT.isVector()) {
1099 unsigned NumIntermediates;
1100 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
1102 if (VT.isInteger()) {
1103 unsigned BitWidth = VT.getSizeInBits();
1104 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
1105 return (BitWidth + RegWidth - 1) / RegWidth;
1107 llvm_unreachable("Unsupported extended type!");
1110 /// Certain combinations of ABIs, Targets and features require that types
1111 /// are legal for some operations and not for other operations.
1112 /// For MIPS all vector types must be passed through the integer register set.
1113 virtual MVT getRegisterTypeForCallingConv(MVT VT) const {
1114 return getRegisterType(VT);
1117 virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
1119 return getRegisterType(Context, VT);
1122 /// Certain targets require unusual breakdowns of certain types. For MIPS,
1123 /// this occurs when a vector type is used, as vector are passed through the
1124 /// integer register set.
1125 virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
1127 return getNumRegisters(Context, VT);
1130 /// Certain targets have context senstive alignment requirements, where one
1131 /// type has the alignment requirement of another type.
1132 virtual unsigned getABIAlignmentForCallingConv(Type *ArgTy,
1133 DataLayout DL) const {
1134 return DL.getABITypeAlignment(ArgTy);
1137 /// If true, then instruction selection should seek to shrink the FP constant
1138 /// of the specified type to a smaller type in order to save space and / or
1140 virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
1142 // Return true if it is profitable to reduce the given load node to a smaller
1145 // e.g. (i16 (trunc (i32 (load x))) -> i16 load x should be performed
1146 virtual bool shouldReduceLoadWidth(SDNode *Load,
1147 ISD::LoadExtType ExtTy,
1152 /// When splitting a value of the specified type into parts, does the Lo
1153 /// or Hi part come first? This usually follows the endianness, except
1154 /// for ppcf128, where the Hi part always comes first.
1155 bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
1156 return DL.isBigEndian() || VT == MVT::ppcf128;
1159 /// If true, the target has custom DAG combine transformations that it can
1160 /// perform for the specified node.
1161 bool hasTargetDAGCombine(ISD::NodeType NT) const {
1162 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
1163 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
1166 unsigned getGatherAllAliasesMaxDepth() const {
1167 return GatherAllAliasesMaxDepth;
1170 /// Returns the size of the platform's va_list object.
1171 virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
1172 return getPointerTy(DL).getSizeInBits();
1175 /// \brief Get maximum # of store operations permitted for llvm.memset
1177 /// This function returns the maximum number of store operations permitted
1178 /// to replace a call to llvm.memset. The value is set by the target at the
1179 /// performance threshold for such a replacement. If OptSize is true,
1180 /// return the limit for functions that have OptSize attribute.
1181 unsigned getMaxStoresPerMemset(bool OptSize) const {
1182 return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
1185 /// \brief Get maximum # of store operations permitted for llvm.memcpy
1187 /// This function returns the maximum number of store operations permitted
1188 /// to replace a call to llvm.memcpy. The value is set by the target at the
1189 /// performance threshold for such a replacement. If OptSize is true,
1190 /// return the limit for functions that have OptSize attribute.
1191 unsigned getMaxStoresPerMemcpy(bool OptSize) const {
1192 return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
1195 /// Get maximum # of load operations permitted for memcmp
1197 /// This function returns the maximum number of load operations permitted
1198 /// to replace a call to memcmp. The value is set by the target at the
1199 /// performance threshold for such a replacement. If OptSize is true,
1200 /// return the limit for functions that have OptSize attribute.
1201 unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
1202 return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
1205 /// \brief Get maximum # of store operations permitted for llvm.memmove
1207 /// This function returns the maximum number of store operations permitted
1208 /// to replace a call to llvm.memmove. The value is set by the target at the
1209 /// performance threshold for such a replacement. If OptSize is true,
1210 /// return the limit for functions that have OptSize attribute.
1211 unsigned getMaxStoresPerMemmove(bool OptSize) const {
1212 return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
1215 /// \brief Determine if the target supports unaligned memory accesses.
1217 /// This function returns true if the target allows unaligned memory accesses
1218 /// of the specified type in the given address space. If true, it also returns
1219 /// whether the unaligned memory access is "fast" in the last argument by
1220 /// reference. This is used, for example, in situations where an array
1221 /// copy/move/set is converted to a sequence of store operations. Its use
1222 /// helps to ensure that such replacements don't generate code that causes an
1223 /// alignment error (trap) on the target machine.
1224 virtual bool allowsMisalignedMemoryAccesses(EVT,
1225 unsigned AddrSpace = 0,
1227 bool * /*Fast*/ = nullptr) const {
1231 /// Return true if the target supports a memory access of this type for the
1232 /// given address space and alignment. If the access is allowed, the optional
1233 /// final parameter returns if the access is also fast (as defined by the
1235 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1236 unsigned AddrSpace = 0, unsigned Alignment = 1,
1237 bool *Fast = nullptr) const;
1239 /// Returns the target specific optimal type for load and store operations as
1240 /// a result of memset, memcpy, and memmove lowering.
1242 /// If DstAlign is zero that means it's safe to destination alignment can
1243 /// satisfy any constraint. Similarly if SrcAlign is zero it means there isn't
1244 /// a need to check it against alignment requirement, probably because the
1245 /// source does not need to be loaded. If 'IsMemset' is true, that means it's
1246 /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
1247 /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
1248 /// does not need to be loaded. It returns EVT::Other if the type should be
1249 /// determined using generic target-independent logic.
1250 virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
1251 unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
1253 bool /*ZeroMemset*/,
1254 bool /*MemcpyStrSrc*/,
1255 MachineFunction &/*MF*/) const {
1259 /// Returns true if it's safe to use load / store of the specified type to
1260 /// expand memcpy / memset inline.
1262 /// This is mostly true for all types except for some special cases. For
1263 /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
1264 /// fstpl which also does type conversion. Note the specified type doesn't
1265 /// have to be legal as the hook is used before type legalization.
1266 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
1268 /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
1269 bool usesUnderscoreSetJmp() const {
1270 return UseUnderscoreSetJmp;
1273 /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
1274 bool usesUnderscoreLongJmp() const {
1275 return UseUnderscoreLongJmp;
1278 /// Return lower limit for number of blocks in a jump table.
1279 virtual unsigned getMinimumJumpTableEntries() const;
1281 /// Return lower limit of the density in a jump table.
1282 unsigned getMinimumJumpTableDensity(bool OptForSize) const;
1284 /// Return upper limit for number of entries in a jump table.
1285 /// Zero if no limit.
1286 unsigned getMaximumJumpTableSize() const;
1288 virtual bool isJumpTableRelative() const {
1289 return TM.isPositionIndependent();
1292 /// If a physical register, this specifies the register that
1293 /// llvm.savestack/llvm.restorestack should save and restore.
1294 unsigned getStackPointerRegisterToSaveRestore() const {
1295 return StackPointerRegisterToSaveRestore;
1298 /// If a physical register, this returns the register that receives the
1299 /// exception address on entry to an EH pad.
1301 getExceptionPointerRegister(const Constant *PersonalityFn) const {
1302 // 0 is guaranteed to be the NoRegister value on all targets
1306 /// If a physical register, this returns the register that receives the
1307 /// exception typeid on entry to a landing pad.
1309 getExceptionSelectorRegister(const Constant *PersonalityFn) const {
1310 // 0 is guaranteed to be the NoRegister value on all targets
1314 virtual bool needsFixedCatchObjects() const {
1315 report_fatal_error("Funclet EH is not implemented for this target");
1318 /// Returns the target's jmp_buf size in bytes (if never set, the default is
1320 unsigned getJumpBufSize() const {
1324 /// Returns the target's jmp_buf alignment in bytes (if never set, the default
1326 unsigned getJumpBufAlignment() const {
1327 return JumpBufAlignment;
1330 /// Return the minimum stack alignment of an argument.
1331 unsigned getMinStackArgumentAlignment() const {
1332 return MinStackArgumentAlignment;
1335 /// Return the minimum function alignment.
1336 unsigned getMinFunctionAlignment() const {
1337 return MinFunctionAlignment;
1340 /// Return the preferred function alignment.
1341 unsigned getPrefFunctionAlignment() const {
1342 return PrefFunctionAlignment;
1345 /// Return the preferred loop alignment.
1346 virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
1347 return PrefLoopAlignment;
1350 /// If the target has a standard location for the stack protector guard,
1351 /// returns the address of that location. Otherwise, returns nullptr.
1352 /// DEPRECATED: please override useLoadStackGuardNode and customize
1353 /// LOAD_STACK_GUARD, or customize @llvm.stackguard().
1354 virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;
1356 /// Inserts necessary declarations for SSP (stack protection) purpose.
1357 /// Should be used only when getIRStackGuard returns nullptr.
1358 virtual void insertSSPDeclarations(Module &M) const;
1360 /// Return the variable that's previously inserted by insertSSPDeclarations,
1361 /// if any, otherwise return nullptr. Should be used only when
1362 /// getIRStackGuard returns nullptr.
1363 virtual Value *getSDagStackGuard(const Module &M) const;
1365 /// If this function returns true, stack protection checks should XOR the
1366 /// frame pointer (or whichever pointer is used to address locals) into the
1367 /// stack guard value before checking it. getIRStackGuard must return nullptr
1368 /// if this returns true.
1369 virtual bool useStackGuardXorFP() const { return false; }
1371 /// If the target has a standard stack protection check function that
1372 /// performs validation and error handling, returns the function. Otherwise,
1373 /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
1374 /// Should be used only when getIRStackGuard returns nullptr.
1375 virtual Value *getSSPStackGuardCheck(const Module &M) const;
1378 Value *getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
1382 /// Returns the target-specific address of the unsafe stack pointer.
1383 virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;
1385 /// Returns the name of the symbol used to emit stack probes or the empty
1386 /// string if not applicable.
1387 virtual StringRef getStackProbeSymbolName(MachineFunction &MF) const {
1391 /// Returns true if a cast between SrcAS and DestAS is a noop.
1392 virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
1396 /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
1397 /// are happy to sink it into basic blocks.
1398 virtual bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
1399 return isNoopAddrSpaceCast(SrcAS, DestAS);
1402 /// Return true if the pointer arguments to CI should be aligned by aligning
1403 /// the object whose address is being passed. If so then MinSize is set to the
1404 /// minimum size the object must be to be aligned and PrefAlign is set to the
1405 /// preferred alignment.
1406 virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
1407 unsigned & /*PrefAlign*/) const {
1411 //===--------------------------------------------------------------------===//
1412 /// \name Helpers for TargetTransformInfo implementations
1415 /// Get the ISD node that corresponds to the Instruction class opcode.
1416 int InstructionOpcodeToISD(unsigned Opcode) const;
1418 /// Estimate the cost of type-legalization and the legalized type.
1419 std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
1424 //===--------------------------------------------------------------------===//
1425 /// \name Helpers for atomic expansion.
1428 /// Returns the maximum atomic operation size (in bits) supported by
1429 /// the backend. Atomic operations greater than this size (as well
1430 /// as ones that are not naturally aligned), will be expanded by
1431 /// AtomicExpandPass into an __atomic_* library call.
1432 unsigned getMaxAtomicSizeInBitsSupported() const {
1433 return MaxAtomicSizeInBitsSupported;
1436 /// Returns the size of the smallest cmpxchg or ll/sc instruction
1437 /// the backend supports. Any smaller operations are widened in
1438 /// AtomicExpandPass.
1440 /// Note that *unlike* operations above the maximum size, atomic ops
1441 /// are still natively supported below the minimum; they just
1442 /// require a more complex expansion.
1443 unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
1445 /// Whether the target supports unaligned atomic operations.
1446 bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
1448 /// Whether AtomicExpandPass should automatically insert fences and reduce
1449 /// ordering for this atomic. This should be true for most architectures with
1450 /// weak memory ordering. Defaults to false.
1451 virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
1455 /// Perform a load-linked operation on Addr, returning a "Value *" with the
1456 /// corresponding pointee type. This may entail some non-trivial operations to
1457 /// truncate or reconstruct types that will be illegal in the backend. See
1458 /// ARMISelLowering for an example implementation.
1459 virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
1460 AtomicOrdering Ord) const {
1461 llvm_unreachable("Load linked unimplemented on this target");
1464 /// Perform a store-conditional operation to Addr. Return the status of the
1465 /// store. This should be 0 if the store succeeded, non-zero otherwise.
1466 virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
1467 Value *Addr, AtomicOrdering Ord) const {
1468 llvm_unreachable("Store conditional unimplemented on this target");
1471 /// Inserts in the IR a target-specific intrinsic specifying a fence.
1472 /// It is called by AtomicExpandPass before expanding an
1473 /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
1474 /// if shouldInsertFencesForAtomic returns true.
1476 /// Inst is the original atomic instruction, prior to other expansions that
1477 /// may be performed.
1479 /// This function should either return a nullptr, or a pointer to an IR-level
1480 /// Instruction*. Even complex fence sequences can be represented by a
1481 /// single Instruction* through an intrinsic to be lowered later.
1482 /// Backends should override this method to produce target-specific intrinsic
1483 /// for their fences.
1484 /// FIXME: Please note that the default implementation here in terms of
1485 /// IR-level fences exists for historical/compatibility reasons and is
1486 /// *unsound* ! Fences cannot, in general, be used to restore sequential
1487 /// consistency. For example, consider the following example:
1488 /// atomic<int> x = y = 0;
1489 /// int r1, r2, r3, r4;
1500 /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
1501 /// seq_cst. But if they are lowered to monotonic accesses, no amount of
1502 /// IR-level fences can prevent it.
1504 virtual Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
1505 AtomicOrdering Ord) const {
1506 if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
1507 return Builder.CreateFence(Ord);
1512 virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
1514 AtomicOrdering Ord) const {
1515 if (isAcquireOrStronger(Ord))
1516 return Builder.CreateFence(Ord);
1522 // Emits code that executes when the comparison result in the ll/sc
1523 // expansion of a cmpxchg instruction is such that the store-conditional will
1524 // not execute. This makes it possible to balance out the load-linked with
1525 // a dedicated instruction, if desired.
1526 // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
1527 // be unnecessarily held, except if clrex, inserted by this hook, is executed.
1528 virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}
1530 /// Returns true if the given (atomic) store should be expanded by the
1531 /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
1532 virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
1536 /// Returns true if arguments should be sign-extended in lib calls.
1537 virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
1541 /// Returns how the given (atomic) load should be expanded by the
1542 /// IR-level AtomicExpand pass.
1543 virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
1544 return AtomicExpansionKind::None;
1547 /// Returns true if the given atomic cmpxchg should be expanded by the
1548 /// IR-level AtomicExpand pass into a load-linked/store-conditional sequence
1549 /// (through emitLoadLinked() and emitStoreConditional()).
1550 virtual bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
1554 /// Returns how the IR-level AtomicExpand pass should expand the given
1555 /// AtomicRMW, if at all. Default is to never expand.
1556 virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const {
1557 return AtomicExpansionKind::None;
1560 /// On some platforms, an AtomicRMW that never actually modifies the value
1561 /// (such as fetch_add of 0) can be turned into a fence followed by an
1562 /// atomic load. This may sound useless, but it makes it possible for the
1563 /// processor to keep the cacheline shared, dramatically improving
1564 /// performance. And such idempotent RMWs are useful for implementing some
1565 /// kinds of locks, see for example (justification + benchmarks):
1566 /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
1567 /// This method tries doing that transformation, returning the atomic load if
1568 /// it succeeds, and nullptr otherwise.
1569 /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
1570 /// another round of expansion.
1572 lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
1576 /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
1577 /// SIGN_EXTEND, or ANY_EXTEND).
1578 virtual ISD::NodeType getExtendForAtomicOps() const {
1579 return ISD::ZERO_EXTEND;
1584 /// Returns true if we should normalize
1585 /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
1586 /// select(N0|N1, X, Y) => select(N0, select(N1, X, Y, Y)) if it is likely
1587 /// that it saves us from materializing N0 and N1 in an integer register.
1588 /// Targets that are able to perform and/or on flags should return false here.
1589 virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
1591 // If a target has multiple condition registers, then it likely has logical
1592 // operations on those registers.
1593 if (hasMultipleConditionRegisters())
1595 // Only do the transform if the value won't be split into multiple
1597 LegalizeTypeAction Action = getTypeAction(Context, VT);
1598 return Action != TypeExpandInteger && Action != TypeExpandFloat &&
1599 Action != TypeSplitVector;
1602 /// Return true if a select of constants (select Cond, C1, C2) should be
1603 /// transformed into simple math ops with the condition value. For example:
1604 /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
1605 virtual bool convertSelectOfConstantsToMath(EVT VT) const {
1609 //===--------------------------------------------------------------------===//
1610 // TargetLowering Configuration Methods - These methods should be invoked by
1611 // the derived class constructor to configure this object for the target.
1614 /// Specify how the target extends the result of integer and floating point
1615 /// boolean values from i1 to a wider type. See getBooleanContents.
1616 void setBooleanContents(BooleanContent Ty) {
1617 BooleanContents = Ty;
1618 BooleanFloatContents = Ty;
1621 /// Specify how the target extends the result of integer and floating point
1622 /// boolean values from i1 to a wider type. See getBooleanContents.
1623 void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
1624 BooleanContents = IntTy;
1625 BooleanFloatContents = FloatTy;
1628 /// Specify how the target extends the result of a vector boolean value from a
1629 /// vector of i1 to a wider type. See getBooleanContents.
1630 void setBooleanVectorContents(BooleanContent Ty) {
1631 BooleanVectorContents = Ty;
1634 /// Specify the target scheduling preference.
1635 void setSchedulingPreference(Sched::Preference Pref) {
1636 SchedPreferenceInfo = Pref;
1639 /// Indicate whether this target prefers to use _setjmp to implement
1640 /// llvm.setjmp or the version without _. Defaults to false.
1641 void setUseUnderscoreSetJmp(bool Val) {
1642 UseUnderscoreSetJmp = Val;
1645 /// Indicate whether this target prefers to use _longjmp to implement
1646 /// llvm.longjmp or the version without _. Defaults to false.
1647 void setUseUnderscoreLongJmp(bool Val) {
1648 UseUnderscoreLongJmp = Val;
1651 /// Indicate the minimum number of blocks to generate jump tables.
1652 void setMinimumJumpTableEntries(unsigned Val);
1654 /// Indicate the maximum number of entries in jump tables.
1655 /// Set to zero to generate unlimited jump tables.
1656 void setMaximumJumpTableSize(unsigned);
1658 /// If set to a physical register, this specifies the register that
1659 /// llvm.savestack/llvm.restorestack should save and restore.
1660 void setStackPointerRegisterToSaveRestore(unsigned R) {
1661 StackPointerRegisterToSaveRestore = R;
1664 /// Tells the code generator that the target has multiple (allocatable)
1665 /// condition registers that can be used to store the results of comparisons
1666 /// for use by selects and conditional branches. With multiple condition
1667 /// registers, the code generator will not aggressively sink comparisons into
1668 /// the blocks of their users.
1669 void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
1670 HasMultipleConditionRegisters = hasManyRegs;
1673 /// Tells the code generator that the target has BitExtract instructions.
1674 /// The code generator will aggressively sink "shift"s into the blocks of
1675 /// their users if the users will generate "and" instructions which can be
1676 /// combined with "shift" to BitExtract instructions.
1677 void setHasExtractBitsInsn(bool hasExtractInsn = true) {
1678 HasExtractBitsInsn = hasExtractInsn;
1681 /// Tells the code generator not to expand logic operations on comparison
1682 /// predicates into separate sequences that increase the amount of flow
1684 void setJumpIsExpensive(bool isExpensive = true);
1686 /// Tells the code generator that this target supports floating point
1687 /// exceptions and cares about preserving floating point exception behavior.
1688 void setHasFloatingPointExceptions(bool FPExceptions = true) {
1689 HasFloatingPointExceptions = FPExceptions;
1692 /// Tells the code generator which bitwidths to bypass.
1693 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
1694 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
1697 /// Add the specified register class as an available regclass for the
1698 /// specified value type. This indicates the selector can handle values of
1699 /// that class natively.
1700 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
1701 assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
1702 RegClassForVT[VT.SimpleTy] = RC;
1705 /// Return the largest legal super-reg register class of the register class
1706 /// for the specified type and its associated "cost".
1707 virtual std::pair<const TargetRegisterClass *, uint8_t>
1708 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
1710 /// Once all of the register classes are added, this allows us to compute
1711 /// derived properties we expose.
1712 void computeRegisterProperties(const TargetRegisterInfo *TRI);
1714 /// Indicate that the specified operation does not work with the specified
1715 /// type and indicate what to do about it. Note that VT may refer to either
1716 /// the type of a result or that of an operand of Op.
1717 void setOperationAction(unsigned Op, MVT VT,
1718 LegalizeAction Action) {
1719 assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
1720 OpActions[(unsigned)VT.SimpleTy][Op] = Action;
1723 /// Indicate that the specified load with extension does not work with the
1724 /// specified type and indicate what to do about it.
1725 void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
1726 LegalizeAction Action) {
1727 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
1728 MemVT.isValid() && "Table isn't big enough!");
1729 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
1730 unsigned Shift = 4 * ExtType;
1731 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
1732 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
1735 /// Indicate that the specified truncating store does not work with the
1736 /// specified type and indicate what to do about it.
1737 void setTruncStoreAction(MVT ValVT, MVT MemVT,
1738 LegalizeAction Action) {
1739 assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
1740 TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
1743 /// Indicate that the specified indexed load does or does not work with the
1744 /// specified type and indicate what to do abort it.
1746 /// NOTE: All indexed mode loads are initialized to Expand in
1747 /// TargetLowering.cpp
1748 void setIndexedLoadAction(unsigned IdxMode, MVT VT,
1749 LegalizeAction Action) {
1750 assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
1751 (unsigned)Action < 0xf && "Table isn't big enough!");
1752 // Load action are kept in the upper half.
1753 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
1754 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
1757 /// Indicate that the specified indexed store does or does not work with the
1758 /// specified type and indicate what to do about it.
1760 /// NOTE: All indexed mode stores are initialized to Expand in
1761 /// TargetLowering.cpp
1762 void setIndexedStoreAction(unsigned IdxMode, MVT VT,
1763 LegalizeAction Action) {
1764 assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
1765 (unsigned)Action < 0xf && "Table isn't big enough!");
1766 // Store action are kept in the lower half.
1767 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
1768 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
1771 /// Indicate that the specified condition code is or isn't supported on the
1772 /// target and indicate what to do about it.
1773 void setCondCodeAction(ISD::CondCode CC, MVT VT,
1774 LegalizeAction Action) {
1775 assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
1776 "Table isn't big enough!");
1777 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
1778 /// The lower 3 bits of the SimpleTy index into Nth 4bit set from the 32-bit
1779 /// value and the upper 29 bits index into the second dimension of the array
1780 /// to select what 32-bit value to use.
1781 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
1782 CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
1783 CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
1786 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
1787 /// to trying a larger integer/fp until it can find one that works. If that
1788 /// default is insufficient, this method can be used by the target to override
1790 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
1791 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
1794 /// Convenience method to set an operation to Promote and specify the type
1795 /// in a single call.
1796 void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
1797 setOperationAction(Opc, OrigVT, Promote);
1798 AddPromotedToType(Opc, OrigVT, DestVT);
1801 /// Targets should invoke this method for each target independent node that
1802 /// they want to provide a custom DAG combiner for by implementing the
1803 /// PerformDAGCombine virtual method.
1804 void setTargetDAGCombine(ISD::NodeType NT) {
1805 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
1806 TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
1809 /// Set the target's required jmp_buf buffer size (in bytes); default is 200
1810 void setJumpBufSize(unsigned Size) {
1814 /// Set the target's required jmp_buf buffer alignment (in bytes); default is
1816 void setJumpBufAlignment(unsigned Align) {
1817 JumpBufAlignment = Align;
1820 /// Set the target's minimum function alignment (in log2(bytes))
1821 void setMinFunctionAlignment(unsigned Align) {
1822 MinFunctionAlignment = Align;
1825 /// Set the target's preferred function alignment. This should be set if
1826 /// there is a performance benefit to higher-than-minimum alignment (in
1828 void setPrefFunctionAlignment(unsigned Align) {
1829 PrefFunctionAlignment = Align;
1832 /// Set the target's preferred loop alignment. Default alignment is zero, it
1833 /// means the target does not care about loop alignment. The alignment is
1834 /// specified in log2(bytes). The target may also override
1835 /// getPrefLoopAlignment to provide per-loop values.
1836 void setPrefLoopAlignment(unsigned Align) {
1837 PrefLoopAlignment = Align;
1840 /// Set the minimum stack alignment of an argument (in log2(bytes)).
1841 void setMinStackArgumentAlignment(unsigned Align) {
1842 MinStackArgumentAlignment = Align;
1845 /// Set the maximum atomic operation size supported by the
1846 /// backend. Atomic operations greater than this size (as well as
1847 /// ones that are not naturally aligned), will be expanded by
1848 /// AtomicExpandPass into an __atomic_* library call.
1849 void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
1850 MaxAtomicSizeInBitsSupported = SizeInBits;
1853 /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
1854 void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
1855 MinCmpXchgSizeInBits = SizeInBits;
1858 /// Sets whether unaligned atomic operations are supported.
1859 void setSupportsUnalignedAtomics(bool UnalignedSupported) {
1860 SupportsUnalignedAtomics = UnalignedSupported;
1864 //===--------------------------------------------------------------------===//
1865 // Addressing mode description hooks (used by LSR etc).
1868 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
1869 /// instructions reading the address. This allows as much computation as
1870 /// possible to be done in the address mode for that operand. This hook lets
1871 /// targets also pass back when this should be done on intrinsics which
1873 virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
1874 SmallVectorImpl<Value*> &/*Ops*/,
1875 Type *&/*AccessTy*/) const {
1879 /// This represents an addressing mode of:
1880 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
1881 /// If BaseGV is null, there is no BaseGV.
1882 /// If BaseOffs is zero, there is no base offset.
1883 /// If HasBaseReg is false, there is no base register.
1884 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
1887 GlobalValue *BaseGV = nullptr;
1888 int64_t BaseOffs = 0;
1889 bool HasBaseReg = false;
1891 AddrMode() = default;
1894 /// Return true if the addressing mode represented by AM is legal for this
1895 /// target, for a load/store of the specified type.
1897 /// The type may be VoidTy, in which case only return true if the addressing
1898 /// mode is legal for a load/store of any legal type. TODO: Handle
1899 /// pre/postinc as well.
1901 /// If the address space cannot be determined, it will be -1.
1903 /// TODO: Remove default argument
1904 virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
1905 Type *Ty, unsigned AddrSpace,
1906 Instruction *I = nullptr) const;
1908 /// \brief Return the cost of the scaling factor used in the addressing mode
1909 /// represented by AM for this target, for a load/store of the specified type.
1911 /// If the AM is supported, the return value must be >= 0.
1912 /// If the AM is not supported, it returns a negative value.
1913 /// TODO: Handle pre/postinc as well.
1914 /// TODO: Remove default argument
1915 virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
1916 Type *Ty, unsigned AS = 0) const {
1917 // Default: assume that any scaling factor used in a legal AM is free.
1918 if (isLegalAddressingMode(DL, AM, Ty, AS))
1923 /// Return true if the specified immediate is legal icmp immediate, that is
1924 /// the target has icmp instructions which can compare a register against the
1925 /// immediate without having to materialize the immediate into a register.
1926 virtual bool isLegalICmpImmediate(int64_t) const {
1930 /// Return true if the specified immediate is legal add immediate, that is the
1931 /// target has add instructions which can add a register with the immediate
1932 /// without having to materialize the immediate into a register.
1933 virtual bool isLegalAddImmediate(int64_t) const {
1937 /// Return true if it's significantly cheaper to shift a vector by a uniform
1938 /// scalar than by an amount which will vary across each lane. On x86, for
1939 /// example, there is a "psllw" instruction for the former case, but no simple
1940 /// instruction for a general "a << b" operation on vectors.
1941 virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
1945 /// Returns true if the opcode is a commutative binary operation.
1946 virtual bool isCommutativeBinOp(unsigned Opcode) const {
1947 // FIXME: This should get its info from the td file.
1957 case ISD::SMUL_LOHI:
1958 case ISD::UMUL_LOHI:
1973 default: return false;
1977 /// Return true if it's free to truncate a value of type FromTy to type
1978 /// ToTy. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
1979 /// by referencing its sub-register AX.
1980 /// Targets must return false when FromTy <= ToTy.
1981 virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
1985 /// Return true if a truncation from FromTy to ToTy is permitted when deciding
1986 /// whether a call is in tail position. Typically this means that both results
1987 /// would be assigned to the same register or stack slot, but it could mean
1988 /// the target performs adequate checks of its own before proceeding with the
1989 /// tail call. Targets must return false when FromTy <= ToTy.
1990 virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
1994 virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
1998 virtual bool isProfitableToHoist(Instruction *I) const { return true; }
2000 /// Return true if the extension represented by \p I is free.
2001 /// Unlikely the is[Z|FP]ExtFree family which is based on types,
2002 /// this method can use the context provided by \p I to decide
2003 /// whether or not \p I is free.
2004 /// This method extends the behavior of the is[Z|FP]ExtFree family.
2005 /// In other words, if is[Z|FP]Free returns true, then this method
2006 /// returns true as well. The converse is not true.
2007 /// The target can perform the adequate checks by overriding isExtFreeImpl.
2008 /// \pre \p I must be a sign, zero, or fp extension.
2009 bool isExtFree(const Instruction *I) const {
2010 switch (I->getOpcode()) {
2011 case Instruction::FPExt:
2012 if (isFPExtFree(EVT::getEVT(I->getType()),
2013 EVT::getEVT(I->getOperand(0)->getType())))
2016 case Instruction::ZExt:
2017 if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
2020 case Instruction::SExt:
2023 llvm_unreachable("Instruction is not an extension");
2025 return isExtFreeImpl(I);
2028 /// Return true if \p Load and \p Ext can form an ExtLoad.
2029 /// For example, in AArch64
2030 /// %L = load i8, i8* %ptr
2031 /// %E = zext i8 %L to i32
2032 /// can be lowered into one load instruction
2034 bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
2035 const DataLayout &DL) const {
2036 EVT VT = getValueType(DL, Ext->getType());
2037 EVT LoadVT = getValueType(DL, Load->getType());
2039 // If the load has other users and the truncate is not free, the ext
2040 // probably isn't free.
2041 if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
2042 !isTruncateFree(Ext->getType(), Load->getType()))
2045 // Check whether the target supports casts folded into loads.
2047 if (isa<ZExtInst>(Ext))
2048 LType = ISD::ZEXTLOAD;
2050 assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
2051 LType = ISD::SEXTLOAD;
2054 return isLoadExtLegal(LType, VT, LoadVT);
2057 /// Return true if any actual instruction that defines a value of type FromTy
2058 /// implicitly zero-extends the value to ToTy in the result register.
2060 /// The function should return true when it is likely that the truncate can
2061 /// be freely folded with an instruction defining a value of FromTy. If
2062 /// the defining instruction is unknown (because you're looking at a
2063 /// function argument, PHI, etc.) then the target may require an
2064 /// explicit truncate, which is not necessarily free, but this function
2065 /// does not deal with those cases.
2066 /// Targets must return false when FromTy >= ToTy.
2067 virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
2071 virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
2075 /// Return true if the target supplies and combines to a paired load
2076 /// two loaded values of type LoadedType next to each other in memory.
2077 /// RequiredAlignment gives the minimal alignment constraints that must be met
2078 /// to be able to select this paired load.
2080 /// This information is *not* used to generate actual paired loads, but it is
2081 /// used to generate a sequence of loads that is easier to combine into a
2083 /// For instance, something like this:
2084 /// a = load i64* addr
2085 /// b = trunc i64 a to i32
2086 /// c = lshr i64 a, 32
2087 /// d = trunc i64 c to i32
2088 /// will be optimized into:
2089 /// b = load i32* addr1
2090 /// d = load i32* addr2
2091 /// Where addr1 = addr2 +/- sizeof(i32).
2093 /// In other words, unless the target performs a post-isel load combining,
2094 /// this information should not be provided because it will generate more
2096 virtual bool hasPairedLoad(EVT /*LoadedType*/,
2097 unsigned & /*RequiredAlignment*/) const {
2101 /// \brief Get the maximum supported factor for interleaved memory accesses.
2102 /// Default to be the minimum interleave factor: 2.
2103 virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
2105 /// \brief Lower an interleaved load to target specific intrinsics. Return
2106 /// true on success.
2108 /// \p LI is the vector load instruction.
2109 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
2110 /// \p Indices is the corresponding indices for each shufflevector.
2111 /// \p Factor is the interleave factor.
2112 virtual bool lowerInterleavedLoad(LoadInst *LI,
2113 ArrayRef<ShuffleVectorInst *> Shuffles,
2114 ArrayRef<unsigned> Indices,
2115 unsigned Factor) const {
2119 /// \brief Lower an interleaved store to target specific intrinsics. Return
2120 /// true on success.
2122 /// \p SI is the vector store instruction.
2123 /// \p SVI is the shufflevector to RE-interleave the stored vector.
2124 /// \p Factor is the interleave factor.
2125 virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
2126 unsigned Factor) const {
2130 /// Return true if zero-extending the specific node Val to type VT2 is free
2131 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
2132 /// because it's folded such as X86 zero-extending loads).
2133 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
2134 return isZExtFree(Val.getValueType(), VT2);
2137 /// Return true if an fpext operation is free (for instance, because
2138 /// single-precision floating-point numbers are implicitly extended to
2139 /// double-precision).
2140 virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
2141 assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
2142 "invalid fpext types");
2146 /// Return true if an fpext operation input to an \p Opcode operation is free
2147 /// (for instance, because half-precision floating-point numbers are
2148 /// implicitly extended to float-precision) for an FMA instruction.
2149 virtual bool isFPExtFoldable(unsigned Opcode, EVT DestVT, EVT SrcVT) const {
2150 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
2151 "invalid fpext types");
2152 return isFPExtFree(DestVT, SrcVT);
2155 /// Return true if folding a vector load into ExtVal (a sign, zero, or any
2156 /// extend node) is profitable.
2157 virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
2159 /// Return true if an fneg operation is free to the point where it is never
2160 /// worthwhile to replace it with a bitwise operation.
2161 virtual bool isFNegFree(EVT VT) const {
2162 assert(VT.isFloatingPoint());
2166 /// Return true if an fabs operation is free to the point where it is never
2167 /// worthwhile to replace it with a bitwise operation.
2168 virtual bool isFAbsFree(EVT VT) const {
2169 assert(VT.isFloatingPoint());
2173 /// Return true if an FMA operation is faster than a pair of fmul and fadd
2174 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
2175 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
2177 /// NOTE: This may be called before legalization on types for which FMAs are
2178 /// not legal, but should return true if those types will eventually legalize
2179 /// to types that support FMAs. After legalization, it will only be called on
2180 /// types that support FMAs (via Legal or Custom actions)
2181 virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
2185 /// Return true if it's profitable to narrow operations of type VT1 to
2186 /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
2188 virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
2192 /// \brief Return true if it is beneficial to convert a load of a constant to
2193 /// just the constant itself.
2194 /// On some targets it might be more efficient to use a combination of
2195 /// arithmetic instructions to materialize the constant instead of loading it
2196 /// from a constant pool.
2197 virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
2202 /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
2203 /// from this source type with this index. This is needed because
2204 /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
2205 /// the first element, and only the target knows which lowering is cheap.
2206 virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
2207 unsigned Index) const {
2211 // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
2212 // even if the vector itself has multiple uses.
2213 virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
2217 //===--------------------------------------------------------------------===//
2218 // Runtime Library hooks
2221 /// Rename the default libcall routine name for the specified libcall.
2222 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
2223 LibcallRoutineNames[Call] = Name;
2226 /// Get the libcall routine name for the specified libcall.
2227 const char *getLibcallName(RTLIB::Libcall Call) const {
2228 return LibcallRoutineNames[Call];
2231 /// Override the default CondCode to be used to test the result of the
2232 /// comparison libcall against zero.
2233 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
2234 CmpLibcallCCs[Call] = CC;
2237 /// Get the CondCode that's to be used to test the result of the comparison
2238 /// libcall against zero.
2239 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
2240 return CmpLibcallCCs[Call];
2243 /// Set the CallingConv that should be used for the specified libcall.
2244 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
2245 LibcallCallingConvs[Call] = CC;
2248 /// Get the CallingConv that should be used for the specified libcall.
2249 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
2250 return LibcallCallingConvs[Call];
2253 /// Execute target specific actions to finalize target lowering.
2254 /// This is used to set extra flags in MachineFrameInformation and freezing
2255 /// the set of reserved registers.
2256 /// The default implementation just freezes the set of reserved registers.
2257 virtual void finalizeLowering(MachineFunction &MF) const;
2260 const TargetMachine &TM;
2262 /// Tells the code generator that the target has multiple (allocatable)
2263 /// condition registers that can be used to store the results of comparisons
2264 /// for use by selects and conditional branches. With multiple condition
2265 /// registers, the code generator will not aggressively sink comparisons into
2266 /// the blocks of their users.
2267 bool HasMultipleConditionRegisters;
2269 /// Tells the code generator that the target has BitExtract instructions.
2270 /// The code generator will aggressively sink "shift"s into the blocks of
2271 /// their users if the users will generate "and" instructions which can be
2272 /// combined with "shift" to BitExtract instructions.
2273 bool HasExtractBitsInsn;
2275 /// Tells the code generator to bypass slow divide or remainder
2276 /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
2277 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
2278 /// div/rem when the operands are positive and less than 256.
2279 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
2281 /// Tells the code generator that it shouldn't generate extra flow control
2282 /// instructions and should attempt to combine flow control instructions via
2284 bool JumpIsExpensive;
2286 /// Whether the target supports or cares about preserving floating point
2287 /// exception behavior.
2288 bool HasFloatingPointExceptions;
2290 /// This target prefers to use _setjmp to implement llvm.setjmp.
2292 /// Defaults to false.
2293 bool UseUnderscoreSetJmp;
2295 /// This target prefers to use _longjmp to implement llvm.longjmp.
2297 /// Defaults to false.
2298 bool UseUnderscoreLongJmp;
2300 /// Information about the contents of the high-bits in boolean values held in
2301 /// a type wider than i1. See getBooleanContents.
2302 BooleanContent BooleanContents;
2304 /// Information about the contents of the high-bits in boolean values held in
2305 /// a type wider than i1. See getBooleanContents.
2306 BooleanContent BooleanFloatContents;
2308 /// Information about the contents of the high-bits in boolean vector values
2309 /// when the element type is wider than i1. See getBooleanContents.
2310 BooleanContent BooleanVectorContents;
2312 /// The target scheduling preference: shortest possible total cycles or lowest
2314 Sched::Preference SchedPreferenceInfo;
2316 /// The size, in bytes, of the target's jmp_buf buffers
2317 unsigned JumpBufSize;
2319 /// The alignment, in bytes, of the target's jmp_buf buffers
2320 unsigned JumpBufAlignment;
2322 /// The minimum alignment that any argument on the stack needs to have.
2323 unsigned MinStackArgumentAlignment;
2325 /// The minimum function alignment (used when optimizing for size, and to
2326 /// prevent explicitly provided alignment from leading to incorrect code).
2327 unsigned MinFunctionAlignment;
2329 /// The preferred function alignment (used when alignment unspecified and
2330 /// optimizing for speed).
2331 unsigned PrefFunctionAlignment;
2333 /// The preferred loop alignment.
2334 unsigned PrefLoopAlignment;
2336 /// Size in bits of the maximum atomics size the backend supports.
2337 /// Accesses larger than this will be expanded by AtomicExpandPass.
2338 unsigned MaxAtomicSizeInBitsSupported;
2340 /// Size in bits of the minimum cmpxchg or ll/sc operation the
2341 /// backend supports.
2342 unsigned MinCmpXchgSizeInBits;
2344 /// This indicates if the target supports unaligned atomic operations.
2345 bool SupportsUnalignedAtomics;
2347 /// If set to a physical register, this specifies the register that
2348 /// llvm.savestack/llvm.restorestack should save and restore.
2349 unsigned StackPointerRegisterToSaveRestore;
2351 /// This indicates the default register class to use for each ValueType the
2352 /// target supports natively.
2353 const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
2354 unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
2355 MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
2357 /// This indicates the "representative" register class to use for each
2358 /// ValueType the target supports natively. This information is used by the
2359 /// scheduler to track register pressure. By default, the representative
2360 /// register class is the largest legal super-reg register class of the
2361 /// register class of the specified type. e.g. On x86, i8, i16, and i32's
2362 /// representative class would be GR32.
2363 const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
2365 /// This indicates the "cost" of the "representative" register class for each
2366 /// ValueType. The cost is used by the scheduler to approximate register
2368 uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
2370 /// For any value types we are promoting or expanding, this contains the value
2371 /// type that we are changing to. For Expanded types, this contains one step
2372 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
2373 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
2374 /// the same type (e.g. i32 -> i32).
2375 MVT TransformToType[MVT::LAST_VALUETYPE];
2377 /// For each operation and each value type, keep a LegalizeAction that
2378 /// indicates how instruction selection should deal with the operation. Most
2379 /// operations are Legal (aka, supported natively by the target), but
2380 /// operations that are not should be described. Note that operations on
2381 /// non-legal value types are not described here.
2382 LegalizeAction OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
2384 /// For each load extension type and each value type, keep a LegalizeAction
2385 /// that indicates how instruction selection should deal with a load of a
2386 /// specific value type and extension type. Uses 4-bits to store the action
2387 /// for each of the 4 load ext types.
2388 uint16_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2390 /// For each value type pair keep a LegalizeAction that indicates whether a
2391 /// truncating store of a specific value type and truncating type is legal.
2392 LegalizeAction TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2394 /// For each indexed mode and each value type, keep a pair of LegalizeAction
2395 /// that indicates how instruction selection should deal with the load /
2398 /// The first dimension is the value_type for the reference. The second
2399 /// dimension represents the various modes for load store.
2400 uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
2402 /// For each condition code (ISD::CondCode) keep a LegalizeAction that
2403 /// indicates how instruction selection should deal with the condition code.
2405 /// Because each CC action takes up 4 bits, we need to have the array size be
2406 /// large enough to fit all of the value types. This can be done by rounding
2407 /// up the MVT::LAST_VALUETYPE value to the next multiple of 8.
2408 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 7) / 8];
2411 ValueTypeActionImpl ValueTypeActions;
2414 LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
2416 /// Targets can specify ISD nodes that they would like PerformDAGCombine
2417 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
2420 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
2422 /// For operations that must be promoted to a specific type, this holds the
2423 /// destination type. This map should be sparse, so don't hold it as an
2426 /// Targets add entries to this map with AddPromotedToType(..), clients access
2427 /// this with getTypeToPromoteTo(..).
2428 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
2431 /// Stores the name each libcall.
2432 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];
2434 /// The ISD::CondCode that should be used to test the result of each of the
2435 /// comparison libcall against zero.
2436 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
2438 /// Stores the CallingConv that should be used for each libcall.
2439 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
2441 /// Set default libcall names and calling conventions.
2442 void InitLibcalls(const Triple &TT);
2445 /// Return true if the extension represented by \p I is free.
2446 /// \pre \p I is a sign, zero, or fp extension and
2447 /// is[Z|FP]ExtFree of the related types is not true.
2448 virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
2450 /// Depth that GatherAllAliases should should continue looking for chain
2451 /// dependencies when trying to find a more preferable chain. As an
2452 /// approximation, this should be more than the number of consecutive stores
2453 /// expected to be merged.
2454 unsigned GatherAllAliasesMaxDepth;
2456 /// \brief Specify maximum number of store instructions per memset call.
2458 /// When lowering \@llvm.memset this field specifies the maximum number of
2459 /// store operations that may be substituted for the call to memset. Targets
2460 /// must set this value based on the cost threshold for that target. Targets
2461 /// should assume that the memset will be done using as many of the largest
2462 /// store operations first, followed by smaller ones, if necessary, per
2463 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
2464 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
2465 /// store. This only applies to setting a constant array of a constant size.
2466 unsigned MaxStoresPerMemset;
2468 /// Maximum number of stores operations that may be substituted for the call
2469 /// to memset, used for functions with OptSize attribute.
2470 unsigned MaxStoresPerMemsetOptSize;
2472 /// \brief Specify maximum bytes of store instructions per memcpy call.
2474 /// When lowering \@llvm.memcpy this field specifies the maximum number of
2475 /// store operations that may be substituted for a call to memcpy. Targets
2476 /// must set this value based on the cost threshold for that target. Targets
2477 /// should assume that the memcpy will be done using as many of the largest
2478 /// store operations first, followed by smaller ones, if necessary, per
2479 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
2480 /// with 32-bit alignment would result in one 4-byte store, a one 2-byte store
2481 /// and one 1-byte store. This only applies to copying a constant array of
2483 unsigned MaxStoresPerMemcpy;
2485 /// Maximum number of store operations that may be substituted for a call to
2486 /// memcpy, used for functions with OptSize attribute.
2487 unsigned MaxStoresPerMemcpyOptSize;
2488 unsigned MaxLoadsPerMemcmp;
2489 unsigned MaxLoadsPerMemcmpOptSize;
2491 /// \brief Specify maximum bytes of store instructions per memmove call.
2493 /// When lowering \@llvm.memmove this field specifies the maximum number of
2494 /// store instructions that may be substituted for a call to memmove. Targets
2495 /// must set this value based on the cost threshold for that target. Targets
2496 /// should assume that the memmove will be done using as many of the largest
2497 /// store operations first, followed by smaller ones, if necessary, per
2498 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
2499 /// with 8-bit alignment would result in nine 1-byte stores. This only
2500 /// applies to copying a constant array of constant size.
2501 unsigned MaxStoresPerMemmove;
2503 /// Maximum number of store instructions that may be substituted for a call to
2504 /// memmove, used for functions with OptSize attribute.
2505 unsigned MaxStoresPerMemmoveOptSize;
2507 /// Tells the code generator that select is more expensive than a branch if
2508 /// the branch is usually predicted right.
2509 bool PredictableSelectIsExpensive;
2511 /// \see enableExtLdPromotion.
2512 bool EnableExtLdPromotion;
2514 /// Return true if the value types that can be represented by the specified
2515 /// register class are all legal.
2516 bool isLegalRC(const TargetRegisterInfo &TRI,
2517 const TargetRegisterClass &RC) const;
2519 /// Replace/modify any TargetFrameIndex operands with a targte-dependent
2520 /// sequence of memory operands that is recognized by PrologEpilogInserter.
2521 MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
2522 MachineBasicBlock *MBB) const;
2525 /// This class defines information used to lower LLVM code to legal SelectionDAG
2526 /// operators that the target instruction selector can accept natively.
2528 /// This class also defines callbacks that targets must implement to lower
2529 /// target-specific constructs to SelectionDAG operators.
2530 class TargetLowering : public TargetLoweringBase {
2532 struct DAGCombinerInfo;
2534 TargetLowering(const TargetLowering &) = delete;
2535 TargetLowering &operator=(const TargetLowering &) = delete;
2537 /// NOTE: The TargetMachine owns TLOF.
2538 explicit TargetLowering(const TargetMachine &TM);
2540 bool isPositionIndependent() const;
2542 /// Returns true by value, base pointer and offset pointer and addressing mode
2543 /// by reference if the node's address can be legally represented as
2544 /// pre-indexed load / store address.
2545 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
2546 SDValue &/*Offset*/,
2547 ISD::MemIndexedMode &/*AM*/,
2548 SelectionDAG &/*DAG*/) const {
2552 /// Returns true by value, base pointer and offset pointer and addressing mode
2553 /// by reference if this node can be combined with a load / store to form a
2554 /// post-indexed load / store.
2555 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
2557 SDValue &/*Offset*/,
2558 ISD::MemIndexedMode &/*AM*/,
2559 SelectionDAG &/*DAG*/) const {
2563 /// Return the entry encoding for a jump table in the current function. The
2564 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
2565 virtual unsigned getJumpTableEncoding() const;
2567 virtual const MCExpr *
2568 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
2569 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
2570 MCContext &/*Ctx*/) const {
2571 llvm_unreachable("Need to implement this hook if target has custom JTIs");
2574 /// Returns relocation base for the given PIC jumptable.
2575 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
2576 SelectionDAG &DAG) const;
2578 /// This returns the relocation base for the given PIC jumptable, the same as
2579 /// getPICJumpTableRelocBase, but as an MCExpr.
2580 virtual const MCExpr *
2581 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2582 unsigned JTI, MCContext &Ctx) const;
2584 /// Return true if folding a constant offset with the given GlobalAddress is
2585 /// legal. It is frequently not legal in PIC relocation models.
2586 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
2588 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
2589 SDValue &Chain) const;
2591 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
2592 SDValue &NewRHS, ISD::CondCode &CCCode,
2593 const SDLoc &DL) const;
2595 /// Returns a pair of (return value, chain).
2596 /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
2597 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
2598 EVT RetVT, ArrayRef<SDValue> Ops,
2599 bool isSigned, const SDLoc &dl,
2600 bool doesNotReturn = false,
2601 bool isReturnValueUsed = true) const;
2603 /// Check whether parameters to a call that are passed in callee saved
2604 /// registers are the same as from the calling function. This needs to be
2605 /// checked for tail call eligibility.
2606 bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
2607 const uint32_t *CallerPreservedMask,
2608 const SmallVectorImpl<CCValAssign> &ArgLocs,
2609 const SmallVectorImpl<SDValue> &OutVals) const;
2611 //===--------------------------------------------------------------------===//
2612 // TargetLowering Optimization Methods
2615 /// A convenience struct that encapsulates a DAG, and two SDValues for
2616 /// returning information from TargetLowering to its clients that want to
2618 struct TargetLoweringOpt {
2625 explicit TargetLoweringOpt(SelectionDAG &InDAG,
2627 DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
2629 bool LegalTypes() const { return LegalTys; }
2630 bool LegalOperations() const { return LegalOps; }
2632 bool CombineTo(SDValue O, SDValue N) {
2639 /// Check to see if the specified operand of the specified instruction is a
2640 /// constant integer. If so, check to see if there are any bits set in the
2641 /// constant that are not demanded. If so, shrink the constant and return
2643 bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
2644 TargetLoweringOpt &TLO) const;
2646 // Target hook to do target-specific const optimization, which is called by
2647 // ShrinkDemandedConstant. This function should return true if the target
2648 // doesn't want ShrinkDemandedConstant to further optimize the constant.
2649 virtual bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
2650 TargetLoweringOpt &TLO) const {
2654 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
2655 /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
2656 /// generalized for targets with other types of implicit widening casts.
2657 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
2658 TargetLoweringOpt &TLO) const;
2660 /// Helper for SimplifyDemandedBits that can simplify an operation with
2661 /// multiple uses. This function simplifies operand \p OpIdx of \p User and
2662 /// then updates \p User with the simplified version. No other uses of
2663 /// \p OpIdx are updated. If \p User is the only user of \p OpIdx, this
2664 /// function behaves exactly like function SimplifyDemandedBits declared
2665 /// below except that it also updates the DAG by calling
2666 /// DCI.CommitTargetLoweringOpt.
2667 bool SimplifyDemandedBits(SDNode *User, unsigned OpIdx, const APInt &Demanded,
2668 DAGCombinerInfo &DCI, TargetLoweringOpt &TLO) const;
2670 /// Look at Op. At this point, we know that only the DemandedMask bits of the
2671 /// result of Op are ever used downstream. If we can use this information to
2672 /// simplify Op, create a new simplified DAG node and return true, returning
2673 /// the original and new nodes in Old and New. Otherwise, analyze the
2674 /// expression and return a mask of KnownOne and KnownZero bits for the
2675 /// expression (used to simplify the caller). The KnownZero/One bits may only
2676 /// be accurate for those bits in the DemandedMask.
2677 /// \p AssumeSingleUse When this parameter is true, this function will
2678 /// attempt to simplify \p Op even if there are multiple uses.
2679 /// Callers are responsible for correctly updating the DAG based on the
2680 /// results of this function, because simply replacing replacing TLO.Old
2681 /// with TLO.New will be incorrect when this parameter is true and TLO.Old
2682 /// has multiple uses.
2683 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
2685 TargetLoweringOpt &TLO,
2687 bool AssumeSingleUse = false) const;
2689 /// Helper wrapper around SimplifyDemandedBits
2690 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
2691 DAGCombinerInfo &DCI) const;
2693 /// Determine which of the bits specified in Mask are known to be either zero
2694 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
2695 /// argument allows us to only collect the known bits that are shared by the
2696 /// requested vector elements.
2697 virtual void computeKnownBitsForTargetNode(const SDValue Op,
2699 const APInt &DemandedElts,
2700 const SelectionDAG &DAG,
2701 unsigned Depth = 0) const;
2703 /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
2704 /// Default implementation computes low bits based on alignment
2705 /// information. This should preserve known bits passed into it.
2706 virtual void computeKnownBitsForFrameIndex(const SDValue FIOp,
2708 const APInt &DemandedElts,
2709 const SelectionDAG &DAG,
2710 unsigned Depth = 0) const;
2712 /// This method can be implemented by targets that want to expose additional
2713 /// information about sign bits to the DAG Combiner. The DemandedElts
2714 /// argument allows us to only collect the minimum sign bits that are shared
2715 /// by the requested vector elements.
2716 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
2717 const APInt &DemandedElts,
2718 const SelectionDAG &DAG,
2719 unsigned Depth = 0) const;
2721 struct DAGCombinerInfo {
2722 void *DC; // The DAG Combiner object.
2724 bool CalledByLegalizer;
2729 DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
2730 : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
2732 bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
2733 bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
2734 bool isAfterLegalizeVectorOps() const {
2735 return Level == AfterLegalizeDAG;
2737 CombineLevel getDAGCombineLevel() { return Level; }
2738 bool isCalledByLegalizer() const { return CalledByLegalizer; }
2740 void AddToWorklist(SDNode *N);
2741 SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
2742 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
2743 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
2745 void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
2748 /// Return if the N is a constant or constant vector equal to the true value
2749 /// from getBooleanContents().
2750 bool isConstTrueVal(const SDNode *N) const;
2752 /// Return if the N is a constant or constant vector equal to the false value
2753 /// from getBooleanContents().
2754 bool isConstFalseVal(const SDNode *N) const;
2756 /// Return a constant of type VT that contains a true value that respects
2757 /// getBooleanContents()
2758 SDValue getConstTrueVal(SelectionDAG &DAG, EVT VT, const SDLoc &DL) const;
2760 /// Return if \p N is a True value when extended to \p VT.
2761 bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool Signed) const;
2763 /// Try to simplify a setcc built with the specified operands and cc. If it is
2764 /// unable to simplify it, return a null SDValue.
2765 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
2766 bool foldBooleans, DAGCombinerInfo &DCI,
2767 const SDLoc &dl) const;
2769 // For targets which wrap address, unwrap for analysis.
2770 virtual SDValue unwrapAddress(SDValue N) const { return N; }
2772 /// Returns true (and the GlobalValue and the offset) if the node is a
2773 /// GlobalAddress + offset.
2775 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
2777 /// This method will be invoked for all target nodes and for any
2778 /// target-independent nodes that the target has registered with invoke it
2781 /// The semantics are as follows:
2783 /// SDValue.Val == 0 - No change was made
2784 /// SDValue.Val == N - N was replaced, is dead, and is already handled.
2785 /// otherwise - N should be replaced by the returned Operand.
2787 /// In addition, methods provided by DAGCombinerInfo may be used to perform
2788 /// more complex transformations.
2790 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
2792 /// Return true if it is profitable to move a following shift through this
2793 // node, adjusting any immediate operands as necessary to preserve semantics.
2794 // This transformation may not be desirable if it disrupts a particularly
2795 // auspicious target-specific tree (e.g. bitfield extraction in AArch64).
2796 // By default, it returns true.
2797 virtual bool isDesirableToCommuteWithShift(const SDNode *N) const {
2801 // Return true if it is profitable to combine a BUILD_VECTOR with a stride-pattern
2802 // to a shuffle and a truncate.
2803 // Example of such a combine:
2804 // v4i32 build_vector((extract_elt V, 1),
2805 // (extract_elt V, 3),
2806 // (extract_elt V, 5),
2807 // (extract_elt V, 7))
2809 // v4i32 truncate (bitcast (shuffle<1,u,3,u,5,u,7,u> V, u) to v4i64)
2810 virtual bool isDesirableToCombineBuildVectorToShuffleTruncate(
2811 ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
2815 /// Return true if the target has native support for the specified value type
2816 /// and it is 'desirable' to use the type for the given node type. e.g. On x86
2817 /// i16 is legal, but undesirable since i16 instruction encodings are longer
2818 /// and some i16 instructions are slow.
2819 virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
2820 // By default, assume all legal types are desirable.
2821 return isTypeLegal(VT);
2824 /// Return true if it is profitable for dag combiner to transform a floating
2825 /// point op of specified opcode to a equivalent op of an integer
2826 /// type. e.g. f32 load -> i32 load can be profitable on ARM.
2827 virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
2832 /// This method query the target whether it is beneficial for dag combiner to
2833 /// promote the specified node. If true, it should return the desired
2834 /// promotion type by reference.
2835 virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
2839 /// Return true if the target supports swifterror attribute. It optimizes
2840 /// loads and stores to reading and writing a specific register.
2841 virtual bool supportSwiftError() const {
2845 /// Return true if the target supports that a subset of CSRs for the given
2846 /// machine function is handled explicitly via copies.
2847 virtual bool supportSplitCSR(MachineFunction *MF) const {
2851 /// Perform necessary initialization to handle a subset of CSRs explicitly
2852 /// via copies. This function is called at the beginning of instruction
2854 virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
2855 llvm_unreachable("Not Implemented");
2858 /// Insert explicit copies in entry and exit blocks. We copy a subset of
2859 /// CSRs to virtual registers in the entry block, and copy them back to
2860 /// physical registers in the exit blocks. This function is called at the end
2861 /// of instruction selection.
2862 virtual void insertCopiesSplitCSR(
2863 MachineBasicBlock *Entry,
2864 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
2865 llvm_unreachable("Not Implemented");
2868 //===--------------------------------------------------------------------===//
2869 // Lowering methods - These methods must be implemented by targets so that
2870 // the SelectionDAGBuilder code knows how to lower these.
2873 /// This hook must be implemented to lower the incoming (formal) arguments,
2874 /// described by the Ins array, into the specified DAG. The implementation
2875 /// should fill in the InVals array with legal-type argument values, and
2876 /// return the resulting token chain value.
2877 virtual SDValue LowerFormalArguments(
2878 SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
2879 const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
2880 SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
2881 llvm_unreachable("Not Implemented");
2884 /// This structure contains all information that is necessary for lowering
2885 /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
2886 /// needs to lower a call, and targets will see this struct in their LowerCall
2888 struct CallLoweringInfo {
2890 Type *RetTy = nullptr;
2895 bool DoesNotReturn : 1;
2896 bool IsReturnValueUsed : 1;
2897 bool IsConvergent : 1;
2898 bool IsPatchPoint : 1;
2900 // IsTailCall should be modified by implementations of
2901 // TargetLowering::LowerCall that perform tail call conversions.
2902 bool IsTailCall = false;
2904 // Is Call lowering done post SelectionDAG type legalization.
2905 bool IsPostTypeLegalization = false;
2907 unsigned NumFixedArgs = -1;
2908 CallingConv::ID CallConv = CallingConv::C;
2913 ImmutableCallSite CS;
2914 SmallVector<ISD::OutputArg, 32> Outs;
2915 SmallVector<SDValue, 32> OutVals;
2916 SmallVector<ISD::InputArg, 32> Ins;
2917 SmallVector<SDValue, 4> InVals;
2919 CallLoweringInfo(SelectionDAG &DAG)
2920 : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
2921 DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
2922 IsPatchPoint(false), DAG(DAG) {}
2924 CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
2929 CallLoweringInfo &setChain(SDValue InChain) {
2934 // setCallee with target/module-specific attributes
2935 CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
2936 SDValue Target, ArgListTy &&ArgsList) {
2940 NumFixedArgs = ArgsList.size();
2941 Args = std::move(ArgsList);
2943 DAG.getTargetLoweringInfo().markLibCallAttributes(
2944 &(DAG.getMachineFunction()), CC, Args);
2948 CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
2949 SDValue Target, ArgListTy &&ArgsList) {
2953 NumFixedArgs = ArgsList.size();
2954 Args = std::move(ArgsList);
2958 CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
2959 SDValue Target, ArgListTy &&ArgsList,
2960 ImmutableCallSite Call) {
2963 IsInReg = Call.hasRetAttr(Attribute::InReg);
2965 Call.doesNotReturn() ||
2966 (!Call.isInvoke() &&
2967 isa<UnreachableInst>(Call.getInstruction()->getNextNode()));
2968 IsVarArg = FTy->isVarArg();
2969 IsReturnValueUsed = !Call.getInstruction()->use_empty();
2970 RetSExt = Call.hasRetAttr(Attribute::SExt);
2971 RetZExt = Call.hasRetAttr(Attribute::ZExt);
2975 CallConv = Call.getCallingConv();
2976 NumFixedArgs = FTy->getNumParams();
2977 Args = std::move(ArgsList);
2984 CallLoweringInfo &setInRegister(bool Value = true) {
2989 CallLoweringInfo &setNoReturn(bool Value = true) {
2990 DoesNotReturn = Value;
2994 CallLoweringInfo &setVarArg(bool Value = true) {
2999 CallLoweringInfo &setTailCall(bool Value = true) {
3004 CallLoweringInfo &setDiscardResult(bool Value = true) {
3005 IsReturnValueUsed = !Value;
3009 CallLoweringInfo &setConvergent(bool Value = true) {
3010 IsConvergent = Value;
3014 CallLoweringInfo &setSExtResult(bool Value = true) {
3019 CallLoweringInfo &setZExtResult(bool Value = true) {
3024 CallLoweringInfo &setIsPatchPoint(bool Value = true) {
3025 IsPatchPoint = Value;
3029 CallLoweringInfo &setIsPostTypeLegalization(bool Value=true) {
3030 IsPostTypeLegalization = Value;
3034 ArgListTy &getArgs() {
3039 /// This function lowers an abstract call to a function into an actual call.
3040 /// This returns a pair of operands. The first element is the return value
3041 /// for the function (if RetTy is not VoidTy). The second element is the
3042 /// outgoing token chain. It calls LowerCall to do the actual lowering.
3043 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
3045 /// This hook must be implemented to lower calls into the specified
3046 /// DAG. The outgoing arguments to the call are described by the Outs array,
3047 /// and the values to be returned by the call are described by the Ins
3048 /// array. The implementation should fill in the InVals array with legal-type
3049 /// return values from the call, and return the resulting token chain value.
3051 LowerCall(CallLoweringInfo &/*CLI*/,
3052 SmallVectorImpl<SDValue> &/*InVals*/) const {
3053 llvm_unreachable("Not Implemented");
3056 /// Target-specific cleanup for formal ByVal parameters.
3057 virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
3059 /// This hook should be implemented to check whether the return values
3060 /// described by the Outs array can fit into the return registers. If false
3061 /// is returned, an sret-demotion is performed.
3062 virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
3063 MachineFunction &/*MF*/, bool /*isVarArg*/,
3064 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
3065 LLVMContext &/*Context*/) const
3067 // Return true by default to get preexisting behavior.
3071 /// This hook must be implemented to lower outgoing return values, described
3072 /// by the Outs array, into the specified DAG. The implementation should
3073 /// return the resulting token chain value.
3074 virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
3076 const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
3077 const SmallVectorImpl<SDValue> & /*OutVals*/,
3078 const SDLoc & /*dl*/,
3079 SelectionDAG & /*DAG*/) const {
3080 llvm_unreachable("Not Implemented");
3083 /// Return true if result of the specified node is used by a return node
3084 /// only. It also compute and return the input chain for the tail call.
3086 /// This is used to determine whether it is possible to codegen a libcall as
3087 /// tail call at legalization time.
3088 virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
3092 /// Return true if the target may be able emit the call instruction as a tail
3093 /// call. This is used by optimization passes to determine if it's profitable
3094 /// to duplicate return instructions to enable tailcall optimization.
3095 virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
3099 /// Return the builtin name for the __builtin___clear_cache intrinsic
3100 /// Default is to invoke the clear cache library call
3101 virtual const char * getClearCacheBuiltinName() const {
3102 return "__clear_cache";
3105 /// Return the register ID of the name passed in. Used by named register
3106 /// global variables extension. There is no target-independent behaviour
3107 /// so the default action is to bail.
3108 virtual unsigned getRegisterByName(const char* RegName, EVT VT,
3109 SelectionDAG &DAG) const {
3110 report_fatal_error("Named registers not implemented for this target");
3113 /// Return the type that should be used to zero or sign extend a
3114 /// zeroext/signext integer return value. FIXME: Some C calling conventions
3115 /// require the return type to be promoted, but this is not true all the time,
3116 /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
3117 /// conventions. The frontend should handle this and include all of the
3118 /// necessary information.
3119 virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
3120 ISD::NodeType /*ExtendKind*/) const {
3121 EVT MinVT = getRegisterType(Context, MVT::i32);
3122 return VT.bitsLT(MinVT) ? MinVT : VT;
3125 /// For some targets, an LLVM struct type must be broken down into multiple
3126 /// simple types, but the calling convention specifies that the entire struct
3127 /// must be passed in a block of consecutive registers.
3129 functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
3130 bool isVarArg) const {
3134 /// Returns a 0 terminated array of registers that can be safely used as
3135 /// scratch registers.
3136 virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
3140 /// This callback is used to prepare for a volatile or atomic load.
3141 /// It takes a chain node as input and returns the chain for the load itself.
3143 /// Having a callback like this is necessary for targets like SystemZ,
3144 /// which allows a CPU to reuse the result of a previous load indefinitely,
3145 /// even if a cache-coherent store is performed by another CPU. The default
3146 /// implementation does nothing.
3147 virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
3148 SelectionDAG &DAG) const {
3152 /// This callback is used to inspect load/store instructions and add
3153 /// target-specific MachineMemOperand flags to them. The default
3154 /// implementation does nothing.
3155 virtual MachineMemOperand::Flags getMMOFlags(const Instruction &I) const {
3156 return MachineMemOperand::MONone;
3159 /// This callback is invoked by the type legalizer to legalize nodes with an
3160 /// illegal operand type but legal result types. It replaces the
3161 /// LowerOperation callback in the type Legalizer. The reason we can not do
3162 /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
3163 /// use this callback.
3165 /// TODO: Consider merging with ReplaceNodeResults.
3167 /// The target places new result values for the node in Results (their number
3168 /// and types must exactly match those of the original return values of
3169 /// the node), or leaves Results empty, which indicates that the node is not
3170 /// to be custom lowered after all.
3171 /// The default implementation calls LowerOperation.
3172 virtual void LowerOperationWrapper(SDNode *N,
3173 SmallVectorImpl<SDValue> &Results,
3174 SelectionDAG &DAG) const;
3176 /// This callback is invoked for operations that are unsupported by the
3177 /// target, which are registered to use 'custom' lowering, and whose defined
3178 /// values are all legal. If the target has no operations that require custom
3179 /// lowering, it need not implement this. The default implementation of this
3181 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
3183 /// This callback is invoked when a node result type is illegal for the
3184 /// target, and the operation was registered to use 'custom' lowering for that
3185 /// result type. The target places new result values for the node in Results
3186 /// (their number and types must exactly match those of the original return
3187 /// values of the node), or leaves Results empty, which indicates that the
3188 /// node is not to be custom lowered after all.
3190 /// If the target has no operations that require custom lowering, it need not
3191 /// implement this. The default implementation aborts.
3192 virtual void ReplaceNodeResults(SDNode * /*N*/,
3193 SmallVectorImpl<SDValue> &/*Results*/,
3194 SelectionDAG &/*DAG*/) const {
3195 llvm_unreachable("ReplaceNodeResults not implemented for this target!");
3198 /// This method returns the name of a target specific DAG node.
3199 virtual const char *getTargetNodeName(unsigned Opcode) const;
3201 /// This method returns a target specific FastISel object, or null if the
3202 /// target does not support "fast" ISel.
3203 virtual FastISel *createFastISel(FunctionLoweringInfo &,
3204 const TargetLibraryInfo *) const {
3208 bool verifyReturnAddressArgumentIsConstant(SDValue Op,
3209 SelectionDAG &DAG) const;
3211 //===--------------------------------------------------------------------===//
3212 // Inline Asm Support hooks
3215 /// This hook allows the target to expand an inline asm call to be explicit
3216 /// llvm code if it wants to. This is useful for turning simple inline asms
3217 /// into LLVM intrinsics, which gives the compiler more information about the
3218 /// behavior of the code.
3219 virtual bool ExpandInlineAsm(CallInst *) const {
3223 enum ConstraintType {
3224 C_Register, // Constraint represents specific register(s).
3225 C_RegisterClass, // Constraint represents any of register(s) in class.
3226 C_Memory, // Memory constraint.
3227 C_Other, // Something else.
3228 C_Unknown // Unsupported constraint.
3231 enum ConstraintWeight {
3233 CW_Invalid = -1, // No match.
3234 CW_Okay = 0, // Acceptable.
3235 CW_Good = 1, // Good weight.
3236 CW_Better = 2, // Better weight.
3237 CW_Best = 3, // Best weight.
3239 // Well-known weights.
3240 CW_SpecificReg = CW_Okay, // Specific register operands.
3241 CW_Register = CW_Good, // Register operands.
3242 CW_Memory = CW_Better, // Memory operands.
3243 CW_Constant = CW_Best, // Constant operand.
3244 CW_Default = CW_Okay // Default or don't know type.
3247 /// This contains information for each constraint that we are lowering.
3248 struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
3249 /// This contains the actual string for the code, like "m". TargetLowering
3250 /// picks the 'best' code from ConstraintInfo::Codes that most closely
3251 /// matches the operand.
3252 std::string ConstraintCode;
3254 /// Information about the constraint code, e.g. Register, RegisterClass,
3255 /// Memory, Other, Unknown.
3256 TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;
3258 /// If this is the result output operand or a clobber, this is null,
3259 /// otherwise it is the incoming operand to the CallInst. This gets
3260 /// modified as the asm is processed.
3261 Value *CallOperandVal = nullptr;
3263 /// The ValueType for the operand value.
3264 MVT ConstraintVT = MVT::Other;
3266 /// Copy constructor for copying from a ConstraintInfo.
3267 AsmOperandInfo(InlineAsm::ConstraintInfo Info)
3268 : InlineAsm::ConstraintInfo(std::move(Info)) {}
3270 /// Return true of this is an input operand that is a matching constraint
3272 bool isMatchingInputConstraint() const;
3274 /// If this is an input matching constraint, this method returns the output
3275 /// operand it matches.
3276 unsigned getMatchedOperand() const;
3279 using AsmOperandInfoVector = std::vector<AsmOperandInfo>;
3281 /// Split up the constraint string from the inline assembly value into the
3282 /// specific constraints and their prefixes, and also tie in the associated
3283 /// operand values. If this returns an empty vector, and if the constraint
3284 /// string itself isn't empty, there was an error parsing.
3285 virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
3286 const TargetRegisterInfo *TRI,
3287 ImmutableCallSite CS) const;
3289 /// Examine constraint type and operand type and determine a weight value.
3290 /// The operand object must already have been set up with the operand type.
3291 virtual ConstraintWeight getMultipleConstraintMatchWeight(
3292 AsmOperandInfo &info, int maIndex) const;
3294 /// Examine constraint string and operand type and determine a weight value.
3295 /// The operand object must already have been set up with the operand type.
3296 virtual ConstraintWeight getSingleConstraintMatchWeight(
3297 AsmOperandInfo &info, const char *constraint) const;
3299 /// Determines the constraint code and constraint type to use for the specific
3300 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
3301 /// If the actual operand being passed in is available, it can be passed in as
3302 /// Op, otherwise an empty SDValue can be passed.
3303 virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
3305 SelectionDAG *DAG = nullptr) const;
3307 /// Given a constraint, return the type of constraint it is for this target.
3308 virtual ConstraintType getConstraintType(StringRef Constraint) const;
3310 /// Given a physical register constraint (e.g. {edx}), return the register
3311 /// number and the register class for the register.
3313 /// Given a register class constraint, like 'r', if this corresponds directly
3314 /// to an LLVM register class, return a register of 0 and the register class
3317 /// This should only be used for C_Register constraints. On error, this
3318 /// returns a register number of 0 and a null register class pointer.
3319 virtual std::pair<unsigned, const TargetRegisterClass *>
3320 getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
3321 StringRef Constraint, MVT VT) const;
3323 virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
3324 if (ConstraintCode == "i")
3325 return InlineAsm::Constraint_i;
3326 else if (ConstraintCode == "m")
3327 return InlineAsm::Constraint_m;
3328 return InlineAsm::Constraint_Unknown;
3331 /// Try to replace an X constraint, which matches anything, with another that
3332 /// has more specific requirements based on the type of the corresponding
3333 /// operand. This returns null if there is no replacement to make.
3334 virtual const char *LowerXConstraint(EVT ConstraintVT) const;
3336 /// Lower the specified operand into the Ops vector. If it is invalid, don't
3337 /// add anything to Ops.
3338 virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
3339 std::vector<SDValue> &Ops,
3340 SelectionDAG &DAG) const;
3342 //===--------------------------------------------------------------------===//
3343 // Div utility functions
3345 SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
3346 bool IsAfterLegalization,
3347 std::vector<SDNode *> *Created) const;
3348 SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
3349 bool IsAfterLegalization,
3350 std::vector<SDNode *> *Created) const;
3352 /// Targets may override this function to provide custom SDIV lowering for
3353 /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
3354 /// assumes SDIV is expensive and replaces it with a series of other integer
3356 virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
3358 std::vector<SDNode *> *Created) const;
3360 /// Indicate whether this target prefers to combine FDIVs with the same
3361 /// divisor. If the transform should never be done, return zero. If the
3362 /// transform should be done, return the minimum number of divisor uses
3363 /// that must exist.
3364 virtual unsigned combineRepeatedFPDivisors() const {
3368 /// Hooks for building estimates in place of slower divisions and square
3371 /// Return either a square root or its reciprocal estimate value for the input
3373 /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
3374 /// 'Enabled' as set by a potential default override attribute.
3375 /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
3376 /// refinement iterations required to generate a sufficient (though not
3377 /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
3378 /// The boolean UseOneConstNR output is used to select a Newton-Raphson
3379 /// algorithm implementation that uses either one or two constants.
3380 /// The boolean Reciprocal is used to select whether the estimate is for the
3381 /// square root of the input operand or the reciprocal of its square root.
3382 /// A target may choose to implement its own refinement within this function.
3383 /// If that's true, then return '0' as the number of RefinementSteps to avoid
3384 /// any further refinement of the estimate.
3385 /// An empty SDValue return means no estimate sequence can be created.
3386 virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
3387 int Enabled, int &RefinementSteps,
3388 bool &UseOneConstNR, bool Reciprocal) const {
3392 /// Return a reciprocal estimate value for the input operand.
3393 /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
3394 /// 'Enabled' as set by a potential default override attribute.
3395 /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
3396 /// refinement iterations required to generate a sufficient (though not
3397 /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
3398 /// A target may choose to implement its own refinement within this function.
3399 /// If that's true, then return '0' as the number of RefinementSteps to avoid
3400 /// any further refinement of the estimate.
3401 /// An empty SDValue return means no estimate sequence can be created.
3402 virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
3403 int Enabled, int &RefinementSteps) const {
3407 //===--------------------------------------------------------------------===//
3408 // Legalization utility functions
3411 /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
3412 /// respectively, each computing an n/2-bit part of the result.
3413 /// \param Result A vector that will be filled with the parts of the result
3414 /// in little-endian order.
3415 /// \param LL Low bits of the LHS of the MUL. You can use this parameter
3416 /// if you want to control how low bits are extracted from the LHS.
3417 /// \param LH High bits of the LHS of the MUL. See LL for meaning.
3418 /// \param RL Low bits of the RHS of the MUL. See LL for meaning
3419 /// \param RH High bits of the RHS of the MUL. See LL for meaning.
3420 /// \returns true if the node has been expanded, false if it has not
3421 bool expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl, SDValue LHS,
3422 SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
3423 SelectionDAG &DAG, MulExpansionKind Kind,
3424 SDValue LL = SDValue(), SDValue LH = SDValue(),
3425 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
3427 /// Expand a MUL into two nodes. One that computes the high bits of
3428 /// the result and one that computes the low bits.
3429 /// \param HiLoVT The value type to use for the Lo and Hi nodes.
3430 /// \param LL Low bits of the LHS of the MUL. You can use this parameter
3431 /// if you want to control how low bits are extracted from the LHS.
3432 /// \param LH High bits of the LHS of the MUL. See LL for meaning.
3433 /// \param RL Low bits of the RHS of the MUL. See LL for meaning
3434 /// \param RH High bits of the RHS of the MUL. See LL for meaning.
3435 /// \returns true if the node has been expanded. false if it has not
3436 bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
3437 SelectionDAG &DAG, MulExpansionKind Kind,
3438 SDValue LL = SDValue(), SDValue LH = SDValue(),
3439 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
3441 /// Expand float(f32) to SINT(i64) conversion
3442 /// \param N Node to expand
3443 /// \param Result output after conversion
3444 /// \returns True, if the expansion was successful, false otherwise
3445 bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
3447 /// Turn load of vector type into a load of the individual elements.
3448 /// \param LD load to expand
3449 /// \returns MERGE_VALUEs of the scalar loads with their chains.
3450 SDValue scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const;
3452 // Turn a store of a vector type into stores of the individual elements.
3453 /// \param ST Store with a vector value type
3454 /// \returns MERGE_VALUs of the individual store chains.
3455 SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;
3457 /// Expands an unaligned load to 2 half-size loads for an integer, and
3458 /// possibly more for vectors.
3459 std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
3460 SelectionDAG &DAG) const;
3462 /// Expands an unaligned store to 2 half-size stores for integer values, and
3463 /// possibly more for vectors.
3464 SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;
3466 /// Increments memory address \p Addr according to the type of the value
3467 /// \p DataVT that should be stored. If the data is stored in compressed
3468 /// form, the memory address should be incremented according to the number of
3469 /// the stored elements. This number is equal to the number of '1's bits
3471 /// \p DataVT is a vector type. \p Mask is a vector value.
3472 /// \p DataVT and \p Mask have the same number of vector elements.
3473 SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
3474 EVT DataVT, SelectionDAG &DAG,
3475 bool IsCompressedMemory) const;
3477 /// Get a pointer to vector element \p Idx located in memory for a vector of
3478 /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
3479 /// bounds the returned pointer is unspecified, but will be within the vector
3481 SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
3484 //===--------------------------------------------------------------------===//
3485 // Instruction Emitting Hooks
3488 /// This method should be implemented by targets that mark instructions with
3489 /// the 'usesCustomInserter' flag. These instructions are special in various
3490 /// ways, which require special support to insert. The specified MachineInstr
3491 /// is created but not inserted into any basic blocks, and this method is
3492 /// called to expand it into a sequence of instructions, potentially also
3493 /// creating new basic blocks and control flow.
3494 /// As long as the returned basic block is different (i.e., we created a new
3495 /// one), the custom inserter is free to modify the rest of \p MBB.
3496 virtual MachineBasicBlock *
3497 EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
3499 /// This method should be implemented by targets that mark instructions with
3500 /// the 'hasPostISelHook' flag. These instructions must be adjusted after
3501 /// instruction selection by target hooks. e.g. To fill in optional defs for
3502 /// ARM 's' setting instructions.
3503 virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
3504 SDNode *Node) const;
3506 /// If this function returns true, SelectionDAGBuilder emits a
3507 /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
3508 virtual bool useLoadStackGuardNode() const {
3512 virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
3513 const SDLoc &DL) const {
3514 llvm_unreachable("not implemented for this target");
3517 /// Lower TLS global address SDNode for target independent emulated TLS model.
3518 virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
3519 SelectionDAG &DAG) const;
3521 // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
3522 // If we're comparing for equality to zero and isCtlzFast is true, expose the
3523 // fact that this can be implemented as a ctlz/srl pair, so that the dag
3524 // combiner can fold the new nodes.
3525 SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
3528 SDValue simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
3529 ISD::CondCode Cond, DAGCombinerInfo &DCI,
3530 const SDLoc &DL) const;
3533 /// Given an LLVM IR type and return type attributes, compute the return value
3534 /// EVTs and flags, and optionally also the offsets, if the return value is
3535 /// being lowered to memory.
3536 void GetReturnInfo(Type *ReturnType, AttributeList attr,
3537 SmallVectorImpl<ISD::OutputArg> &Outs,
3538 const TargetLowering &TLI, const DataLayout &DL);
3540 } // end namespace llvm
3542 #endif // LLVM_CODEGEN_TARGETLOWERING_H