//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file describes how to lower LLVM code to machine code. This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>
namespace llvm {

class BlockFrequencyInfo;
class BranchProbability;
class FunctionLoweringInfo;
class LegacyDivergenceAnalysis;
class MachineBasicBlock;
class MachineFunction;
class MachineJumpTableInfo;
class MachineRegisterInfo;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetTransformInfo;
namespace Sched {

  enum Preference {
    None,        // No preference
    Source,      // Follow source order.
    RegPressure, // Scheduling for lowest register pressure.
    Hybrid,      // Scheduling for both latency and register pressure.
    ILP,         // Scheduling for ILP in low register pressure mode.
    VLIW         // Scheduling for VLIW targets.
  };

} // end namespace Sched
// MemOp models a memory operation, either memset or memcpy/memmove.
struct MemOp {
private:
  // Shared
  uint64_t Size;
  bool DstAlignCanChange; // true if destination alignment can satisfy any
                          // constraint.
  Align DstAlign;         // Specified alignment of the memory operation.

  bool AllowOverlap;
  // memset only
  bool IsMemset;   // If set, this memory operation is a memset.
  bool ZeroMemset; // If set, clears out memory with zeros.
  // memcpy only
  bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
                     // constant so it does not need to be loaded.
  Align SrcAlign;    // Inferred alignment of the source or default value if the
                     // memory operation does not need to load the value.

public:
  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                    Align SrcAlign, bool IsVolatile,
                    bool MemcpyStrSrc = false) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = false;
    Op.ZeroMemset = false;
    Op.MemcpyStrSrc = MemcpyStrSrc;
    Op.SrcAlign = SrcAlign;
    return Op;
  }

  static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                   bool IsZeroMemset, bool IsVolatile) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = true;
    Op.ZeroMemset = IsZeroMemset;
    Op.MemcpyStrSrc = false;
    return Op;
  }

  uint64_t size() const { return Size; }
  Align getDstAlign() const {
    assert(!DstAlignCanChange);
    return DstAlign;
  }
  bool isFixedDstAlign() const { return !DstAlignCanChange; }
  bool allowOverlap() const { return AllowOverlap; }
  bool isMemset() const { return IsMemset; }
  bool isMemcpy() const { return !IsMemset; }
  bool isMemcpyWithFixedDstAlign() const {
    return isMemcpy() && !DstAlignCanChange;
  }
  bool isZeroMemset() const { return isMemset() && ZeroMemset; }
  bool isMemcpyStrSrc() const {
    assert(isMemcpy() && "Must be a memcpy");
    return MemcpyStrSrc;
  }
  Align getSrcAlign() const {
    assert(isMemcpy() && "Must be a memcpy");
    return SrcAlign;
  }
  bool isSrcAligned(Align AlignCheck) const {
    return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
  }
  bool isDstAligned(Align AlignCheck) const {
    return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
  }
  bool isAligned(Align AlignCheck) const {
    return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
  }
};
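
// For example, lowering a non-volatile 16-byte memcpy whose destination
// alignment is fixed at 4 could be described as follows (a usage sketch with
// illustrative values, not taken from any particular target):
// \code
//   MemOp Op = MemOp::Copy(/*Size=*/16, /*DstAlignCanChange=*/false,
//                          /*DstAlign=*/Align(4), /*SrcAlign=*/Align(4),
//                          /*IsVolatile=*/false);
//   assert(Op.isMemcpyWithFixedDstAlign() && Op.allowOverlap());
//   assert(Op.isAligned(Align(4)) && !Op.isAligned(Align(8)));
// \endcode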
/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,   // The target natively supports this operation.
    Promote, // This operation should be executed in a larger type.
    Expand,  // Try to expand this to other ops, otherwise use a libcall.
    LibCall, // Don't try to expand this to other ops, always use a libcall.
    Custom   // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat,    // Replace this float with a larger one.
    TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
    TypeScalarizeScalableVector, // This action is explicitly left unimplemented.
                         // While it is theoretically possible to
                         // legalize operations on scalable types with a
                         // loop that handles the vscale * #lanes of the
                         // vector, this is non-trivial at SelectionDAG
                         // level and these types are better to be
                         // widened or promoted.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;
  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,     // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal, // The target supports selects with a scalar condition
                         // and vector values (ex: cmov).
    VectorMaskSelect     // The target supports vector selects with a vector
                         // mask (ex: x86 blends).
  };

  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,   // Don't expand the instruction.
    LLSC,   // Expand the instruction into load-linked/store-conditional; used
            // by ARM/AArch64.
    LLOnly, // Expand the (load) instruction into just a load-linked, which has
            // greater atomic guarantees than a normal load.
    CmpXChg,         // Expand the instruction into cmpxchg; used by at least X86.
    MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
  };

  /// Enum that specifies when a multiplication should be expanded.
  enum class MulExpansionKind {
    Always,            // Always expand the instruction.
    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                       // or custom.
  };

  /// Enum that specifies when a float negation is beneficial.
  enum class NegatibleCost {
    Cheaper = 0,  // Negated expression is cheaper.
    Neutral = 1,  // Negated expression has the same cost.
    Expensive = 2 // Negated expression is more expensive.
  };
  class ArgListEntry {
  public:
    Value *Val = nullptr;
    SDValue Node = SDValue();
    bool IsSExt : 1;
    bool IsZExt : 1;
    bool IsInReg : 1;
    bool IsSRet : 1;
    bool IsNest : 1;
    bool IsByVal : 1;
    bool IsInAlloca : 1;
    bool IsPreallocated : 1;
    bool IsReturned : 1;
    bool IsSwiftSelf : 1;
    bool IsSwiftError : 1;
    bool IsCFGuardTarget : 1;
    MaybeAlign Alignment = None;
    Type *ByValType = nullptr;
    Type *PreallocatedType = nullptr;

    ArgListEntry()
        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
          IsNest(false), IsByVal(false), IsInAlloca(false),
          IsPreallocated(false), IsReturned(false), IsSwiftSelf(false),
          IsSwiftError(false), IsCFGuardTarget(false) {}

    void setAttributes(const CallBase *Call, unsigned ArgIdx);
  };
  using ArgListTy = std::vector<ArgListEntry>;

  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                     ArgListTy &Args) const {}
  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }
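
  /// For example, a DAG combine that needs to widen a boolean produced by a
  /// SETCC can pick the extension opcode that matches the target's boolean
  /// representation (a sketch; TLI, DAG, DL, Bool and VT are assumed to be in
  /// scope):
  /// \code
  ///   ISD::NodeType ExtOp =
  ///       TLI.getExtendForContent(TLI.getBooleanContents(Bool.getValueType()));
  ///   SDValue Wide = DAG.getNode(ExtOp, DL, VT, Bool);
  /// \endcode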
  explicit TargetLoweringBase(const TargetMachine &TM);
  TargetLoweringBase(const TargetLoweringBase &) = delete;
  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
  virtual ~TargetLoweringBase() = default;

  /// Return true if the target supports strict floating-point operations.
  bool isStrictFPEnabled() const {
    return IsStrictFPEnabled;
  }

protected:
  /// Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }

  virtual bool useSoftFloat() const { return false; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }
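
  /// For example, a hypothetical target whose address space 1 uses 16-bit
  /// data pointers might override this hook as follows (illustrative only):
  /// \code
  ///   MVT MyTargetLowering::getPointerTy(const DataLayout &DL,
  ///                                      uint32_t AS) const {
  ///     if (AS == 1)
  ///       return MVT::i16;
  ///     return TargetLoweringBase::getPointerTy(DL, AS);
  ///   }
  /// \endcode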
  /// Return the in-memory pointer type for the given address space, defaults to
  /// the pointer type from the data layout. FIXME: The default needs to be
  /// removed once all the code is updated.
  MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the type for frame index, which is determined by
  /// the alloca address space specified through the data layout.
  MVT getFrameIndexTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getAllocaAddrSpace());
  }

  /// Return the type for code pointers, which is determined by the program
  /// address space specified through the data layout.
  MVT getProgramPointerTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getProgramAddressSpace());
  }

  /// Return the type for operands of fence.
  /// TODO: Let fence operands be of i32 type and remove this.
  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// EVT is not used in-tree, but is used by out-of-tree targets.
  /// Documentation for this function would be nice...
  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;

  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                       bool LegalTypes = true) const;

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }
  /// This callback is used to inspect load/store instructions and add
  /// target-specific MachineMemOperand flags to them. The default
  /// implementation does nothing.
  virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const {
    return MachineMemOperand::MONone;
  }

  MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI,
                                                  const DataLayout &DL) const;
  MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
                                                   const DataLayout &DL) const;
  MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
                                                    const DataLayout &DL) const;

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }
  /// Return true if it is profitable to convert a select of FP constants into
  /// a constant pool load whose address depends on the select condition. The
  /// parameter may be used to differentiate a select with FP compare from
  /// an integer compare.
  virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const {
    // The default action for one element vectors is to scalarize.
    if (VT.getVectorElementCount() == 1)
      return TypeScalarizeVector;
    // The default action for an odd-width vector is to widen.
    if (!VT.isPow2VectorType())
      return TypeWidenVector;
    // The default action for other vectors is to promote.
    return TypePromoteInteger;
  }
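
  /// For example, a hypothetical target whose vector unit only handles
  /// 128-bit registers might prefer splitting anything wider (a sketch, not a
  /// recipe from any in-tree target):
  /// \code
  ///   TargetLoweringBase::LegalizeTypeAction
  ///   MyTargetLowering::getPreferredVectorAction(MVT VT) const {
  ///     if (VT.isVector() && VT.getSizeInBits() > 128)
  ///       return TypeSplitVector;
  ///     return TargetLoweringBase::getPreferredVectorAction(VT);
  ///   }
  /// \endcode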
  // Return true if the half type should be passed around as i16, but promoted
  // to float around arithmetic. The default behavior is to pass around as
  // float and convert around loads/stores/bitcasts and other places where
  // the size matters.
  virtual bool softPromoteHalfType() const { return false; }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }
  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }

  /// Return true if the target can handle a standalone remainder operation.
  virtual bool hasStandaloneRem(EVT VT) const {
    return true;
  }

  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
    return false;
  }
  /// Reciprocal estimate status values used by the functions below.
  enum ReciprocalEstimate : int {
    Unspecified = -1,
    Disabled = 0,
    Enabled = 1
  };

  /// Return a ReciprocalEstimate enum value for a square root of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;

  /// Return a ReciprocalEstimate enum value for a division of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a square root of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a division of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
  /// Returns true if target has indicated at least one type should be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }

  /// Return true if flow control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  virtual bool fallBackToDAGISel(const Instruction &Inst) const {
    return false;
  }

  /// If a branch or a select condition is skewed in one direction by more than
  /// this factor, it is very likely to be predicted correctly.
  virtual BranchProbability getPredictableBranchThreshold() const;
  /// Return true if the following transform is beneficial:
  ///   fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient; however, this can be undone by optimizations
  /// in the DAG combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                       const SelectionDAG &DAG,
                                       const MachineMemOperand &MMO) const {
    // Don't do if we could do an indexed load on the original type, but not on
    // the new one.
    if (!LoadVT.isSimple() || !BitcastVT.isSimple())
      return true;

    MVT LoadMVT = LoadVT.getSimpleVT();

    // Don't bother doing this if it's just going to be promoted again later, as
    // doing so might interfere with other combines.
    if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
        getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
      return false;

    bool Fast = false;
    return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT,
                              MMO, &Fast) &&
           Fast;
  }

  /// Return true if the following transform is beneficial:
  ///   (store (y (conv x)), y*) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
                                        const SelectionDAG &DAG,
                                        const MachineMemOperand &MMO) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
  }
  /// Return true if it is expected to be cheaper to do a store of a non-zero
  /// vector constant with the given size and type for the address space than to
  /// store the individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return false;
  }

  /// Allow store merging for the specified type after legalization in addition
  /// to before legalization. This may transform stores that do not exist
  /// earlier (for example, stores created from intrinsics).
  virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
    return true;
  }

  /// Returns if it's reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const SelectionDAG &DAG) const {
    return true;
  }

  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz() const {
    return false;
  }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz() const {
    return false;
  }

  /// Return true if ctlz instruction is fast.
  virtual bool isCtlzFast() const {
    return false;
  }

  /// Return true if instruction generated for equality comparison is folded
  /// with instruction generated for signed comparison.
  virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }
  /// Return true if it is safe to transform an integer-domain bitwise operation
  /// into the equivalent floating-point operation. This should be set to true
  /// if the target has IEEE-754-compliant fabs/fneg operations for the input
  /// type.
  virtual bool hasBitPreservingFPLogic(EVT VT) const {
    return false;
  }

  /// Return true if it is cheaper to split the store of a merged integer value
  /// from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }

  /// Return if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #mask
  ///   %icmpResult = icmp %andResult, 0
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   cc = test %register, #mask
  /// \endcode
  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
    return false;
  }

  /// Use bitwise logic to make pairs of compares more efficient. For example:
  ///   and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
  /// This should be true when it takes more than one instruction to lower
  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
    return false;
  }

  /// Return the preferred operand type if the target has a quick way to compare
  /// integer values of the given size. Assume that any legal integer type can
  /// be compared efficiently. Targets may override this to allow illegal wide
  /// types to return a vector type if there is support to compare that type.
  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
    MVT VT = MVT::getIntegerVT(NumBits);
    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
  }
  /// Return true if the target should transform:
  ///   (X & Y) == Y ---> (~X & Y) == 0
  ///   (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation that
  /// sets comparison flags. A target may want to limit the transformation based
  /// on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting the
  /// predicate, for example:
  ///   (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }

  /// Return true if the target has a bitwise and-not operation:
  ///   X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume that
    // it has this operation too.
    return hasAndNotCompare(X);
  }

  /// Return true if the target has a bit-test instruction:
  ///   (X & (1 << Y)) ==/!= 0
  /// This knowledge can be used to prevent breaking the pattern,
  /// or creating it if it could be recognized.
  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }
  /// There are two ways to clear extreme bits (either low or high):
  ///   Mask:   x & (-1 << y)  (the instcombine canonical form)
  ///   Shifts: x >> y << y
  /// Return true if the variant with 2 variable shifts is preferred.
  /// Return false if there is no preference.
  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  /// This is usually true on most targets. But some targets, like Thumb1,
  /// have immediate shift instructions, but no immediate "and" instruction;
  /// this makes the fold unprofitable.
  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                                 CombineLevel Level) const {
    return true;
  }

  /// Should we transform the IR-optimal check for whether given truncation
  /// down into KeptBits would be truncating or not:
  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// Into its more traditional form:
  ///   ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }
  /// Given the pattern
  ///   (X & (C l>>/<< Y)) ==/!= 0
  /// return true if it should be transformed into:
  ///   ((X <</l>> Y) & C) ==/!= 0
  /// WARNING: if 'X' is a constant, the fold may deadlock!
  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
  ///        here because it can end up being not linked in.
  virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const {
    if (hasBitTest(X, Y)) {
      // One interesting pattern that we'd want to form is 'bit test':
      //   ((1 << Y) & C) ==/!= 0
      // But we also need to be careful not to try to reverse that fold.

      // Is this '1 << Y' ?
      if (OldShiftOpcode == ISD::SHL && CC->isOne())
        return false; // Keep the 'bit test' pattern.

      // Will it be '1 << Y' after the transform ?
      if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
        return true; // Do form the 'bit test' pattern.
    }

    // If 'X' is a constant, and we transform, then we will immediately
    // try to undo the fold, thus causing endless combine loop.
    // So by default, let's assume everyone prefers the fold
    // iff 'X' is not a constant.
    return !XC;
  }
  /// These two forms are equivalent:
  ///   sub %y, (xor %x, -1)
  ///   add (add %x, 1), %y
  /// The variant with two add's is IR-canonical.
  /// Some targets may prefer one to the other.
  virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
    // By default, let's assume that everyone prefers the form with two add's.
    return true;
  }

  /// Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  ///
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if inserting a scalar into a variable element of an undef
  /// vector is more efficiently handled by splatting the scalar instead.
  virtual bool shouldSplatInsEltVarIndex(EVT) const {
    return false;
  }

  /// Return true if the target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const {
    return false;
  }
  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating-point comparison calls, and Ordered/Unordered check calls on
  /// floating-point numbers.
  virtual MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1. Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds. For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some cpus also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }
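
  /// For example, code materializing the constant "true" for a given type can
  /// consult the boolean contents to decide between 1 and all-ones (a sketch;
  /// TLI and VT are assumed to be in scope):
  /// \code
  ///   unsigned NumBits = VT.getScalarSizeInBits();
  ///   APInt TrueValue =
  ///       TLI.getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent
  ///           ? APInt::getAllOnesValue(NumBits)
  ///           : APInt(NumBits, 1);
  /// \endcode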
  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference
  /// (or none) for a node in the current mode.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }
  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *getRegClassFor(MVT VT,
                                                    bool isDivergent = false) const {
    (void)isDivergent;
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Allows target to decide about the register class of the
  /// specific value that is live outside the defining block.
  /// Returns true if the value needs uniform register class.
  virtual bool requiresUniformRegister(MachineFunction &MF,
                                       const Value *) const {
    return false;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type. For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }
  /// Return true if SHIFT instructions should be expanded to SHIFT_PARTS
  /// instructions, and false if a library call is preferred (e.g. for code-size
  /// reasons).
  virtual bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
    return true;
  }

  /// Return true if the target has native support for the specified value type.
  /// This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }
  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }
  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to. For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }
  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register. It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;
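
  /// For example, with only v4f32 legal (as on a hypothetical 128-bit vector
  /// target), breaking down v8f32 yields two v4f32 intermediates (a usage
  /// sketch; TLI and Ctx are assumed to be in scope):
  /// \code
  ///   EVT IntermediateVT;
  ///   MVT RegisterVT;
  ///   unsigned NumIntermediates;
  ///   unsigned NumRegs = TLI.getVectorTypeBreakdown(
  ///       Ctx, EVT::getVectorVT(Ctx, MVT::f32, 8), IntermediateVT,
  ///       NumIntermediates, RegisterVT);
  ///   // Here NumRegs == 2 and IntermediateVT == MVT::v4f32.
  /// \endcode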
  /// Certain targets such as MIPS require that some types such as vectors are
  /// always broken down into scalars in some contexts. This occurs even if the
  /// vector type is legal.
  virtual unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const {
    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
                                  RegisterVT);
  }
  struct IntrinsicInfo {
    unsigned opc = 0; // target opcode
    EVT memVT;        // memory VT

    // value representing memory location
    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;

    int offset = 0;    // offset off of ptrVal
    uint64_t size = 0; // the size of the memory location
                       // (taken from memVT if zero)
    MaybeAlign align = Align(1); // alignment

    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
    IntrinsicInfo() = default;
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to map
  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
  /// true and stores the intrinsic information into the IntrinsicInfo that was
  /// passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  MachineFunction &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }
  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
                            bool ForCodeSize = false) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for integer divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }
  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= array_lengthof(OpActions[0])) return Custom;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }
  /// Custom method defined by each target to indicate if an operation which
  /// may require a scale is supported natively by the target.
  /// If not, the operation is illegal.
  virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    return false;
  }

  /// Some fixed point operations may be natively supported by the target but
  /// only for specific scales. This method allows for checking
  /// if the width is supported by the target for a given operation that may
  /// depend on scale.
  LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    auto Action = getOperationAction(Op, VT);
    if (Action != Legal)
      return Action;

    // This operation is supported in this type but may only work on specific
    // scales.
    bool Supported;
    switch (Op) {
    default:
      llvm_unreachable("Unexpected fixed point operation.");
    case ISD::SMULFIX:
    case ISD::SMULFIXSAT:
    case ISD::UMULFIX:
    case ISD::UMULFIXSAT:
    case ISD::SDIVFIX:
    case ISD::SDIVFIXSAT:
    case ISD::UDIVFIX:
    case ISD::UDIVFIXSAT:
      Supported = isSupportedFixedPointOperation(Op, VT, Scale);
      break;
    }

    return Supported ? Action : Expand;
  }
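
  /// For example, a hypothetical target that only supports signed fixed-point
  /// multiplication on i32 with scales below 32 could implement the hook as:
  /// \code
  ///   bool MyTargetLowering::isSupportedFixedPointOperation(
  ///       unsigned Op, EVT VT, unsigned Scale) const {
  ///     return Op == ISD::SMULFIX && VT == MVT::i32 && Scale < 32;
  ///   }
  /// \endcode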
  // If Op is a strict floating-point operation, return the result
  // of getOperationAction for the equivalent non-strict operation.
  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
    unsigned EqOpc;
    switch (Op) {
      default: llvm_unreachable("Unexpected FP pseudo-opcode");
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
      case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
      case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
    }

    return getOperationAction(EqOpc, VT);
  }
  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering or using promotion. This is used to help
  /// guide high-level lowering decisions.
  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom ||
            getOperationAction(Op, VT) == Promote);
  }
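
  /// For example, a DAG combine typically guards node creation with one of
  /// these predicates (a sketch; DAG, DL, VT, A, B and C assumed in scope):
  /// \code
  ///   if (TLI.isOperationLegalOrCustom(ISD::FMA, VT))
  ///     return DAG.getNode(ISD::FMA, DL, VT, A, B, C);
  /// \endcode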
  /// Return true if the operation uses custom lowering, regardless of whether
  /// the type is legal or not.
  bool isOperationCustom(unsigned Op, EVT VT) const {
    return getOperationAction(Op, VT) == Custom;
  }

  /// Return true if lowering to a jump table is allowed.
  virtual bool areJTsAllowed(const Function *Fn) const {
    if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
      return false;

    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }
  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High,
                       const DataLayout &DL) const {
    // FIXME: Using the pointer type doesn't seem ideal.
    uint64_t BW = DL.getIndexSizeInBits(0u);
    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
    return Range <= BW;
  }
  /// Return true if lowering to a jump table is suitable for a set of case
  /// clusters which may contain \p NumCases cases and a \p Range of values.
  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
                                      uint64_t Range, ProfileSummaryInfo *PSI,
                                      BlockFrequencyInfo *BFI) const;

  /// Return true if lowering to a bit test is suitable for a set of case
  /// clusters which contains \p NumDests unique destinations, \p Low and
  /// \p High as its lowest and highest case values, and expects \p NumCmps
  /// case value comparisons. Check if the number of destinations, comparison
  /// metric, and range are all suitable.
  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
                             const APInt &Low, const APInt &High,
                             const DataLayout &DL) const {
    // FIXME: I don't think NumCmps is the correct metric: a single case and a
    // range of cases both require only one branch to lower. Just looking at the
    // number of clusters and destinations should be enough to decide whether to
    // use bit tests.

    // To lower a range with bit tests, the range must fit the bitwidth of a
    // machine word.
    if (!rangeFitsInWord(Low, High, DL))
      return false;

    // Decide whether it's profitable to lower this range with bit tests. Each
    // destination requires a bit test and branch, and there is an overall range
    // check branch. For a small number of clusters, separate comparisons might
    // be cheaper, and for many destinations, splitting the range might be
    // better.
    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
  }
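
  /// For example, assuming a 64-bit index width, three destinations and six
  /// comparisons over the case values [10, 70] satisfy both the range and the
  /// profitability checks (illustrative values only):
  /// \code
  ///   APInt Low(64, 10), High(64, 70); // Range == 61, fits in a word.
  ///   bool UseBitTests = TLI.isSuitableForBitTests(/*NumDests=*/3,
  ///                                                /*NumCmps=*/6, Low, High,
  ///                                                DL);
  /// \endcode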
  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }
  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
           MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }
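
  /// For example, a target whose sign-extending i16-to-i32 loads are natively
  /// supported would mark them Legal in its constructor, and queries against
  /// this table then succeed (a sketch of the usual idiom):
  /// \code
  ///   // In the target's TargetLowering constructor:
  ///   setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i16, Legal);
  ///   // Later, e.g. in a DAG combine:
  ///   bool OK = TLI.isLoadExtLegal(ISD::SEXTLOAD, MVT::i32, MVT::i16);
  /// \endcode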
  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to some
  /// other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return TruncStoreActions[ValI][MemI];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
  }

  /// Return true if the specified store with truncation is legal or custom
  /// on this target.
  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) &&
           (getTruncStoreAction(ValVT, MemVT) == Legal ||
            getTruncStoreAction(ValVT, MemVT) == Custom);
  }
  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Load);
  }

  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Store);
  }

  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed masked load should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
  }

  /// Return true if the specified indexed masked load is legal on this target.
  bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed masked store should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to
  /// some other code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
  }

  /// Return true if the specified indexed masked store is legal on this
  /// target.
  bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }
  /// Return how the condition code should be treated: either it is legal, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// Return true if the specified condition code is legal on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal;
  }

  /// Return true if the specified condition code is legal or custom on this
  /// target.
  bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal ||
           getCondCodeAction(CC, VT) == Custom;
  }
  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
        PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
             getOperationAction(Op, NVT) == Promote);
    return NVT;
  }
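
  /// For example, if a target's constructor runs
  /// \code
  ///   setOperationAction(ISD::CTLZ, MVT::i8, Promote);
  /// \endcode
  /// without a matching AddPromotedToType entry, then
  /// getTypeToPromoteTo(ISD::CTLZ, MVT::i8) walks up the integer types and
  /// returns the first legal one on which CTLZ is not itself marked Promote
  /// (i32 on many targets).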
  /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
  /// operations except for the pointer size. If AllowUnknown is true, this
  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
  /// otherwise it will assert.
  EVT getValueType(const DataLayout &DL, Type *Ty,
                   bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (auto *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(DL, PTy->getAddressSpace());

    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      Type *EltTy = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
        EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
        EltTy = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
                              VTy->getElementCount());
    }

    return EVT::getEVT(Ty, AllowUnknown);
  }

  EVT getMemValueType(const DataLayout &DL, Type *Ty,
                      bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
      return getPointerMemTy(DL, PTy->getAddressSpace());
    else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
      Type *Elm = VTy->getElementType();
      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
        EVT PointerTy(getPointerMemTy(DL, PT->getAddressSpace()));
        Elm = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
                              VTy->getElementCount());
    }

    return getValueType(DL, Ty, AllowUnknown);
  }
  /// Return the MVT corresponding to this LLVM type. See getValueType.
  MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
                         bool AllowUnknown = false) const {
    return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
  }

  /// Return the desired alignment for ByVal or InAlloca aggregate function
  /// arguments in the caller parameter area. This is the actual alignment, not
  /// its logarithm.
  virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
             array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1,
                                   NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    }
    llvm_unreachable("Unsupported extended type!");
  }
  /// Return the number of registers that this ValueType will eventually
  /// require.
  ///
  /// This is one for any types promoted to live in larger registers, but may be
  /// more than one for types (like i64) that are split into pieces. For types
  /// like i140, which are first promoted then expanded, it is the number of
  /// registers needed to hold all the bits of the original type. For an i140
  /// on a 32 bit machine this means 5 registers.
  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
             array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }
  /// Certain combinations of ABIs, Targets and features require that types
  /// are legal for some operations and not for other operations.
  /// For MIPS all vector types must be passed through the integer register set.
  virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
                                            CallingConv::ID CC, EVT VT) const {
    return getRegisterType(Context, VT);
  }

  /// Certain targets require unusual breakdowns of certain types. For MIPS,
  /// this occurs when a vector type is used, as vectors are passed through the
  /// integer register set.
  virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                                 CallingConv::ID CC,
                                                 EVT VT) const {
    return getNumRegisters(Context, VT);
  }

  /// Certain targets have context-sensitive alignment requirements, where one
  /// type has the alignment requirement of another type.
  virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
                                              DataLayout DL) const {
    return DL.getABITypeAlign(ArgTy);
  }
  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }

  /// Return true if it is profitable to reduce a load to a smaller type.
  /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x
  virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                                     EVT NewVT) const {
    // By default, assume that it is cheaper to extract a subvector from a wide
    // vector load rather than creating multiple narrow vector loads.
    if (NewVT.isVector() && !Load->hasOneUse())
      return false;

    return true;
  }
  /// When splitting a value of the specified type into parts, does the Lo
  /// or Hi part come first? This usually follows the endianness, except
  /// for ppcf128, where the Hi part always comes first.
  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
    return DL.isBigEndian() || VT == MVT::ppcf128;
  }

  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT & 7));
  }

  unsigned getGatherAllAliasesMaxDepth() const {
    return GatherAllAliasesMaxDepth;
  }

  /// Returns the size of the platform's va_list object.
  virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
    return getPointerTy(DL).getSizeInBits();
  }
1518 /// Get maximum # of store operations permitted for llvm.memset
1520 /// This function returns the maximum number of store operations permitted
1521 /// to replace a call to llvm.memset. The value is set by the target at the
1522 /// performance threshold for such a replacement. If OptSize is true,
1523 /// return the limit for functions that have OptSize attribute.
1524 unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
  }
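  // For illustration, a target seeds these limits once in its constructor;
  // the values below are invented, not recommendations:
  //
  //   MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
  //       : TargetLowering(TM) {
  //     MaxStoresPerMemset = 16;        // allow up to 16 inline stores
  //     MaxStoresPerMemsetOptSize = 4;  // be stingier under OptSize
  //   }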
1528 /// Get maximum # of store operations permitted for llvm.memcpy
1530 /// This function returns the maximum number of store operations permitted
1531 /// to replace a call to llvm.memcpy. The value is set by the target at the
1532 /// performance threshold for such a replacement. If OptSize is true,
1533 /// return the limit for functions that have OptSize attribute.
1534 unsigned getMaxStoresPerMemcpy(bool OptSize) const {
1535 return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
1538 /// \brief Get maximum # of store operations to be glued together
1540 /// This function returns the maximum number of store operations permitted
1541 /// to glue together during lowering of llvm.memcpy. The value is set by
/// the target at the performance threshold for such a replacement.
1543 virtual unsigned getMaxGluedStoresPerMemcpy() const {
1544 return MaxGluedStoresPerMemcpy;
1547 /// Get maximum # of load operations permitted for memcmp
1549 /// This function returns the maximum number of load operations permitted
1550 /// to replace a call to memcmp. The value is set by the target at the
1551 /// performance threshold for such a replacement. If OptSize is true,
1552 /// return the limit for functions that have OptSize attribute.
1553 unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
1554 return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
1557 /// Get maximum # of store operations permitted for llvm.memmove
1559 /// This function returns the maximum number of store operations permitted
1560 /// to replace a call to llvm.memmove. The value is set by the target at the
1561 /// performance threshold for such a replacement. If OptSize is true,
1562 /// return the limit for functions that have OptSize attribute.
1563 unsigned getMaxStoresPerMemmove(bool OptSize) const {
1564 return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
1567 /// Determine if the target supports unaligned memory accesses.
1569 /// This function returns true if the target allows unaligned memory accesses
1570 /// of the specified type in the given address space. If true, it also returns
1571 /// whether the unaligned memory access is "fast" in the last argument by
1572 /// reference. This is used, for example, in situations where an array
1573 /// copy/move/set is converted to a sequence of store operations. Its use
1574 /// helps to ensure that such replacements don't generate code that causes an
1575 /// alignment error (trap) on the target machine.
1576 virtual bool allowsMisalignedMemoryAccesses(
1577 EVT, unsigned AddrSpace = 0, unsigned Align = 1,
1578 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool * /*Fast*/ = nullptr) const {
    return false;
  }
1583 /// LLT handling variant.
1584 virtual bool allowsMisalignedMemoryAccesses(
1585 LLT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1586 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool * /*Fast*/ = nullptr) const {
    return false;
  }
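  // A possible override of the EVT variant, sketched for a hypothetical
  // target on which only 32-bit scalar accesses in address space 0 may be
  // misaligned (and are fast when they are):
  //
  //   bool MyTargetLowering::allowsMisalignedMemoryAccesses(
  //       EVT VT, unsigned AddrSpace, unsigned Align,
  //       MachineMemOperand::Flags Flags, bool *Fast) const {
  //     if (VT == MVT::i32 && AddrSpace == 0) {
  //       if (Fast)
  //         *Fast = true;
  //       return true;
  //     }
  //     return false;
  //   }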
1591 /// This function returns true if the memory access is aligned or if the
1592 /// target allows this specific unaligned memory access. If the access is
1593 /// allowed, the optional final parameter returns if the access is also fast
1594 /// (as defined by the target).
1595 bool allowsMemoryAccessForAlignment(
1596 LLVMContext &Context, const DataLayout &DL, EVT VT,
1597 unsigned AddrSpace = 0, Align Alignment = Align(1),
1598 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1599 bool *Fast = nullptr) const;
1601 /// Return true if the memory access of this type is aligned or if the target
1602 /// allows this specific unaligned access for the given MachineMemOperand.
1603 /// If the access is allowed, the optional final parameter returns if the
1604 /// access is also fast (as defined by the target).
1605 bool allowsMemoryAccessForAlignment(LLVMContext &Context,
1606 const DataLayout &DL, EVT VT,
1607 const MachineMemOperand &MMO,
1608 bool *Fast = nullptr) const;
1610 /// Return true if the target supports a memory access of this type for the
1611 /// given address space and alignment. If the access is allowed, the optional
1612 /// final parameter returns if the access is also fast (as defined by the
1615 allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1616 unsigned AddrSpace = 0, Align Alignment = Align(1),
1617 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1618 bool *Fast = nullptr) const;
1620 /// Return true if the target supports a memory access of this type for the
1621 /// given MachineMemOperand. If the access is allowed, the optional
  /// final parameter returns if the access is also fast (as defined by the
  /// target).
1624 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1625 const MachineMemOperand &MMO,
1626 bool *Fast = nullptr) const;
1628 /// Returns the target specific optimal type for load and store operations as
1629 /// a result of memset, memcpy, and memmove lowering.
1630 /// It returns EVT::Other if the type should be determined using generic
1631 /// target-independent logic.
  virtual EVT
  getOptimalMemOpType(const MemOp &Op,
                      const AttributeList & /*FuncAttributes*/) const {
    return MVT::Other;
  }
1638 /// LLT returning variant.
  virtual LLT
  getOptimalMemOpLLT(const MemOp &Op,
                     const AttributeList & /*FuncAttributes*/) const {
    return LLT();
  }
1645 /// Returns true if it's safe to use load / store of the specified type to
1646 /// expand memcpy / memset inline.
1648 /// This is mostly true for all types except for some special cases. For
1649 /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
1650 /// fstpl which also does type conversion. Note the specified type doesn't
1651 /// have to be legal as the hook is used before type legalization.
1652 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
1654 /// Return lower limit for number of blocks in a jump table.
1655 virtual unsigned getMinimumJumpTableEntries() const;
1657 /// Return lower limit of the density in a jump table.
1658 unsigned getMinimumJumpTableDensity(bool OptForSize) const;
1660 /// Return upper limit for number of entries in a jump table.
1661 /// Zero if no limit.
1662 unsigned getMaximumJumpTableSize() const;
1664 virtual bool isJumpTableRelative() const;
1666 /// Return true if a mulh[s|u] node for a specific type is cheaper than
1667 /// a multiply followed by a shift. This is false by default.
1668 virtual bool isMulhCheaperThanMulShift(EVT Type) const { return false; }
1670 /// If a physical register, this specifies the register that
1671 /// llvm.savestack/llvm.restorestack should save and restore.
  Register getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }
1676 /// If a physical register, this returns the register that receives the
1677 /// exception address on entry to an EH pad.
  virtual Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const {
    return Register();
  }
1683 /// If a physical register, this returns the register that receives the
1684 /// exception typeid on entry to a landing pad.
  virtual Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const {
    return Register();
  }
1690 virtual bool needsFixedCatchObjects() const {
1691 report_fatal_error("Funclet EH is not implemented for this target");
1694 /// Return the minimum stack alignment of an argument.
1695 Align getMinStackArgumentAlignment() const {
1696 return MinStackArgumentAlignment;
1699 /// Return the minimum function alignment.
1700 Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
1702 /// Return the preferred function alignment.
1703 Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
1705 /// Return the preferred loop alignment.
1706 virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
1707 return PrefLoopAlignment;
  /// Should loops be aligned even when the function is marked OptSize (but not
  /// MinSize).
  virtual bool alignLoopsWithOptSize() const {
    return false;
  }
1716 /// If the target has a standard location for the stack protector guard,
1717 /// returns the address of that location. Otherwise, returns nullptr.
1718 /// DEPRECATED: please override useLoadStackGuardNode and customize
1719 /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
1720 virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;
1722 /// Inserts necessary declarations for SSP (stack protection) purpose.
1723 /// Should be used only when getIRStackGuard returns nullptr.
1724 virtual void insertSSPDeclarations(Module &M) const;
1726 /// Return the variable that's previously inserted by insertSSPDeclarations,
1727 /// if any, otherwise return nullptr. Should be used only when
1728 /// getIRStackGuard returns nullptr.
1729 virtual Value *getSDagStackGuard(const Module &M) const;
1731 /// If this function returns true, stack protection checks should XOR the
1732 /// frame pointer (or whichever pointer is used to address locals) into the
1733 /// stack guard value before checking it. getIRStackGuard must return nullptr
1734 /// if this returns true.
1735 virtual bool useStackGuardXorFP() const { return false; }
1737 /// If the target has a standard stack protection check function that
1738 /// performs validation and error handling, returns the function. Otherwise,
1739 /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
1740 /// Should be used only when getIRStackGuard returns nullptr.
1741 virtual Function *getSSPStackGuardCheck(const Module &M) const;
  Value *getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
                                            bool UseTLS) const;
1748 /// Returns the target-specific address of the unsafe stack pointer.
1749 virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;
1751 /// Returns the name of the symbol used to emit stack probes or the empty
1752 /// string if not applicable.
1753 virtual bool hasStackProbeSymbol(MachineFunction &MF) const { return false; }
1755 virtual bool hasInlineStackProbe(MachineFunction &MF) const { return false; }
  virtual StringRef getStackProbeSymbolName(MachineFunction &MF) const {
    return "";
  }
1761 /// Returns true if a cast between SrcAS and DestAS is a noop.
  virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
    return false;
  }
1766 /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
1767 /// are happy to sink it into basic blocks. A cast may be free, but not
1768 /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
1769 virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
1770 return isNoopAddrSpaceCast(SrcAS, DestAS);
1773 /// Return true if the pointer arguments to CI should be aligned by aligning
1774 /// the object whose address is being passed. If so then MinSize is set to the
1775 /// minimum size the object must be to be aligned and PrefAlign is set to the
1776 /// preferred alignment.
1777 virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
                                      unsigned & /*PrefAlign*/) const {
    return false;
  }
1782 //===--------------------------------------------------------------------===//
1783 /// \name Helpers for TargetTransformInfo implementations
1786 /// Get the ISD node that corresponds to the Instruction class opcode.
1787 int InstructionOpcodeToISD(unsigned Opcode) const;
1789 /// Estimate the cost of type-legalization and the legalized type.
  std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
                                              Type *Ty) const;
1795 //===--------------------------------------------------------------------===//
1796 /// \name Helpers for atomic expansion.
1799 /// Returns the maximum atomic operation size (in bits) supported by
1800 /// the backend. Atomic operations greater than this size (as well
1801 /// as ones that are not naturally aligned), will be expanded by
1802 /// AtomicExpandPass into an __atomic_* library call.
1803 unsigned getMaxAtomicSizeInBitsSupported() const {
1804 return MaxAtomicSizeInBitsSupported;
1807 /// Returns the size of the smallest cmpxchg or ll/sc instruction
1808 /// the backend supports. Any smaller operations are widened in
1809 /// AtomicExpandPass.
1811 /// Note that *unlike* operations above the maximum size, atomic ops
1812 /// are still natively supported below the minimum; they just
1813 /// require a more complex expansion.
1814 unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
1816 /// Whether the target supports unaligned atomic operations.
1817 bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
1819 /// Whether AtomicExpandPass should automatically insert fences and reduce
1820 /// ordering for this atomic. This should be true for most architectures with
1821 /// weak memory ordering. Defaults to false.
  virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
    return false;
  }
1826 /// Perform a load-linked operation on Addr, returning a "Value *" with the
1827 /// corresponding pointee type. This may entail some non-trivial operations to
1828 /// truncate or reconstruct types that will be illegal in the backend. See
1829 /// ARMISelLowering for an example implementation.
1830 virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
1831 AtomicOrdering Ord) const {
1832 llvm_unreachable("Load linked unimplemented on this target");
1835 /// Perform a store-conditional operation to Addr. Return the status of the
1836 /// store. This should be 0 if the store succeeded, non-zero otherwise.
1837 virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
1838 Value *Addr, AtomicOrdering Ord) const {
1839 llvm_unreachable("Store conditional unimplemented on this target");
  /// Perform a masked atomicrmw using a target-specific intrinsic. This
  /// represents the core LL/SC loop which will be lowered at a late stage by
  /// the backend.
  virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder,
                                              AtomicRMWInst *AI,
                                              Value *AlignedAddr, Value *Incr,
                                              Value *Mask, Value *ShiftAmt,
                                              AtomicOrdering Ord) const {
    llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
  }
1853 /// Perform a masked cmpxchg using a target-specific intrinsic. This
  /// represents the core LL/SC loop which will be lowered at a late stage by
  /// the backend.
  virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
      IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
      Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
    llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
  }
1862 /// Inserts in the IR a target-specific intrinsic specifying a fence.
1863 /// It is called by AtomicExpandPass before expanding an
1864 /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
1865 /// if shouldInsertFencesForAtomic returns true.
1867 /// Inst is the original atomic instruction, prior to other expansions that
1868 /// may be performed.
1870 /// This function should either return a nullptr, or a pointer to an IR-level
1871 /// Instruction*. Even complex fence sequences can be represented by a
1872 /// single Instruction* through an intrinsic to be lowered later.
1873 /// Backends should override this method to produce target-specific intrinsic
1874 /// for their fences.
1875 /// FIXME: Please note that the default implementation here in terms of
1876 /// IR-level fences exists for historical/compatibility reasons and is
1877 /// *unsound* ! Fences cannot, in general, be used to restore sequential
  /// consistency. For example, consider the following:
  /// atomic<int> x = y = 0;
  /// int r1, r2, r3, r4;
  /// Thread 0:
  ///   x.store(1);
  /// Thread 1:
  ///   y.store(1);
  /// Thread 2:
  ///   r1 = x.load();
  ///   r2 = y.load();
  /// Thread 3:
  ///   r3 = y.load();
  ///   r4 = x.load();
  /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
  /// seq_cst. But if they are lowered to monotonic accesses, no amount of
  /// IR-level fences can prevent it.
1895 virtual Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
1896 AtomicOrdering Ord) const {
1897 if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
      return Builder.CreateFence(Ord);
    return nullptr;
  }
  virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
                                         Instruction *Inst,
                                         AtomicOrdering Ord) const {
    if (isAcquireOrStronger(Ord))
      return Builder.CreateFence(Ord);
    return nullptr;
  }
1913 // Emits code that executes when the comparison result in the ll/sc
1914 // expansion of a cmpxchg instruction is such that the store-conditional will
1915 // not execute. This makes it possible to balance out the load-linked with
1916 // a dedicated instruction, if desired.
1917 // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
1918 // be unnecessarily held, except if clrex, inserted by this hook, is executed.
1919 virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}
1921 /// Returns true if the given (atomic) store should be expanded by the
1922 /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
  virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
    return false;
  }
1927 /// Returns true if arguments should be sign-extended in lib calls.
  virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
    return IsSigned;
  }
1932 /// Returns true if arguments should be extended in lib calls.
  virtual bool shouldExtendTypeInLibCall(EVT Type) const {
    return true;
  }
1937 /// Returns how the given (atomic) load should be expanded by the
1938 /// IR-level AtomicExpand pass.
1939 virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
1940 return AtomicExpansionKind::None;
1943 /// Returns how the given atomic cmpxchg should be expanded by the IR-level
1944 /// AtomicExpand pass.
1945 virtual AtomicExpansionKind
1946 shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
1947 return AtomicExpansionKind::None;
1950 /// Returns how the IR-level AtomicExpand pass should expand the given
1951 /// AtomicRMW, if at all. Default is to never expand.
1952 virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
1953 return RMW->isFloatingPointOperation() ?
1954 AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
1957 /// On some platforms, an AtomicRMW that never actually modifies the value
1958 /// (such as fetch_add of 0) can be turned into a fence followed by an
1959 /// atomic load. This may sound useless, but it makes it possible for the
1960 /// processor to keep the cacheline shared, dramatically improving
1961 /// performance. And such idempotent RMWs are useful for implementing some
1962 /// kinds of locks, see for example (justification + benchmarks):
1963 /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
1964 /// This method tries doing that transformation, returning the atomic load if
1965 /// it succeeds, and nullptr otherwise.
1966 /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
1967 /// another round of expansion.
  virtual LoadInst *
  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
    return nullptr;
  }
1973 /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
1974 /// SIGN_EXTEND, or ANY_EXTEND).
1975 virtual ISD::NodeType getExtendForAtomicOps() const {
1976 return ISD::ZERO_EXTEND;
1979 /// Returns how the platform's atomic compare and swap expects its comparison
1980 /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is
1981 /// separate from getExtendForAtomicOps, which is concerned with the
1982 /// sign-extension of the instruction's output, whereas here we are concerned
1983 /// with the sign-extension of the input. For targets with compare-and-swap
1984 /// instructions (or sub-word comparisons in their LL/SC loop expansions),
1985 /// the input can be ANY_EXTEND, but the output will still have a specific
  /// extension.
  virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const {
    return ISD::ANY_EXTEND;
  }
1993 /// Returns true if we should normalize
1994 /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
1996 /// that it saves us from materializing N0 and N1 in an integer register.
1997 /// Targets that are able to perform and/or on flags should return false here.
  virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
                                               EVT VT) const {
    // If a target has multiple condition registers, then it likely has logical
    // operations on those registers.
    if (hasMultipleConditionRegisters())
      return false;
    // Only do the transform if the value won't be split into multiple
    // registers.
    LegalizeTypeAction Action = getTypeAction(Context, VT);
    return Action != TypeExpandInteger && Action != TypeExpandFloat &&
           Action != TypeSplitVector;
  }
2011 virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }
2013 /// Return true if a select of constants (select Cond, C1, C2) should be
2014 /// transformed into simple math ops with the condition value. For example:
2015 /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
  virtual bool convertSelectOfConstantsToMath(EVT VT) const {
    return false;
  }
2020 /// Return true if it is profitable to transform an integer
2021 /// multiplication-by-constant into simpler operations like shifts and adds.
2022 /// This may be true if the target does not directly support the
2023 /// multiplication operation for the specified type or the sequence of simpler
2024 /// ops is faster than the multiply.
2025 virtual bool decomposeMulByConstant(LLVMContext &Context,
                                      EVT VT, SDValue C) const {
    return false;
  }
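  // As an illustration (hypothetical target), a backend that only wants
  // shift/add expansions for constants with few set bits, e.g.
  // mul x, 9 --> (x << 3) + x, might override this as:
  //
  //   bool MyTargetLowering::decomposeMulByConstant(LLVMContext &Context,
  //                                                 EVT VT, SDValue C) const {
  //     if (auto *CN = dyn_cast<ConstantSDNode>(C))
  //       return CN->getAPIntValue().countPopulation() <= 2;
  //     return false;
  //   }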
2030 /// Return true if it is more correct/profitable to use strict FP_TO_INT
2031 /// conversion operations - canonicalizing the FP source value instead of
2032 /// converting all cases and then selecting based on value.
2033 /// This may be true if the target throws exceptions for out of bounds
2034 /// conversions or has fast FP CMOV.
2035 virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
                                        bool IsSigned) const {
    return false;
  }
2040 //===--------------------------------------------------------------------===//
2041 // TargetLowering Configuration Methods - These methods should be invoked by
2042 // the derived class constructor to configure this object for the target.
2045 /// Specify how the target extends the result of integer and floating point
2046 /// boolean values from i1 to a wider type. See getBooleanContents.
2047 void setBooleanContents(BooleanContent Ty) {
2048 BooleanContents = Ty;
2049 BooleanFloatContents = Ty;
2052 /// Specify how the target extends the result of integer and floating point
2053 /// boolean values from i1 to a wider type. See getBooleanContents.
2054 void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
2055 BooleanContents = IntTy;
2056 BooleanFloatContents = FloatTy;
2059 /// Specify how the target extends the result of a vector boolean value from a
2060 /// vector of i1 to a wider type. See getBooleanContents.
2061 void setBooleanVectorContents(BooleanContent Ty) {
2062 BooleanVectorContents = Ty;
2065 /// Specify the target scheduling preference.
2066 void setSchedulingPreference(Sched::Preference Pref) {
2067 SchedPreferenceInfo = Pref;
2070 /// Indicate the minimum number of blocks to generate jump tables.
2071 void setMinimumJumpTableEntries(unsigned Val);
2073 /// Indicate the maximum number of entries in jump tables.
2074 /// Set to zero to generate unlimited jump tables.
2075 void setMaximumJumpTableSize(unsigned);
2077 /// If set to a physical register, this specifies the register that
2078 /// llvm.savestack/llvm.restorestack should save and restore.
2079 void setStackPointerRegisterToSaveRestore(Register R) {
2080 StackPointerRegisterToSaveRestore = R;
2083 /// Tells the code generator that the target has multiple (allocatable)
2084 /// condition registers that can be used to store the results of comparisons
2085 /// for use by selects and conditional branches. With multiple condition
2086 /// registers, the code generator will not aggressively sink comparisons into
2087 /// the blocks of their users.
2088 void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
2089 HasMultipleConditionRegisters = hasManyRegs;
2092 /// Tells the code generator that the target has BitExtract instructions.
2093 /// The code generator will aggressively sink "shift"s into the blocks of
2094 /// their users if the users will generate "and" instructions which can be
2095 /// combined with "shift" to BitExtract instructions.
2096 void setHasExtractBitsInsn(bool hasExtractInsn = true) {
2097 HasExtractBitsInsn = hasExtractInsn;
2100 /// Tells the code generator not to expand logic operations on comparison
2101 /// predicates into separate sequences that increase the amount of flow
  /// control.
  void setJumpIsExpensive(bool isExpensive = true);
2105 /// Tells the code generator which bitwidths to bypass.
2106 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }
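  // Example usage from a target constructor; the 64/32 pairing mirrors what
  // x86 does for slow 64-bit divides and is shown only as an illustration:
  //
  //   addBypassSlowDiv(64, 32); // try a 32-bit unsigned div/rem first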
2110 /// Add the specified register class as an available regclass for the
2111 /// specified value type. This indicates the selector can handle values of
2112 /// that class natively.
2113 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
2114 assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
2115 RegClassForVT[VT.SimpleTy] = RC;
2118 /// Return the largest legal super-reg register class of the register class
2119 /// for the specified type and its associated "cost".
2120 virtual std::pair<const TargetRegisterClass *, uint8_t>
2121 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
2123 /// Once all of the register classes are added, this allows us to compute
2124 /// derived properties we expose.
2125 void computeRegisterProperties(const TargetRegisterInfo *TRI);
2127 /// Indicate that the specified operation does not work with the specified
2128 /// type and indicate what to do about it. Note that VT may refer to either
2129 /// the type of a result or that of an operand of Op.
2130 void setOperationAction(unsigned Op, MVT VT,
2131 LegalizeAction Action) {
2132 assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = Action;
  }
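  // Typical constructor usage (the opcodes and types are illustrative):
  //
  //   setOperationAction(ISD::SDIV, MVT::i32, Expand);   // no native divide
  //   setOperationAction(ISD::FSIN, MVT::f64, Expand);   // no native fsin
  //   setOperationAction(ISD::SELECT, MVT::i64, Custom); // lowered by hand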
2136 /// Indicate that the specified load with extension does not work with the
2137 /// specified type and indicate what to do about it.
2138 void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2139 LegalizeAction Action) {
2140 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2141 MemVT.isValid() && "Table isn't big enough!");
2142 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2143 unsigned Shift = 4 * ExtType;
2144 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
  }
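  // For example, a target with no extending f32 -> f64 load would write:
  //
  //   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);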
2148 /// Indicate that the specified truncating store does not work with the
2149 /// specified type and indicate what to do about it.
2150 void setTruncStoreAction(MVT ValVT, MVT MemVT,
2151 LegalizeAction Action) {
2152 assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
2153 TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
2156 /// Indicate that the specified indexed load does or does not work with the
  /// specified type and indicate what to do about it.
2159 /// NOTE: All indexed mode loads are initialized to Expand in
2160 /// TargetLowering.cpp
2161 void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action) {
2162 setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
2165 /// Indicate that the specified indexed store does or does not work with the
2166 /// specified type and indicate what to do about it.
2168 /// NOTE: All indexed mode stores are initialized to Expand in
2169 /// TargetLowering.cpp
2170 void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action) {
2171 setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
2174 /// Indicate that the specified indexed masked load does or does not work with
2175 /// the specified type and indicate what to do about it.
2177 /// NOTE: All indexed mode masked loads are initialized to Expand in
2178 /// TargetLowering.cpp
2179 void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
2180 LegalizeAction Action) {
2181 setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
2184 /// Indicate that the specified indexed masked store does or does not work
2185 /// with the specified type and indicate what to do about it.
2187 /// NOTE: All indexed mode masked stores are initialized to Expand in
2188 /// TargetLowering.cpp
2189 void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
2190 LegalizeAction Action) {
2191 setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
2194 /// Indicate that the specified condition code is or isn't supported on the
2195 /// target and indicate what to do about it.
2196 void setCondCodeAction(ISD::CondCode CC, MVT VT,
2197 LegalizeAction Action) {
2198 assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
2199 "Table isn't big enough!");
2200 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2201 /// The lower 3 bits of the SimpleTy index into Nth 4bit set from the 32-bit
2202 /// value and the upper 29 bits index into the second dimension of the array
2203 /// to select what 32-bit value to use.
2204 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
2205 CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
    CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
  }
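  // A worked example of the indexing arithmetic above (the SimpleTy value is
  // invented): for VT.SimpleTy == 10, Shift = 4 * (10 & 0x7) == 8 and the
  // second index is 10 >> 3 == 1, so the action occupies bits [8, 12) of
  // CondCodeActions[CC][1].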
2209 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
2210 /// to trying a larger integer/fp until it can find one that works. If that
2211 /// default is insufficient, this method can be used by the target to override
  /// the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
  }
2217 /// Convenience method to set an operation to Promote and specify the type
2218 /// in a single call.
2219 void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2220 setOperationAction(Opc, OrigVT, Promote);
    AddPromotedToType(Opc, OrigVT, DestVT);
  }
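  // Illustrative usage (types invented): perform all v4i16 ANDs as v4i32.
  //
  //   setOperationPromotedToType(ISD::AND, MVT::v4i16, MVT::v4i32);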
2224 /// Targets should invoke this method for each target independent node that
2225 /// they want to provide a custom DAG combiner for by implementing the
2226 /// PerformDAGCombine virtual method.
2227 void setTargetDAGCombine(ISD::NodeType NT) {
2228 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
2229 TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
2232 /// Set the target's minimum function alignment.
2233 void setMinFunctionAlignment(Align Alignment) {
2234 MinFunctionAlignment = Alignment;
2237 /// Set the target's preferred function alignment. This should be set if
2238 /// there is a performance benefit to higher-than-minimum alignment
2239 void setPrefFunctionAlignment(Align Alignment) {
2240 PrefFunctionAlignment = Alignment;
  /// Set the target's preferred loop alignment. The default alignment of one
  /// means the target does not care about loop alignment. The target may also
2245 /// override getPrefLoopAlignment to provide per-loop values.
2246 void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
2248 /// Set the minimum stack alignment of an argument.
2249 void setMinStackArgumentAlignment(Align Alignment) {
2250 MinStackArgumentAlignment = Alignment;
2253 /// Set the maximum atomic operation size supported by the
2254 /// backend. Atomic operations greater than this size (as well as
2255 /// ones that are not naturally aligned), will be expanded by
2256 /// AtomicExpandPass into an __atomic_* library call.
2257 void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
2258 MaxAtomicSizeInBitsSupported = SizeInBits;
2261 /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
2262 void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
2263 MinCmpXchgSizeInBits = SizeInBits;
2266 /// Sets whether unaligned atomic operations are supported.
2267 void setSupportsUnalignedAtomics(bool UnalignedSupported) {
2268 SupportsUnalignedAtomics = UnalignedSupported;
2272 //===--------------------------------------------------------------------===//
2273 // Addressing mode description hooks (used by LSR etc).
2276 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
2277 /// instructions reading the address. This allows as much computation as
2278 /// possible to be done in the address mode for that operand. This hook lets
  /// targets also pass back when this should be done on intrinsics which
  /// load/store.
  virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
                                    SmallVectorImpl<Value *> & /*Ops*/,
                                    Type *& /*AccessTy*/) const {
    return false;
  }
2287 /// This represents an addressing mode of:
2288 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
2289 /// If BaseGV is null, there is no BaseGV.
2290 /// If BaseOffs is zero, there is no base offset.
2291 /// If HasBaseReg is false, there is no base register.
  /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
  /// no scale.
  struct AddrMode {
    GlobalValue *BaseGV = nullptr;
    int64_t BaseOffs = 0;
    bool HasBaseReg = false;
    int64_t Scale = 0;
    AddrMode() = default;
  };
2302 /// Return true if the addressing mode represented by AM is legal for this
2303 /// target, for a load/store of the specified type.
2305 /// The type may be VoidTy, in which case only return true if the addressing
2306 /// mode is legal for a load/store of any legal type. TODO: Handle
2307 /// pre/postinc as well.
2309 /// If the address space cannot be determined, it will be -1.
2311 /// TODO: Remove default argument
2312 virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
2313 Type *Ty, unsigned AddrSpace,
2314 Instruction *I = nullptr) const;
2316 /// Return the cost of the scaling factor used in the addressing mode
2317 /// represented by AM for this target, for a load/store of the specified type.
2319 /// If the AM is supported, the return value must be >= 0.
2320 /// If the AM is not supported, it returns a negative value.
2321 /// TODO: Handle pre/postinc as well.
2322 /// TODO: Remove default argument
2323 virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
2324 Type *Ty, unsigned AS = 0) const {
2325 // Default: assume that any scaling factor used in a legal AM is free.
    if (isLegalAddressingMode(DL, AM, Ty, AS))
      return 0;
    return -1;
  }
2331 /// Return true if the specified immediate is legal icmp immediate, that is
2332 /// the target has icmp instructions which can compare a register against the
2333 /// immediate without having to materialize the immediate into a register.
  virtual bool isLegalICmpImmediate(int64_t) const {
    return true;
  }
2338 /// Return true if the specified immediate is legal add immediate, that is the
2339 /// target has add instructions which can add a register with the immediate
2340 /// without having to materialize the immediate into a register.
  virtual bool isLegalAddImmediate(int64_t) const {
    return true;
  }
2345 /// Return true if the specified immediate is legal for the value input of a
2346 /// store instruction.
2347 virtual bool isLegalStoreImmediate(int64_t Value) const {
    // Default implementation assumes that at least 0 works since it is likely
    // that a zero register exists or a zero immediate is allowed.
    return Value == 0;
  }
2353 /// Return true if it's significantly cheaper to shift a vector by a uniform
2354 /// scalar than by an amount which will vary across each lane. On x86 before
2355 /// AVX2 for example, there is a "psllw" instruction for the former case, but
2356 /// no simple instruction for a general "a << b" operation on vectors.
2357 /// This should also apply to lowering for vector funnel shifts (rotates).
  virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
    return false;
  }
2362 /// Given a shuffle vector SVI representing a vector splat, return a new
2363 /// scalar type of size equal to SVI's scalar type if the new type is more
2364 /// profitable. Returns nullptr otherwise. For example under MVE float splats
  /// are converted to integer to prevent the need to move from SPR to GPR
  /// registers directly.
  virtual Type *shouldConvertSplatType(ShuffleVectorInst *SVI) const {
    return nullptr;
  }
  /// Given a set of interconnected phis of type 'From' that are loaded/stored
  /// or bitcast to type 'To', return true if the set should be converted to
  /// 'To'.
  virtual bool shouldConvertPhiType(Type *From, Type *To) const {
    return (From->isIntegerTy() || From->isFloatingPointTy()) &&
           (To->isIntegerTy() || To->isFloatingPointTy());
  }
  /// Returns true if the opcode is a commutative binary operation.
  virtual bool isCommutativeBinOp(unsigned Opcode) const {
    // FIXME: This should get its info from the td file.
    switch (Opcode) {
    case ISD::ADD: case ISD::MUL: case ISD::MULHU: case ISD::MULHS:
    case ISD::AND: case ISD::OR: case ISD::XOR: case ISD::FADD: case ISD::FMUL:
    case ISD::SMIN: case ISD::SMAX: case ISD::UMIN: case ISD::UMAX:
    case ISD::SADDO: case ISD::UADDO: case ISD::ADDC: case ISD::ADDE:
    case ISD::SADDSAT: case ISD::UADDSAT:
    case ISD::SMUL_LOHI:
    case ISD::UMUL_LOHI:
    case ISD::FMINNUM: case ISD::FMAXNUM:
    case ISD::FMINNUM_IEEE:
    case ISD::FMAXNUM_IEEE:
    case ISD::FMINIMUM: case ISD::FMAXIMUM:
      return true;
    default: return false;
    }
  }
2415 /// Return true if the node is a math/logic binary operator.
  virtual bool isBinOp(unsigned Opcode) const {
    // A commutative binop must be a binop.
    if (isCommutativeBinOp(Opcode))
      return true;
    // These are non-commutative binops.
    switch (Opcode) {
    case ISD::SUB: case ISD::SHL: case ISD::SRA: case ISD::SRL:
    case ISD::SDIV: case ISD::UDIV: case ISD::SREM: case ISD::UREM:
    case ISD::SSUBSAT: case ISD::USUBSAT:
    case ISD::FSUB: case ISD::FDIV: case ISD::FREM:
      return true;
    default: return false;
    }
  }
2439 /// Return true if it's free to truncate a value of type FromTy to type
2440 /// ToTy. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
2441 /// by referencing its sub-register AX.
2442 /// Targets must return false when FromTy <= ToTy.
  virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
    return false;
  }
2447 /// Return true if a truncation from FromTy to ToTy is permitted when deciding
2448 /// whether a call is in tail position. Typically this means that both results
2449 /// would be assigned to the same register or stack slot, but it could mean
2450 /// the target performs adequate checks of its own before proceeding with the
2451 /// tail call. Targets must return false when FromTy <= ToTy.
  virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
    return false;
  }

  virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
    return false;
  }
2460 virtual bool isProfitableToHoist(Instruction *I) const { return true; }
2462 /// Return true if the extension represented by \p I is free.
  /// Unlike the is[Z|FP]ExtFree family, which is based on types,
2464 /// this method can use the context provided by \p I to decide
2465 /// whether or not \p I is free.
2466 /// This method extends the behavior of the is[Z|FP]ExtFree family.
  /// In other words, if is[Z|FP]ExtFree returns true, then this method
2468 /// returns true as well. The converse is not true.
2469 /// The target can perform the adequate checks by overriding isExtFreeImpl.
2470 /// \pre \p I must be a sign, zero, or fp extension.
  bool isExtFree(const Instruction *I) const {
    switch (I->getOpcode()) {
    case Instruction::FPExt:
      if (isFPExtFree(EVT::getEVT(I->getType()),
                      EVT::getEVT(I->getOperand(0)->getType())))
        return true;
      break;
    case Instruction::ZExt:
      if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
        return true;
      break;
    case Instruction::SExt:
      break;
    default:
      llvm_unreachable("Instruction is not an extension");
    }
    return isExtFreeImpl(I);
  }
2490 /// Return true if \p Load and \p Ext can form an ExtLoad.
2491 /// For example, in AArch64
2492 /// %L = load i8, i8* %ptr
2493 /// %E = zext i8 %L to i32
  /// can be lowered into one load instruction
  ///   ldrb w0, [x0]
2496 bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
2497 const DataLayout &DL) const {
2498 EVT VT = getValueType(DL, Ext->getType());
2499 EVT LoadVT = getValueType(DL, Load->getType());
    // If the load has other users and the truncate is not free, the ext
    // probably isn't free.
    if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
        !isTruncateFree(Ext->getType(), Load->getType()))
      return false;

    // Check whether the target supports casts folded into loads.
    ISD::LoadExtType LType;
    if (isa<ZExtInst>(Ext))
      LType = ISD::ZEXTLOAD;
    else {
      assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
      LType = ISD::SEXTLOAD;
    }

    return isLoadExtLegal(LType, VT, LoadVT);
  }
2519 /// Return true if any actual instruction that defines a value of type FromTy
2520 /// implicitly zero-extends the value to ToTy in the result register.
2522 /// The function should return true when it is likely that the truncate can
2523 /// be freely folded with an instruction defining a value of FromTy. If
2524 /// the defining instruction is unknown (because you're looking at a
2525 /// function argument, PHI, etc.) then the target may require an
2526 /// explicit truncate, which is not necessarily free, but this function
2527 /// does not deal with those cases.
2528 /// Targets must return false when FromTy >= ToTy.
  virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
    return false;
  }

  virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
    return false;
  }
  /// Return true if sign-extension from FromTy to ToTy is cheaper than
  /// zero-extension.
  virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
    return false;
  }
2543 /// Return true if sinking I's operands to the same basic block as I is
2544 /// profitable, e.g. because the operands can be folded into a target
2545 /// instruction during instruction selection. After calling the function
  /// \p Ops contains the Uses to sink ordered by dominance (dominating users
  /// first).
  virtual bool shouldSinkOperands(Instruction *I,
                                  SmallVectorImpl<Use *> &Ops) const {
    return false;
  }
2553 /// Return true if the target supplies and combines to a paired load
2554 /// two loaded values of type LoadedType next to each other in memory.
2555 /// RequiredAlignment gives the minimal alignment constraints that must be met
2556 /// to be able to select this paired load.
2558 /// This information is *not* used to generate actual paired loads, but it is
2559 /// used to generate a sequence of loads that is easier to combine into a
  /// paired load.
  /// For instance, something like this:
2562 /// a = load i64* addr
2563 /// b = trunc i64 a to i32
2564 /// c = lshr i64 a, 32
2565 /// d = trunc i64 c to i32
2566 /// will be optimized into:
2567 /// b = load i32* addr1
2568 /// d = load i32* addr2
2569 /// Where addr1 = addr2 +/- sizeof(i32).
  /// In other words, unless the target performs a post-isel load combining,
  /// this information should not be provided because it will generate more
  /// loads.
  virtual bool hasPairedLoad(EVT /*LoadedType*/,
                             Align & /*RequiredAlignment*/) const {
    return false;
  }
2579 /// Return true if the target has a vector blend instruction.
2580 virtual bool hasVectorBlend() const { return false; }
2582 /// Get the maximum supported factor for interleaved memory accesses.
2583 /// Default to be the minimum interleave factor: 2.
2584 virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
2586 /// Lower an interleaved load to target specific intrinsics. Return
2587 /// true on success.
2589 /// \p LI is the vector load instruction.
2590 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
2591 /// \p Indices is the corresponding indices for each shufflevector.
2592 /// \p Factor is the interleave factor.
2593 virtual bool lowerInterleavedLoad(LoadInst *LI,
2594 ArrayRef<ShuffleVectorInst *> Shuffles,
2595 ArrayRef<unsigned> Indices,
                                    unsigned Factor) const {
    return false;
  }
2600 /// Lower an interleaved store to target specific intrinsics. Return
2601 /// true on success.
2603 /// \p SI is the vector store instruction.
2604 /// \p SVI is the shufflevector to RE-interleave the stored vector.
2605 /// \p Factor is the interleave factor.
2606 virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                                     unsigned Factor) const {
    return false;
  }
2611 /// Return true if zero-extending the specific node Val to type VT2 is free
2612 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
2613 /// because it's folded such as X86 zero-extending loads).
2614 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
2615 return isZExtFree(Val.getValueType(), VT2);
2618 /// Return true if an fpext operation is free (for instance, because
2619 /// single-precision floating-point numbers are implicitly extended to
2620 /// double-precision).
2621 virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
2622 assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
2623 "invalid fpext types");
2627 /// Return true if an fpext operation input to an \p Opcode operation is free
2628 /// (for instance, because half-precision floating-point numbers are
2629 /// implicitly extended to float-precision) for an FMA instruction.
2630 virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
2631 EVT DestVT, EVT SrcVT) const {
2632 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
2633 "invalid fpext types");
2634 return isFPExtFree(DestVT, SrcVT);
2637 /// Return true if folding a vector load into ExtVal (a sign, zero, or any
2638 /// extend node) is profitable.
2639 virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
2641 /// Return true if an fneg operation is free to the point where it is never
2642 /// worthwhile to replace it with a bitwise operation.
2643 virtual bool isFNegFree(EVT VT) const {
    assert(VT.isFloatingPoint());
    return false;
  }
2648 /// Return true if an fabs operation is free to the point where it is never
2649 /// worthwhile to replace it with a bitwise operation.
2650 virtual bool isFAbsFree(EVT VT) const {
    assert(VT.isFloatingPoint());
    return false;
  }
2655 /// Return true if an FMA operation is faster than a pair of fmul and fadd
2656 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
2657 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
2659 /// NOTE: This may be called before legalization on types for which FMAs are
2660 /// not legal, but should return true if those types will eventually legalize
2661 /// to types that support FMAs. After legalization, it will only be called on
2662 /// types that support FMAs (via Legal or Custom actions)
  virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                          EVT) const {
    return false;
  }

  /// IR version
  virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
    return false;
  }
  /// Returns true if \p N can be combined with other nodes to form an
  /// ISD::FMAD. \p N may be an ISD::FADD, ISD::FSUB, or an ISD::FMUL which
  /// will be distributed into an FMAD.
2676 virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const {
2677 assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
2678 N->getOpcode() == ISD::FMUL) &&
2679 "unexpected node in FMAD forming combine");
2680 return isOperationLegal(ISD::FMAD, N->getValueType(0));
2683 /// Return true if it's profitable to narrow operations of type VT1 to
  /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
  /// i32 to i16.
  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }
2690 /// Return true if it is beneficial to convert a load of a constant to
2691 /// just the constant itself.
2692 /// On some targets it might be more efficient to use a combination of
2693 /// arithmetic instructions to materialize the constant instead of loading it
2694 /// from a constant pool.
  virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                 Type *Ty) const {
    return false;
  }
2700 /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
2701 /// from this source type with this index. This is needed because
2702 /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
2703 /// the first element, and only the target knows which lowering is cheap.
2704 virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                       unsigned Index) const {
    return false;
  }
2709 /// Try to convert an extract element of a vector binary operation into an
2710 /// extract element followed by a scalar operation.
  virtual bool shouldScalarizeBinop(SDValue VecOp) const {
    return false;
  }
2715 /// Return true if extraction of a scalar element from the given vector type
2716 /// at the given index is cheap. For example, if scalar operations occur on
2717 /// the same register file as vector operations, then an extract element may
2718 /// be a sub-register rename rather than an actual instruction.
  virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
    return false;
  }
2723 /// Try to convert math with an overflow comparison into the corresponding DAG
2724 /// node operation. Targets may want to override this independently of whether
2725 /// the operation is legal/custom for the given type because it may obscure
2726 /// matching of other patterns.
2727 virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
2728 bool MathUsed) const {
    // TODO: The default logic is inherited from code in CodeGenPrepare.
    // The opcode should not make a difference by default?
    if (Opcode != ISD::UADDO)
      return false;

    // Allow the transform as long as we have an integer type that is not
    // obviously illegal and unsupported and if the math result is used
    // besides the overflow check. On some targets (e.g. SPARC), it is
    // not profitable to form an overflow op if the math result has no
    // concrete users.
    if (VT.isVector())
      return false;
    return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
  }
2744 // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
2745 // even if the vector itself has multiple uses.
  virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
    return false;
  }
2750 // Return true if CodeGenPrepare should consider splitting large offset of a
2751 // GEP to make the GEP fit into the addressing mode and can be sunk into the
2752 // same blocks of its users.
2753 virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
2755 /// Return true if creating a shift of the type by the given
2756 /// amount is not profitable.
  virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
    return false;
  }
2761 //===--------------------------------------------------------------------===//
2762 // Runtime Library hooks
2765 /// Rename the default libcall routine name for the specified libcall.
2766 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
2767 LibcallRoutineNames[Call] = Name;
2770 /// Get the libcall routine name for the specified libcall.
2771 const char *getLibcallName(RTLIB::Libcall Call) const {
2772 return LibcallRoutineNames[Call];
2775 /// Override the default CondCode to be used to test the result of the
2776 /// comparison libcall against zero.
2777 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
2778 CmpLibcallCCs[Call] = CC;
2781 /// Get the CondCode that's to be used to test the result of the comparison
2782 /// libcall against zero.
2783 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
2784 return CmpLibcallCCs[Call];
2787 /// Set the CallingConv that should be used for the specified libcall.
2788 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
2789 LibcallCallingConvs[Call] = CC;
2792 /// Get the CallingConv that should be used for the specified libcall.
2793 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
    return LibcallCallingConvs[Call];
  }
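  // Illustrative configuration from a target constructor (the RTLIB and
  // CallingConv names are real; the particular choices are invented):
  //
  //   setLibcallName(RTLIB::SREM_I64, "__my_moddi3"); // custom runtime name
  //   setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::C);
  //   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ); // compare result against 0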
2797 /// Execute target specific actions to finalize target lowering.
2798 /// This is used to set extra flags in MachineFrameInformation and freezing
2799 /// the set of reserved registers.
2800 /// The default implementation just freezes the set of reserved registers.
2801 virtual void finalizeLowering(MachineFunction &MF) const;
  //===----------------------------------------------------------------------===//
  // GlobalISel Hooks
  //===----------------------------------------------------------------------===//
2806 /// Check whether or not \p MI needs to be moved close to its uses.
2807 virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const;
2811 const TargetMachine &TM;
2813 /// Tells the code generator that the target has multiple (allocatable)
2814 /// condition registers that can be used to store the results of comparisons
2815 /// for use by selects and conditional branches. With multiple condition
2816 /// registers, the code generator will not aggressively sink comparisons into
2817 /// the blocks of their users.
2818 bool HasMultipleConditionRegisters;
2820 /// Tells the code generator that the target has BitExtract instructions.
2821 /// The code generator will aggressively sink "shift"s into the blocks of
2822 /// their users if the users will generate "and" instructions which can be
2823 /// combined with "shift" to BitExtract instructions.
2824 bool HasExtractBitsInsn;
2826 /// Tells the code generator to bypass slow divide or remainder
2827 /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
2828 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
2829 /// div/rem when the operands are positive and less than 256.
2830 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
2832 /// Tells the code generator that it shouldn't generate extra flow control
2833 /// instructions and should attempt to combine flow control instructions via
  /// predication.
  bool JumpIsExpensive;
2837 /// Information about the contents of the high-bits in boolean values held in
2838 /// a type wider than i1. See getBooleanContents.
2839 BooleanContent BooleanContents;
2841 /// Information about the contents of the high-bits in boolean values held in
2842 /// a type wider than i1. See getBooleanContents.
2843 BooleanContent BooleanFloatContents;
2845 /// Information about the contents of the high-bits in boolean vector values
2846 /// when the element type is wider than i1. See getBooleanContents.
2847 BooleanContent BooleanVectorContents;
2849 /// The target scheduling preference: shortest possible total cycles or lowest
  /// register usage.
  Sched::Preference SchedPreferenceInfo;
2853 /// The minimum alignment that any argument on the stack needs to have.
2854 Align MinStackArgumentAlignment;
2856 /// The minimum function alignment (used when optimizing for size, and to
2857 /// prevent explicitly provided alignment from leading to incorrect code).
2858 Align MinFunctionAlignment;
2860 /// The preferred function alignment (used when alignment unspecified and
2861 /// optimizing for speed).
2862 Align PrefFunctionAlignment;
  /// The preferred loop alignment.
2865 Align PrefLoopAlignment;
2867 /// Size in bits of the maximum atomics size the backend supports.
2868 /// Accesses larger than this will be expanded by AtomicExpandPass.
2869 unsigned MaxAtomicSizeInBitsSupported;
2871 /// Size in bits of the minimum cmpxchg or ll/sc operation the
2872 /// backend supports.
2873 unsigned MinCmpXchgSizeInBits;
2875 /// This indicates if the target supports unaligned atomic operations.
2876 bool SupportsUnalignedAtomics;
2878 /// If set to a physical register, this specifies the register that
2879 /// llvm.savestack/llvm.restorestack should save and restore.
2880 Register StackPointerRegisterToSaveRestore;
2882 /// This indicates the default register class to use for each ValueType the
2883 /// target supports natively.
2884 const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
2885 uint16_t NumRegistersForVT[MVT::LAST_VALUETYPE];
2886 MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
2888 /// This indicates the "representative" register class to use for each
2889 /// ValueType the target supports natively. This information is used by the
2890 /// scheduler to track register pressure. By default, the representative
2891 /// register class is the largest legal super-reg register class of the
2892 /// register class of the specified type. e.g. On x86, i8, i16, and i32's
2893 /// representative class would be GR32.
2894 const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
2896 /// This indicates the "cost" of the "representative" register class for each
2897 /// ValueType. The cost is used by the scheduler to approximate register
  /// pressure.
  uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
2901 /// For any value types we are promoting or expanding, this contains the value
2902 /// type that we are changing to. For Expanded types, this contains one step
2903 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
2904 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
2905 /// the same type (e.g. i32 -> i32).
2906 MVT TransformToType[MVT::LAST_VALUETYPE];
2908 /// For each operation and each value type, keep a LegalizeAction that
2909 /// indicates how instruction selection should deal with the operation. Most
2910 /// operations are Legal (aka, supported natively by the target), but
2911 /// operations that are not should be described. Note that operations on
2912 /// non-legal value types are not described here.
2913 LegalizeAction OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
2915 /// For each load extension type and each value type, keep a LegalizeAction
2916 /// that indicates how instruction selection should deal with a load of a
2917 /// specific value type and extension type. Uses 4-bits to store the action
2918 /// for each of the 4 load ext types.
2919 uint16_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2921 /// For each value type pair keep a LegalizeAction that indicates whether a
2922 /// truncating store of a specific value type and truncating type is legal.
2923 LegalizeAction TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2925 /// For each indexed mode and each value type, keep a quad of LegalizeAction
2926 /// that indicates how instruction selection should deal with the load /
2927 /// store / maskedload / maskedstore.
2929 /// The first dimension is the value_type for the reference. The second
2930 /// dimension represents the various modes for load store.
2931 uint16_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
2933 /// For each condition code (ISD::CondCode) keep a LegalizeAction that
2934 /// indicates how instruction selection should deal with the condition code.
2936 /// Because each CC action takes up 4 bits, we need to have the array size be
2937 /// large enough to fit all of the value types. This can be done by rounding
2938 /// up the MVT::LAST_VALUETYPE value to the next multiple of 8.
2939 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 7) / 8];
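/// Editor's note: a sketch of how one action is unpacked from this table,
/// mirroring the logic of TargetLoweringBase::getCondCodeAction; VT is an MVT
/// and CC the ISD::CondCode being queried:
/// \code
///   uint32_t Shift = 4 * (VT.SimpleTy & 0x7); // 8 packed entries per word
///   LegalizeAction Action = (LegalizeAction)(
///       (CondCodeActions[CC][VT.SimpleTy >> 3] >> Shift) & 0xF);
/// \endcode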
2941 ValueTypeActionImpl ValueTypeActions;
2943 private:
2944 LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
2946 /// Targets can specify ISD nodes that they would like PerformDAGCombine
2947 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
2948 /// array.
2949 unsigned char
2950 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
2952 /// For operations that must be promoted to a specific type, this holds the
2953 /// destination type. This map should be sparse, so don't hold it as an
2954 /// array.
2956 /// Targets add entries to this map with AddPromotedToType(..), clients access
2957 /// this with getTypeToPromoteTo(..).
2958 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
2959 PromoteToType;
2961 /// Stores the name of each libcall.
2962 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];
2964 /// The ISD::CondCode that should be used to test the result of each of the
2965 /// comparison libcall against zero.
2966 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
2968 /// Stores the CallingConv that should be used for each libcall.
2969 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
2971 /// Set default libcall names and calling conventions.
2972 void InitLibcalls(const Triple &TT);
2974 /// The bits of IndexedModeActions used to store the legalisation actions.
2975 /// We store the data as | ML | MS | L | S | each taking 4 bits.
2976 enum IndexedModeActionsBits {
2977 IMAB_Store = 0,
2978 IMAB_Load = 4,
2979 IMAB_MaskedStore = 8,
2980 IMAB_MaskedLoad = 12
2981 };
2983 void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
2984 LegalizeAction Action) {
2985 assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
2986 (unsigned)Action < 0xf && "Table isn't big enough!");
2987 unsigned Ty = (unsigned)VT.SimpleTy;
2988 IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
2989 IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
2990 }
2992 LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
2993 unsigned Shift) const {
2994 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
2995 "Table isn't big enough!");
2996 unsigned Ty = (unsigned)VT.SimpleTy;
2997 return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
2998 }
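/// Editor's note: targets do not poke these tables directly; they go through
/// the public wrappers built on the two helpers above. A hedged sketch:
/// \code
///   // In a hypothetical target's constructor:
///   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
///   setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
///   // Later, legalization queries the same packed data:
///   if (getIndexedLoadAction(ISD::PRE_INC, MVT::i32) == Legal) { /* ... */ }
/// \endcode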
3001 /// Return true if the extension represented by \p I is free.
3002 /// \pre \p I is a sign, zero, or fp extension and
3003 /// is[Z|FP]ExtFree of the related types is not true.
3004 virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
3006 /// Depth that GatherAllAliases should continue looking for chain
3007 /// dependencies when trying to find a more preferable chain. As an
3008 /// approximation, this should be more than the number of consecutive stores
3009 /// expected to be merged.
3010 unsigned GatherAllAliasesMaxDepth;
3012 /// \brief Specify maximum number of store instructions per memset call.
3014 /// When lowering \@llvm.memset this field specifies the maximum number of
3015 /// store operations that may be substituted for the call to memset. Targets
3016 /// must set this value based on the cost threshold for that target. Targets
3017 /// should assume that the memset will be done using as many of the largest
3018 /// store operations first, followed by smaller ones, if necessary, per
3019 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
3020 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
3021 /// store. This only applies to setting a constant array of a constant size.
3022 unsigned MaxStoresPerMemset;
3023 /// Likewise for functions with the OptSize attribute.
3024 unsigned MaxStoresPerMemsetOptSize;
3026 /// \brief Specify maximum number of store instructions per memcpy call.
3028 /// When lowering \@llvm.memcpy this field specifies the maximum number of
3029 /// store operations that may be substituted for a call to memcpy. Targets
3030 /// must set this value based on the cost threshold for that target. Targets
3031 /// should assume that the memcpy will be done using as many of the largest
3032 /// store operations first, followed by smaller ones, if necessary, per
3033 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
3034 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
3035 /// and one 1-byte store. This only applies to copying a constant array of
3036 /// constant size.
3037 unsigned MaxStoresPerMemcpy;
3038 /// Likewise for functions with the OptSize attribute.
3039 unsigned MaxStoresPerMemcpyOptSize;
3040 /// \brief Specify max number of store instructions to glue in inlined memcpy.
3042 /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
3043 /// of store instructions to keep together. This helps in pairing and
3044 /// vectorization later on.
3045 unsigned MaxGluedStoresPerMemcpy = 0;
3047 /// \brief Specify maximum number of load instructions per memcmp call.
3049 /// When lowering \@llvm.memcmp this field specifies the maximum number of
3050 /// pairs of load operations that may be substituted for a call to memcmp.
3051 /// Targets must set this value based on the cost threshold for that target.
3052 /// Targets should assume that the memcmp will be done using as many of the
3053 /// largest load operations first, followed by smaller ones, if necessary, per
3054 /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine
3055 /// with 32-bit alignment would result in one 4-byte load, one 2-byte load,
3056 /// and one 1-byte load. This only applies to comparing a constant array of
3057 /// constant size.
3058 unsigned MaxLoadsPerMemcmp;
3059 /// Likewise for functions with the OptSize attribute.
3060 unsigned MaxLoadsPerMemcmpOptSize;
3062 /// \brief Specify maximum number of store instructions per memmove call.
3064 /// When lowering \@llvm.memmove this field specifies the maximum number of
3065 /// store instructions that may be substituted for a call to memmove. Targets
3066 /// must set this value based on the cost threshold for that target. Targets
3067 /// should assume that the memmove will be done using as many of the largest
3068 /// store operations first, followed by smaller ones, if necessary, per
3069 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
3070 /// with 8-bit alignment would result in nine 1-byte stores. This only
3071 /// applies to copying a constant array of constant size.
3072 unsigned MaxStoresPerMemmove;
3073 /// Likewise for functions with the OptSize attribute.
3074 unsigned MaxStoresPerMemmoveOptSize;
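/// Editor's note: an illustrative sketch of a target tuning these thresholds
/// in its TargetLowering constructor (the field names are the real protected
/// members above; the values are made up):
/// \code
///   MaxStoresPerMemset = 16;
///   MaxStoresPerMemsetOptSize = 8;
///   MaxStoresPerMemcpy = 8;
///   MaxStoresPerMemcpyOptSize = 4;
///   MaxStoresPerMemmove = 8;
///   MaxStoresPerMemmoveOptSize = 4;
/// \endcode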
3076 /// Tells the code generator that select is more expensive than a branch if
3077 /// the branch is usually predicted right.
3078 bool PredictableSelectIsExpensive;
3080 /// \see enableExtLdPromotion.
3081 bool EnableExtLdPromotion;
3083 /// Return true if the value types that can be represented by the specified
3084 /// register class are all legal.
3085 bool isLegalRC(const TargetRegisterInfo &TRI,
3086 const TargetRegisterClass &RC) const;
3088 /// Replace/modify any TargetFrameIndex operands with a target-dependent
3089 /// sequence of memory operands that is recognized by PrologEpilogInserter.
3090 MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
3091 MachineBasicBlock *MBB) const;
3093 /// Replace/modify the XRay custom event operands with target-dependent
3094 /// details.
3095 MachineBasicBlock *emitXRayCustomEvent(MachineInstr &MI,
3096 MachineBasicBlock *MBB) const;
3098 /// Replace/modify the XRay typed event operands with target-dependent
3099 /// details.
3100 MachineBasicBlock *emitXRayTypedEvent(MachineInstr &MI,
3101 MachineBasicBlock *MBB) const;
3103 bool IsStrictFPEnabled;
3104 };
3106 /// This class defines information used to lower LLVM code to legal SelectionDAG
3107 /// operators that the target instruction selector can accept natively.
3109 /// This class also defines callbacks that targets must implement to lower
3110 /// target-specific constructs to SelectionDAG operators.
3111 class TargetLowering : public TargetLoweringBase {
3112 public:
3113 struct DAGCombinerInfo;
3114 struct MakeLibCallOptions;
3116 TargetLowering(const TargetLowering &) = delete;
3117 TargetLowering &operator=(const TargetLowering &) = delete;
3119 explicit TargetLowering(const TargetMachine &TM);
3121 bool isPositionIndependent() const;
3123 virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
3124 FunctionLoweringInfo *FLI,
3125 LegacyDivergenceAnalysis *DA) const {
3126 return false;
3127 }
3129 virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
3130 return false;
3131 }
3133 /// Returns true by value, base pointer and offset pointer and addressing mode
3134 /// by reference if the node's address can be legally represented as
3135 /// pre-indexed load / store address.
3136 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
3137 SDValue &/*Offset*/,
3138 ISD::MemIndexedMode &/*AM*/,
3139 SelectionDAG &/*DAG*/) const {
3140 return false;
3141 }
3143 /// Returns true by value, base pointer and offset pointer and addressing mode
3144 /// by reference if this node can be combined with a load / store to form a
3145 /// post-indexed load / store.
3146 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
3147 SDValue &/*Base*/,
3148 SDValue &/*Offset*/,
3149 ISD::MemIndexedMode &/*AM*/,
3150 SelectionDAG &/*DAG*/) const {
3151 return false;
3152 }
3154 /// Returns true if the specified base+offset is a legal indexed addressing
3155 /// mode for this target. \p MI is the load or store instruction that is being
3156 /// considered for transformation.
3157 virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
3158 bool IsPre, MachineRegisterInfo &MRI) const {
3159 return false;
3160 }
3162 /// Return the entry encoding for a jump table in the current function. The
3163 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
3164 virtual unsigned getJumpTableEncoding() const;
3166 virtual const MCExpr *
3167 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
3168 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
3169 MCContext &/*Ctx*/) const {
3170 llvm_unreachable("Need to implement this hook if target has custom JTIs");
3171 }
3173 /// Returns relocation base for the given PIC jumptable.
3174 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
3175 SelectionDAG &DAG) const;
3177 /// This returns the relocation base for the given PIC jumptable, the same as
3178 /// getPICJumpTableRelocBase, but as an MCExpr.
3179 virtual const MCExpr *
3180 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3181 unsigned JTI, MCContext &Ctx) const;
3183 /// Return true if folding a constant offset with the given GlobalAddress is
3184 /// legal. It is frequently not legal in PIC relocation models.
3185 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
3187 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
3188 SDValue &Chain) const;
3190 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3191 SDValue &NewRHS, ISD::CondCode &CCCode,
3192 const SDLoc &DL, const SDValue OldLHS,
3193 const SDValue OldRHS) const;
3195 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3196 SDValue &NewRHS, ISD::CondCode &CCCode,
3197 const SDLoc &DL, const SDValue OldLHS,
3198 const SDValue OldRHS, SDValue &Chain,
3199 bool IsSignaling = false) const;
3201 /// Returns a pair of (return value, chain).
3202 /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
3203 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
3204 EVT RetVT, ArrayRef<SDValue> Ops,
3205 MakeLibCallOptions CallOptions,
3206 const SDLoc &dl,
3207 SDValue Chain = SDValue()) const;
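/// Editor's note: a hedged sketch of a typical makeLibCall use, softening an
/// f32 addition into a call to the RTLIB::ADD_F32 routine (variable names are
/// illustrative):
/// \code
///   SDValue Ops[2] = {LHS, RHS};
///   MakeLibCallOptions CallOptions;
///   std::pair<SDValue, SDValue> Res =
///       makeLibCall(DAG, RTLIB::ADD_F32, MVT::f32, Ops, CallOptions, dl);
///   SDValue Result = Res.first; // Res.second is the output chain
/// \endcode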
3209 /// Check whether parameters to a call that are passed in callee saved
3210 /// registers are the same as from the calling function. This needs to be
3211 /// checked for tail call eligibility.
3212 bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
3213 const uint32_t *CallerPreservedMask,
3214 const SmallVectorImpl<CCValAssign> &ArgLocs,
3215 const SmallVectorImpl<SDValue> &OutVals) const;
3217 //===--------------------------------------------------------------------===//
3218 // TargetLowering Optimization Methods
3221 /// A convenience struct that encapsulates a DAG, and two SDValues for
3222 /// returning information from TargetLowering to its clients that want to
3223 /// combine.
3224 struct TargetLoweringOpt {
3225 SelectionDAG &DAG;
3226 bool LegalTys;
3227 bool LegalOps;
3228 SDValue Old;
3229 SDValue New;
3231 explicit TargetLoweringOpt(SelectionDAG &InDAG,
3232 bool LT, bool LO) :
3233 DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
3235 bool LegalTypes() const { return LegalTys; }
3236 bool LegalOperations() const { return LegalOps; }
3238 bool CombineTo(SDValue O, SDValue N) {
3239 Old = O;
3240 New = N;
3241 return true;
3242 }
3243 };
3245 /// Determines the optimal series of memory ops to replace the memset / memcpy.
3246 /// Return true if the number of memory ops is below the threshold (Limit).
3247 /// It returns the types of the sequence of memory ops to perform
3248 /// memset / memcpy by reference.
3249 bool findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
3250 const MemOp &Op, unsigned DstAS, unsigned SrcAS,
3251 const AttributeList &FuncAttributes) const;
3253 /// Check to see if the specified operand of the specified instruction is a
3254 /// constant integer. If so, check to see if there are any bits set in the
3255 /// constant that are not demanded. If so, shrink the constant and return
3256 /// true.
3257 bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3258 const APInt &DemandedElts,
3259 TargetLoweringOpt &TLO) const;
3261 /// Helper wrapper around ShrinkDemandedConstant, demanding all elements.
3262 bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3263 TargetLoweringOpt &TLO) const;
3265 // Target hook to do target-specific const optimization, which is called by
3266 // ShrinkDemandedConstant. This function should return true if the target
3267 // doesn't want ShrinkDemandedConstant to further optimize the constant.
3268 virtual bool targetShrinkDemandedConstant(SDValue Op,
3269 const APInt &DemandedBits,
3270 const APInt &DemandedElts,
3271 TargetLoweringOpt &TLO) const {
3272 return false;
3273 }
3275 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
3276 /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
3277 /// generalized for targets with other types of implicit widening casts.
3278 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
3279 TargetLoweringOpt &TLO) const;
3281 /// Look at Op. At this point, we know that only the DemandedBits bits of the
3282 /// result of Op are ever used downstream. If we can use this information to
3283 /// simplify Op, create a new simplified DAG node and return true, returning
3284 /// the original and new nodes in Old and New. Otherwise, analyze the
3285 /// expression and return a mask of KnownOne and KnownZero bits for the
3286 /// expression (used to simplify the caller). The KnownZero/One bits may only
3287 /// be accurate for those bits in the Demanded masks.
3288 /// \p AssumeSingleUse When this parameter is true, this function will
3289 /// attempt to simplify \p Op even if there are multiple uses.
3290 /// Callers are responsible for correctly updating the DAG based on the
3291 /// results of this function, because simply replacing TLO.Old
3292 /// with TLO.New will be incorrect when this parameter is true and TLO.Old
3293 /// has multiple uses.
3294 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3295 const APInt &DemandedElts, KnownBits &Known,
3296 TargetLoweringOpt &TLO, unsigned Depth = 0,
3297 bool AssumeSingleUse = false) const;
3299 /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
3300 /// Adds Op back to the worklist upon success.
3301 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3302 KnownBits &Known, TargetLoweringOpt &TLO,
3303 unsigned Depth = 0,
3304 bool AssumeSingleUse = false) const;
3306 /// Helper wrapper around SimplifyDemandedBits.
3307 /// Adds Op back to the worklist upon success.
3308 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3309 DAGCombinerInfo &DCI) const;
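/// Editor's note: a hedged sketch of the usual calling pattern from a
/// target's PerformDAGCombine, using TargetLoweringOpt to stage the rewrite:
/// \code
///   APInt DemandedBits = APInt::getLowBitsSet(64, 32); // only low 32 bits used
///   KnownBits Known;
///   TargetLoweringOpt TLO(DCI.DAG, !DCI.isBeforeLegalize(),
///                         !DCI.isBeforeLegalizeOps());
///   if (SimplifyDemandedBits(N->getOperand(0), DemandedBits, Known, TLO)) {
///     DCI.CommitTargetLoweringOpt(TLO);
///     return SDValue(N, 0);
///   }
/// \endcode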
3311 /// More limited version of SimplifyDemandedBits that can be used to "look
3312 /// through" ops that don't contribute to the DemandedBits/DemandedElts -
3313 /// bitwise ops etc.
3314 SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
3315 const APInt &DemandedElts,
3316 SelectionDAG &DAG,
3317 unsigned Depth) const;
3319 /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
3320 /// elements.
3321 SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
3322 SelectionDAG &DAG,
3323 unsigned Depth = 0) const;
3325 /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
3326 /// bits from only some vector elements.
3327 SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op,
3328 const APInt &DemandedElts,
3329 SelectionDAG &DAG,
3330 unsigned Depth = 0) const;
3332 /// Look at Vector Op. At this point, we know that only the DemandedElts
3333 /// elements of the result of Op are ever used downstream. If we can use
3334 /// this information to simplify Op, create a new simplified DAG node and
3335 /// return true, storing the original and new nodes in TLO.
3336 /// Otherwise, analyze the expression and return a mask of KnownUndef and
3337 /// KnownZero elements for the expression (used to simplify the caller).
3338 /// The KnownUndef/Zero elements may only be accurate for those bits
3339 /// in the DemandedMask.
3340 /// \p AssumeSingleUse When this parameter is true, this function will
3341 /// attempt to simplify \p Op even if there are multiple uses.
3342 /// Callers are responsible for correctly updating the DAG based on the
3343 /// results of this function, because simply replacing TLO.Old
3344 /// with TLO.New will be incorrect when this parameter is true and TLO.Old
3345 /// has multiple uses.
3346 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
3347 APInt &KnownUndef, APInt &KnownZero,
3348 TargetLoweringOpt &TLO, unsigned Depth = 0,
3349 bool AssumeSingleUse = false) const;
3351 /// Helper wrapper around SimplifyDemandedVectorElts.
3352 /// Adds Op back to the worklist upon success.
3353 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
3354 APInt &KnownUndef, APInt &KnownZero,
3355 DAGCombinerInfo &DCI) const;
3357 /// Determine which of the bits specified in Mask are known to be either zero
3358 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
3359 /// argument allows us to only collect the known bits that are shared by the
3360 /// requested vector elements.
3361 virtual void computeKnownBitsForTargetNode(const SDValue Op,
3362 KnownBits &Known,
3363 const APInt &DemandedElts,
3364 const SelectionDAG &DAG,
3365 unsigned Depth = 0) const;
3367 /// Determine which of the bits specified in Mask are known to be either zero
3368 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
3369 /// argument allows us to only collect the known bits that are shared by the
3370 /// requested vector elements. This is for GISel.
3371 virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis,
3372 Register R, KnownBits &Known,
3373 const APInt &DemandedElts,
3374 const MachineRegisterInfo &MRI,
3375 unsigned Depth = 0) const;
3377 /// Determine the known alignment for the pointer value \p R. This can
3378 /// typically be inferred from the number of low known 0 bits. However, for a
3379 /// pointer with a non-integral address space, the alignment value may be
3380 /// independent from the known low bits.
3381 virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis,
3382 Register R,
3383 const MachineRegisterInfo &MRI,
3384 unsigned Depth = 0) const;
3386 /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
3387 /// Default implementation computes low bits based on alignment
3388 /// information. This should preserve known bits passed into it.
3389 virtual void computeKnownBitsForFrameIndex(int FIOp,
3390 KnownBits &Known,
3391 const MachineFunction &MF) const;
3393 /// This method can be implemented by targets that want to expose additional
3394 /// information about sign bits to the DAG Combiner. The DemandedElts
3395 /// argument allows us to only collect the minimum sign bits that are shared
3396 /// by the requested vector elements.
3397 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
3398 const APInt &DemandedElts,
3399 const SelectionDAG &DAG,
3400 unsigned Depth = 0) const;
3402 /// This method can be implemented by targets that want to expose additional
3403 /// information about sign bits to GlobalISel combiners. The DemandedElts
3404 /// argument allows us to only collect the minimum sign bits that are shared
3405 /// by the requested vector elements.
3406 virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
3407 Register R,
3408 const APInt &DemandedElts,
3409 const MachineRegisterInfo &MRI,
3410 unsigned Depth = 0) const;
3412 /// Attempt to simplify any target nodes based on the demanded vector
3413 /// elements, returning true on success. Otherwise, analyze the expression and
3414 /// return a mask of KnownUndef and KnownZero elements for the expression
3415 /// (used to simplify the caller). The KnownUndef/Zero elements may only be
3416 /// accurate for those bits in the DemandedMask.
3417 virtual bool SimplifyDemandedVectorEltsForTargetNode(
3418 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
3419 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
3421 /// Attempt to simplify any target nodes based on the demanded bits/elts,
3422 /// returning true on success. Otherwise, analyze the
3423 /// expression and return a mask of KnownOne and KnownZero bits for the
3424 /// expression (used to simplify the caller). The KnownZero/One bits may only
3425 /// be accurate for those bits in the Demanded masks.
3426 virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
3427 const APInt &DemandedBits,
3428 const APInt &DemandedElts,
3429 KnownBits &Known,
3430 TargetLoweringOpt &TLO,
3431 unsigned Depth = 0) const;
3433 /// More limited version of SimplifyDemandedBits that can be used to "look
3434 /// through" ops that don't contribute to the DemandedBits/DemandedElts -
3435 /// bitwise ops etc.
3436 virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
3437 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
3438 SelectionDAG &DAG, unsigned Depth) const;
3440 /// Tries to build a legal vector shuffle using the provided parameters
3441 /// or equivalent variations. The Mask argument may be modified as the
3442 /// function tries different variations.
3443 /// Returns an empty SDValue if the operation fails.
3444 SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
3445 SDValue N1, MutableArrayRef<int> Mask,
3446 SelectionDAG &DAG) const;
3448 /// This method returns the constant pool value that will be loaded by LD.
3449 /// NOTE: You must check for implicit extensions of the constant by LD.
3450 virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;
3452 /// If \p SNaN is false, \returns true if \p Op is known to never be any
3453 /// NaN. If \p SNaN is true, returns true if \p Op is known to never be a
3454 /// signaling NaN.
3455 virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
3456 const SelectionDAG &DAG,
3457 bool SNaN = false,
3458 unsigned Depth = 0) const;
3459 struct DAGCombinerInfo {
3460 void *DC; // The DAG Combiner object.
3461 CombineLevel Level;
3462 bool CalledByLegalizer;
3464 public:
3465 SelectionDAG &DAG;
3467 DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
3468 : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
3470 bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
3471 bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
3472 bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; }
3473 CombineLevel getDAGCombineLevel() { return Level; }
3474 bool isCalledByLegalizer() const { return CalledByLegalizer; }
3476 void AddToWorklist(SDNode *N);
3477 SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
3478 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
3479 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
3481 bool recursivelyDeleteUnusedNodes(SDNode *N);
3483 void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
3484 };
3486 /// Return true if \p N is a constant or constant vector equal to the true
3487 /// value from getBooleanContents().
3488 bool isConstTrueVal(const SDNode *N) const;
3490 /// Return true if \p N is a constant or constant vector equal to the false
3491 /// from getBooleanContents().
3492 bool isConstFalseVal(const SDNode *N) const;
3494 /// Return if \p N is a True value when extended to \p VT.
3495 bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;
3497 /// Try to simplify a setcc built with the specified operands and cc. If it is
3498 /// unable to simplify it, return a null SDValue.
3499 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
3500 bool foldBooleans, DAGCombinerInfo &DCI,
3501 const SDLoc &dl) const;
3503 // For targets which wrap address, unwrap for analysis.
3504 virtual SDValue unwrapAddress(SDValue N) const { return N; }
3506 /// Returns true (and the GlobalValue and the offset) if the node is a
3507 /// GlobalAddress + offset.
3508 virtual bool
3509 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
3511 /// This method will be invoked for all target nodes and for any
3512 /// target-independent nodes that the target has registered (via
3513 /// setTargetDAGCombine()) to have this callback invoked for.
3515 /// The semantics are as follows:
3517 /// SDValue.Val == 0 - No change was made
3518 /// SDValue.Val == N - N was replaced, is dead, and is already handled.
3519 /// otherwise - N should be replaced by the returned Operand.
3521 /// In addition, methods provided by DAGCombinerInfo may be used to perform
3522 /// more complex transformations.
3524 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
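/// Editor's note: a hedged skeleton of a target override; MYTGTISD::MY_NODE
/// is a made-up target opcode:
/// \code
///   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
///                                               DAGCombinerInfo &DCI) const {
///     switch (N->getOpcode()) {
///     case MYTGTISD::MY_NODE:
///       // Fold away a redundant MY_NODE of MY_NODE, for example.
///       if (N->getOperand(0).getOpcode() == MYTGTISD::MY_NODE)
///         return N->getOperand(0);
///       break;
///     }
///     return SDValue(); // no change
///   }
/// \endcode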
3526 /// Return true if it is profitable to move this shift by a constant amount
3527 /// through its operand, adjusting any immediate operands as necessary to
3528 /// preserve semantics. This transformation may not be desirable if it
3529 /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
3530 /// extraction in AArch64). By default, it returns true.
3532 /// @param N the shift node
3533 /// @param Level the current DAGCombine legalization level.
3534 virtual bool isDesirableToCommuteWithShift(const SDNode *N,
3535 CombineLevel Level) const {
3536 return true;
3537 }
3539 /// Return true if the target has native support for the specified value type
3540 /// and it is 'desirable' to use the type for the given node type. e.g. On x86
3541 /// i16 is legal, but undesirable since i16 instruction encodings are longer
3542 /// and some i16 instructions are slow.
3543 virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
3544 // By default, assume all legal types are desirable.
3545 return isTypeLegal(VT);
3546 }
3548 /// Return true if it is profitable for dag combiner to transform a floating
3549 /// point op of specified opcode to an equivalent op of an integer
3550 /// type. e.g. f32 load -> i32 load can be profitable on ARM.
3551 virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
3552 EVT /*VT*/) const {
3553 return false;
3554 }
3556 /// This method queries the target whether it is beneficial for dag combiner to
3557 /// promote the specified node. If true, it should return the desired
3558 /// promotion type by reference.
3559 virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
3560 return false;
3561 }
3563 /// Return true if the target supports swifterror attribute. It optimizes
3564 /// loads and stores to reading and writing a specific register.
3565 virtual bool supportSwiftError() const {
3566 return false;
3567 }
3569 /// Return true if the target supports that a subset of CSRs for the given
3570 /// machine function is handled explicitly via copies.
3571 virtual bool supportSplitCSR(MachineFunction *MF) const {
3572 return false;
3573 }
3575 /// Perform necessary initialization to handle a subset of CSRs explicitly
3576 /// via copies. This function is called at the beginning of instruction
3577 /// selection.
3578 virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
3579 llvm_unreachable("Not Implemented");
3580 }
3582 /// Insert explicit copies in entry and exit blocks. We copy a subset of
3583 /// CSRs to virtual registers in the entry block, and copy them back to
3584 /// physical registers in the exit blocks. This function is called at the end
3585 /// of instruction selection.
3586 virtual void insertCopiesSplitCSR(
3587 MachineBasicBlock *Entry,
3588 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
3589 llvm_unreachable("Not Implemented");
3590 }
3592 /// Return the newly negated expression if the cost is not expensive and
3593 /// set the cost in \p Cost to indicate whether it is cheaper or neutral to
3594 /// do the negation.
3595 virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
3596 bool LegalOps, bool OptForSize,
3597 NegatibleCost &Cost,
3598 unsigned Depth = 0) const;
3600 /// This is the helper function to return the newly negated expression only
3601 /// when the cost is cheaper.
3602 SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG,
3603 bool LegalOps, bool OptForSize,
3604 unsigned Depth = 0) const {
3605 NegatibleCost Cost = NegatibleCost::Expensive;
3606 SDValue Neg =
3607 getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
3608 if (Neg && Cost == NegatibleCost::Cheaper)
3609 return Neg;
3610 // Remove the newly created node to avoid the side effect to the DAG.
3611 if (Neg && Neg.getNode()->use_empty())
3612 DAG.RemoveDeadNode(Neg.getNode());
3613 return SDValue();
3614 }
3616 /// This is the helper function to return the newly negated expression if
3617 /// the cost is not expensive.
3618 SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
3619 bool OptForSize, unsigned Depth = 0) const {
3620 NegatibleCost Cost = NegatibleCost::Expensive;
3621 return getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
3622 }
3624 //===--------------------------------------------------------------------===//
3625 // Lowering methods - These methods must be implemented by targets so that
3626 // the SelectionDAGBuilder code knows how to lower these.
3629 /// Target-specific splitting of values into parts that fit a register
3630 /// storing a legal type
3631 virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL,
3632 SDValue Val, SDValue *Parts,
3633 unsigned NumParts, MVT PartVT,
3634 Optional<CallingConv::ID> CC) const {
3635 return false;
3636 }
3638 /// Target-specific combining of register parts into its original value
3639 virtual SDValue
3640 joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
3641 const SDValue *Parts, unsigned NumParts,
3642 MVT PartVT, EVT ValueVT,
3643 Optional<CallingConv::ID> CC) const {
3644 return SDValue();
3645 }
3647 /// This hook must be implemented to lower the incoming (formal) arguments,
3648 /// described by the Ins array, into the specified DAG. The implementation
3649 /// should fill in the InVals array with legal-type argument values, and
3650 /// return the resulting token chain value.
3651 virtual SDValue LowerFormalArguments(
3652 SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
3653 const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
3654 SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
3655 llvm_unreachable("Not Implemented");
3656 }
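/// Editor's note: a heavily hedged outline of a typical override; CC_MyTarget
/// is a hypothetical TableGen-generated calling-convention function:
/// \code
///   SDValue MyTargetLowering::LowerFormalArguments(
///       SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
///       const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
///       SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
///     MachineFunction &MF = DAG.getMachineFunction();
///     SmallVector<CCValAssign, 16> ArgLocs;
///     CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
///     CCInfo.AnalyzeFormalArguments(Ins, CC_MyTarget);
///     // ...copy each located argument out of registers/stack into InVals...
///     return Chain;
///   }
/// \endcode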
3658 /// This structure contains all information that is necessary for lowering
3659 /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
3660 /// needs to lower a call, and targets will see this struct in their LowerCall
3661 /// implementation.
3662 struct CallLoweringInfo {
3663 SDValue Chain;
3664 Type *RetTy = nullptr;
3665 bool RetSExt : 1;
3666 bool RetZExt : 1;
3667 bool IsVarArg : 1;
3668 bool IsInReg : 1;
3669 bool DoesNotReturn : 1;
3670 bool IsReturnValueUsed : 1;
3671 bool IsConvergent : 1;
3672 bool IsPatchPoint : 1;
3673 bool IsPreallocated : 1;
3674 bool NoMerge : 1;
3676 // IsTailCall should be modified by implementations of
3677 // TargetLowering::LowerCall that perform tail call conversions.
3678 bool IsTailCall = false;
3680 // Is Call lowering done post SelectionDAG type legalization.
3681 bool IsPostTypeLegalization = false;
3683 unsigned NumFixedArgs = -1;
3684 CallingConv::ID CallConv = CallingConv::C;
3685 SDValue Callee;
3686 ArgListTy Args;
3687 SelectionDAG &DAG;
3688 SDLoc DL;
3689 const CallBase *CB = nullptr;
3690 SmallVector<ISD::OutputArg, 32> Outs;
3691 SmallVector<SDValue, 32> OutVals;
3692 SmallVector<ISD::InputArg, 32> Ins;
3693 SmallVector<SDValue, 4> InVals;
3695 CallLoweringInfo(SelectionDAG &DAG)
3696 : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
3697 DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
3698 IsPatchPoint(false), IsPreallocated(false), NoMerge(false),
3699 DAG(DAG) {}
3701 CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
3702 DL = dl;
3703 return *this;
3704 }
3706 CallLoweringInfo &setChain(SDValue InChain) {
3707 Chain = InChain;
3708 return *this;
3709 }
3711 // setCallee with target/module-specific attributes
3712 CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
3713 SDValue Target, ArgListTy &&ArgsList) {
3714 RetTy = ResultType;
3715 Callee = Target;
3716 CallConv = CC;
3717 NumFixedArgs = ArgsList.size();
3718 Args = std::move(ArgsList);
3720 DAG.getTargetLoweringInfo().markLibCallAttributes(
3721 &(DAG.getMachineFunction()), CC, Args);
3722 return *this;
3723 }
3725 CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
3726 SDValue Target, ArgListTy &&ArgsList) {
3727 RetTy = ResultType;
3728 Callee = Target;
3729 CallConv = CC;
3730 NumFixedArgs = ArgsList.size();
3731 Args = std::move(ArgsList);
3732 return *this;
3733 }
3735 CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
3736 SDValue Target, ArgListTy &&ArgsList,
3737 const CallBase &Call) {
3738 RetTy = ResultType;
3740 IsInReg = Call.hasRetAttr(Attribute::InReg);
3741 DoesNotReturn =
3742 Call.doesNotReturn() ||
3743 (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode()));
3744 IsVarArg = FTy->isVarArg();
3745 IsReturnValueUsed = !Call.use_empty();
3746 RetSExt = Call.hasRetAttr(Attribute::SExt);
3747 RetZExt = Call.hasRetAttr(Attribute::ZExt);
3748 NoMerge = Call.hasFnAttr(Attribute::NoMerge);
3750 Callee = Target;
3752 CallConv = Call.getCallingConv();
3753 NumFixedArgs = FTy->getNumParams();
3754 Args = std::move(ArgsList);
3756 CB = &Call;
3758 return *this;
3759 }
3761 CallLoweringInfo &setInRegister(bool Value = true) {
3762 IsInReg = Value;
3763 return *this;
3764 }
3766 CallLoweringInfo &setNoReturn(bool Value = true) {
3767 DoesNotReturn = Value;
3768 return *this;
3769 }
3771 CallLoweringInfo &setVarArg(bool Value = true) {
3772 IsVarArg = Value;
3773 return *this;
3774 }
3776 CallLoweringInfo &setTailCall(bool Value = true) {
3777 IsTailCall = Value;
3778 return *this;
3779 }
3781 CallLoweringInfo &setDiscardResult(bool Value = true) {
3782 IsReturnValueUsed = !Value;
3783 return *this;
3784 }
3786 CallLoweringInfo &setConvergent(bool Value = true) {
3787 IsConvergent = Value;
3788 return *this;
3789 }
3791 CallLoweringInfo &setSExtResult(bool Value = true) {
3792 RetSExt = Value;
3793 return *this;
3794 }
3796 CallLoweringInfo &setZExtResult(bool Value = true) {
3797 RetZExt = Value;
3798 return *this;
3799 }
3801 CallLoweringInfo &setIsPatchPoint(bool Value = true) {
3802 IsPatchPoint = Value;
3803 return *this;
3804 }
3806 CallLoweringInfo &setIsPreallocated(bool Value = true) {
3807 IsPreallocated = Value;
3808 return *this;
3809 }
3811 CallLoweringInfo &setIsPostTypeLegalization(bool Value=true) {
3812 IsPostTypeLegalization = Value;
3813 return *this;
3814 }
3816 ArgListTy &getArgs() {
3817 return Args;
3818 }
3819 };
3821 /// This structure is used to pass arguments to makeLibCall function.
3822 struct MakeLibCallOptions {
3823 // By passing type list before soften to makeLibCall, the target hook
3824 // shouldExtendTypeInLibCall can get the original type before soften.
3825 ArrayRef<EVT> OpsVTBeforeSoften;
3826 EVT RetVTBeforeSoften;
3827 bool IsSExt : 1;
3828 bool DoesNotReturn : 1;
3829 bool IsReturnValueUsed : 1;
3830 bool IsPostTypeLegalization : 1;
3831 bool IsSoften : 1;
3833 MakeLibCallOptions()
3834 : IsSExt(false), DoesNotReturn(false), IsReturnValueUsed(true),
3835 IsPostTypeLegalization(false), IsSoften(false) {}
3837 MakeLibCallOptions &setSExt(bool Value = true) {
3838 IsSExt = Value;
3839 return *this;
3840 }
3842 MakeLibCallOptions &setNoReturn(bool Value = true) {
3843 DoesNotReturn = Value;
3844 return *this;
3845 }
3847 MakeLibCallOptions &setDiscardResult(bool Value = true) {
3848 IsReturnValueUsed = !Value;
3849 return *this;
3850 }
3852 MakeLibCallOptions &setIsPostTypeLegalization(bool Value = true) {
3853 IsPostTypeLegalization = Value;
3854 return *this;
3855 }
3857 MakeLibCallOptions &setTypeListBeforeSoften(ArrayRef<EVT> OpsVT, EVT RetVT,
3858 bool Value = true) {
3859 OpsVTBeforeSoften = OpsVT;
3860 RetVTBeforeSoften = RetVT;
3861 IsSoften = Value;
3862 return *this;
3863 }
3864 };
3866 /// This function lowers an abstract call to a function into an actual call.
3867 /// This returns a pair of operands. The first element is the return value
3868 /// for the function (if RetTy is not VoidTy). The second element is the
3869 /// outgoing token chain. It calls LowerCall to do the actual lowering.
3870 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
3872 /// This hook must be implemented to lower calls into the specified
3873 /// DAG. The outgoing arguments to the call are described by the Outs array,
3874 /// and the values to be returned by the call are described by the Ins
3875 /// array. The implementation should fill in the InVals array with legal-type
3876 /// return values from the call, and return the resulting token chain value.
3877 virtual SDValue
3878 LowerCall(CallLoweringInfo &/*CLI*/,
3879 SmallVectorImpl<SDValue> &/*InVals*/) const {
3880 llvm_unreachable("Not Implemented");
3881 }
3883 /// Target-specific cleanup for formal ByVal parameters.
3884 virtual void HandleByVal(CCState *, unsigned &, Align) const {}
3886 /// This hook should be implemented to check whether the return values
3887 /// described by the Outs array can fit into the return registers. If false
3888 /// is returned, an sret-demotion is performed.
3889 virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
3890 MachineFunction &/*MF*/, bool /*isVarArg*/,
3891 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
3892 LLVMContext &/*Context*/) const
3893 {
3894 // Return true by default to get preexisting behavior.
3895 return true;
3896 }
3898 /// This hook must be implemented to lower outgoing return values, described
3899 /// by the Outs array, into the specified DAG. The implementation should
3900 /// return the resulting token chain value.
3901 virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
3903 const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
3904 const SmallVectorImpl<SDValue> & /*OutVals*/,
3905 const SDLoc & /*dl*/,
3906 SelectionDAG & /*DAG*/) const {
3907 llvm_unreachable("Not Implemented");
3908 }
3910 /// Return true if result of the specified node is used by a return node
3911 /// only. It also computes and returns the input chain for the tail call.
3913 /// This is used to determine whether it is possible to codegen a libcall as
3914 /// tail call at legalization time.
3915 virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
3916 return false;
3917 }
3919 /// Return true if the target may be able to emit the call instruction as a
3920 /// call. This is used by optimization passes to determine if it's profitable
3921 /// to duplicate return instructions to enable tailcall optimization.
3922 virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
3923 return false;
3924 }
3926 /// Return the builtin name for the __builtin___clear_cache intrinsic
3927 /// Default is to invoke the clear cache library call
3928 virtual const char * getClearCacheBuiltinName() const {
3929 return "__clear_cache";
3930 }
3932 /// Return the register ID of the name passed in. Used by named register
3933 /// global variables extension. There is no target-independent behaviour
3934 /// so the default action is to bail.
3935 virtual Register getRegisterByName(const char* RegName, LLT Ty,
3936 const MachineFunction &MF) const {
3937 report_fatal_error("Named registers not implemented for this target");
3938 }
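/// Editor's note: a hedged sketch of an override that exposes the stack
/// pointer to read_register/write_register; "MyTarget::SP" is hypothetical:
/// \code
///   Register MyTargetLowering::getRegisterByName(
///       const char *RegName, LLT Ty, const MachineFunction &MF) const {
///     Register Reg = StringSwitch<Register>(RegName)
///                        .Case("sp", MyTarget::SP)
///                        .Default(Register());
///     if (Reg)
///       return Reg;
///     report_fatal_error("Invalid register name global variable");
///   }
/// \endcode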
3940 /// Return the type that should be used to zero or sign extend a
3941 /// zeroext/signext integer return value. FIXME: Some C calling conventions
3942 /// require the return type to be promoted, but this is not true all the time,
3943 /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
3944 /// conventions. The frontend should handle this and include all of the
3945 /// necessary information.
3946 virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
3947 ISD::NodeType /*ExtendKind*/) const {
3948 EVT MinVT = getRegisterType(Context, MVT::i32);
3949 return VT.bitsLT(MinVT) ? MinVT : VT;
3950 }
3952 /// For some targets, an LLVM struct type must be broken down into multiple
3953 /// simple types, but the calling convention specifies that the entire struct
3954 /// must be passed in a block of consecutive registers.
3955 virtual bool
3956 functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
3957 bool isVarArg) const {
3958 return false;
3959 }
3961 /// For most targets, an LLVM type must be broken down into multiple
3962 /// smaller types. Usually the halves are ordered according to the endianness
3963 /// but for some platforms that would break. So this method will default to
3964 /// matching the endianness but can be overridden.
3965 virtual bool
3966 shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const {
3967 return DL.isLittleEndian();
3968 }
3970 /// Returns a 0 terminated array of registers that can be safely used as
3971 /// scratch registers.
3972 virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
3973 return nullptr;
3974 }
3976 /// This callback is used to prepare for a volatile or atomic load.
3977 /// It takes a chain node as input and returns the chain for the load itself.
3979 /// Having a callback like this is necessary for targets like SystemZ,
3980 /// which allows a CPU to reuse the result of a previous load indefinitely,
3981 /// even if a cache-coherent store is performed by another CPU. The default
3982 /// implementation does nothing.
3983 virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
3984 SelectionDAG &DAG) const {
3985 return Chain;
3986 }
3988 /// Should SelectionDAG lower an atomic store of the given kind as a normal
3989 /// StoreSDNode (as opposed to an AtomicSDNode)? NOTE: The intention is to
3990 /// eventually migrate all targets to using StoreSDNodes, but porting is
3991 /// being done target at a time.
3992 virtual bool lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
3993 assert(SI.isAtomic() && "violated precondition");
3994 return false;
3995 }
3997 /// Should SelectionDAG lower an atomic load of the given kind as a normal
3998 /// LoadSDNode (as opposed to an AtomicSDNode)? NOTE: The intention is to
3999 /// eventually migrate all targets to using LoadSDNodes, but porting is
4000 /// being done target at a time.
4001 virtual bool lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
4002 assert(LI.isAtomic() && "violated precondition");
4003 return false;
4004 }
4007 /// This callback is invoked by the type legalizer to legalize nodes with an
4008 /// illegal operand type but legal result types. It replaces the
4009 /// LowerOperation callback in the type Legalizer. The reason we can not do
4010 /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
4011 /// use this callback.
4013 /// TODO: Consider merging with ReplaceNodeResults.
4015 /// The target places new result values for the node in Results (their number
4016 /// and types must exactly match those of the original return values of
4017 /// the node), or leaves Results empty, which indicates that the node is not
4018 /// to be custom lowered after all.
4019 /// The default implementation calls LowerOperation.
4020 virtual void LowerOperationWrapper(SDNode *N,
4021 SmallVectorImpl<SDValue> &Results,
4022 SelectionDAG &DAG) const;
4024 /// This callback is invoked for operations that are unsupported by the
4025 /// target, which are registered to use 'custom' lowering, and whose defined
4026 /// values are all legal. If the target has no operations that require custom
4027 /// lowering, it need not implement this. The default implementation of this
4028 /// aborts.
4029 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
4031 /// This callback is invoked when a node result type is illegal for the
4032 /// target, and the operation was registered to use 'custom' lowering for that
4033 /// result type. The target places new result values for the node in Results
4034 /// (their number and types must exactly match those of the original return
4035 /// values of the node), or leaves Results empty, which indicates that the
4036 /// node is not to be custom lowered after all.
4038 /// If the target has no operations that require custom lowering, it need not
4039 /// implement this. The default implementation aborts.
4040 virtual void ReplaceNodeResults(SDNode * /*N*/,
4041 SmallVectorImpl<SDValue> &/*Results*/,
4042 SelectionDAG &/*DAG*/) const {
4043 llvm_unreachable("ReplaceNodeResults not implemented for this target!");
4046 /// This method returns the name of a target specific DAG node.
4047 virtual const char *getTargetNodeName(unsigned Opcode) const;
4049 /// This method returns a target specific FastISel object, or null if the
4050 /// target does not support "fast" ISel.
4051 virtual FastISel *createFastISel(FunctionLoweringInfo &,
4052 const TargetLibraryInfo *) const {
4053 return nullptr;
4054 }
4056 bool verifyReturnAddressArgumentIsConstant(SDValue Op,
4057 SelectionDAG &DAG) const;
4059 //===--------------------------------------------------------------------===//
4060 // Inline Asm Support hooks
4063 /// This hook allows the target to expand an inline asm call to be explicit
4064 /// llvm code if it wants to. This is useful for turning simple inline asms
4065 /// into LLVM intrinsics, which gives the compiler more information about the
4066 /// behavior of the code.
4067 virtual bool ExpandInlineAsm(CallInst *) const {
4068 return false;
4069 }
4071 enum ConstraintType {
4072 C_Register, // Constraint represents specific register(s).
4073 C_RegisterClass, // Constraint represents any of register(s) in class.
4074 C_Memory, // Memory constraint.
4075 C_Immediate, // Requires an immediate.
4076 C_Other, // Something else.
4077 C_Unknown // Unsupported constraint.
4078 };
4080 enum ConstraintWeight {
4081 // Generic weights.
4082 CW_Invalid = -1, // No match.
4083 CW_Okay = 0, // Acceptable.
4084 CW_Good = 1, // Good weight.
4085 CW_Better = 2, // Better weight.
4086 CW_Best = 3, // Best weight.
4088 // Well-known weights.
4089 CW_SpecificReg = CW_Okay, // Specific register operands.
4090 CW_Register = CW_Good, // Register operands.
4091 CW_Memory = CW_Better, // Memory operands.
4092 CW_Constant = CW_Best, // Constant operand.
4093 CW_Default = CW_Okay // Default or don't know type.
4094 };
4096 /// This contains information for each constraint that we are lowering.
4097 struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
4098 /// This contains the actual string for the code, like "m". TargetLowering
4099 /// picks the 'best' code from ConstraintInfo::Codes that most closely
4100 /// matches the operand.
4101 std::string ConstraintCode;
4103 /// Information about the constraint code, e.g. Register, RegisterClass,
4104 /// Memory, Other, Unknown.
4105 TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;
4107 /// If this is the result output operand or a clobber, this is null,
4108 /// otherwise it is the incoming operand to the CallInst. This gets
4109 /// modified as the asm is processed.
4110 Value *CallOperandVal = nullptr;
4112 /// The ValueType for the operand value.
4113 MVT ConstraintVT = MVT::Other;
4115 /// Copy constructor for copying from a ConstraintInfo.
4116 AsmOperandInfo(InlineAsm::ConstraintInfo Info)
4117 : InlineAsm::ConstraintInfo(std::move(Info)) {}
4119 /// Return true if this is an input operand that is a matching constraint
4120 /// like "4".
4121 bool isMatchingInputConstraint() const;
4123 /// If this is an input matching constraint, this method returns the output
4124 /// operand it matches.
4125 unsigned getMatchedOperand() const;
4126 };
4128 using AsmOperandInfoVector = std::vector<AsmOperandInfo>;
4130 /// Split up the constraint string from the inline assembly value into the
4131 /// specific constraints and their prefixes, and also tie in the associated
4132 /// operand values. If this returns an empty vector, and if the constraint
4133 /// string itself isn't empty, there was an error parsing.
4134 virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
4135 const TargetRegisterInfo *TRI,
4136 const CallBase &Call) const;
4138 /// Examine constraint type and operand type and determine a weight value.
4139 /// The operand object must already have been set up with the operand type.
4140 virtual ConstraintWeight getMultipleConstraintMatchWeight(
4141 AsmOperandInfo &info, int maIndex) const;
4143 /// Examine constraint string and operand type and determine a weight value.
4144 /// The operand object must already have been set up with the operand type.
4145 virtual ConstraintWeight getSingleConstraintMatchWeight(
4146 AsmOperandInfo &info, const char *constraint) const;
4148 /// Determines the constraint code and constraint type to use for the specific
4149 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
4150 /// If the actual operand being passed in is available, it can be passed in as
4151 /// Op, otherwise an empty SDValue can be passed.
4152 virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
4153 SDValue Op,
4154 SelectionDAG *DAG = nullptr) const;
4156 /// Given a constraint, return the type of constraint it is for this target.
4157 virtual ConstraintType getConstraintType(StringRef Constraint) const;
4159 /// Given a physical register constraint (e.g. {edx}), return the register
4160 /// number and the register class for the register.
4162 /// Given a register class constraint, like 'r', if this corresponds directly
4163 /// to an LLVM register class, return a register of 0 and the register class
4164 /// matching the constraint.
4166 /// This should only be used for C_Register constraints. On error, this
4167 /// returns a register number of 0 and a null register class pointer.
4168 virtual std::pair<unsigned, const TargetRegisterClass *>
4169 getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
4170 StringRef Constraint, MVT VT) const;
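/// Editor's note: a hedged sketch of a typical override;
/// "MyTarget::GPRRegClass" is a hypothetical register class:
/// \code
///   std::pair<unsigned, const TargetRegisterClass *>
///   MyTargetLowering::getRegForInlineAsmConstraint(
///       const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
///     if (Constraint.size() == 1 && Constraint[0] == 'r')
///       return std::make_pair(0U, &MyTarget::GPRRegClass);
///     // Fall back to generic handling (physical registers like "{edx}").
///     return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
///   }
/// \endcode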
4172 virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
4173 if (ConstraintCode == "m")
4174 return InlineAsm::Constraint_m;
4175 return InlineAsm::Constraint_Unknown;
4176 }
4178 /// Try to replace an X constraint, which matches anything, with another that
4179 /// has more specific requirements based on the type of the corresponding
4180 /// operand. This returns null if there is no replacement to make.
4181 virtual const char *LowerXConstraint(EVT ConstraintVT) const;
4183 /// Lower the specified operand into the Ops vector. If it is invalid, don't
4184 /// add anything to Ops.
4185 virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
4186 std::vector<SDValue> &Ops,
4187 SelectionDAG &DAG) const;
4189 // Lower custom output constraints. If invalid, return SDValue().
4190 virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
4191 SDLoc DL,
4192 const AsmOperandInfo &OpInfo,
4193 SelectionDAG &DAG) const;
4195 //===--------------------------------------------------------------------===//
4196 // Div utility functions
4198 SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
4199 SmallVectorImpl<SDNode *> &Created) const;
4200 SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
4201 SmallVectorImpl<SDNode *> &Created) const;
4203 /// Targets may override this function to provide custom SDIV lowering for
4204 /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
4205 /// assumes SDIV is expensive and replaces it with a series of other integer
4206 /// operations.
4207 virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
4208 SelectionDAG &DAG,
4209 SmallVectorImpl<SDNode *> &Created) const;
4211 /// Indicate whether this target prefers to combine FDIVs with the same
4212 /// divisor. If the transform should never be done, return zero. If the
4213 /// transform should be done, return the minimum number of divisor uses
4214 /// that must exist.
4215 virtual unsigned combineRepeatedFPDivisors() const {
4216 return 0;
4217 }
4219 /// Hooks for building estimates in place of slower divisions and square
4220 /// roots.
4222 /// Return either a square root or its reciprocal estimate value for the input
4223 /// operand.
4224 /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
4225 /// 'Enabled' as set by a potential default override attribute.
4226 /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
4227 /// refinement iterations required to generate a sufficient (though not
4228 /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
4229 /// The boolean UseOneConstNR output is used to select a Newton-Raphson
4230 /// algorithm implementation that uses either one or two constants.
4231 /// The boolean Reciprocal is used to select whether the estimate is for the
4232 /// square root of the input operand or the reciprocal of its square root.
4233 /// A target may choose to implement its own refinement within this function.
4234 /// If that's true, then return '0' as the number of RefinementSteps to avoid
4235 /// any further refinement of the estimate.
4236 /// An empty SDValue return means no estimate sequence can be created.
4237 virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
4238 int Enabled, int &RefinementSteps,
4239 bool &UseOneConstNR, bool Reciprocal) const {
4240 return SDValue();
4241 }
4243 /// Return a reciprocal estimate value for the input operand.
4244 /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
4245 /// 'Enabled' as set by a potential default override attribute.
4246 /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
4247 /// refinement iterations required to generate a sufficient (though not
4248 /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
4249 /// A target may choose to implement its own refinement within this function.
4250 /// If that's true, then return '0' as the number of RefinementSteps to avoid
4251 /// any further refinement of the estimate.
4252 /// An empty SDValue return means no estimate sequence can be created.
4253 virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
4254 int Enabled, int &RefinementSteps) const {
4255 return SDValue();
4256 }
4258 //===--------------------------------------------------------------------===//
4259 // Legalization utility functions
4262 /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
4263 /// respectively, each computing an n/2-bit part of the result.
4264 /// \param Result A vector that will be filled with the parts of the result
4265 /// in little-endian order.
4266 /// \param LL Low bits of the LHS of the MUL. You can use this parameter
4267 /// if you want to control how low bits are extracted from the LHS.
4268 /// \param LH High bits of the LHS of the MUL. See LL for meaning.
4269 /// \param RL Low bits of the RHS of the MUL. See LL for meaning
4270 /// \param RH High bits of the RHS of the MUL. See LL for meaning.
4271 /// \returns true if the node has been expanded, false if it has not
4272 bool expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl, SDValue LHS,
4273 SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
4274 SelectionDAG &DAG, MulExpansionKind Kind,
4275 SDValue LL = SDValue(), SDValue LH = SDValue(),
4276 SDValue RL = SDValue(), SDValue RH = SDValue()) const;

/// Expand a MUL into two nodes. One that computes the high bits of
/// the result and one that computes the low bits.
/// \param HiLoVT The value type to use for the Lo and Hi nodes.
/// \param LL Low bits of the LHS of the MUL. You can use this parameter
/// if you want to control how low bits are extracted from the LHS.
/// \param LH High bits of the LHS of the MUL. See LL for meaning.
/// \param RL Low bits of the RHS of the MUL. See LL for meaning.
/// \param RH High bits of the RHS of the MUL. See LL for meaning.
/// \returns true if the node has been expanded, false if it has not.
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
               SelectionDAG &DAG, MulExpansionKind Kind,
               SDValue LL = SDValue(), SDValue LH = SDValue(),
               SDValue RL = SDValue(), SDValue RH = SDValue()) const;
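
// Both expansions rest on the schoolbook decomposition of an n-bit product
// into n/2-bit parts. Writing h = n/2:
//
//   (LH * 2^h + LL) * (RH * 2^h + RL)
//       = LL*RL + (LL*RH + LH*RL) * 2^h + LH*RH * 2^(2h)
//
// An illustrative scalar sketch of the same idea (not the DAG-level code):
//
//   // Split a u32 x u32 -> u64 multiply into u16 halves.
//   uint64_t mulLoHi(uint32_t L, uint32_t R) {
//     uint32_t LL = L & 0xffff, LH = L >> 16;
//     uint32_t RL = R & 0xffff, RH = R >> 16;
//     uint64_t Lo  = (uint64_t)LL * RL;
//     uint64_t Mid = (uint64_t)LL * RH + (uint64_t)LH * RL;
//     uint64_t Hi  = (uint64_t)LH * RH;
//     return Lo + (Mid << 16) + (Hi << 32);
//   }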

/// Expand funnel shift.
/// \param N Node to expand
/// \param Result output after conversion
/// \returns true if the expansion was successful, false otherwise
bool expandFunnelShift(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
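
// For bit width BW and a shift amount Z reduced modulo BW, the expansion is
// conceptually:
//
//   fshl(X, Y, Z) == (X << Z) | (Y >> (BW - Z))
//   fshr(X, Y, Z) == (X << (BW - Z)) | (Y >> Z)
//
// with Z == 0 handled specially, since BW - 0 would be an out-of-range
// shift. Rotates (below) are the X == Y special case. A scalar sketch:
//
//   uint32_t fshl32(uint32_t X, uint32_t Y, uint32_t Z) {
//     Z &= 31;                        // Reduce shift amount modulo BW.
//     if (Z == 0)
//       return X;                     // Avoid the undefined 32-bit shift.
//     return (X << Z) | (Y >> (32 - Z));
//   }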

/// Expand rotations.
/// \param N Node to expand
/// \param Result output after conversion
/// \returns true if the expansion was successful, false otherwise
bool expandROT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

/// Expand float to SINT conversion, e.g. f32 to i64.
/// \param N Node to expand
/// \param Result output after conversion
/// \returns true if the expansion was successful, false otherwise
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

/// Expand float to UINT conversion.
/// \param N Node to expand
/// \param Result output after conversion
/// \param Chain output chain after conversion
/// \returns true if the expansion was successful, false otherwise
bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain,
                      SelectionDAG &DAG) const;
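
// When only a signed conversion is natively available, the unsigned
// expansion uses the classic rebasing trick: values below 2^(BW-1) convert
// directly, larger values are rebased and the top bit is patched back in.
// Illustrative scalar form for f64 -> u64 (a sketch, not the exact DAG
// sequence produced):
//
//   uint64_t fptoui64(double X) {
//     const double Two63 = 9223372036854775808.0;  // 2^63
//     if (X < Two63)
//       return (uint64_t)(int64_t)X;               // In signed range.
//     return (uint64_t)(int64_t)(X - Two63) | (1ULL << 63);
//   }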

/// Expand UINT to double conversion, e.g. i64 to f64.
/// \param N Node to expand
/// \param Result output after conversion
/// \param Chain output chain after conversion
/// \returns true if the expansion was successful, false otherwise
bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain,
                      SelectionDAG &DAG) const;

/// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;

/// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes;
/// vector nodes can only succeed if all operations are legal/custom.
/// \param N Node to expand
/// \param Result output after conversion
/// \returns true if the expansion was successful, false otherwise
bool expandCTPOP(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
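
// The scalar expansion is the parallel bit-count from Hacker's Delight.
// A 32-bit sketch of the same ladder the DAG expansion builds:
//
//   uint32_t ctpop32(uint32_t V) {
//     V = V - ((V >> 1) & 0x55555555);                 // 2-bit sums
//     V = (V & 0x33333333) + ((V >> 2) & 0x33333333);  // 4-bit sums
//     V = (V + (V >> 4)) & 0x0F0F0F0F;                 // 8-bit sums
//     return (V * 0x01010101) >> 24;                   // Horizontal add.
//   }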

/// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes;
/// vector nodes can only succeed if all operations are legal/custom.
/// \param N Node to expand
/// \param Result output after conversion
/// \returns true if the expansion was successful, false otherwise
bool expandCTLZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

/// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes;
/// vector nodes can only succeed if all operations are legal/custom.
/// \param N Node to expand
/// \param Result output after conversion
/// \returns true if the expansion was successful, false otherwise
bool expandCTTZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
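
// Both leading- and trailing-zero counts can be reduced to a population
// count when CTPOP is available: smear the highest set bit rightward for
// CTLZ, or isolate the bits below the lowest set bit for CTTZ. Sketch
// (reusing ctpop32 from above):
//
//   uint32_t ctlz32(uint32_t V) {
//     V |= V >> 1; V |= V >> 2; V |= V >> 4; V |= V >> 8; V |= V >> 16;
//     return 32 - ctpop32(V);       // Zeros above the highest set bit.
//   }
//   uint32_t cttz32(uint32_t V) {
//     return ctpop32(~V & (V - 1)); // Ones strictly below the lowest set bit.
//   }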

/// Expand ABS nodes. Expands vector/scalar ABS nodes;
/// vector nodes can only succeed if all operations are legal/custom.
/// (ABS x) -> (XOR (ADD x, (SRA x, type_size - 1)), (SRA x, type_size - 1))
/// \param N Node to expand
/// \param Result output after conversion
/// \returns true if the expansion was successful, false otherwise
bool expandABS(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
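
// The arithmetic shift produces an all-ones mask for negative inputs and
// zero otherwise, so add-then-xor is a branchless conditional negate.
// Scalar sketch (with the usual caveat that INT32_MIN maps to itself):
//
//   int32_t abs32(int32_t X) {
//     int32_t Sign = X >> 31;       // All ones if X < 0, else zero.
//     return (X + Sign) ^ Sign;     // Negative: (X - 1) ^ -1 == -X.
//   }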

/// Turn load of vector type into a load of the individual elements.
/// \param LD load to expand
/// \returns BUILD_VECTOR and TokenFactor nodes.
std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD,
                                                SelectionDAG &DAG) const;

/// Turn a store of a vector type into stores of the individual elements.
/// \param ST Store with a vector value type
/// \returns TokenFactor of the individual store chains.
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;

/// Expands an unaligned load to 2 half-size loads for an integer, and
/// possibly more for vectors.
std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
                                                SelectionDAG &DAG) const;

/// Expands an unaligned store to 2 half-size stores for integer values, and
/// possibly more for vectors.
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;

/// Increments memory address \p Addr according to the type of the value
/// \p DataVT that should be stored. If the data is stored in compressed
/// form, the memory address should be incremented according to the number of
/// the stored elements. This number is equal to the number of '1' bits
/// in the \p Mask.
/// \p DataVT is a vector type. \p Mask is a vector value.
/// \p DataVT and \p Mask have the same number of vector elements.
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
                               EVT DataVT, SelectionDAG &DAG,
                               bool IsCompressedMemory) const;
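
// For a compressed store only the elements whose mask bit is set are
// written, so the pointer advances by popcount(Mask) elements rather than
// the full vector width. A hedged scalar sketch, assuming an 8-element mask
// in the low bits of 'Mask' and using the GCC/Clang builtin:
//
//   char *incrementCompressed(char *Addr, uint8_t Mask, size_t EltSize) {
//     unsigned Stored = __builtin_popcount(Mask); // # of '1' bits == # stored
//     return Addr + Stored * EltSize;
//   }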

/// Get a pointer to vector element \p Idx located in memory for a vector of
/// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
/// bounds the returned pointer is unspecified, but will be within the vector
/// bounds.
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                SDValue Index) const;

/// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
/// method accepts integers as its arguments.
SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;
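
// Saturating arithmetic clamps to the type's limits instead of wrapping.
// The unsigned-add case reduces to one compare of the wrapped result:
//
//   uint32_t uaddsat32(uint32_t A, uint32_t B) {
//     uint32_t R = A + B;            // May wrap around.
//     return R < A ? UINT32_MAX : R; // Wrapped iff R < A; clamp to max.
//   }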

/// Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT]. This
/// method accepts integers as its arguments.
SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const;
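
// A fixed-point multiply is a widened integer multiply followed by an
// arithmetic shift right by the scale. Sketch for a Q16.16 format:
//
//   int32_t mulQ16(int32_t A, int32_t B) {
//     return (int32_t)(((int64_t)A * B) >> 16); // Keep the middle 32 bits.
//   }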

/// Method for building the DAG expansion of ISD::[US]DIVFIX[SAT]. This
/// method accepts integers as its arguments.
/// Note: This method may fail if the division could not be performed
/// within the type. Clients must retry with a wider type if this happens.
SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
                            SDValue LHS, SDValue RHS,
                            unsigned Scale, SelectionDAG &DAG) const;

/// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion
/// always succeeds and populates the Result and Overflow arguments.
void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                    SelectionDAG &DAG) const;

/// Method for building the DAG expansion of ISD::S(ADD|SUB)O. Expansion
/// always succeeds and populates the Result and Overflow arguments.
void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                    SelectionDAG &DAG) const;
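
// Both overflow flags reduce to cheap comparisons on the wrapped result:
// unsigned addition overflowed iff the result is below an operand; signed
// addition overflowed iff both operands share a sign the result lacks.
//
//   bool uaddo32(uint32_t A, uint32_t B, uint32_t &R) {
//     R = A + B;
//     return R < A;                    // Carry out of the top bit.
//   }
//   bool saddo32(int32_t A, int32_t B, int32_t &R) {
//     R = (int32_t)((uint32_t)A + (uint32_t)B); // Two's-complement wrap.
//     return ((A ^ R) & (B ^ R)) < 0;  // Result sign differs from both.
//   }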

/// Method for building the DAG expansion of ISD::[US]MULO. Returns whether
/// expansion was successful and populates the Result and Overflow arguments.
bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                SelectionDAG &DAG) const;

/// Expand a VECREDUCE_* into an explicit calculation.
SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const;

/// Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
/// Returns true if the expansion was successful.
bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const;

//===--------------------------------------------------------------------===//
// Instruction Emitting Hooks
//

/// This method should be implemented by targets that mark instructions with
/// the 'usesCustomInserter' flag. These instructions are special in various
/// ways, which require special support to insert. The specified MachineInstr
/// is created but not inserted into any basic blocks, and this method is
/// called to expand it into a sequence of instructions, potentially also
/// creating new basic blocks and control flow.
/// As long as the returned basic block is different (i.e., we created a new
/// one), the custom inserter is free to modify the rest of \p MBB.
virtual MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
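
// A hedged sketch of a typical override, assuming a hypothetical pseudo
// instruction MyTarget::SELECT_PSEUDO that must be lowered into a control
// flow diamond (the most common use of usesCustomInserter); the helper
// emitSelectDiamond is likewise hypothetical:
//
//   MachineBasicBlock *
//   MyTargetLowering::EmitInstrWithCustomInserter(
//       MachineInstr &MI, MachineBasicBlock *MBB) const {
//     switch (MI.getOpcode()) {
//     case MyTarget::SELECT_PSEUDO:
//       // Split MBB, insert the branch, and join with a PHI.
//       return emitSelectDiamond(MI, MBB);
//     default:
//       llvm_unreachable("unexpected instr type to insert");
//     }
//   }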

/// This method should be implemented by targets that mark instructions with
/// the 'hasPostISelHook' flag. These instructions must be adjusted after
/// instruction selection by target hooks, e.g. to fill in optional defs for
/// ARM 's'-setting instructions.
virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                           SDNode *Node) const;

/// If this function returns true, SelectionDAGBuilder emits a
/// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
virtual bool useLoadStackGuardNode() const {
  return false;
}

virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                    const SDLoc &DL) const {
  llvm_unreachable("not implemented for this target");
}

/// Lower TLS global address SDNode for target independent emulated TLS model.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) const;

/// Expands target specific indirect branch for the case of JumpTable
/// expansion.
virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
                                       SDValue Addr, SelectionDAG &DAG) const {
  return DAG.getNode(ISD::BRIND, dl, MVT::Other, Value, Addr);
}

// seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
// If we're comparing for equality to zero and isCtlzFast is true, expose the
// fact that this can be implemented as a ctlz/srl pair, so that the DAG
// combiner can fold the new nodes.
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
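
// Worked i32 example: ctlz returns 32 only for a zero input, and 32 is the
// only value in [0, 32] with bit 5 set, so the shifted count is exactly the
// seteq(x, 0) result:
//
//   uint32_t isZero32(uint32_t X) {
//     unsigned LZ = X ? __builtin_clz(X) : 32; // ctlz; builtin is UB at 0.
//     return LZ >> 5;                          // 32 >> 5 == 1, else 0.
//   }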

private:
SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                         const SDLoc &DL, DAGCombinerInfo &DCI) const;
SDValue foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                           const SDLoc &DL, DAGCombinerInfo &DCI) const;

SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0,
                                             SDValue N1, ISD::CondCode Cond,
                                             DAGCombinerInfo &DCI,
                                             const SDLoc &DL) const;

// (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0
SDValue optimizeSetCCByHoistingAndByConstFromLogicalShift(
    EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
    DAGCombinerInfo &DCI, const SDLoc &DL) const;

SDValue prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
                          SDValue CompTargetNode, ISD::CondCode Cond,
                          DAGCombinerInfo &DCI, const SDLoc &DL,
                          SmallVectorImpl<SDNode *> &Created) const;
SDValue buildUREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                        ISD::CondCode Cond, DAGCombinerInfo &DCI,
                        const SDLoc &DL) const;
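
// These folds use the modular multiplicative-inverse trick (cf. Hacker's
// Delight, section 10-17): for odd C and bit width BW,
// (X % C == 0) iff (X * C^-1 mod 2^BW) <= ((2^BW - 1) / C).
// Illustrative check for C == 5, BW == 32, where 0xCCCCCCCD == 5^-1 mod 2^32:
//
//   bool isMultipleOf5(uint32_t X) {
//     return X * 0xCCCCCCCDu <= 0x33333333u; // 0x33333333 == (2^32 - 1) / 5
//   }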

SDValue prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
                          SDValue CompTargetNode, ISD::CondCode Cond,
                          DAGCombinerInfo &DCI, const SDLoc &DL,
                          SmallVectorImpl<SDNode *> &Created) const;
SDValue buildSREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                        ISD::CondCode Cond, DAGCombinerInfo &DCI,
                        const SDLoc &DL) const;
};

/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI, const DataLayout &DL);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETLOWERING_H