//===- SelectionDAGBuilder.h - Selection-DAG building -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H

#include "StatepointLowering.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include <cassert>
#include <cstdint>
#include <numeric>
#include <utility>
#include <vector>

namespace llvm {

class AtomicCmpXchgInst;
class CatchReturnInst;
class CatchSwitchInst;
class CleanupReturnInst;
class ConstrainedFPIntrinsic;
class DILocalVariable;
class FunctionLoweringInfo;
class MachineBasicBlock;
class TargetLibraryInfo;
class UnreachableInst;

//===----------------------------------------------------------------------===//
/// SelectionDAGBuilder - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
///
class SelectionDAGBuilder {
  /// CurInst - The current instruction being visited.
  const Instruction *CurInst = nullptr;

  DenseMap<const Value*, SDValue> NodeMap;

  /// UnusedArgNodeMap - Maps argument values for unused arguments. This is
  /// used to preserve debug information for incoming arguments.
  DenseMap<const Value*, SDValue> UnusedArgNodeMap;

  /// DanglingDebugInfo - Helper type for DanglingDebugInfoMap.
  class DanglingDebugInfo {
    const DbgValueInst* DI = nullptr;
    DebugLoc dl;
    unsigned SDNodeOrder = 0;

  public:
    DanglingDebugInfo() = default;
    DanglingDebugInfo(const DbgValueInst *di, DebugLoc DL, unsigned SDNO)
        : DI(di), dl(std::move(DL)), SDNodeOrder(SDNO) {}

    const DbgValueInst* getDI() { return DI; }
    DebugLoc getdl() { return dl; }
    unsigned getSDNodeOrder() { return SDNodeOrder; }
  };

  /// DanglingDebugInfoVector - Helper type for DanglingDebugInfoMap.
  typedef std::vector<DanglingDebugInfo> DanglingDebugInfoVector;

  /// DanglingDebugInfoMap - Keeps track of dbg_values for which we have not
  /// yet seen the referent. We defer handling these until we do see it.
  DenseMap<const Value*, DanglingDebugInfoVector> DanglingDebugInfoMap;
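
  // For illustration (informal sketch, not part of the interface): when a
  // dbg.value intrinsic refers to an SSA value that has not been lowered yet,
  // the intrinsic is parked in DanglingDebugInfoMap keyed by that value, and
  // resolveDanglingDebugInfo() later attaches the debug info once getValue()
  // has produced an SDValue for it.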

  /// PendingLoads - Loads are not emitted to the program immediately. We bunch
  /// them up and then emit token factor nodes when possible. This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
  SmallVector<SDValue, 8> PendingLoads;

  /// State used while lowering a statepoint sequence (gc_statepoint,
  /// gc_relocate, and gc_result). See StatepointLowering.hpp/cpp for details.
  StatepointLoweringState StatepointLowering;

  /// PendingExports - CopyToReg nodes that copy values to virtual registers
  /// for export to other blocks need to be emitted before any terminator
  /// instruction, but they have no other ordering requirements. We bunch them
  /// up and then emit a single token factor for them just before terminator
  /// instructions.
  SmallVector<SDValue, 8> PendingExports;

  /// SDNodeOrder - A unique monotonically increasing number used to order the
  /// SDNodes we create.
  unsigned SDNodeOrder;

  enum CaseClusterKind {
    /// A cluster of adjacent case labels with the same destination, or just one
    /// case.
    CC_Range,
    /// A cluster of cases suitable for jump table lowering.
    CC_JumpTable,
    /// A cluster of cases suitable for bit test lowering.
    CC_BitTests
  };

  /// A cluster of case labels.
  struct CaseCluster {
    CaseClusterKind Kind;
    const ConstantInt *Low, *High;
    MachineBasicBlock *MBB;
    unsigned JTCasesIndex;
    unsigned BTCasesIndex;
    BranchProbability Prob;

    static CaseCluster range(const ConstantInt *Low, const ConstantInt *High,
                             MachineBasicBlock *MBB, BranchProbability Prob) {
      CaseCluster C;
      C.Kind = CC_Range;
      C.Low = Low;
      C.High = High;
      C.MBB = MBB;
      C.Prob = Prob;
      return C;
    }

    static CaseCluster jumpTable(const ConstantInt *Low,
                                 const ConstantInt *High, unsigned JTCasesIndex,
                                 BranchProbability Prob) {
      CaseCluster C;
      C.Kind = CC_JumpTable;
      C.Low = Low;
      C.High = High;
      C.JTCasesIndex = JTCasesIndex;
      C.Prob = Prob;
      return C;
    }

    static CaseCluster bitTests(const ConstantInt *Low, const ConstantInt *High,
                                unsigned BTCasesIndex, BranchProbability Prob) {
      CaseCluster C;
      C.Kind = CC_BitTests;
      C.Low = Low;
      C.High = High;
      C.BTCasesIndex = BTCasesIndex;
      C.Prob = Prob;
      return C;
    }
  };

  using CaseClusterVector = std::vector<CaseCluster>;
  using CaseClusterIt = CaseClusterVector::iterator;
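
  // Rough example (for illustration only): a switch over {0, 1, 2, 40, 41, 42}
  // in which 0-2 branch to one block and 40-42 to another would typically be
  // represented as two CC_Range clusters, [0, 2] and [40, 42], each carrying
  // its destination MBB and combined branch probability.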

  struct CaseBits {
    uint64_t Mask = 0;
    MachineBasicBlock* BB = nullptr;
    unsigned Bits = 0;
    BranchProbability ExtraProb;

    CaseBits() = default;
    CaseBits(uint64_t mask, MachineBasicBlock* bb, unsigned bits,
             BranchProbability Prob):
      Mask(mask), BB(bb), Bits(bits), ExtraProb(Prob) {}
  };

  using CaseBitsVector = std::vector<CaseBits>;

  /// Sort Clusters and merge adjacent cases.
  void sortAndRangeify(CaseClusterVector &Clusters);

  /// CaseBlock - This structure is used to communicate between
  /// SelectionDAGBuilder and SDISel for the code generation of additional basic
  /// blocks needed by multi-case switch statements.
  struct CaseBlock {
    // CC - the condition code to use for the case block's setcc node.
    ISD::CondCode CC;

    // CmpLHS/CmpRHS/CmpMHS - The LHS/MHS/RHS of the comparison to emit.
    // Emit by default LHS op RHS. MHS is used for range comparisons:
    // If MHS is not null: (LHS <= MHS) and (MHS <= RHS).
    const Value *CmpLHS, *CmpMHS, *CmpRHS;

    // TrueBB/FalseBB - the block to branch to if the setcc is true/false.
    MachineBasicBlock *TrueBB, *FalseBB;

    // ThisBB - the block into which to emit the code for the setcc and branches.
    MachineBasicBlock *ThisBB;

    /// The debug location of the instruction this CaseBlock was
    /// created from.
    SDLoc DL;

    // TrueProb/FalseProb - branch weights.
    BranchProbability TrueProb, FalseProb;

    CaseBlock(ISD::CondCode cc, const Value *cmplhs, const Value *cmprhs,
              const Value *cmpmiddle, MachineBasicBlock *truebb,
              MachineBasicBlock *falsebb, MachineBasicBlock *me,
              SDLoc dl,
              BranchProbability trueprob = BranchProbability::getUnknown(),
              BranchProbability falseprob = BranchProbability::getUnknown())
        : CC(cc), CmpLHS(cmplhs), CmpMHS(cmpmiddle), CmpRHS(cmprhs),
          TrueBB(truebb), FalseBB(falsebb), ThisBB(me), DL(dl),
          TrueProb(trueprob), FalseProb(falseprob) {}
  };
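
  // For illustration (informal): a range case such as "5 <= X <= 10" branching
  // to TrueBB can be communicated as a CaseBlock with CmpLHS = 5, CmpMHS = X
  // and CmpRHS = 10, while a simple equality case uses a null CmpMHS and
  // CC = ISD::SETEQ.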

  struct JumpTable {
    /// Reg - the virtual register containing the index of the jump table entry
    /// to jump to.
    unsigned Reg;
    /// JTI - the JumpTableIndex for this jump table in the function.
    unsigned JTI;
    /// MBB - the MBB into which to emit the code for the indirect jump.
    MachineBasicBlock *MBB;
    /// Default - the MBB of the default bb, which is a successor of the range
    /// check MBB. This is used when updating PHI nodes in successors.
    MachineBasicBlock *Default;

    JumpTable(unsigned R, unsigned J, MachineBasicBlock *M,
              MachineBasicBlock *D): Reg(R), JTI(J), MBB(M), Default(D) {}
  };

  struct JumpTableHeader {
    APInt First;
    APInt Last;
    const Value *SValue;
    MachineBasicBlock *HeaderBB;
    bool Emitted;

    JumpTableHeader(APInt F, APInt L, const Value *SV, MachineBasicBlock *H,
                    bool E = false)
        : First(std::move(F)), Last(std::move(L)), SValue(SV), HeaderBB(H),
          Emitted(E) {}
  };
  using JumpTableBlock = std::pair<JumpTableHeader, JumpTable>;
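
  // Sketch (for orientation, not normative): the JumpTableHeader's HeaderBB
  // typically range-checks SValue against [First, Last] and either falls
  // through to JumpTable::MBB, which performs the indirect branch through
  // jump table JTI using the index in Reg, or branches to JumpTable::Default.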

  struct BitTestCase {
    uint64_t Mask;
    MachineBasicBlock *ThisBB;
    MachineBasicBlock *TargetBB;
    BranchProbability ExtraProb;

    BitTestCase(uint64_t M, MachineBasicBlock* T, MachineBasicBlock* Tr,
                BranchProbability Prob):
      Mask(M), ThisBB(T), TargetBB(Tr), ExtraProb(Prob) {}
  };

  using BitTestInfo = SmallVector<BitTestCase, 3>;

  struct BitTestBlock {
    APInt First;
    APInt Range;
    const Value *SValue;
    unsigned Reg;
    MVT RegVT;
    bool Emitted;
    bool ContiguousRange;
    MachineBasicBlock *Parent;
    MachineBasicBlock *Default;
    BitTestInfo Cases;
    BranchProbability Prob;
    BranchProbability DefaultProb;

    BitTestBlock(APInt F, APInt R, const Value *SV, unsigned Rg, MVT RgVT,
                 bool E, bool CR, MachineBasicBlock *P, MachineBasicBlock *D,
                 BitTestInfo C, BranchProbability Pr)
        : First(std::move(F)), Range(std::move(R)), SValue(SV), Reg(Rg),
          RegVT(RgVT), Emitted(E), ContiguousRange(CR), Parent(P), Default(D),
          Cases(std::move(C)), Prob(Pr) {}
  };
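
  // Rough illustration (details vary by target): cases 2, 4 and 6 that share a
  // destination can be lowered as a single bit test -- subtract First (2) from
  // the switch value, form 1 << that amount, and AND it with Mask (0b10101
  // here); a nonzero result branches to TargetBB, otherwise control continues
  // to the next bit-test block or to Default.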

  /// Return the range of values in [First..Last].
  uint64_t getJumpTableRange(const CaseClusterVector &Clusters, unsigned First,
                             unsigned Last) const;

  /// Return the number of cases in [First..Last].
  uint64_t getJumpTableNumCases(const SmallVectorImpl<unsigned> &TotalCases,
                                unsigned First, unsigned Last) const;
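
  // Informal example: for clusters covering {0, 1, 5, 6, 7} the range is
  // 7 - 0 + 1 = 8 and the number of cases is 5, giving a table density of 5/8;
  // findJumpTables() uses these two quantities when deciding whether a jump
  // table is worthwhile.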

  /// Build a jump table cluster from Clusters[First..Last]. Returns false if it
  /// decides it's not a good idea.
  bool buildJumpTable(const CaseClusterVector &Clusters, unsigned First,
                      unsigned Last, const SwitchInst *SI,
                      MachineBasicBlock *DefaultMBB, CaseCluster &JTCluster);

  /// Find clusters of cases suitable for jump table lowering.
  void findJumpTables(CaseClusterVector &Clusters, const SwitchInst *SI,
                      MachineBasicBlock *DefaultMBB);

  /// Build a bit test cluster from Clusters[First..Last]. Returns false if it
  /// decides it's not a good idea.
  bool buildBitTests(CaseClusterVector &Clusters, unsigned First, unsigned Last,
                     const SwitchInst *SI, CaseCluster &BTCluster);

  /// Find clusters of cases suitable for bit test lowering.
  void findBitTestClusters(CaseClusterVector &Clusters, const SwitchInst *SI);

  struct SwitchWorkListItem {
    MachineBasicBlock *MBB;
    CaseClusterIt FirstCluster;
    CaseClusterIt LastCluster;
    const ConstantInt *GE;
    const ConstantInt *LT;
    BranchProbability DefaultProb;
  };
  using SwitchWorkList = SmallVector<SwitchWorkListItem, 4>;

  /// Determine the rank by weight of CC in [First,Last]. If CC has more weight
  /// than each cluster in the range, its rank is 0.
  static unsigned caseClusterRank(const CaseCluster &CC, CaseClusterIt First,
                                  CaseClusterIt Last);

  /// Emit comparison and split W into two subtrees.
  void splitWorkItem(SwitchWorkList &WorkList, const SwitchWorkListItem &W,
                     Value *Cond, MachineBasicBlock *SwitchMBB);
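
  // Sketch of the idea (not the exact algorithm): given a work item whose
  // clusters are {[0..3], [10..12], [100]}, splitWorkItem() can emit a single
  // "Cond < 10" comparison and push two smaller work items -- one covering
  // [0..3] and one covering {[10..12], [100]} -- back onto the work list.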

  void lowerWorkItem(SwitchWorkListItem W, Value *Cond,
                     MachineBasicBlock *SwitchMBB,
                     MachineBasicBlock *DefaultMBB);

  /// Peel the top probability case if it exceeds the threshold.
  MachineBasicBlock *peelDominantCaseCluster(const SwitchInst &SI,
                                             CaseClusterVector &Clusters,
                                             BranchProbability &PeeledCaseProb);

  /// A class which encapsulates all of the information needed to generate a
  /// stack protector check and signals to isel via its state being initialized
  /// that a stack protector needs to be generated.
  ///
  /// *NOTE* The following is a high level documentation of SelectionDAG Stack
  /// Protector Generation. The reason that it is placed here is for a lack of
  /// other good places to stick it.
  ///
  /// High Level Overview of SelectionDAG Stack Protector Generation:
  ///
  /// Previously, generation of stack protectors was done exclusively in the
  /// pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
  /// splitting basic blocks at the IR level to create the success/failure basic
  /// blocks in the tail of the basic block in question. As a result of this,
  /// calls that would have qualified for the sibling call optimization were no
  /// longer eligible for optimization since said calls were no longer right in
  /// the "tail position" (i.e. the immediate predecessor of a ReturnInst
  /// instruction).
  ///
  /// Then it was noticed that since the sibling call optimization causes the
  /// callee to reuse the caller's stack, if we could delay the generation of
  /// the stack protector check until later in CodeGen after the sibling call
  /// decision was made, we get both the tail call optimization and the stack
  /// protector check.
  ///
  /// A few goals in solving this problem were:
  ///
  ///   1. Preserve the architecture independence of stack protector generation.
  ///
  ///   2. Preserve the normal IR level stack protector check for platforms like
  ///      OpenBSD for which we support platform-specific stack protector
  ///      generation.
  ///
  /// The main problem that guided the present solution is that one cannot
  /// solve this problem in an architecture independent manner at the IR level
  /// only. This is because:
  ///
  ///   1. The decision on whether or not to perform a sibling call on certain
  ///      platforms (for instance i386) requires lower level information
  ///      related to available registers that cannot be known at the IR level.
  ///
  ///   2. Even if the previous point were not true, the decision on whether to
  ///      perform a tail call is done in LowerCallTo in SelectionDAG, which
  ///      occurs after the Stack Protector Pass. As a result, one would need to
  ///      put the relevant callinst into the stack protector check success
  ///      basic block (where the return inst is placed) and then move it back
  ///      later at SelectionDAG/MI time before the stack protector check if the
  ///      tail call optimization failed. The MI level option was nixed
  ///      immediately since it would require platform-specific pattern
  ///      matching. The SelectionDAG level option was nixed because
  ///      SelectionDAG only processes one IR level basic block at a time,
  ///      implying one could not create a DAG Combine to move the callinst.
  ///
  /// To get around this problem a few things were realized:
  ///
  ///   1. While one cannot handle multiple IR level basic blocks at the
  ///      SelectionDAG level, one can generate multiple machine basic blocks
  ///      for one IR level basic block. This is how we handle bit tests and
  ///      switches.
  ///
  ///   2. At the MI level, tail calls are represented via a special return
  ///      MIInst called "tcreturn". Thus if we know the basic block in which we
  ///      wish to insert the stack protector check, we get the correct behavior
  ///      by always inserting the stack protector check right before the return
  ///      statement. This is a "magical transformation" since no matter where
  ///      the stack protector check intrinsic is, we always insert the stack
  ///      protector check code at the end of the BB.
  ///
  /// Given the aforementioned constraints, the following solution was devised:
  ///
  ///   1. On platforms that do not support SelectionDAG stack protector check
  ///      generation, allow for the normal IR level stack protector check
  ///      generation to continue.
  ///
  ///   2. On platforms that do support SelectionDAG stack protector check
  ///      generation:
  ///
  ///     a. Use the IR level stack protector pass to decide if a stack
  ///        protector is required/which BB we insert the stack protector check
  ///        in by reusing the logic already therein. If we wish to generate a
  ///        stack protector check in a basic block, we place a special IR
  ///        intrinsic called llvm.stackprotectorcheck right before the BB's
  ///        returninst, or if there is a callinst that could potentially be
  ///        sibling call optimized, before the call inst.
  ///
  ///     b. Then when a BB with said intrinsic is processed, we codegen the BB
  ///        normally via SelectBasicBlock. In said process, when we visit the
  ///        stack protector check, we do not actually emit anything into the
  ///        BB. Instead, we just initialize the stack protector descriptor
  ///        class (which involves stashing information/creating the success
  ///        mbb and the failure mbb if we have not created one for this
  ///        function yet) and export the guard variable that we are going to
  ///        compare.
  ///
  ///     c. After we finish selecting the basic block, in FinishBasicBlock if
  ///        the StackProtectorDescriptor attached to the SelectionDAGBuilder is
  ///        initialized, we produce the validation code with one of these
  ///        techniques:
  ///          1) with a call to a guard check function
  ///          2) with inlined instrumentation
  ///
  ///        1) We insert a call to the check function before the terminator.
  ///
  ///        2) We first find a splice point in the parent basic block
  ///        before the terminator and then splice the terminator of said basic
  ///        block into the success basic block. Then we code-gen a new tail for
  ///        the parent basic block consisting of the two loads, the comparison,
  ///        and finally two branches to the success/failure basic blocks. We
  ///        conclude by code-gening the failure basic block if we have not
  ///        code-gened it already (all stack protector checks we generate in
  ///        the same function use the same failure basic block).
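  ///
  /// For illustration only (a sketch of the inlined form, not the exact nodes
  /// emitted): the new tail of ParentMBB loads the stack guard value and the
  /// function's stack protector slot, compares the two, and branches to
  /// SuccessMBB on equality or to FailureMBB (which calls __stack_chk_fail)
  /// otherwise.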
  class StackProtectorDescriptor {
  public:
    StackProtectorDescriptor() = default;

    /// Returns true if all fields of the stack protector descriptor are
    /// initialized implying that we should/are ready to emit a stack protector.
    bool shouldEmitStackProtector() const {
      return ParentMBB && SuccessMBB && FailureMBB;
    }

    bool shouldEmitFunctionBasedCheckStackProtector() const {
      return ParentMBB && !SuccessMBB && !FailureMBB;
    }

    /// Initialize the stack protector descriptor structure for a new basic
    /// block.
    void initialize(const BasicBlock *BB, MachineBasicBlock *MBB,
                    bool FunctionBasedInstrumentation) {
      // Make sure we are not initialized yet.
      assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
             "already initialized!");
      ParentMBB = MBB;
      if (!FunctionBasedInstrumentation) {
        SuccessMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ true);
        FailureMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
      }
    }

    /// Reset state that changes when we handle different basic blocks.
    ///
    /// This currently includes:
    ///
    /// 1. The specific basic block we are generating a
    /// stack protector for (ParentMBB).
    ///
    /// 2. The successor machine basic block that will contain the tail of
    /// parent mbb after we create the stack protector check (SuccessMBB). This
    /// BB is visited only on stack protector check success.
    void resetPerBBState() {
      ParentMBB = nullptr;
      SuccessMBB = nullptr;
    }

    /// Reset state that only changes when we switch functions.
    ///
    /// This currently includes:
    ///
    /// 1. FailureMBB since we reuse the failure code path for all stack
    /// protector checks created in an individual function.
    ///
    /// 2. The guard variable since the guard variable we are checking against
    /// is always the same.
    void resetPerFunctionState() {
      FailureMBB = nullptr;
    }

    MachineBasicBlock *getParentMBB() { return ParentMBB; }
    MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
    MachineBasicBlock *getFailureMBB() { return FailureMBB; }

  private:
    /// The basic block for which we are generating the stack protector.
    ///
    /// As a result of stack protector generation, we will splice the
    /// terminators of this basic block into the successor mbb SuccessMBB and
    /// replace it with a compare/branch to the successor mbbs
    /// SuccessMBB/FailureMBB depending on whether or not the stack protector
    /// check succeeds.
    MachineBasicBlock *ParentMBB = nullptr;

    /// A basic block visited on stack protector check success that contains the
    /// terminators of ParentMBB.
    MachineBasicBlock *SuccessMBB = nullptr;

    /// A basic block visited on stack protector check failure that will
    /// contain a call to __stack_chk_fail().
    MachineBasicBlock *FailureMBB = nullptr;

    /// Add a successor machine basic block to ParentMBB. If the successor mbb
    /// has not been created yet (i.e. if SuccMBB is null), then the machine
    /// basic block will be created. Assign a large weight if IsLikely is true.
    MachineBasicBlock *AddSuccessorMBB(const BasicBlock *BB,
                                       MachineBasicBlock *ParentMBB,
                                       bool IsLikely,
                                       MachineBasicBlock *SuccMBB = nullptr);
  };

  const TargetMachine &TM;

public:
  /// Lowest valid SDNodeOrder. The special case 0 is reserved for scheduling
  /// nodes without a corresponding SDNode.
  static const unsigned LowestSDNodeOrder = 1;

  SelectionDAG &DAG;
  const DataLayout *DL = nullptr;
  AliasAnalysis *AA = nullptr;
  const TargetLibraryInfo *LibInfo;

  /// SwitchCases - Vector of CaseBlock structures used to communicate
  /// SwitchInst code generation information.
  std::vector<CaseBlock> SwitchCases;

  /// JTCases - Vector of JumpTable structures used to communicate
  /// SwitchInst code generation information.
  std::vector<JumpTableBlock> JTCases;

  /// BitTestCases - Vector of BitTestBlock structures used to communicate
  /// SwitchInst code generation information.
  std::vector<BitTestBlock> BitTestCases;

  /// A StackProtectorDescriptor structure used to communicate stack protector
  /// information in between SelectBasicBlock and FinishBasicBlock.
  StackProtectorDescriptor SPDescriptor;

  // Emit PHI-node-operand constants only once even if used by multiple
  // PHI nodes.
  DenseMap<const Constant *, unsigned> ConstantsOut;

  /// FuncInfo - Information about the function as a whole.
  FunctionLoweringInfo &FuncInfo;

  /// GFI - Garbage collection metadata for the function.
  GCFunctionInfo *GFI;

  /// LPadToCallSiteMap - Map a landing pad to the call site indexes.
  DenseMap<MachineBasicBlock *, SmallVector<unsigned, 4>> LPadToCallSiteMap;

  /// HasTailCall - This is set to true if a call in the current
  /// block has been translated as a tail call. In this case,
  /// no subsequent DAG nodes should be created.
  bool HasTailCall = false;

  LLVMContext *Context;

  SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
                      CodeGenOpt::Level ol)
      : SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()), DAG(dag),
        FuncInfo(funcinfo) {}

  void init(GCFunctionInfo *gfi, AliasAnalysis *AA,
            const TargetLibraryInfo *li);

  /// Clear out the current SelectionDAG and the associated state and prepare
  /// this SelectionDAGBuilder object to be used for a new block. This doesn't
  /// clear out information about additional blocks that are needed to complete
  /// switch lowering or PHI node updating; that information is cleared out as
  /// it is consumed.
  void clear();

  /// Clear the dangling debug information map. This function is separated from
  /// the clear so that debug information that is dangling in a basic block can
  /// be properly resolved in a different basic block. This allows the
  /// SelectionDAG to resolve dangling debug information attached to PHI nodes.
  void clearDanglingDebugInfo();

  /// Return the current virtual root of the Selection DAG, flushing any
  /// PendingLoad items. This must be done before emitting a store or any other
  /// node that may need to be ordered after any prior load instructions.
  SDValue getRoot();

  /// Similar to getRoot, but instead of flushing all the PendingLoad items,
  /// flush all the PendingExports items. It is necessary to do this before
  /// emitting a terminator instruction.
  SDValue getControlRoot();
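
  // Usage sketch (illustrative, not actual lowering code): a store-style
  // lowering chains the new node off getRoot(), e.g.
  //   SDValue Chain = getRoot();
  //   SDValue St = DAG.getStore(Chain, getCurSDLoc(), Val, Ptr, MMO);
  // whereas a terminator (branch, return) is chained off getControlRoot() so
  // that pending exports are flushed first.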

  SDLoc getCurSDLoc() const {
    return SDLoc(CurInst, SDNodeOrder);
  }

  DebugLoc getCurDebugLoc() const {
    return CurInst ? CurInst->getDebugLoc() : DebugLoc();
  }

  void CopyValueToVirtualRegister(const Value *V, unsigned Reg);

  void visit(const Instruction &I);

  void visit(unsigned Opcode, const User &I);

  /// getCopyFromRegs - If there was a virtual register allocated for the value
  /// V, emit CopyFromReg of the specified type Ty. Return an empty SDValue()
  /// otherwise.
  SDValue getCopyFromRegs(const Value *V, Type *Ty);

  /// If we have dangling debug info that describes \p Variable, or an
  /// overlapping part of the variable considering the \p Expr, then this method
  /// will drop that debug info as it isn't valid any longer.
  void dropDanglingDebugInfo(const DILocalVariable *Variable,
                             const DIExpression *Expr);

  // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
  // generate the debug data structures now that we've seen its definition.
  void resolveDanglingDebugInfo(const Value *V, SDValue Val);

  SDValue getValue(const Value *V);
  bool findValue(const Value *V) const;

  /// Return the SDNode for the specified IR value if it exists.
  SDNode *getNodeForIRValue(const Value *V) {
    if (NodeMap.find(V) == NodeMap.end())
      return nullptr;
    return NodeMap[V].getNode();
  }

  SDValue getNonRegisterValue(const Value *V);
  SDValue getValueImpl(const Value *V);

  void setValue(const Value *V, SDValue NewN) {
    SDValue &N = NodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  void setUnusedArgValue(const Value *V, SDValue NewN) {
    SDValue &N = UnusedArgNodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TProb,
                            BranchProbability FProb, bool InvertCond);
  void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TProb,
                                    BranchProbability FProb, bool InvertCond);
  bool ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases);
  bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
  void CopyToExportRegsIfNeeded(const Value *V);
  void ExportFromCurrentBlock(const Value *V);
  void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall,
                   const BasicBlock *EHPadBB = nullptr);

  // Lower range metadata from 0 to N to assert zext to an integer of nearest
  // floor power of two.
  SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I,
                                 SDValue Op);
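
  // Informal example: an i32 call result annotated with !range metadata
  // covering [0, 256) can be wrapped in an AssertZext node stating that only
  // the low 8 bits may be set, which lets later combines treat it as an
  // i8-sized value.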

  void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI,
                                ImmutableCallSite CS, unsigned ArgIdx,
                                unsigned NumArgs, SDValue Callee,
                                Type *ReturnTy, bool IsPatchPoint);

  std::pair<SDValue, SDValue>
  lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
                 const BasicBlock *EHPadBB = nullptr);

  /// UpdateSplitBlock - When an MBB was split during scheduling, update the
  /// references that need to refer to the last resulting block.
  void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last);

  /// Describes a gc.statepoint or a gc.statepoint like thing for the purposes
  /// of lowering into a STATEPOINT node.
  struct StatepointLoweringInfo {
    /// Bases[i] is the base pointer for Ptrs[i]. Together they denote the set
    /// of gc pointers this STATEPOINT has to relocate.
    SmallVector<const Value *, 16> Bases;
    SmallVector<const Value *, 16> Ptrs;

    /// The set of gc.relocate calls associated with this gc.statepoint.
    SmallVector<const GCRelocateInst *, 16> GCRelocates;

    /// The full list of gc arguments to the gc.statepoint being lowered.
    ArrayRef<const Use> GCArgs;

    /// The gc.statepoint instruction.
    const Instruction *StatepointInstr = nullptr;

    /// The list of gc transition arguments present in the gc.statepoint being
    /// lowered.
    ArrayRef<const Use> GCTransitionArgs;

    /// The ID that the resulting STATEPOINT instruction has to report.

    /// Information regarding the underlying call instruction.
    TargetLowering::CallLoweringInfo CLI;

    /// The deoptimization state associated with this gc.statepoint call, if
    /// any.
    ArrayRef<const Use> DeoptState;

    /// Flags associated with the meta arguments being lowered.
    uint64_t StatepointFlags = -1;

    /// The number of patchable bytes the call needs to get lowered into.
    unsigned NumPatchBytes = -1;

    /// The exception handling unwind destination, in case this represents an
    /// invoke of gc.statepoint.
    const BasicBlock *EHPadBB = nullptr;

    explicit StatepointLoweringInfo(SelectionDAG &DAG) : CLI(DAG) {}
  };

  /// Lower \p SI into a STATEPOINT instruction.
  SDValue LowerAsSTATEPOINT(StatepointLoweringInfo &SI);

  // This function is responsible for the whole statepoint lowering process.
  // It uniformly handles invoke and call statepoints.
  void LowerStatepoint(ImmutableStatepoint ISP,
                       const BasicBlock *EHPadBB = nullptr);

  void LowerCallSiteWithDeoptBundle(ImmutableCallSite CS, SDValue Callee,
                                    const BasicBlock *EHPadBB);

  void LowerDeoptimizeCall(const CallInst *CI);
  void LowerDeoptimizingReturn();

  void LowerCallSiteWithDeoptBundleImpl(ImmutableCallSite CS, SDValue Callee,
                                        const BasicBlock *EHPadBB,
                                        bool VarArgDisallowed,
                                        bool ForceVoidReturnTy);

  /// Returns the type of FrameIndex and TargetFrameIndex nodes.
  MVT getFrameIndexTy() {
    return DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout());
  }

  // Terminator instructions.
  void visitRet(const ReturnInst &I);
  void visitBr(const BranchInst &I);
  void visitSwitch(const SwitchInst &I);
  void visitIndirectBr(const IndirectBrInst &I);
  void visitUnreachable(const UnreachableInst &I);
  void visitCleanupRet(const CleanupReturnInst &I);
  void visitCatchSwitch(const CatchSwitchInst &I);
  void visitCatchRet(const CatchReturnInst &I);
  void visitCatchPad(const CatchPadInst &I);
  void visitCleanupPad(const CleanupPadInst &CPI);

  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;
  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

  void visitSwitchCase(CaseBlock &CB,
                       MachineBasicBlock *SwitchBB);
  void visitSPDescriptorParent(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *ParentBB);
  void visitSPDescriptorFailure(StackProtectorDescriptor &SPD);
  void visitBitTestHeader(BitTestBlock &B, MachineBasicBlock *SwitchBB);
  void visitBitTestCase(BitTestBlock &BB,
                        MachineBasicBlock* NextMBB,
                        BranchProbability BranchProbToNext,
                        unsigned Reg,
                        BitTestCase &B,
                        MachineBasicBlock *SwitchBB);
  void visitJumpTable(JumpTable &JT);
  void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH,
                            MachineBasicBlock *SwitchBB);

  // These all get lowered before this pass.
  void visitInvoke(const InvokeInst &I);
  void visitResume(const ResumeInst &I);

  void visitUnary(const User &I, unsigned Opcode);
  void visitFNeg(const User &I) { visitUnary(I, ISD::FNEG); }

  void visitBinary(const User &I, unsigned Opcode);
  void visitShift(const User &I, unsigned Opcode);
  void visitAdd(const User &I)  { visitBinary(I, ISD::ADD); }
  void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); }
  void visitSub(const User &I)  { visitBinary(I, ISD::SUB); }
  void visitFSub(const User &I);
  void visitMul(const User &I)  { visitBinary(I, ISD::MUL); }
  void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); }
  void visitURem(const User &I) { visitBinary(I, ISD::UREM); }
  void visitSRem(const User &I) { visitBinary(I, ISD::SREM); }
  void visitFRem(const User &I) { visitBinary(I, ISD::FREM); }
  void visitUDiv(const User &I) { visitBinary(I, ISD::UDIV); }
  void visitSDiv(const User &I);
  void visitFDiv(const User &I) { visitBinary(I, ISD::FDIV); }
  void visitAnd (const User &I) { visitBinary(I, ISD::AND); }
  void visitOr  (const User &I) { visitBinary(I, ISD::OR); }
  void visitXor (const User &I) { visitBinary(I, ISD::XOR); }
  void visitShl (const User &I) { visitShift(I, ISD::SHL); }
  void visitLShr(const User &I) { visitShift(I, ISD::SRL); }
  void visitAShr(const User &I) { visitShift(I, ISD::SRA); }
  void visitICmp(const User &I);
  void visitFCmp(const User &I);

  // Visit the conversion instructions.
  void visitTrunc(const User &I);
  void visitZExt(const User &I);
  void visitSExt(const User &I);
  void visitFPTrunc(const User &I);
  void visitFPExt(const User &I);
  void visitFPToUI(const User &I);
  void visitFPToSI(const User &I);
  void visitUIToFP(const User &I);
  void visitSIToFP(const User &I);
  void visitPtrToInt(const User &I);
  void visitIntToPtr(const User &I);
  void visitBitCast(const User &I);
  void visitAddrSpaceCast(const User &I);

  void visitExtractElement(const User &I);
  void visitInsertElement(const User &I);
  void visitShuffleVector(const User &I);

  void visitExtractValue(const User &I);
  void visitInsertValue(const User &I);
  void visitLandingPad(const LandingPadInst &LP);

  void visitGetElementPtr(const User &I);
  void visitSelect(const User &I);

  void visitAlloca(const AllocaInst &I);
  void visitLoad(const LoadInst &I);
  void visitStore(const StoreInst &I);
  void visitMaskedLoad(const CallInst &I, bool IsExpanding = false);
  void visitMaskedStore(const CallInst &I, bool IsCompressing = false);
  void visitMaskedGather(const CallInst &I);
  void visitMaskedScatter(const CallInst &I);
  void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
  void visitAtomicRMW(const AtomicRMWInst &I);
  void visitFence(const FenceInst &I);
  void visitPHI(const PHINode &I);
  void visitCall(const CallInst &I);
  bool visitMemCmpCall(const CallInst &I);
  bool visitMemPCpyCall(const CallInst &I);
  bool visitMemChrCall(const CallInst &I);
  bool visitStrCpyCall(const CallInst &I, bool isStpcpy);
  bool visitStrCmpCall(const CallInst &I);
  bool visitStrLenCall(const CallInst &I);
  bool visitStrNLenCall(const CallInst &I);
  bool visitUnaryFloatCall(const CallInst &I, unsigned Opcode);
  bool visitBinaryFloatCall(const CallInst &I, unsigned Opcode);
  void visitAtomicLoad(const LoadInst &I);
  void visitAtomicStore(const StoreInst &I);
  void visitLoadFromSwiftError(const LoadInst &I);
  void visitStoreToSwiftError(const StoreInst &I);

  void visitInlineAsm(ImmutableCallSite CS);
  const char *visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
  void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI);

  void visitVAStart(const CallInst &I);
  void visitVAArg(const VAArgInst &I);
  void visitVAEnd(const CallInst &I);
  void visitVACopy(const CallInst &I);
  void visitStackmap(const CallInst &I);
  void visitPatchpoint(ImmutableCallSite CS,
                       const BasicBlock *EHPadBB = nullptr);

  // These two are implemented in StatepointLowering.cpp
  void visitGCRelocate(const GCRelocateInst &Relocate);
  void visitGCResult(const GCResultInst &I);

  void visitVectorReduce(const CallInst &I, unsigned Intrinsic);

  void visitUserOp1(const Instruction &I) {
    llvm_unreachable("UserOp1 should not exist at instruction selection time!");
  }
  void visitUserOp2(const Instruction &I) {
    llvm_unreachable("UserOp2 should not exist at instruction selection time!");
  }

  void processIntegerCallValue(const Instruction &I,
                               SDValue Value, bool IsSigned);

  void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);

  void emitInlineAsmError(ImmutableCallSite CS, const Twine &Message);

  /// If V is a function argument then create corresponding DBG_VALUE machine
  /// instruction for it now. At the end of instruction selection, they will be
  /// inserted into the entry BB.
  bool EmitFuncArgumentDbgValue(const Value *V, DILocalVariable *Variable,
                                DIExpression *Expr, DILocation *DL,
                                bool IsDbgDeclare, const SDValue &N);

  /// Return the next block after MBB, or nullptr if there is none.
  MachineBasicBlock *NextBlock(MachineBasicBlock *MBB);

  /// Update the DAG and DAG builder with the relevant information after
  /// a new root node has been created which could be a tail call.
  void updateDAGForMaybeTailCall(SDValue MaybeTC);

  /// Return the appropriate SDDbgValue based on N.
  SDDbgValue *getDbgValue(SDValue N, DILocalVariable *Variable,
                          DIExpression *Expr, const DebugLoc &dl,
                          unsigned DbgSDNodeOrder);
};

/// RegsForValue - This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information about
/// the value. The most common situation is to represent one value at a time,
/// but struct or array values are handled element-wise as multiple values. The
/// splitting of aggregates is performed recursively, so that we never have
/// aggregate-typed registers. The values at this point do not necessarily have
/// legal types, so each value may require one or more registers of some legal
/// register type.
struct RegsForValue {
  /// The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  SmallVector<EVT, 4> ValueVTs;

  /// The value types of the registers. This is the same size as ValueVTs and it
  /// records, for each value, what the type of the assigned register or
  /// registers is. (Individual values are never synthesized from more than one
  /// type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function, however with physical registers it is
  /// necessary to have a separate record of the types.
  SmallVector<MVT, 4> RegVTs;

  /// This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  SmallVector<unsigned, 4> Regs;

  /// This list holds the number of registers for each value.
  SmallVector<unsigned, 4> RegCount;
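
  // Illustrative example (not normative): a single IR value of type i64 on a
  // 32-bit target might be described as ValueVTs = { MVT::i64 },
  // RegVTs = { MVT::i32 }, Regs = { R1, R2 } and RegCount = { 2 }, i.e. one
  // value expanded into two legal i32 registers.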

  /// Records if this value needs to be treated in an ABI dependent manner,
  /// different from normal type legalization.
  Optional<CallingConv::ID> CallConv;

  RegsForValue() = default;
  RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt,
               Optional<CallingConv::ID> CC = None);
  RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
               const DataLayout &DL, unsigned Reg, Type *Ty,
               Optional<CallingConv::ID> CC);

  bool isABIMangled() const {
    return CallConv.hasValue();
  }

  /// Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
    RegCount.push_back(RHS.Regs.size());
  }

  /// Emit a series of CopyFromReg nodes that copies from this value and returns
  /// the result as a ValueVTs value. This uses Chain/Flag as the input and
  /// updates them for the output Chain/Flag. If the Flag pointer is NULL, no
  /// flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
                          const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                          const Value *V = nullptr) const;

  /// Emit a series of CopyToReg nodes that copies the specified value into the
  /// registers specified by this object. This uses Chain/Flag as the input and
  /// updates them for the output Chain/Flag. If the Flag pointer is nullptr, no
  /// flag is used. If V is not nullptr, then it is used in printing better
  /// diagnostic messages on error.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, SDValue *Flag, const Value *V = nullptr,
                     ISD::NodeType PreferredExtendType = ISD::ANY_EXTEND) const;

  /// Add this value to the specified inlineasm node operand list. This adds the
  /// code marker, matching input operand index (if applicable), and includes
  /// the number of values added into it.
  void AddInlineAsmOperands(unsigned Code, bool HasMatching,
                            unsigned MatchingIdx, const SDLoc &dl,
                            SelectionDAG &DAG, std::vector<SDValue> &Ops) const;

  /// Check if the total RegCount is greater than one.
  bool occupiesMultipleRegs() const {
    return std::accumulate(RegCount.begin(), RegCount.end(), 0) > 1;
  }

  /// Return a list of registers and their sizes.
  SmallVector<std::pair<unsigned, unsigned>, 4> getRegsAndSizes() const;
};

} // end namespace llvm

#endif // LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H