//===-- SelectionDAGBuilder.h - Selection-DAG building --------*- C++ -*---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H

#include "StatepointLowering.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {

class AddrSpaceCastInst;
class ExtractElementInst;
class FunctionLoweringInfo;
class GetElementPtrInst;
class InsertElementInst;
class MachineBasicBlock;
class MachineRegisterInfo;
class ShuffleVectorInst;
class TargetLibraryInfo;
class UnreachableInst;

//===----------------------------------------------------------------------===//
/// SelectionDAGBuilder - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
///
class SelectionDAGBuilder {
  /// CurInst - The current instruction being visited.
  const Instruction *CurInst;

  DenseMap<const Value*, SDValue> NodeMap;

  /// UnusedArgNodeMap - Maps argument values for unused arguments. This is
  /// used to preserve debug information for incoming arguments.
  DenseMap<const Value*, SDValue> UnusedArgNodeMap;

  /// DanglingDebugInfo - Helper type for DanglingDebugInfoMap.
  class DanglingDebugInfo {
    const DbgValueInst* DI;
    DebugLoc dl;
    unsigned SDNodeOrder;
  public:
    DanglingDebugInfo() : DI(nullptr), dl(DebugLoc()), SDNodeOrder(0) { }
    DanglingDebugInfo(const DbgValueInst *di, DebugLoc DL, unsigned SDNO)
        : DI(di), dl(std::move(DL)), SDNodeOrder(SDNO) {}
    const DbgValueInst* getDI() { return DI; }
    DebugLoc getdl() { return dl; }
    unsigned getSDNodeOrder() { return SDNodeOrder; }
  };

  /// DanglingDebugInfoMap - Keeps track of dbg_values for which we have not
  /// yet seen the referent. We defer handling these until we do see it.
  DenseMap<const Value*, DanglingDebugInfo> DanglingDebugInfoMap;

  /// PendingLoads - Loads are not emitted to the program immediately. We bunch
  /// them up and then emit token factor nodes when possible. This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
  SmallVector<SDValue, 8> PendingLoads;
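
  // Schematic example (not an actual DAG dump): two independent loads can both
  // take the entry token as their chain and only be joined later, so neither
  // is artificially ordered against the other:
  //
  //   t1: i32,ch = load<%a> t0          // t0 = EntryToken
  //   t2: i32,ch = load<%b> t0
  //   t3: ch = TokenFactor t1:1, t2:1   // built from PendingLoads by getRoot()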

  /// State used while lowering a statepoint sequence (gc_statepoint,
  /// gc_relocate, and gc_result). See StatepointLowering.hpp/cpp for details.
  StatepointLoweringState StatepointLowering;

  /// PendingExports - CopyToReg nodes that copy values to virtual registers
  /// for export to other blocks need to be emitted before any terminator
  /// instruction, but they have no other ordering requirements. We bunch them
  /// up and then emit a single tokenfactor for them just before terminator
  /// instructions.
  SmallVector<SDValue, 8> PendingExports;

  /// SDNodeOrder - A unique monotonically increasing number used to order the
  /// SDNodes we create.
  unsigned SDNodeOrder;

  enum CaseClusterKind {
    /// A cluster of adjacent case labels with the same destination, or just one
    /// case.
    CC_Range,
    /// A cluster of cases suitable for jump table lowering.
    CC_JumpTable,
    /// A cluster of cases suitable for bit test lowering.
    CC_BitTests
  };

  /// A cluster of case labels.
  struct CaseCluster {
    CaseClusterKind Kind;
    const ConstantInt *Low, *High;
    union {
      MachineBasicBlock *MBB;
      unsigned JTCasesIndex;
      unsigned BTCasesIndex;
    };
    BranchProbability Prob;

    static CaseCluster range(const ConstantInt *Low, const ConstantInt *High,
                             MachineBasicBlock *MBB, BranchProbability Prob) {
      CaseCluster C;
      C.Kind = CC_Range;
      C.Low = Low;
      C.High = High;
      C.MBB = MBB;
      C.Prob = Prob;
      return C;
    }

    static CaseCluster jumpTable(const ConstantInt *Low,
                                 const ConstantInt *High, unsigned JTCasesIndex,
                                 BranchProbability Prob) {
      CaseCluster C;
      C.Kind = CC_JumpTable;
      C.Low = Low;
      C.High = High;
      C.JTCasesIndex = JTCasesIndex;
      C.Prob = Prob;
      return C;
    }

    static CaseCluster bitTests(const ConstantInt *Low, const ConstantInt *High,
                                unsigned BTCasesIndex, BranchProbability Prob) {
      CaseCluster C;
      C.Kind = CC_BitTests;
      C.Low = Low;
      C.High = High;
      C.BTCasesIndex = BTCasesIndex;
      C.Prob = Prob;
      return C;
    }
  };
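
  // Usage sketch (hedged; the constants, blocks, and probabilities are
  // invented): a switch whose cases 0..2 all branch to one block, plus a dense
  // run lowered through jump table 0, could be described as:
  //
  //   CaseCluster R = CaseCluster::range(CI0, CI2, MBBA, ProbA);
  //   CaseCluster J = CaseCluster::jumpTable(CI4, CI15, /*JTCasesIndex=*/0,
  //                                          ProbJ);
  //
  // where CIn are the ConstantInt case values and the index refers into JTCases.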

  typedef std::vector<CaseCluster> CaseClusterVector;
  typedef CaseClusterVector::iterator CaseClusterIt;

  struct CaseBits {
    uint64_t Mask;
    MachineBasicBlock* BB;
    unsigned Bits;
    BranchProbability ExtraProb;

    CaseBits(uint64_t mask, MachineBasicBlock* bb, unsigned bits,
             BranchProbability Prob):
      Mask(mask), BB(bb), Bits(bits), ExtraProb(Prob) { }

    CaseBits() : Mask(0), BB(nullptr), Bits(0) {}
  };

  typedef std::vector<CaseBits> CaseBitsVector;

  /// Sort Clusters and merge adjacent cases.
  void sortAndRangeify(CaseClusterVector &Clusters);
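
  // For example (illustrative): the unsorted clusters {3 -> bbB, 1 -> bbA,
  // 2 -> bbA} sort to {1, 2, 3}, and the adjacent cases 1 and 2 that share
  // destination bbA merge into a single CC_Range cluster, leaving
  // [1,2] -> bbA and [3,3] -> bbB.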

  /// CaseBlock - This structure is used to communicate between
  /// SelectionDAGBuilder and SDISel for the code generation of additional basic
  /// blocks needed by multi-case switch statements.
  struct CaseBlock {
    CaseBlock(ISD::CondCode cc, const Value *cmplhs, const Value *cmprhs,
              const Value *cmpmiddle, MachineBasicBlock *truebb,
              MachineBasicBlock *falsebb, MachineBasicBlock *me,
              BranchProbability trueprob = BranchProbability::getUnknown(),
              BranchProbability falseprob = BranchProbability::getUnknown())
        : CC(cc), CmpLHS(cmplhs), CmpMHS(cmpmiddle), CmpRHS(cmprhs),
          TrueBB(truebb), FalseBB(falsebb), ThisBB(me), TrueProb(trueprob),
          FalseProb(falseprob) {}

    // CC - the condition code to use for the case block's setcc node.
    ISD::CondCode CC;

    // CmpLHS/CmpRHS/CmpMHS - The LHS/MHS/RHS of the comparison to emit.
    // Emit by default LHS op RHS. MHS is used for range comparisons:
    // If MHS is not null: (LHS <= MHS) and (MHS <= RHS). For example, a case
    // range [10,20] on a value x is checked as 10 <= x && x <= 20, with
    // CmpLHS = 10, CmpMHS = x, and CmpRHS = 20.
    const Value *CmpLHS, *CmpMHS, *CmpRHS;

    // TrueBB/FalseBB - the block to branch to if the setcc is true/false.
    MachineBasicBlock *TrueBB, *FalseBB;

    // ThisBB - the block into which to emit the code for the setcc and
    // branches.
    MachineBasicBlock *ThisBB;

    // TrueProb/FalseProb - branch weights.
    BranchProbability TrueProb, FalseProb;
  };
  struct JumpTable {
    JumpTable(unsigned R, unsigned J, MachineBasicBlock *M,
              MachineBasicBlock *D): Reg(R), JTI(J), MBB(M), Default(D) {}

    /// Reg - the virtual register containing the index of the jump table entry
    /// to jump to.
    unsigned Reg;
    /// JTI - the JumpTableIndex for this jump table in the function.
    unsigned JTI;
    /// MBB - the MBB into which to emit the code for the indirect jump.
    MachineBasicBlock *MBB;
    /// Default - the MBB of the default bb, which is a successor of the range
    /// check MBB. This is used when updating PHI nodes in successors.
    MachineBasicBlock *Default;
  };
  struct JumpTableHeader {
    JumpTableHeader(APInt F, APInt L, const Value *SV, MachineBasicBlock *H,
                    bool E = false)
        : First(std::move(F)), Last(std::move(L)), SValue(SV), HeaderBB(H),
          Emitted(E) {}
    APInt First;
    APInt Last;
    const Value *SValue;
    MachineBasicBlock *HeaderBB;
    bool Emitted;
  };
  typedef std::pair<JumpTableHeader, JumpTable> JumpTableBlock;

  struct BitTestCase {
    BitTestCase(uint64_t M, MachineBasicBlock* T, MachineBasicBlock* Tr,
                BranchProbability Prob):
      Mask(M), ThisBB(T), TargetBB(Tr), ExtraProb(Prob) { }
    uint64_t Mask;
    MachineBasicBlock *ThisBB;
    MachineBasicBlock *TargetBB;
    BranchProbability ExtraProb;
  };

  typedef SmallVector<BitTestCase, 3> BitTestInfo;

  struct BitTestBlock {
    BitTestBlock(APInt F, APInt R, const Value *SV, unsigned Rg, MVT RgVT,
                 bool E, bool CR, MachineBasicBlock *P, MachineBasicBlock *D,
                 BitTestInfo C, BranchProbability Pr)
        : First(std::move(F)), Range(std::move(R)), SValue(SV), Reg(Rg),
          RegVT(RgVT), Emitted(E), ContiguousRange(CR), Parent(P), Default(D),
          Cases(std::move(C)), Prob(Pr) {}
    APInt First;
    APInt Range;
    const Value *SValue;
    unsigned Reg;
    MVT RegVT;
    bool Emitted;
    bool ContiguousRange;
    MachineBasicBlock *Parent;
    MachineBasicBlock *Default;
    BitTestInfo Cases;
    BranchProbability Prob;
    BranchProbability DefaultProb;
  };

  /// Return the range of values in [First..Last].
  uint64_t getJumpTableRange(const CaseClusterVector &Clusters, unsigned First,
                             unsigned Last) const;

  /// Return the number of cases in [First..Last].
  uint64_t getJumpTableNumCases(const SmallVectorImpl<unsigned> &TotalCases,
                                unsigned First, unsigned Last) const;
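
  // Hedged worked example: for clusters [0,2], [4,4], [7,9] with First=0 and
  // Last=2, the value range is 9 - 0 + 1 = 10 while the case count is
  // 3 + 1 + 3 = 7, i.e. a density of 70%, which findJumpTables can weigh
  // against the target's minimum jump table density.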

  /// Build a jump table cluster from Clusters[First..Last]. Returns false if it
  /// decides it's not a good idea.
  bool buildJumpTable(const CaseClusterVector &Clusters, unsigned First,
                      unsigned Last, const SwitchInst *SI,
                      MachineBasicBlock *DefaultMBB, CaseCluster &JTCluster);

  /// Find clusters of cases suitable for jump table lowering.
  void findJumpTables(CaseClusterVector &Clusters, const SwitchInst *SI,
                      MachineBasicBlock *DefaultMBB);

  /// Build a bit test cluster from Clusters[First..Last]. Returns false if it
  /// decides it's not a good idea.
  bool buildBitTests(CaseClusterVector &Clusters, unsigned First, unsigned Last,
                     const SwitchInst *SI, CaseCluster &BTCluster);

  /// Find clusters of cases suitable for bit test lowering.
  void findBitTestClusters(CaseClusterVector &Clusters, const SwitchInst *SI);

  struct SwitchWorkListItem {
    MachineBasicBlock *MBB;
    CaseClusterIt FirstCluster;
    CaseClusterIt LastCluster;
    const ConstantInt *GE;
    const ConstantInt *LT;
    BranchProbability DefaultProb;
  };
  typedef SmallVector<SwitchWorkListItem, 4> SwitchWorkList;
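
  // Intuition (hedged, not literal code): each work item covers the clusters
  // in [FirstCluster, LastCluster] reachable from MBB, with GE/LT recording
  // what is already known about the switch condition on this path (condition
  // >= GE and condition < LT; either may be null when unbounded). splitWorkItem
  // picks a pivot, emits one comparison, and pushes the two halves with the
  // pivot as the new LT and GE bound respectively.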

  /// Determine the rank by probability of CC in [First,Last]. If CC has more
  /// probability than each cluster in the range, its rank is 0.
  static unsigned caseClusterRank(const CaseCluster &CC, CaseClusterIt First,
                                  CaseClusterIt Last);

  /// Emit comparison and split W into two subtrees.
  void splitWorkItem(SwitchWorkList &WorkList, const SwitchWorkListItem &W,
                     Value *Cond, MachineBasicBlock *SwitchMBB);

  /// Lower W.
  void lowerWorkItem(SwitchWorkListItem W, Value *Cond,
                     MachineBasicBlock *SwitchMBB,
                     MachineBasicBlock *DefaultMBB);

  /// A class which encapsulates all of the information needed to generate a
  /// stack protector check and signals to isel via its state being initialized
  /// that a stack protector needs to be generated.
  ///
  /// *NOTE* The following is a high level documentation of SelectionDAG Stack
  /// Protector Generation. The reason that it is placed here is for a lack of
  /// other good places to stick it.
  ///
  /// High Level Overview of SelectionDAG Stack Protector Generation:
  ///
  /// Previously, generation of stack protectors was done exclusively in the
  /// pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
  /// splitting basic blocks at the IR level to create the success/failure basic
  /// blocks in the tail of the basic block in question. As a result of this,
  /// calls that would have qualified for the sibling call optimization were no
  /// longer eligible for optimization since said calls were no longer right in
  /// the "tail position" (i.e. the immediate predecessor of a ReturnInst
  /// instruction).
  ///
  /// Then it was noticed that since the sibling call optimization causes the
  /// callee to reuse the caller's stack, if we could delay the generation of
  /// the stack protector check until later in CodeGen after the sibling call
  /// decision was made, we would get both the tail call optimization and the
  /// stack protector check.
  ///
  /// A few goals in solving this problem were:
  ///
  ///   1. Preserve the architecture independence of stack protector generation.
  ///
  ///   2. Preserve the normal IR level stack protector check for platforms like
  ///      OpenBSD for which we support platform-specific stack protector
  ///      generation.
  ///
  /// The main problem that guided the present solution is that one cannot
  /// solve this problem in an architecture independent manner at the IR level
  /// only. This is because:
  ///
  ///   1. The decision on whether or not to perform a sibling call on certain
  ///      platforms (for instance i386) requires lower level information
  ///      related to available registers that cannot be known at the IR level.
  ///
  ///   2. Even if the previous point were not true, the decision on whether to
  ///      perform a tail call is done in LowerCallTo in SelectionDAG, which
  ///      occurs after the Stack Protector Pass. As a result, one would need to
  ///      put the relevant callinst into the stack protector check success
  ///      basic block (where the return inst is placed) and then move it back
  ///      later at SelectionDAG/MI time before the stack protector check if the
  ///      tail call optimization failed. The MI level option was nixed
  ///      immediately since it would require platform-specific pattern
  ///      matching. The SelectionDAG level option was nixed because
  ///      SelectionDAG only processes one IR level basic block at a time,
  ///      implying one could not create a DAG Combine to move the callinst.
  ///
  /// To get around this problem a few things were realized:
  ///
  ///   1. While one cannot handle multiple IR level basic blocks at the
  ///      SelectionDAG Level, one can generate multiple machine basic blocks
  ///      for one IR level basic block. This is how we handle bit tests and
  ///      switches.
  ///
  ///   2. At the MI level, tail calls are represented via a special return
  ///      MIInst called "tcreturn". Thus if we know the basic block in which we
  ///      wish to insert the stack protector check, we get the correct behavior
  ///      by always inserting the stack protector check right before the return
  ///      statement. This is a "magical transformation" since no matter where
  ///      the stack protector check intrinsic is, we always insert the stack
  ///      protector check code at the end of the BB.
  ///
  /// Given the aforementioned constraints, the following solution was devised:
  ///
  ///   1. On platforms that do not support SelectionDAG stack protector check
  ///      generation, allow for the normal IR level stack protector check
  ///      generation to continue.
  ///
  ///   2. On platforms that do support SelectionDAG stack protector check
  ///      generation:
  ///
  ///     a. Use the IR level stack protector pass to decide if a stack
  ///        protector is required/which BB we insert the stack protector check
  ///        in by reusing the logic already therein. If we wish to generate a
  ///        stack protector check in a basic block, we place a special IR
  ///        intrinsic called llvm.stackprotectorcheck right before the BB's
  ///        returninst or if there is a callinst that could potentially be
  ///        sibling call optimized, before the call inst.
  ///
  ///     b. Then when a BB with said intrinsic is processed, we codegen the BB
  ///        normally via SelectBasicBlock. In said process, when we visit the
  ///        stack protector check, we do not actually emit anything into the
  ///        BB. Instead, we just initialize the stack protector descriptor
  ///        class (which involves stashing information/creating the success
  ///        mbb and the failure mbb if we have not created one for this
  ///        function yet) and export the guard variable that we are going to
  ///        compare.
  ///
  ///     c. After we finish selecting the basic block, in FinishBasicBlock if
  ///        the StackProtectorDescriptor attached to the SelectionDAGBuilder is
  ///        initialized, we produce the validation code with one of these
  ///        techniques:
  ///          1) with a call to a guard check function
  ///          2) with inlined instrumentation
  ///
  ///        1) We insert a call to the check function before the terminator.
  ///
  ///        2) We first find a splice point in the parent basic block
  ///        before the terminator and then splice the terminator of said basic
  ///        block into the success basic block. Then we code-gen a new tail for
  ///        the parent basic block consisting of the two loads, the comparison,
  ///        and finally two branches to the success/failure basic blocks. We
  ///        conclude by code-gening the failure basic block if we have not
  ///        code-gened it already (all stack protector checks we generate in
  ///        the same function use the same failure basic block).
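  ///
  /// Illustrative schematic of the inlined instrumentation (block names and
  /// values are invented; the real lowering lives in visitSPDescriptorParent
  /// and visitSPDescriptorFailure):
  ///
  ///   ParentMBB:
  ///     ...                        ; original tail up to the splice point
  ///     %guard  = load <stack guard slot>
  ///     %canary = load <frame canary slot>
  ///     if (%guard == %canary) br SuccessMBB else br FailureMBB
  ///   SuccessMBB:                  ; receives ParentMBB's spliced terminator
  ///     ret
  ///   FailureMBB:                  ; shared by every check in the function
  ///     call __stack_chk_fail()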
  class StackProtectorDescriptor {
  public:
    StackProtectorDescriptor()
        : ParentMBB(nullptr), SuccessMBB(nullptr), FailureMBB(nullptr) {}

    /// Returns true if all fields of the stack protector descriptor are
    /// initialized implying that we should/are ready to emit a stack protector.
    bool shouldEmitStackProtector() const {
      return ParentMBB && SuccessMBB && FailureMBB;
    }

    bool shouldEmitFunctionBasedCheckStackProtector() const {
      return ParentMBB && !SuccessMBB && !FailureMBB;
    }

    /// Initialize the stack protector descriptor structure for a new basic
    /// block.
    void initialize(const BasicBlock *BB, MachineBasicBlock *MBB,
                    bool FunctionBasedInstrumentation) {
      // Make sure we are not initialized yet.
      assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
             "already initialized!");
      ParentMBB = MBB;
      if (!FunctionBasedInstrumentation) {
        SuccessMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ true);
        FailureMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
      }
    }

    /// Reset state that changes when we handle different basic blocks.
    ///
    /// This currently includes:
    ///
    /// 1. The specific basic block we are generating a
    /// stack protector for (ParentMBB).
    ///
    /// 2. The successor machine basic block that will contain the tail of
    /// parent mbb after we create the stack protector check (SuccessMBB). This
    /// BB is visited only on stack protector check success.
    void resetPerBBState() {
      ParentMBB = nullptr;
      SuccessMBB = nullptr;
    }

    /// Reset state that only changes when we switch functions.
    ///
    /// This currently includes:
    ///
    /// 1. FailureMBB since we reuse the failure code path for all stack
    /// protector checks created in an individual function.
    ///
    /// 2. The guard variable since the guard variable we are checking against
    /// is always the same.
    void resetPerFunctionState() {
      FailureMBB = nullptr;
    }

    MachineBasicBlock *getParentMBB() { return ParentMBB; }
    MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
    MachineBasicBlock *getFailureMBB() { return FailureMBB; }

  private:
    /// The basic block for which we are generating the stack protector.
    ///
    /// As a result of stack protector generation, we will splice the
    /// terminators of this basic block into the successor mbb SuccessMBB and
    /// replace it with a compare/branch to the successor mbbs
    /// SuccessMBB/FailureMBB depending on whether or not the stack protector
    /// was violated.
    MachineBasicBlock *ParentMBB;

    /// A basic block visited on stack protector check success that contains the
    /// terminators of ParentMBB.
    MachineBasicBlock *SuccessMBB;

    /// A basic block visited on stack protector check failure that will
    /// contain a call to __stack_chk_fail().
    MachineBasicBlock *FailureMBB;

    /// Add a successor machine basic block to ParentMBB. If the successor mbb
    /// has not been created yet (i.e. if SuccMBB == nullptr), then the machine
    /// basic block will be created. Assign a large weight if IsLikely is true.
    MachineBasicBlock *AddSuccessorMBB(const BasicBlock *BB,
                                       MachineBasicBlock *ParentMBB,
                                       bool IsLikely,
                                       MachineBasicBlock *SuccMBB = nullptr);
  };

private:
  const TargetMachine &TM;

public:
  /// Lowest valid SDNodeOrder. The special case 0 is reserved for scheduling
  /// nodes without a corresponding SDNode.
  static const unsigned LowestSDNodeOrder = 1;

  SelectionDAG &DAG;
  const DataLayout *DL;
  AliasAnalysis *AA;
  const TargetLibraryInfo *LibInfo;

  /// SwitchCases - Vector of CaseBlock structures used to communicate
  /// SwitchInst code generation information.
  std::vector<CaseBlock> SwitchCases;
  /// JTCases - Vector of JumpTable structures used to communicate
  /// SwitchInst code generation information.
  std::vector<JumpTableBlock> JTCases;
  /// BitTestCases - Vector of BitTestBlock structures used to communicate
  /// SwitchInst code generation information.
  std::vector<BitTestBlock> BitTestCases;
  /// A StackProtectorDescriptor structure used to communicate stack protector
  /// information in between SelectBasicBlock and FinishBasicBlock.
  StackProtectorDescriptor SPDescriptor;

  // Emit PHI-node-operand constants only once even if used by multiple
  // PHI nodes.
  DenseMap<const Constant *, unsigned> ConstantsOut;

  /// FuncInfo - Information about the function as a whole.
  ///
  FunctionLoweringInfo &FuncInfo;

  /// GFI - Garbage collection metadata for the function.
  GCFunctionInfo *GFI;

  /// LPadToCallSiteMap - Map a landing pad to the call site indexes.
  DenseMap<MachineBasicBlock*, SmallVector<unsigned, 4> > LPadToCallSiteMap;

  /// HasTailCall - This is set to true if a call in the current
  /// block has been translated as a tail call. In this case,
  /// no subsequent DAG nodes should be created.
  bool HasTailCall;

  LLVMContext *Context;

  SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
                      CodeGenOpt::Level ol)
      : CurInst(nullptr), SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()),
        DAG(dag), DL(nullptr), AA(nullptr), FuncInfo(funcinfo),
        HasTailCall(false) {}

  void init(GCFunctionInfo *gfi, AliasAnalysis *AA,
            const TargetLibraryInfo *li);

  /// Clear out the current SelectionDAG and the associated state and prepare
  /// this SelectionDAGBuilder object to be used for a new block. This doesn't
  /// clear out information about additional blocks that are needed to complete
  /// switch lowering or PHI node updating; that information is cleared out as
  /// it is consumed.
  void clear();

  /// Clear the dangling debug information map. This function is separated from
  /// the clear so that debug information that is dangling in a basic block can
  /// be properly resolved in a different basic block. This allows the
  /// SelectionDAG to resolve dangling debug information attached to PHI nodes.
  void clearDanglingDebugInfo();

  /// Return the current virtual root of the Selection DAG, flushing any
  /// PendingLoad items. This must be done before emitting a store or any other
  /// node that may need to be ordered after any prior load instructions.
  SDValue getRoot();

  /// Similar to getRoot, but instead of flushing all the PendingLoad items,
  /// flush all the PendingExports items. It is necessary to do this before
  /// emitting a terminator instruction.
  SDValue getControlRoot();

  SDLoc getCurSDLoc() const {
    return SDLoc(CurInst, SDNodeOrder);
  }

  DebugLoc getCurDebugLoc() const {
    return CurInst ? CurInst->getDebugLoc() : DebugLoc();
  }

  void CopyValueToVirtualRegister(const Value *V, unsigned Reg);

  void visit(const Instruction &I);

  void visit(unsigned Opcode, const User &I);

  /// getCopyFromRegs - If there was a virtual register allocated for the value
  /// V, emit CopyFromReg of the specified type Ty. Return an empty SDValue()
  /// otherwise.
  SDValue getCopyFromRegs(const Value *V, Type *Ty);

  // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
  // generate the debug data structures now that we've seen its definition.
  void resolveDanglingDebugInfo(const Value *V, SDValue Val);

  SDValue getValue(const Value *V);
  bool findValue(const Value *V) const;

  SDValue getNonRegisterValue(const Value *V);
  SDValue getValueImpl(const Value *V);

  void setValue(const Value *V, SDValue NewN) {
    SDValue &N = NodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  void setUnusedArgValue(const Value *V, SDValue NewN) {
    SDValue &N = UnusedArgNodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TW,
                            BranchProbability FW, bool InvertCond);
  void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TW, BranchProbability FW,
                                    bool InvertCond);
  bool ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases);
  bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
  void CopyToExportRegsIfNeeded(const Value *V);
  void ExportFromCurrentBlock(const Value *V);
  void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall,
                   const BasicBlock *EHPadBB = nullptr);

  // Lower range metadata from 0 to N to assert zext to an integer of nearest
  // floor power of two.
  SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I,
                                 SDValue Op);

  void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI,
                                ImmutableCallSite CS, unsigned ArgIdx,
                                unsigned NumArgs, SDValue Callee,
                                Type *ReturnTy, bool IsPatchPoint);

  std::pair<SDValue, SDValue>
  lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
                 const BasicBlock *EHPadBB = nullptr);

  /// UpdateSplitBlock - When an MBB was split during scheduling, update the
  /// references that need to refer to the last resulting block.
  void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last);

  /// Describes a gc.statepoint or a gc.statepoint-like construct for the
  /// purposes of lowering into a STATEPOINT node.
  struct StatepointLoweringInfo {
    /// Bases[i] is the base pointer for Ptrs[i]. Together they denote the set
    /// of gc pointers this STATEPOINT has to relocate.
    SmallVector<const Value *, 16> Bases;
    SmallVector<const Value *, 16> Ptrs;

    /// The set of gc.relocate calls associated with this gc.statepoint.
    SmallVector<const GCRelocateInst *, 16> GCRelocates;

    /// The full list of gc arguments to the gc.statepoint being lowered.
    ArrayRef<const Use> GCArgs;

    /// The gc.statepoint instruction.
    const Instruction *StatepointInstr = nullptr;

    /// The list of gc transition arguments present in the gc.statepoint being
    /// lowered.
    ArrayRef<const Use> GCTransitionArgs;

    /// The ID that the resulting STATEPOINT instruction has to report.
    unsigned ID = -1;

    /// Information regarding the underlying call instruction.
    TargetLowering::CallLoweringInfo CLI;

    /// The deoptimization state associated with this gc.statepoint call, if
    /// any.
    ArrayRef<const Use> DeoptState;

    /// Flags associated with the meta arguments being lowered.
    uint64_t StatepointFlags = -1;

    /// The number of patchable bytes the call needs to get lowered into.
    unsigned NumPatchBytes = -1;

    /// The exception handling unwind destination, in case this represents an
    /// invoke of gc.statepoint.
    const BasicBlock *EHPadBB = nullptr;

    explicit StatepointLoweringInfo(SelectionDAG &DAG) : CLI(DAG) {}
  };

  /// Lower \p SLI into a STATEPOINT instruction.
  SDValue LowerAsSTATEPOINT(StatepointLoweringInfo &SLI);

  // This function is responsible for the whole statepoint lowering process.
  // It uniformly handles invoke and call statepoints.
  void LowerStatepoint(ImmutableStatepoint Statepoint,
                       const BasicBlock *EHPadBB = nullptr);

  void LowerCallSiteWithDeoptBundle(ImmutableCallSite CS, SDValue Callee,
                                    const BasicBlock *EHPadBB);

  void LowerDeoptimizeCall(const CallInst *CI);
  void LowerDeoptimizingReturn();

  void LowerCallSiteWithDeoptBundleImpl(ImmutableCallSite CS, SDValue Callee,
                                        const BasicBlock *EHPadBB,
                                        bool VarArgDisallowed,
                                        bool ForceVoidReturnTy);

  /// Returns the type of FrameIndex and TargetFrameIndex nodes.
  MVT getFrameIndexTy() {
    return DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout());
  }

private:
  // Terminator instructions.
  void visitRet(const ReturnInst &I);
  void visitBr(const BranchInst &I);
  void visitSwitch(const SwitchInst &I);
  void visitIndirectBr(const IndirectBrInst &I);
  void visitUnreachable(const UnreachableInst &I);
  void visitCleanupRet(const CleanupReturnInst &I);
  void visitCatchSwitch(const CatchSwitchInst &I);
  void visitCatchRet(const CatchReturnInst &I);
  void visitCatchPad(const CatchPadInst &I);
  void visitCleanupPad(const CleanupPadInst &CPI);

  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;
  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

public:
  void visitSwitchCase(CaseBlock &CB,
                       MachineBasicBlock *SwitchBB);
  void visitSPDescriptorParent(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *ParentBB);
  void visitSPDescriptorFailure(StackProtectorDescriptor &SPD);
  void visitBitTestHeader(BitTestBlock &B, MachineBasicBlock *SwitchBB);
  void visitBitTestCase(BitTestBlock &BB,
                        MachineBasicBlock* NextMBB,
                        BranchProbability BranchProbToNext,
                        unsigned Reg,
                        BitTestCase &B,
                        MachineBasicBlock *SwitchBB);
  void visitJumpTable(JumpTable &JT);
  void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH,
                            MachineBasicBlock *SwitchBB);

private:
  // These all get lowered before this pass.
  void visitInvoke(const InvokeInst &I);
  void visitResume(const ResumeInst &I);

  void visitBinary(const User &I, unsigned Opcode);
  void visitShift(const User &I, unsigned Opcode);
  void visitAdd(const User &I)  { visitBinary(I, ISD::ADD); }
  void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); }
  void visitSub(const User &I)  { visitBinary(I, ISD::SUB); }
  void visitFSub(const User &I);
  void visitMul(const User &I)  { visitBinary(I, ISD::MUL); }
  void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); }
  void visitURem(const User &I) { visitBinary(I, ISD::UREM); }
  void visitSRem(const User &I) { visitBinary(I, ISD::SREM); }
  void visitFRem(const User &I) { visitBinary(I, ISD::FREM); }
  void visitUDiv(const User &I) { visitBinary(I, ISD::UDIV); }
  void visitSDiv(const User &I);
  void visitFDiv(const User &I) { visitBinary(I, ISD::FDIV); }
  void visitAnd (const User &I) { visitBinary(I, ISD::AND); }
  void visitOr  (const User &I) { visitBinary(I, ISD::OR); }
  void visitXor (const User &I) { visitBinary(I, ISD::XOR); }
  void visitShl (const User &I) { visitShift(I, ISD::SHL); }
  void visitLShr(const User &I) { visitShift(I, ISD::SRL); }
  void visitAShr(const User &I) { visitShift(I, ISD::SRA); }
  void visitICmp(const User &I);
  void visitFCmp(const User &I);
  // Visit the conversion instructions.
  void visitTrunc(const User &I);
  void visitZExt(const User &I);
  void visitSExt(const User &I);
  void visitFPTrunc(const User &I);
  void visitFPExt(const User &I);
  void visitFPToUI(const User &I);
  void visitFPToSI(const User &I);
  void visitUIToFP(const User &I);
  void visitSIToFP(const User &I);
  void visitPtrToInt(const User &I);
  void visitIntToPtr(const User &I);
  void visitBitCast(const User &I);
  void visitAddrSpaceCast(const User &I);

  void visitExtractElement(const User &I);
  void visitInsertElement(const User &I);
  void visitShuffleVector(const User &I);

  void visitExtractValue(const User &I);
  void visitInsertValue(const User &I);
  void visitLandingPad(const LandingPadInst &I);

  void visitGetElementPtr(const User &I);
  void visitSelect(const User &I);

  void visitAlloca(const AllocaInst &I);
  void visitLoad(const LoadInst &I);
  void visitStore(const StoreInst &I);
  void visitMaskedLoad(const CallInst &I, bool IsExpanding = false);
  void visitMaskedStore(const CallInst &I, bool IsCompressing = false);
  void visitMaskedGather(const CallInst &I);
  void visitMaskedScatter(const CallInst &I);
  void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
  void visitAtomicRMW(const AtomicRMWInst &I);
  void visitFence(const FenceInst &I);
  void visitPHI(const PHINode &I);
  void visitCall(const CallInst &I);
  bool visitMemCmpCall(const CallInst &I);
  bool visitMemPCpyCall(const CallInst &I);
  bool visitMemChrCall(const CallInst &I);
  bool visitStrCpyCall(const CallInst &I, bool isStpcpy);
  bool visitStrCmpCall(const CallInst &I);
  bool visitStrLenCall(const CallInst &I);
  bool visitStrNLenCall(const CallInst &I);
  bool visitUnaryFloatCall(const CallInst &I, unsigned Opcode);
  bool visitBinaryFloatCall(const CallInst &I, unsigned Opcode);
  void visitAtomicLoad(const LoadInst &I);
  void visitAtomicStore(const StoreInst &I);
  void visitLoadFromSwiftError(const LoadInst &I);
  void visitStoreToSwiftError(const StoreInst &I);

  void visitInlineAsm(ImmutableCallSite CS);
  const char *visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
  void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI);

  void visitVAStart(const CallInst &I);
  void visitVAArg(const VAArgInst &I);
  void visitVAEnd(const CallInst &I);
  void visitVACopy(const CallInst &I);
  void visitStackmap(const CallInst &I);
  void visitPatchpoint(ImmutableCallSite CS,
                       const BasicBlock *EHPadBB = nullptr);

  // These two are implemented in StatepointLowering.cpp.
  void visitGCRelocate(const GCRelocateInst &I);
  void visitGCResult(const GCResultInst &I);

  void visitVectorReduce(const CallInst &I, unsigned Intrinsic);

  void visitUserOp1(const Instruction &I) {
    llvm_unreachable("UserOp1 should not exist at instruction selection time!");
  }
  void visitUserOp2(const Instruction &I) {
    llvm_unreachable("UserOp2 should not exist at instruction selection time!");
  }

  void processIntegerCallValue(const Instruction &I,
                               SDValue Value, bool IsSigned);

  void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);

  void emitInlineAsmError(ImmutableCallSite CS, const Twine &Message);

  /// EmitFuncArgumentDbgValue - If V is a function argument then create the
  /// corresponding DBG_VALUE machine instruction for it now. At the end of
  /// instruction selection, they will be inserted into the entry BB.
  bool EmitFuncArgumentDbgValue(const Value *V, DILocalVariable *Variable,
                                DIExpression *Expr, DILocation *DL,
                                int64_t Offset, bool IsDbgDeclare,
                                const SDValue &N);

  /// Return the next block after MBB, or nullptr if there is none.
  MachineBasicBlock *NextBlock(MachineBasicBlock *MBB);

  /// Update the DAG and DAG builder with the relevant information after
  /// a new root node has been created which could be a tail call.
  void updateDAGForMaybeTailCall(SDValue MaybeTC);

  /// Return the appropriate SDDbgValue based on N.
  SDDbgValue *getDbgValue(SDValue N, DILocalVariable *Variable,
                          DIExpression *Expr, int64_t Offset,
                          const DebugLoc &dl, unsigned DbgSDNodeOrder);
};

/// RegsForValue - This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information about
/// the value. The most common situation is to represent one value at a time,
/// but struct or array values are handled element-wise as multiple values. The
/// splitting of aggregates is performed recursively, so that we never have
/// aggregate-typed registers. The values at this point do not necessarily have
/// legal types, so each value may require one or more registers of some legal
/// type.
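///
/// Illustrative example (hedged; register numbers invented): on a target where
/// i64 is not a legal type, a single IR value of type {i64, i32} could end up
/// with
///   ValueVTs = {i64, i32}    // one entry per flattened value
///   RegVTs   = {i32, i32}    // the register type chosen for each value
///   Regs     = {r1, r2, r3}  // the i64 expanded into two i32 registers
///   RegCount = {2, 1}
/// where r1..r3 denote virtual registers.
///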
struct RegsForValue {
  /// The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  SmallVector<EVT, 4> ValueVTs;

  /// The value types of the registers. This is the same size as ValueVTs and it
  /// records, for each value, what the type of the assigned register or
  /// registers are. (Individual values are never synthesized from more than one
  /// type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function; however, with physical registers
  /// it is necessary to have a separate record of the types.
  SmallVector<MVT, 4> RegVTs;

  /// This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  SmallVector<unsigned, 4> Regs;

  /// This list holds the number of registers for each value.
  SmallVector<unsigned, 4> RegCount;

  /// Records if this value needs to be treated in an ABI dependent manner,
  /// different to normal type legalization.
  bool IsABIMangled;

  RegsForValue();

  RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt,
               bool IsABIMangledValue = false);

  RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
               const DataLayout &DL, unsigned Reg, Type *Ty,
               bool IsABIMangledValue = false);

  /// Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
    RegCount.push_back(RHS.Regs.size());
  }

  /// Emit a series of CopyFromReg nodes that copies from this value and returns
  /// the result as a ValueVTs value. This uses Chain/Flag as the input and
  /// updates them for the output Chain/Flag. If the Flag pointer is nullptr, no
  /// flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
                          const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                          const Value *V = nullptr) const;

  /// Emit a series of CopyToReg nodes that copies the specified value into the
  /// registers specified by this object. This uses Chain/Flag as the input and
  /// updates them for the output Chain/Flag. If the Flag pointer is nullptr, no
  /// flag is used. If V is not nullptr, then it is used in printing better
  /// diagnostic messages on error.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, SDValue *Flag, const Value *V = nullptr,
                     ISD::NodeType PreferredExtendType = ISD::ANY_EXTEND) const;

  /// Add this value to the specified inlineasm node operand list. This adds the
  /// code marker, matching input operand index (if applicable), and includes
  /// the number of values added into it.
  void AddInlineAsmOperands(unsigned Kind, bool HasMatching,
                            unsigned MatchingIdx, const SDLoc &dl,
                            SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
};

} // end namespace llvm

#endif // LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H