//===- SelectionDAGBuilder.h - Selection-DAG building -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H

#include "StatepointLowering.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <numeric>
#include <utility>
#include <vector>

namespace llvm {
class AAResults;
class AllocaInst;
class AtomicCmpXchgInst;
class AtomicRMWInst;
class BasicBlock;
class BranchInst;
class CallBase;
class CallBrInst;
class CallInst;
class CatchPadInst;
class CatchReturnInst;
class CatchSwitchInst;
class CleanupPadInst;
class CleanupReturnInst;
class Constant;
class ConstrainedFPIntrinsic;
class DataLayout;
class DbgValueInst;
class DIExpression;
class DILocalVariable;
class DILocation;
class FenceInst;
class FreezeInst;
class FunctionLoweringInfo;
class GCFunctionInfo;
class GCRelocateInst;
class GCResultInst;
class GCStatepointInst;
class IndirectBrInst;
class InvokeInst;
class LandingPadInst;
class LLVMContext;
class LoadInst;
class MachineBasicBlock;
class PHINode;
class ResumeInst;
class ReturnInst;
class SDDbgValue;
class SelectionDAG;
class StoreInst;
class SwiftErrorValueTracking;
class SwitchInst;
class TargetLibraryInfo;
class TargetMachine;
class Twine;
class Type;
class UnreachableInst;
class Use;
class User;
class VAArgInst;
class Value;

//===----------------------------------------------------------------------===//
/// SelectionDAGBuilder - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
///
class SelectionDAGBuilder {
  /// The current instruction being visited.
  const Instruction *CurInst = nullptr;

  DenseMap<const Value*, SDValue> NodeMap;

  /// Maps argument values for unused arguments. This is used
  /// to preserve debug information for incoming arguments.
  DenseMap<const Value*, SDValue> UnusedArgNodeMap;

  /// Helper type for DanglingDebugInfoMap.
  class DanglingDebugInfo {
    const DbgValueInst* DI = nullptr;
    DebugLoc dl;
    unsigned SDNodeOrder = 0;

  public:
    DanglingDebugInfo() = default;
    DanglingDebugInfo(const DbgValueInst *di, DebugLoc DL, unsigned SDNO)
        : DI(di), dl(std::move(DL)), SDNodeOrder(SDNO) {}

    const DbgValueInst* getDI() { return DI; }
    DebugLoc getdl() { return dl; }
    unsigned getSDNodeOrder() { return SDNodeOrder; }
  };

  /// Helper type for DanglingDebugInfoMap.
  typedef std::vector<DanglingDebugInfo> DanglingDebugInfoVector;

  /// Keeps track of dbg_values for which we have not yet seen the referent.
  /// We defer handling these until we do see it.
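  ///
  /// Because dbg.value operands are wrapped in metadata, they are exempt from
  /// SSA dominance checks, so IR like the following sketch (an assumed
  /// example, not taken from the source) is legal, and its debug info dangles
  /// until %x is visited:
  /// \code
  ///   call void @llvm.dbg.value(metadata i32 %x, metadata !var,
  ///                             metadata !DIExpression())
  ///   %x = add i32 %a, %b
  /// \endcode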
  MapVector<const Value*, DanglingDebugInfoVector> DanglingDebugInfoMap;

  /// Loads are not emitted to the program immediately. We bunch them up and
  /// then emit token factor nodes when possible. This allows us to get simple
  /// disambiguation between loads without worrying about alias analysis.
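  ///
  /// An illustrative sketch (not a verbatim excerpt from this file): getRoot()
  /// later merges the pending load chains into a single chain roughly like so:
  /// \code
  ///   SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
  ///                              MVT::Other, PendingLoads);
  /// \endcode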
  SmallVector<SDValue, 8> PendingLoads;

  /// State used while lowering a statepoint sequence (gc_statepoint,
  /// gc_relocate, and gc_result). See StatepointLowering.h/cpp for details.
  StatepointLoweringState StatepointLowering;

private:
  /// CopyToReg nodes that copy values to virtual registers for export to other
  /// blocks need to be emitted before any terminator instruction, but they have
  /// no other ordering requirements. We bunch them up and then emit a single
  /// token factor for them just before terminator instructions.
  SmallVector<SDValue, 8> PendingExports;

  /// Similar to loads, nodes corresponding to constrained FP intrinsics are
  /// bunched up and emitted when necessary. These can be moved across each
  /// other and any (normal) memory operation (load or store), but not across
  /// calls or instructions having unspecified side effects. As a special
  /// case, constrained FP intrinsics using fpexcept.strict may not be deleted
  /// even if otherwise unused, so they need to be chained before any
  /// terminator instruction (like PendingExports). We track the latter
  /// set of nodes in a separate list.
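  ///
  /// An illustrative sketch (assumed, with placeholder names VT, Chain, LHS,
  /// and RHS; not a verbatim excerpt): the out-chain of a strict FP node is
  /// deferred into one of these lists rather than becoming the DAG root
  /// immediately:
  /// \code
  ///   SDValue Res = DAG.getNode(ISD::STRICT_FADD, getCurSDLoc(),
  ///                             DAG.getVTList(VT, MVT::Other),
  ///                             {Chain, LHS, RHS});
  ///   PendingConstrainedFP.push_back(Res.getValue(1));
  /// \endcode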
  SmallVector<SDValue, 8> PendingConstrainedFP;
  SmallVector<SDValue, 8> PendingConstrainedFPStrict;

  /// Update root to include all chains from the Pending list.
  SDValue updateRoot(SmallVectorImpl<SDValue> &Pending);

  /// A unique monotonically increasing number used to order the SDNodes we
  /// create.
  unsigned SDNodeOrder;

  /// Determine the rank by weight of CC in [First,Last]. If CC has more weight
  /// than each cluster in the range, its rank is 0.
  unsigned caseClusterRank(const SwitchCG::CaseCluster &CC,
                           SwitchCG::CaseClusterIt First,
                           SwitchCG::CaseClusterIt Last);

  /// Emit comparison and split W into two subtrees.
  void splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
                     const SwitchCG::SwitchWorkListItem &W, Value *Cond,
                     MachineBasicBlock *SwitchMBB);

  /// Lower W.
  void lowerWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
                     MachineBasicBlock *SwitchMBB,
                     MachineBasicBlock *DefaultMBB);

  /// Peel the top probability case if it exceeds the threshold.
  MachineBasicBlock *
  peelDominantCaseCluster(const SwitchInst &SI,
                          SwitchCG::CaseClusterVector &Clusters,
                          BranchProbability &PeeledCaseProb);

  /// A class which encapsulates all of the information needed to generate a
  /// stack protector check, and signals to isel via its state being initialized
  /// that a stack protector needs to be generated.
  ///
  /// *NOTE* The following is a high level documentation of SelectionDAG Stack
  /// Protector Generation. The reason that it is placed here is for a lack of
  /// other good places to stick it.
  ///
  /// High Level Overview of SelectionDAG Stack Protector Generation:
  ///
  /// Previously, generation of stack protectors was done exclusively in the
  /// pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
  /// splitting basic blocks at the IR level to create the success/failure basic
  /// blocks in the tail of the basic block in question. As a result of this,
  /// calls that would have qualified for the sibling call optimization were no
  /// longer eligible for optimization since said calls were no longer right in
  /// the "tail position" (i.e. the immediate predecessor of a ReturnInst
  /// instruction).
  ///
  /// Then it was noticed that since the sibling call optimization causes the
  /// callee to reuse the caller's stack, if we could delay the generation of
  /// the stack protector check until later in CodeGen after the sibling call
  /// decision was made, we would get both the tail call optimization and the
  /// stack protector check generation.
  ///
  /// A few goals in solving this problem were:
  ///
  ///   1. Preserve the architecture independence of stack protector generation.
  ///
  ///   2. Preserve the normal IR level stack protector check for platforms like
  ///      OpenBSD for which we support platform-specific stack protector
  ///      generation.
  ///
  /// The main problem that guided the present solution is that one can not
  /// solve this problem in an architecture independent manner at the IR level
  /// only. This is because:
  ///
  ///   1. The decision on whether or not to perform a sibling call on certain
  ///      platforms (for instance i386) requires lower level information
  ///      related to available registers that can not be known at the IR level.
  ///
  ///   2. Even if the previous point were not true, the decision on whether to
  ///      perform a tail call is done in LowerCallTo in SelectionDAG which
  ///      occurs after the Stack Protector Pass. As a result, one would need to
  ///      put the relevant callinst into the stack protector check success
  ///      basic block (where the return inst is placed) and then move it back
  ///      later at SelectionDAG/MI time before the stack protector check if the
  ///      tail call optimization failed. The MI level option was nixed
  ///      immediately since it would require platform-specific pattern
  ///      matching. The SelectionDAG level option was nixed because
  ///      SelectionDAG only processes one IR level basic block at a time
  ///      implying one could not create a DAG Combine to move the callinst.
  ///
  /// To get around this problem a few things were realized:
  ///
  ///   1. While one can not handle multiple IR level basic blocks at the
  ///      SelectionDAG Level, one can generate multiple machine basic blocks
  ///      for one IR level basic block. This is how we handle bit tests and
  ///      switches.
  ///
  ///   2. At the MI level, tail calls are represented via a special return
  ///      MIInst called "tcreturn". Thus if we know the basic block in which we
  ///      wish to insert the stack protector check, we get the correct behavior
  ///      by always inserting the stack protector check right before the return
  ///      statement. This is a "magical transformation" since no matter where
  ///      the stack protector check intrinsic is, we always insert the stack
  ///      protector check code at the end of the BB.
  ///
  /// Given the aforementioned constraints, the following solution was devised:
  ///
  ///   1. On platforms that do not support SelectionDAG stack protector check
  ///      generation, allow for the normal IR level stack protector check
  ///      generation to continue.
  ///
  ///   2. On platforms that do support SelectionDAG stack protector check
  ///      generation:
  ///
  ///     a. Use the IR level stack protector pass to decide if a stack
  ///        protector is required/which BB we insert the stack protector check
  ///        in by reusing the logic already therein. If we wish to generate a
  ///        stack protector check in a basic block, we place a special IR
  ///        intrinsic called llvm.stackprotectorcheck right before the BB's
  ///        returninst or if there is a callinst that could potentially be
  ///        sibling call optimized, before the call inst.
  ///
  ///     b. Then when a BB with said intrinsic is processed, we codegen the BB
  ///        normally via SelectBasicBlock. In said process, when we visit the
  ///        stack protector check, we do not actually emit anything into the
  ///        BB. Instead, we just initialize the stack protector descriptor
  ///        class (which involves stashing information/creating the success
  ///        mbb and the failure mbb if we have not created one for this
  ///        function yet) and export the guard variable that we are going to
  ///        compare.
  ///
  ///     c. After we finish selecting the basic block, in FinishBasicBlock if
  ///        the StackProtectorDescriptor attached to the SelectionDAGBuilder is
  ///        initialized, we produce the validation code with one of these
  ///        techniques:
  ///          1) with a call to a guard check function
  ///          2) with inlined instrumentation
  ///
  ///        1) We insert a call to the check function before the terminator.
  ///
  ///        2) We first find a splice point in the parent basic block
  ///        before the terminator and then splice the terminator of said basic
  ///        block into the success basic block. Then we code-gen a new tail for
  ///        the parent basic block consisting of the two loads, the comparison,
  ///        and finally two branches to the success/failure basic blocks. We
  ///        conclude by code-gening the failure basic block if we have not
  ///        code-gened it already (all stack protector checks we generate in
  ///        the same function use the same failure basic block).
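  ///
  /// An illustrative sketch (assumed shapes, not verbatim compiler output) of
  /// the inlined-instrumentation case after FinishBasicBlock runs:
  /// \code
  ///   ParentMBB:                ; new tail: loads, compare, branch
  ///     %guard  = load <stack guard>
  ///     %canary = load <frame canary slot>
  ///     cmp %guard, %canary
  ///     brcond eq SuccessMBB, else FailureMBB
  ///   SuccessMBB:               ; receives ParentMBB's spliced terminators
  ///     ret
  ///   FailureMBB:               ; shared by all checks in this function
  ///     call void @__stack_chk_fail()
  /// \endcode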
  class StackProtectorDescriptor {
  public:
    StackProtectorDescriptor() = default;

    /// Returns true if all fields of the stack protector descriptor are
    /// initialized implying that we should/are ready to emit a stack protector.
    bool shouldEmitStackProtector() const {
      return ParentMBB && SuccessMBB && FailureMBB;
    }

    bool shouldEmitFunctionBasedCheckStackProtector() const {
      return ParentMBB && !SuccessMBB && !FailureMBB;
    }

    /// Initialize the stack protector descriptor structure for a new basic
    /// block.
    void initialize(const BasicBlock *BB, MachineBasicBlock *MBB,
                    bool FunctionBasedInstrumentation) {
      // Make sure we are not initialized yet.
      assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
             "already initialized!");
      ParentMBB = MBB;
      if (!FunctionBasedInstrumentation) {
        SuccessMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ true);
        FailureMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
      }
    }

    /// Reset state that changes when we handle different basic blocks.
    ///
    /// This currently includes:
    ///
    /// 1. The specific basic block we are generating a
    /// stack protector for (ParentMBB).
    ///
    /// 2. The successor machine basic block that will contain the tail of
    /// parent mbb after we create the stack protector check (SuccessMBB). This
    /// BB is visited only on stack protector check success.
    void resetPerBBState() {
      ParentMBB = nullptr;
      SuccessMBB = nullptr;
    }

    /// Reset state that only changes when we switch functions.
    ///
    /// This currently includes:
    ///
    /// 1. FailureMBB since we reuse the failure code path for all stack
    /// protector checks created in an individual function.
    ///
    /// 2. The guard variable since the guard variable we are checking against
    /// is always the same.
    void resetPerFunctionState() {
      FailureMBB = nullptr;
    }

    MachineBasicBlock *getParentMBB() { return ParentMBB; }
    MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
    MachineBasicBlock *getFailureMBB() { return FailureMBB; }

  private:
    /// The basic block for which we are generating the stack protector.
    ///
    /// As a result of stack protector generation, we will splice the
    /// terminators of this basic block into the successor mbb SuccessMBB and
    /// replace it with a compare/branch to the successor mbbs
    /// SuccessMBB/FailureMBB depending on whether or not the stack protector
    /// check fails.
    MachineBasicBlock *ParentMBB = nullptr;

    /// A basic block visited on stack protector check success that contains the
    /// terminators of ParentMBB.
    MachineBasicBlock *SuccessMBB = nullptr;

    /// The basic block visited on stack protector check failure; it will
    /// contain a call to __stack_chk_fail().
    MachineBasicBlock *FailureMBB = nullptr;

    /// Add a successor machine basic block to ParentMBB. If the successor mbb
    /// has not been created yet (i.e. if SuccMBB is null), then the machine
    /// basic block will be created. Assign a large weight if IsLikely is true.
    MachineBasicBlock *AddSuccessorMBB(const BasicBlock *BB,
                                       MachineBasicBlock *ParentMBB,
                                       bool IsLikely,
                                       MachineBasicBlock *SuccMBB = nullptr);
  };

private:
  const TargetMachine &TM;

public:
  /// Lowest valid SDNodeOrder. The special case 0 is reserved for scheduling
  /// nodes without a corresponding SDNode.
  static const unsigned LowestSDNodeOrder = 1;

  SelectionDAG &DAG;
  const DataLayout *DL = nullptr;
  AliasAnalysis *AA = nullptr;
  const TargetLibraryInfo *LibInfo;

  class SDAGSwitchLowering : public SwitchCG::SwitchLowering {
  public:
    SDAGSwitchLowering(SelectionDAGBuilder *sdb, FunctionLoweringInfo &funcinfo)
        : SwitchCG::SwitchLowering(funcinfo), SDB(sdb) {}

    virtual void addSuccessorWithProb(
        MachineBasicBlock *Src, MachineBasicBlock *Dst,
        BranchProbability Prob = BranchProbability::getUnknown()) override {
      SDB->addSuccessorWithProb(Src, Dst, Prob);
    }

  private:
    SelectionDAGBuilder *SDB;
  };

  // Data related to deferred switch lowerings. Used to construct additional
  // Basic Blocks in SelectionDAGISel::FinishBasicBlock.
  std::unique_ptr<SDAGSwitchLowering> SL;

  /// A StackProtectorDescriptor structure used to communicate stack protector
  /// information in between SelectBasicBlock and FinishBasicBlock.
  StackProtectorDescriptor SPDescriptor;

  // Emit PHI-node-operand constants only once even if used by multiple
  // PHI nodes.
  DenseMap<const Constant *, unsigned> ConstantsOut;

  /// Information about the function as a whole.
  FunctionLoweringInfo &FuncInfo;

  /// Information about the swifterror values used throughout the function.
  SwiftErrorValueTracking &SwiftError;

  /// Garbage collection metadata for the function.
  GCFunctionInfo *GFI;

  /// Map a landing pad to the call site indexes.
  DenseMap<MachineBasicBlock *, SmallVector<unsigned, 4>> LPadToCallSiteMap;

  /// This is set to true if a call in the current block has been translated as
  /// a tail call. In this case, no subsequent DAG nodes should be created.
  bool HasTailCall = false;

  LLVMContext *Context;

  SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
                      SwiftErrorValueTracking &swifterror, CodeGenOpt::Level ol)
      : SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()), DAG(dag),
        SL(std::make_unique<SDAGSwitchLowering>(this, funcinfo)),
        FuncInfo(funcinfo), SwiftError(swifterror) {}

  void init(GCFunctionInfo *gfi, AliasAnalysis *AA,
            const TargetLibraryInfo *li);

  /// Clear out the current SelectionDAG and the associated state and prepare
  /// this SelectionDAGBuilder object to be used for a new block. This doesn't
  /// clear out information about additional blocks that are needed to complete
  /// switch lowering or PHI node updating; that information is cleared out as
  /// it is consumed.
  void clear();

  /// Clear the dangling debug information map. This function is separated from
  /// clear() so that debug information that is dangling in a basic block can
  /// be properly resolved in a different basic block. This allows the
  /// SelectionDAG to resolve dangling debug information attached to PHI nodes.
  void clearDanglingDebugInfo();

  /// Return the current virtual root of the Selection DAG, flushing any
  /// PendingLoad items. This must be done before emitting a store or any other
  /// memory node that may need to be ordered after any prior load instructions.
  SDValue getMemoryRoot();

  /// Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict)
  /// items. This must be done before emitting any call or any other node that
  /// may need to be ordered after FP instructions due to other side effects.
  SDValue getRoot();

  /// Similar to getRoot, but instead of flushing all the PendingLoad items,
  /// flush all the PendingExports (and PendingConstrainedFPStrict) items.
  /// It is necessary to do this before emitting a terminator instruction.
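  ///
  /// An illustrative sketch (assumed usage with placeholder names Val, Ptr,
  /// and PtrV; not a verbatim excerpt) of how these roots are meant to be
  /// used when emitting a chained node such as a store:
  /// \code
  ///   SDValue Chain = getMemoryRoot(); // orders the store after pending loads
  ///   SDValue St = DAG.getStore(Chain, getCurSDLoc(), Val, Ptr,
  ///                             MachinePointerInfo(PtrV));
  ///   DAG.setRoot(St);
  /// \endcode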
  SDValue getControlRoot();

  SDLoc getCurSDLoc() const {
    return SDLoc(CurInst, SDNodeOrder);
  }

  DebugLoc getCurDebugLoc() const {
    return CurInst ? CurInst->getDebugLoc() : DebugLoc();
  }

  void CopyValueToVirtualRegister(const Value *V, unsigned Reg);

  void visit(const Instruction &I);

  void visit(unsigned Opcode, const User &I);

  /// If there was a virtual register allocated for the value V, emit a
  /// CopyFromReg of the specified type Ty. Return an empty SDValue() otherwise.
  SDValue getCopyFromRegs(const Value *V, Type *Ty);

  /// If we have dangling debug info that describes \p Variable, or an
  /// overlapping part of the variable considering the \p Expr, then this method
  /// will drop that debug info as it isn't valid any longer.
  void dropDanglingDebugInfo(const DILocalVariable *Variable,
                             const DIExpression *Expr);

  /// If we saw an earlier dbg_value referring to V, generate the debug data
  /// structures now that we've seen its definition.
  void resolveDanglingDebugInfo(const Value *V, SDValue Val);

  /// For the given dangling debuginfo record, perform last-ditch efforts to
  /// resolve the debuginfo to something that is represented in this DAG. If
  /// this cannot be done, produce an Undef debug value record.
  void salvageUnresolvedDbgValue(DanglingDebugInfo &DDI);

  /// For a given Value, attempt to create and record a SDDbgValue in the
  /// SelectionDAG.
  bool handleDebugValue(const Value *V, DILocalVariable *Var,
                        DIExpression *Expr, DebugLoc CurDL,
                        DebugLoc InstDL, unsigned Order);

  /// Evict any dangling debug information, attempting to salvage it first.
  void resolveOrClearDbgInfo();

  SDValue getValue(const Value *V);

  /// Return the SDNode for the specified IR value if it exists.
  SDNode *getNodeForIRValue(const Value *V) {
    if (NodeMap.find(V) == NodeMap.end())
      return nullptr;
    return NodeMap[V].getNode();
  }

  SDValue getNonRegisterValue(const Value *V);
  SDValue getValueImpl(const Value *V);

  void setValue(const Value *V, SDValue NewN) {
    SDValue &N = NodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  void setUnusedArgValue(const Value *V, SDValue NewN) {
    SDValue &N = UnusedArgNodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TProb,
                            BranchProbability FProb, bool InvertCond);
  void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TProb,
                                    BranchProbability FProb, bool InvertCond);
  bool ShouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
  bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
  void CopyToExportRegsIfNeeded(const Value *V);
  void ExportFromCurrentBlock(const Value *V);
  void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall,
                   const BasicBlock *EHPadBB = nullptr);

  // Lower range metadata from 0 to N to assert zext to an integer of nearest
  // floor power of two.
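  //
  // Illustrative example (assumed, not from the original source): for a call
  // result annotated with !range !{i64 0, i64 8}, the value can be wrapped as
  //   (AssertZext Op, i3)
  // since every value in [0, 8) fits in log2(8) = 3 bits.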
  SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I,
                                 SDValue Op);

  void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI,
                                const CallBase *Call, unsigned ArgIdx,
                                unsigned NumArgs, SDValue Callee,
                                Type *ReturnTy, bool IsPatchPoint);

  std::pair<SDValue, SDValue>
  lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
                 const BasicBlock *EHPadBB = nullptr);

  /// When an MBB was split during scheduling, update the
  /// references that need to refer to the last resulting block.
  void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last);

  /// Describes a gc.statepoint or a gc.statepoint like thing for the purposes
  /// of lowering into a STATEPOINT node.
  struct StatepointLoweringInfo {
    /// Bases[i] is the base pointer for Ptrs[i]. Together they denote the set
    /// of gc pointers this STATEPOINT has to relocate.
    SmallVector<const Value *, 16> Bases;
    SmallVector<const Value *, 16> Ptrs;

    /// The set of gc.relocate calls associated with this gc.statepoint.
    SmallVector<const GCRelocateInst *, 16> GCRelocates;

    /// The full list of gc arguments to the gc.statepoint being lowered.
    ArrayRef<const Use> GCArgs;

    /// The gc.statepoint instruction.
    const Instruction *StatepointInstr = nullptr;

    /// The list of gc transition arguments present in the gc.statepoint being
    /// lowered.
    ArrayRef<const Use> GCTransitionArgs;

    /// The ID that the resulting STATEPOINT instruction has to report.
    unsigned ID = -1;

    /// Information regarding the underlying call instruction.
    TargetLowering::CallLoweringInfo CLI;

    /// The deoptimization state associated with this gc.statepoint call, if
    /// any.
    ArrayRef<const Use> DeoptState;

    /// Flags associated with the meta arguments being lowered.
    uint64_t StatepointFlags = -1;

    /// The number of patchable bytes the call needs to get lowered into.
    unsigned NumPatchBytes = -1;

    /// The exception handling unwind destination, in case this represents an
    /// invoke of gc.statepoint.
    const BasicBlock *EHPadBB = nullptr;

    explicit StatepointLoweringInfo(SelectionDAG &DAG) : CLI(DAG) {}
  };

  /// Lower \p SI into a STATEPOINT instruction.
  SDValue LowerAsSTATEPOINT(StatepointLoweringInfo &SI);

  // This function is responsible for the whole statepoint lowering process.
  // It uniformly handles invoke and call statepoints.
  void LowerStatepoint(const GCStatepointInst &I,
                       const BasicBlock *EHPadBB = nullptr);

  void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee,
                                    const BasicBlock *EHPadBB);

  void LowerDeoptimizeCall(const CallInst *CI);
  void LowerDeoptimizingReturn();

  void LowerCallSiteWithDeoptBundleImpl(const CallBase *Call, SDValue Callee,
                                        const BasicBlock *EHPadBB,
                                        bool VarArgDisallowed,
                                        bool ForceVoidReturnTy);

  /// Returns the type of FrameIndex and TargetFrameIndex nodes.
  MVT getFrameIndexTy() {
    return DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout());
  }

private:
  // Terminator instructions.
  void visitRet(const ReturnInst &I);
  void visitBr(const BranchInst &I);
  void visitSwitch(const SwitchInst &I);
  void visitIndirectBr(const IndirectBrInst &I);
  void visitUnreachable(const UnreachableInst &I);
  void visitCleanupRet(const CleanupReturnInst &I);
  void visitCatchSwitch(const CatchSwitchInst &I);
  void visitCatchRet(const CatchReturnInst &I);
  void visitCatchPad(const CatchPadInst &I);
  void visitCleanupPad(const CleanupPadInst &CPI);

  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;
  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

public:
  void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB);
  void visitSPDescriptorParent(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *ParentBB);
  void visitSPDescriptorFailure(StackProtectorDescriptor &SPD);
  void visitBitTestHeader(SwitchCG::BitTestBlock &B,
                          MachineBasicBlock *SwitchBB);
  void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
                        BranchProbability BranchProbToNext, unsigned Reg,
                        SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
  void visitJumpTable(SwitchCG::JumpTable &JT);
  void visitJumpTableHeader(SwitchCG::JumpTable &JT,
                            SwitchCG::JumpTableHeader &JTH,
                            MachineBasicBlock *SwitchBB);

private:
  // These all get lowered before this pass.
  void visitInvoke(const InvokeInst &I);
  void visitCallBr(const CallBrInst &I);
  void visitResume(const ResumeInst &I);

  void visitUnary(const User &I, unsigned Opcode);
  void visitFNeg(const User &I) { visitUnary(I, ISD::FNEG); }

  void visitBinary(const User &I, unsigned Opcode);
  void visitShift(const User &I, unsigned Opcode);
  void visitAdd(const User &I)  { visitBinary(I, ISD::ADD); }
  void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); }
  void visitSub(const User &I)  { visitBinary(I, ISD::SUB); }
  void visitFSub(const User &I);
  void visitMul(const User &I)  { visitBinary(I, ISD::MUL); }
  void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); }
  void visitURem(const User &I) { visitBinary(I, ISD::UREM); }
  void visitSRem(const User &I) { visitBinary(I, ISD::SREM); }
  void visitFRem(const User &I) { visitBinary(I, ISD::FREM); }
  void visitUDiv(const User &I) { visitBinary(I, ISD::UDIV); }
  void visitSDiv(const User &I);
  void visitFDiv(const User &I) { visitBinary(I, ISD::FDIV); }
  void visitAnd (const User &I) { visitBinary(I, ISD::AND); }
  void visitOr  (const User &I) { visitBinary(I, ISD::OR); }
  void visitXor (const User &I) { visitBinary(I, ISD::XOR); }
  void visitShl (const User &I) { visitShift(I, ISD::SHL); }
  void visitLShr(const User &I) { visitShift(I, ISD::SRL); }
  void visitAShr(const User &I) { visitShift(I, ISD::SRA); }
  void visitICmp(const User &I);
  void visitFCmp(const User &I);
  // Visit the conversion instructions.
  void visitTrunc(const User &I);
  void visitZExt(const User &I);
  void visitSExt(const User &I);
  void visitFPTrunc(const User &I);
  void visitFPExt(const User &I);
  void visitFPToUI(const User &I);
  void visitFPToSI(const User &I);
  void visitUIToFP(const User &I);
  void visitSIToFP(const User &I);
  void visitPtrToInt(const User &I);
  void visitIntToPtr(const User &I);
  void visitBitCast(const User &I);
  void visitAddrSpaceCast(const User &I);

  void visitExtractElement(const User &I);
  void visitInsertElement(const User &I);
  void visitShuffleVector(const User &I);

  void visitExtractValue(const User &I);
  void visitInsertValue(const User &I);
  void visitLandingPad(const LandingPadInst &LP);

  void visitGetElementPtr(const User &I);
  void visitSelect(const User &I);

  void visitAlloca(const AllocaInst &I);
  void visitLoad(const LoadInst &I);
  void visitStore(const StoreInst &I);
  void visitMaskedLoad(const CallInst &I, bool IsExpanding = false);
  void visitMaskedStore(const CallInst &I, bool IsCompressing = false);
  void visitMaskedGather(const CallInst &I);
  void visitMaskedScatter(const CallInst &I);
  void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
  void visitAtomicRMW(const AtomicRMWInst &I);
  void visitFence(const FenceInst &I);
  void visitPHI(const PHINode &I);
  void visitCall(const CallInst &I);
  bool visitMemCmpCall(const CallInst &I);
  bool visitMemPCpyCall(const CallInst &I);
  bool visitMemChrCall(const CallInst &I);
  bool visitStrCpyCall(const CallInst &I, bool isStpcpy);
  bool visitStrCmpCall(const CallInst &I);
  bool visitStrLenCall(const CallInst &I);
  bool visitStrNLenCall(const CallInst &I);
  bool visitUnaryFloatCall(const CallInst &I, unsigned Opcode);
  bool visitBinaryFloatCall(const CallInst &I, unsigned Opcode);
  void visitAtomicLoad(const LoadInst &I);
  void visitAtomicStore(const StoreInst &I);
  void visitLoadFromSwiftError(const LoadInst &I);
  void visitStoreToSwiftError(const StoreInst &I);
  void visitFreeze(const FreezeInst &I);

  void visitInlineAsm(const CallBase &Call);
  void visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
  void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI);

  void visitVAStart(const CallInst &I);
  void visitVAArg(const VAArgInst &I);
  void visitVAEnd(const CallInst &I);
  void visitVACopy(const CallInst &I);
  void visitStackmap(const CallInst &I);
  void visitPatchpoint(const CallBase &CB, const BasicBlock *EHPadBB = nullptr);

  // These two are implemented in StatepointLowering.cpp
  void visitGCRelocate(const GCRelocateInst &Relocate);
  void visitGCResult(const GCResultInst &I);

  void visitVectorReduce(const CallInst &I, unsigned Intrinsic);

  void visitUserOp1(const Instruction &I) {
    llvm_unreachable("UserOp1 should not exist at instruction selection time!");
  }
  void visitUserOp2(const Instruction &I) {
    llvm_unreachable("UserOp2 should not exist at instruction selection time!");
  }

  void processIntegerCallValue(const Instruction &I,
                               SDValue Value, bool IsSigned);

  void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);

  void emitInlineAsmError(const CallBase &Call, const Twine &Message);

  /// If V is a function argument then create corresponding DBG_VALUE machine
  /// instruction for it now. At the end of instruction selection, they will be
  /// inserted into the entry BB.
  bool EmitFuncArgumentDbgValue(const Value *V, DILocalVariable *Variable,
                                DIExpression *Expr, DILocation *DL,
                                bool IsDbgDeclare, const SDValue &N);

  /// Return the next block after MBB, or nullptr if there is none.
  MachineBasicBlock *NextBlock(MachineBasicBlock *MBB);

  /// Update the DAG and DAG builder with the relevant information after
  /// a new root node has been created which could be a tail call.
  void updateDAGForMaybeTailCall(SDValue MaybeTC);

  /// Return the appropriate SDDbgValue based on N.
  SDDbgValue *getDbgValue(SDValue N, DILocalVariable *Variable,
                          DIExpression *Expr, const DebugLoc &dl,
                          unsigned DbgSDNodeOrder);

  /// Lowers CallInst to an external symbol.
  void lowerCallToExternalSymbol(const CallInst &I, const char *FunctionName);
};

/// This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information about
/// the value. The most common situation is to represent one value at a time,
/// but struct or array values are handled element-wise as multiple values. The
/// splitting of aggregates is performed recursively, so that we never have
/// aggregate-typed registers. The values at this point do not necessarily have
/// legal types, so each value may require one or more registers of some legal
/// type.
///
struct RegsForValue {
  /// The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  SmallVector<EVT, 4> ValueVTs;

  /// The value types of the registers. This is the same size as ValueVTs and it
  /// records, for each value, what the type of the assigned register or
  /// registers are. (Individual values are never synthesized from more than one
  /// type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function, however with physical registers
  /// it is necessary to have a separate record of the types.
  SmallVector<MVT, 4> RegVTs;

  /// This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  SmallVector<unsigned, 4> Regs;

  /// This list holds the number of registers for each value.
  SmallVector<unsigned, 4> RegCount;

  /// Records if this value needs to be treated in an ABI dependent manner,
  /// different from normal type legalization.
  Optional<CallingConv::ID> CallConv;

  RegsForValue() = default;
  RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt,
               Optional<CallingConv::ID> CC = None);
  RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
               const DataLayout &DL, unsigned Reg, Type *Ty,
               Optional<CallingConv::ID> CC);

  bool isABIMangled() const {
    return CallConv.hasValue();
  }

  /// Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
    RegCount.push_back(RHS.Regs.size());
  }

  /// Emit a series of CopyFromReg nodes that copies from this value and returns
  /// the result as a ValueVTs value. This uses Chain/Flag as the input and
  /// updates them for the output Chain/Flag. If the Flag pointer is nullptr, no
  /// flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
                          const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                          const Value *V = nullptr) const;

  /// Emit a series of CopyToReg nodes that copies the specified value into the
  /// registers specified by this object. This uses Chain/Flag as the input and
  /// updates them for the output Chain/Flag. If the Flag pointer is nullptr, no
  /// flag is used. If V is not nullptr, then it is used in printing better
  /// diagnostic messages on error.
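  ///
  /// An illustrative sketch (assumed usage with placeholder names TLI, Reg,
  /// Val, V, and dl; not a verbatim excerpt):
  /// \code
  ///   RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), Reg,
  ///                    V->getType(), None);
  ///   SDValue Chain = DAG.getEntryNode();
  ///   RFV.getCopyToRegs(Val, DAG, dl, Chain, /*Flag=*/nullptr, V);
  /// \endcode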
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, SDValue *Flag, const Value *V = nullptr,
                     ISD::NodeType PreferredExtendType = ISD::ANY_EXTEND) const;

  /// Add this value to the specified inlineasm node operand list. This adds the
  /// code marker, matching input operand index (if applicable), and includes
  /// the number of values added into it.
  void AddInlineAsmOperands(unsigned Code, bool HasMatching,
                            unsigned MatchingIdx, const SDLoc &dl,
                            SelectionDAG &DAG, std::vector<SDValue> &Ops) const;

  /// Check if the total RegCount is greater than one.
  bool occupiesMultipleRegs() const {
    return std::accumulate(RegCount.begin(), RegCount.end(), 0) > 1;
  }

  /// Return a list of registers and their sizes.
  SmallVector<std::pair<unsigned, unsigned>, 4> getRegsAndSizes() const;
};

} // end namespace llvm

#endif // LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H