1 //===- VPlan.h - Represent A Vectorizer Plan --------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
10 /// This file contains the declarations of the Vectorization Plan base classes:
11 /// 1. VPBasicBlock and VPRegionBlock that inherit from a common pure virtual
12 /// VPBlockBase, together implementing a Hierarchical CFG;
13 /// 2. Specializations of GraphTraits that allow VPBlockBase graphs to be
14 /// treated as proper graphs for generic algorithms;
15 /// 3. Pure virtual VPRecipeBase serving as the base class for recipes contained
16 /// within VPBasicBlocks;
17 /// 4. VPInstruction, a concrete Recipe and VPUser modeling a single planned instruction;
19 /// 5. The VPlan class holding a candidate for vectorization;
20 /// 6. The VPlanPrinter class providing a way to print a plan in dot format;
21 /// These are documented in docs/VectorizationPlan.rst.
23 //===----------------------------------------------------------------------===//
25 #ifndef LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
26 #define LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
28 #include "VPlanLoopInfo.h"
29 #include "VPlanValue.h"
30 #include "llvm/ADT/DenseMap.h"
31 #include "llvm/ADT/DepthFirstIterator.h"
32 #include "llvm/ADT/GraphTraits.h"
33 #include "llvm/ADT/Optional.h"
34 #include "llvm/ADT/SmallBitVector.h"
35 #include "llvm/ADT/SmallPtrSet.h"
36 #include "llvm/ADT/SmallSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Twine.h"
39 #include "llvm/ADT/ilist.h"
40 #include "llvm/ADT/ilist_node.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/IR/IRBuilder.h"
43 #include "llvm/Support/InstructionCost.h"
54 class InnerLoopVectorizer;
57 class RecurrenceDescriptor;
62 class VPReplicateRecipe;
65 /// Returns a calculation for the total number of elements for a given \p VF.
66 /// For fixed width vectors this value is a constant, whereas for scalable
67 /// vectors it is an expression determined at runtime.
68 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF);
70 /// A range of powers-of-2 vectorization factors with fixed start and
71 /// adjustable end. The range includes start and excludes end, e.g.,:
72 /// [1, 9) = {1, 2, 4, 8}
75 const ElementCount Start;
77 // Need not be a power of 2. If End <= Start range is empty.
80 bool isEmpty() const {
81 return End.getKnownMinValue() <= Start.getKnownMinValue();
84 VFRange(const ElementCount &Start, const ElementCount &End)
85 : Start(Start), End(End) {
86 assert(Start.isScalable() == End.isScalable() &&
87 "Both Start and End should have the same scalable flag");
88 assert(isPowerOf2_32(Start.getKnownMinValue()) &&
89 "Expected Start to be a power of 2");
93 using VPlanPtr = std::unique_ptr<VPlan>;
95 /// In what follows, the term "input IR" refers to code that is fed into the
96 /// vectorizer whereas the term "output IR" refers to code that is generated by the vectorizer.
99 /// VPLane provides a way to access lanes in both fixed width and scalable
100 /// vectors, where for the latter the lane index sometimes needs calculating
101 /// as a runtime expression.
104 /// Kind describes how to interpret Lane.
105 enum class Kind : uint8_t {
106 /// For First, Lane is the index into the first N elements of a
107 /// fixed-vector <N x <ElTy>> or a scalable vector <vscale x N x <ElTy>>.
109 /// For ScalableLast, Lane is the offset from the start of the last
110 /// N-element subvector in a scalable vector <vscale x N x <ElTy>>. For
111 /// example, a Lane of 0 corresponds to lane `(vscale - 1) * N`, a Lane of
112 /// 1 corresponds to `((vscale - 1) * N) + 1`, etc.
120 /// Indicates how the Lane should be interpreted, as described above.
124 VPLane(unsigned Lane, Kind LaneKind) : Lane(Lane), LaneKind(LaneKind) {}
126 static VPLane getFirstLane() { return VPLane(0, VPLane::Kind::First); }
128 static VPLane getLastLaneForVF(const ElementCount &VF) {
129 unsigned LaneOffset = VF.getKnownMinValue() - 1;
132 // In this case 'LaneOffset' refers to the offset from the start of the
133 // last subvector with VF.getKnownMinValue() elements.
134 LaneKind = VPLane::Kind::ScalableLast;
136 LaneKind = VPLane::Kind::First;
137 return VPLane(LaneOffset, LaneKind);
140 /// Returns a compile-time known value for the lane index and asserts if the
141 /// lane can only be calculated at runtime.
142 unsigned getKnownLane() const {
143 assert(LaneKind == Kind::First);
147 /// Returns an expression describing the lane index that can be used at
149 Value *getAsRuntimeExpr(IRBuilder<> &Builder, const ElementCount &VF) const;
151 /// Returns the Kind of lane offset.
152 Kind getKind() const { return LaneKind; }
154 /// Returns true if this is the first lane of the whole vector.
155 bool isFirstLane() const { return Lane == 0 && LaneKind == Kind::First; }
157 /// Maps the lane to a cache index based on \p VF.
158 unsigned mapToCacheIndex(const ElementCount &VF) const {
160 case VPLane::Kind::ScalableLast:
161 assert(VF.isScalable() && Lane < VF.getKnownMinValue());
162 return VF.getKnownMinValue() + Lane;
164 assert(Lane < VF.getKnownMinValue());
169 /// Returns the maximum number of lanes that we are able to consider
170 /// caching for \p VF.
171 static unsigned getNumCachedLanes(const ElementCount &VF) {
172 return VF.getKnownMinValue() * (VF.isScalable() ? 2 : 1);
176 /// VPIteration represents a single point in the iteration space of the output
177 /// (vectorized and/or unrolled) IR loop.
184 VPIteration(unsigned Part, unsigned Lane,
185 VPLane::Kind Kind = VPLane::Kind::First)
186 : Part(Part), Lane(Lane, Kind) {}
188 VPIteration(unsigned Part, const VPLane &Lane) : Part(Part), Lane(Lane) {}
190 bool isFirstIteration() const { return Part == 0 && Lane.isFirstLane(); }
193 /// VPTransformState holds information passed down when "executing" a VPlan,
194 /// needed for generating the output IR.
195 struct VPTransformState {
196 VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
197 DominatorTree *DT, IRBuilder<> &Builder,
198 InnerLoopVectorizer *ILV, VPlan *Plan)
199 : VF(VF), UF(UF), Instance(), LI(LI), DT(DT), Builder(Builder), ILV(ILV),
202 /// The chosen Vectorization and Unroll Factors of the loop being vectorized.
206 /// Hold the indices to generate specific scalar instructions. Null indicates
207 /// that all instances are to be generated, using either scalar or vector
209 Optional<VPIteration> Instance;
212 /// A type for vectorized values in the new loop. Each value from the
213 /// original loop, when vectorized, is represented by UF vector values in
214 /// the new unrolled loop, where UF is the unroll factor.
215 typedef SmallVector<Value *, 2> PerPartValuesTy;
217 DenseMap<VPValue *, PerPartValuesTy> PerPartOutput;
219 using ScalarsPerPartValuesTy = SmallVector<SmallVector<Value *, 4>, 2>;
220 DenseMap<VPValue *, ScalarsPerPartValuesTy> PerPartScalars;
223 /// Get the generated Value for a given VPValue and a given Part. Note that
224 /// as some Defs are still created by ILV and managed in its ValueMap, this
225 /// method will delegate the call to ILV in such cases in order to provide
226 /// callers a consistent API.
228 Value *get(VPValue *Def, unsigned Part);
230 /// Get the generated Value for a given VPValue and given Part and Lane.
231 Value *get(VPValue *Def, const VPIteration &Instance);
233 bool hasVectorValue(VPValue *Def, unsigned Part) {
234 auto I = Data.PerPartOutput.find(Def);
235 return I != Data.PerPartOutput.end() && Part < I->second.size() &&
239 bool hasAnyVectorValue(VPValue *Def) const {
240 return Data.PerPartOutput.find(Def) != Data.PerPartOutput.end();
243 bool hasScalarValue(VPValue *Def, VPIteration Instance) {
244 auto I = Data.PerPartScalars.find(Def);
245 if (I == Data.PerPartScalars.end())
247 unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
248 return Instance.Part < I->second.size() &&
249 CacheIdx < I->second[Instance.Part].size() &&
250 I->second[Instance.Part][CacheIdx];
253 /// Set the generated Value for a given VPValue and a given Part.
254 void set(VPValue *Def, Value *V, unsigned Part) {
255 if (!Data.PerPartOutput.count(Def)) {
256 DataState::PerPartValuesTy Entry(UF);
257 Data.PerPartOutput[Def] = Entry;
259 Data.PerPartOutput[Def][Part] = V;
261 /// Reset an existing vector value for \p Def and a given \p Part.
262 void reset(VPValue *Def, Value *V, unsigned Part) {
263 auto Iter = Data.PerPartOutput.find(Def);
264 assert(Iter != Data.PerPartOutput.end() &&
265 "need to overwrite existing value");
266 Iter->second[Part] = V;
269 /// Set the generated scalar \p V for \p Def and the given \p Instance.
270 void set(VPValue *Def, Value *V, const VPIteration &Instance) {
271 auto Iter = Data.PerPartScalars.insert({Def, {}});
272 auto &PerPartVec = Iter.first->second;
273 while (PerPartVec.size() <= Instance.Part)
274 PerPartVec.emplace_back();
275 auto &Scalars = PerPartVec[Instance.Part];
276 unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
277 while (Scalars.size() <= CacheIdx)
278 Scalars.push_back(nullptr);
279 assert(!Scalars[CacheIdx] && "should overwrite existing value");
280 Scalars[CacheIdx] = V;
283 /// Reset an existing scalar value for \p Def and a given \p Instance.
284 void reset(VPValue *Def, Value *V, const VPIteration &Instance) {
285 auto Iter = Data.PerPartScalars.find(Def);
286 assert(Iter != Data.PerPartScalars.end() &&
287 "need to overwrite existing value");
288 assert(Instance.Part < Iter->second.size() &&
289 "need to overwrite existing value");
290 unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
291 assert(CacheIdx < Iter->second[Instance.Part].size() &&
292 "need to overwrite existing value");
293 Iter->second[Instance.Part][CacheIdx] = V;
296 /// Hold state information used when constructing the CFG of the output IR,
297 /// traversing the VPBasicBlocks and generating corresponding IR BasicBlocks.
299 /// The previous VPBasicBlock visited. Initially set to null.
300 VPBasicBlock *PrevVPBB = nullptr;
302 /// The previous IR BasicBlock created or used. Initially set to the new
303 /// header BasicBlock.
304 BasicBlock *PrevBB = nullptr;
306 /// The last IR BasicBlock in the output IR. Set to the new latch
307 /// BasicBlock, used for placing the newly created BasicBlocks.
308 BasicBlock *LastBB = nullptr;
310 /// The IR BasicBlock that is the preheader of the vector loop in the output
312 /// FIXME: The vector preheader should also be modeled in VPlan, so any code
313 /// that needs to be added to the preheader gets directly generated by
314 /// VPlan. There should be no need to manage a pointer to the IR BasicBlock.
315 BasicBlock *VectorPreHeader = nullptr;
317 /// A mapping of each VPBasicBlock to the corresponding BasicBlock. In case
318 /// of replication, maps the BasicBlock of the last replica created.
319 SmallDenseMap<VPBasicBlock *, BasicBlock *> VPBB2IRBB;
321 /// Vector of VPBasicBlocks whose terminator instruction needs to be fixed
322 /// up at the end of vector code generation.
323 SmallVector<VPBasicBlock *, 8> VPBBsToFix;
325 CFGState() = default;
328 /// Hold a pointer to LoopInfo to register new basic blocks in the loop.
331 /// Hold a pointer to Dominator Tree to register new basic blocks in the loop.
334 /// Hold a reference to the IRBuilder used to generate output IR code.
335 IRBuilder<> &Builder;
337 VPValue2ValueTy VPValue2Value;
339 /// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
340 Value *CanonicalIV = nullptr;
342 /// Hold the trip count of the scalar loop.
343 Value *TripCount = nullptr;
345 /// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
346 InnerLoopVectorizer *ILV;
348 /// Pointer to the VPlan code is generated for.
351 /// Holds recipes that may generate a poison value that is used after
352 /// vectorization, even when their operands are not poison.
353 SmallPtrSet<VPRecipeBase *, 16> MayGeneratePoisonRecipes;
356 /// VPUsers instance used by VPBlockBase to manage CondBit and the block
357 /// predicate. Currently VPBlockUsers are used in VPBlockBase for historical
358 /// reasons, but in the future the only VPUsers should either be recipes or
359 /// live-outs.
360 struct VPBlockUser : public VPUser {
361 VPBlockUser() : VPUser({}, VPUserID::Block) {}
363 VPValue *getSingleOperandOrNull() {
364 if (getNumOperands() == 1)
365 return getOperand(0);
369 const VPValue *getSingleOperandOrNull() const {
370 if (getNumOperands() == 1)
371 return getOperand(0);
376 void resetSingleOpUser(VPValue *NewVal) {
377 assert(getNumOperands() <= 1 && "Didn't expect more than one operand!");
379 if (getNumOperands() == 1)
384 if (getNumOperands() == 1)
385 setOperand(0, NewVal);
391 /// VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
392 /// A VPBlockBase can be either a VPBasicBlock or a VPRegionBlock.
394 friend class VPBlockUtils;
396 const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).
398 /// An optional name for the block.
401 /// The immediate VPRegionBlock which this VPBlockBase belongs to, or null if
402 /// it is a topmost VPBlockBase.
403 VPRegionBlock *Parent = nullptr;
405 /// List of predecessor blocks.
406 SmallVector<VPBlockBase *, 1> Predecessors;
408 /// List of successor blocks.
409 SmallVector<VPBlockBase *, 1> Successors;
411 /// Successor selector managed by a VPUser. For blocks with zero or one
412 /// successors, there is no operand. Otherwise there is exactly one operand
413 /// which is the branch condition.
414 VPBlockUser CondBitUser;
416 /// If the block is predicated, its predicate is stored as an operand of this
417 /// VPUser to maintain the def-use relations. Otherwise there is no operand
419 VPBlockUser PredicateUser;
421 /// VPlan containing the block. Can only be set on the entry block of the
423 VPlan *Plan = nullptr;
425 /// Add \p Successor as the last successor to this block.
426 void appendSuccessor(VPBlockBase *Successor) {
427 assert(Successor && "Cannot add nullptr successor!");
428 Successors.push_back(Successor);
431 /// Add \p Predecessor as the last predecessor to this block.
432 void appendPredecessor(VPBlockBase *Predecessor) {
433 assert(Predecessor && "Cannot add nullptr predecessor!");
434 Predecessors.push_back(Predecessor);
437 /// Remove \p Predecessor from the predecessors of this block.
438 void removePredecessor(VPBlockBase *Predecessor) {
439 auto Pos = find(Predecessors, Predecessor);
440 assert(Pos && "Predecessor does not exist");
441 Predecessors.erase(Pos);
444 /// Remove \p Successor from the successors of this block.
445 void removeSuccessor(VPBlockBase *Successor) {
446 auto Pos = find(Successors, Successor);
447 assert(Pos && "Successor does not exist");
448 Successors.erase(Pos);
452 VPBlockBase(const unsigned char SC, const std::string &N)
453 : SubclassID(SC), Name(N) {}
456 /// An enumeration for keeping track of the concrete subclass of VPBlockBase
457 /// that are actually instantiated. Values of this enumeration are kept in the
458 /// SubclassID field of the VPBlockBase objects. They are used for concrete
459 /// type identification.
460 using VPBlockTy = enum { VPBasicBlockSC, VPRegionBlockSC };
462 using VPBlocksTy = SmallVectorImpl<VPBlockBase *>;
464 virtual ~VPBlockBase() = default;
466 const std::string &getName() const { return Name; }
468 void setName(const Twine &newName) { Name = newName.str(); }
470 /// \return an ID for the concrete type of this object.
471 /// This is used to implement the classof checks. This should not be used
472 /// for any other purpose, as the values may change as LLVM evolves.
473 unsigned getVPBlockID() const { return SubclassID; }
475 VPRegionBlock *getParent() { return Parent; }
476 const VPRegionBlock *getParent() const { return Parent; }
478 /// \return A pointer to the plan containing the current block.
480 const VPlan *getPlan() const;
482 /// Sets the pointer of the plan containing the block. The block must be the
483 /// entry block into the VPlan.
484 void setPlan(VPlan *ParentPlan);
486 void setParent(VPRegionBlock *P) { Parent = P; }
488 /// \return the VPBasicBlock that is the entry of this VPBlockBase,
489 /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
490 /// VPBlockBase is a VPBasicBlock, it is returned.
491 const VPBasicBlock *getEntryBasicBlock() const;
492 VPBasicBlock *getEntryBasicBlock();
494 /// \return the VPBasicBlock that is the exit of this VPBlockBase,
495 /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
496 /// VPBlockBase is a VPBasicBlock, it is returned.
497 const VPBasicBlock *getExitBasicBlock() const;
498 VPBasicBlock *getExitBasicBlock();
500 const VPBlocksTy &getSuccessors() const { return Successors; }
501 VPBlocksTy &getSuccessors() { return Successors; }
503 const VPBlocksTy &getPredecessors() const { return Predecessors; }
504 VPBlocksTy &getPredecessors() { return Predecessors; }
506 /// \return the successor of this VPBlockBase if it has a single successor.
507 /// Otherwise return a null pointer.
508 VPBlockBase *getSingleSuccessor() const {
509 return (Successors.size() == 1 ? *Successors.begin() : nullptr);
512 /// \return the predecessor of this VPBlockBase if it has a single
513 /// predecessor. Otherwise return a null pointer.
514 VPBlockBase *getSinglePredecessor() const {
515 return (Predecessors.size() == 1 ? *Predecessors.begin() : nullptr);
518 size_t getNumSuccessors() const { return Successors.size(); }
519 size_t getNumPredecessors() const { return Predecessors.size(); }
521 /// An Enclosing Block of a block B is any block containing B, including B
522 /// itself. \return the closest enclosing block starting from "this", which
523 /// has successors. \return the root enclosing block if all enclosing blocks
524 /// have no successors.
525 VPBlockBase *getEnclosingBlockWithSuccessors();
527 /// \return the closest enclosing block starting from "this", which has
528 /// predecessors. \return the root enclosing block if all enclosing blocks
529 /// have no predecessors.
530 VPBlockBase *getEnclosingBlockWithPredecessors();
532 /// \return the successors either attached directly to this VPBlockBase or, if
533 /// this VPBlockBase is the exit block of a VPRegionBlock and has no
534 /// successors of its own, search recursively for the first enclosing
535 /// VPRegionBlock that has successors and return them. If no such
536 /// VPRegionBlock exists, return the (empty) successors of the topmost
537 /// VPBlockBase reached.
538 const VPBlocksTy &getHierarchicalSuccessors() {
539 return getEnclosingBlockWithSuccessors()->getSuccessors();
542 /// \return the hierarchical successor of this VPBlockBase if it has a single
543 /// hierarchical successor. Otherwise return a null pointer.
544 VPBlockBase *getSingleHierarchicalSuccessor() {
545 return getEnclosingBlockWithSuccessors()->getSingleSuccessor();
548 /// \return the predecessors either attached directly to this VPBlockBase or,
549 /// if this VPBlockBase is the entry block of a VPRegionBlock and has no
550 /// predecessors of its own, search recursively for the first enclosing
551 /// VPRegionBlock that has predecessors and return them. If no such
552 /// VPRegionBlock exists, return the (empty) predecessors of the topmost
553 /// VPBlockBase reached.
554 const VPBlocksTy &getHierarchicalPredecessors() {
555 return getEnclosingBlockWithPredecessors()->getPredecessors();
558 /// \return the hierarchical predecessor of this VPBlockBase if it has a
559 /// single hierarchical predecessor. Otherwise return a null pointer.
560 VPBlockBase *getSingleHierarchicalPredecessor() {
561 return getEnclosingBlockWithPredecessors()->getSinglePredecessor();
564 /// \return the condition bit selecting the successor.
565 VPValue *getCondBit();
566 /// \return the condition bit selecting the successor.
567 const VPValue *getCondBit() const;
568 /// Set the condition bit selecting the successor.
569 void setCondBit(VPValue *CV);
571 /// \return the block's predicate.
572 VPValue *getPredicate();
573 /// \return the block's predicate.
574 const VPValue *getPredicate() const;
575 /// Set the block's predicate.
576 void setPredicate(VPValue *Pred);
578 /// Set a given VPBlockBase \p Successor as the single successor of this
579 /// VPBlockBase. This VPBlockBase is not added as predecessor of \p Successor.
580 /// This VPBlockBase must have no successors.
581 void setOneSuccessor(VPBlockBase *Successor) {
582 assert(Successors.empty() && "Setting one successor when others exist.");
583 appendSuccessor(Successor);
586 /// Set two given VPBlockBases \p IfTrue and \p IfFalse to be the two
587 /// successors of this VPBlockBase. \p Condition is set as the successor
588 /// selector. This VPBlockBase is not added as predecessor of \p IfTrue or \p
589 /// IfFalse. This VPBlockBase must have no successors.
590 void setTwoSuccessors(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
591 VPValue *Condition) {
592 assert(Successors.empty() && "Setting two successors when others exist.");
593 assert(Condition && "Setting two successors without condition!");
594 setCondBit(Condition);
595 appendSuccessor(IfTrue);
596 appendSuccessor(IfFalse);
599 /// Set each VPBasicBlock in \p NewPreds as predecessor of this VPBlockBase.
600 /// This VPBlockBase must have no predecessors. This VPBlockBase is not added
601 /// as successor of any VPBasicBlock in \p NewPreds.
602 void setPredecessors(ArrayRef<VPBlockBase *> NewPreds) {
603 assert(Predecessors.empty() && "Block predecessors already set.");
604 for (auto *Pred : NewPreds)
605 appendPredecessor(Pred);
608 /// Remove all the predecessors of this block.
609 void clearPredecessors() { Predecessors.clear(); }
611 /// Remove all the successors of this block and set to null its condition bit
612 void clearSuccessors() {
617 /// The method which generates the output IR that correspond to this
618 /// VPBlockBase, thereby "executing" the VPlan.
619 virtual void execute(struct VPTransformState *State) = 0;
621 /// Delete all blocks reachable from a given VPBlockBase, inclusive.
622 static void deleteCFG(VPBlockBase *Entry);
624 /// Return true if it is legal to hoist instructions into this block.
625 bool isLegalToHoistInto() {
626 // There are currently no constraints that prevent an instruction to be
627 // hoisted into a VPBlockBase.
631 /// Replace all operands of VPUsers in the block with \p NewValue and also
632 /// replaces all uses of VPValues defined in the block with NewValue.
633 virtual void dropAllReferences(VPValue *NewValue) = 0;
635 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
636 void printAsOperand(raw_ostream &OS, bool PrintType) const {
640 /// Print plain-text dump of this VPBlockBase to \p O, prefixing all lines
641 /// with \p Indent. \p SlotTracker is used to print unnamed VPValue's using
642 /// consecutive numbers.
644 /// Note that the numbering is applied to the whole VPlan, so printing
645 /// individual blocks is consistent with the whole VPlan printing.
646 virtual void print(raw_ostream &O, const Twine &Indent,
647 VPSlotTracker &SlotTracker) const = 0;
649 /// Print plain-text dump of this VPlan to \p O.
650 void print(raw_ostream &O) const {
651 VPSlotTracker SlotTracker(getPlan());
652 print(O, "", SlotTracker);
655 /// Print the successors of this block to \p O, prefixing all lines with \p
657 void printSuccessors(raw_ostream &O, const Twine &Indent) const;
659 /// Dump this VPBlockBase to dbgs().
660 LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
664 /// VPRecipeBase is a base class modeling a sequence of one or more output IR
665 /// instructions. VPRecipeBase owns the VPValues it defines through VPDef
666 /// and is responsible for deleting its defined values. Single-value
667 /// VPRecipeBases that also inherit from VPValue must make sure to inherit from
668 /// VPRecipeBase before VPValue.
669 class VPRecipeBase : public ilist_node_with_parent<VPRecipeBase, VPBasicBlock>,
673 friend class VPBlockUtils;
675 /// Each VPRecipe belongs to a single VPBasicBlock.
676 VPBasicBlock *Parent = nullptr;
679 VPRecipeBase(const unsigned char SC, ArrayRef<VPValue *> Operands)
680 : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
682 template <typename IterT>
683 VPRecipeBase(const unsigned char SC, iterator_range<IterT> Operands)
684 : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
685 virtual ~VPRecipeBase() = default;
687 /// \return the VPBasicBlock which this VPRecipe belongs to.
688 VPBasicBlock *getParent() { return Parent; }
689 const VPBasicBlock *getParent() const { return Parent; }
691 /// The method which generates the output IR instructions that correspond to
692 /// this VPRecipe, thereby "executing" the VPlan.
693 virtual void execute(struct VPTransformState &State) = 0;
695 /// Insert an unlinked recipe into a basic block immediately before
696 /// the specified recipe.
697 void insertBefore(VPRecipeBase *InsertPos);
699 /// Insert an unlinked Recipe into a basic block immediately after
700 /// the specified Recipe.
701 void insertAfter(VPRecipeBase *InsertPos);
703 /// Unlink this recipe from its current VPBasicBlock and insert it into
704 /// the VPBasicBlock that MovePos lives in, right after MovePos.
705 void moveAfter(VPRecipeBase *MovePos);
707 /// Unlink this recipe and insert into BB before I.
709 /// \pre I is a valid iterator into BB.
710 void moveBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator I);
712 /// This method unlinks 'this' from the containing basic block, but does not
714 void removeFromParent();
716 /// This method unlinks 'this' from the containing basic block and deletes it.
718 /// \returns an iterator pointing to the element after the erased one
719 iplist<VPRecipeBase>::iterator eraseFromParent();
721 /// Returns the underlying instruction, if the recipe is a VPValue or nullptr
723 Instruction *getUnderlyingInstr() {
724 return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
726 const Instruction *getUnderlyingInstr() const {
727 return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
730 /// Method to support type inquiry through isa, cast, and dyn_cast.
731 static inline bool classof(const VPDef *D) {
732 // All VPDefs are also VPRecipeBases.
736 static inline bool classof(const VPUser *U) {
737 return U->getVPUserID() == VPUser::VPUserID::Recipe;
740 /// Returns true if the recipe may have side-effects.
741 bool mayHaveSideEffects() const;
743 /// Returns true for PHI-like recipes.
745 return getVPDefID() >= VPFirstPHISC && getVPDefID() <= VPLastPHISC;
748 /// Returns true if the recipe may read from memory.
749 bool mayReadFromMemory() const;
751 /// Returns true if the recipe may write to memory.
752 bool mayWriteToMemory() const;
754 /// Returns true if the recipe may read from or write to memory.
755 bool mayReadOrWriteMemory() const {
756 return mayReadFromMemory() || mayWriteToMemory();
760 inline bool VPUser::classof(const VPDef *Def) {
761 return Def->getVPDefID() == VPRecipeBase::VPInstructionSC ||
762 Def->getVPDefID() == VPRecipeBase::VPWidenSC ||
763 Def->getVPDefID() == VPRecipeBase::VPWidenCallSC ||
764 Def->getVPDefID() == VPRecipeBase::VPWidenSelectSC ||
765 Def->getVPDefID() == VPRecipeBase::VPWidenGEPSC ||
766 Def->getVPDefID() == VPRecipeBase::VPBlendSC ||
767 Def->getVPDefID() == VPRecipeBase::VPInterleaveSC ||
768 Def->getVPDefID() == VPRecipeBase::VPReplicateSC ||
769 Def->getVPDefID() == VPRecipeBase::VPReductionSC ||
770 Def->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC ||
771 Def->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
774 /// This is a concrete Recipe that models a single VPlan-level instruction.
775 /// While as any Recipe it may generate a sequence of IR instructions when
776 /// executed, these instructions would always form a single-def expression as
777 /// the VPInstruction is also a single def-use vertex.
778 class VPInstruction : public VPRecipeBase, public VPValue {
779 friend class VPlanSlp;
782 /// VPlan opcodes, extending LLVM IR with idiomatic instructions.
784 FirstOrderRecurrenceSplice =
785 Instruction::OtherOpsEnd + 1, // Combines the incoming and previous
786 // values of a first-order recurrence.
795 typedef unsigned char OpcodeTy;
799 /// Utility method serving execute(): generates a single instance of the
800 /// modeled instruction.
801 void generateInstruction(VPTransformState &State, unsigned Part);
804 void setUnderlyingInstr(Instruction *I) { setUnderlyingValue(I); }
807 VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands)
808 : VPRecipeBase(VPRecipeBase::VPInstructionSC, Operands),
809 VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode) {}
811 VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands)
812 : VPInstruction(Opcode, ArrayRef<VPValue *>(Operands)) {}
814 /// Method to support type inquiry through isa, cast, and dyn_cast.
815 static inline bool classof(const VPValue *V) {
816 return V->getVPValueID() == VPValue::VPVInstructionSC;
819 VPInstruction *clone() const {
820 SmallVector<VPValue *, 2> Operands(operands());
821 return new VPInstruction(Opcode, Operands);
824 /// Method to support type inquiry through isa, cast, and dyn_cast.
825 static inline bool classof(const VPDef *R) {
826 return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
829 unsigned getOpcode() const { return Opcode; }
831 /// Generate the instruction.
832 /// TODO: We currently execute only per-part unless a specific instance is
834 void execute(VPTransformState &State) override;
836 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
837 /// Print the VPInstruction to \p O.
838 void print(raw_ostream &O, const Twine &Indent,
839 VPSlotTracker &SlotTracker) const override;
841 /// Print the VPInstruction to dbgs() (for debugging).
842 LLVM_DUMP_METHOD void dump() const;
845 /// Return true if this instruction may modify memory.
846 bool mayWriteToMemory() const {
847 // TODO: we can use attributes of the called function to rule out memory
849 return Opcode == Instruction::Store || Opcode == Instruction::Call ||
850 Opcode == Instruction::Invoke || Opcode == SLPStore;
853 bool hasResult() const {
854 // CallInst may or may not have a result, depending on the called function.
855 // Conservatively return calls have results for now.
856 switch (getOpcode()) {
857 case Instruction::Ret:
858 case Instruction::Br:
859 case Instruction::Store:
860 case Instruction::Switch:
861 case Instruction::IndirectBr:
862 case Instruction::Resume:
863 case Instruction::CatchRet:
864 case Instruction::Unreachable:
865 case Instruction::Fence:
866 case Instruction::AtomicRMW:
873 /// Set the fast-math flags.
874 void setFastMathFlags(FastMathFlags FMFNew);
877 /// VPWidenRecipe is a recipe for producing a widened copy of its
878 /// ingredient. This recipe covers most of the traditional vectorization cases
879 /// where each ingredient transforms into a vectorized version of itself.
880 class VPWidenRecipe : public VPRecipeBase, public VPValue {
882 template <typename IterT>
883 VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands)
884 : VPRecipeBase(VPRecipeBase::VPWidenSC, Operands),
885 VPValue(VPValue::VPVWidenSC, &I, this) {}
887 ~VPWidenRecipe() override = default;
889 /// Method to support type inquiry through isa, cast, and dyn_cast.
890 static inline bool classof(const VPDef *D) {
891 return D->getVPDefID() == VPRecipeBase::VPWidenSC;
893 static inline bool classof(const VPValue *V) {
894 return V->getVPValueID() == VPValue::VPVWidenSC;
897 /// Produce widened copies of all Ingredients.
898 void execute(VPTransformState &State) override;
900 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
901 /// Print the recipe.
902 void print(raw_ostream &O, const Twine &Indent,
903 VPSlotTracker &SlotTracker) const override;
907 /// A recipe for widening Call instructions.
908 class VPWidenCallRecipe : public VPRecipeBase, public VPValue {
/// Construct a widen-call recipe for \p I; \p CallArguments are the call's
/// arguments as VPValues (the callee itself is taken from \p I).
911   template <typename IterT>
912   VPWidenCallRecipe(CallInst &I, iterator_range<IterT> CallArguments)
913       : VPRecipeBase(VPRecipeBase::VPWidenCallSC, CallArguments),
914         VPValue(VPValue::VPVWidenCallSC, &I, this) {}
916   ~VPWidenCallRecipe() override = default;
918   /// Method to support type inquiry through isa, cast, and dyn_cast.
919   static inline bool classof(const VPDef *D) {
920     return D->getVPDefID() == VPRecipeBase::VPWidenCallSC;
923   /// Produce a widened version of the call instruction.
924   void execute(VPTransformState &State) override;
926 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
927   /// Print the recipe.
928   void print(raw_ostream &O, const Twine &Indent,
929              VPSlotTracker &SlotTracker) const override;
933 /// A recipe for widening select instructions.
934 class VPWidenSelectRecipe : public VPRecipeBase, public VPValue {
936   /// Is the condition of the select loop invariant?
940   template <typename IterT>
941   VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands,
943       : VPRecipeBase(VPRecipeBase::VPWidenSelectSC, Operands),
944         VPValue(VPValue::VPVWidenSelectSC, &I, this),
945         InvariantCond(InvariantCond) {}
947   ~VPWidenSelectRecipe() override = default;
949   /// Method to support type inquiry through isa, cast, and dyn_cast.
950   static inline bool classof(const VPDef *D) {
951     return D->getVPDefID() == VPRecipeBase::VPWidenSelectSC;
954   /// Produce a widened version of the select instruction.
955   void execute(VPTransformState &State) override;
957 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
958   /// Print the recipe.
959   void print(raw_ostream &O, const Twine &Indent,
960              VPSlotTracker &SlotTracker) const override;
964 /// A recipe for handling GEP instructions.
965 class VPWidenGEPRecipe : public VPRecipeBase, public VPValue {
/// Whether the GEP's pointer operand is loop-invariant. Only computed by the
/// constructor that receives the original loop; otherwise left unset here.
966   bool IsPtrLoopInvariant;
/// Per-index loop-invariance flags, one bit per GEP index operand.
967   SmallBitVector IsIndexLoopInvariant;
970   template <typename IterT>
971   VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands)
972       : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
// NOTE(review): this ctor passes VPWidenGEPSC (a VPDef ID) to VPValue, while
// the ctor below passes VPValue::VPVWidenGEPSC — confirm both resolve to the
// same numeric ID, otherwise classof on VPValue may misbehave for this ctor.
973         VPValue(VPWidenGEPSC, GEP, this),
974         IsIndexLoopInvariant(GEP->getNumIndices(), false) {}
/// Construct a widen-GEP recipe and precompute which of the GEP's operands
/// are invariant with respect to the original loop.
976   template <typename IterT>
977   VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands,
979       : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
980         VPValue(VPValue::VPVWidenGEPSC, GEP, this),
981         IsIndexLoopInvariant(GEP->getNumIndices(), false) {
982     IsPtrLoopInvariant = OrigLoop->isLoopInvariant(GEP->getPointerOperand());
983     for (auto Index : enumerate(GEP->indices()))
984       IsIndexLoopInvariant[Index.index()] =
985           OrigLoop->isLoopInvariant(Index.value().get());
987   ~VPWidenGEPRecipe() override = default;
989   /// Method to support type inquiry through isa, cast, and dyn_cast.
990   static inline bool classof(const VPDef *D) {
991     return D->getVPDefID() == VPRecipeBase::VPWidenGEPSC;
994   /// Generate the gep nodes.
995   void execute(VPTransformState &State) override;
997 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
998   /// Print the recipe.
999   void print(raw_ostream &O, const Twine &Indent,
1000              VPSlotTracker &SlotTracker) const override;
1004 /// A recipe for handling phi nodes of integer and floating-point inductions,
1005 /// producing their vector and scalar values.
1006 class VPWidenIntOrFpInductionRecipe : public VPRecipeBase {
/// Construct a recipe for induction phi \p IV with start value \p Start.
/// Defines VPValues for the (optional) truncated IV, the IV itself, and the
/// (optional) cast instruction — the conditional logic guarding each
/// `new VPValue` is elided from this view.
1010   VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, Instruction *Cast,
1011                                 TruncInst *Trunc = nullptr)
1012       : VPRecipeBase(VPWidenIntOrFpInductionSC, {Start}), IV(IV) {
1014       new VPValue(Trunc, this);
1016       new VPValue(IV, this);
1019       new VPValue(Cast, this);
1021   ~VPWidenIntOrFpInductionRecipe() override = default;
1023   /// Method to support type inquiry through isa, cast, and dyn_cast.
1024   static inline bool classof(const VPDef *D) {
1025     return D->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC;
1028   /// Generate the vectorized and scalarized versions of the phi node as
1029   /// needed by their users.
1030   void execute(VPTransformState &State) override;
1032 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1033   /// Print the recipe.
1034   void print(raw_ostream &O, const Twine &Indent,
1035              VPSlotTracker &SlotTracker) const override;
1038   /// Returns the start value of the induction.
1039   VPValue *getStartValue() { return getOperand(0); }
1041   /// Returns the cast VPValue, if one is attached, or nullptr otherwise.
/// A cast is attached exactly when two values are defined by this recipe;
/// the cast is then the second defined value.
1042   VPValue *getCastValue() {
1043     if (getNumDefinedValues() != 2)
1045     return getVPValue(1);
1048   /// Returns the first defined value as TruncInst, if it is one or nullptr
1050   TruncInst *getTruncInst() {
1051     return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
/// Const overload of getTruncInst().
1053   const TruncInst *getTruncInst() const {
1054     return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
1058 /// A recipe for handling first order recurrences and pointer inductions. For
1059 /// first-order recurrences, the start value is the first operand of the recipe
1060 /// and the incoming value from the backedge is the second operand. It also
1061 /// serves as base class for VPReductionPHIRecipe. In the VPlan native path, all
1062 /// incoming VPValues & VPBasicBlock pairs are managed in the recipe directly.
1063 class VPWidenPHIRecipe : public VPRecipeBase, public VPValue {
1064   /// List of incoming blocks. Only used in the VPlan native path.
1065   SmallVector<VPBasicBlock *, 2> IncomingBlocks;
/// Protected delegating constructor used by subclasses to supply their own
/// VPValue/VPDef IDs; \p Start, if non-null, becomes the first operand.
1068   VPWidenPHIRecipe(unsigned char VPVID, unsigned char VPDefID, PHINode *Phi,
1069                    VPValue *Start = nullptr)
1070       : VPRecipeBase(VPDefID, {}), VPValue(VPVID, Phi, this) {
1076   /// Create a VPWidenPHIRecipe for \p Phi
1077   VPWidenPHIRecipe(PHINode *Phi)
1078       : VPWidenPHIRecipe(VPVWidenPHISC, VPWidenPHISC, Phi) {}
1080   /// Create a new VPWidenPHIRecipe for \p Phi with start value \p Start.
1081   VPWidenPHIRecipe(PHINode *Phi, VPValue &Start) : VPWidenPHIRecipe(Phi) {
1085   ~VPWidenPHIRecipe() override = default;
1087   /// Method to support type inquiry through isa, cast, and dyn_cast.
/// Accepts this class and both subclasses (first-order recurrence and
/// reduction phis), so a cast to VPWidenPHIRecipe works for all three.
1088   static inline bool classof(const VPRecipeBase *B) {
1089     return B->getVPDefID() == VPRecipeBase::VPWidenPHISC ||
1090            B->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC ||
1091            B->getVPDefID() == VPRecipeBase::VPReductionPHISC;
1093   static inline bool classof(const VPValue *V) {
1094     return V->getVPValueID() == VPValue::VPVWidenPHISC ||
1095            V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC ||
1096            V->getVPValueID() == VPValue::VPVReductionPHISC;
1099   /// Generate the phi/select nodes.
1100   void execute(VPTransformState &State) override;
1102 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1103   /// Print the recipe.
1104   void print(raw_ostream &O, const Twine &Indent,
1105              VPSlotTracker &SlotTracker) const override;
1108   /// Returns the start value of the phi, if it is a reduction or first-order
1110   VPValue *getStartValue() {
1111     return getNumOperands() == 0 ? nullptr : getOperand(0);
1114   /// Returns the incoming value from the loop backedge, if it is a reduction or
1115   /// first-order recurrence.
1116   VPValue *getBackedgeValue() {
1117     return getOperand(1);
1120   /// Returns the backedge value as a recipe. The backedge value is guaranteed
1122   VPRecipeBase *getBackedgeRecipe() {
1123     return cast<VPRecipeBase>(getBackedgeValue()->getDef());
1126   /// Adds a pair (\p IncomingV, \p IncomingBlock) to the phi.
/// VPlan-native-path only: the value becomes an operand and the block is
/// recorded at the matching index in IncomingBlocks.
1127   void addIncoming(VPValue *IncomingV, VPBasicBlock *IncomingBlock) {
1128     addOperand(IncomingV);
1129     IncomingBlocks.push_back(IncomingBlock);
1132   /// Returns the \p I th incoming VPValue.
1133   VPValue *getIncomingValue(unsigned I) { return getOperand(I); }
1135   /// Returns the \p I th incoming VPBasicBlock.
1136   VPBasicBlock *getIncomingBlock(unsigned I) { return IncomingBlocks[I]; }
1139 /// A recipe for handling first-order recurrence phis. The start value is the
1140 /// first operand of the recipe and the incoming value from the backedge is the
1142 struct VPFirstOrderRecurrencePHIRecipe : public VPWidenPHIRecipe {
1143   VPFirstOrderRecurrencePHIRecipe(PHINode *Phi, VPValue &Start)
1144       : VPWidenPHIRecipe(VPVFirstOrderRecurrencePHISC,
1145                          VPFirstOrderRecurrencePHISC, Phi, &Start) {}
1147   /// Method to support type inquiry through isa, cast, and dyn_cast.
1148   static inline bool classof(const VPRecipeBase *R) {
1149     return R->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
/// Overload so casts from the VPWidenPHIRecipe base also work.
1151   static inline bool classof(const VPWidenPHIRecipe *D) {
1152     return D->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
1154   static inline bool classof(const VPValue *V) {
1155     return V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC;
/// Generate the phi for the recurrence.
1158   void execute(VPTransformState &State) override;
1160 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1161   /// Print the recipe.
1162   void print(raw_ostream &O, const Twine &Indent,
1163              VPSlotTracker &SlotTracker) const override;
1167 /// A recipe for handling reduction phis. The start value is the first operand
1168 /// of the recipe and the incoming value from the backedge is the second
1170 class VPReductionPHIRecipe : public VPWidenPHIRecipe {
1171   /// Descriptor for the reduction.
1172   RecurrenceDescriptor &RdxDesc;
1174   /// The phi is part of an in-loop reduction.
1177   /// The phi is part of an ordered reduction. Requires IsInLoop to be true.
1181   /// Create a new VPReductionPHIRecipe for the reduction \p Phi described by \p
1183   VPReductionPHIRecipe(PHINode *Phi, RecurrenceDescriptor &RdxDesc,
1184                        VPValue &Start, bool IsInLoop = false,
1185                        bool IsOrdered = false)
1186       : VPWidenPHIRecipe(VPVReductionPHISC, VPReductionPHISC, Phi, &Start),
1187         RdxDesc(RdxDesc), IsInLoop(IsInLoop), IsOrdered(IsOrdered) {
1188     assert((!IsOrdered || IsInLoop) && "IsOrdered requires IsInLoop");
1191   ~VPReductionPHIRecipe() override = default;
1193   /// Method to support type inquiry through isa, cast, and dyn_cast.
1194   static inline bool classof(const VPRecipeBase *R) {
1195     return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
1197   static inline bool classof(const VPValue *V) {
1198     return V->getVPValueID() == VPValue::VPVReductionPHISC;
/// Overload so casts from the VPWidenPHIRecipe base also work.
1200   static inline bool classof(const VPWidenPHIRecipe *R) {
1201     return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
1204   /// Generate the phi/select nodes.
1205   void execute(VPTransformState &State) override;
1207 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1208   /// Print the recipe.
1209   void print(raw_ostream &O, const Twine &Indent,
1210              VPSlotTracker &SlotTracker) const override;
/// Returns the recurrence descriptor describing this reduction.
1213   RecurrenceDescriptor &getRecurrenceDescriptor() { return RdxDesc; }
1215   /// Returns true, if the phi is part of an ordered reduction.
1216   bool isOrdered() const { return IsOrdered; }
1218   /// Returns true, if the phi is part of an in-loop reduction.
1219   bool isInLoop() const { return IsInLoop; }
1222 /// A recipe for vectorizing a phi-node as a sequence of mask-based select
1224 class VPBlendRecipe : public VPRecipeBase, public VPValue {
1228   /// The blend operation is a User of the incoming values and of their
1229   /// respective masks, ordered [I0, M0, I1, M1, ...]. Note that a single value
1230   /// might be incoming with a full mask for which there is no VPValue.
1231   VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands)
1232       : VPRecipeBase(VPBlendSC, Operands),
1233         VPValue(VPValue::VPVBlendSC, Phi, this), Phi(Phi) {
1234     assert(Operands.size() > 0 &&
1235            ((Operands.size() == 1) || (Operands.size() % 2 == 0)) &&
1236            "Expected either a single incoming value or a positive even number "
1240   /// Method to support type inquiry through isa, cast, and dyn_cast.
1241   static inline bool classof(const VPDef *D) {
1242     return D->getVPDefID() == VPRecipeBase::VPBlendSC;
1245   /// Return the number of incoming values, taking into account that a single
1246   /// incoming value has no mask.
1247   unsigned getNumIncomingValues() const { return (getNumOperands() + 1) / 2; }
1249   /// Return incoming value number \p Idx.
1250   VPValue *getIncomingValue(unsigned Idx) const { return getOperand(Idx * 2); }
1252   /// Return mask number \p Idx.
1253   VPValue *getMask(unsigned Idx) const { return getOperand(Idx * 2 + 1); }
1255   /// Generate the phi/select nodes.
1256   void execute(VPTransformState &State) override;
1258 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1259   /// Print the recipe.
1260   void print(raw_ostream &O, const Twine &Indent,
1261              VPSlotTracker &SlotTracker) const override;
1265 /// VPInterleaveRecipe is a recipe for transforming an interleave group of load
1266 /// or stores into one wide load/store and shuffles. The first operand of a
1267 /// VPInterleave recipe is the address, followed by the stored values, followed
1268 /// by an optional mask.
1269 class VPInterleaveRecipe : public VPRecipeBase {
1270   const InterleaveGroup<Instruction> *IG;
/// True iff a mask operand was appended as the last operand.
1272   bool HasMask = false;
/// Construct a recipe for \p IG accessed through \p Addr. One VPValue is
/// defined per non-void member of the group; \p StoredValues (empty for load
/// groups) are appended as operands, then \p Mask if present (that appending
/// logic is elided from this view).
1275   VPInterleaveRecipe(const InterleaveGroup<Instruction> *IG, VPValue *Addr,
1276                      ArrayRef<VPValue *> StoredValues, VPValue *Mask)
1277       : VPRecipeBase(VPInterleaveSC, {Addr}), IG(IG) {
1278     for (unsigned i = 0; i < IG->getFactor(); ++i)
1279       if (Instruction *I = IG->getMember(i)) {
1280         if (I->getType()->isVoidTy())
1282         new VPValue(I, this);
1285     for (auto *SV : StoredValues)
1292   ~VPInterleaveRecipe() override = default;
1294   /// Method to support type inquiry through isa, cast, and dyn_cast.
1295   static inline bool classof(const VPDef *D) {
1296     return D->getVPDefID() == VPRecipeBase::VPInterleaveSC;
1299   /// Return the address accessed by this recipe.
1300   VPValue *getAddr() const {
1301     return getOperand(0); // Address is the 1st, mandatory operand.
1304   /// Return the mask used by this recipe. Note that a full mask is represented
1306   VPValue *getMask() const {
1307     // Mask is optional and therefore the last, currently 2nd operand.
1308     return HasMask ? getOperand(getNumOperands() - 1) : nullptr;
1311   /// Return the VPValues stored by this interleave group. If it is a load
1312   /// interleave group, return an empty ArrayRef.
1313   ArrayRef<VPValue *> getStoredValues() const {
1314     // The first operand is the address, followed by the stored values, followed
1315     // by an optional mask.
1316     return ArrayRef<VPValue *>(op_begin(), getNumOperands())
1317         .slice(1, getNumStoreOperands());
1320   /// Generate the wide load or store, and shuffles.
1321   void execute(VPTransformState &State) override;
1323 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1324   /// Print the recipe.
1325   void print(raw_ostream &O, const Twine &Indent,
1326              VPSlotTracker &SlotTracker) const override;
/// Returns the interleave group modeled by this recipe.
1329   const InterleaveGroup<Instruction> *getInterleaveGroup() { return IG; }
1331   /// Returns the number of stored operands of this interleave group. Returns 0
1332   /// for load interleave groups.
1333   unsigned getNumStoreOperands() const {
1334     return getNumOperands() - (HasMask ? 2 : 1);
1338 /// A recipe to represent inloop reduction operations, performing a reduction on
1339 /// a vector operand into a scalar value, and adding the result to a chain.
1340 /// The Operands are {ChainOp, VecOp, [Condition]}.
1341 class VPReductionRecipe : public VPRecipeBase, public VPValue {
1342   /// The recurrence descriptor for the reduction in question.
1343   RecurrenceDescriptor *RdxDesc;
1344   /// Pointer to the TTI, needed to create the target reduction
1345   const TargetTransformInfo *TTI;
/// Construct a reduction recipe for \p I described by \p R. \p CondOp, if
/// non-null, is appended as a third operand (that appending is elided from
/// this view).
1348   VPReductionRecipe(RecurrenceDescriptor *R, Instruction *I, VPValue *ChainOp,
1349                     VPValue *VecOp, VPValue *CondOp,
1350                     const TargetTransformInfo *TTI)
1351       : VPRecipeBase(VPRecipeBase::VPReductionSC, {ChainOp, VecOp}),
1352         VPValue(VPValue::VPVReductionSC, I, this), RdxDesc(R), TTI(TTI) {
1357   ~VPReductionRecipe() override = default;
1359   /// Method to support type inquiry through isa, cast, and dyn_cast.
1360   static inline bool classof(const VPValue *V) {
1361     return V->getVPValueID() == VPValue::VPVReductionSC;
1364   /// Generate the reduction in the loop
1365   void execute(VPTransformState &State) override;
1367 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1368   /// Print the recipe.
1369   void print(raw_ostream &O, const Twine &Indent,
1370              VPSlotTracker &SlotTracker) const override;
1373   /// The VPValue of the scalar Chain being accumulated.
1374   VPValue *getChainOp() const { return getOperand(0); }
1375   /// The VPValue of the vector value to be reduced.
1376   VPValue *getVecOp() const { return getOperand(1); }
1377   /// The VPValue of the condition for the block.
1378   VPValue *getCondOp() const {
1379     return getNumOperands() > 2 ? getOperand(2) : nullptr;
1383 /// VPReplicateRecipe replicates a given instruction producing multiple scalar
1384 /// copies of the original scalar type, one per lane, instead of producing a
1385 /// single copy of widened type for all lanes. If the instruction is known to be
1386 /// uniform only one copy, per lane zero, will be generated.
1387 class VPReplicateRecipe : public VPRecipeBase, public VPValue {
1388   /// Indicator if only a single replica per lane is needed.
1391   /// Indicator if the replicas are also predicated.
1394   /// Indicator if the scalar values should also be packed into a vector.
/// Construct a replicate recipe for \p I. AlsoPack defaults to true for
/// predicated instructions with users; see the comment in the body.
1398   template <typename IterT>
1399   VPReplicateRecipe(Instruction *I, iterator_range<IterT> Operands,
1400                     bool IsUniform, bool IsPredicated = false)
1401       : VPRecipeBase(VPReplicateSC, Operands), VPValue(VPVReplicateSC, I, this),
1402         IsUniform(IsUniform), IsPredicated(IsPredicated) {
1403     // Retain the previous behavior of predicateInstructions(), where an
1404     // insert-element of a predicated instruction got hoisted into the
1405     // predicated basic block iff it was its only user. This is achieved by
1406     // having predicated instructions also pack their values into a vector by
1407     // default unless they have a replicated user which uses their scalar value.
1408     AlsoPack = IsPredicated && !I->use_empty();
1411   ~VPReplicateRecipe() override = default;
1413   /// Method to support type inquiry through isa, cast, and dyn_cast.
1414   static inline bool classof(const VPDef *D) {
1415     return D->getVPDefID() == VPRecipeBase::VPReplicateSC;
1418   static inline bool classof(const VPValue *V) {
1419     return V->getVPValueID() == VPValue::VPVReplicateSC;
1422   /// Generate replicas of the desired Ingredient. Replicas will be generated
1423   /// for all parts and lanes unless a specific part and lane are specified in
1425   void execute(VPTransformState &State) override;
/// Override whether the scalar results should also be packed into a vector.
1427   void setAlsoPack(bool Pack) { AlsoPack = Pack; }
1429 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1430   /// Print the recipe.
1431   void print(raw_ostream &O, const Twine &Indent,
1432              VPSlotTracker &SlotTracker) const override;
/// Returns true if only a single replica (lane zero) is generated.
1435   bool isUniform() const { return IsUniform; }
/// Returns true if the scalar values are also packed into a vector.
1437   bool isPacked() const { return AlsoPack; }
/// Returns true if the replicas are predicated.
1439   bool isPredicated() const { return IsPredicated; }
1442 /// A recipe for generating conditional branches on the bits of a mask.
1443 class VPBranchOnMaskRecipe : public VPRecipeBase {
/// Construct a branch-on-mask recipe; a null \p BlockInMask means an all-one
/// mask and results in a recipe with zero operands.
1445   VPBranchOnMaskRecipe(VPValue *BlockInMask)
1446       : VPRecipeBase(VPBranchOnMaskSC, {}) {
1447     if (BlockInMask) // nullptr means all-one mask.
1448       addOperand(BlockInMask);
1451   /// Method to support type inquiry through isa, cast, and dyn_cast.
1452   static inline bool classof(const VPDef *D) {
1453     return D->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC;
1456   /// Generate the extraction of the appropriate bit from the block mask and the
1457   /// conditional branch.
1458   void execute(VPTransformState &State) override;
1460 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1461   /// Print the recipe.
1462   void print(raw_ostream &O, const Twine &Indent,
1463              VPSlotTracker &SlotTracker) const override {
1464     O << Indent << "BRANCH-ON-MASK ";
1465     if (VPValue *Mask = getMask())
1466       Mask->printAsOperand(O, SlotTracker);
1472   /// Return the mask used by this recipe. Note that a full mask is represented
1474   VPValue *getMask() const {
1475     assert(getNumOperands() <= 1 && "should have either 0 or 1 operands");
1476     // Mask is optional.
1477     return getNumOperands() == 1 ? getOperand(0) : nullptr;
1481 /// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
1482 /// control converges back from a Branch-on-Mask. The phi nodes are needed in
1483 /// order to merge values that are set under such a branch and feed their uses.
1484 /// The phi nodes can be scalar or vector depending on the users of the value.
1485 /// This recipe works in concert with VPBranchOnMaskRecipe.
1486 class VPPredInstPHIRecipe : public VPRecipeBase, public VPValue {
1488   /// Construct a VPPredInstPHIRecipe given \p PredInst whose value needs a phi
1489   /// nodes after merging back from a Branch-on-Mask.
1490   VPPredInstPHIRecipe(VPValue *PredV)
1491       : VPRecipeBase(VPPredInstPHISC, PredV),
1492         VPValue(VPValue::VPVPredInstPHI, nullptr, this) {}
1493   ~VPPredInstPHIRecipe() override = default;
1495   /// Method to support type inquiry through isa, cast, and dyn_cast.
1496   static inline bool classof(const VPDef *D) {
1497     return D->getVPDefID() == VPRecipeBase::VPPredInstPHISC;
1500   /// Generates phi nodes for live-outs as needed to retain SSA form.
1501   void execute(VPTransformState &State) override;
1503 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1504   /// Print the recipe.
1505   void print(raw_ostream &O, const Twine &Indent,
1506              VPSlotTracker &SlotTracker) const override;
1510 /// A Recipe for widening load/store operations.
1511 /// The recipe uses the following VPValues:
1512 /// - For load: Address, optional mask
1513 /// - For store: Address, stored value, optional mask
1514 /// TODO: We currently execute only per-part unless a specific instance is
1516 class VPWidenMemoryInstructionRecipe : public VPRecipeBase, public VPValue {
1517   Instruction &Ingredient;
1519   // Whether the loaded-from / stored-to addresses are consecutive.
1522   // Whether the consecutive loaded/stored addresses are in reverse order.
/// Append \p Mask as the last operand (body elided from this view).
1525   void setMask(VPValue *Mask) {
/// Returns true if a mask operand is present: masked stores have 3 operands
/// (addr, value, mask), masked loads 2 (addr, mask).
1531   bool isMasked() const {
1532     return isStore() ? getNumOperands() == 3 : getNumOperands() == 2;
/// Construct a recipe widening \p Load from \p Addr, optionally masked.
1536   VPWidenMemoryInstructionRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask,
1537                                  bool Consecutive, bool Reverse)
1538       : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr}),
1539         VPValue(VPValue::VPVMemoryInstructionSC, &Load, this), Ingredient(Load),
1540         Consecutive(Consecutive), Reverse(Reverse) {
1541     assert((Consecutive || !Reverse) && "Reverse implies consecutive");
/// Construct a recipe widening \p Store of \p StoredValue to \p Addr,
/// optionally masked.
1545   VPWidenMemoryInstructionRecipe(StoreInst &Store, VPValue *Addr,
1546                                  VPValue *StoredValue, VPValue *Mask,
1547                                  bool Consecutive, bool Reverse)
1548       : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr, StoredValue}),
1549         VPValue(VPValue::VPVMemoryInstructionSC, &Store, this),
1550         Ingredient(Store), Consecutive(Consecutive), Reverse(Reverse) {
1551     assert((Consecutive || !Reverse) && "Reverse implies consecutive");
1555   /// Method to support type inquiry through isa, cast, and dyn_cast.
1556   static inline bool classof(const VPDef *D) {
1557     return D->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
1560   /// Return the address accessed by this recipe.
1561   VPValue *getAddr() const {
1562     return getOperand(0); // Address is the 1st, mandatory operand.
1565   /// Return the mask used by this recipe. Note that a full mask is represented
1567   VPValue *getMask() const {
1568     // Mask is optional and therefore the last operand.
1569     return isMasked() ? getOperand(getNumOperands() - 1) : nullptr;
1572   /// Returns true if this recipe is a store.
1573   bool isStore() const { return isa<StoreInst>(Ingredient); }
1575   /// Return the value stored by this recipe.
1576   VPValue *getStoredValue() const {
1577     assert(isStore() && "Stored value only available for store instructions");
1578     return getOperand(1); // Stored value is the 2nd, mandatory operand.
1581   // Return whether the loaded-from / stored-to addresses are consecutive.
1582   bool isConsecutive() const { return Consecutive; }
1584   // Return whether the consecutive loaded/stored addresses are in reverse
1586   bool isReverse() const { return Reverse; }
1588   /// Generate the wide load/store.
1589   void execute(VPTransformState &State) override;
1591 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1592   /// Print the recipe.
1593   void print(raw_ostream &O, const Twine &Indent,
1594              VPSlotTracker &SlotTracker) const override;
1598 /// A Recipe for widening the canonical induction variable of the vector loop.
1599 class VPWidenCanonicalIVRecipe : public VPRecipeBase, public VPValue {
/// No operands and no underlying IR value: the canonical IV is synthesized
/// entirely by execute().
1601   VPWidenCanonicalIVRecipe()
1602       : VPRecipeBase(VPWidenCanonicalIVSC, {}),
1603         VPValue(VPValue::VPVWidenCanonicalIVSC, nullptr, this) {}
1605   ~VPWidenCanonicalIVRecipe() override = default;
1607   /// Method to support type inquiry through isa, cast, and dyn_cast.
1608   static inline bool classof(const VPDef *D) {
1609     return D->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
1612   /// Generate a canonical vector induction variable of the vector loop, with
1613   /// start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and
1614   /// step = <VF*UF, VF*UF, ..., VF*UF>.
1615   void execute(VPTransformState &State) override;
1617 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1618   /// Print the recipe.
1619   void print(raw_ostream &O, const Twine &Indent,
1620              VPSlotTracker &SlotTracker) const override;
1624 /// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
1625 /// holds a sequence of zero or more VPRecipe's each representing a sequence of
1626 /// output IR instructions. All PHI-like recipes must come before any non-PHI recipes.
1627 class VPBasicBlock : public VPBlockBase {
1629   using RecipeListTy = iplist<VPRecipeBase>;
1632   /// The VPRecipes held in the order of output instructions to generate.
1633   RecipeListTy Recipes;
/// Construct a (possibly named) block, optionally seeded with \p Recipe.
1636   VPBasicBlock(const Twine &Name = "", VPRecipeBase *Recipe = nullptr)
1637       : VPBlockBase(VPBasicBlockSC, Name.str()) {
1639       appendRecipe(Recipe);
/// Destructor drains the recipe list (per-recipe cleanup elided from view).
1642   ~VPBasicBlock() override {
1643     while (!Recipes.empty())
1647   /// Instruction iterators...
1648   using iterator = RecipeListTy::iterator;
1649   using const_iterator = RecipeListTy::const_iterator;
1650   using reverse_iterator = RecipeListTy::reverse_iterator;
1651   using const_reverse_iterator = RecipeListTy::const_reverse_iterator;
1653   //===--------------------------------------------------------------------===//
1654   /// Recipe iterator methods
1656   inline iterator begin() { return Recipes.begin(); }
1657   inline const_iterator begin() const { return Recipes.begin(); }
1658   inline iterator end() { return Recipes.end(); }
1659   inline const_iterator end() const { return Recipes.end(); }
1661   inline reverse_iterator rbegin() { return Recipes.rbegin(); }
1662   inline const_reverse_iterator rbegin() const { return Recipes.rbegin(); }
1663   inline reverse_iterator rend() { return Recipes.rend(); }
1664   inline const_reverse_iterator rend() const { return Recipes.rend(); }
1666   inline size_t size() const { return Recipes.size(); }
1667   inline bool empty() const { return Recipes.empty(); }
1668   inline const VPRecipeBase &front() const { return Recipes.front(); }
1669   inline VPRecipeBase &front() { return Recipes.front(); }
1670   inline const VPRecipeBase &back() const { return Recipes.back(); }
1671   inline VPRecipeBase &back() { return Recipes.back(); }
1673   /// Returns a reference to the list of recipes.
1674   RecipeListTy &getRecipeList() { return Recipes; }
1676   /// Returns a pointer to a member of the recipe list.
/// Required by ilist machinery (e.g. symbol-table traits) to reach Recipes.
1677   static RecipeListTy VPBasicBlock::*getSublistAccess(VPRecipeBase *) {
1678     return &VPBasicBlock::Recipes;
1681   /// Method to support type inquiry through isa, cast, and dyn_cast.
1682   static inline bool classof(const VPBlockBase *V) {
1683     return V->getVPBlockID() == VPBlockBase::VPBasicBlockSC;
/// Insert \p Recipe before \p InsertPt, taking ownership and setting its
/// parent to this block. \p Recipe must not already belong to a block.
1686   void insert(VPRecipeBase *Recipe, iterator InsertPt) {
1687     assert(Recipe && "No recipe to append.");
1688     assert(!Recipe->Parent && "Recipe already in VPlan");
1689     Recipe->Parent = this;
1690     Recipes.insert(InsertPt, Recipe);
1693   /// Augment the existing recipes of a VPBasicBlock with an additional
1694   /// \p Recipe as the last recipe.
1695   void appendRecipe(VPRecipeBase *Recipe) { insert(Recipe, end()); }
1697   /// The method which generates the output IR instructions that correspond to
1698   /// this VPBasicBlock, thereby "executing" the VPlan.
1699   void execute(struct VPTransformState *State) override;
1701   /// Return the position of the first non-phi node recipe in the block.
1702   iterator getFirstNonPhi();
1704   /// Returns an iterator range over the PHI-like recipes in the block.
1705   iterator_range<iterator> phis() {
1706     return make_range(begin(), getFirstNonPhi());
/// Replace all uses of values defined by this block's recipes with NewValue.
1709   void dropAllReferences(VPValue *NewValue) override;
1711   /// Split current block at \p SplitAt by inserting a new block between the
1712   /// current block and its successors and moving all recipes starting at
1713   /// SplitAt to the new block. Returns the new block.
1714   VPBasicBlock *splitAt(iterator SplitAt);
1716 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1717   /// Print this VPBasicBlock to \p O, prefixing all lines with \p Indent. \p
1718   /// SlotTracker is used to print unnamed VPValue's using consecutive numbers.
1720   /// Note that the numbering is applied to the whole VPlan, so printing
1721   /// individual blocks is consistent with the whole VPlan printing.
1722   void print(raw_ostream &O, const Twine &Indent,
1723              VPSlotTracker &SlotTracker) const override;
1724   using VPBlockBase::print; // Get the print(raw_stream &O) version.
1728   /// Create an IR BasicBlock to hold the output instructions generated by this
1729   /// VPBasicBlock, and return it. Update the CFGState accordingly.
1730   BasicBlock *createEmptyBasicBlock(VPTransformState::CFGState &CFG);
1733 /// VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks
1734 /// which form a Single-Entry-Single-Exit subgraph of the output IR CFG.
1735 /// A VPRegionBlock may indicate that its contents are to be replicated several
1736 /// times. This is designed to support predicated scalarization, in which a
1737 /// scalar if-then code structure needs to be generated VF * UF times. Having
1738 /// this replication indicator helps to keep a single model for multiple
1739 /// candidate VF's. The actual replication takes place only once the desired VF
1740 /// and UF have been determined.
1741 class VPRegionBlock : public VPBlockBase {
1742   /// Hold the Single Entry of the SESE region modelled by the VPRegionBlock.
1745   /// Hold the Single Exit of the SESE region modelled by the VPRegionBlock.
1748   /// An indicator whether this region is to generate multiple replicated
1749   /// instances of output IR corresponding to its VPBlockBases.
/// Construct a region spanning \p Entry to \p Exit, claiming parenthood of
/// both. Entry must have no predecessors; Exit must have no successors.
1753   VPRegionBlock(VPBlockBase *Entry, VPBlockBase *Exit,
1754                 const std::string &Name = "", bool IsReplicator = false)
1755       : VPBlockBase(VPRegionBlockSC, Name), Entry(Entry), Exit(Exit),
1756         IsReplicator(IsReplicator) {
1757     assert(Entry->getPredecessors().empty() && "Entry block has predecessors.");
1758     assert(Exit->getSuccessors().empty() && "Exit block has successors.");
1759     Entry->setParent(this);
1760     Exit->setParent(this);
/// Construct an empty region; Entry/Exit are set later via setEntry/setExit.
1762   VPRegionBlock(const std::string &Name = "", bool IsReplicator = false)
1763       : VPBlockBase(VPRegionBlockSC, Name), Entry(nullptr), Exit(nullptr),
1764         IsReplicator(IsReplicator) {}
/// Destructor drops references of contained blocks via a dummy value before
/// deletion (surrounding cleanup elided from this view).
1766   ~VPRegionBlock() override {
1769       Entry->dropAllReferences(&DummyValue);
1774   /// Method to support type inquiry through isa, cast, and dyn_cast.
1775   static inline bool classof(const VPBlockBase *V) {
1776     return V->getVPBlockID() == VPBlockBase::VPRegionBlockSC;
1779   const VPBlockBase *getEntry() const { return Entry; }
1780   VPBlockBase *getEntry() { return Entry; }
1782   /// Set \p EntryBlock as the entry VPBlockBase of this VPRegionBlock. \p
1783   /// EntryBlock must have no predecessors.
1784   void setEntry(VPBlockBase *EntryBlock) {
1785     assert(EntryBlock->getPredecessors().empty() &&
1786            "Entry block cannot have predecessors.");
1788     EntryBlock->setParent(this);
1791   // FIXME: DominatorTreeBase is doing 'A->getParent()->front()'. 'front' is a
1792   // specific interface of llvm::Function, instead of using
1793   // GraphTraits::getEntryNode. We should add a new template parameter to
1794   // DominatorTreeBase representing the Graph type.
1795   VPBlockBase &front() const { return *Entry; }
1797   const VPBlockBase *getExit() const { return Exit; }
1798   VPBlockBase *getExit() { return Exit; }
1800   /// Set \p ExitBlock as the exit VPBlockBase of this VPRegionBlock. \p
1801   /// ExitBlock must have no successors.
1802   void setExit(VPBlockBase *ExitBlock) {
1803     assert(ExitBlock->getSuccessors().empty() &&
1804            "Exit block cannot have successors.");
1806     ExitBlock->setParent(this);
1809   /// An indicator whether this region is to generate multiple replicated
1810   /// instances of output IR corresponding to its VPBlockBases.
1811   bool isReplicator() const { return IsReplicator; }
1813   /// The method which generates the output IR instructions that correspond to
1814   /// this VPRegionBlock, thereby "executing" the VPlan.
1815   void execute(struct VPTransformState *State) override;
/// Replace all uses of values defined inside the region with NewValue.
1817   void dropAllReferences(VPValue *NewValue) override;
1819 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1820   /// Print this VPRegionBlock to \p O (recursively), prefixing all lines with
1821   /// \p Indent. \p SlotTracker is used to print unnamed VPValue's using
1822   /// consecutive numbers.
1824   /// Note that the numbering is applied to the whole VPlan, so printing
1825   /// individual regions is consistent with the whole VPlan printing.
1826   void print(raw_ostream &O, const Twine &Indent,
1827              VPSlotTracker &SlotTracker) const override;
1828   using VPBlockBase::print; // Get the print(raw_stream &O) version.
1832 //===----------------------------------------------------------------------===//
1833 // GraphTraits specializations for VPlan Hierarchical Control-Flow Graphs //
1834 //===----------------------------------------------------------------------===//
1836 // The following set of template specializations implement GraphTraits to treat
1837 // any VPBlockBase as a node in a graph of VPBlockBases. It's important to note
1838 // that VPBlockBase traits don't recurse into VPRegionBlocks, i.e., if the
1839 // VPBlockBase is a VPRegionBlock, this specialization provides access to its
1840 // successors/predecessors but not to the blocks inside the region.
1842 template <> struct GraphTraits<VPBlockBase *> {
1843 using NodeRef = VPBlockBase *;
1844 using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;
// A single VPBlockBase acts as its own entry node.
1846 static NodeRef getEntryNode(NodeRef N) { return N; }
// Children are the block's successors; regions are not recursed into here.
1848 static inline ChildIteratorType child_begin(NodeRef N) {
1849 return N->getSuccessors().begin();
1852 static inline ChildIteratorType child_end(NodeRef N) {
1853 return N->getSuccessors().end();
1857 template <> struct GraphTraits<const VPBlockBase *> {
1858 using NodeRef = const VPBlockBase *;
1859 using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::const_iterator;
// A single (const) VPBlockBase acts as its own entry node.
1861 static NodeRef getEntryNode(NodeRef N) { return N; }
// Children are the block's successors, exposed via const iterators.
1863 static inline ChildIteratorType child_begin(NodeRef N) {
1864 return N->getSuccessors().begin();
1867 static inline ChildIteratorType child_end(NodeRef N) {
1868 return N->getSuccessors().end();
1872 // Inverse order specialization for VPBasicBlocks. Predecessors are used instead
1873 // of successors for the inverse traversal.
1874 template <> struct GraphTraits<Inverse<VPBlockBase *>> {
1875 using NodeRef = VPBlockBase *;
1876 using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;
// The entry of the inverse graph is the wrapped block itself.
1878 static NodeRef getEntryNode(Inverse<NodeRef> B) { return B.Graph; }
// For the inverse traversal, children are the block's predecessors.
1880 static inline ChildIteratorType child_begin(NodeRef N) {
1881 return N->getPredecessors().begin();
1884 static inline ChildIteratorType child_end(NodeRef N) {
1885 return N->getPredecessors().end();
1889 // The following set of template specializations implement GraphTraits to
1890 // treat VPRegionBlock as a graph and recurse inside its nodes. It's important
1891 // to note that the blocks inside the VPRegionBlock are treated as VPBlockBases
1892 // (i.e., no dyn_cast is performed, VPBlockBases specialization is used), so
1893 // there won't be automatic recursion into other VPBlockBases that turn out to be
1897 struct GraphTraits<VPRegionBlock *> : public GraphTraits<VPBlockBase *> {
1898 using GraphRef = VPRegionBlock *;
1899 using nodes_iterator = df_iterator<NodeRef>;
// Viewed as a graph, a region starts at its entry block.
1901 static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }
// Depth-first iteration over all blocks inside the region.
1903 static nodes_iterator nodes_begin(GraphRef N) {
1904 return nodes_iterator::begin(N->getEntry());
1907 static nodes_iterator nodes_end(GraphRef N) {
1908 // df_iterator::end() returns an empty iterator so the node used doesn't
// matter; the region pointer itself is passed for convenience.
1910 return nodes_iterator::end(N);
1915 struct GraphTraits<const VPRegionBlock *>
1916 : public GraphTraits<const VPBlockBase *> {
1917 using GraphRef = const VPRegionBlock *;
1918 using nodes_iterator = df_iterator<NodeRef>;
// Viewed as a graph, a (const) region starts at its entry block.
1920 static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }
// Depth-first iteration over all blocks inside the region.
1922 static nodes_iterator nodes_begin(GraphRef N) {
1923 return nodes_iterator::begin(N->getEntry());
1926 static nodes_iterator nodes_end(GraphRef N) {
1927 // df_iterator::end() returns an empty iterator so the node used doesn't
// matter; the region pointer itself is passed for convenience.
1929 return nodes_iterator::end(N);
1934 struct GraphTraits<Inverse<VPRegionBlock *>>
1935 : public GraphTraits<Inverse<VPBlockBase *>> {
1936 using GraphRef = VPRegionBlock *;
1937 using nodes_iterator = df_iterator<NodeRef>;
// The inverse graph of a region starts at its exit block.
1939 static NodeRef getEntryNode(Inverse<GraphRef> N) {
1940 return N.Graph->getExit();
// Depth-first iteration rooted at the region's exit block.
1943 static nodes_iterator nodes_begin(GraphRef N) {
1944 return nodes_iterator::begin(N->getExit());
1947 static nodes_iterator nodes_end(GraphRef N) {
1948 // df_iterator::end() returns an empty iterator so the node used doesn't
// matter; the region pointer itself is passed for convenience.
1950 return nodes_iterator::end(N);
1954 /// Iterator to traverse all successors of a VPBlockBase node. This includes the
1955 /// entry node of VPRegionBlocks. Exit blocks of a region implicitly have their
1956 /// parent region's successors. This ensures all blocks in a region are visited
1957 /// before any blocks in a successor region when doing a reverse post-order
1958 /// traversal of the graph.
1959 template <typename BlockPtrTy>
1960 class VPAllSuccessorsIterator
1961 : public iterator_facade_base<VPAllSuccessorsIterator<BlockPtrTy>,
1962 std::forward_iterator_tag, VPBlockBase> {
1964 /// Index of the current successor. For VPBasicBlock nodes, this simply is the
1965 /// index for the successor array. For VPRegionBlock, SuccessorIdx == 0 is
1966 /// used for the region's entry block, and SuccessorIdx - 1 are the indices
1967 /// for the successor array.
1968 size_t SuccessorIdx;
/// Walk up the parent chain from \p Current to the first block that has
/// successors (exit blocks implicitly use their parent region's
/// successors). Returns null if no such block exists.
1970 static BlockPtrTy getBlockWithSuccs(BlockPtrTy Current) {
1971 while (Current && Current->getNumSuccessors() == 0)
1972 Current = Current->getParent();
1976 /// Templated helper to dereference successor \p SuccIdx of \p Block. Used by
1977 /// both the const and non-const operator* implementations.
1978 template <typename T1> static T1 deref(T1 Block, unsigned SuccIdx) {
1979 if (auto *R = dyn_cast<VPRegionBlock>(Block)) {
// Successor 0 of a region is the region's own entry block (see the
// SuccessorIdx documentation above).
1981 return R->getEntry();
1985 // For exit blocks, use the next parent region with successors.
1986 return getBlockWithSuccs(Block)->getSuccessors()[SuccIdx];
1990 VPAllSuccessorsIterator(BlockPtrTy Block, size_t Idx = 0)
1991 : Block(Block), SuccessorIdx(Idx) {}
1992 VPAllSuccessorsIterator(const VPAllSuccessorsIterator &Other)
1993 : Block(Other.Block), SuccessorIdx(Other.SuccessorIdx) {}
1995 VPAllSuccessorsIterator &operator=(const VPAllSuccessorsIterator &R) {
1997 SuccessorIdx = R.SuccessorIdx;
/// Return the past-the-end iterator for \p Block's successor sequence.
2001 static VPAllSuccessorsIterator end(BlockPtrTy Block) {
2002 BlockPtrTy ParentWithSuccs = getBlockWithSuccs(Block);
2003 unsigned NumSuccessors = ParentWithSuccs
2004 ? ParentWithSuccs->getNumSuccessors()
2005 : Block->getNumSuccessors();
// Regions have one extra implicit successor: their own entry block.
2007 if (auto *R = dyn_cast<VPRegionBlock>(Block))
2008 return {R, NumSuccessors + 1};
2009 return {Block, NumSuccessors};
2012 bool operator==(const VPAllSuccessorsIterator &R) const {
2013 return Block == R.Block && SuccessorIdx == R.SuccessorIdx;
2016 const VPBlockBase *operator*() const { return deref(Block, SuccessorIdx); }
2018 BlockPtrTy operator*() { return deref(Block, SuccessorIdx); }
2020 VPAllSuccessorsIterator &operator++() {
// Post-increment: copy, advance, return the copy.
2025 VPAllSuccessorsIterator operator++(int X) {
2026 VPAllSuccessorsIterator Orig = *this;
2032 /// Helper for GraphTraits specialization that traverses through VPRegionBlocks.
2033 template <typename BlockTy> class VPBlockRecursiveTraversalWrapper {
/// Wraps an entry block so that GraphTraits selects the recursive
/// (region-traversing) specialization instead of the flat VPBlockBase one.
2037 VPBlockRecursiveTraversalWrapper(BlockTy Entry) : Entry(Entry) {}
2038 BlockTy getEntry() { return Entry; }
2041 /// GraphTraits specialization to recursively traverse VPBlockBase nodes,
2042 /// including traversing through VPRegionBlocks. Exit blocks of a region
2043 /// implicitly have their parent region's successors. This ensures all blocks in
2044 /// a region are visited before any blocks in a successor region when doing a
2045 /// reverse post-order traversal of the graph.
2047 struct GraphTraits<VPBlockRecursiveTraversalWrapper<VPBlockBase *>> {
2048 using NodeRef = VPBlockBase *;
2049 using ChildIteratorType = VPAllSuccessorsIterator<VPBlockBase *>;
// Unwrap the traversal wrapper to get the entry block.
2052 getEntryNode(VPBlockRecursiveTraversalWrapper<VPBlockBase *> N) {
2053 return N.getEntry();
// Children are produced by VPAllSuccessorsIterator, which also steps into
// region entry blocks and out through region successors.
2056 static inline ChildIteratorType child_begin(NodeRef N) {
2057 return ChildIteratorType(N);
2060 static inline ChildIteratorType child_end(NodeRef N) {
2061 return ChildIteratorType::end(N);
2066 struct GraphTraits<VPBlockRecursiveTraversalWrapper<const VPBlockBase *>> {
2067 using NodeRef = const VPBlockBase *;
2068 using ChildIteratorType = VPAllSuccessorsIterator<const VPBlockBase *>;
// Unwrap the traversal wrapper to get the entry block.
2071 getEntryNode(VPBlockRecursiveTraversalWrapper<const VPBlockBase *> N) {
2072 return N.getEntry();
// Children are produced by VPAllSuccessorsIterator, which also steps into
// region entry blocks and out through region successors.
2075 static inline ChildIteratorType child_begin(NodeRef N) {
2076 return ChildIteratorType(N);
2079 static inline ChildIteratorType child_end(NodeRef N) {
2080 return ChildIteratorType::end(N);
2084 /// VPlan models a candidate for vectorization, encoding various decisions taken
2085 /// to produce efficient output IR, including which branches, basic-blocks and
2086 /// output IR instructions to generate, and their cost. VPlan holds a
2087 /// Hierarchical-CFG of VPBasicBlocks and VPRegionBlocks rooted at an Entry
2090 friend class VPlanPrinter;
2091 friend class VPSlotTracker;
2093 /// Hold the single entry to the Hierarchical CFG of the VPlan.
2096 /// Holds the VFs applicable to this VPlan.
2097 SmallSetVector<ElementCount, 2> VFs;
2099 /// Holds the name of the VPlan, for printing.
2102 /// Holds all the external definitions created for this VPlan.
2103 // TODO: Introduce a specific representation for external definitions in
2104 // VPlan. External definitions must be immutable and hold a pointer to its
2105 // underlying IR that will be used to implement its structural comparison
2106 // (operators '==' and '<').
2107 SetVector<VPValue *> VPExternalDefs;
2109 /// Represents the backedge taken count of the original loop, for folding
2111 VPValue *BackedgeTakenCount = nullptr;
2113 /// Holds a mapping between Values and their corresponding VPValue inside
2115 Value2VPValueTy Value2VPValue;
2117 /// Contains all VPValues that have been allocated by addVPValue directly and
2118 /// need to be freed when the plan's destructor is called.
2119 SmallVector<VPValue *, 16> VPValuesToFree;
2121 /// Holds the VPLoopInfo analysis for this VPlan.
2124 /// Indicates whether it is safe to use the Value2VPValue mapping or if the
2125 /// mapping cannot be used any longer, because it is stale.
2126 bool Value2VPValueEnabled = true;
2129 VPlan(VPBlockBase *Entry = nullptr) : Entry(Entry) {
2131 Entry->setPlan(this);
// Destructor: drop all references inside the CFG (replacing uses with a
// dummy value), delete the CFG, then free owned VPValues, the lazily
// created backedge-taken count, and the external definitions.
2137 for (VPBlockBase *Block : depth_first(Entry))
2138 Block->dropAllReferences(&DummyValue);
2140 VPBlockBase::deleteCFG(Entry);
2142 for (VPValue *VPV : VPValuesToFree)
2144 if (BackedgeTakenCount)
2145 delete BackedgeTakenCount;
2146 for (VPValue *Def : VPExternalDefs)
2150 /// Generate the IR code for this VPlan.
2151 void execute(struct VPTransformState *State);
2153 VPBlockBase *getEntry() { return Entry; }
2154 const VPBlockBase *getEntry() const { return Entry; }
2156 VPBlockBase *setEntry(VPBlockBase *Block) {
2158 Block->setPlan(this);
2162 /// The backedge taken count of the original loop.
2163 VPValue *getOrCreateBackedgeTakenCount() {
// Lazily created on first request; owned and freed by the plan.
2164 if (!BackedgeTakenCount)
2165 BackedgeTakenCount = new VPValue();
2166 return BackedgeTakenCount;
2169 /// Mark the plan to indicate that using Value2VPValue is not safe any
2170 /// longer, because it may be stale.
2171 void disableValue2VPValue() { Value2VPValueEnabled = false; }
2173 void addVF(ElementCount VF) { VFs.insert(VF); }
2175 bool hasVF(ElementCount VF) { return VFs.count(VF); }
2177 const std::string &getName() const { return Name; }
2179 void setName(const Twine &newName) { Name = newName.str(); }
2181 /// Add \p VPVal to the pool of external definitions if it's not already
2183 void addExternalDef(VPValue *VPVal) { VPExternalDefs.insert(VPVal); }
/// Create a new VPValue wrapping \p V and record it in the mapping. The
/// plan owns the new VPValue and frees it in the destructor.
2185 void addVPValue(Value *V) {
2186 assert(Value2VPValueEnabled &&
2187 "IR value to VPValue mapping may be out of date!");
2188 assert(V && "Trying to add a null Value to VPlan");
2189 assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2190 VPValue *VPV = new VPValue(V);
2191 Value2VPValue[V] = VPV;
2192 VPValuesToFree.push_back(VPV);
/// Record an externally-owned \p VPV as the VPValue for \p V; unlike the
/// single-argument overload, \p VPV is not freed by the plan.
2195 void addVPValue(Value *V, VPValue *VPV) {
2196 assert(Value2VPValueEnabled && "Value2VPValue mapping may be out of date!");
2197 assert(V && "Trying to add a null Value to VPlan");
2198 assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2199 Value2VPValue[V] = VPV;
2202 /// Returns the VPValue for \p V. \p OverrideAllowed can be used to disable
2203 /// checking whether it is safe to query VPValues using IR Values.
2204 VPValue *getVPValue(Value *V, bool OverrideAllowed = false) {
2205 assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
2206 "Value2VPValue mapping may be out of date!");
2207 assert(V && "Trying to get the VPValue of a null Value");
2208 assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
2209 return Value2VPValue[V];
2212 /// Gets the VPValue or adds a new one (if none exists yet) for \p V. \p
2213 /// OverrideAllowed can be used to disable checking whether it is safe to
2214 /// query VPValues using IR Values.
2215 VPValue *getOrAddVPValue(Value *V, bool OverrideAllowed = false) {
2216 assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
2217 "Value2VPValue mapping may be out of date!");
2218 assert(V && "Trying to get or add the VPValue of a null Value");
2219 if (!Value2VPValue.count(V))
2221 return getVPValue(V);
/// Discard the mapping for \p V, e.g. after it has been replaced.
2224 void removeVPValueFor(Value *V) {
2225 assert(Value2VPValueEnabled &&
2226 "IR value to VPValue mapping may be out of date!");
2227 Value2VPValue.erase(V);
2230 /// Return the VPLoopInfo analysis for this VPlan.
2231 VPLoopInfo &getVPLoopInfo() { return VPLInfo; }
2232 const VPLoopInfo &getVPLoopInfo() const { return VPLInfo; }
2234 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2235 /// Print this VPlan to \p O.
2236 void print(raw_ostream &O) const;
2238 /// Print this VPlan in DOT format to \p O.
2239 void printDOT(raw_ostream &O) const;
2241 /// Dump the plan to stderr (for debugging).
2242 LLVM_DUMP_METHOD void dump() const;
2245 /// Returns a range mapping the values of the range \p Operands to their
2246 /// corresponding VPValues.
2247 iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
2248 mapToVPValues(User::op_range Operands) {
2249 std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
2250 return getOrAddVPValue(Op);
2252 return map_range(Operands, Fn);
2256 /// Add to the given dominator tree the header block and every new basic block
2257 /// that was created between it and the latch block, inclusive.
2258 static void updateDominatorTree(DominatorTree *DT, BasicBlock *LoopLatchBB,
2259 BasicBlock *LoopPreHeaderBB,
2260 BasicBlock *LoopExitBB);
2263 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2264 /// VPlanPrinter prints a given VPlan to a given output stream. The printing is
2265 /// indented and follows the dot format.
2266 class VPlanPrinter {
2270 unsigned TabWidth = 2;
2273 SmallDenseMap<const VPBlockBase *, unsigned> BlockID;
2275 VPSlotTracker SlotTracker;
2277 /// Handle indentation.
2278 void bumpIndent(int b) { Indent = std::string((Depth += b) * TabWidth, ' ') }
2280 /// Print a given \p Block of the Plan.
2281 void dumpBlock(const VPBlockBase *Block);
2283 /// Print the information related to the CFG edges going out of a given
2284 /// \p Block, followed by printing the successor blocks themselves.
2285 void dumpEdges(const VPBlockBase *Block);
2287 /// Print a given \p BasicBlock, including its VPRecipes, followed by printing
2288 /// its successor blocks.
2289 void dumpBasicBlock(const VPBasicBlock *BasicBlock);
2291 /// Print a given \p Region of the Plan.
2292 void dumpRegion(const VPRegionBlock *Region);
/// Return the numeric ID for \p Block, assigning the next free one if
/// \p Block has not been seen before.
2294 unsigned getOrCreateBID(const VPBlockBase *Block) {
2295 return BlockID.count(Block) ? BlockID[Block] : BlockID[Block] = BID++;
2298 Twine getOrCreateName(const VPBlockBase *Block);
2300 Twine getUID(const VPBlockBase *Block);
2302 /// Print the information related to a CFG edge between two VPBlockBases.
2303 void drawEdge(const VPBlockBase *From, const VPBlockBase *To, bool Hidden,
2304 const Twine &Label);
2307 VPlanPrinter(raw_ostream &O, const VPlan &P)
2308 : OS(O), Plan(P), SlotTracker(&P) {}
2310 LLVM_DUMP_METHOD void dump();
2313 struct VPlanIngredient {
/// Wrap an IR \p V (non-owning) so it can be printed via print() below.
2316 VPlanIngredient(const Value *V) : V(V) {}
2318 void print(raw_ostream &O) const;
/// Stream insertion for VPlanIngredient (body elided in this view;
/// presumably forwards to VPlanIngredient::print — confirm in full source).
2321 inline raw_ostream &operator<<(raw_ostream &OS, const VPlanIngredient &I) {
/// Stream insertion for VPlan (body elided in this view; presumably
/// forwards to VPlan::print — confirm in full source).
2326 inline raw_ostream &operator<<(raw_ostream &OS, const VPlan &Plan) {
2332 //===----------------------------------------------------------------------===//
2334 //===----------------------------------------------------------------------===//
2336 /// Class that provides utilities for VPBlockBases in VPlan.
2337 class VPBlockUtils {
// Static-only utility class; never instantiated.
2339 VPBlockUtils() = delete;
2341 /// Insert disconnected VPBlockBase \p NewBlock after \p BlockPtr. Add \p
2342 /// NewBlock as successor of \p BlockPtr and \p BlockPtr as predecessor of \p
2343 /// NewBlock, and propagate \p BlockPtr parent to \p NewBlock. If \p BlockPtr
2344 /// has more than one successor, its conditional bit is propagated to \p
2345 /// NewBlock. \p NewBlock must have neither successors nor predecessors.
2346 static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr) {
2347 assert(NewBlock->getSuccessors().empty() &&
2348 "Can't insert new block with successors.");
2349 // TODO: move successors from BlockPtr to NewBlock when this functionality
2350 // is necessary. For now, setBlockSingleSuccessor will assert if BlockPtr
2351 // already has successors.
2352 BlockPtr->setOneSuccessor(NewBlock);
2353 NewBlock->setPredecessors({BlockPtr});
2354 NewBlock->setParent(BlockPtr->getParent());
2357 /// Insert disconnected VPBlockBases \p IfTrue and \p IfFalse after \p
2358 /// BlockPtr. Add \p IfTrue and \p IfFalse as successors of \p BlockPtr and \p
2359 /// BlockPtr as predecessor of \p IfTrue and \p IfFalse. Propagate \p BlockPtr
2360 /// parent to \p IfTrue and \p IfFalse. \p Condition is set as the successor
2361 /// selector. \p BlockPtr must have no successors and \p IfTrue and \p IfFalse
2362 /// must have neither successors nor predecessors.
2363 static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
2364 VPValue *Condition, VPBlockBase *BlockPtr) {
2365 assert(IfTrue->getSuccessors().empty() &&
2366 "Can't insert IfTrue with successors.");
2367 assert(IfFalse->getSuccessors().empty() &&
2368 "Can't insert IfFalse with successors.");
2369 BlockPtr->setTwoSuccessors(IfTrue, IfFalse, Condition);
2370 IfTrue->setPredecessors({BlockPtr});
2371 IfFalse->setPredecessors({BlockPtr});
2372 IfTrue->setParent(BlockPtr->getParent());
2373 IfFalse->setParent(BlockPtr->getParent());
2376 /// Connect VPBlockBases \p From and \p To bi-directionally. Append \p To to
2377 /// the successors of \p From and \p From to the predecessors of \p To. Both
2378 /// VPBlockBases must have the same parent, which can be null. Both
2379 /// VPBlockBases can be already connected to other VPBlockBases.
2380 static void connectBlocks(VPBlockBase *From, VPBlockBase *To) {
2381 assert((From->getParent() == To->getParent()) &&
2382 "Can't connect two block with different parents");
2383 assert(From->getNumSuccessors() < 2 &&
2384 "Blocks can't have more than two successors.");
2385 From->appendSuccessor(To);
2386 To->appendPredecessor(From);
2389 /// Disconnect VPBlockBases \p From and \p To bi-directionally. Remove \p To
2390 /// from the successors of \p From and \p From from the predecessors of \p To.
2391 static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To) {
2392 assert(To && "Successor to disconnect is null.");
2393 From->removeSuccessor(To);
2394 To->removePredecessor(From);
2397 /// Returns true if the edge \p FromBlock -> \p ToBlock is a back-edge.
2398 static bool isBackEdge(const VPBlockBase *FromBlock,
2399 const VPBlockBase *ToBlock, const VPLoopInfo *VPLI) {
2400 assert(FromBlock->getParent() == ToBlock->getParent() &&
2401 FromBlock->getParent() && "Must be in same region");
2402 const VPLoop *FromLoop = VPLI->getLoopFor(FromBlock);
2403 const VPLoop *ToLoop = VPLI->getLoopFor(ToBlock);
// Both endpoints must be inside the same loop for a back-edge to exist.
2404 if (!FromLoop || !ToLoop || FromLoop != ToLoop)
2407 // A back-edge is a branch from the loop latch to its header.
2408 return ToLoop->isLoopLatch(FromBlock) && ToBlock == ToLoop->getHeader();
2411 /// Returns true if \p Block is a loop latch
2412 static bool blockIsLoopLatch(const VPBlockBase *Block,
2413 const VPLoopInfo *VPLInfo) {
2414 if (const VPLoop *ParentVPL = VPLInfo->getLoopFor(Block))
2415 return ParentVPL->isLoopLatch(Block);
2420 /// Count and return the number of successors of \p PredBlock excluding any
2422 static unsigned countSuccessorsNoBE(VPBlockBase *PredBlock,
2425 for (VPBlockBase *SuccBlock : PredBlock->getSuccessors()) {
2426 if (!VPBlockUtils::isBackEdge(PredBlock, SuccBlock, VPLI))
2432 /// Return an iterator range over \p Range which only includes \p BlockTy
2433 /// blocks. The accesses are casted to \p BlockTy.
2434 template <typename BlockTy, typename T>
2435 static auto blocksOnly(const T &Range) {
2436 // Create BaseTy with correct const-ness based on BlockTy.
2438 typename std::conditional<std::is_const<BlockTy>::value,
2439 const VPBlockBase, VPBlockBase>::type;
2441 // We need to first create an iterator range over (const) BlocktTy & instead
2442 // of (const) BlockTy * for filter_range to work properly.
2444 map_range(Range, [](BaseTy *Block) -> BaseTy & { return *Block; });
2445 auto Filter = make_filter_range(
2446 Mapped, [](BaseTy &Block) { return isa<BlockTy>(&Block); });
2447 return map_range(Filter, [](BaseTy &Block) -> BlockTy * {
2448 return cast<BlockTy>(&Block);
2453 class VPInterleavedAccessInfo {
/// Maps each VPInstruction to the interleave group it belongs to.
2454 DenseMap<VPInstruction *, InterleaveGroup<VPInstruction> *>
2457 /// Type for mapping of instruction based interleave groups to VPInstruction
2458 /// interleave groups
2459 using Old2NewTy = DenseMap<InterleaveGroup<Instruction> *,
2460 InterleaveGroup<VPInstruction> *>;
2462 /// Recursively traverse \p Region and populate VPlan based interleave groups
2464 void visitRegion(VPRegionBlock *Region, Old2NewTy &Old2New,
2465 InterleavedAccessInfo &IAI);
2466 /// Recursively traverse \p Block and populate VPlan based interleave groups
2467 /// based on \p IAI.
2468 void visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
2469 InterleavedAccessInfo &IAI);
2472 VPInterleavedAccessInfo(VPlan &Plan, InterleavedAccessInfo &IAI);
/// Free the interleave groups; several map entries may point to the same
/// group, so de-duplicate through a set first.
2474 ~VPInterleavedAccessInfo() {
2475 SmallPtrSet<InterleaveGroup<VPInstruction> *, 4> DelSet;
2476 // Avoid releasing a pointer twice.
2477 for (auto &I : InterleaveGroupMap)
2478 DelSet.insert(I.second);
2479 for (auto *Ptr : DelSet)
2483 /// Get the interleave group that \p Instr belongs to.
2485 /// \returns nullptr if doesn't have such group.
2486 InterleaveGroup<VPInstruction> *
2487 getInterleaveGroup(VPInstruction *Instr) const {
2488 return InterleaveGroupMap.lookup(Instr);
2492 /// Class that maps (parts of) an existing VPlan to trees of combined
/// Result modes for getBest()/buildGraph: combination failed, a load
/// bundle, or a bundle sharing a common opcode.
2495 enum class OpMode { Failed, Load, Opcode };
2497 /// A DenseMapInfo implementation for using SmallVector<VPValue *, 4> as
2499 struct BundleDenseMapInfo {
2500 static SmallVector<VPValue *, 4> getEmptyKey() {
2501 return {reinterpret_cast<VPValue *>(-1)};
2504 static SmallVector<VPValue *, 4> getTombstoneKey() {
2505 return {reinterpret_cast<VPValue *>(-2)};
2508 static unsigned getHashValue(const SmallVector<VPValue *, 4> &V) {
2509 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
2512 static bool isEqual(const SmallVector<VPValue *, 4> &LHS,
2513 const SmallVector<VPValue *, 4> &RHS) {
2518 /// Mapping of values in the original VPlan to a combined VPInstruction.
2519 DenseMap<SmallVector<VPValue *, 4>, VPInstruction *, BundleDenseMapInfo>
2522 VPInterleavedAccessInfo &IAI;
2524 /// Basic block to operate on. For now, only instructions in a single BB are
2526 const VPBasicBlock &BB;
2528 /// Indicates whether we managed to combine all visited instructions or not.
2529 bool CompletelySLP = true;
2531 /// Width of the widest combined bundle in bits.
2532 unsigned WidestBundleBits = 0;
2534 using MultiNodeOpTy =
2535 typename std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;
2537 // Input operand bundles for the current multi node. Each multi node operand
2538 // bundle contains values not matching the multi node's opcode. They will
2539 // be reordered in reorderMultiNodeOps, once we completed building a
2541 SmallVector<MultiNodeOpTy, 4> MultiNodeOps;
2543 /// Indicates whether we are building a multi node currently.
2544 bool MultiNodeActive = false;
2546 /// Check if we can vectorize Operands together.
2547 bool areVectorizable(ArrayRef<VPValue *> Operands) const;
2549 /// Add combined instruction \p New for the bundle \p Operands.
2550 void addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New);
2552 /// Indicate we hit a bundle we failed to combine. Returns nullptr for now.
2553 VPInstruction *markFailed();
2555 /// Reorder operands in the multi node to maximize sequential memory access
2556 /// and commutative operations.
2557 SmallVector<MultiNodeOpTy, 4> reorderMultiNodeOps();
2559 /// Choose the best candidate to use for the lane after \p Last. The set of
2560 /// candidates to choose from are values with an opcode matching \p Last's
2561 /// or loads consecutive to \p Last.
2562 std::pair<OpMode, VPValue *> getBest(OpMode Mode, VPValue *Last,
2563 SmallPtrSetImpl<VPValue *> &Candidates,
2564 VPInterleavedAccessInfo &IAI);
2566 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2567 /// Print bundle \p Values to dbgs().
2568 void dumpBundle(ArrayRef<VPValue *> Values);
2572 VPlanSlp(VPInterleavedAccessInfo &IAI, VPBasicBlock &BB) : IAI(IAI), BB(BB) {}
2574 ~VPlanSlp() = default;
2576 /// Tries to build an SLP tree rooted at \p Operands and returns a
2577 /// VPInstruction combining \p Operands, if they can be combined.
2578 VPInstruction *buildGraph(ArrayRef<VPValue *> Operands);
2580 /// Return the width of the widest combined bundle in bits.
2581 unsigned getWidestBundleBits() const { return WidestBundleBits; }
2583 /// Return true if all visited instruction can be combined.
2584 bool isCompletelySLP() const { return CompletelySLP; }
2586 } // end namespace llvm
2588 #endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H