1 //===- VPlanSLP.cpp - SLP Analysis based on VPlan -------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
9 /// This file implements SLP analysis based on VPlan. The analysis is based on
10 /// the ideas described in
12 /// Look-ahead SLP: auto-vectorization in the presence of commutative
13 /// operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
/// Luís F. W. Góes.
16 //===----------------------------------------------------------------------===//
19 #include "llvm/ADT/DepthFirstIterator.h"
20 #include "llvm/ADT/PostOrderIterator.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Twine.h"
23 #include "llvm/Analysis/LoopInfo.h"
24 #include "llvm/Analysis/VectorUtils.h"
25 #include "llvm/IR/BasicBlock.h"
26 #include "llvm/IR/CFG.h"
27 #include "llvm/IR/Dominators.h"
28 #include "llvm/IR/InstrTypes.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/Instructions.h"
31 #include "llvm/IR/Type.h"
32 #include "llvm/IR/Value.h"
33 #include "llvm/Support/Casting.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Support/GraphWriter.h"
37 #include "llvm/Support/raw_ostream.h"
38 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
46 #define DEBUG_TYPE "vplan-slp"
48 // Number of levels to look ahead when re-ordering multi node operands.
// NOTE(review): fixed at build time — not exposed as a cl::opt, so tuning
// requires a recompile.
49 static unsigned LookaheadMaxDepth = 5;
/// Record that the current SLP attempt cannot fully vectorize the plan by
/// clearing the CompletelySLP flag. Used as a sentinel producer when we hit
/// instructions we cannot handle; presumably returns a placeholder/null
/// VPInstruction — TODO confirm against the full definition.
51 VPInstruction *VPlanSlp::markFailed() {
52 // FIXME: Currently this is used to signal we hit instructions we cannot
54 CompletelySLP = false;
/// Cache the combined VPInstruction \p New created for the operand bundle
/// \p Operands and keep track of the widest bundle (in bits) seen so far.
58 void VPlanSlp::addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New) {
// Only bundles whose members all wrap an underlying IR instruction have a
// known scalar type, so only those contribute to the width statistic.
59 if (all_of(Operands, [](VPValue *V) {
60 return cast<VPInstruction>(V)->getUnderlyingInstr();
// Sum the scalar bit-widths of every member of the bundle.
62 unsigned BundleSize = 0;
63 for (VPValue *V : Operands) {
64 Type *T = cast<VPInstruction>(V)->getUnderlyingInstr()->getType();
65 assert(!T->isVectorTy() && "Only scalar types supported for now");
66 BundleSize += T->getScalarSizeInBits();
// Keep the running maximum; used when deciding the combined vector width.
68 WidestBundleBits = std::max(WidestBundleBits, BundleSize);
// The cache is keyed by the exact operand bundle; hitting an existing key
// here would mean buildGraph missed the cache on a revisit.
71 auto Res = BundleToCombined.try_emplace(to_vector<4>(Operands), New);
73 "Already created a combined instruction for the operand bundle");
/// Return true if the bundle \p Operands can be vectorized as a unit:
/// every operand is a VPInstruction wrapping an underlying IR instruction,
/// all share one opcode and primitive type width, all are defined in the
/// same VPBasicBlock, each has a single unique user, and memory operations
/// are "simple" loads/stores with no intervening may-write instruction
/// between the bundle's loads.
77 bool VPlanSlp::areVectorizable(ArrayRef<VPValue *> Operands) const {
78 // Currently we only support VPInstructions.
79 if (!all_of(Operands, [](VPValue *Op) {
80 return Op && isa<VPInstruction>(Op) &&
81 cast<VPInstruction>(Op)->getUnderlyingInstr();
83 LLVM_DEBUG(dbgs() << "VPSLP: not all operands are VPInstructions\n");
87 // Check if opcodes and type width agree for all instructions in the bundle.
88 // FIXME: Differing widths/opcodes can be handled by inserting additional
90 // FIXME: Deal with non-primitive types.
91 const Instruction *OriginalInstr =
92 cast<VPInstruction>(Operands[0])->getUnderlyingInstr();
93 unsigned Opcode = OriginalInstr->getOpcode();
94 unsigned Width = OriginalInstr->getType()->getPrimitiveSizeInBits();
95 if (!all_of(Operands, [Opcode, Width](VPValue *Op) {
96 const Instruction *I = cast<VPInstruction>(Op)->getUnderlyingInstr();
97 return I->getOpcode() == Opcode &&
98 I->getType()->getPrimitiveSizeInBits() == Width;
100 LLVM_DEBUG(dbgs() << "VPSLP: Opcodes do not agree \n");
104 // For now, all operands must be defined in the same BB.
105 if (any_of(Operands, [this](VPValue *Op) {
106 return cast<VPInstruction>(Op)->getParent() != &this->BB;
108 LLVM_DEBUG(dbgs() << "VPSLP: operands in different BBs\n");
// A value re-used by several bundles would turn the SLP tree into a DAG,
// which the builder does not support; require a single unique user.
113 [](VPValue *Op) { return Op->hasMoreThanOneUniqueUser(); })) {
114 LLVM_DEBUG(dbgs() << "VPSLP: Some operands have multiple users.\n");
118 // For loads, check that there are no instructions writing to memory in
120 // TODO: we only have to forbid instructions writing to memory that could
121 // interfere with any of the loads in the bundle
122 if (Opcode == Instruction::Load) {
// Walk the parent block, counting the bundle's loads as they are passed;
// reject if a may-write instruction appears after the first bundle load
// but before the last one has been seen.
123 unsigned LoadsSeen = 0;
124 VPBasicBlock *Parent = cast<VPInstruction>(Operands[0])->getParent();
125 for (auto &I : *Parent) {
126 auto *VPI = cast<VPInstruction>(&I);
127 if (VPI->getOpcode() == Instruction::Load &&
128 std::find(Operands.begin(), Operands.end(), VPI) != Operands.end())
131 if (LoadsSeen == Operands.size())
133 if (LoadsSeen > 0 && VPI->mayWriteToMemory()) {
135 dbgs() << "VPSLP: instruction modifying memory between loads\n");
// Only simple (presumably non-atomic, non-volatile — TODO confirm against
// LoadInst::isSimple) loads are supported.
140 if (!all_of(Operands, [](VPValue *Op) {
141 return cast<LoadInst>(cast<VPInstruction>(Op)->getUnderlyingInstr())
144 LLVM_DEBUG(dbgs() << "VPSLP: only simple loads are supported.\n");
// Likewise, only simple stores are supported.
149 if (Opcode == Instruction::Store)
150 if (!all_of(Operands, [](VPValue *Op) {
151 return cast<StoreInst>(cast<VPInstruction>(Op)->getUnderlyingInstr())
154 LLVM_DEBUG(dbgs() << "VPSLP: only simple stores are supported.\n");
/// Collect, for each value in \p Values, its operand at \p OperandIndex —
/// i.e. build one lane-wise operand bundle for that operand position.
161 static SmallVector<VPValue *, 4> getOperands(ArrayRef<VPValue *> Values,
162 unsigned OperandIndex) {
163 SmallVector<VPValue *, 4> Operands;
164 for (VPValue *V : Values) {
165 auto *U = cast<VPUser>(V);
166 Operands.push_back(U->getOperand(OperandIndex));
/// Return true if the bundle's common opcode is commutative. Only the first
/// value is inspected; callers are expected to have already verified that
/// all values share one opcode (see getOpcode).
171 static bool areCommutative(ArrayRef<VPValue *> Values) {
172 return Instruction::isCommutative(
173 cast<VPInstruction>(Values[0])->getOpcode());
/// Build one lane-wise operand bundle per operand position across all of
/// \p Values. Loads must never reach here (they terminate the SLP tree);
/// stores contribute only the stored value (operand 0).
176 static SmallVector<SmallVector<VPValue *, 4>, 4>
177 getOperands(ArrayRef<VPValue *> Values) {
178 SmallVector<SmallVector<VPValue *, 4>, 4> Result;
179 auto *VPI = cast<VPInstruction>(Values[0]);
181 switch (VPI->getOpcode()) {
182 case Instruction::Load:
183 llvm_unreachable("Loads terminate a tree, no need to get operands");
184 case Instruction::Store:
// Only operand 0 (the stored value) is bundled; presumably the address is
// handled separately via the interleave groups — TODO confirm.
185 Result.push_back(getOperands(Values, 0));
// Default: bundle every operand position.
188 for (unsigned I = 0, NumOps = VPI->getNumOperands(); I < NumOps; ++I)
189 Result.push_back(getOperands(Values, I));
196 /// Returns the common opcode of \p Values, or None if they do not all agree.
197 static Optional<unsigned> getOpcode(ArrayRef<VPValue *> Values) {
198 unsigned Opcode = cast<VPInstruction>(Values[0])->getOpcode();
199 if (any_of(Values, [Opcode](VPValue *V) {
200 return cast<VPInstruction>(V)->getOpcode() != Opcode;
206 /// Returns true if A and B access sequential memory if they are loads or
207 /// stores or if they have identical opcodes otherwise.
208 static bool areConsecutiveOrMatch(VPInstruction *A, VPInstruction *B,
209 VPInterleavedAccessInfo &IAI) {
// Differing opcodes can never match.
210 if (A->getOpcode() != B->getOpcode())
// Non-memory instructions match purely on opcode.
213 if (A->getOpcode() != Instruction::Load &&
214 A->getOpcode() != Instruction::Store)
// Memory accesses are "consecutive" when both belong to the same interleave
// group and B sits at the index immediately after A.
216 auto *GA = IAI.getInterleaveGroup(A);
217 auto *GB = IAI.getInterleaveGroup(B);
219 return GA && GB && GA == GB && GA->getIndex(A) + 1 == GB->getIndex(B);
222 /// Implements getLAScore from Listing 7 in the paper.
223 /// Traverses and compares operands of V1 and V2 to MaxLevel.
/// The score counts, over all pairs of operand subtrees down to the given
/// depth, how many pairs are consecutive memory accesses or opcode matches;
/// higher scores indicate better multi-node pairings.
224 static unsigned getLAScore(VPValue *V1, VPValue *V2, unsigned MaxLevel,
225 VPInterleavedAccessInfo &IAI) {
// Non-instruction values contribute nothing to the score.
226 if (!isa<VPInstruction>(V1) || !isa<VPInstruction>(V2))
// At the bottom of the look-ahead window: 1 for a consecutive access or
// opcode match, 0 otherwise.
230 return (unsigned)areConsecutiveOrMatch(cast<VPInstruction>(V1),
231 cast<VPInstruction>(V2), IAI);
// Otherwise recurse pairwise over every operand combination and accumulate.
234 for (unsigned I = 0, EV1 = cast<VPUser>(V1)->getNumOperands(); I < EV1; ++I)
235 for (unsigned J = 0, EV2 = cast<VPUser>(V2)->getNumOperands(); J < EV2; ++J)
236 Score += getLAScore(cast<VPUser>(V1)->getOperand(I),
237 cast<VPUser>(V2)->getOperand(J), MaxLevel - 1, IAI);
/// Choose from \p Candidates the value that best extends \p Last in a
/// multi-node lane. Candidates are first filtered to those that are
/// consecutive in memory with (loads/stores) or opcode-match \p Last;
/// remaining ties are broken by the look-ahead score at increasing depth.
/// The chosen value is erased from \p Candidates. Returns the resulting
/// mode paired with the winner, or {Failed, nullptr} if none qualifies.
241 std::pair<VPlanSlp::OpMode, VPValue *>
242 VPlanSlp::getBest(OpMode Mode, VPValue *Last,
243 SmallPtrSetImpl<VPValue *> &Candidates,
244 VPInterleavedAccessInfo &IAI) {
245 assert((Mode == OpMode::Load || Mode == OpMode::Opcode) &&
246 "Currently we only handle load and commutative opcodes");
247 LLVM_DEBUG(dbgs() << "      getBest\n");
249 SmallVector<VPValue *, 4> BestCandidates;
250 LLVM_DEBUG(dbgs() << "        Candidates  for " 
251 << *cast<VPInstruction>(Last)->getUnderlyingInstr() << " ");
// First pass: keep only candidates compatible with Last.
252 for (auto *Candidate : Candidates) {
253 auto *LastI = cast<VPInstruction>(Last);
254 auto *CandidateI = cast<VPInstruction>(Candidate);
255 if (areConsecutiveOrMatch(LastI, CandidateI, IAI)) {
256 LLVM_DEBUG(dbgs() << *cast<VPInstruction>(Candidate)->getUnderlyingInstr()
258 BestCandidates.push_back(Candidate);
261 LLVM_DEBUG(dbgs() << "\n");
263 if (BestCandidates.empty())
264 return {OpMode::Failed, nullptr};
// A single viable candidate needs no tie-breaking.
266 if (BestCandidates.size() == 1)
267 return {Mode, BestCandidates[0]};
// Multiple viable candidates: score them with the look-ahead heuristic,
// deepening the search until the scores disambiguate (or the fixed
// LookaheadMaxDepth bound is reached).
269 VPValue *Best = nullptr;
270 unsigned BestScore = 0;
271 for (unsigned Depth = 1; Depth < LookaheadMaxDepth; Depth++) {
272 unsigned PrevScore = ~0u;
275 // FIXME: Avoid visiting the same operands multiple times.
276 for (auto *Candidate : BestCandidates) {
277 unsigned Score = getLAScore(Last, Candidate, Depth, IAI);
278 if (PrevScore == ~0u)
280 if (PrevScore != Score)
284 if (Score > BestScore) {
292 LLVM_DEBUG(dbgs() << "Found best "
293 << *cast<VPInstruction>(Best)->getUnderlyingInstr()
// The winner is consumed so later lanes/operand rows cannot pick it again.
295 Candidates.erase(Best);
/// Re-order the lanes of every operand bundle collected in MultiNodeOps so
/// that, per bundle, each lane holds the value that best matches the lane
/// before it (as chosen by getBest). Returns the re-ordered bundles paired
/// with their placeholder ops.
300 SmallVector<VPlanSlp::MultiNodeOpTy, 4> VPlanSlp::reorderMultiNodeOps() {
301 SmallVector<MultiNodeOpTy, 4> FinalOrder;
302 SmallVector<OpMode, 4> Mode;
303 FinalOrder.reserve(MultiNodeOps.size());
304 Mode.reserve(MultiNodeOps.size());
306 LLVM_DEBUG(dbgs() << "Reordering multinode\n");
// Seed lane 0 of each bundle with its original first value, and record
// whether matching should use consecutive loads or opcode equality.
308 for (auto &Operands : MultiNodeOps) {
309 FinalOrder.push_back({Operands.first, {Operands.second[0]}});
310 if (cast<VPInstruction>(Operands.second[0])->getOpcode() ==
312 Mode.push_back(OpMode::Load);
314 Mode.push_back(OpMode::Opcode);
// For each subsequent lane, pool that lane's values from all bundles as
// candidates and let getBest assign one to each bundle in turn.
317 for (unsigned Lane = 1, E = MultiNodeOps[0].second.size(); Lane < E; ++Lane) {
318 LLVM_DEBUG(dbgs() << "  Finding best value for lane " << Lane << "\n");
319 SmallPtrSet<VPValue *, 4> Candidates;
320 LLVM_DEBUG(dbgs() << "  Candidates  ");
321 for (auto Ops : MultiNodeOps) {
323 dbgs() << *cast<VPInstruction>(Ops.second[Lane])->getUnderlyingInstr()
325 Candidates.insert(Ops.second[Lane]);
327 LLVM_DEBUG(dbgs() << "\n");
329 for (unsigned Op = 0, E = MultiNodeOps.size(); Op < E; ++Op) {
330 LLVM_DEBUG(dbgs() << "  Checking " << Op << "\n");
// A bundle that already failed stays failed for the remaining lanes.
331 if (Mode[Op] == OpMode::Failed)
// Extend this bundle with the best match for its previous lane's value.
334 VPValue *Last = FinalOrder[Op].second[Lane - 1];
335 std::pair<OpMode, VPValue *> Res =
336 getBest(Mode[Op], Last, Candidates, IAI);
338 FinalOrder[Op].second.push_back(Res.second);
340 // TODO: handle this case
341 FinalOrder[Op].second.push_back(markFailed());
/// Debug helper: print the underlying IR instruction of every value in
/// \p Values separated by " | ", printing " nullptr | " for values without
/// an underlying instruction.
348 void VPlanSlp::dumpBundle(ArrayRef<VPValue *> Values) {
350 for (auto Op : Values)
351 if (auto *Instr = cast_or_null<VPInstruction>(Op)->getUnderlyingInstr())
352 dbgs() << *Instr << " | ";
354 dbgs() << " nullptr | ";
/// Recursively build the SLP graph for the bundle \p Values: operand
/// bundles are combined bottom-up into new VPInstructions (SLPLoad /
/// SLPStore for memory opcodes). For commutative opcodes a "multi-node" is
/// opened whose operand bundles are re-ordered via reorderMultiNodeOps at
/// the multi-node root before being combined. Results are cached in
/// BundleToCombined (via addCombined) and re-used on repeated visits.
358 VPInstruction *VPlanSlp::buildGraph(ArrayRef<VPValue *> Values) {
359 assert(!Values.empty() && "Need some operands!");
361 // If we already visited this instruction bundle, re-use the existing node
362 auto I = BundleToCombined.find(to_vector<4>(Values));
363 if (I != BundleToCombined.end()) {
365 // Check that the resulting graph is a tree. If we re-use a node, this means
366 // its values have multiple users. We only allow this, if all users of each
367 // value are the same instruction.
368 for (auto *V : Values) {
369 auto UI = V->user_begin();
370 auto *FirstUser = *UI++;
371 while (UI != V->user_end()) {
372 assert(*UI == FirstUser && "Currently we only support SLP trees.");
382 dbgs() << "buildGraph: ";
// Bail out (via markFailed, presumably) if the bundle cannot be vectorized.
386 if (!areVectorizable(Values))
389 assert(getOpcode(Values) && "Opcodes for all values must match");
390 unsigned ValuesOpcode = getOpcode(Values).getValue();
392 SmallVector<VPValue *, 4> CombinedOperands;
393 if (areCommutative(Values)) {
// Entering a commutative region: only the outermost call (the multi-node
// root) performs the final re-ordering below.
394 bool MultiNodeRoot = !MultiNodeActive;
395 MultiNodeActive = true;
396 for (auto &Operands : getOperands(Values)) {
398 dbgs() << "  Visiting Commutative";
399 dumpBundle(Operands);
// Operand bundles with the same opcode extend the multi-node; others are
// deferred behind a placeholder until re-ordering decides their lanes.
402 auto OperandsOpcode = getOpcode(Operands);
403 if (OperandsOpcode && OperandsOpcode == getOpcode(Values)) {
404 LLVM_DEBUG(dbgs() << "  Same opcode, continue building\n");
405 CombinedOperands.push_back(buildGraph(Operands));
407 LLVM_DEBUG(dbgs() << "  Adding multinode Ops\n");
408 // Create dummy VPInstruction, which will we replace later by the
409 // re-ordered operand.
410 VPInstruction *Op = new VPInstruction(0, {});
411 CombinedOperands.push_back(Op);
412 MultiNodeOps.emplace_back(Op, Operands);
// At the multi-node root: re-order the collected bundles, build their
// combined nodes, and patch every placeholder with the real result.
417 LLVM_DEBUG(dbgs() << "Reorder \n");
418 MultiNodeActive = false;
420 auto FinalOrder = reorderMultiNodeOps();
422 MultiNodeOps.clear();
423 for (auto &Ops : FinalOrder) {
424 VPInstruction *NewOp = buildGraph(Ops.second);
425 Ops.first->replaceAllUsesWith(NewOp);
426 for (unsigned i = 0; i < CombinedOperands.size(); i++)
427 if (CombinedOperands[i] == Ops.first)
428 CombinedOperands[i] = NewOp;
432 LLVM_DEBUG(dbgs() << "Found final order\n");
435 LLVM_DEBUG(dbgs() << "  NonCommuntative\n");
// Loads are leaves: their address operand is taken directly rather than
// recursing; everything else recurses per operand bundle.
436 if (ValuesOpcode == Instruction::Load)
437 for (VPValue *V : Values)
438 CombinedOperands.push_back(cast<VPInstruction>(V)->getOperand(0));
440 for (auto &Operands : getOperands(Values))
441 CombinedOperands.push_back(buildGraph(Operands));
// Map IR memory opcodes to their SLP VPInstruction counterparts; all other
// opcodes are kept as-is.
445 switch (ValuesOpcode) {
446 case Instruction::Load:
447 Opcode = VPInstruction::SLPLoad;
449 case Instruction::Store:
450 Opcode = VPInstruction::SLPStore;
453 Opcode = ValuesOpcode;
// NOTE(review): assert message reads "Need more some operands" — typo.
460 assert(CombinedOperands.size() > 0 && "Need more some operands");
461 auto *VPI = new VPInstruction(Opcode, CombinedOperands);
// The first lane's underlying IR instruction stands in for the whole bundle.
462 VPI->setUnderlyingInstr(cast<VPInstruction>(Values[0])->getUnderlyingInstr());
464 LLVM_DEBUG(dbgs() << "Create VPInstruction "; VPI->print(dbgs());
465 cast<VPInstruction>(Values[0])->print(dbgs()); dbgs() << "\n");
466 addCombined(Values, VPI);