//===-- WebAssemblyCFGStackify.cpp - CFG Stackification -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file implements a CFG stacking pass.
///
/// This pass reorders the blocks in a function to put them into topological
/// order, ignoring loop backedges, and without any loop being interrupted
/// by a block not dominated by the loop header, with special care to keep the
/// order as similar as possible to the original order.
///
/// Then, it inserts BLOCK and LOOP markers to mark the start of scopes, since
/// scope boundaries serve as the labels for WebAssembly's control transfers.
///
/// This is sufficient to convert arbitrary CFGs into a form that works on
/// WebAssembly, provided that all loops are single-entry.
///
//===----------------------------------------------------------------------===//
#include "WebAssembly.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyUtilities.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
43 #define DEBUG_TYPE "wasm-cfg-stackify"
46 class WebAssemblyCFGStackify final : public MachineFunctionPass {
47 StringRef getPassName() const override { return "WebAssembly CFG Stackify"; }
49 void getAnalysisUsage(AnalysisUsage &AU) const override {
51 AU.addRequired<MachineDominatorTree>();
52 AU.addPreserved<MachineDominatorTree>();
53 AU.addRequired<MachineLoopInfo>();
54 AU.addPreserved<MachineLoopInfo>();
55 MachineFunctionPass::getAnalysisUsage(AU);
58 bool runOnMachineFunction(MachineFunction &MF) override;
61 static char ID; // Pass identification, replacement for typeid
62 WebAssemblyCFGStackify() : MachineFunctionPass(ID) {}
64 } // end anonymous namespace
66 char WebAssemblyCFGStackify::ID = 0;
67 FunctionPass *llvm::createWebAssemblyCFGStackify() {
68 return new WebAssemblyCFGStackify();
71 /// Return the "bottom" block of a loop. This differs from
72 /// MachineLoop::getBottomBlock in that it works even if the loop is
74 static MachineBasicBlock *LoopBottom(const MachineLoop *Loop) {
75 MachineBasicBlock *Bottom = Loop->getHeader();
76 for (MachineBasicBlock *MBB : Loop->blocks())
77 if (MBB->getNumber() > Bottom->getNumber())
82 static void MaybeUpdateTerminator(MachineBasicBlock *MBB) {
84 bool AnyBarrier = false;
86 bool AllAnalyzable = true;
87 for (const MachineInstr &Term : MBB->terminators()) {
89 AnyBarrier |= Term.isBarrier();
91 AllAnalyzable &= Term.isBranch() && !Term.isIndirectBranch();
93 assert((AnyBarrier || AllAnalyzable) &&
94 "AnalyzeBranch needs to analyze any block with a fallthrough");
96 MBB->updateTerminator();
100 /// Sort blocks by their number.
101 struct CompareBlockNumbers {
102 bool operator()(const MachineBasicBlock *A,
103 const MachineBasicBlock *B) const {
104 return A->getNumber() > B->getNumber();
107 /// Sort blocks by their number in the opposite order..
108 struct CompareBlockNumbersBackwards {
109 bool operator()(const MachineBasicBlock *A,
110 const MachineBasicBlock *B) const {
111 return A->getNumber() < B->getNumber();
114 /// Bookkeeping for a loop to help ensure that we don't mix blocks not dominated
115 /// by the loop header among the loop's blocks.
117 const MachineLoop *Loop;
118 unsigned NumBlocksLeft;
120 /// List of blocks not dominated by Loop's header that are deferred until
121 /// after all of Loop's blocks have been seen.
122 std::vector<MachineBasicBlock *> Deferred;
124 explicit Entry(const MachineLoop *L)
125 : Loop(L), NumBlocksLeft(L->getNumBlocks()) {}
129 /// Sort the blocks, taking special care to make sure that loops are not
130 /// interrupted by blocks not dominated by their header.
131 /// TODO: There are many opportunities for improving the heuristics here.
133 static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
134 const MachineDominatorTree &MDT) {
135 // Prepare for a topological sort: Record the number of predecessors each
136 // block has, ignoring loop backedges.
138 SmallVector<unsigned, 16> NumPredsLeft(MF.getNumBlockIDs(), 0);
139 for (MachineBasicBlock &MBB : MF) {
140 unsigned N = MBB.pred_size();
141 if (MachineLoop *L = MLI.getLoopFor(&MBB))
142 if (L->getHeader() == &MBB)
143 for (const MachineBasicBlock *Pred : MBB.predecessors())
144 if (L->contains(Pred))
146 NumPredsLeft[MBB.getNumber()] = N;
149 // Topological sort the CFG, with additional constraints:
150 // - Between a loop header and the last block in the loop, there can be
151 // no blocks not dominated by the loop header.
152 // - It's desirable to preserve the original block order when possible.
153 // We use two ready lists; Preferred and Ready. Preferred has recently
154 // processed sucessors, to help preserve block sequences from the original
155 // order. Ready has the remaining ready blocks.
156 PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
159 PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
160 CompareBlockNumbersBackwards>
162 SmallVector<Entry, 4> Loops;
163 for (MachineBasicBlock *MBB = &MF.front();;) {
164 const MachineLoop *L = MLI.getLoopFor(MBB);
166 // If MBB is a loop header, add it to the active loop list. We can't put
167 // any blocks that it doesn't dominate until we see the end of the loop.
168 if (L->getHeader() == MBB)
169 Loops.push_back(Entry(L));
170 // For each active loop the block is in, decrement the count. If MBB is
171 // the last block in an active loop, take it off the list and pick up any
172 // blocks deferred because the header didn't dominate them.
173 for (Entry &E : Loops)
174 if (E.Loop->contains(MBB) && --E.NumBlocksLeft == 0)
175 for (auto DeferredBlock : E.Deferred)
176 Ready.push(DeferredBlock);
177 while (!Loops.empty() && Loops.back().NumBlocksLeft == 0)
180 // The main topological sort logic.
181 for (MachineBasicBlock *Succ : MBB->successors()) {
183 if (MachineLoop *SuccL = MLI.getLoopFor(Succ))
184 if (SuccL->getHeader() == Succ && SuccL->contains(MBB))
186 // Decrement the predecessor count. If it's now zero, it's ready.
187 if (--NumPredsLeft[Succ->getNumber()] == 0)
188 Preferred.push(Succ);
190 // Determine the block to follow MBB. First try to find a preferred block,
191 // to preserve the original block order when possible.
192 MachineBasicBlock *Next = nullptr;
193 while (!Preferred.empty()) {
194 Next = Preferred.top();
196 // If X isn't dominated by the top active loop header, defer it until that
198 if (!Loops.empty() &&
199 !MDT.dominates(Loops.back().Loop->getHeader(), Next)) {
200 Loops.back().Deferred.push_back(Next);
204 // If Next was originally ordered before MBB, and it isn't because it was
205 // loop-rotated above the header, it's not preferred.
206 if (Next->getNumber() < MBB->getNumber() &&
207 (!L || !L->contains(Next) ||
208 L->getHeader()->getNumber() < Next->getNumber())) {
215 // If we didn't find a suitable block in the Preferred list, check the
216 // general Ready list.
218 // If there are no more blocks to process, we're done.
220 MaybeUpdateTerminator(MBB);
226 // If Next isn't dominated by the top active loop header, defer it until
227 // that loop is done.
228 if (!Loops.empty() &&
229 !MDT.dominates(Loops.back().Loop->getHeader(), Next)) {
230 Loops.back().Deferred.push_back(Next);
236 // Move the next block into place and iterate.
237 Next->moveAfter(MBB);
238 MaybeUpdateTerminator(MBB);
241 assert(Loops.empty() && "Active loop list not finished");
245 SmallSetVector<MachineLoop *, 8> OnStack;
247 // Insert a sentinel representing the degenerate loop that starts at the
248 // function entry block and includes the entire function as a "loop" that
250 OnStack.insert(nullptr);
252 for (auto &MBB : MF) {
253 assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative.");
255 MachineLoop *Loop = MLI.getLoopFor(&MBB);
256 if (Loop && &MBB == Loop->getHeader()) {
257 // Loop header. The loop predecessor should be sorted above, and the other
258 // predecessors should be backedges below.
259 for (auto Pred : MBB.predecessors())
261 (Pred->getNumber() < MBB.getNumber() || Loop->contains(Pred)) &&
262 "Loop header predecessors must be loop predecessors or backedges");
263 assert(OnStack.insert(Loop) && "Loops should be declared at most once.");
265 // Not a loop header. All predecessors should be sorted above.
266 for (auto Pred : MBB.predecessors())
267 assert(Pred->getNumber() < MBB.getNumber() &&
268 "Non-loop-header predecessors should be topologically sorted");
269 assert(OnStack.count(MLI.getLoopFor(&MBB)) &&
270 "Blocks must be nested in their loops");
272 while (OnStack.size() > 1 && &MBB == LoopBottom(OnStack.back()))
275 assert(OnStack.pop_back_val() == nullptr &&
276 "The function entry block shouldn't actually be a loop header");
277 assert(OnStack.empty() &&
278 "Control flow stack pushes and pops should be balanced.");
282 /// Test whether Pred has any terminators explicitly branching to MBB, as
283 /// opposed to falling through. Note that it's possible (eg. in unoptimized
284 /// code) for a branch instruction to both branch to a block and fallthrough
285 /// to it, so we check the actual branch operands to see if there are any
286 /// explicit mentions.
287 static bool ExplicitlyBranchesTo(MachineBasicBlock *Pred,
288 MachineBasicBlock *MBB) {
289 for (MachineInstr &MI : Pred->terminators())
290 for (MachineOperand &MO : MI.explicit_operands())
291 if (MO.isMBB() && MO.getMBB() == MBB)
296 /// Insert a BLOCK marker for branches to MBB (if needed).
297 static void PlaceBlockMarker(
298 MachineBasicBlock &MBB, MachineFunction &MF,
299 SmallVectorImpl<MachineBasicBlock *> &ScopeTops,
300 DenseMap<const MachineInstr *, MachineInstr *> &BlockTops,
301 DenseMap<const MachineInstr *, MachineInstr *> &LoopTops,
302 const WebAssemblyInstrInfo &TII,
303 const MachineLoopInfo &MLI,
304 MachineDominatorTree &MDT,
305 WebAssemblyFunctionInfo &MFI) {
306 // First compute the nearest common dominator of all forward non-fallthrough
307 // predecessors so that we minimize the time that the BLOCK is on the stack,
308 // which reduces overall stack height.
309 MachineBasicBlock *Header = nullptr;
310 bool IsBranchedTo = false;
311 int MBBNumber = MBB.getNumber();
312 for (MachineBasicBlock *Pred : MBB.predecessors())
313 if (Pred->getNumber() < MBBNumber) {
314 Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred;
315 if (ExplicitlyBranchesTo(Pred, &MBB))
323 assert(&MBB != &MF.front() && "Header blocks shouldn't have predecessors");
324 MachineBasicBlock *LayoutPred = &*std::prev(MachineFunction::iterator(&MBB));
326 // If the nearest common dominator is inside a more deeply nested context,
327 // walk out to the nearest scope which isn't more deeply nested.
328 for (MachineFunction::iterator I(LayoutPred), E(Header); I != E; --I) {
329 if (MachineBasicBlock *ScopeTop = ScopeTops[I->getNumber()]) {
330 if (ScopeTop->getNumber() > Header->getNumber()) {
331 // Skip over an intervening scope.
332 I = std::next(MachineFunction::iterator(ScopeTop));
334 // We found a scope level at an appropriate depth.
341 // Decide where in Header to put the BLOCK.
342 MachineBasicBlock::iterator InsertPos;
343 MachineLoop *HeaderLoop = MLI.getLoopFor(Header);
344 if (HeaderLoop && MBB.getNumber() > LoopBottom(HeaderLoop)->getNumber()) {
345 // Header is the header of a loop that does not lexically contain MBB, so
346 // the BLOCK needs to be above the LOOP, after any END constructs.
347 InsertPos = Header->begin();
348 while (InsertPos->getOpcode() == WebAssembly::END_BLOCK ||
349 InsertPos->getOpcode() == WebAssembly::END_LOOP)
352 // Otherwise, insert the BLOCK as late in Header as we can, but before the
353 // beginning of the local expression tree and any nested BLOCKs.
354 InsertPos = Header->getFirstTerminator();
355 while (InsertPos != Header->begin() &&
356 WebAssembly::isChild(*std::prev(InsertPos), MFI) &&
357 std::prev(InsertPos)->getOpcode() != WebAssembly::LOOP &&
358 std::prev(InsertPos)->getOpcode() != WebAssembly::END_BLOCK &&
359 std::prev(InsertPos)->getOpcode() != WebAssembly::END_LOOP)
364 MachineInstr *Begin = BuildMI(*Header, InsertPos, DebugLoc(),
365 TII.get(WebAssembly::BLOCK))
366 .addImm(int64_t(WebAssembly::ExprType::Void));
368 // Mark the end of the block.
369 InsertPos = MBB.begin();
370 while (InsertPos != MBB.end() &&
371 InsertPos->getOpcode() == WebAssembly::END_LOOP &&
372 LoopTops[&*InsertPos]->getParent()->getNumber() >= Header->getNumber())
374 MachineInstr *End = BuildMI(MBB, InsertPos, DebugLoc(),
375 TII.get(WebAssembly::END_BLOCK));
376 BlockTops[End] = Begin;
378 // Track the farthest-spanning scope that ends at this point.
379 int Number = MBB.getNumber();
380 if (!ScopeTops[Number] ||
381 ScopeTops[Number]->getNumber() > Header->getNumber())
382 ScopeTops[Number] = Header;
385 /// Insert a LOOP marker for a loop starting at MBB (if it's a loop header).
386 static void PlaceLoopMarker(
387 MachineBasicBlock &MBB, MachineFunction &MF,
388 SmallVectorImpl<MachineBasicBlock *> &ScopeTops,
389 DenseMap<const MachineInstr *, MachineInstr *> &LoopTops,
390 const WebAssemblyInstrInfo &TII, const MachineLoopInfo &MLI) {
391 MachineLoop *Loop = MLI.getLoopFor(&MBB);
392 if (!Loop || Loop->getHeader() != &MBB)
395 // The operand of a LOOP is the first block after the loop. If the loop is the
396 // bottom of the function, insert a dummy block at the end.
397 MachineBasicBlock *Bottom = LoopBottom(Loop);
398 auto Iter = std::next(MachineFunction::iterator(Bottom));
399 if (Iter == MF.end()) {
400 MachineBasicBlock *Label = MF.CreateMachineBasicBlock();
401 // Give it a fake predecessor so that AsmPrinter prints its label.
402 Label->addSuccessor(Label);
404 Iter = std::next(MachineFunction::iterator(Bottom));
406 MachineBasicBlock *AfterLoop = &*Iter;
408 // Mark the beginning of the loop (after the end of any existing loop that
410 auto InsertPos = MBB.begin();
411 while (InsertPos != MBB.end() &&
412 InsertPos->getOpcode() == WebAssembly::END_LOOP)
414 MachineInstr *Begin = BuildMI(MBB, InsertPos, DebugLoc(),
415 TII.get(WebAssembly::LOOP))
416 .addImm(int64_t(WebAssembly::ExprType::Void));
418 // Mark the end of the loop.
419 MachineInstr *End = BuildMI(*AfterLoop, AfterLoop->begin(), DebugLoc(),
420 TII.get(WebAssembly::END_LOOP));
421 LoopTops[End] = Begin;
423 assert((!ScopeTops[AfterLoop->getNumber()] ||
424 ScopeTops[AfterLoop->getNumber()]->getNumber() < MBB.getNumber()) &&
425 "With block sorting the outermost loop for a block should be first.");
426 if (!ScopeTops[AfterLoop->getNumber()])
427 ScopeTops[AfterLoop->getNumber()] = &MBB;
431 GetDepth(const SmallVectorImpl<const MachineBasicBlock *> &Stack,
432 const MachineBasicBlock *MBB) {
434 for (auto X : reverse(Stack)) {
439 assert(Depth < Stack.size() && "Branch destination should be in scope");
443 /// In normal assembly languages, when the end of a function is unreachable,
444 /// because the function ends in an infinite loop or a noreturn call or similar,
445 /// it isn't necessary to worry about the function return type at the end of
446 /// the function, because it's never reached. However, in WebAssembly, blocks
447 /// that end at the function end need to have a return type signature that
448 /// matches the function signature, even though it's unreachable. This function
449 /// checks for such cases and fixes up the signatures.
450 static void FixEndsAtEndOfFunction(
452 const WebAssemblyFunctionInfo &MFI,
453 DenseMap<const MachineInstr *, MachineInstr *> &BlockTops,
454 DenseMap<const MachineInstr *, MachineInstr *> &LoopTops) {
455 assert(MFI.getResults().size() <= 1);
457 if (MFI.getResults().empty())
460 WebAssembly::ExprType retType;
461 switch (MFI.getResults().front().SimpleTy) {
462 case MVT::i32: retType = WebAssembly::ExprType::I32; break;
463 case MVT::i64: retType = WebAssembly::ExprType::I64; break;
464 case MVT::f32: retType = WebAssembly::ExprType::F32; break;
465 case MVT::f64: retType = WebAssembly::ExprType::F64; break;
466 case MVT::v16i8: retType = WebAssembly::ExprType::I8x16; break;
467 case MVT::v8i16: retType = WebAssembly::ExprType::I16x8; break;
468 case MVT::v4i32: retType = WebAssembly::ExprType::I32x4; break;
469 case MVT::v4f32: retType = WebAssembly::ExprType::F32x4; break;
470 default: llvm_unreachable("unexpected return type");
473 for (MachineBasicBlock &MBB : reverse(MF)) {
474 for (MachineInstr &MI : reverse(MBB)) {
475 if (MI.isPosition() || MI.isDebugValue())
477 if (MI.getOpcode() == WebAssembly::END_BLOCK) {
478 BlockTops[&MI]->getOperand(0).setImm(int32_t(retType));
481 if (MI.getOpcode() == WebAssembly::END_LOOP) {
482 LoopTops[&MI]->getOperand(0).setImm(int32_t(retType));
485 // Something other than an `end`. We're done.
491 /// Insert LOOP and BLOCK markers at appropriate places.
492 static void PlaceMarkers(MachineFunction &MF, const MachineLoopInfo &MLI,
493 const WebAssemblyInstrInfo &TII,
494 MachineDominatorTree &MDT,
495 WebAssemblyFunctionInfo &MFI) {
496 // For each block whose label represents the end of a scope, record the block
497 // which holds the beginning of the scope. This will allow us to quickly skip
498 // over scoped regions when walking blocks. We allocate one more than the
499 // number of blocks in the function to accommodate for the possible fake block
500 // we may insert at the end.
501 SmallVector<MachineBasicBlock *, 8> ScopeTops(MF.getNumBlockIDs() + 1);
503 // For each LOOP_END, the corresponding LOOP.
504 DenseMap<const MachineInstr *, MachineInstr *> LoopTops;
506 // For each END_BLOCK, the corresponding BLOCK.
507 DenseMap<const MachineInstr *, MachineInstr *> BlockTops;
509 for (auto &MBB : MF) {
510 // Place the LOOP for MBB if MBB is the header of a loop.
511 PlaceLoopMarker(MBB, MF, ScopeTops, LoopTops, TII, MLI);
513 // Place the BLOCK for MBB if MBB is branched to from above.
514 PlaceBlockMarker(MBB, MF, ScopeTops, BlockTops, LoopTops, TII, MLI, MDT, MFI);
517 // Now rewrite references to basic blocks to be depth immediates.
518 SmallVector<const MachineBasicBlock *, 8> Stack;
519 for (auto &MBB : reverse(MF)) {
520 for (auto &MI : reverse(MBB)) {
521 switch (MI.getOpcode()) {
522 case WebAssembly::BLOCK:
523 assert(ScopeTops[Stack.back()->getNumber()]->getNumber() <= MBB.getNumber() &&
524 "Block should be balanced");
527 case WebAssembly::LOOP:
528 assert(Stack.back() == &MBB && "Loop top should be balanced");
531 case WebAssembly::END_BLOCK:
532 Stack.push_back(&MBB);
534 case WebAssembly::END_LOOP:
535 Stack.push_back(LoopTops[&MI]->getParent());
538 if (MI.isTerminator()) {
539 // Rewrite MBB operands to be depth immediates.
540 SmallVector<MachineOperand, 4> Ops(MI.operands());
541 while (MI.getNumOperands() > 0)
542 MI.RemoveOperand(MI.getNumOperands() - 1);
543 for (auto MO : Ops) {
545 MO = MachineOperand::CreateImm(GetDepth(Stack, MO.getMBB()));
546 MI.addOperand(MF, MO);
553 assert(Stack.empty() && "Control flow should be balanced");
555 // Fix up block/loop signatures at the end of the function to conform to
556 // WebAssembly's rules.
557 FixEndsAtEndOfFunction(MF, MFI, BlockTops, LoopTops);
560 bool WebAssemblyCFGStackify::runOnMachineFunction(MachineFunction &MF) {
561 DEBUG(dbgs() << "********** CFG Stackifying **********\n"
562 "********** Function: "
563 << MF.getName() << '\n');
565 const auto &MLI = getAnalysis<MachineLoopInfo>();
566 auto &MDT = getAnalysis<MachineDominatorTree>();
567 // Liveness is not tracked for VALUE_STACK physreg.
568 const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
569 WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
570 MF.getRegInfo().invalidateLiveness();
572 // Sort the blocks, with contiguous loops.
573 SortBlocks(MF, MLI, MDT);
575 // Place the BLOCK and LOOP markers to indicate the beginnings of scopes.
576 PlaceMarkers(MF, MLI, TII, MDT, MFI);