//===- CoroFrame.cpp - Builds and manipulates coroutine frame ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover if for a particular value
// there is a path from its definition to a use that crosses a suspend block.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//
// TODO: pack values tightly using liveness info.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between blocks and their numbers.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V.begin(), V.end());
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = std::lower_bound(V.begin(), V.end(), BB);
    assert(I != V.end() && *I == BB && "BasicBlockNumbering: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer the question
// whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes:  a bit vector which contains a set of indices of blocks that can
//              reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of the paths crosses a suspend
//          point
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
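//
// A minimal sketch over a hypothetical CFG (block names invented for this
// illustration):
//
//     entry --> susp --> use      ; 'susp' contains a coro.suspend
//       \________________^        ; plus a direct edge entry -> use
//
// Both 'entry' and 'susp' can reach 'use', so Consumes for 'use' holds
// {entry, susp, use}. The path entry -> susp -> use crosses the suspend
// point, so Kills for 'use' holds {entry, susp}, and
// hasPathCrossingSuspendPoint(entry, use) answers true even though the
// direct edge entry -> use does not cross a suspend point.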
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    assert(Block[UseIndex].Consumes[DefIndex] && "use must consume def");
    bool const Result = Block[UseIndex].Kills[DefIndex];
    LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes, so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();
    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    return isDefinitionAcrossSuspend(I.getParent(), U);
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends, as
  // the code beyond coro.end is reachable during the initial invocation of the
  // coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing a coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine, and all of
  // the state needs to be saved by that time.
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (CoroSuspendInst *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    markSuspendBlock(CSI->getCoroSave());
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
    LLVM_DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend)
          S.Kills |= B.Consumes;

        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks
          // it consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during the initial
          // invocation of the coroutine while all the data are still available
          // on the stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that it is not in its own
          // kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

// We build up the list of spills for every case where a use is separated
// from the definition by a suspend point.

namespace {
class Spill {
  Value *Def = nullptr;
  Instruction *User = nullptr;
  unsigned FieldNo = 0;

public:
  Spill(Value *Def, llvm::User *U) : Def(Def), User(cast<Instruction>(U)) {}

  Value *def() const { return Def; }
  Instruction *user() const { return User; }
  BasicBlock *userBlock() const { return User->getParent(); }

  // Note that the field index is stored in the first Spill entry for a
  // particular definition. Subsequent mentions of a definition do not have
  // FieldNo assigned. This works out fine, as the users of Spills capture the
  // info about the definition the first time they encounter it. Consider
  // refactoring SpillInfo into two arrays to normalize the spill
  // representation.
  unsigned fieldIndex() const {
    assert(FieldNo && "Accessing unassigned field");
    return FieldNo;
  }
  void setFieldIndex(unsigned FieldNumber) {
    assert(!FieldNo && "Reassigning field number");
    FieldNo = FieldNumber;
  }
};
} // end anonymous namespace

// Note that there may be more than one record with the same value of Def in
// the SpillInfo vector.
using SpillInfo = SmallVector<Spill, 8>;
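
// For illustration, if %x has uses across suspends in two blocks and %y in
// one, the vector might hold (values and field numbers are hypothetical):
//
//   { %x, user1, FieldNo = 4 }   ; field index recorded on first mention
//   { %x, user2, FieldNo = 0 }   ; left unassigned on subsequent mentions
//   { %y, user3, FieldNo = 5 }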

#ifndef NDEBUG
static void dump(StringRef Title, SpillInfo const &Spills) {
  dbgs() << "------------- " << Title << "--------------\n";
  Value *CurrentValue = nullptr;
  for (auto const &E : Spills) {
    if (CurrentValue != E.def()) {
      CurrentValue = E.def();
      CurrentValue->dump();
    }
    dbgs() << "   user: ";
    E.user()->dump();
  }
}
#endif

namespace {
// We cannot rely solely on natural alignment of a type when building a
// coroutine frame: if the alignment specified on the alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
struct PaddingCalculator {
  const DataLayout &DL;
  LLVMContext &Context;
  unsigned StructSize = 0;

  PaddingCalculator(LLVMContext &Context, DataLayout const &DL)
      : DL(DL), Context(Context) {}

  // Replicate the logic from IR/DataLayout.cpp to match field offset
  // computation for LLVM structs.
  void addType(Type *Ty) {
    unsigned TyAlign = DL.getABITypeAlignment(Ty);
    if ((StructSize & (TyAlign - 1)) != 0)
      StructSize = alignTo(StructSize, TyAlign); // Add padding.

    StructSize += DL.getTypeAllocSize(Ty); // Consume space for this data item.
  }

  void addTypes(SmallVectorImpl<Type *> const &Types) {
    for (auto *Ty : Types)
      addType(Ty);
  }

  unsigned computePadding(Type *Ty, unsigned ForcedAlignment) {
    unsigned TyAlign = DL.getABITypeAlignment(Ty);
    auto Natural = alignTo(StructSize, TyAlign);
    auto Forced = alignTo(StructSize, ForcedAlignment);

    // Return how many bytes of padding we need to insert.
    if (Natural != Forced)
      return std::max(Natural, Forced) - StructSize;

    // Rely on natural alignment.
    return 0;
  }

  // If padding is required, return the padding field type to insert.
  ArrayType *getPaddingType(Type *Ty, unsigned ForcedAlignment) {
    if (auto Padding = computePadding(Ty, ForcedAlignment))
      return ArrayType::get(Type::getInt8Ty(Context), Padding);

    return nullptr;
  }
};
} // end anonymous namespace
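
// A worked example with hypothetical numbers: suppose StructSize is 20 and we
// add an alloca of type i64 (natural alignment 8) that carries a forced
// 32-byte alignment. Then Natural = alignTo(20, 8) = 24 and
// Forced = alignTo(20, 32) = 32; since they differ, computePadding returns
// max(24, 32) - 20 = 12, and getPaddingType yields [12 x i8] to insert before
// the field.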

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     ... promise (if present) ...
//     int ResumeIndex;
//     ... spills ...
//   };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  SpillInfo &Spills) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PaddingCalculator Padder(C, DL);
  SmallString<32> Name(F.getName());
  Name.append(".Frame");
  StructType *FrameTy = StructType::create(C, Name);
  auto *FramePtrTy = FrameTy->getPointerTo();
  auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                 /*IsVarArgs=*/false);
  auto *FnPtrTy = FnTy->getPointerTo();

  // Figure out how wide an integer type storing the suspend index should be.
  unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
  Type *PromiseType = Shape.PromiseAlloca
                          ? Shape.PromiseAlloca->getType()->getElementType()
                          : Type::getInt1Ty(C);
  SmallVector<Type *, 8> Types{FnPtrTy, FnPtrTy, PromiseType,
                               Type::getIntNTy(C, IndexBits)};
  Value *CurrentDef = nullptr;

  Padder.addTypes(Types);

  // Create an entry for every spilled value.
  for (auto &S : Spills) {
    if (CurrentDef == S.def())
      continue;

    CurrentDef = S.def();
    // PromiseAlloca was already added to the Types array earlier.
    if (CurrentDef == Shape.PromiseAlloca)
      continue;

    Type *Ty = nullptr;
    if (auto *AI = dyn_cast<AllocaInst>(CurrentDef)) {
      Ty = AI->getAllocatedType();
      if (unsigned AllocaAlignment = AI->getAlignment()) {
        // If alignment is specified in the alloca, see if we need to insert
        // extra padding.
        if (auto PaddingTy = Padder.getPaddingType(Ty, AllocaAlignment)) {
          Types.push_back(PaddingTy);
          Padder.addType(PaddingTy);
        }
      }
    } else {
      Ty = CurrentDef->getType();
    }
    S.setFieldIndex(Types.size());
    Types.push_back(Ty);
    Padder.addType(Ty);
  }
  FrameTy->setBody(Types);

  return FrameTy;
}
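
// For instance (a hypothetical coroutine f with no promise, at most two
// suspend points, and a single spilled i32 %x), the resulting frame might be:
//
//   %f.Frame = type { void (%f.Frame*)*, void (%f.Frame*)*, i1, i1, i32 }
//
// i.e. the two function pointers, the i1 promise placeholder, an i1 suspend
// index, and the %x spill slot as field 4.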

// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing it before the catchswitch would violate
// the requirement that a catchswitch, like all other EHPads, must be the first
// non-PHI in a block.
//
// Split away the catchswitch into a separate block and insert in its place:
//
//    cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}
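
// Sketched on hypothetical IR, a block such as
//
//   dispatch:
//     %cs = catchswitch within none [label %handler] unwind to caller
//
// becomes
//
//   dispatch:
//     %pad = cleanuppad within none []
//     cleanupret from %pad unwind label %dispatch.split  ; <- insert point
//   dispatch.split:
//     %cs = catchswitch within none [label %handler] unwind to caller
//
// and the spill store is inserted just before the cleanupret.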

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from the coroutine frame + loads and stores. Create
// an AllocaSpillBB that will become the new entry block for the resume parts
// of the coroutine:
//
//    %hdl = coro.begin(...)
//    whatever
//
// becomes:
//
//    %hdl = coro.begin(...)
//    %FramePtr = bitcast i8* hdl to %f.frame*
//    br label %AllocaSpillBB
//
//  AllocaSpillBB:
//    ; geps corresponding to allocas that were moved to coroutine frame
//    br label PostSpill
//
//  PostSpill:
//    whatever
//
static Instruction *insertSpills(SpillInfo &Spills, coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  IRBuilder<> Builder(CB->getNextNode());
  PointerType *FramePtrTy = Shape.FrameTy->getPointerTo();
  auto *FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
  Type *FrameTy = FramePtrTy->getElementType();

  Value *CurrentValue = nullptr;
  BasicBlock *CurrentBlock = nullptr;
  Value *CurrentReload = nullptr;
  unsigned Index = 0; // Proper field number will be read from field definition.

  // We need to keep track of any allocas that need "spilling", since they will
  // live in the coroutine frame now: all access to them has to be changed, not
  // just the accesses across suspend points. We remember the allocas and their
  // field indices to be handled once we have processed all the spills.
  SmallVector<std::pair<AllocaInst *, unsigned>, 4> Allocas;
  // Promise alloca (if present) has a fixed field number (Shape::PromiseField).
  if (Shape.PromiseAlloca)
    Allocas.emplace_back(Shape.PromiseAlloca, coro::Shape::PromiseField);

  // Create a load instruction to reload the spilled value from the coroutine
  // frame.
  auto CreateReload = [&](Instruction *InsertBefore) {
    assert(Index && "accessing unassigned field number");
    Builder.SetInsertPoint(InsertBefore);
    auto *G = Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, Index,
                                                 CurrentValue->getName() +
                                                     Twine(".reload.addr"));
    return isa<AllocaInst>(CurrentValue)
               ? G
               : Builder.CreateLoad(G,
                                    CurrentValue->getName() + Twine(".reload"));
  };
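
  // The reload for a non-alloca value looks like this in IR (hypothetical
  // value %x spilled to field 4):
  //
  //   %x.reload.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
  //                                           i32 0, i32 4
  //   %x.reload = load i32, i32* %x.reload.addr
  //
  // For an alloca, the GEP itself is the replacement, so no load is emitted.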

  for (auto const &E : Spills) {
    // If we have not seen the value, generate a spill.
    if (CurrentValue != E.def()) {
      CurrentValue = E.def();
      CurrentBlock = nullptr;
      CurrentReload = nullptr;

      Index = E.fieldIndex();

      if (auto *AI = dyn_cast<AllocaInst>(CurrentValue)) {
        // A spilled AllocaInst will be replaced with a GEP from the coroutine
        // frame; there is no spill required.
        Allocas.emplace_back(AI, Index);
        if (!AI->isStaticAlloca())
          report_fatal_error("Coroutines cannot handle non static allocas yet");
      } else {
        // Otherwise, create a store instruction storing the value into the
        // coroutine frame.

        Instruction *InsertPt = nullptr;
        if (isa<Argument>(CurrentValue)) {
          // For arguments, we will place the store instruction right after
          // the coroutine frame pointer instruction, i.e. the bitcast of
          // coro.begin from i8* to %f.frame*.
          InsertPt = FramePtr->getNextNode();
        } else if (auto *II = dyn_cast<InvokeInst>(CurrentValue)) {
          // If we are spilling the result of the invoke instruction, split the
          // normal edge and insert the spill in the new block.
          auto NewBB = SplitEdge(II->getParent(), II->getNormalDest());
          InsertPt = NewBB->getTerminator();
        } else if (isa<PHINode>(CurrentValue)) {
          // Skip the PHINodes and EH pad instructions.
          BasicBlock *DefBlock = cast<Instruction>(E.def())->getParent();
          if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
            InsertPt = splitBeforeCatchSwitch(CSI);
          else
            InsertPt = &*DefBlock->getFirstInsertionPt();
        } else {
          // For all other values, the spill is placed immediately after
          // the definition.
          assert(!isa<TerminatorInst>(E.def()) && "unexpected terminator");
          InsertPt = cast<Instruction>(E.def())->getNextNode();
        }

        Builder.SetInsertPoint(InsertPt);
        auto *G = Builder.CreateConstInBoundsGEP2_32(
            FrameTy, FramePtr, 0, Index,
            CurrentValue->getName() + Twine(".spill.addr"));
        Builder.CreateStore(CurrentValue, G);
      }
    }

    // If we have not seen the use block, generate a reload in it.
    if (CurrentBlock != E.userBlock()) {
      CurrentBlock = E.userBlock();
      CurrentReload = CreateReload(&*CurrentBlock->getFirstInsertionPt());
    }

    // If we have a single edge PHINode, remove it and replace it with a reload
    // from the coroutine frame. (We already took care of multi edge PHINodes
    // by rewriting them in the rewritePHIs function).
    if (auto *PN = dyn_cast<PHINode>(E.user())) {
      assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
                                                "values in the PHINode");
      PN->replaceAllUsesWith(CurrentReload);
      PN->eraseFromParent();
      continue;
    }

    // Replace all uses of CurrentValue in the current instruction with the
    // reload.
    E.user()->replaceUsesOfWith(CurrentValue, CurrentReload);
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();
  Shape.AllocaSpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  Shape.AllocaSpillBlock->splitBasicBlock(&Shape.AllocaSpillBlock->front(),
                                          "PostSpill");

  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  // If we found any allocas, replace all of their remaining uses with GEPs.
  for (auto &P : Allocas) {
    auto *G =
        Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, P.second);
    // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
    // here, as we are changing the location of the instruction.
    G->takeName(P.first);
    P.first->replaceAllUsesWith(G);
    P.first->eraseFromParent();
  }
  return FramePtr;
}

// Sets the unwind edge of an instruction to a particular successor.
static void setUnwindEdgeTo(TerminatorInst *TI, BasicBlock *Succ) {
  if (auto *II = dyn_cast<InvokeInst>(TI))
    II->setUnwindDest(Succ);
  else if (auto *CS = dyn_cast<CatchSwitchInst>(TI))
    CS->setUnwindDest(Succ);
  else if (auto *CR = dyn_cast<CleanupReturnInst>(TI))
    CR->setUnwindDest(Succ);
  else
    llvm_unreachable("unexpected terminator instruction");
}

// Replaces all uses of OldPred with the NewPred block in all PHINodes in a
// block.
static void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
                           BasicBlock *NewPred,
                           PHINode *LandingPadReplacement) {
  unsigned BBIdx = 0;
  for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // We manually update the LandingPadReplacement PHINode and it is the last
    // PHI Node. So, if we find it, we are done.
    if (LandingPadReplacement == PN)
      break;

    // Reuse the previous value of BBIdx if it lines up. In cases where we
    // have multiple phi nodes with *lots* of predecessors, this is a speed
    // win because we don't have to scan the PHI looking for TIBB. This
    // happens because the BB list of PHI nodes is usually in the same
    // order.
    if (PN->getIncomingBlock(BBIdx) != OldPred)
      BBIdx = PN->getBasicBlockIndex(OldPred);

    assert(BBIdx != (unsigned)-1 && "Invalid PHI Index!");
    PN->setIncomingBlock(BBIdx, NewPred);
  }
}

// Uses SplitEdge unless the successor block is an EHPad, in which case do EH
// specific handling.
static BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
                                    LandingPadInst *OriginalPad,
                                    PHINode *LandingPadReplacement) {
  auto *PadInst = Succ->getFirstNonPHI();
  if (!LandingPadReplacement && !PadInst->isEHPad())
    return SplitEdge(BB, Succ);

  auto *NewBB = BasicBlock::Create(BB->getContext(), "", BB->getParent(), Succ);
  setUnwindEdgeTo(BB->getTerminator(), NewBB);
  updatePhiNodes(Succ, BB, NewBB, LandingPadReplacement);

  if (LandingPadReplacement) {
    auto *NewLP = OriginalPad->clone();
    auto *Terminator = BranchInst::Create(Succ, NewBB);
    NewLP->insertBefore(Terminator);
    LandingPadReplacement->addIncoming(NewLP, NewBB);
    return NewBB;
  }

  Value *ParentPad = nullptr;
  if (auto *FuncletPad = dyn_cast<FuncletPadInst>(PadInst))
    ParentPad = FuncletPad->getParentPad();
  else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(PadInst))
    ParentPad = CatchSwitch->getParentPad();
  else
    llvm_unreachable("handling for other EHPads not implemented yet");

  auto *NewCleanupPad = CleanupPadInst::Create(ParentPad, {}, "", NewBB);
  CleanupReturnInst::Create(NewCleanupPad, Succ, NewBB);
  return NewBB;
}

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding the incoming value
  // in a single-edge PHI node.
  //
  // loop:
  //    %n.val = phi i32[%n, %entry], [%inc, %loop]
  //
  // It will be rewritten as:
  //
  // loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
  // loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.
  //
  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function after
    // ehAwareSplitEdge cloned it in the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(pred_begin(&BB), pred_end(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
    auto *PN = cast<PHINode>(&BB.front());
    do {
      int Index = PN->getBasicBlockIndex(IncomingBB);
      Value *V = PN->getIncomingValue(Index);
      PHINode *InputV = PHINode::Create(
          V->getType(), 1, V->getName() + Twine(".") + BB.getName(),
          &IncomingBB->front());
      InputV->addIncoming(V, Pred);
      PN->setIncomingValue(Index, InputV);
      PN = dyn_cast<PHINode>(PN->getNextNode());
    } while (PN != ReplPHI); // ReplPHI is either null or the PHI that replaced
                             // the landing pad.
  }

  if (LandingPad) {
    // Calls to the ehAwareSplitEdge function cloned the original landing pad.
    // We no longer need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume as opposed to spilling
// the result into a coroutine frame.
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}
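
// For example (hypothetical IR), instead of spilling the result of a cheap,
// side-effect-free computation such as
//
//   %p = getelementptr inbounds %struct.S, %struct.S* %s, i32 0, i32 1
//   ... coro.suspend ...
//   %v = load i32, i32* %p
//
// the getelementptr can simply be cloned on the resume path just before its
// use, so only its operands (here %s) need a slot in the coroutine frame.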

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}

// For every use of a value that is across a suspend point, recreate that value
// after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              SpillInfo const &Spills) {
  BasicBlock *CurrentBlock = nullptr;
  Instruction *CurrentMaterialization = nullptr;
  Instruction *CurrentDef = nullptr;

  for (auto const &E : Spills) {
    // If it is a new definition, update CurrentXXX variables.
    if (CurrentDef != E.def()) {
      CurrentDef = cast<Instruction>(E.def());
      CurrentBlock = nullptr;
      CurrentMaterialization = nullptr;
    }

    // If we have not seen this block, materialize the value.
    if (CurrentBlock != E.userBlock()) {
      CurrentBlock = E.userBlock();
      CurrentMaterialization = CurrentDef->clone();
      CurrentMaterialization->setName(CurrentDef->getName());
      CurrentMaterialization->insertBefore(
          &*CurrentBlock->getFirstInsertionPt());
    }

    if (auto *PN = dyn_cast<PHINode>(E.user())) {
      assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
                                                "values in the PHINode");
      PN->replaceAllUsesWith(CurrentMaterialization);
      PN->eraseFromParent();
      continue;
    }

    // Replace all uses of CurrentDef in the current instruction with the
    // CurrentMaterialization for the block.
    E.user()->replaceUsesOfWith(CurrentDef, CurrentMaterialization);
  }
}

// Move early uses of spilled variable after CoroBegin.
// For example, if a parameter had its address taken, we may end up with code
// like:
//        define @f(i32 %n) {
//          %n.addr = alloca i32
//          store %n, %n.addr
//          ...
//          call @coro.begin
//
// We need to move the store after coro.begin.
static void moveSpillUsesAfterCoroBegin(Function &F, SpillInfo const &Spills,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree DT(F);
  SmallVector<Instruction *, 8> NeedsMoving;

  Value *CurrentValue = nullptr;

  for (auto const &E : Spills) {
    if (CurrentValue == E.def())
      continue;

    CurrentValue = E.def();

    for (User *U : CurrentValue->users()) {
      Instruction *I = cast<Instruction>(U);
      if (!DT.dominates(CoroBegin, I)) {
        LLVM_DEBUG(dbgs() << "will move: " << *I << "\n");

        // TODO: Make this more robust. Currently if we run into a situation
        // where a simple instruction move won't work, we panic and
        // report_fatal_error.
        for (User *UI : I->users()) {
          if (!DT.dominates(CoroBegin, cast<Instruction>(UI)))
            report_fatal_error("cannot move instruction since its users are not"
                               " dominated by CoroBegin");
        }

        NeedsMoving.push_back(I);
      }
    }
  }

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *I : NeedsMoving)
    I->moveBefore(InsertPt);
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it
// will be all alone by itself in a block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}
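
// As a sketch (hypothetical IR), after splitAround(CSI, "CoroSuspend") the
// suspend sits alone in its block:
//
//   CoroSuspend:
//     %s = call i8 @llvm.coro.suspend(token %save, i1 false)
//     br label %AfterCoroSuspend
//
// which lets SuspendCrossingInfo do its bookkeeping per-block rather than
// per-instruction.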

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Lower coro.dbg.declare to coro.dbg.value, since we are going to rewrite
  // access to local variables.
  LowerDbgDeclare(F);

  Shape.PromiseAlloca = Shape.CoroBegin->getId()->getPromise();
  if (Shape.PromiseAlloca) {
    Shape.CoroBegin->getId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (CoroSuspendInst *CSI : Shape.CoroSuspends) {
    splitAround(CSI->getCoroSave(), "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (CoroEndInst *CE : Shape.CoroEnds)
    splitAround(CE, "CoroEnd");

  // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
  // never have its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  SpillInfo Spills;

  for (int Repeat = 0; Repeat < 4; ++Repeat) {
    // See if there are materializable instructions across suspend points.
    for (Instruction &I : instructions(F))
      if (materializable(I))
        for (User *U : I.users())
          if (Checker.isDefinitionAcrossSuspend(I, U))
            Spills.emplace_back(&I, U);

    if (Spills.empty())
      break;

    // Rewrite materializable instructions to be materialized at the use point.
    LLVM_DEBUG(dump("Materializations", Spills));
    rewriteMaterializableInstructions(Builder, Spills);
    Spills.clear();
  }

  // Collect the spills for arguments and other not-materializable values.
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        Spills.emplace_back(&A, U);

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;
    // The Coroutine Promise is always included in the coroutine frame; there
    // is no need to check for suspend crossing.
    if (Shape.PromiseAlloca == &I)
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        Spills.emplace_back(&I, U);
      }
  }

  LLVM_DEBUG(dump("Spills", Spills));
  moveSpillUsesAfterCoroBegin(F, Spills, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, Spills);
  Shape.FramePtr = insertSpills(Spills, Shape);
}