1 //===-- AMDILCFGStructurizer.cpp - CFG Structurizer -----------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
9 //==-----------------------------------------------------------------------===//
11 #define DEBUG_TYPE "structcfg"
14 #include "AMDGPUInstrInfo.h"
15 #include "R600InstrInfo.h"
16 #include "llvm/Support/Debug.h"
17 #include "llvm/Support/raw_ostream.h"
18 #include "llvm/ADT/SCCIterator.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/ADT/DepthFirstIterator.h"
22 #include "llvm/Analysis/DominatorInternals.h"
23 #include "llvm/Analysis/Dominators.h"
24 #include "llvm/CodeGen/MachineDominators.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineFunctionAnalysis.h"
27 #include "llvm/CodeGen/MachineFunctionPass.h"
28 #include "llvm/CodeGen/MachineInstrBuilder.h"
29 #include "llvm/CodeGen/MachineJumpTableInfo.h"
30 #include "llvm/CodeGen/MachineLoopInfo.h"
31 #include "llvm/CodeGen/MachinePostDominators.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
// Default inline capacity used for the pass's SmallVector scratch storage.
38 #define DEFAULT_VEC_SLOTS 8
42 //===----------------------------------------------------------------------===//
44 // Statistics for CFGStructurizer.
46 //===----------------------------------------------------------------------===//
// -stats counters; the description strings below continue on lines not
// visible in this chunk (adjacent string-literal concatenation).
48 STATISTIC(numSerialPatternMatch, "CFGStructurizer number of serial pattern "
50 STATISTIC(numIfPatternMatch, "CFGStructurizer number of if pattern "
52 STATISTIC(numLoopcontPatternMatch, "CFGStructurizer number of loop-continue "
54 STATISTIC(numClonedBlock, "CFGStructurizer cloned blocks");
55 STATISTIC(numClonedInstr, "CFGStructurizer cloned instructions");
57 //===----------------------------------------------------------------------===//
59 // Miscellaneous utility for CFGStructurizer.
61 //===----------------------------------------------------------------------===//
// Debug-only helpers: print a newly created instruction / basic block.
// Their bodies continue on dropped lines (note the trailing backslashes).
63 #define SHOWNEWINSTR(i) \
64 DEBUG(dbgs() << "New instr: " << *i << "\n");
66 #define SHOWNEWBLK(b, msg) \
68 dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
72 #define SHOWBLK_DETAIL(b, msg) \
75 dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
// Sentinel SCC number for blocks that were never assigned to an SCC.
81 #define INVALIDSCCNUM -1
// Reverse \p Src in place by exchanging mirrored elements; only one side of
// the element exchange is visible here (the temporary/second assignment is on
// dropped lines). Declared in an anonymous namespace (template header not in
// view) — NOTE(review): confirm the swap temporary against the full file.
84 void ReverseVector(SmallVectorImpl<NodeT *> &Src) {
85 size_t sz = Src.size();
86 for (size_t i = 0; i < sz/2; ++i) {
88 Src[i] = Src[sz - i - 1];
93 } // end anonymous namespace
95 //===----------------------------------------------------------------------===//
97 // supporting data structure for CFGStructurizer
99 //===----------------------------------------------------------------------===//
// Per-MachineBasicBlock bookkeeping for the structurizer: whether the block
// has been retired (merged away) and which SCC it belongs to. The member
// declarations themselves are on lines not visible in this chunk.
104 class BlockInformation {
108 BlockInformation() : IsRetired(false), SccNum(INVALIDSCCNUM) {}
111 } // end anonymous namespace
113 //===----------------------------------------------------------------------===//
117 //===----------------------------------------------------------------------===//
// MachineFunction pass that turns an arbitrary (reducible) CFG into the
// structured if/loop form required by the AMDGPU/R600 control-flow
// instructions. Access specifiers and several members sit on dropped lines.
120 class AMDGPUCFGStructurizer : public MachineFunctionPass {
122 typedef SmallVector<MachineBasicBlock *, 32> MBBVector;
123 typedef std::map<MachineBasicBlock *, BlockInformation *> MBBInfoMap;
124 typedef std::map<MachineLoop *, MachineBasicBlock *> LoopLandInfoMap;
// Classification used by singlePathTo().
128 SinglePath_InPath = 1,
129 SinglePath_NotInPath = 2
134 AMDGPUCFGStructurizer(TargetMachine &tm) :
135 MachineFunctionPass(ID), TM(tm),
136 TII(static_cast<const R600InstrInfo *>(tm.getInstrInfo())),
137 TRI(&TII->getRegisterInfo()) { }
139 const char *getPassName() const {
140 return "AMD IL Control Flow Graph structurizer Pass";
// Requires dominators, post-dominators and loop info; preserves the
// MachineFunctionAnalysis.
143 void getAnalysisUsage(AnalysisUsage &AU) const {
144 AU.addPreserved<MachineFunctionAnalysis>();
145 AU.addRequired<MachineFunctionAnalysis>();
146 AU.addRequired<MachineDominatorTree>();
147 AU.addRequired<MachinePostDominatorTree>();
148 AU.addRequired<MachineLoopInfo>();
151 /// Perform the CFG structurization
154 /// Perform the CFG preparation
155 /// This step will remove every unconditionnal/dead jump instructions and make
156 /// sure all loops have an exit block
159 bool runOnMachineFunction(MachineFunction &MF) {
163 MLI = &getAnalysis<MachineLoopInfo>();
164 DEBUG(dbgs() << "LoopInfo:\n"; PrintLoopinfo(*MLI););
165 MDT = &getAnalysis<MachineDominatorTree>();
166 DEBUG(MDT->print(dbgs(), (const llvm::Module*)0););
167 PDT = &getAnalysis<MachinePostDominatorTree>();
168 DEBUG(PDT->print(dbgs()););
// Analysis results cached for the duration of the pass.
177 MachineDominatorTree *MDT;
178 MachinePostDominatorTree *PDT;
179 MachineLoopInfo *MLI;
180 const R600InstrInfo *TII;
181 const AMDGPURegisterInfo *TRI;
184 /// Print the ordered Blocks.
185 void printOrderedBlocks() const {
187 for (MBBVector::const_iterator iterBlk = OrderedBlks.begin(),
188 iterBlkEnd = OrderedBlks.end(); iterBlk != iterBlkEnd; ++iterBlk, ++i) {
189 dbgs() << "BB" << (*iterBlk)->getNumber();
190 dbgs() << "(" << getSCCNum(*iterBlk) << "," << (*iterBlk)->size() << ")";
191 if (i != 0 && i % 10 == 0) {
198 static void PrintLoopinfo(const MachineLoopInfo &LoopInfo) {
199 for (MachineLoop::iterator iter = LoopInfo.begin(),
200 iterEnd = LoopInfo.end(); iter != iterEnd; ++iter) {
201 (*iter)->print(dbgs(), 0);
// --- Per-block / per-loop queries ---
206 int getSCCNum(MachineBasicBlock *MBB) const;
207 MachineBasicBlock *getLoopLandInfo(MachineLoop *LoopRep) const;
208 bool hasBackEdge(MachineBasicBlock *MBB) const;
209 static unsigned getLoopDepth(MachineLoop *LoopRep);
210 bool isRetiredBlock(MachineBasicBlock *MBB) const;
211 bool isActiveLoophead(MachineBasicBlock *MBB) const;
212 PathToKind singlePathTo(MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB,
213 bool AllowSideEntry = true) const;
214 int countActiveBlock(MBBVector::const_iterator It,
215 MBBVector::const_iterator E) const;
216 bool needMigrateBlock(MachineBasicBlock *MBB) const;
219 void reversePredicateSetter(MachineBasicBlock::iterator I);
220 /// Compute the reversed DFS post order of Blocks
221 void orderBlocks(MachineFunction *MF);
223 // Function originaly from CFGStructTraits
// --- Low-level instruction insertion / branch manipulation helpers ---
224 void insertInstrEnd(MachineBasicBlock *MBB, int NewOpcode,
225 DebugLoc DL = DebugLoc());
226 MachineInstr *insertInstrBefore(MachineBasicBlock *MBB, int NewOpcode,
227 DebugLoc DL = DebugLoc());
228 MachineInstr *insertInstrBefore(MachineBasicBlock::iterator I, int NewOpcode);
229 void insertCondBranchBefore(MachineBasicBlock::iterator I, int NewOpcode,
231 void insertCondBranchBefore(MachineBasicBlock *MBB,
232 MachineBasicBlock::iterator I, int NewOpcode, int RegNum,
234 void insertCondBranchEnd(MachineBasicBlock *MBB, int NewOpcode, int RegNum);
235 static int getBranchNzeroOpcode(int OldOpcode);
236 static int getBranchZeroOpcode(int OldOpcode);
237 static int getContinueNzeroOpcode(int OldOpcode);
238 static int getContinueZeroOpcode(int OldOpcode);
239 static MachineBasicBlock *getTrueBranch(MachineInstr *MI);
240 static void setTrueBranch(MachineInstr *MI, MachineBasicBlock *MBB);
241 static MachineBasicBlock *getFalseBranch(MachineBasicBlock *MBB,
243 static bool isCondBranch(MachineInstr *MI);
244 static bool isUncondBranch(MachineInstr *MI);
245 static DebugLoc getLastDebugLocInBB(MachineBasicBlock *MBB);
246 static MachineInstr *getNormalBlockBranchInstr(MachineBasicBlock *MBB);
247 /// The correct naming for this is getPossibleLoopendBlockBranchInstr.
249 /// BB with backward-edge could have move instructions after the branch
250 /// instruction. Such move instruction "belong to" the loop backward-edge.
251 MachineInstr *getLoopendBlockBranchInstr(MachineBasicBlock *MBB);
252 static MachineInstr *getReturnInstr(MachineBasicBlock *MBB);
253 static MachineInstr *getContinueInstr(MachineBasicBlock *MBB);
254 static bool isReturnBlock(MachineBasicBlock *MBB);
255 static void cloneSuccessorList(MachineBasicBlock *DstMBB,
256 MachineBasicBlock *SrcMBB) ;
257 static MachineBasicBlock *clone(MachineBasicBlock *MBB);
258 /// MachineBasicBlock::ReplaceUsesOfBlockWith doesn't serve the purpose
259 /// because the AMDGPU instruction is not recognized as terminator fix this
260 /// and retire this routine
261 void replaceInstrUseOfBlockWith(MachineBasicBlock *SrcMBB,
262 MachineBasicBlock *OldMBB, MachineBasicBlock *NewBlk);
263 static void wrapup(MachineBasicBlock *MBB);
// --- Pattern matching: the core structurization engine ---
266 int patternMatch(MachineBasicBlock *MBB);
267 int patternMatchGroup(MachineBasicBlock *MBB);
268 int serialPatternMatch(MachineBasicBlock *MBB);
269 int ifPatternMatch(MachineBasicBlock *MBB);
270 int loopendPatternMatch();
271 int mergeLoop(MachineLoop *LoopRep);
272 int loopcontPatternMatch(MachineLoop *LoopRep, MachineBasicBlock *LoopHeader);
274 void handleLoopcontBlock(MachineBasicBlock *ContingMBB,
275 MachineLoop *ContingLoop, MachineBasicBlock *ContMBB,
276 MachineLoop *ContLoop);
277 /// return true iff src1Blk->succ_size() == 0 && src1Blk and src2Blk are in
278 /// the same loop with LoopLandInfo without explicitly keeping track of
279 /// loopContBlks and loopBreakBlks, this is a method to get the information.
280 bool isSameloopDetachedContbreak(MachineBasicBlock *Src1MBB,
281 MachineBasicBlock *Src2MBB);
282 int handleJumpintoIf(MachineBasicBlock *HeadMBB,
283 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB);
284 int handleJumpintoIfImp(MachineBasicBlock *HeadMBB,
285 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB);
286 int improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
287 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
288 MachineBasicBlock **LandMBBPtr);
289 void showImproveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
290 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
291 MachineBasicBlock *LandMBB, bool Detail = false);
292 int cloneOnSideEntryTo(MachineBasicBlock *PreMBB,
293 MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB);
// --- CFG transformation primitives used by the matchers ---
294 void mergeSerialBlock(MachineBasicBlock *DstMBB,
295 MachineBasicBlock *SrcMBB);
297 void mergeIfthenelseBlock(MachineInstr *BranchMI,
298 MachineBasicBlock *MBB, MachineBasicBlock *TrueMBB,
299 MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB);
300 void mergeLooplandBlock(MachineBasicBlock *DstMBB,
301 MachineBasicBlock *LandMBB);
302 void mergeLoopbreakBlock(MachineBasicBlock *ExitingMBB,
303 MachineBasicBlock *LandMBB);
304 void settleLoopcontBlock(MachineBasicBlock *ContingMBB,
305 MachineBasicBlock *ContMBB);
306 /// normalizeInfiniteLoopExit change
308 /// uncond_br LoopHeader
312 /// cond_br 1 LoopHeader dummyExit
313 /// and return the newly added dummy exit block
314 MachineBasicBlock *normalizeInfiniteLoopExit(MachineLoop *LoopRep);
315 void removeUnconditionalBranch(MachineBasicBlock *MBB);
316 /// Remove duplicate branches instructions in a block.
321 /// is transformed to
324 void removeRedundantConditionalBranch(MachineBasicBlock *MBB);
325 void addDummyExitBlock(SmallVectorImpl<MachineBasicBlock *> &RetMBB);
326 void removeSuccessor(MachineBasicBlock *MBB);
327 MachineBasicBlock *cloneBlockForPredecessor(MachineBasicBlock *MBB,
328 MachineBasicBlock *PredMBB);
329 void migrateInstruction(MachineBasicBlock *SrcMBB,
330 MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I);
331 void recordSccnum(MachineBasicBlock *MBB, int SCCNum);
332 void retireBlock(MachineBasicBlock *MBB);
333 void setLoopLandBlock(MachineLoop *LoopRep, MachineBasicBlock *MBB = NULL);
335 MachineBasicBlock *findNearestCommonPostDom(std::set<MachineBasicBlock *>&);
336 /// This is work around solution for findNearestCommonDominator not avaiable
337 /// to post dom a proper fix should go to Dominators.h.
338 MachineBasicBlock *findNearestCommonPostDom(MachineBasicBlock *MBB1,
339 MachineBasicBlock *MBB2);
// --- Pass-wide mutable state ---
342 MBBInfoMap BlockInfoMap;
343 LoopLandInfoMap LLInfoMap;
344 std::map<MachineLoop *, bool> Visited;
345 MachineFunction *FuncRep;
346 SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> OrderedBlks;
349 int AMDGPUCFGStructurizer::getSCCNum(MachineBasicBlock *MBB) const {
350 MBBInfoMap::const_iterator It = BlockInfoMap.find(MBB);
351 if (It == BlockInfoMap.end())
352 return INVALIDSCCNUM;
353 return (*It).second->SccNum;
// Return the "land" (merge) block recorded for \p LoopRep in LLInfoMap.
// The not-found return value sits on dropped lines — presumably NULL; the
// found-case return is likewise not visible here.
356 MachineBasicBlock *AMDGPUCFGStructurizer::getLoopLandInfo(MachineLoop *LoopRep)
358 LoopLandInfoMap::const_iterator It = LLInfoMap.find(LoopRep);
359 if (It == LLInfoMap.end())
// True when \p MBB branches back to the header of its enclosing loop, i.e.
// it carries a loop back-edge. NOTE(review): the guard for MBB not being in
// any loop (LoopRep == NULL) is on dropped lines — confirm in the full file.
364 bool AMDGPUCFGStructurizer::hasBackEdge(MachineBasicBlock *MBB) const {
365 MachineLoop *LoopRep = MLI->getLoopFor(MBB);
368 MachineBasicBlock *LoopHeader = LoopRep->getHeader();
369 return MBB->isSuccessor(LoopHeader);
372 unsigned AMDGPUCFGStructurizer::getLoopDepth(MachineLoop *LoopRep) {
373 return LoopRep ? LoopRep->getLoopDepth() : 0;
// True when \p MBB has been merged away by the structurizer (its
// BlockInformation is marked retired). The return for the "no entry" case
// is on dropped lines — presumably false.
376 bool AMDGPUCFGStructurizer::isRetiredBlock(MachineBasicBlock *MBB) const {
377 MBBInfoMap::const_iterator It = BlockInfoMap.find(MBB);
378 if (It == BlockInfoMap.end())
380 return (*It).second->IsRetired;
// Walk the loop nest outward from \p MBB: the block is an "active" loop head
// if, for some loop it heads, the loop's land block is not yet retired.
// Several control-flow lines (null-land handling, the returns) are on
// dropped lines, so the exact early-exit behavior can't be confirmed here.
383 bool AMDGPUCFGStructurizer::isActiveLoophead(MachineBasicBlock *MBB) const {
384 MachineLoop *LoopRep = MLI->getLoopFor(MBB);
385 while (LoopRep && LoopRep->getHeader() == MBB) {
386 MachineBasicBlock *LoopLand = getLoopLandInfo(LoopRep);
389 if (!isRetiredBlock(LoopLand))
391 LoopRep = LoopRep->getParentLoop();
// Classify the path from \p SrcMBB to \p DstMBB when following unique
// successors only:
//  - SinglePath_InPath:    Dst is reached along the chain (or Src == Dst);
//  - SinglePath_NotInPath: the chain ends (no successors) without hitting Dst;
//  - Not_SinglePath:       a fork is met first, or (when side entries are
//                          disallowed) a block with multiple predecessors.
395 AMDGPUCFGStructurizer::PathToKind AMDGPUCFGStructurizer::singlePathTo(
396 MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB,
397 bool AllowSideEntry) const {
399 if (SrcMBB == DstMBB)
400 return SinglePath_InPath;
// Follow the unique-successor chain.
401 while (SrcMBB && SrcMBB->succ_size() == 1) {
402 SrcMBB = *SrcMBB->succ_begin();
403 if (SrcMBB == DstMBB)
404 return SinglePath_InPath;
405 if (!AllowSideEntry && SrcMBB->pred_size() > 1)
406 return Not_SinglePath;
408 if (SrcMBB && SrcMBB->succ_size()==0)
409 return SinglePath_NotInPath;
410 return Not_SinglePath;
// Count the blocks in [It, E) that have not been retired. The counter
// declaration, increment and return are on dropped lines.
413 int AMDGPUCFGStructurizer::countActiveBlock(MBBVector::const_iterator It,
414 MBBVector::const_iterator E) const {
417 if (!isRetiredBlock(*It))
// Heuristic: cloning \p MBB into each predecessor is considered too costly
// (so its instructions should be migrated instead) when the block is large
// and has several predecessors. The early return for the single-predecessor
// case is on dropped lines.
424 bool AMDGPUCFGStructurizer::needMigrateBlock(MachineBasicBlock *MBB) const {
425 unsigned BlockSizeThreshold = 30;
426 unsigned CloneInstrThreshold = 100;
427 bool MultiplePreds = MBB && (MBB->pred_size() > 1);
431 unsigned BlkSize = MBB->size();
// Total cloned instructions would be BlkSize per extra predecessor.
432 return ((BlkSize > BlockSizeThreshold) &&
433 (BlkSize * (MBB->pred_size() - 1) > CloneInstrThreshold));
// Invert the comparison encoded in a PRED_X instruction at \p I by swapping
// its condition-code immediate (operand 2) between the IS_ZERO / IS_NOT_ZERO
// forms (both int and float variants). The `break;` statements, the
// OPCODE_IS_ZERO case label and the enclosing loop/iteration structure are
// on dropped lines.
436 void AMDGPUCFGStructurizer::reversePredicateSetter(
437 MachineBasicBlock::iterator I) {
439 if (I->getOpcode() == AMDGPU::PRED_X) {
440 switch (static_cast<MachineInstr *>(I)->getOperand(2).getImm()) {
441 case OPCODE_IS_ZERO_INT:
442 static_cast<MachineInstr *>(I)->getOperand(2)
443 .setImm(OPCODE_IS_NOT_ZERO_INT);
445 case OPCODE_IS_NOT_ZERO_INT:
446 static_cast<MachineInstr *>(I)->getOperand(2)
447 .setImm(OPCODE_IS_ZERO_INT);
450 static_cast<MachineInstr *>(I)->getOperand(2)
451 .setImm(OPCODE_IS_NOT_ZERO);
453 case OPCODE_IS_NOT_ZERO:
454 static_cast<MachineInstr *>(I)->getOperand(2)
455 .setImm(OPCODE_IS_ZERO);
// Reached when the PRED_X carries an unexpected condition code.
458 llvm_unreachable("PRED_X Opcode invalid!");
// Create a register-operand-free instruction with opcode \p NewOpcode and
// append it to \p MBB. The push_back/debug-print lines are dropped here.
464 void AMDGPUCFGStructurizer::insertInstrEnd(MachineBasicBlock *MBB,
465 int NewOpcode, DebugLoc DL) {
466 MachineInstr *MI = MBB->getParent()
467 ->CreateMachineInstr(TII->get(NewOpcode), DL);
469 //assume the instruction doesn't take any reg operand ...
// Create an instruction with \p NewOpcode and insert it at the front of
// \p MBB (the empty-block else-branch and the return are on dropped lines).
473 MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(MachineBasicBlock *MBB,
474 int NewOpcode, DebugLoc DL) {
476 MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DL);
477 if (MBB->begin() != MBB->end())
478 MBB->insert(MBB->begin(), MI);
// Create an instruction with \p NewOpcode and insert it immediately before
// the instruction at \p I; returns the new instruction (return on a dropped
// line). Note the local is (mis)named NewMBB although it is a MachineInstr.
485 MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(
486 MachineBasicBlock::iterator I, int NewOpcode) {
487 MachineInstr *OldMI = &(*I);
488 MachineBasicBlock *MBB = OldMI->getParent();
489 MachineInstr *NewMBB =
490 MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DebugLoc());
491 MBB->insert(I, NewMBB);
492 //assume the instruction doesn't take any reg operand ...
493 SHOWNEWINSTR(NewMBB);
// Insert a conditional branch with opcode \p NewOpcode before \p I, reusing
// the predicate register of the old branch (its operand 1) as the new
// branch's condition input. The old instruction is intentionally left in
// place — see the comment below.
497 void AMDGPUCFGStructurizer::insertCondBranchBefore(
498 MachineBasicBlock::iterator I, int NewOpcode, DebugLoc DL) {
499 MachineInstr *OldMI = &(*I);
500 MachineBasicBlock *MBB = OldMI->getParent();
501 MachineFunction *MF = MBB->getParent();
502 MachineInstr *NewMI = MF->CreateMachineInstr(TII->get(NewOpcode), DL);
503 MBB->insert(I, NewMI);
504 MachineInstrBuilder MIB(*MF, NewMI);
505 MIB.addReg(OldMI->getOperand(1).getReg(), false);
507 //erase later oldInstr->eraseFromParent();
// Insert a conditional branch with opcode \p NewOpcode before \p I in
// \p blk, conditioned on register \p RegNum. (The DebugLoc parameter
// declaration is on a dropped line.)
510 void AMDGPUCFGStructurizer::insertCondBranchBefore(MachineBasicBlock *blk,
511 MachineBasicBlock::iterator I, int NewOpcode, int RegNum,
513 MachineFunction *MF = blk->getParent();
514 MachineInstr *NewInstr = MF->CreateMachineInstr(TII->get(NewOpcode), DL);
516 blk->insert(I, NewInstr);
517 MachineInstrBuilder(*MF, NewInstr).addReg(RegNum, false);
518 SHOWNEWINSTR(NewInstr);
521 void AMDGPUCFGStructurizer::insertCondBranchEnd(MachineBasicBlock *MBB,
522 int NewOpcode, int RegNum) {
523 MachineFunction *MF = MBB->getParent();
524 MachineInstr *NewInstr =
525 MF->CreateMachineInstr(TII->get(NewOpcode), DebugLoc());
526 MBB->push_back(NewInstr);
527 MachineInstrBuilder(*MF, NewInstr).addReg(RegNum, false);
528 SHOWNEWINSTR(NewInstr);
531 int AMDGPUCFGStructurizer::getBranchNzeroOpcode(int OldOpcode) {
533 case AMDGPU::JUMP_COND:
534 case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET;
535 case AMDGPU::BRANCH_COND_i32:
536 case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALNZ_f32;
537 default: llvm_unreachable("internal error");
542 int AMDGPUCFGStructurizer::getBranchZeroOpcode(int OldOpcode) {
544 case AMDGPU::JUMP_COND:
545 case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET;
546 case AMDGPU::BRANCH_COND_i32:
547 case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALZ_f32;
548 default: llvm_unreachable("internal error");
553 int AMDGPUCFGStructurizer::getContinueNzeroOpcode(int OldOpcode) {
555 case AMDGPU::JUMP_COND:
556 case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALNZ_i32;
557 default: llvm_unreachable("internal error");
562 int AMDGPUCFGStructurizer::getContinueZeroOpcode(int OldOpcode) {
564 case AMDGPU::JUMP_COND:
565 case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALZ_i32;
566 default: llvm_unreachable("internal error");
571 MachineBasicBlock *AMDGPUCFGStructurizer::getTrueBranch(MachineInstr *MI) {
572 return MI->getOperand(0).getMBB();
575 void AMDGPUCFGStructurizer::setTrueBranch(MachineInstr *MI,
576 MachineBasicBlock *MBB) {
577 MI->getOperand(0).setMBB(MBB);
581 AMDGPUCFGStructurizer::getFalseBranch(MachineBasicBlock *MBB,
583 assert(MBB->succ_size() == 2);
584 MachineBasicBlock *TrueBranch = getTrueBranch(MI);
585 MachineBasicBlock::succ_iterator It = MBB->succ_begin();
586 MachineBasicBlock::succ_iterator Next = It;
588 return (*It == TrueBranch) ? *Next : *It;
591 bool AMDGPUCFGStructurizer::isCondBranch(MachineInstr *MI) {
592 switch (MI->getOpcode()) {
593 case AMDGPU::JUMP_COND:
594 case AMDGPU::BRANCH_COND_i32:
595 case AMDGPU::BRANCH_COND_f32: return true;
// True when \p MI is an unconditional branch opcode; all case labels and
// returns of this switch are on dropped lines.
602 bool AMDGPUCFGStructurizer::isUncondBranch(MachineInstr *MI) {
603 switch (MI->getOpcode()) {
// Scan \p MBB and return the DebugLoc of the last instruction that carries
// one. The DL local's declaration, the iterator increment and the return
// are on dropped lines.
613 DebugLoc AMDGPUCFGStructurizer::getLastDebugLocInBB(MachineBasicBlock *MBB) {
614 //get DebugLoc from the first MachineBasicBlock instruction with debug info
616 for (MachineBasicBlock::iterator It = MBB->begin(); It != MBB->end();
618 MachineInstr *instr = &(*It);
619 if (instr->getDebugLoc().isUnknown() == false)
620 DL = instr->getDebugLoc();
// Return the branch instruction of \p MBB if its last instruction is a
// conditional or unconditional branch; the returns (branch / NULL) are on
// dropped lines. NOTE(review): rbegin() on an empty block would be invalid —
// callers presumably guarantee non-empty blocks.
625 MachineInstr *AMDGPUCFGStructurizer::getNormalBlockBranchInstr(
626 MachineBasicBlock *MBB) {
627 MachineBasicBlock::reverse_iterator It = MBB->rbegin();
628 MachineInstr *MI = &*It;
629 if (MI && (isCondBranch(MI) || isUncondBranch(MI)))
// Find the branch of a possible loop-end block: scan backwards, skipping
// trailing move instructions (which "belong to" the loop back-edge), and
// return the first branch found. Loop increment, returns and the
// not-found case are on dropped lines.
634 MachineInstr *AMDGPUCFGStructurizer::getLoopendBlockBranchInstr(
635 MachineBasicBlock *MBB) {
636 for (MachineBasicBlock::reverse_iterator It = MBB->rbegin(), E = MBB->rend();
639 MachineInstr *MI = &*It;
641 if (isCondBranch(MI) || isUncondBranch(MI))
// Anything other than a trailing move terminates the backwards scan.
643 else if (!TII->isMov(MI->getOpcode()))
// Return the trailing RETURN instruction of \p MBB, if its last instruction
// is one; the success return and the NULL fallback are on dropped lines.
650 MachineInstr *AMDGPUCFGStructurizer::getReturnInstr(MachineBasicBlock *MBB) {
651 MachineBasicBlock::reverse_iterator It = MBB->rbegin();
652 if (It != MBB->rend()) {
653 MachineInstr *instr = &(*It);
654 if (instr->getOpcode() == AMDGPU::RETURN)
// Return the trailing CONTINUE instruction of \p MBB, if its last
// instruction is one; success return and NULL fallback are on dropped lines.
660 MachineInstr *AMDGPUCFGStructurizer::getContinueInstr(MachineBasicBlock *MBB) {
661 MachineBasicBlock::reverse_iterator It = MBB->rbegin();
662 if (It != MBB->rend()) {
663 MachineInstr *MI = &(*It);
664 if (MI->getOpcode() == AMDGPU::CONTINUE)
// A return block is one with no successors; a debug diagnostic is emitted
// when such a block lacks an explicit RETURN instruction. The assertion /
// return statements pairing MI with IsReturn are on dropped lines.
670 bool AMDGPUCFGStructurizer::isReturnBlock(MachineBasicBlock *MBB) {
671 MachineInstr *MI = getReturnInstr(MBB);
672 bool IsReturn = (MBB->succ_size() == 0);
677 dbgs() << "BB" << MBB->getNumber()
678 <<" is return block without RETURN instr\n";);
682 void AMDGPUCFGStructurizer::cloneSuccessorList(MachineBasicBlock *DstMBB,
683 MachineBasicBlock *SrcMBB) {
684 for (MachineBasicBlock::succ_iterator It = SrcMBB->succ_begin(),
685 iterEnd = SrcMBB->succ_end(); It != iterEnd; ++It)
686 DstMBB->addSuccessor(*It); // *iter's predecessor is also taken care of
// Deep-copy \p MBB: create a fresh block in the same function and clone
// every instruction into it. The iterator increment and the return of
// NewMBB are on dropped lines. Successor/predecessor edges are NOT copied
// here (see cloneSuccessorList).
689 MachineBasicBlock *AMDGPUCFGStructurizer::clone(MachineBasicBlock *MBB) {
690 MachineFunction *Func = MBB->getParent();
691 MachineBasicBlock *NewMBB = Func->CreateMachineBasicBlock();
692 Func->push_back(NewMBB); //insert to function
693 for (MachineBasicBlock::iterator It = MBB->begin(), E = MBB->end();
695 MachineInstr *MI = Func->CloneMachineInstr(It);
696 NewMBB->push_back(MI);
701 void AMDGPUCFGStructurizer::replaceInstrUseOfBlockWith(
702 MachineBasicBlock *SrcMBB, MachineBasicBlock *OldMBB,
703 MachineBasicBlock *NewBlk) {
704 MachineInstr *BranchMI = getLoopendBlockBranchInstr(SrcMBB);
705 if (BranchMI && isCondBranch(BranchMI) &&
706 getTrueBranch(BranchMI) == OldMBB)
707 setTrueBranch(BranchMI, NewBlk);
// Final cleanup of a (now fully structured) block: remove CONTINUE
// instructions that sit immediately before an ENDLOOP (they are implied by
// falling off the loop body). The Pre/It advance logic of the scan loop is
// on dropped lines, so the exact pairing of Pre and It cannot be confirmed
// here.
710 void AMDGPUCFGStructurizer::wrapup(MachineBasicBlock *MBB) {
711 assert((!MBB->getParent()->getJumpTableInfo()
712 || MBB->getParent()->getJumpTableInfo()->isEmpty())
713 && "found a jump table");
715 //collect continue right before endloop
716 SmallVector<MachineInstr *, DEFAULT_VEC_SLOTS> ContInstr;
717 MachineBasicBlock::iterator Pre = MBB->begin();
718 MachineBasicBlock::iterator E = MBB->end();
719 MachineBasicBlock::iterator It = Pre;
721 if (Pre->getOpcode() == AMDGPU::CONTINUE
722 && It->getOpcode() == AMDGPU::ENDLOOP)
723 ContInstr.push_back(Pre);
728 //delete continue right before endloop
729 for (unsigned i = 0; i < ContInstr.size(); ++i)
730 ContInstr[i]->eraseFromParent();
732 // TODO to fix up jump table so later phase won't be confused. if
733 // (jumpTableInfo->isEmpty() == false) { need to clean the jump table, but
734 // there isn't such an interface yet. alternatively, replace all the other
735 // blocks in the jump table with the entryBlk //}
// CFG preparation phase: order the blocks, give every infinite loop a dummy
// exit block, strip unconditional and redundant conditional branches, and
// funnel multiple return blocks into a single dummy exit. The final return
// of Changed is on a dropped line.
740 bool AMDGPUCFGStructurizer::prepare() {
741 bool Changed = false;
743 //FIXME: if not reducible flow graph, make it so ???
745 DEBUG(dbgs() << "AMDGPUCFGStructurizer::prepare\n";);
747 orderBlocks(FuncRep);
749 SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> RetBlks;
751 // Add an ExitBlk to loop that don't have one
752 for (MachineLoopInfo::iterator It = MLI->begin(),
753 E = MLI->end(); It != E; ++It) {
754 MachineLoop *LoopRep = (*It);
755 MBBVector ExitingMBBs;
756 LoopRep->getExitingBlocks(ExitingMBBs);
// A loop with no exiting block is infinite: synthesize a dummy exit so the
// structurizer always has a land block to merge into.
758 if (ExitingMBBs.size() == 0) {
759 MachineBasicBlock* DummyExitBlk = normalizeInfiniteLoopExit(LoopRep);
761 RetBlks.push_back(DummyExitBlk);
765 // Remove unconditional branch instr.
766 // Add dummy exit block iff there are multiple returns.
767 for (SmallVectorImpl<MachineBasicBlock *>::const_iterator
768 It = OrderedBlks.begin(), E = OrderedBlks.end(); It != E; ++It) {
769 MachineBasicBlock *MBB = *It;
770 removeUnconditionalBranch(MBB);
771 removeRedundantConditionalBranch(MBB);
772 if (isReturnBlock(MBB)) {
773 RetBlks.push_back(MBB);
775 assert(MBB->succ_size() <= 2);
778 if (RetBlks.size() >= 2) {
779 addDummyExitBlock(RetBlks);
// Main structurization driver. Repeatedly sweeps the ordered block list,
// running the pattern matchers per SCC until the function is reduced to a
// single block or no progress is made (irreducible CFG -> unreachable).
// Many loop-control lines (the do/while headers, iterator advances, the
// Finish flag handling) are on dropped lines.
786 bool AMDGPUCFGStructurizer::run() {
788 //Assume reducible CFG...
789 DEBUG(dbgs() << "AMDGPUCFGStructurizer::run\n";FuncRep->viewCFG(););
792 //Use the worse block ordering to test the algorithm.
793 ReverseVector(orderedBlks);
796 DEBUG(dbgs() << "Ordered blocks:\n"; printOrderedBlocks(););
799 MachineBasicBlock *MBB;
800 bool MakeProgress = false;
801 int NumRemainedBlk = countActiveBlock(OrderedBlks.begin(),
807 dbgs() << "numIter = " << NumIter
808 << ", numRemaintedBlk = " << NumRemainedBlk << "\n";
811 SmallVectorImpl<MachineBasicBlock *>::const_iterator It =
813 SmallVectorImpl<MachineBasicBlock *>::const_iterator E =
816 SmallVectorImpl<MachineBasicBlock *>::const_iterator SccBeginIter =
818 MachineBasicBlock *SccBeginMBB = NULL;
819 int SccNumBlk = 0; // The number of active blocks, init to a
820 // maximum possible number.
821 int SccNumIter; // Number of iteration in this SCC.
830 SccNumBlk = NumRemainedBlk; // Init to maximum possible number.
832 dbgs() << "start processing SCC" << getSCCNum(SccBeginMBB);
// Run the matchers only on blocks that are still live.
837 if (!isRetiredBlock(MBB))
842 bool ContNextScc = true;
844 || getSCCNum(SccBeginMBB) != getSCCNum(*It)) {
845 // Just finish one scc.
847 int sccRemainedNumBlk = countActiveBlock(SccBeginIter, It);
// If an SCC pass removed no block, iterating it again cannot help.
848 if (sccRemainedNumBlk != 1 && sccRemainedNumBlk >= SccNumBlk) {
850 dbgs() << "Can't reduce SCC " << getSCCNum(MBB)
851 << ", sccNumIter = " << SccNumIter;
852 dbgs() << "doesn't make any progress\n";
855 } else if (sccRemainedNumBlk != 1 && sccRemainedNumBlk < SccNumBlk) {
856 SccNumBlk = sccRemainedNumBlk;
860 dbgs() << "repeat processing SCC" << getSCCNum(MBB)
861 << "sccNumIter = " << SccNumIter << "\n";
865 // Finish the current scc.
869 // Continue on next component in the current scc.
875 } //while, "one iteration" over the function.
877 MachineBasicBlock *EntryMBB =
878 GraphTraits<MachineFunction *>::nodes_begin(FuncRep);
879 if (EntryMBB->succ_size() == 0) {
882 dbgs() << "Reduce to one block\n";
885 int NewnumRemainedBlk
886 = countActiveBlock(OrderedBlks.begin(), OrderedBlks.end());
887 // consider cloned blocks ??
888 if (NewnumRemainedBlk == 1 || NewnumRemainedBlk < NumRemainedBlk) {
890 NumRemainedBlk = NewnumRemainedBlk;
892 MakeProgress = false;
894 dbgs() << "No progress\n";
898 } while (!Finish && MakeProgress);
900 // Misc wrap up to maintain the consistency of the Function representation.
901 wrapup(GraphTraits<MachineFunction *>::nodes_begin(FuncRep));
903 // Detach retired Block, release memory.
904 for (MBBInfoMap::iterator It = BlockInfoMap.begin(), E = BlockInfoMap.end();
906 if ((*It).second && (*It).second->IsRetired) {
907 assert(((*It).first)->getNumber() != -1);
909 dbgs() << "Erase BB" << ((*It).first)->getNumber() << "\n";
911 (*It).first->eraseFromParent(); //Remove from the parent Function.
915 BlockInfoMap.clear();
// Reached only when the CFG could not be reduced: irreducible control flow.
923 llvm_unreachable("IRREDUCIBL_CF");
// Build OrderedBlks as an SCC-ordered traversal of the function (scc_iterator
// yields SCCs in reverse topological order) and record each block's SCC
// number; then report any block that never received an SCC number as
// unreachable. The SccNum initialization and the MBB assignment inside the
// inner loop are on dropped lines.
930 void AMDGPUCFGStructurizer::orderBlocks(MachineFunction *MF) {
932 MachineBasicBlock *MBB;
933 for (scc_iterator<MachineFunction *> It = scc_begin(MF), E = scc_end(MF);
934 It != E; ++It, ++SccNum) {
935 std::vector<MachineBasicBlock *> &SccNext = *It;
936 for (std::vector<MachineBasicBlock *>::const_iterator
937 blockIter = SccNext.begin(), blockEnd = SccNext.end();
938 blockIter != blockEnd; ++blockIter) {
940 OrderedBlks.push_back(MBB);
941 recordSccnum(MBB, SccNum);
945 //walk through all the block in func to check for unreachable
946 typedef GraphTraits<MachineFunction *> GTM;
947 MachineFunction::iterator It = GTM::nodes_begin(MF), E = GTM::nodes_end(MF);
948 for (; It != E; ++It) {
949 MachineBasicBlock *MBB = &(*It);
950 SccNum = getSCCNum(MBB);
951 if (SccNum == INVALIDSCCNUM)
952 dbgs() << "unreachable block BB" << MBB->getNumber() << "\n";
// Run patternMatchGroup on \p MBB to a fixed point and return the total
// number of patterns merged. NumMatch/CurMatch declarations and the final
// return are on dropped lines.
956 int AMDGPUCFGStructurizer::patternMatch(MachineBasicBlock *MBB) {
961 dbgs() << "Begin patternMatch BB" << MBB->getNumber() << "\n";
964 while ((CurMatch = patternMatchGroup(MBB)) > 0)
965 NumMatch += CurMatch;
968 dbgs() << "End patternMatch BB" << MBB->getNumber()
969 << ", numMatch = " << NumMatch << "\n";
// One round of pattern matching on \p MBB: loops first (so breaks/continues
// are resolved), then serial merges, then if-patterns. NumMatch
// initialization and the return are on dropped lines.
975 int AMDGPUCFGStructurizer::patternMatchGroup(MachineBasicBlock *MBB) {
977 NumMatch += loopendPatternMatch();
978 NumMatch += serialPatternMatch(MBB);
979 NumMatch += ifPatternMatch(MBB);
// Match the serial pattern: MBB with a single successor whose successor has
// a single predecessor (and is not an active loop head) is merged into MBB.
// The 0/1 return statements are on dropped lines.
984 int AMDGPUCFGStructurizer::serialPatternMatch(MachineBasicBlock *MBB) {
985 if (MBB->succ_size() != 1)
988 MachineBasicBlock *childBlk = *MBB->succ_begin();
989 if (childBlk->pred_size() != 1 || isActiveLoophead(childBlk))
992 mergeSerialBlock(MBB, childBlk);
993 ++numSerialPatternMatch;
// Match the if/if-else pattern rooted at \p MBB (a two-successor block whose
// terminator is a conditional branch and which carries no back-edge). First
// reduce both arms recursively, then classify the shape (diamond, triangle
// either way, detached continue/break, or jump-into-if) and merge. Returns
// the number of patterns merged including recursive and cloned work.
// Several early returns and the LandBlk assignments for some shapes are on
// dropped lines.
997 int AMDGPUCFGStructurizer::ifPatternMatch(MachineBasicBlock *MBB) {
999 if (MBB->succ_size() != 2)
1001 if (hasBackEdge(MBB))
1003 MachineInstr *BranchMI = getNormalBlockBranchInstr(MBB);
1007 assert(isCondBranch(BranchMI));
// Reduce both arms before classifying the overall shape.
1010 MachineBasicBlock *TrueMBB = getTrueBranch(BranchMI);
1011 NumMatch += serialPatternMatch(TrueMBB);
1012 NumMatch += ifPatternMatch(TrueMBB);
1013 MachineBasicBlock *FalseMBB = getFalseBranch(MBB, BranchMI);
1014 NumMatch += serialPatternMatch(FalseMBB);
1015 NumMatch += ifPatternMatch(FalseMBB);
1016 MachineBasicBlock *LandBlk;
1019 assert (!TrueMBB->succ_empty() || !FalseMBB->succ_empty());
1021 if (TrueMBB->succ_size() == 1 && FalseMBB->succ_size() == 1
1022 && *TrueMBB->succ_begin() == *FalseMBB->succ_begin()) {
// Diamond: both arms rejoin at a common landing block.
1024 LandBlk = *TrueMBB->succ_begin();
1025 } else if (TrueMBB->succ_size() == 1 && *TrueMBB->succ_begin() == FalseMBB) {
1026 // Triangle pattern, false is empty
1029 } else if (FalseMBB->succ_size() == 1
1030 && *FalseMBB->succ_begin() == TrueMBB) {
1031 // Triangle pattern, true is empty
1032 // We reverse the predicate to make a triangle, empty false pattern;
1033 std::swap(TrueMBB, FalseMBB);
1034 reversePredicateSetter(MBB->end());
1037 } else if (FalseMBB->succ_size() == 1
1038 && isSameloopDetachedContbreak(TrueMBB, FalseMBB)) {
1039 LandBlk = *FalseMBB->succ_begin();
1040 } else if (TrueMBB->succ_size() == 1
1041 && isSameloopDetachedContbreak(FalseMBB, TrueMBB)) {
1042 LandBlk = *TrueMBB->succ_begin();
// No recognized shape: fall back to the jump-into-if handler.
1044 return NumMatch + handleJumpintoIf(MBB, TrueMBB, FalseMBB);
1047 // improveSimpleJumpinfoIf can handle the case where landBlk == NULL but the
1048 // new BB created for landBlk==NULL may introduce new challenge to the
1049 // reduction process.
1051 ((TrueMBB && TrueMBB->pred_size() > 1)
1052 || (FalseMBB && FalseMBB->pred_size() > 1))) {
1053 Cloned += improveSimpleJumpintoIf(MBB, TrueMBB, FalseMBB, &LandBlk);
// Arms with side entries must be privatized by cloning before the merge.
1056 if (TrueMBB && TrueMBB->pred_size() > 1) {
1057 TrueMBB = cloneBlockForPredecessor(TrueMBB, MBB);
1061 if (FalseMBB && FalseMBB->pred_size() > 1) {
1062 FalseMBB = cloneBlockForPredecessor(FalseMBB, MBB);
1066 mergeIfthenelseBlock(BranchMI, MBB, TrueMBB, FalseMBB, LandBlk);
1068 ++numIfPatternMatch;
1070 numClonedBlock += Cloned;
1072 return 1 + Cloned + NumMatch;
// Collect all loops depth-first, then merge them outermost-first so that a
// "continue" to an outer loop is not misread as a "break" of the current
// one. The Match accumulator and the early-exit/return lines are on
// dropped lines.
1075 int AMDGPUCFGStructurizer::loopendPatternMatch() {
1076 std::vector<MachineLoop *> NestedLoops;
1077 for (MachineLoopInfo::iterator It = MLI->begin(), E = MLI->end();
1079 df_iterator<MachineLoop *> LpIt = df_begin(*It),
1081 for (; LpIt != LpE; ++LpIt)
1082 NestedLoops.push_back(*LpIt);
1084 if (NestedLoops.size() == 0)
1087 // Process nested loop outside->inside, so "continue" to a outside loop won't
1088 // be mistaken as "break" of the current loop.
1090 for (std::vector<MachineLoop *>::reverse_iterator It = NestedLoops.rbegin(),
1091 E = NestedLoops.rend(); It != E; ++It) {
1092 MachineLoop *ExaminedLoop = *It;
1093 if (ExaminedLoop->getNumBlocks() == 0 || Visited[ExaminedLoop])
1095 DEBUG(dbgs() << "Processing:\n"; ExaminedLoop->dump(););
1096 int NumBreak = mergeLoop(ExaminedLoop);
// Structurize one loop: rewrite each exiting block as a loop-break toward
// the single exit block, each latch as a loop-continue toward the header,
// reduce the header to a single block via serial/if matching, then wrap the
// header and exit with the loop-land construct and update MachineLoopInfo.
// The do-loop header, the DEBUG wrappers and the return are on dropped
// lines.
1104 int AMDGPUCFGStructurizer::mergeLoop(MachineLoop *LoopRep) {
1105 MachineBasicBlock *LoopHeader = LoopRep->getHeader();
1106 MBBVector ExitingMBBs;
1107 LoopRep->getExitingBlocks(ExitingMBBs);
1108 assert(!ExitingMBBs.empty() && "Infinite Loop not supported");
1109 DEBUG(dbgs() << "Loop has " << ExitingMBBs.size() << " exiting blocks\n";);
1110 // We assume a single ExitBlk
1112 LoopRep->getExitBlocks(ExitBlks);
1113 SmallPtrSet<MachineBasicBlock *, 2> ExitBlkSet;
1114 for (unsigned i = 0, e = ExitBlks.size(); i < e; ++i)
1115 ExitBlkSet.insert(ExitBlks[i]);
1116 assert(ExitBlkSet.size() == 1);
1117 MachineBasicBlock *ExitBlk = *ExitBlks.begin();
1118 assert(ExitBlk && "Loop has several exit block");
// Latches = in-loop predecessors of the header.
1119 MBBVector LatchBlks;
1120 typedef GraphTraits<Inverse<MachineBasicBlock*> > InvMBBTraits;
1121 InvMBBTraits::ChildIteratorType PI = InvMBBTraits::child_begin(LoopHeader),
1122 PE = InvMBBTraits::child_end(LoopHeader);
1123 for (; PI != PE; PI++) {
1124 if (LoopRep->contains(*PI))
1125 LatchBlks.push_back(*PI);
1128 for (unsigned i = 0, e = ExitingMBBs.size(); i < e; ++i)
1129 mergeLoopbreakBlock(ExitingMBBs[i], ExitBlk);
1130 for (unsigned i = 0, e = LatchBlks.size(); i < e; ++i)
1131 settleLoopcontBlock(LatchBlks[i], LoopHeader);
// Reduce the loop body rooted at the header to a single block.
1135 Match += serialPatternMatch(LoopHeader);
1136 Match += ifPatternMatch(LoopHeader);
1137 } while (Match > 0);
1138 mergeLooplandBlock(LoopHeader, ExitBlk);
// Re-parent the header in LoopInfo now that this loop is dissolved.
1139 MachineLoop *ParentLoop = LoopRep->getParentLoop();
1141 MLI->changeLoopFor(LoopHeader, ParentLoop);
1143 MLI->removeBlock(LoopHeader);
1144 Visited[LoopRep] = true;
// Match loop-continue patterns: for each predecessor of LoopHeader that is
// still inside LoopRep, rewrite it as a continue (handleLoopcontBlock) and
// record it; afterwards remove each recorded block's CFG edge back to the
// header. Updates the numLoopcontPatternMatch statistic.
1148 int AMDGPUCFGStructurizer::loopcontPatternMatch(MachineLoop *LoopRep,
1149 MachineBasicBlock *LoopHeader) {
1151 SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> ContMBB;
1152 typedef GraphTraits<Inverse<MachineBasicBlock *> > GTIM;
1153 GTIM::ChildIteratorType It = GTIM::child_begin(LoopHeader),
1154 E = GTIM::child_end(LoopHeader);
1155 for (; It != E; ++It) {
1156 MachineBasicBlock *MBB = *It;
1157 if (LoopRep->contains(MBB)) {
1158 handleLoopcontBlock(MBB, MLI->getLoopFor(MBB),
1159 LoopHeader, LoopRep);
1160 ContMBB.push_back(MBB);
// Drop the back-edges only after the walk, so the predecessor iterator
// above is not invalidated while still in use.
1165 for (SmallVectorImpl<MachineBasicBlock *>::iterator It = ContMBB.begin(),
1166 E = ContMBB.end(); It != E; ++It) {
1167 (*It)->removeSuccessor(LoopHeader);
1170 numLoopcontPatternMatch += NumCont;
// Return true when Src1MBB is a "detached" cont/break block of the same
// loop as Src2MBB: Src1MBB has no successors (its branch was already
// rewritten away) and both blocks map to the same MachineLoop. Consults
// LLInfoMap for the loop's recorded entry/land block.
1176 bool AMDGPUCFGStructurizer::isSameloopDetachedContbreak(
1177 MachineBasicBlock *Src1MBB, MachineBasicBlock *Src2MBB) {
1178 if (Src1MBB->succ_size() == 0) {
1179 MachineLoop *LoopRep = MLI->getLoopFor(Src1MBB);
1180 if (LoopRep&& LoopRep == MLI->getLoopFor(Src2MBB)) {
1181 MachineBasicBlock *&TheEntry = LLInfoMap[LoopRep];
1184 dbgs() << "isLoopContBreakBlock yes src1 = BB"
1185 << Src1MBB->getNumber()
1186 << " src2 = BB" << Src2MBB->getNumber() << "\n";
// Handle a branch that jumps into the middle of an if region. Tries the
// true/false successors in their given order first; if that yields nothing,
// retries with the two successors swapped.
1195 int AMDGPUCFGStructurizer::handleJumpintoIf(MachineBasicBlock *HeadMBB,
1196 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB) {
1197 int Num = handleJumpintoIfImp(HeadMBB, TrueMBB, FalseMBB);
1200 dbgs() << "handleJumpintoIf swap trueBlk and FalseBlk" << "\n";
1202 Num = handleJumpintoIfImp(HeadMBB, FalseMBB, TrueMBB);
// Worker for handleJumpintoIf. Walks down from TrueMBB looking for a block
// DownBlk such that FalseMBB reaches DownBlk along a single path; once
// found, side entries into both arms are cloned away (cloneOnSideEntryTo)
// and the resulting diamond is re-matched with serial/if pattern matching.
// Returns the number of blocks cloned/merged.
1207 int AMDGPUCFGStructurizer::handleJumpintoIfImp(MachineBasicBlock *HeadMBB,
1208 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB) {
1210 MachineBasicBlock *DownBlk;
1212 //trueBlk could be the common post dominator
1216 dbgs() << "handleJumpintoIfImp head = BB" << HeadMBB->getNumber()
1217 << " true = BB" << TrueMBB->getNumber()
1218 << ", numSucc=" << TrueMBB->succ_size()
1219 << " false = BB" << FalseMBB->getNumber() << "\n";
1224 dbgs() << "check down = BB" << DownBlk->getNumber();
// The pattern applies only if FalseMBB reaches DownBlk on a single path.
1227 if (singlePathTo(FalseMBB, DownBlk) == SinglePath_InPath) {
1229 dbgs() << " working\n";
1232 Num += cloneOnSideEntryTo(HeadMBB, TrueMBB, DownBlk);
1233 Num += cloneOnSideEntryTo(HeadMBB, FalseMBB, DownBlk);
1235 numClonedBlock += Num;
1236 Num += serialPatternMatch(*HeadMBB->succ_begin());
1237 Num += serialPatternMatch(*llvm::next(HeadMBB->succ_begin()));
1238 Num += ifPatternMatch(HeadMBB);
1244 dbgs() << " not working\n";
// Step to the unique successor (if any) and try again further down.
1246 DownBlk = (DownBlk->succ_size() == 1) ? (*DownBlk->succ_begin()) : NULL;
1247 } // walk down the postDomTree
// Debug-print the head/true/false/land blocks of a jump-into-if candidate.
// With Detail set, the full contents of each block are dumped; otherwise
// only block numbers, sizes and predecessor counts are printed. Null blocks
// are skipped (guards elided in this listing).
1252 void AMDGPUCFGStructurizer::showImproveSimpleJumpintoIf(
1253 MachineBasicBlock *HeadMBB, MachineBasicBlock *TrueMBB,
1254 MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB, bool Detail) {
1255 dbgs() << "head = BB" << HeadMBB->getNumber()
1256 << " size = " << HeadMBB->size();
1259 HeadMBB->print(dbgs());
1264 dbgs() << ", true = BB" << TrueMBB->getNumber() << " size = "
1265 << TrueMBB->size() << " numPred = " << TrueMBB->pred_size();
1268 TrueMBB->print(dbgs());
1273 dbgs() << ", false = BB" << FalseMBB->getNumber() << " size = "
1274 << FalseMBB->size() << " numPred = " << FalseMBB->pred_size();
1277 FalseMBB->print(dbgs());
1282 dbgs() << ", land = BB" << LandMBB->getNumber() << " size = "
1283 << LandMBB->size() << " numPred = " << LandMBB->pred_size();
1286 LandMBB->print(dbgs());
// Rewrite a "jump into if" by migrating the bodies of TrueMBB/FalseMBB into
// the landing block, guarded by an initReg selector:
//   org: headBlk => if () {trueBlk} else {falseBlk} => landBlk
//   new: predecessors write initReg; landBlk re-tests it to run the
//        migrated true or false body (see the comments below).
// A block is migrated when needMigrateBlock() says so, or when it has
// multiple predecessors (its write of initReg would otherwise be undefined
// on the other incoming paths). Several shapes that would need extra
// registers or compares after register allocation are currently
// unimplemented and abort via llvm_unreachable. On return *LandMBBPtr holds
// the landing block used.
1294 int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
1295 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
1296 MachineBasicBlock **LandMBBPtr) {
1297 bool MigrateTrue = false;
1298 bool MigrateFalse = false;
1300 MachineBasicBlock *LandBlk = *LandMBBPtr;
// Each arm must fall through (at most one successor) for this transform.
1302 assert((!TrueMBB || TrueMBB->succ_size() <= 1)
1303 && (!FalseMBB || FalseMBB->succ_size() <= 1));
1305 if (TrueMBB == FalseMBB)
1308 MigrateTrue = needMigrateBlock(TrueMBB);
1309 MigrateFalse = needMigrateBlock(FalseMBB);
1311 if (!MigrateTrue && !MigrateFalse)
1314 // If we need to migrate either trueBlk and falseBlk, migrate the rest that
1315 // have more than one predecessors. without doing this, its predecessor
1316 // rather than headBlk will have undefined value in initReg.
1317 if (!MigrateTrue && TrueMBB && TrueMBB->pred_size() > 1)
1319 if (!MigrateFalse && FalseMBB && FalseMBB->pred_size() > 1)
1320 MigrateFalse = true;
1323 dbgs() << "before improveSimpleJumpintoIf: ";
1324 showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0);
1327 // org: headBlk => if () {trueBlk} else {falseBlk} => landBlk
1329 // new: headBlk => if () {initReg = 1; org trueBlk branch} else
1330 // {initReg = 0; org falseBlk branch }
1331 // => landBlk => if (initReg) {org trueBlk} else {org falseBlk}
1333 // if landBlk->pred_size() > 2, put the about if-else inside
1334 // if (initReg !=2) {...}
1336 // add initReg = initVal to headBlk
1338 const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
1339 if (!MigrateTrue || !MigrateFalse) {
1340 // XXX: We have an opportunity here to optimize the "branch into if" case
1341 // here. Branch into if looks like this:
1344 // diamond_head branch_from
1346 // diamond_false diamond_true
1350 // The diamond_head block begins the "if" and the diamond_true block
1351 // is the block being "branched into".
1353 // If MigrateTrue is true, then TrueBB is the block being "branched into"
1354 // and if MigrateFalse is true, then FalseBB is the block being
1357 // Here is the pseudo code for how I think the optimization should work:
1358 // 1. Insert MOV GPR0, 0 before the branch instruction in diamond_head.
1359 // 2. Insert MOV GPR0, 1 before the branch instruction in branch_from.
1360 // 3. Move the branch instruction from diamond_head into its own basic
1361 // block (new_block).
1362 // 4. Add an unconditional branch from diamond_head to new_block
1363 // 5. Replace the branch instruction in branch_from with an unconditional
1364 // branch to new_block. If branch_from has multiple predecessors, then
1365 // we need to replace the True/False block in the branch
1366 // instruction instead of replacing it.
1367 // 6. Change the condition of the branch instruction in new_block from
1368 // COND to (COND || GPR0)
1370 // In order insert these MOV instruction, we will need to use the
1371 // RegisterScavenger. Usually liveness stops being tracked during
1372 // the late machine optimization passes, however if we implement
1373 // bool TargetRegisterInfo::requiresRegisterScavenging(
1374 // const MachineFunction &MF)
1375 // and have it return true, liveness will be tracked correctly
1376 // by generic optimization passes. We will also need to make sure that
1377 // all of our target-specific passes that run after regalloc and before
1378 // the CFGStructurizer track liveness and we will need to modify this pass
1379 // to correctly track liveness.
1381 // After the above changes, the new CFG should look like this:
1384 // diamond_head branch_from
1388 // diamond_false diamond_true
1392 // Without this optimization, we are forced to duplicate the diamond_true
1393 // block and we will end up with a CFG like this:
1397 // diamond_head branch_from
1399 // diamond_false diamond_true diamond_true (duplicate)
1401 // done --------------------|
1403 // Duplicating diamond_true can be very costly especially if it has a
1404 // lot of instructions.
1410 bool LandBlkHasOtherPred = (LandBlk->pred_size() > 2);
1412 //insert AMDGPU::ENDIF to avoid special case "input landBlk == NULL"
1413 MachineBasicBlock::iterator I = insertInstrBefore(LandBlk, AMDGPU::ENDIF);
// Landing block with extra predecessors would need an initReg compare;
// not implemented post-RA (no free register), so this path aborts.
1415 if (LandBlkHasOtherPred) {
1416 llvm_unreachable("Extra register needed to handle CFG");
1417 unsigned CmpResReg =
1418 HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC);
1419 llvm_unreachable("Extra compare instruction needed to handle CFG");
1420 insertCondBranchBefore(LandBlk, I, AMDGPU::IF_PREDICATE_SET,
1421 CmpResReg, DebugLoc());
1424 // XXX: We are running this after RA, so creating virtual registers will
1425 // cause an assertion failure in the PostRA scheduling pass.
1427 HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC);
1428 insertCondBranchBefore(LandBlk, I, AMDGPU::IF_PREDICATE_SET, InitReg,
1432 migrateInstruction(TrueMBB, LandBlk, I);
1433 // need to uncondionally insert the assignment to ensure a path from its
1434 // predecessor rather than headBlk has valid value in initReg if
1436 llvm_unreachable("Extra register needed to handle CFG");
1438 insertInstrBefore(I, AMDGPU::ELSE);
1441 migrateInstruction(FalseMBB, LandBlk, I);
1442 // need to uncondionally insert the assignment to ensure a path from its
1443 // predecessor rather than headBlk has valid value in initReg if
1445 llvm_unreachable("Extra register needed to handle CFG");
1448 if (LandBlkHasOtherPred) {
1450 insertInstrBefore(I, AMDGPU::ENDIF);
1452 // put initReg = 2 to other predecessors of landBlk
1453 for (MachineBasicBlock::pred_iterator PI = LandBlk->pred_begin(),
1454 PE = LandBlk->pred_end(); PI != PE; ++PI) {
1455 MachineBasicBlock *MBB = *PI;
1456 if (MBB != TrueMBB && MBB != FalseMBB)
1457 llvm_unreachable("Extra register needed to handle CFG");
1461 dbgs() << "result from improveSimpleJumpintoIf: ";
1462 showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0);
1466 *LandMBBPtr = LandBlk;
// Log a continue candidate (block numbers and loop depths) and then rewrite
// ContingMBB's terminator into a structured continue toward ContMBB via
// settleLoopcontBlock. The loop parameters are used only for the debug
// output here.
1471 void AMDGPUCFGStructurizer::handleLoopcontBlock(MachineBasicBlock *ContingMBB,
1472 MachineLoop *ContingLoop, MachineBasicBlock *ContMBB,
1473 MachineLoop *ContLoop) {
1474 DEBUG(dbgs() << "loopcontPattern cont = BB" << ContingMBB->getNumber()
1475 << " header = BB" << ContMBB->getNumber() << "\n";
1476 dbgs() << "Trying to continue loop-depth = "
1477 << getLoopDepth(ContLoop)
1478 << " from loop-depth = " << getLoopDepth(ContingLoop) << "\n";);
1479 settleLoopcontBlock(ContingMBB, ContMBB);
// Collapse the serial pattern DstMBB -> SrcMBB: splice all of SrcMBB's
// instructions onto the end of DstMBB, transfer SrcMBB's successor edges to
// DstMBB (cloneSuccessorList), then fully detach and retire SrcMBB.
1482 void AMDGPUCFGStructurizer::mergeSerialBlock(MachineBasicBlock *DstMBB,
1483 MachineBasicBlock *SrcMBB) {
1485 dbgs() << "serialPattern BB" << DstMBB->getNumber()
1486 << " <= BB" << SrcMBB->getNumber() << "\n";
1488 DstMBB->splice(DstMBB->end(), SrcMBB, SrcMBB->begin(), SrcMBB->end());
1490 DstMBB->removeSuccessor(SrcMBB);
1491 cloneSuccessorList(DstMBB, SrcMBB);
1493 removeSuccessor(SrcMBB);
1494 MLI->removeBlock(SrcMBB);
1495 retireBlock(SrcMBB);
// Collapse an if-then-else diamond into straight-line structured code
// inside MBB: the original conditional branch is replaced by a structured
// conditional (branch-on-nonzero form of the old opcode), the true and
// false blocks are spliced into MBB around ELSE/ENDIF markers and retired,
// and MBB then falls through to LandMBB.
1498 void AMDGPUCFGStructurizer::mergeIfthenelseBlock(MachineInstr *BranchMI,
1499 MachineBasicBlock *MBB, MachineBasicBlock *TrueMBB,
1500 MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB) {
1503 dbgs() << "ifPattern BB" << MBB->getNumber();
1506 dbgs() << "BB" << TrueMBB->getNumber();
1508 dbgs() << " } else ";
1511 dbgs() << "BB" << FalseMBB->getNumber();
1514 dbgs() << "landBlock: ";
1518 dbgs() << "BB" << LandMBB->getNumber();
1523 int OldOpcode = BranchMI->getOpcode();
1524 DebugLoc BranchDL = BranchMI->getDebugLoc();
1534 MachineBasicBlock::iterator I = BranchMI;
1535 insertCondBranchBefore(I, getBranchNzeroOpcode(OldOpcode),
// Splice the "then" side into MBB and retire its block.
1539 MBB->splice(I, TrueMBB, TrueMBB->begin(), TrueMBB->end());
1540 MBB->removeSuccessor(TrueMBB);
1541 if (LandMBB && TrueMBB->succ_size()!=0)
1542 TrueMBB->removeSuccessor(LandMBB);
1543 retireBlock(TrueMBB);
1544 MLI->removeBlock(TrueMBB);
// Splice the "else" side (when present) behind an ELSE marker.
1548 insertInstrBefore(I, AMDGPU::ELSE);
1549 MBB->splice(I, FalseMBB, FalseMBB->begin(),
1551 MBB->removeSuccessor(FalseMBB);
1552 if (LandMBB && FalseMBB->succ_size() != 0)
1553 FalseMBB->removeSuccessor(LandMBB);
1554 retireBlock(FalseMBB);
1555 MLI->removeBlock(FalseMBB);
1557 insertInstrBefore(I, AMDGPU::ENDIF);
// The original conditional branch is no longer needed.
1559 BranchMI->eraseFromParent();
1561 if (LandMBB && TrueMBB && FalseMBB)
1562 MBB->addSuccessor(LandMBB);
// Wrap DstBlk in structured loop markers: WHILELOOP at the top and ENDLOOP
// at the end. The block's self-loop back-edge is replaced by an edge to
// LandMBB.
1566 void AMDGPUCFGStructurizer::mergeLooplandBlock(MachineBasicBlock *DstBlk,
1567 MachineBasicBlock *LandMBB) {
1568 DEBUG(dbgs() << "loopPattern header = BB" << DstBlk->getNumber()
1569 << " land = BB" << LandMBB->getNumber() << "\n";);
1571 insertInstrBefore(DstBlk, AMDGPU::WHILELOOP, DebugLoc());
1572 insertInstrEnd(DstBlk, AMDGPU::ENDLOOP, DebugLoc());
1573 DstBlk->addSuccessor(LandMBB);
1574 DstBlk->removeSuccessor(DstBlk);
// Rewrite the conditional exit branch of ExitingMBB as a structured break:
//   IF_PREDICATE_SET PREDICATE_BIT; BREAK; ENDIF
// If the branch's true target is not the land (exit) block, the predicate
// setter is reversed first so the break fires on the exit condition. The
// original branch is erased and the CFG edge to LandMBB removed.
1578 void AMDGPUCFGStructurizer::mergeLoopbreakBlock(MachineBasicBlock *ExitingMBB,
1579 MachineBasicBlock *LandMBB) {
1580 DEBUG(dbgs() << "loopbreakPattern exiting = BB" << ExitingMBB->getNumber()
1581 << " land = BB" << LandMBB->getNumber() << "\n";);
1582 MachineInstr *BranchMI = getLoopendBlockBranchInstr(ExitingMBB);
1583 assert(BranchMI && isCondBranch(BranchMI));
1584 DebugLoc DL = BranchMI->getDebugLoc();
1585 MachineBasicBlock *TrueBranch = getTrueBranch(BranchMI);
1586 MachineBasicBlock::iterator I = BranchMI;
1587 if (TrueBranch != LandMBB)
1588 reversePredicateSetter(I);
1589 insertCondBranchBefore(ExitingMBB, I, AMDGPU::IF_PREDICATE_SET, AMDGPU::PREDICATE_BIT, DL);
1590 insertInstrBefore(I, AMDGPU::BREAK);
1591 insertInstrBefore(I, AMDGPU::ENDIF);
1592 //now branchInst can be erase safely
1593 BranchMI->eraseFromParent();
1594 //now take care of successors, retire blocks
1595 ExitingMBB->removeSuccessor(LandMBB);
// Rewrite ContingMBB's branch toward ContMBB as a structured continue.
// When the conditional branch is not the last instruction of the block,
// emit IF(cond) ... CONTINUE; ENDIF, choosing the nonzero/zero branch form
// so the continue fires exactly when the original branch would have gone to
// ContMBB. When the branch IS the last instruction, a single combined
// continue-on-condition opcode is used instead. The original branch is
// erased. With no branch at all, an unconditional CONTINUE is appended,
// reusing the last debug location seen in the block.
1598 void AMDGPUCFGStructurizer::settleLoopcontBlock(MachineBasicBlock *ContingMBB,
1599 MachineBasicBlock *ContMBB) {
1600 DEBUG(dbgs() << "settleLoopcontBlock conting = BB"
1601 << ContingMBB->getNumber()
1602 << ", cont = BB" << ContMBB->getNumber() << "\n";);
1604 MachineInstr *MI = getLoopendBlockBranchInstr(ContingMBB);
1606 assert(isCondBranch(MI));
1607 MachineBasicBlock::iterator I = MI;
1608 MachineBasicBlock *TrueBranch = getTrueBranch(MI);
1609 int OldOpcode = MI->getOpcode();
1610 DebugLoc DL = MI->getDebugLoc();
// The combined continue form is only usable when the branch ends the block.
1612 bool UseContinueLogical = ((&*ContingMBB->rbegin()) == MI);
1614 if (UseContinueLogical == false) {
1616 TrueBranch == ContMBB ? getBranchNzeroOpcode(OldOpcode) :
1617 getBranchZeroOpcode(OldOpcode);
1618 insertCondBranchBefore(I, BranchOpcode, DL);
1619 // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
1620 insertInstrEnd(ContingMBB, AMDGPU::CONTINUE, DL);
1621 insertInstrEnd(ContingMBB, AMDGPU::ENDIF, DL);
1624 TrueBranch == ContMBB ? getContinueNzeroOpcode(OldOpcode) :
1625 getContinueZeroOpcode(OldOpcode);
1626 insertCondBranchBefore(I, BranchOpcode, DL);
1629 MI->eraseFromParent();
1631 // if we've arrived here then we've already erased the branch instruction
1632 // travel back up the basic block to see the last reference of our debug
1633 // location we've just inserted that reference here so it should be
1634 // representative insertEnd to ensure phi-moves, if exist, go before the
1636 insertInstrEnd(ContingMBB, AMDGPU::CONTINUE,
1637 getLastDebugLocInBB(ContingMBB));
// Walk the single-successor chain from SrcMBB toward DstMBB; any block on
// the chain with more than one predecessor is cloned for the path coming
// from PreMBB, so the PreMBB->DstMBB path has no side entries. Returns the
// number of blocks cloned.
1641 int AMDGPUCFGStructurizer::cloneOnSideEntryTo(MachineBasicBlock *PreMBB,
1642 MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB) {
1644 assert(PreMBB->isSuccessor(SrcMBB));
1645 while (SrcMBB && SrcMBB != DstMBB) {
1646 assert(SrcMBB->succ_size() == 1);
1647 if (SrcMBB->pred_size() > 1) {
1648 SrcMBB = cloneBlockForPredecessor(SrcMBB, PreMBB);
1653 SrcMBB = *SrcMBB->succ_begin();
// Clone MBB for one specific predecessor: PredMBB's branch operands and CFG
// edge are redirected to a fresh copy of MBB, which receives a copy of
// MBB's successor list. Returns the clone; numClonedInstr is bumped by the
// size of the original block.
1660 AMDGPUCFGStructurizer::cloneBlockForPredecessor(MachineBasicBlock *MBB,
1661 MachineBasicBlock *PredMBB) {
1662 assert(PredMBB->isSuccessor(MBB) &&
1663 "succBlk is not a prececessor of curBlk");
1665 MachineBasicBlock *CloneMBB = clone(MBB); //clone instructions
1666 replaceInstrUseOfBlockWith(PredMBB, MBB, CloneMBB);
1667 //srcBlk, oldBlk, newBlk
1669 PredMBB->removeSuccessor(MBB);
1670 PredMBB->addSuccessor(CloneMBB);
1672 // add all successor to cloneBlk
1673 cloneSuccessorList(CloneMBB, MBB);
1675 numClonedInstr += MBB->size();
1678 dbgs() << "Cloned block: " << "BB"
1679 << MBB->getNumber() << "size " << MBB->size() << "\n";
1682 SHOWNEWBLK(CloneMBB, "result of Cloned block: ");
// Move SrcMBB's instructions into DstMBB in front of iterator I. Everything
// up to (but not including) SrcMBB's original branch instruction is moved;
// if SrcMBB has no branch, the whole block body is moved.
1687 void AMDGPUCFGStructurizer::migrateInstruction(MachineBasicBlock *SrcMBB,
1688 MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I) {
1689 MachineBasicBlock::iterator SpliceEnd;
1690 //look for the input branchinstr, not the AMDGPU branchinstr
1691 MachineInstr *BranchMI = getNormalBlockBranchInstr(SrcMBB);
1694 dbgs() << "migrateInstruction don't see branch instr\n" ;
1696 SpliceEnd = SrcMBB->end();
1699 dbgs() << "migrateInstruction see branch instr\n" ;
1702 SpliceEnd = BranchMI;
1705 dbgs() << "migrateInstruction before splice dstSize = " << DstMBB->size()
1706 << "srcSize = " << SrcMBB->size() << "\n";
1709 //splice insert before insertPos
1710 DstMBB->splice(I, SrcMBB, SrcMBB->begin(), SpliceEnd);
1713 dbgs() << "migrateInstruction after splice dstSize = " << DstMBB->size()
1714 << "srcSize = " << SrcMBB->size() << "\n";
// Give an infinite loop (latch ends in an unconditional branch back to the
// header) a synthetic exit: create DummyExitBlk, replace the latch's branch
// with a BRANCH_COND_i32 conditioned on a freshly created i32 register, and
// add the latch -> DummyExitBlk edge. Returns the new exit block, or bails
// out early when there is no header/latch or the loop is not infinite.
// NOTE: this path currently aborts via llvm_unreachable because
// materializing the condition register is not supported after register
// allocation.
1719 AMDGPUCFGStructurizer::normalizeInfiniteLoopExit(MachineLoop* LoopRep) {
1720 MachineBasicBlock *LoopHeader = LoopRep->getHeader();
1721 MachineBasicBlock *LoopLatch = LoopRep->getLoopLatch();
1722 const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
1724 if (!LoopHeader || !LoopLatch)
1726 MachineInstr *BranchMI = getLoopendBlockBranchInstr(LoopLatch);
1727 // Is LoopRep an infinite loop ?
1728 if (!BranchMI || !isUncondBranch(BranchMI))
1731 MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock();
1732 FuncRep->push_back(DummyExitBlk); //insert to function
1733 SHOWNEWBLK(DummyExitBlk, "DummyExitBlock to normalize infiniteLoop: ");
1734 DEBUG(dbgs() << "Old branch instr: " << *BranchMI << "\n";);
1735 MachineBasicBlock::iterator I = BranchMI;
1736 unsigned ImmReg = FuncRep->getRegInfo().createVirtualRegister(I32RC);
1737 llvm_unreachable("Extra register needed to handle CFG");
1738 MachineInstr *NewMI = insertInstrBefore(I, AMDGPU::BRANCH_COND_i32);
1739 MachineInstrBuilder MIB(*FuncRep, NewMI);
1740 MIB.addMBB(LoopHeader);
1741 MIB.addReg(ImmReg, false);
1742 SHOWNEWINSTR(NewMI);
1743 BranchMI->eraseFromParent();
1744 LoopLatch->addSuccessor(DummyExitBlk);
1746 return DummyExitBlk;
// Erase every trailing unconditional branch in MBB. A loop is used because,
// per the original author's note below, some inputs have two unconditional
// branches in one basic block.
1749 void AMDGPUCFGStructurizer::removeUnconditionalBranch(MachineBasicBlock *MBB) {
1750 MachineInstr *BranchMI;
1752 // I saw two unconditional branch in one basic block in example
1753 // test_fc_do_while_or.c need to fix the upstream on this to remove the loop.
1754 while ((BranchMI = getLoopendBlockBranchInstr(MBB))
1755 && isUncondBranch(BranchMI)) {
1756 DEBUG(dbgs() << "Removing uncond branch instr"; BranchMI->dump(););
1757 BranchMI->eraseFromParent();
// When a block's two successor entries refer to the same block, its
// conditional branch is redundant: erase the branch and drop the duplicate
// successor edge. Blocks without exactly two successor entries are ignored.
1761 void AMDGPUCFGStructurizer::removeRedundantConditionalBranch(
1762 MachineBasicBlock *MBB) {
1763 if (MBB->succ_size() != 2)
1765 MachineBasicBlock *MBB1 = *MBB->succ_begin();
1766 MachineBasicBlock *MBB2 = *llvm::next(MBB->succ_begin());
1770 MachineInstr *BranchMI = getNormalBlockBranchInstr(MBB);
1771 assert(BranchMI && isCondBranch(BranchMI));
1772 DEBUG(dbgs() << "Removing unneeded cond branch instr"; BranchMI->dump(););
1773 BranchMI->eraseFromParent();
1774 SHOWNEWBLK(MBB1, "Removing redundant successor");
1775 MBB->removeSuccessor(MBB1);
// Create a single dummy exit block ending in RETURN and retarget every
// block in RetMBB to it: each block's own return instruction is erased and
// an edge to the dummy block added. This gives the function a unique exit
// for structurization.
1778 void AMDGPUCFGStructurizer::addDummyExitBlock(
1779 SmallVectorImpl<MachineBasicBlock*> &RetMBB) {
1780 MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock();
1781 FuncRep->push_back(DummyExitBlk); //insert to function
1782 insertInstrEnd(DummyExitBlk, AMDGPU::RETURN);
1784 for (SmallVectorImpl<MachineBasicBlock *>::iterator It = RetMBB.begin(),
1785 E = RetMBB.end(); It != E; ++It) {
1786 MachineBasicBlock *MBB = *It;
1787 MachineInstr *MI = getReturnInstr(MBB);
1789 MI->eraseFromParent();
1790 MBB->addSuccessor(DummyExitBlk);
1792 dbgs() << "Add dummyExitBlock to BB" << MBB->getNumber()
1796 SHOWNEWBLK(DummyExitBlk, "DummyExitBlock: ");
// Detach MBB from the CFG below it by removing all of its successor edges.
1799 void AMDGPUCFGStructurizer::removeSuccessor(MachineBasicBlock *MBB) {
1800 while (MBB->succ_size())
1801 MBB->removeSuccessor(*MBB->succ_begin());
// Record the SCC number for MBB in BlockInfoMap, allocating the
// BlockInformation entry on first use.
1804 void AMDGPUCFGStructurizer::recordSccnum(MachineBasicBlock *MBB,
1806 BlockInformation *&srcBlkInfo = BlockInfoMap[MBB];
1808 srcBlkInfo = new BlockInformation();
1809 srcBlkInfo->SccNum = SccNum;
// Mark MBB as retired in BlockInfoMap (allocating the entry if needed).
// The block must already be fully detached: the assert requires it to have
// no successors and no predecessors.
1812 void AMDGPUCFGStructurizer::retireBlock(MachineBasicBlock *MBB) {
1814 dbgs() << "Retiring BB" << MBB->getNumber() << "\n";
1817 BlockInformation *&SrcBlkInfo = BlockInfoMap[MBB];
1820 SrcBlkInfo = new BlockInformation();
1822 SrcBlkInfo->IsRetired = true;
1823 assert(MBB->succ_size() == 0 && MBB->pred_size() == 0
1824 && "can't retire block yet");
// Record MBB as the landing block for loopRep in LLInfoMap. When no block
// is supplied, a fresh dummy landing block is created and appended to the
// function instead (used for loops without a break).
1827 void AMDGPUCFGStructurizer::setLoopLandBlock(MachineLoop *loopRep,
1828 MachineBasicBlock *MBB) {
1829 MachineBasicBlock *&TheEntry = LLInfoMap[loopRep];
1831 MBB = FuncRep->CreateMachineBasicBlock();
1832 FuncRep->push_back(MBB); //insert to function
1833 SHOWNEWBLK(MBB, "DummyLandingBlock for loop without break: ");
1837 dbgs() << "setLoopLandBlock loop-header = BB"
1838 << loopRep->getHeader()->getNumber()
1839 << " landing-block = BB" << MBB->getNumber() << "\n";
// Find the nearest common post-dominator of MBB1 and MBB2 using PDT. If one
// already post-dominates the other, it is the answer. Freshly cloned blocks
// may not be in the post-dominator tree yet; such a block with a single
// successor is transparently replaced by that successor. Otherwise MBB1's
// immediate-post-dominator chain is climbed until a node post-dominating
// MBB2 is found.
1844 AMDGPUCFGStructurizer::findNearestCommonPostDom(MachineBasicBlock *MBB1,
1845 MachineBasicBlock *MBB2) {
1847 if (PDT->dominates(MBB1, MBB2))
1849 if (PDT->dominates(MBB2, MBB1))
1852 MachineDomTreeNode *Node1 = PDT->getNode(MBB1);
1853 MachineDomTreeNode *Node2 = PDT->getNode(MBB2);
1855 // Handle newly cloned node.
1856 if (!Node1 && MBB1->succ_size() == 1)
1857 return findNearestCommonPostDom(*MBB1->succ_begin(), MBB2);
1858 if (!Node2 && MBB2->succ_size() == 1)
1859 return findNearestCommonPostDom(MBB1, *MBB2->succ_begin());
1861 if (!Node1 || !Node2)
// Climb MBB1's immediate post-dominator chain toward the root.
1864 Node1 = Node1->getIDom();
1866 if (PDT->dominates(Node1, Node2))
1867 return Node1->getBlock();
1868 Node1 = Node1->getIDom();
// Fold the two-block findNearestCommonPostDom over a set of blocks to
// obtain their common post-dominator; the fold stops early once it becomes
// null (no common post-dominator at some step).
1875 AMDGPUCFGStructurizer::findNearestCommonPostDom(
1876 std::set<MachineBasicBlock *> &MBBs) {
1877 MachineBasicBlock *CommonDom;
1878 std::set<MachineBasicBlock *>::const_iterator It = MBBs.begin();
1879 std::set<MachineBasicBlock *>::const_iterator E = MBBs.end();
1880 for (CommonDom = *It; It != E && CommonDom; ++It) {
1881 MachineBasicBlock *MBB = *It;
1882 if (MBB != CommonDom)
1883 CommonDom = findNearestCommonPostDom(MBB, CommonDom);
1887 dbgs() << "Common post dominator for exit blocks is ";
1889 dbgs() << "BB" << CommonDom->getNumber() << "\n";
// Pass identification; its address is used by the pass framework in place
// of typeid.
1897 char AMDGPUCFGStructurizer::ID = 0;
1899 } // end anonymous namespace
1902 FunctionPass *llvm::createAMDGPUCFGStructurizerPass(TargetMachine &tm) {
1903 return new AMDGPUCFGStructurizer(tm);