//===- X86CmovConversion.cpp - Convert Cmov to Branch --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements a pass that converts X86 cmov instructions into
/// branches when profitable. This pass is conservative: it transforms if and
/// only if it can guarantee a gain with high confidence.
///
/// Thus, the optimization applies under the following conditions:
///   1. Consider as candidates only CMOVs in innermost loops (assume that
///      most hotspots are represented by these loops).
///   2. Given a group of CMOV instructions that are using the same EFLAGS def
///      instruction:
///      a. Consider them as candidates only if all have the same condition
///         code or the opposite one, to prevent generating more than one
///         conditional jump per EFLAGS def instruction.
///      b. Consider them as candidates only if all are profitable to be
///         converted (assume that one bad conversion may cause a degradation).
///   3. Apply conversion only for loops that are found profitable and only for
///      CMOV candidates that were found profitable.
///      a. A loop is considered profitable only if conversion will reduce its
///         depth cost by some threshold.
///      b. A CMOV is considered profitable if the cost of its condition is
///         higher than the average cost of its true-value and false-value by
///         25% of the branch-misprediction penalty. This assures no
///         degradation even with 25% branch misprediction.
/// Note: This pass is assumed to run on SSA machine code.
//===----------------------------------------------------------------------===//
//
// External interfaces:
//   FunctionPass *llvm::createX86CmovConverterPass();
//   bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF);
//
//===----------------------------------------------------------------------===//
#include "X86.h"
#include "X86InstrInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;
#define DEBUG_TYPE "x86-cmov-conversion"

STATISTIC(NumOfSkippedCmovGroups, "Number of unsupported CMOV-groups");
STATISTIC(NumOfCmovGroupCandidate, "Number of CMOV-group candidates");
STATISTIC(NumOfLoopCandidate, "Number of CMOV-conversion profitable loops");
STATISTIC(NumOfOptimizedCmovGroups, "Number of optimized CMOV-groups");
// This internal switch can be used to turn off the cmov/branch optimization.
static cl::opt<bool>
    EnableCmovConverter("x86-cmov-converter",
                        cl::desc("Enable the X86 cmov-to-branch optimization."),
                        cl::init(true), cl::Hidden);

static cl::opt<unsigned>
    GainCycleThreshold("x86-cmov-converter-threshold",
                       cl::desc("Minimum gain per loop (in cycles) threshold."),
                       cl::init(4), cl::Hidden);

static cl::opt<bool> ForceMemOperand(
    "x86-cmov-converter-force-mem-operand",
    cl::desc("Convert cmovs to branches whenever they have memory operands."),
    cl::init(true), cl::Hidden);
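
// For example (hypothetical invocations; these are hidden cl::opt flags, so
// check the exact spelling against the build), the conversion can be disabled
// or tuned from the llc command line:
//   llc -x86-cmov-converter=false foo.ll
//   llc -x86-cmov-converter-threshold=8 foo.ll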
namespace {

/// Converts X86 cmov instructions into branches when profitable.
class X86CmovConverterPass : public MachineFunctionPass {
public:
  X86CmovConverterPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 cmov Conversion"; }
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Pass identification, replacement for typeid.
  static char ID;

private:
  MachineRegisterInfo *MRI = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  TargetSchedModel TSchedModel;

  /// List of consecutive CMOV instructions.
  using CmovGroup = SmallVector<MachineInstr *, 2>;
  using CmovGroups = SmallVector<CmovGroup, 2>;
  /// Collect all CMOV-group-candidates in \p CurrLoop and update \p
  /// CmovInstGroups accordingly.
  ///
  /// \param Blocks List of blocks to process.
  /// \param CmovInstGroups List of consecutive CMOV instructions in CurrLoop.
  /// \returns true iff it found any CMOV-group-candidate.
  bool collectCmovCandidates(ArrayRef<MachineBasicBlock *> Blocks,
                             CmovGroups &CmovInstGroups,
                             bool IncludeLoads = false);

  /// Check if it is profitable to transform each CMOV-group-candidate into a
  /// branch. Remove all groups that are not profitable from \p CmovInstGroups.
  ///
  /// \param Blocks List of blocks to process.
  /// \param CmovInstGroups List of consecutive CMOV instructions in CurrLoop.
  /// \returns true iff any CMOV-group-candidate remains.
  bool checkForProfitableCmovCandidates(ArrayRef<MachineBasicBlock *> Blocks,
                                        CmovGroups &CmovInstGroups);

  /// Convert the given list of consecutive CMOV instructions into a branch.
  ///
  /// \param Group Consecutive CMOV instructions to be converted into branch.
  void convertCmovInstsToBranches(SmallVectorImpl<MachineInstr *> &Group) const;
};

} // end anonymous namespace
char X86CmovConverterPass::ID = 0;

void X86CmovConverterPass::getAnalysisUsage(AnalysisUsage &AU) const {
  MachineFunctionPass::getAnalysisUsage(AU);
  AU.addRequired<MachineLoopInfo>();
}
bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;
  if (!EnableCmovConverter)
    return false;

  LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
                    << "**********\n");

  bool Changed = false;
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  MRI = &MF.getRegInfo();
  TII = STI.getInstrInfo();
  TRI = STI.getRegisterInfo();
  TSchedModel.init(&STI);
  // Before we handle the more subtle cases of register-register CMOVs inside
  // of potentially hot loops, we want to quickly remove all CMOVs with
  // a memory operand. The CMOV will risk a stall waiting for the load to
  // complete that speculative execution behind a branch is better suited to
  // handle on modern x86 chips.
  if (ForceMemOperand) {
    CmovGroups AllCmovGroups;
    SmallVector<MachineBasicBlock *, 4> Blocks;
    for (auto &MBB : MF)
      Blocks.push_back(&MBB);
    if (collectCmovCandidates(Blocks, AllCmovGroups, /*IncludeLoads*/ true)) {
      for (auto &Group : AllCmovGroups) {
        // Skip any group that doesn't do at least one memory operand cmov.
        if (!llvm::any_of(Group, [&](MachineInstr *I) { return I->mayLoad(); }))
          continue;

        // For CMOV groups which we can rewrite and which contain a memory
        // load, always rewrite them. On x86, a CMOV will dramatically amplify
        // any memory latency by blocking speculative execution.
        Changed = true;
        convertCmovInstsToBranches(Group);
      }
    }
  }
  //===--------------------------------------------------------------------===//
  // Register-operand Conversion Algorithm
  // ---------------------------------------
  //   For each innermost loop
  //     collectCmovCandidates() {
  //       Find all CMOV-group-candidates.
  //     }
  //
  //     checkForProfitableCmovCandidates() {
  //       * Calculate both loop-depth and optimized-loop-depth.
  //       * Use these depths to check for loop transformation profitability.
  //       * Check for CMOV-group-candidate transformation profitability.
  //     }
  //
  //     For each profitable CMOV-group-candidate
  //       convertCmovInstsToBranches() {
  //         * Create FalseBB, SinkBB, Conditional branch to SinkBB.
  //         * Replace each CMOV instruction with a PHI instruction in SinkBB.
  //       }
  //
  // Note: For more details, see each function description.
  //===--------------------------------------------------------------------===//
  // Build up the loops in pre-order.
  SmallVector<MachineLoop *, 4> Loops(MLI.begin(), MLI.end());
  // Note that we need to check size on each iteration as we accumulate child
  // loops.
  for (int i = 0; i < (int)Loops.size(); ++i)
    for (MachineLoop *Child : Loops[i]->getSubLoops())
      Loops.push_back(Child);

  for (MachineLoop *CurrLoop : Loops) {
    // Optimize only innermost loops.
    if (!CurrLoop->getSubLoops().empty())
      continue;

    // List of consecutive CMOV instructions to be processed.
    CmovGroups CmovInstGroups;

    if (!collectCmovCandidates(CurrLoop->getBlocks(), CmovInstGroups))
      continue;

    if (!checkForProfitableCmovCandidates(CurrLoop->getBlocks(),
                                          CmovInstGroups))
      continue;

    Changed = true;
    for (auto &Group : CmovInstGroups)
      convertCmovInstsToBranches(Group);
  }

  return Changed;
}
bool X86CmovConverterPass::collectCmovCandidates(
    ArrayRef<MachineBasicBlock *> Blocks, CmovGroups &CmovInstGroups,
    bool IncludeLoads) {
  //===--------------------------------------------------------------------===//
  // Collect all CMOV-group-candidates and add them into CmovInstGroups.
  //
  // CMOV-group:
  //   CMOV instructions in the same MBB that use the same EFLAGS def
  //   instruction.
  //
  // CMOV-group-candidate:
  //   CMOV-group where all the CMOV instructions are
  //     1. consecutive.
  //     2. have the same condition code or the opposite one.
  //     3. have only register operands (X86::CMOVrr).
  //===--------------------------------------------------------------------===//
  // List of possible improvements (TODOs):
  // ---------------------------------------
  // TODO: Add support for X86::CMOVrm instructions.
  // TODO: Add support for X86::SETcc instructions.
  // TODO: Add support for CMOV-groups with non-consecutive CMOV instructions.
  //===--------------------------------------------------------------------===//
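
  // For example (schematic, not exact MIR syntax): the following pair forms a
  // single CMOV-group-candidate, because both CMOVs use the same EFLAGS def
  // and their condition codes are opposites of each other:
  //   $eflags = CMP32rr %a, %b
  //   %x = CMOV32rr %t1, %f1, 4 /* COND_E */,  implicit $eflags
  //   %y = CMOV32rr %t2, %f2, 5 /* COND_NE */, implicit $eflags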
  // Currently processed CMOV-Group.
  CmovGroup Group;
  for (auto *MBB : Blocks) {
    Group.clear();
    // Condition code of the first CMOV instruction in the currently processed
    // range, and its opposite condition code.
    X86::CondCode FirstCC = X86::COND_INVALID, FirstOppCC = X86::COND_INVALID,
                  MemOpCC = X86::COND_INVALID;
    // Indicator of a non-CMOVrr instruction in the currently processed range.
    bool FoundNonCMOVInst = false;
    // Indicator for whether the currently processed CMOV-group should be
    // skipped.
    bool SkipGroup = false;
    for (auto &I : *MBB) {
      // Skip debug instructions.
      if (I.isDebugInstr())
        continue;

      X86::CondCode CC = X86::getCondFromCMov(I);
      // Check if we found an X86::CMOVrr instruction.
      if (CC != X86::COND_INVALID && (IncludeLoads || !I.mayLoad())) {
        if (Group.empty()) {
          // We found the first CMOV in the range; reset the flags.
          FirstCC = CC;
          FirstOppCC = X86::GetOppositeBranchCondition(CC);
          // Clear out the prior group's memory operand CC.
          MemOpCC = X86::COND_INVALID;
          FoundNonCMOVInst = false;
          SkipGroup = false;
        }
        Group.push_back(&I);
        // Check if it is a non-consecutive CMOV instruction or it has a
        // different condition code than FirstCC or FirstOppCC.
        if (FoundNonCMOVInst || (CC != FirstCC && CC != FirstOppCC))
          // Mark the SkipGroup indicator to skip the currently processed
          // CMOV-Group.
          SkipGroup = true;
        if (I.mayLoad()) {
          if (MemOpCC == X86::COND_INVALID)
            // The first memory operand CMOV.
            MemOpCC = CC;
          else if (CC != MemOpCC)
            // Can't handle mixed conditions with memory operands.
            SkipGroup = true;
        }
        // Check if we were relying on zero-extending behavior of the CMOV.
        if (!SkipGroup &&
            llvm::any_of(
                MRI->use_nodbg_instructions(I.defs().begin()->getReg()),
                [&](MachineInstr &UseI) {
                  return UseI.getOpcode() == X86::SUBREG_TO_REG;
                }))
          // FIXME: We should model the cost of using an explicit MOV to handle
          // the zero-extension rather than just refusing to handle this.
          SkipGroup = true;
        continue;
      }
      // If Group is empty, keep looking for the first CMOV in the range.
      if (Group.empty())
        continue;

      // We found a non-X86::CMOVrr instruction.
      FoundNonCMOVInst = true;
      // Check if this instruction defines EFLAGS, to determine the end of the
      // processed range, as there would be no more instructions using the
      // current EFLAGS def.
      if (I.definesRegister(X86::EFLAGS)) {
        // Check if the currently processed CMOV-group should not be skipped
        // and add it as a CMOV-group-candidate.
        if (!SkipGroup)
          CmovInstGroups.push_back(Group);
        else
          ++NumOfSkippedCmovGroups;
        Group.clear();
      }
    }
    // End of basic block is considered end of range; check if the currently
    // processed CMOV-group should not be skipped and add it as a
    // CMOV-group-candidate.
    if (Group.empty())
      continue;
    if (!SkipGroup)
      CmovInstGroups.push_back(Group);
    else
      ++NumOfSkippedCmovGroups;
  }
  NumOfCmovGroupCandidate += CmovInstGroups.size();
  return !CmovInstGroups.empty();
}
/// \returns Depth of CMOV instruction as if it was converted into branch.
/// \param TrueOpDepth depth cost of CMOV true-value operand.
/// \param FalseOpDepth depth cost of CMOV false-value operand.
static unsigned getDepthOfOptCmov(unsigned TrueOpDepth, unsigned FalseOpDepth) {
  // The depth of the result after branch conversion is
  //   TrueOpDepth * TrueOpProbability + FalseOpDepth * FalseOpProbability.
  // As we have no info about branch weights, we assume 75% for one and 25% for
  // the other, and pick the result with the larger resulting depth.
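  // For example, a worked instance of the formula below:
  //   getDepthOfOptCmov(8, 2) = max(divideCeil(8 * 3 + 2, 4),
  //                                 divideCeil(2 * 3 + 8, 4))
  //                           = max(7, 4) = 7,
  // i.e., the deeper operand dominates the estimate under the assumed 75/25
  // split.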
  return std::max(divideCeil(TrueOpDepth * 3 + FalseOpDepth, 4),
                  divideCeil(FalseOpDepth * 3 + TrueOpDepth, 4));
}
bool X86CmovConverterPass::checkForProfitableCmovCandidates(
    ArrayRef<MachineBasicBlock *> Blocks, CmovGroups &CmovInstGroups) {
  struct DepthInfo {
    /// Depth of original loop.
    unsigned Depth;
    /// Depth of optimized loop.
    unsigned OptDepth;
  };
  /// Number of loop iterations for which to calculate instruction depth; two
  /// iterations are enough (see Step 2 below).
  static const unsigned LoopIterations = 2;
  DenseMap<MachineInstr *, DepthInfo> DepthMap;
  DepthInfo LoopDepth[LoopIterations] = {{0, 0}, {0, 0}};
  enum { PhyRegType = 0, VirRegType = 1, RegTypeNum = 2 };
  /// For each register type maps the register to its last def instruction.
  DenseMap<unsigned, MachineInstr *> RegDefMaps[RegTypeNum];
  /// Maps a register operand to its def instruction, which can be nullptr if
  /// it is unknown (e.g., the operand is defined outside the loop).
  DenseMap<MachineOperand *, MachineInstr *> OperandToDefMap;

  // Set the depth of the unknown instruction (i.e., nullptr) to zero.
  DepthMap[nullptr] = {0, 0};

  SmallPtrSet<MachineInstr *, 4> CmovInstructions;
  for (auto &Group : CmovInstGroups)
    CmovInstructions.insert(Group.begin(), Group.end());
  //===--------------------------------------------------------------------===//
  // Step 1: Calculate instruction depth and loop depth.
  //
  // Optimized-Loop:
  //   loop with CMOV-group-candidates converted into branches.
  //
  // Instruction-Depth:
  //   instruction latency + max operand depth.
  //   * For a CMOV instruction in the optimized loop the depth is calculated
  //     as: CMOV latency + getDepthOfOptCmov(True-Op-Depth, False-Op-Depth)
  // TODO: Find a better way to estimate the latency of the branch instruction
  //       rather than using the CMOV latency.
  //
  // Loop-Depth:
  //   max instruction depth of all instructions in the loop.
  // Note: the instruction with max depth represents the critical path in the
  //       loop.
  //
  // Loop-Depth[i]:
  //   Loop-Depth calculated for the first `i` iterations.
  // Note: it is enough to calculate depth for up to two iterations.
  //
  // Depth-Diff[i]:
  //   Number of cycles saved in the first `i` iterations by optimizing the
  //   loop.
  //===--------------------------------------------------------------------===//
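  // For example (illustrative latencies): an ADD with latency 1 whose operand
  // defs have depths 3 and 5 gets Depth = max(3, 5) + 1 = 6. A CMOV with
  // latency 2, condition (EFLAGS def) depth 7, and value-operand depths 3 and
  // 5 gets Depth = max(3, 5, 7) + 2 = 9, but OptDepth =
  // getDepthOfOptCmov(3, 5) + 2 = 5 + 2 = 7, since the branch turns the data
  // dependence on the condition into a control dependence.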
  for (unsigned I = 0; I < LoopIterations; ++I) {
    DepthInfo &MaxDepth = LoopDepth[I];
    for (auto *MBB : Blocks) {
      // Clear physical registers Def map.
      RegDefMaps[PhyRegType].clear();
      for (MachineInstr &MI : *MBB) {
        // Skip debug instructions.
        if (MI.isDebugInstr())
          continue;

        unsigned MIDepth = 0;
        unsigned MIDepthOpt = 0;
        bool IsCMOV = CmovInstructions.count(&MI);
        for (auto &MO : MI.uses()) {
          // Check for "isUse()" because "uses()" also returns implicit
          // definitions.
          if (!MO.isReg() || !MO.isUse())
            continue;
          Register Reg = MO.getReg();
          auto &RDM = RegDefMaps[Register::isVirtualRegister(Reg)];
          if (MachineInstr *DefMI = RDM.lookup(Reg)) {
            OperandToDefMap[&MO] = DefMI;
            DepthInfo Info = DepthMap.lookup(DefMI);
            MIDepth = std::max(MIDepth, Info.Depth);
            if (!IsCMOV)
              MIDepthOpt = std::max(MIDepthOpt, Info.OptDepth);
          }
        }

        if (IsCMOV)
          MIDepthOpt = getDepthOfOptCmov(
              DepthMap[OperandToDefMap.lookup(&MI.getOperand(1))].OptDepth,
              DepthMap[OperandToDefMap.lookup(&MI.getOperand(2))].OptDepth);

        // Iterate over all operands to handle implicit definitions as well.
        for (auto &MO : MI.operands()) {
          if (!MO.isReg() || !MO.isDef())
            continue;
          Register Reg = MO.getReg();
          RegDefMaps[Register::isVirtualRegister(Reg)][Reg] = &MI;
        }

        unsigned Latency = TSchedModel.computeInstrLatency(&MI);
        DepthMap[&MI] = {MIDepth += Latency, MIDepthOpt += Latency};
        MaxDepth.Depth = std::max(MaxDepth.Depth, MIDepth);
        MaxDepth.OptDepth = std::max(MaxDepth.OptDepth, MIDepthOpt);
      }
    }
  }
  unsigned Diff[LoopIterations] = {LoopDepth[0].Depth - LoopDepth[0].OptDepth,
                                   LoopDepth[1].Depth - LoopDepth[1].OptDepth};
  //===--------------------------------------------------------------------===//
  // Step 2: Check if the loop is worth optimizing.
  // Worth-Optimize-Loop:
  //   case 1: Diff[1] == Diff[0]
  //           The critical path is iteration independent: there is no
  //           dependency of critical-path instructions on critical-path
  //           instructions of the previous iteration.
  //           Thus, it is enough to check the gain percent of the 1st
  //           iteration. To be conservative, the optimized loop needs to have
  //           a depth that is 12.5% fewer cycles than the original loop, per
  //           iteration.
  //
  //   case 2: Diff[1] > Diff[0]
  //           The critical path is iteration dependent: there is a dependency
  //           of critical-path instructions on critical-path instructions of
  //           the previous iteration.
  //           Thus, check the gain percent of the 2nd iteration (similar to
  //           the previous case), but it is also required to check the
  //           gradient of the gain: the change in Depth-Diff compared to the
  //           change in Loop-Depth between the 1st and 2nd iterations.
  //           To be conservative, the gradient needs to be at least 50%.
  //
  //   In addition, in order not to optimize loops with a very small gain, the
  //   gain (in cycles) after the 2nd iteration should not be less than a given
  //   threshold. Thus, the check (Diff[1] >= GainCycleThreshold) must apply.
  //
  // If the loop is not worth optimizing, remove all CMOV-group-candidates.
  //===--------------------------------------------------------------------===//
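  // For example (hypothetical depths): with Diff = {5, 8},
  // LoopDepth[0].Depth = 24, and LoopDepth[1].Depth = 30, case 2 applies:
  // the gradient check (8 - 5) * 2 = 6 >= 30 - 24 = 6 holds, the 12.5% check
  // 8 * 8 = 64 >= 30 holds, and 8 >= GainCycleThreshold (default 4), so the
  // loop is considered worth optimizing.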
  if (Diff[1] < GainCycleThreshold)
    return false;

  bool WorthOptLoop = false;
  if (Diff[1] == Diff[0])
    WorthOptLoop = Diff[0] * 8 >= LoopDepth[0].Depth;
  else if (Diff[1] > Diff[0])
    WorthOptLoop =
        (Diff[1] - Diff[0]) * 2 >= (LoopDepth[1].Depth - LoopDepth[0].Depth) &&
        (Diff[1] * 8 >= LoopDepth[1].Depth);

  if (!WorthOptLoop)
    return false;

  ++NumOfLoopCandidate;
  //===--------------------------------------------------------------------===//
  // Step 3: Check for each CMOV-group-candidate if it is worth optimizing.
  // Worth-Optimize-Group:
  //   Iff it is worth optimizing all CMOV instructions in the group.
  //
  // Worth-Optimize-CMOV:
  //   A predicted branch is faster than CMOV by the difference between the
  //   depth of the condition operand and the depth of the taken (predicted)
  //   value operand.
  //   To be conservative, the gain of such a CMOV transformation should cover
  //   at least 25% of the branch-misprediction penalty.
  //===--------------------------------------------------------------------===//
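  // For example, with a 20-cycle misprediction penalty, a CMOV with
  // CondCost = 12 and ValCost = 6 is kept, since (12 - 6) * 4 = 24 >= 20,
  // while the same CMOV with ValCost = 9 is dropped, since
  // (12 - 9) * 4 = 12 < 20.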
  unsigned MispredictPenalty = TSchedModel.getMCSchedModel()->MispredictPenalty;
  CmovGroups TempGroups;
  std::swap(TempGroups, CmovInstGroups);
  for (auto &Group : TempGroups) {
    bool WorthOpGroup = true;
    for (auto *MI : Group) {
      // Avoid CMOV instructions whose value is used as a pointer to load
      // from. This is another conservative check, to avoid converting a CMOV
      // instruction used in a tree-search-like algorithm, where the branch is
      // hard to predict.
      auto UIs = MRI->use_instructions(MI->defs().begin()->getReg());
      if (UIs.begin() != UIs.end() && ++UIs.begin() == UIs.end()) {
        unsigned Op = UIs.begin()->getOpcode();
        if (Op == X86::MOV64rm || Op == X86::MOV32rm) {
          WorthOpGroup = false;
          break;
        }
      }

      unsigned CondCost =
          DepthMap[OperandToDefMap.lookup(&MI->getOperand(4))].Depth;
      unsigned ValCost = getDepthOfOptCmov(
          DepthMap[OperandToDefMap.lookup(&MI->getOperand(1))].Depth,
          DepthMap[OperandToDefMap.lookup(&MI->getOperand(2))].Depth);
      if (ValCost > CondCost || (CondCost - ValCost) * 4 < MispredictPenalty) {
        WorthOpGroup = false;
        break;
      }
    }

    if (WorthOpGroup)
      CmovInstGroups.push_back(Group);
  }

  return !CmovInstGroups.empty();
}
static bool checkEFLAGSLive(MachineInstr *MI) {
  if (MI->killsRegister(X86::EFLAGS))
    return false;

  // The EFLAGS operand of MI might be missing a kill marker.
  // Figure out whether the EFLAGS register is live after the MI instruction.
  MachineBasicBlock *BB = MI->getParent();
  MachineBasicBlock::iterator ItrMI = MI;

  // Scan forward through BB for a use/def of EFLAGS.
  for (auto I = std::next(ItrMI), E = BB->end(); I != E; ++I) {
    if (I->readsRegister(X86::EFLAGS))
      return true;
    if (I->definesRegister(X86::EFLAGS))
      return false;
  }

  // We hit the end of the block; check whether EFLAGS is live into a
  // successor.
  for (auto I = BB->succ_begin(), E = BB->succ_end(); I != E; ++I) {
    if ((*I)->isLiveIn(X86::EFLAGS))
      return true;
  }

  return false;
}
/// Given \p First CMOV instruction and \p Last CMOV instruction representing a
/// group of CMOV instructions, which may contain debug instructions in
/// between, move all debug instructions to after the last CMOV instruction,
/// making the CMOV group consecutive.
static void packCmovGroup(MachineInstr *First, MachineInstr *Last) {
  assert(X86::getCondFromCMov(*Last) != X86::COND_INVALID &&
         "Last instruction in a CMOV group must be a CMOV instruction");

  SmallVector<MachineInstr *, 2> DBGInstructions;
  for (auto I = First->getIterator(), E = Last->getIterator(); I != E; I++) {
    if (I->isDebugInstr())
      DBGInstructions.push_back(&*I);
  }

  // Splice the debug instructions after the CMOV group.
  MachineBasicBlock *MBB = First->getParent();
  for (auto *MI : DBGInstructions)
    MBB->insertAfter(Last, MI->removeFromParent());
}
void X86CmovConverterPass::convertCmovInstsToBranches(
    SmallVectorImpl<MachineInstr *> &Group) const {
  assert(!Group.empty() && "No CMOV instructions to convert");
  ++NumOfOptimizedCmovGroups;

  // If the CMOV group is not packed, e.g., there are debug instructions
  // between the first CMOV and the last CMOV, then pack the group and make
  // the CMOV instructions consecutive by moving the debug instructions to
  // after the last CMOV.
  packCmovGroup(Group.front(), Group.back());

  // To convert a CMOVcc instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  //
  // Before:
  // MBB:
  //   cond = cmp ...
  //   v1 = CMOVge t1, f1, cond
  //   v2 = CMOVlt t2, f2, cond
  //   v3 = CMOVge v1, f3, cond
  //
  // After:
  // MBB:
  //   cond = cmp ...
  //   jge %SinkMBB
  //
  // FalseMBB:
  //   jmp %SinkMBB
  //
  // SinkMBB:
  //   %v1 = phi[%f1, %FalseMBB], [%t1, %MBB]
  //   %v2 = phi[%t2, %FalseMBB], [%f2, %MBB] ; For CMOV with OppCC switch
  //                                          ; true-value with false-value
  //   %v3 = phi[%f3, %FalseMBB], [%t1, %MBB] ; Phi instruction cannot use
  //                                          ; previous Phi instruction result
  MachineInstr &MI = *Group.front();
  MachineInstr *LastCMOV = Group.back();
  DebugLoc DL = MI.getDebugLoc();

  X86::CondCode CC = X86::CondCode(X86::getCondFromCMov(MI));
  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
  // Potentially swap the condition codes so that any memory operand to a CMOV
  // is in the *false* position instead of the *true* position. We can invert
  // any non-memory operand CMOV instructions to cope with this and we ensure
  // memory operand CMOVs are only included with a single condition code.
  if (llvm::any_of(Group, [&](MachineInstr *I) {
        return I->mayLoad() && X86::getCondFromCMov(*I) == CC;
      }))
    std::swap(CC, OppCC);

  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction::iterator It = ++MBB->getIterator();
  MachineFunction *F = MBB->getParent();
  const BasicBlock *BB = MBB->getBasicBlock();

  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB);
  F->insert(It, FalseMBB);
  F->insert(It, SinkMBB);
  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  if (checkEFLAGSLive(LastCMOV)) {
    FalseMBB->addLiveIn(X86::EFLAGS);
    SinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer the remainder of BB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(LastCMOV)), MBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Add the false and sink blocks as its successors.
  MBB->addSuccessor(FalseMBB);
  MBB->addSuccessor(SinkMBB);

  // Create the conditional branch instruction.
  BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);

  // Add the sink block to the false block successors.
  FalseMBB->addSuccessor(SinkMBB);
  MachineInstrBuilder MIB;
  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
  MachineBasicBlock::iterator MIItEnd =
      std::next(MachineBasicBlock::iterator(LastCMOV));
  MachineBasicBlock::iterator FalseInsertionPoint = FalseMBB->begin();
  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // First we need to insert an explicit load on the false path for any memory
  // operand. We also need to potentially do register rewriting here, but it is
  // simpler as the memory operands are always on the false path so we can
  // simply take that input, whatever it is.
  DenseMap<unsigned, unsigned> FalseBBRegRewriteTable;
  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;) {
    auto &MI = *MIIt++;
    // Skip any CMOVs in this group which don't load from memory.
    if (!MI.mayLoad()) {
      // Remember the false-side register input.
      Register FalseReg =
          MI.getOperand(X86::getCondFromCMov(MI) == CC ? 1 : 2).getReg();
      // Walk back through any intermediate cmovs referenced.
      while (true) {
        auto FRIt = FalseBBRegRewriteTable.find(FalseReg);
        if (FRIt == FalseBBRegRewriteTable.end())
          break;
        FalseReg = FRIt->second;
      }
      FalseBBRegRewriteTable[MI.getOperand(0).getReg()] = FalseReg;
      continue;
    }

    // The condition must be the *opposite* of the one we've decided to branch
    // on as the branch will go *around* the load and the load should happen
    // when the CMOV condition is false.
    assert(X86::getCondFromCMov(MI) == OppCC &&
           "Can only handle memory-operand cmov instructions with a condition "
           "opposite to the selected branch direction.");
    // The goal is to rewrite the cmov from:
    //
    //   MBB:
    //     %A = CMOVcc %B (tied), (mem)
    //
    // to:
    //
    //   MBB:
    //     %A = CMOVcc %B (tied), %C
    //   FalseMBB:
    //     %C = MOV (mem)
    //
    // Which will allow the next loop to rewrite the CMOV in terms of a PHI:
    //
    //   MBB:
    //     %A = CMOVcc %B (tied), %C
    //   FalseMBB:
    //     %C = MOV (mem)
    //   SinkMBB:
    //     %A = PHI [ %C, FalseMBB ], [ %B, MBB]
    // Get a fresh register to use as the destination of the MOV.
    const TargetRegisterClass *RC = MRI->getRegClass(MI.getOperand(0).getReg());
    Register TmpReg = MRI->createVirtualRegister(RC);

    SmallVector<MachineInstr *, 4> NewMIs;
    bool Unfolded = TII->unfoldMemoryOperand(*MBB->getParent(), MI, TmpReg,
                                             /*UnfoldLoad*/ true,
                                             /*UnfoldStore*/ false, NewMIs);
    (void)Unfolded;
    assert(Unfolded && "Should never fail to unfold a loading cmov!");

    // Move the new CMOV to just before the old one and reset any impacted
    // iterator.
    auto *NewCMOV = NewMIs.pop_back_val();
    assert(X86::getCondFromCMov(*NewCMOV) == OppCC &&
           "Last new instruction isn't the expected CMOV!");
    LLVM_DEBUG(dbgs() << "\tRewritten cmov: "; NewCMOV->dump());
    MBB->insert(MachineBasicBlock::iterator(MI), NewCMOV);
    if (&*MIItBegin == &MI)
      MIItBegin = MachineBasicBlock::iterator(NewCMOV);

    // Sink whatever instructions were needed to produce the unfolded operand
    // into the false block.
    for (auto *NewMI : NewMIs) {
      LLVM_DEBUG(dbgs() << "\tRewritten load instr: "; NewMI->dump());
      FalseMBB->insert(FalseInsertionPoint, NewMI);
      // Re-map any operands that are from other CMOVs to the inputs for this
      // block.
      for (auto &MOp : NewMI->uses()) {
        if (!MOp.isReg())
          continue;
        auto It = FalseBBRegRewriteTable.find(MOp.getReg());
        if (It == FalseBBRegRewriteTable.end())
          continue;

        MOp.setReg(It->second);
        // This might have been a kill when it referenced the cmov result, but
        // it won't necessarily be once rewritten.
        // FIXME: We could potentially improve this by tracking whether the
        // operand to the cmov was also a kill, and then skipping the PHI node
        // construction below.
        MOp.setIsKill(false);
      }
    }
    MBB->erase(MachineBasicBlock::iterator(MI),
               std::next(MachineBasicBlock::iterator(MI)));

    // Record the new CMOV's destination in the rewrite table so later memory
    // CMOVs can refer to the unfolded load result.
    FalseBBRegRewriteTable[NewCMOV->getOperand(0).getReg()] = TmpReg;
  }
  // As we are creating the PHIs, we have to be careful if there is more than
  // one. Later CMOVs may reference the results of earlier CMOVs, but later
  // PHIs have to reference the individual true/false inputs from earlier
  // PHIs. That also means that PHI construction must work forward from
  // earlier to later, and that the code must maintain a mapping from each
  // earlier PHI's destination register to the registers that went into that
  // PHI.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
    Register DestReg = MIIt->getOperand(0).getReg();
    Register Op1Reg = MIIt->getOperand(1).getReg();
    Register Op2Reg = MIIt->getOperand(2).getReg();

    // If this CMOV we are processing is the opposite condition from the jump
    // we generated, then we have to swap the operands for the PHI that is
    // going to be in the sink block.
    if (X86::getCondFromCMov(*MIIt) == OppCC)
      std::swap(Op1Reg, Op2Reg);

    auto Op1Itr = RegRewriteTable.find(Op1Reg);
    if (Op1Itr != RegRewriteTable.end())
      Op1Reg = Op1Itr->second.first;

    auto Op2Itr = RegRewriteTable.find(Op2Reg);
    if (Op2Itr != RegRewriteTable.end())
      Op2Reg = Op2Itr->second.second;

    // SinkMBB:
    //   %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, MBB ]
    MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
              .addReg(Op1Reg)
              .addMBB(FalseMBB)
              .addReg(Op2Reg)
              .addMBB(MBB);
    LLVM_DEBUG(dbgs() << "\tFrom: "; MIIt->dump());
    LLVM_DEBUG(dbgs() << "\tTo: "; MIB->dump());

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
  }
  // Now remove the CMOV(s).
  MBB->erase(MIItBegin, MIItEnd);
}
INITIALIZE_PASS_BEGIN(X86CmovConverterPass, DEBUG_TYPE, "X86 cmov Conversion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(X86CmovConverterPass, DEBUG_TYPE, "X86 cmov Conversion",
                    false, false)

FunctionPass *llvm::createX86CmovConverterPass() {
  return new X86CmovConverterPass();
}