//===- X86FlagsCopyLowering.cpp - Lowers COPY nodes of EFLAGS ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// Lowers COPY nodes of EFLAGS by directly extracting and preserving individual
/// flag bits.
///
/// We have to do this by carefully analyzing and rewriting the usage of the
/// copied EFLAGS register because there is no general way to rematerialize the
/// entire EFLAGS register safely and efficiently. Using `popf` both forces
/// dynamic stack adjustment and can create correctness issues due to IF, TF,
/// and other non-status flags being overwritten. Sequences involving SAHF
/// don't work on all x86 processors, and are often quite slow compared to
/// directly testing a single status preserved in its own GPR.
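///
/// As a sketch of the strategy this pass implements: each condition that is
/// needed gets saved into a GPR with a `setCC` placed next to the instruction
/// that defined EFLAGS, and at each use site a `testb %reg, %reg` of that GPR
/// re-materializes ZF so the user can be rewritten in terms of ZF. The
/// rewrite helpers below carry this out per kind of use.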
///
//===----------------------------------------------------------------------===//

25 #include "X86InstrBuilder.h"
26 #include "X86InstrInfo.h"
27 #include "X86Subtarget.h"
28 #include "llvm/ADT/ArrayRef.h"
29 #include "llvm/ADT/DenseMap.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/ScopeExit.h"
32 #include "llvm/ADT/SmallPtrSet.h"
33 #include "llvm/ADT/SmallSet.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/SparseBitVector.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/CodeGen/MachineBasicBlock.h"
38 #include "llvm/CodeGen/MachineConstantPool.h"
39 #include "llvm/CodeGen/MachineDominators.h"
40 #include "llvm/CodeGen/MachineFunction.h"
41 #include "llvm/CodeGen/MachineFunctionPass.h"
42 #include "llvm/CodeGen/MachineInstr.h"
43 #include "llvm/CodeGen/MachineInstrBuilder.h"
44 #include "llvm/CodeGen/MachineModuleInfo.h"
45 #include "llvm/CodeGen/MachineOperand.h"
46 #include "llvm/CodeGen/MachineRegisterInfo.h"
47 #include "llvm/CodeGen/MachineSSAUpdater.h"
48 #include "llvm/CodeGen/TargetInstrInfo.h"
49 #include "llvm/CodeGen/TargetRegisterInfo.h"
50 #include "llvm/CodeGen/TargetSchedule.h"
51 #include "llvm/CodeGen/TargetSubtargetInfo.h"
52 #include "llvm/IR/DebugLoc.h"
53 #include "llvm/MC/MCSchedule.h"
54 #include "llvm/Pass.h"
55 #include "llvm/Support/CommandLine.h"
56 #include "llvm/Support/Debug.h"
57 #include "llvm/Support/raw_ostream.h"
#define PASS_KEY "x86-flags-copy-lowering"
#define DEBUG_TYPE PASS_KEY

STATISTIC(NumCopiesEliminated, "Number of copies of EFLAGS eliminated");
STATISTIC(NumSetCCsInserted, "Number of setCC instructions inserted");
STATISTIC(NumTestsInserted, "Number of test instructions inserted");
STATISTIC(NumAddsInserted, "Number of add instructions inserted");

namespace llvm {

void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);

} // end namespace llvm

namespace {

// Convenient array type for storing registers associated with each condition.
using CondRegArray = std::array<unsigned, X86::LAST_VALID_COND + 1>;
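// The array is indexed directly by X86::CondCode values; a zero entry means
// no virtual register currently holds that condition.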

class X86FlagsCopyLoweringPass : public MachineFunctionPass {
public:
  X86FlagsCopyLoweringPass() : MachineFunctionPass(ID) {
    initializeX86FlagsCopyLoweringPassPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "X86 EFLAGS copy lowering"; }
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Pass identification, replacement for typeid.
  static char ID;

private:
  MachineRegisterInfo *MRI;
  const X86InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const TargetRegisterClass *PromoteRC;
  MachineDominatorTree *MDT;

  CondRegArray collectCondsInRegs(MachineBasicBlock &MBB,
                                  MachineInstr &CopyDefI);

  unsigned promoteCondToReg(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator TestPos,
                            DebugLoc TestLoc, X86::CondCode Cond);
  std::pair<unsigned, bool>
  getCondOrInverseInReg(MachineBasicBlock &TestMBB,
                        MachineBasicBlock::iterator TestPos, DebugLoc TestLoc,
                        X86::CondCode Cond, CondRegArray &CondRegs);
  void insertTest(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
                  DebugLoc Loc, unsigned Reg);

  void rewriteArithmetic(MachineBasicBlock &TestMBB,
                         MachineBasicBlock::iterator TestPos, DebugLoc TestLoc,
                         MachineInstr &MI, MachineOperand &FlagUse,
                         CondRegArray &CondRegs);
  void rewriteCMov(MachineBasicBlock &TestMBB,
                   MachineBasicBlock::iterator TestPos, DebugLoc TestLoc,
                   MachineInstr &CMovI, MachineOperand &FlagUse,
                   CondRegArray &CondRegs);
  void rewriteCondJmp(MachineBasicBlock &TestMBB,
                      MachineBasicBlock::iterator TestPos, DebugLoc TestLoc,
                      MachineInstr &JmpI, CondRegArray &CondRegs);
  void rewriteCopy(MachineInstr &MI, MachineOperand &FlagUse,
                   MachineInstr &CopyDefI);
  void rewriteSetCarryExtended(MachineBasicBlock &TestMBB,
                               MachineBasicBlock::iterator TestPos,
                               DebugLoc TestLoc, MachineInstr &SetBI,
                               MachineOperand &FlagUse, CondRegArray &CondRegs);
  void rewriteSetCC(MachineBasicBlock &TestMBB,
                    MachineBasicBlock::iterator TestPos, DebugLoc TestLoc,
                    MachineInstr &SetCCI, MachineOperand &FlagUse,
                    CondRegArray &CondRegs);
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(X86FlagsCopyLoweringPass, DEBUG_TYPE,
                      "X86 EFLAGS copy lowering", false, false)
INITIALIZE_PASS_END(X86FlagsCopyLoweringPass, DEBUG_TYPE,
                    "X86 EFLAGS copy lowering", false, false)

FunctionPass *llvm::createX86FlagsCopyLoweringPass() {
  return new X86FlagsCopyLoweringPass();
}

char X86FlagsCopyLoweringPass::ID = 0;

void X86FlagsCopyLoweringPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<MachineDominatorTree>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

namespace {

/// An enumeration of the arithmetic instruction mnemonics which have
/// interesting flag semantics.
///
/// We can map instruction opcodes into these mnemonics to make it easy to
/// dispatch with specific functionality.
enum class FlagArithMnemonic {
  ADC,
  ADCX,
  ADOX,
  RCL,
  RCR,
  SBB,
};

} // end anonymous namespace

static FlagArithMnemonic getMnemonicFromOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    report_fatal_error("No support for lowering a copy into EFLAGS when used "
                       "by this instruction!");

#define LLVM_EXPAND_INSTR_SIZES(MNEMONIC, SUFFIX)                              \
  case X86::MNEMONIC##8##SUFFIX:                                               \
  case X86::MNEMONIC##16##SUFFIX:                                              \
  case X86::MNEMONIC##32##SUFFIX:                                              \
  case X86::MNEMONIC##64##SUFFIX:

#define LLVM_EXPAND_ADC_SBB_INSTR(MNEMONIC)                                    \
  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rr)                                        \
  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rr_REV)                                    \
  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rm)                                        \
  LLVM_EXPAND_INSTR_SIZES(MNEMONIC, mr)                                        \
  case X86::MNEMONIC##8ri:                                                     \
  case X86::MNEMONIC##16ri8:                                                   \
  case X86::MNEMONIC##32ri8:                                                   \
  case X86::MNEMONIC##64ri8:                                                   \
  case X86::MNEMONIC##16ri:                                                    \
  case X86::MNEMONIC##32ri:                                                    \
  case X86::MNEMONIC##64ri32:                                                  \
  case X86::MNEMONIC##8mi:                                                     \
  case X86::MNEMONIC##16mi8:                                                   \
  case X86::MNEMONIC##32mi8:                                                   \
  case X86::MNEMONIC##64mi8:                                                   \
  case X86::MNEMONIC##16mi:                                                    \
  case X86::MNEMONIC##32mi:                                                    \
  case X86::MNEMONIC##64mi32:                                                  \
  case X86::MNEMONIC##8i8:                                                     \
  case X86::MNEMONIC##16i16:                                                   \
  case X86::MNEMONIC##32i32:                                                   \
  case X86::MNEMONIC##64i32:

    LLVM_EXPAND_ADC_SBB_INSTR(ADC)
    return FlagArithMnemonic::ADC;

    LLVM_EXPAND_ADC_SBB_INSTR(SBB)
    return FlagArithMnemonic::SBB;

#undef LLVM_EXPAND_ADC_SBB_INSTR

    LLVM_EXPAND_INSTR_SIZES(RCL, rCL)
    LLVM_EXPAND_INSTR_SIZES(RCL, r1)
    LLVM_EXPAND_INSTR_SIZES(RCL, ri)
    return FlagArithMnemonic::RCL;

    LLVM_EXPAND_INSTR_SIZES(RCR, rCL)
    LLVM_EXPAND_INSTR_SIZES(RCR, r1)
    LLVM_EXPAND_INSTR_SIZES(RCR, ri)
    return FlagArithMnemonic::RCR;

#undef LLVM_EXPAND_INSTR_SIZES

  case X86::ADCX32rr:
  case X86::ADCX64rr:
  case X86::ADCX32rm:
  case X86::ADCX64rm:
    return FlagArithMnemonic::ADCX;

  case X86::ADOX32rr:
  case X86::ADOX64rr:
  case X86::ADOX32rm:
  case X86::ADOX64rm:
    return FlagArithMnemonic::ADOX;
  }
}

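/// Split the given block so that the conditional branch `SplitI` starts a new
/// block of terminators.
///
/// The tail of branches beginning at `SplitI` is spliced into a freshly
/// created successor block, and successor lists, branch probabilities, and
/// PHI nodes in the affected successors are updated to match.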
static MachineBasicBlock &splitBlock(MachineBasicBlock &MBB,
                                     MachineInstr &SplitI,
                                     const X86InstrInfo &TII) {
  MachineFunction &MF = *MBB.getParent();

  assert(SplitI.getParent() == &MBB &&
         "Split instruction must be in the split block!");
  assert(SplitI.isBranch() &&
         "Only designed to split a tail of branch instructions!");
  assert(X86::getCondFromBranchOpc(SplitI.getOpcode()) != X86::COND_INVALID &&
         "Must split on an actual jCC instruction!");

  // Dig out the previous instruction to the split point.
  MachineInstr &PrevI = *std::prev(SplitI.getIterator());
  assert(PrevI.isBranch() && "Must split after a branch!");
  assert(X86::getCondFromBranchOpc(PrevI.getOpcode()) != X86::COND_INVALID &&
         "Must split after an actual jCC instruction!");
  assert(!std::prev(PrevI.getIterator())->isTerminator() &&
         "Must only have this one terminator prior to the split!");

  // Grab the one successor edge that will stay in `MBB`.
  MachineBasicBlock &UnsplitSucc = *PrevI.getOperand(0).getMBB();

  // Analyze the original block to see if we are actually splitting an edge
  // into two edges. This can happen when we have multiple conditional jumps to
  // the same successor.
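  // (For example, with `jne %bb.1; jbe %bb.1; jmp %bb.2` and a split at the
  // `jbe`, both the original block and the new block end with an edge to
  // %bb.1, so that one edge has genuinely been split in two.)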
  bool IsEdgeSplit =
      std::any_of(SplitI.getIterator(), MBB.instr_end(),
                  [&](MachineInstr &MI) {
                    assert(MI.isTerminator() &&
                           "Should only have spliced terminators!");
                    return llvm::any_of(
                        MI.operands(), [&](MachineOperand &MOp) {
                          return MOp.isMBB() && MOp.getMBB() == &UnsplitSucc;
                        });
                  }) ||
      MBB.getFallThrough() == &UnsplitSucc;

  MachineBasicBlock &NewMBB = *MF.CreateMachineBasicBlock();

  // Insert the new block immediately after the current one. Any existing
  // fallthrough will be sunk into this new block anyways.
  MF.insert(std::next(MachineFunction::iterator(&MBB)), &NewMBB);

  // Splice the tail of instructions into the new block.
  NewMBB.splice(NewMBB.end(), &MBB, SplitI.getIterator(), MBB.end());

  // Copy the necessary successors (and their probability info) into the new
  // block.
  for (auto SI = MBB.succ_begin(), SE = MBB.succ_end(); SI != SE; ++SI)
    if (IsEdgeSplit || *SI != &UnsplitSucc)
      NewMBB.copySuccessor(&MBB, SI);
  // Normalize the probabilities if we didn't end up splitting the edge.
  if (!IsEdgeSplit)
    NewMBB.normalizeSuccProbs();

  // Now replace all of the moved successors in the original block with the new
  // block. This will merge their probabilities.
  for (MachineBasicBlock *Succ : NewMBB.successors())
    if (Succ != &UnsplitSucc)
      MBB.replaceSuccessor(Succ, &NewMBB);

  // We should always end up replacing at least one successor.
  assert(MBB.isSuccessor(&NewMBB) &&
         "Failed to make the new block a successor!");

  // Now update all the PHIs.
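  // A machine PHI lists (value, predecessor block) operand pairs after the
  // def, which is why the scan below starts at operand 1 and steps by 2.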
  for (MachineBasicBlock *Succ : NewMBB.successors()) {
    for (MachineInstr &MI : *Succ) {
      if (!MI.isPHI())
        continue;

      for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
           OpIdx += 2) {
        MachineOperand &OpV = MI.getOperand(OpIdx);
        MachineOperand &OpMBB = MI.getOperand(OpIdx + 1);
        assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
        if (OpMBB.getMBB() != &MBB)
          continue;

        // Replace the operand for unsplit successors.
        if (!IsEdgeSplit || Succ != &UnsplitSucc) {
          OpMBB.setMBB(&NewMBB);

          // We have to continue scanning as there may be multiple entries in
          // the PHI.
          continue;
        }

        // When we have split the edge append a new successor.
        MI.addOperand(MF, OpV);
        MI.addOperand(MF, MachineOperand::CreateMBB(&NewMBB));
        break;
      }
    }
  }

  return NewMBB;
}

bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
  DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
               << " **********\n");

  auto &Subtarget = MF.getSubtarget<X86Subtarget>();
  MRI = &MF.getRegInfo();
  TII = Subtarget.getInstrInfo();
  TRI = Subtarget.getRegisterInfo();
  MDT = &getAnalysis<MachineDominatorTree>();
  PromoteRC = &X86::GR8RegClass;

  if (MF.begin() == MF.end())
    // Nothing to do for a degenerate empty function...
    return false;

  SmallVector<MachineInstr *, 4> Copies;
  for (MachineBasicBlock &MBB : MF)
    for (MachineInstr &MI : MBB)
      if (MI.getOpcode() == TargetOpcode::COPY &&
          MI.getOperand(0).getReg() == X86::EFLAGS)
        Copies.push_back(&MI);

  for (MachineInstr *CopyI : Copies) {
    MachineBasicBlock &MBB = *CopyI->getParent();

    MachineOperand &VOp = CopyI->getOperand(1);
    assert(VOp.isReg() &&
           "The input to the copy for EFLAGS should always be a register!");
    MachineInstr &CopyDefI = *MRI->getVRegDef(VOp.getReg());
    if (CopyDefI.getOpcode() != TargetOpcode::COPY) {
      // FIXME: The big likely candidates here are PHI nodes. We could in
      // theory handle PHI nodes, but it gets really, really hard. Insanely
      // hard. Hard enough that it is probably better to change every other
      // part of LLVM to avoid creating them. The issue is that once we have
      // PHIs we won't know which original EFLAGS value we need to capture with
      // our setCCs below. The end result will be computing a complete set of
      // setCCs that we *might* want, computing them in every place where we
      // copy *out* of EFLAGS and then doing SSA formation on all of them to
      // insert necessary PHI nodes and consume those here. Then hoping that
      // somehow we DCE the unnecessary ones. This DCE seems very unlikely to
      // be successful and so we will almost certainly end up with a glut of
      // dead setCC instructions. Until we have a motivating test case and fail
      // to avoid it by changing other parts of LLVM's lowering, we refuse to
      // handle this complex case here.
388 DEBUG(dbgs() << "ERROR: Encountered unexpected def of an eflags copy: ";
391 "Cannot lower EFLAGS copy unless it is defined in turn by a copy!");
394 auto Cleanup = make_scope_exit([&] {
      // All uses of the EFLAGS copy are now rewritten, kill the copy into
      // eflags and if dead the copy from.
      CopyI->eraseFromParent();
      if (MRI->use_empty(CopyDefI.getOperand(0).getReg()))
        CopyDefI.eraseFromParent();
      ++NumCopiesEliminated;
    });

    MachineOperand &DOp = CopyI->getOperand(0);
    assert(DOp.isDef() && "Expected register def!");
    assert(DOp.getReg() == X86::EFLAGS && "Unexpected copy def register!");
    if (DOp.isDead())
      continue;

    MachineBasicBlock &TestMBB = *CopyDefI.getParent();
    auto TestPos = CopyDefI.getIterator();
    DebugLoc TestLoc = CopyDefI.getDebugLoc();

    DEBUG(dbgs() << "Rewriting copy: "; CopyI->dump());

    // Scan for usage of newly set EFLAGS so we can rewrite them. We just buffer
    // jumps because their usage is very constrained.
    bool FlagsKilled = false;
    SmallVector<MachineInstr *, 4> JmpIs;

    // Gather the condition flags that have already been preserved in
    // registers. We do this from scratch each time as we expect there to be
    // very few of them and we expect to not revisit the same copy definition
    // many times. If either of those change sufficiently we could build a map
    // of these up front instead.
    CondRegArray CondRegs = collectCondsInRegs(TestMBB, CopyDefI);

    // Collect the basic blocks we need to scan. Typically this will just be
    // a single basic block but we may have to scan multiple blocks if the
    // EFLAGS copy lives into successors.
    SmallVector<MachineBasicBlock *, 2> Blocks;
    SmallPtrSet<MachineBasicBlock *, 2> VisitedBlocks;
    Blocks.push_back(&MBB);
    VisitedBlocks.insert(&MBB);

    do {
      MachineBasicBlock &UseMBB = *Blocks.pop_back_val();

      // We currently don't do any PHI insertion and so we require that the
      // test basic block dominates all of the use basic blocks.
      //
      // We could in theory do PHI insertion here if it becomes useful by just
      // taking undef values in along every edge that we don't trace this
      // EFLAGS copy along. This isn't as bad as fully general PHI insertion,
      // but still seems like a great deal of complexity.
      //
      // Because it is theoretically possible that some earlier MI pass or
      // other lowering transformation could induce this to happen, we do
      // a hard check even in non-debug builds here.
      if (&TestMBB != &UseMBB && !MDT->dominates(&TestMBB, &UseMBB)) {
        DEBUG({
          dbgs() << "ERROR: Encountered use that is not dominated by our test "
                    "basic block! Rewriting this would require inserting PHI "
                    "nodes to track the flag state across the CFG.\n\nTest "
                    "block:\n";
          TestMBB.dump();
          dbgs() << "Use block:\n";
          UseMBB.dump();
        });
        report_fatal_error("Cannot lower EFLAGS copy when original copy def "
                           "does not dominate all uses.");
      }

      for (auto MII = &UseMBB == &MBB ? std::next(CopyI->getIterator())
                                      : UseMBB.instr_begin(),
                MIE = UseMBB.instr_end();
           MII != MIE;) {
        MachineInstr &MI = *MII++;
        MachineOperand *FlagUse = MI.findRegisterUseOperand(X86::EFLAGS);
        if (!FlagUse) {
          if (MI.findRegisterDefOperand(X86::EFLAGS)) {
            // If EFLAGS are defined, it's as-if they were killed. We can stop
            // scanning here.
            //
            // NB!!! Many instructions only modify some flags. LLVM currently
            // models this as clobbering all flags, but if that ever changes
            // this will need to be carefully updated to handle that more
            // complex logic.
            FlagsKilled = true;
            break;
          }
          continue;
        }

        DEBUG(dbgs() << "  Rewriting use: "; MI.dump());

        // Check the kill flag before we rewrite as that may change it.
        if (FlagUse->isKill())
          FlagsKilled = true;

        // Once we encounter a branch, the rest of the instructions must also be
        // branches. We can't rewrite in place here, so we handle them below.
        //
        // Note that we don't have to handle tail calls here, even conditional
        // tail calls, as those are not introduced into the X86 MI until post-RA
        // branch folding or block placement. As a consequence, we get to deal
        // with the simpler formulation of conditional branches followed by tail
        // calls.
        if (X86::getCondFromBranchOpc(MI.getOpcode()) != X86::COND_INVALID) {
          auto JmpIt = MI.getIterator();
          do {
            JmpIs.push_back(&*JmpIt);
            ++JmpIt;
          } while (JmpIt != UseMBB.instr_end() &&
                   X86::getCondFromBranchOpc(JmpIt->getOpcode()) !=
                       X86::COND_INVALID);
          break;
        }

        // Otherwise we can just rewrite in-place.
        if (X86::getCondFromCMovOpc(MI.getOpcode()) != X86::COND_INVALID) {
          rewriteCMov(TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
        } else if (X86::getCondFromSETOpc(MI.getOpcode()) !=
                   X86::COND_INVALID) {
          rewriteSetCC(TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
        } else if (MI.getOpcode() == TargetOpcode::COPY) {
          rewriteCopy(MI, *FlagUse, CopyDefI);
        } else {
          // We assume all other instructions that use flags also def them.
          assert(MI.findRegisterDefOperand(X86::EFLAGS) &&
                 "Expected a def of EFLAGS for this instruction!");

          // NB!!! Several arithmetic instructions only *partially* update
          // flags. Theoretically, we could generate MI code sequences that
          // would rely on this fact and observe different flags independently.
          // But currently LLVM models all of these instructions as clobbering
          // all the flags in an undef way. We rely on that to simplify the
          // logic.
          FlagsKilled = true;

          switch (MI.getOpcode()) {
          case X86::SETB_C8r:
          case X86::SETB_C16r:
          case X86::SETB_C32r:
          case X86::SETB_C64r:
            // Use custom lowering for arithmetic that is merely extending the
            // carry flag. We model this as the SETB_C* pseudo instructions.
            rewriteSetCarryExtended(TestMBB, TestPos, TestLoc, MI, *FlagUse,
                                    CondRegs);
            break;

          default:
            // Generically handle remaining uses as arithmetic instructions.
            rewriteArithmetic(TestMBB, TestPos, TestLoc, MI, *FlagUse,
                              CondRegs);
            break;
          }
          break;
        }

        // If this was the last use of the flags, we're done.
        if (FlagsKilled)
          break;
      }

      // If the flags were killed, we're done with this block.
      if (FlagsKilled)
        break;

      // Otherwise we need to scan successors for ones where the flags live-in
      // and queue those up for processing.
      for (MachineBasicBlock *SuccMBB : UseMBB.successors())
        if (SuccMBB->isLiveIn(X86::EFLAGS) &&
            VisitedBlocks.insert(SuccMBB).second)
          Blocks.push_back(SuccMBB);
    } while (!Blocks.empty());

    // Now rewrite the jumps that use the flags. These we handle specially
    // because if there are multiple jumps in a single basic block we'll have
    // to do surgery on the CFG.
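    // (The TEST inserted ahead of each rewritten jump is not a terminator, so
    // it cannot legally sit between two branch terminators; splitting gives
    // each subsequent jump its own block to host its test.)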
    MachineBasicBlock *LastJmpMBB = nullptr;
    for (MachineInstr *JmpI : JmpIs) {
      // Past the first jump within a basic block we need to split the blocks
      // apart.
      if (JmpI->getParent() == LastJmpMBB)
        splitBlock(*JmpI->getParent(), *JmpI, *TII);
      else
        LastJmpMBB = JmpI->getParent();

      rewriteCondJmp(TestMBB, TestPos, TestLoc, *JmpI, CondRegs);
    }

    // FIXME: Mark the last use of EFLAGS before the copy's def as a kill if
    // the copy's def operand is itself a kill.
  }

#ifndef NDEBUG
  for (MachineBasicBlock &MBB : MF)
    for (MachineInstr &MI : MBB)
      if (MI.getOpcode() == TargetOpcode::COPY &&
          (MI.getOperand(0).getReg() == X86::EFLAGS ||
           MI.getOperand(1).getReg() == X86::EFLAGS)) {
        DEBUG(dbgs() << "ERROR: Found a COPY involving EFLAGS: "; MI.dump());
        llvm_unreachable("Unlowered EFLAGS copy!");
      }
#endif

  return true;
}

/// Collect any conditions that have already been set in registers so that we
/// can re-use them rather than adding duplicates.
CondRegArray
X86FlagsCopyLoweringPass::collectCondsInRegs(MachineBasicBlock &MBB,
                                             MachineInstr &CopyDefI) {
  CondRegArray CondRegs = {};

  // Scan backwards across the range of instructions with live EFLAGS.
  for (MachineInstr &MI : llvm::reverse(
           llvm::make_range(MBB.instr_begin(), CopyDefI.getIterator()))) {
    X86::CondCode Cond = X86::getCondFromSETOpc(MI.getOpcode());
    if (Cond != X86::COND_INVALID && MI.getOperand(0).isReg() &&
        TRI->isVirtualRegister(MI.getOperand(0).getReg()))
      CondRegs[Cond] = MI.getOperand(0).getReg();

    // Stop scanning when we see the first definition of the EFLAGS as prior to
    // this we would potentially capture the wrong flag state.
    if (MI.findRegisterDefOperand(X86::EFLAGS))
      break;
  }
  return CondRegs;
}

unsigned X86FlagsCopyLoweringPass::promoteCondToReg(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    DebugLoc TestLoc, X86::CondCode Cond) {
  unsigned Reg = MRI->createVirtualRegister(PromoteRC);
  auto SetI = BuildMI(TestMBB, TestPos, TestLoc,
                      TII->get(X86::getSETFromCond(Cond)), Reg);
  (void)SetI;
  DEBUG(dbgs() << "    save cond: "; SetI->dump());
  ++NumSetCCsInserted;
  return Reg;
}

std::pair<unsigned, bool> X86FlagsCopyLoweringPass::getCondOrInverseInReg(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    DebugLoc TestLoc, X86::CondCode Cond, CondRegArray &CondRegs) {
  unsigned &CondReg = CondRegs[Cond];
  unsigned &InvCondReg = CondRegs[X86::GetOppositeBranchCondition(Cond)];
  if (!CondReg && !InvCondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);

  if (CondReg)
    return {CondReg, false};

  return {InvCondReg, true};
}

void X86FlagsCopyLoweringPass::insertTest(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator Pos,
                                          DebugLoc Loc, unsigned Reg) {
  // We emit the test as a register/register test of the saved condition byte
  // against itself. This selects the shorter `testb %reg, %reg` encoding and
  // sets ZF exactly when the saved condition byte is zero, which is what the
  // rewrites keyed off this test rely on.
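  // (An equivalent form would be a register/immediate test against -1, which
  // would let register allocation fold a memory operand; for any value x,
  // `test x, x` and `test x, -1` compute the same ZF since x & x == x & -1.)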
  auto TestI =
      BuildMI(MBB, Pos, Loc, TII->get(X86::TEST8rr)).addReg(Reg).addReg(Reg);
  (void)TestI;
  DEBUG(dbgs() << "    test cond: "; TestI->dump());
  ++NumTestsInserted;
}

void X86FlagsCopyLoweringPass::rewriteArithmetic(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    DebugLoc TestLoc, MachineInstr &MI, MachineOperand &FlagUse,
    CondRegArray &CondRegs) {
  // Arithmetic is either reading CF or OF. Figure out which condition we need
  // to preserve in a register.
  X86::CondCode Cond;

  // The addend to use to reset CF or OF when added to the flag value.
  int Addend;

  switch (getMnemonicFromOpcode(MI.getOpcode())) {
  case FlagArithMnemonic::ADC:
  case FlagArithMnemonic::ADCX:
  case FlagArithMnemonic::RCL:
  case FlagArithMnemonic::RCR:
  case FlagArithMnemonic::SBB:
    Cond = X86::COND_B; // CF == 1
    // Set up an addend that when one is added will need a carry due to not
    // having a higher bit available.
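    // (With the condition materialized as 0 or 1: 255 + 1 carries out of
    // 8 bits, setting CF, while 255 + 0 does not.)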
    Addend = 255;
    break;

  case FlagArithMnemonic::ADOX:
    Cond = X86::COND_O; // OF == 1
    // Set up an addend that when one is added will turn from positive to
    // negative and thus overflow in the signed domain.
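    // (With the condition materialized as 0 or 1: 127 + 1 wraps to -128 in
    // 8 bits, setting OF, while 127 + 0 does not overflow.)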
    Addend = 127;
    break;
  }

  // Now get a register that contains the value of the flag input to the
  // arithmetic. We require exactly this flag to simplify the arithmetic
  // required to materialize it back into the flag.
  unsigned &CondReg = CondRegs[Cond];
  if (!CondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);

  MachineBasicBlock &MBB = *MI.getParent();

  // Insert an instruction that will set the flag back to the desired value.
  unsigned TmpReg = MRI->createVirtualRegister(PromoteRC);
  auto AddI =
      BuildMI(MBB, MI.getIterator(), MI.getDebugLoc(), TII->get(X86::ADD8ri))
          .addDef(TmpReg, RegState::Dead)
          .addReg(CondReg)
          .addImm(Addend);
  (void)AddI;
  DEBUG(dbgs() << "    add cond: "; AddI->dump());
  ++NumAddsInserted;
  FlagUse.setIsKill(true);
}

void X86FlagsCopyLoweringPass::rewriteCMov(MachineBasicBlock &TestMBB,
                                           MachineBasicBlock::iterator TestPos,
                                           DebugLoc TestLoc,
                                           MachineInstr &CMovI,
                                           MachineOperand &FlagUse,
                                           CondRegArray &CondRegs) {
  // First get the register containing this specific condition.
  X86::CondCode Cond = X86::getCondFromCMovOpc(CMovI.getOpcode());
  unsigned CondReg;
  bool Inverted;
  std::tie(CondReg, Inverted) =
      getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);

  MachineBasicBlock &MBB = *CMovI.getParent();

  // Insert a direct test of the saved register.
  insertTest(MBB, CMovI.getIterator(), CMovI.getDebugLoc(), CondReg);

  // Rewrite the CMov to use the !ZF flag from the test (but match register
  // size and memory operand), and then kill its use of the flags afterward.
  auto &CMovRC = *MRI->getRegClass(CMovI.getOperand(0).getReg());
  CMovI.setDesc(TII->get(X86::getCMovFromCond(
      Inverted ? X86::COND_E : X86::COND_NE, TRI->getRegSizeInBits(CMovRC) / 8,
      !CMovI.memoperands_empty())));
  FlagUse.setIsKill(true);
  DEBUG(dbgs() << "    fixed cmov: "; CMovI.dump());
}

void X86FlagsCopyLoweringPass::rewriteCondJmp(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    DebugLoc TestLoc, MachineInstr &JmpI, CondRegArray &CondRegs) {
  // First get the register containing this specific condition.
  X86::CondCode Cond = X86::getCondFromBranchOpc(JmpI.getOpcode());
  unsigned CondReg;
  bool Inverted;
  std::tie(CondReg, Inverted) =
      getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);

  MachineBasicBlock &JmpMBB = *JmpI.getParent();

  // Insert a direct test of the saved register.
  insertTest(JmpMBB, JmpI.getIterator(), JmpI.getDebugLoc(), CondReg);

  // Rewrite the jump to use the !ZF flag from the test, and kill its use of
  // the flags afterward.
  JmpI.setDesc(TII->get(
      X86::GetCondBranchFromCond(Inverted ? X86::COND_E : X86::COND_NE)));
  const int ImplicitEFLAGSOpIdx = 1;
  JmpI.getOperand(ImplicitEFLAGSOpIdx).setIsKill(true);
  DEBUG(dbgs() << "    fixed jCC: "; JmpI.dump());
}

void X86FlagsCopyLoweringPass::rewriteCopy(MachineInstr &MI,
                                           MachineOperand &FlagUse,
                                           MachineInstr &CopyDefI) {
  // Just replace this copy with the original copy def.
  MRI->replaceRegWith(MI.getOperand(0).getReg(),
                      CopyDefI.getOperand(0).getReg());
  MI.eraseFromParent();
}

void X86FlagsCopyLoweringPass::rewriteSetCarryExtended(
    MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
    DebugLoc TestLoc, MachineInstr &SetBI, MachineOperand &FlagUse,
    CondRegArray &CondRegs) {
  // This routine is only used to handle pseudos for setting a register to zero
  // or all ones based on CF. This is essentially the sign-extended form of the
  // 1-bit SETB, modeled with the SETB_C* pseudos. They require special
  // handling as they aren't normal SETcc instructions and are lowered to an
  // EFLAGS clobbering operation (SBB typically). One simplifying aspect is that
  // they are only provided in reg-defining forms. A complicating factor is that
  // they can define many different register widths.
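  // (E.g. SETB_C32r produces 0 or 0xFFFFFFFF depending on CF, the same value
  // an `sbb %eax, %eax` would compute.)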
  assert(SetBI.getOperand(0).isReg() &&
         "Cannot have a non-register defined operand to this variant of SETB!");

  // Little helper to do the common final step of replacing the register def'ed
  // by this SETB instruction with a new register and removing the SETB
  // instruction.
  auto RewriteToReg = [&](unsigned Reg) {
    MRI->replaceRegWith(SetBI.getOperand(0).getReg(), Reg);
    SetBI.eraseFromParent();
  };

  // Grab the register class used for this particular instruction.
  auto &SetBRC = *MRI->getRegClass(SetBI.getOperand(0).getReg());

  MachineBasicBlock &MBB = *SetBI.getParent();
  auto SetPos = SetBI.getIterator();
  auto SetLoc = SetBI.getDebugLoc();

  auto AdjustReg = [&](unsigned Reg) {
    auto &OrigRC = *MRI->getRegClass(Reg);
    if (&OrigRC == &SetBRC)
      return Reg;

    unsigned NewReg;

    int OrigRegSize = TRI->getRegSizeInBits(OrigRC) / 8;
    int TargetRegSize = TRI->getRegSizeInBits(SetBRC) / 8;
    assert(OrigRegSize <= 8 && "No GPRs larger than 64-bits!");
    assert(TargetRegSize <= 8 && "No GPRs larger than 64-bits!");
    int SubRegIdx[] = {X86::NoSubRegister, X86::sub_8bit, X86::sub_16bit,
                       X86::NoSubRegister, X86::sub_32bit};

    // If the original size is smaller than the target *and* is smaller than 4
    // bytes, we need to explicitly zero extend it. We always extend to 4-bytes
    // to maximize the chance of being able to CSE that operation and to avoid
    // partial dependency stalls extending to 2-bytes.
    if (OrigRegSize < TargetRegSize && OrigRegSize < 4) {
      NewReg = MRI->createVirtualRegister(&X86::GR32RegClass);
      BuildMI(MBB, SetPos, SetLoc, TII->get(X86::MOVZX32rr8), NewReg)
          .addReg(Reg);
      if (&SetBRC == &X86::GR32RegClass)
        return NewReg;
      Reg = NewReg;
      OrigRegSize = 4;
    }

    NewReg = MRI->createVirtualRegister(&SetBRC);
    if (OrigRegSize < TargetRegSize) {
      BuildMI(MBB, SetPos, SetLoc, TII->get(TargetOpcode::SUBREG_TO_REG),
              NewReg)
          .addImm(0)
          .addReg(Reg)
          .addImm(SubRegIdx[OrigRegSize]);
    } else if (OrigRegSize > TargetRegSize) {
      BuildMI(MBB, SetPos, SetLoc, TII->get(TargetOpcode::EXTRACT_SUBREG),
              NewReg)
          .addReg(Reg)
          .addImm(SubRegIdx[TargetRegSize]);
    } else {
      BuildMI(MBB, SetPos, SetLoc, TII->get(TargetOpcode::COPY), NewReg)
          .addReg(Reg);
    }
    return NewReg;
  };

  unsigned &CondReg = CondRegs[X86::COND_B];
  if (!CondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, X86::COND_B);

  // Adjust the condition to have the desired register width by zero-extending
  // as needed.
  // FIXME: We should use a better API to avoid the local reference and using a
  // different variable here.
  unsigned ExtCondReg = AdjustReg(CondReg);

  // Now we need to turn this into a bitmask. We do this by subtracting it from
  // zero.
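  // (Subtracting the 0-or-1 condition value from zero yields 0 - 1 == all
  // ones when CF was set and 0 - 0 == 0 when it was clear, i.e. exactly the
  // mask these pseudos are defined to produce.)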
  unsigned ZeroReg = MRI->createVirtualRegister(&X86::GR32RegClass);
  BuildMI(MBB, SetPos, SetLoc, TII->get(X86::MOV32r0), ZeroReg);
  ZeroReg = AdjustReg(ZeroReg);

  unsigned Sub;
  switch (SetBI.getOpcode()) {
  case X86::SETB_C8r:
    Sub = X86::SUB8rr;
    break;

  case X86::SETB_C16r:
    Sub = X86::SUB16rr;
    break;

  case X86::SETB_C32r:
    Sub = X86::SUB32rr;
    break;

  case X86::SETB_C64r:
    Sub = X86::SUB64rr;
    break;

  default:
    llvm_unreachable("Invalid SETB_C* opcode!");
  }
  unsigned ResultReg = MRI->createVirtualRegister(&SetBRC);
  BuildMI(MBB, SetPos, SetLoc, TII->get(Sub), ResultReg)
      .addReg(ZeroReg)
      .addReg(ExtCondReg);
  return RewriteToReg(ResultReg);
}

void X86FlagsCopyLoweringPass::rewriteSetCC(MachineBasicBlock &TestMBB,
                                            MachineBasicBlock::iterator TestPos,
                                            DebugLoc TestLoc,
                                            MachineInstr &SetCCI,
                                            MachineOperand &FlagUse,
                                            CondRegArray &CondRegs) {
  X86::CondCode Cond = X86::getCondFromSETOpc(SetCCI.getOpcode());
  // Note that we can't usefully rewrite this to the inverse without complex
  // analysis of the users of the setCC. Largely we rely on duplicates that
  // could have been avoided having already been avoided here.
  unsigned &CondReg = CondRegs[Cond];
  if (!CondReg)
    CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);

  // Rewriting a register def is trivial: we just replace the register and
  // remove the setCC.
  if (!SetCCI.mayStore()) {
    assert(SetCCI.getOperand(0).isReg() &&
           "Cannot have a non-register defined operand to SETcc!");
    MRI->replaceRegWith(SetCCI.getOperand(0).getReg(), CondReg);
    SetCCI.eraseFromParent();
    return;
  }

  // Otherwise, we need to emit a store.
  auto MIB = BuildMI(*SetCCI.getParent(), SetCCI.getIterator(),
                     SetCCI.getDebugLoc(), TII->get(X86::MOV8mr));
  // Copy the address operands.
  for (int i = 0; i < X86::AddrNumOperands; ++i)
    MIB.add(SetCCI.getOperand(i));

  MIB.addReg(CondReg);

  MIB->setMemRefs(SetCCI.memoperands_begin(), SetCCI.memoperands_end());

  SetCCI.eraseFromParent();
}