1 //===-- X86VZeroUpper.cpp - AVX vzeroupper instruction inserter -----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the pass which inserts x86 AVX vzeroupper instructions
11 // before calls to SSE encoded functions. This avoids transition latency
12 // penalty when transferring control between AVX encoded instructions and old
15 //===----------------------------------------------------------------------===//
17 #define DEBUG_TYPE "x86-vzeroupper"
19 #include "X86InstrInfo.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/CodeGen/MachineFunctionPass.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/CodeGen/Passes.h"
25 #include "llvm/Support/Debug.h"
26 #include "llvm/Support/raw_ostream.h"
27 #include "llvm/Target/TargetInstrInfo.h"
30 STATISTIC(NumVZU, "Number of vzeroupper instructions inserted");
33 struct VZeroUpperInserter : public MachineFunctionPass {
35 VZeroUpperInserter() : MachineFunctionPass(ID) {}
37 virtual bool runOnMachineFunction(MachineFunction &MF);
39 bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB);
41 virtual const char *getPassName() const { return "X86 vzeroupper inserter";}
44 const TargetInstrInfo *TII; // Machine instruction info.
46 // Any YMM register live-in to this function?
49 // BBState - Contains the state of each MBB: unknown, clean, dirty
50 SmallVector<uint8_t, 8> BBState;
52 // BBSolved - Keep track of all MBB which had been already analyzed
53 // and there is no further processing required.
56 // Machine Basic Blocks are classified according this pass:
58 // ST_UNKNOWN - The MBB state is unknown, meaning from the entry state
59 // until the MBB exit there isn't a instruction using YMM to change
60 // the state to dirty, or one of the incoming predecessors is unknown
61 // and there's not a dirty predecessor between them.
63 // ST_CLEAN - No YMM usage in the end of the MBB. A MBB could have
64 // instructions using YMM and be marked ST_CLEAN, as long as the state
65 // is cleaned by a vzeroupper before any call.
67 // ST_DIRTY - Any MBB ending with a YMM usage not cleaned up by a
68 // vzeroupper instruction.
70 // ST_INIT - Placeholder for an empty state set
79 // computeState - Given two states, compute the resulting state, in
82 // 1) One dirty state yields another dirty state
83 // 2) All states must be clean for the result to be clean
84 // 3) If none above and one unknown, the result state is also unknown
86 static unsigned computeState(unsigned PrevState, unsigned CurState) {
87 if (PrevState == ST_INIT)
90 if (PrevState == ST_DIRTY || CurState == ST_DIRTY)
93 if (PrevState == ST_CLEAN && CurState == ST_CLEAN)
100 char VZeroUpperInserter::ID = 0;
103 FunctionPass *llvm::createX86IssueVZeroUpperPass() {
104 return new VZeroUpperInserter();
107 static bool isYmmReg(unsigned Reg) {
108 return (Reg >= X86::YMM0 && Reg <= X86::YMM31);
111 static bool isZmmReg(unsigned Reg) {
112 return (Reg >= X86::ZMM0 && Reg <= X86::ZMM31);
115 static bool checkFnHasLiveInYmm(MachineRegisterInfo &MRI) {
116 for (MachineRegisterInfo::livein_iterator I = MRI.livein_begin(),
117 E = MRI.livein_end(); I != E; ++I)
118 if (isYmmReg(I->first) || isZmmReg(I->first))
124 static bool clobbersAllYmmRegs(const MachineOperand &MO) {
125 for (unsigned reg = X86::YMM0; reg <= X86::YMM31; ++reg) {
126 if (!MO.clobbersPhysReg(reg))
129 for (unsigned reg = X86::ZMM0; reg <= X86::ZMM31; ++reg) {
130 if (!MO.clobbersPhysReg(reg))
136 static bool hasYmmReg(MachineInstr *MI) {
137 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
138 const MachineOperand &MO = MI->getOperand(i);
139 if (MI->isCall() && MO.isRegMask() && !clobbersAllYmmRegs(MO))
145 if (isYmmReg(MO.getReg()))
151 /// clobbersAnyYmmReg() - Check if any YMM register will be clobbered by this
153 static bool clobbersAnyYmmReg(MachineInstr *MI) {
154 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
155 const MachineOperand &MO = MI->getOperand(i);
158 for (unsigned reg = X86::YMM0; reg <= X86::YMM31; ++reg) {
159 if (MO.clobbersPhysReg(reg))
162 for (unsigned reg = X86::ZMM0; reg <= X86::ZMM31; ++reg) {
163 if (MO.clobbersPhysReg(reg))
170 /// runOnMachineFunction - Loop over all of the basic blocks, inserting
171 /// vzero upper instructions before function calls.
172 bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
173 TII = MF.getTarget().getInstrInfo();
174 MachineRegisterInfo &MRI = MF.getRegInfo();
175 bool EverMadeChange = false;
177 // Fast check: if the function doesn't use any ymm registers, we don't need
178 // to insert any VZEROUPPER instructions. This is constant-time, so it is
179 // cheap in the common case of no ymm use.
180 bool YMMUsed = false;
181 const TargetRegisterClass *RC = &X86::VR256RegClass;
182 for (TargetRegisterClass::iterator i = RC->begin(), e = RC->end();
184 if (!MRI.reg_nodbg_empty(*i)) {
190 return EverMadeChange;
192 // Pre-compute the existence of any live-in YMM registers to this function
193 FnHasLiveInYmm = checkFnHasLiveInYmm(MRI);
195 assert(BBState.empty());
196 BBState.resize(MF.getNumBlockIDs(), 0);
197 BBSolved.resize(MF.getNumBlockIDs(), 0);
199 // Each BB state depends on all predecessors, loop over until everything
200 // converges. (Once we converge, we can implicitly mark everything that is
201 // still ST_UNKNOWN as ST_CLEAN.)
203 bool MadeChange = false;
205 // Process all basic blocks.
206 for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
207 MadeChange |= processBasicBlock(MF, *I);
209 // If this iteration over the code changed anything, keep iterating.
210 if (!MadeChange) break;
211 EverMadeChange = true;
216 return EverMadeChange;
219 /// processBasicBlock - Loop over all of the instructions in the basic block,
220 /// inserting vzero upper instructions before function calls.
221 bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF,
222 MachineBasicBlock &BB) {
223 bool Changed = false;
224 unsigned BBNum = BB.getNumber();
226 // Don't process already solved BBs
228 return false; // No changes
230 // Check the state of all predecessors
231 unsigned EntryState = ST_INIT;
232 for (MachineBasicBlock::const_pred_iterator PI = BB.pred_begin(),
233 PE = BB.pred_end(); PI != PE; ++PI) {
234 EntryState = computeState(EntryState, BBState[(*PI)->getNumber()]);
235 if (EntryState == ST_DIRTY)
240 // The entry MBB for the function may set the initial state to dirty if
241 // the function receives any YMM incoming arguments
242 if (&BB == MF.begin()) {
243 EntryState = ST_CLEAN;
245 EntryState = ST_DIRTY;
248 // The current state is initialized according to the predecessors
249 unsigned CurState = EntryState;
250 bool BBHasCall = false;
252 for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) {
253 DebugLoc dl = I->getDebugLoc();
254 MachineInstr *MI = I;
256 bool isControlFlow = MI->isCall() || MI->isReturn();
258 // Shortcut: don't need to check regular instructions in dirty state.
259 if (!isControlFlow && CurState == ST_DIRTY)
263 // We found a ymm-using instruction; this could be an AVX instruction,
264 // or it could be control flow.
269 // Check for control-flow out of the current function (which might
270 // indirectly execute SSE instructions).
274 // If the call won't clobber any YMM register, skip it as well. It usually
275 // happens on helper function calls (such as '_chkstk', '_ftol2') where
276 // standard calling convention is not used (RegMask is not used to mark
277 // register clobbered and register usage (def/imp-def/use) is well-dfined
278 // and explicitly specified.
279 if (MI->isCall() && !clobbersAnyYmmReg(MI))
284 // The VZEROUPPER instruction resets the upper 128 bits of all Intel AVX
285 // registers. This instruction has zero latency. In addition, the processor
286 // changes back to Clean state, after which execution of Intel SSE
287 // instructions or Intel AVX instructions has no transition penalty. Add
288 // the VZEROUPPER instruction before any function call/return that might
290 // FIXME: In some cases, we may want to move the VZEROUPPER into a
291 // predecessor block.
292 if (CurState == ST_DIRTY) {
293 // Only insert the VZEROUPPER in case the entry state isn't unknown.
294 // When unknown, only compute the information within the block to have
295 // it available in the exit if possible, but don't change the block.
296 if (EntryState != ST_UNKNOWN) {
297 BuildMI(BB, I, dl, TII->get(X86::VZEROUPPER));
301 // After the inserted VZEROUPPER the state becomes clean again, but
302 // other YMM may appear before other subsequent calls or even before
303 // the end of the BB.
308 DEBUG(dbgs() << "MBB #" << BBNum
309 << ", current state: " << CurState << '\n');
311 // A BB can only be considered solved when we both have done all the
312 // necessary transformations, and have computed the exit state. This happens
314 // 1) We know the entry state: this immediately implies the exit state and
315 // all the necessary transformations.
316 // 2) There are no calls, and and a non-call instruction marks this block:
317 // no transformations are necessary, and we know the exit state.
318 if (EntryState != ST_UNKNOWN || (!BBHasCall && CurState != ST_UNKNOWN))
319 BBSolved[BBNum] = true;
321 if (CurState != BBState[BBNum])
324 BBState[BBNum] = CurState;