1 //====- X86SpeculativeLoadHardening.cpp - A Spectre v1 mitigation ---------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// Provide a pass which mitigates speculative execution attacks which operate
12 /// by speculating incorrectly past some predicate (a type check, bounds check,
13 /// or other condition) to reach a load with invalid inputs and leak the data
14 /// accessed by that load using a side channel out of the speculative domain.
16 /// For details on the attacks, see the first variant in both the Project Zero
17 /// writeup and the Spectre paper:
18 /// https://googleprojectzero.blogspot.com/2018/01/reading-privileged-memory-with-side.html
19 /// https://spectreattack.com/spectre.pdf
21 //===----------------------------------------------------------------------===//
24 #include "X86InstrBuilder.h"
25 #include "X86InstrInfo.h"
26 #include "X86Subtarget.h"
27 #include "llvm/ADT/ArrayRef.h"
28 #include "llvm/ADT/DenseMap.h"
29 #include "llvm/ADT/Optional.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/ScopeExit.h"
32 #include "llvm/ADT/SmallPtrSet.h"
33 #include "llvm/ADT/SmallSet.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/SparseBitVector.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/CodeGen/MachineBasicBlock.h"
38 #include "llvm/CodeGen/MachineConstantPool.h"
39 #include "llvm/CodeGen/MachineFunction.h"
40 #include "llvm/CodeGen/MachineFunctionPass.h"
41 #include "llvm/CodeGen/MachineInstr.h"
42 #include "llvm/CodeGen/MachineInstrBuilder.h"
43 #include "llvm/CodeGen/MachineModuleInfo.h"
44 #include "llvm/CodeGen/MachineOperand.h"
45 #include "llvm/CodeGen/MachineRegisterInfo.h"
46 #include "llvm/CodeGen/MachineSSAUpdater.h"
47 #include "llvm/CodeGen/TargetInstrInfo.h"
48 #include "llvm/CodeGen/TargetRegisterInfo.h"
49 #include "llvm/CodeGen/TargetSchedule.h"
50 #include "llvm/CodeGen/TargetSubtargetInfo.h"
51 #include "llvm/IR/DebugLoc.h"
52 #include "llvm/MC/MCSchedule.h"
53 #include "llvm/Pass.h"
54 #include "llvm/Support/CommandLine.h"
55 #include "llvm/Support/Debug.h"
56 #include "llvm/Support/raw_ostream.h"
64 #define PASS_KEY "x86-speculative-load-hardening"
65 #define DEBUG_TYPE PASS_KEY
67 STATISTIC(NumCondBranchesTraced, "Number of conditional branches traced");
68 STATISTIC(NumBranchesUntraced, "Number of branches unable to trace");
69 STATISTIC(NumAddrRegsHardened,
70 "Number of address mode used registers hardaned");
71 STATISTIC(NumPostLoadRegsHardened,
72 "Number of post-load register values hardened");
73 STATISTIC(NumCallsOrJumpsHardened,
74 "Number of calls or jumps requiring extra hardening");
75 STATISTIC(NumInstsInserted, "Number of instructions inserted");
76 STATISTIC(NumLFENCEsInserted, "Number of lfence instructions inserted");
78 static cl::opt<bool> HardenEdgesWithLFENCE(
81 "Use LFENCE along each conditional edge to harden against speculative "
82 "loads rather than conditional movs and poisoned pointers."),
83 cl::init(false), cl::Hidden);
85 static cl::opt<bool> EnablePostLoadHardening(
86 PASS_KEY "-post-load",
87 cl::desc("Harden the value loaded *after* it is loaded by "
88 "flushing the loaded bits to 1. This is hard to do "
89 "in general but can be done easily for GPRs."),
90 cl::init(true), cl::Hidden);
92 static cl::opt<bool> FenceCallAndRet(
93 PASS_KEY "-fence-call-and-ret",
94 cl::desc("Use a full speculation fence to harden both call and ret edges "
95 "rather than a lighter weight mitigation."),
96 cl::init(false), cl::Hidden);
98 static cl::opt<bool> HardenInterprocedurally(
100 cl::desc("Harden interprocedurally by passing our state in and out of "
101 "functions in the high bits of the stack pointer."),
102 cl::init(true), cl::Hidden);
105 HardenLoads(PASS_KEY "-loads",
106 cl::desc("Sanitize loads from memory. When disable, no "
107 "significant security is provided."),
108 cl::init(true), cl::Hidden);
110 static cl::opt<bool> HardenIndirectCallsAndJumps(
111 PASS_KEY "-indirect",
112 cl::desc("Harden indirect calls and jumps against using speculatively "
113 "stored attacker controlled addresses. This is designed to "
114 "mitigate Spectre v1.2 style attacks."),
115 cl::init(true), cl::Hidden);
119 void initializeX86SpeculativeLoadHardeningPassPass(PassRegistry &);
121 } // end namespace llvm
125 class X86SpeculativeLoadHardeningPass : public MachineFunctionPass {
127 X86SpeculativeLoadHardeningPass() : MachineFunctionPass(ID) {
128 initializeX86SpeculativeLoadHardeningPassPass(
129 *PassRegistry::getPassRegistry());
132 StringRef getPassName() const override {
133 return "X86 speculative load hardening";
135 bool runOnMachineFunction(MachineFunction &MF) override;
136 void getAnalysisUsage(AnalysisUsage &AU) const override;
138 /// Pass identification, replacement for typeid.
142 /// The information about a block's conditional terminators needed to trace
143 /// our predicate state through the exiting edges.
144 struct BlockCondInfo {
145 MachineBasicBlock *MBB;
147 // We mostly have one conditional branch, and in extremely rare cases have
148 // two. Three and more are so rare as to be unimportant for compile time.
149 SmallVector<MachineInstr *, 2> CondBrs;
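// The unconditional branch terminator, if any; null when the remaining
// successor is reached via fallthrough or an unanalyzable terminator.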
151 MachineInstr *UncondBr;
154 /// Manages the predicate state traced through the program.
159 const TargetRegisterClass *RC;
160 MachineSSAUpdater SSA;
162 PredState(MachineFunction &MF, const TargetRegisterClass *RC)
166 const X86Subtarget *Subtarget;
167 MachineRegisterInfo *MRI;
168 const X86InstrInfo *TII;
169 const TargetRegisterInfo *TRI;
171 Optional<PredState> PS;
173 void hardenEdgesWithLFENCE(MachineFunction &MF);
175 SmallVector<BlockCondInfo, 16> collectBlockCondInfo(MachineFunction &MF);
177 SmallVector<MachineInstr *, 16>
178 tracePredStateThroughCFG(MachineFunction &MF, ArrayRef<BlockCondInfo> Infos);
180 void unfoldCallAndJumpLoads(MachineFunction &MF);
182 void tracePredStateThroughBlocksAndHarden(MachineFunction &MF);
184 unsigned saveEFLAGS(MachineBasicBlock &MBB,
185 MachineBasicBlock::iterator InsertPt, DebugLoc Loc);
186 void restoreEFLAGS(MachineBasicBlock &MBB,
187 MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
190 void mergePredStateIntoSP(MachineBasicBlock &MBB,
191 MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
192 unsigned PredStateReg);
193 unsigned extractPredStateFromSP(MachineBasicBlock &MBB,
194 MachineBasicBlock::iterator InsertPt,
198 hardenLoadAddr(MachineInstr &MI, MachineOperand &BaseMO,
199 MachineOperand &IndexMO,
200 SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
202 sinkPostLoadHardenedInst(MachineInstr &MI,
203 SmallPtrSetImpl<MachineInstr *> &HardenedInstrs);
204 bool canHardenRegister(unsigned Reg);
205 unsigned hardenValueInRegister(unsigned Reg, MachineBasicBlock &MBB,
206 MachineBasicBlock::iterator InsertPt,
208 unsigned hardenPostLoad(MachineInstr &MI);
209 void hardenReturnInstr(MachineInstr &MI);
210 void tracePredStateThroughCall(MachineInstr &MI);
211 void hardenIndirectCallOrJumpInstr(
213 SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
216 } // end anonymous namespace
218 char X86SpeculativeLoadHardeningPass::ID = 0;
220 void X86SpeculativeLoadHardeningPass::getAnalysisUsage(
221 AnalysisUsage &AU) const {
222 MachineFunctionPass::getAnalysisUsage(AU);
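/// Split the edge from MBB to Succ by inserting a new basic block between them
/// so that per-edge hardening code has a safe place to live. SuccCount is the
/// number of edges from MBB to Succ and controls whether the successor is
/// replaced or split in the CFG; UncondBr is updated if a new unconditional
/// branch has to be inserted to preserve the old layout fallthrough. Returns
/// the newly created block.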
225 static MachineBasicBlock &splitEdge(MachineBasicBlock &MBB,
226 MachineBasicBlock &Succ, int SuccCount,
227 MachineInstr *Br, MachineInstr *&UncondBr,
228 const X86InstrInfo &TII) {
229 assert(!Succ.isEHPad() && "Shouldn't get edges to EH pads!");
231 MachineFunction &MF = *MBB.getParent();
233 MachineBasicBlock &NewMBB = *MF.CreateMachineBasicBlock();
235 // We have to insert the new block immediately after the current one as we
236 // don't know what layout-successor relationships the successor has and we
237 // may not be able to (and generally don't want to) try to fix those up.
238 MF.insert(std::next(MachineFunction::iterator(&MBB)), &NewMBB);
240 // Update the branch instruction if necessary.
242 assert(Br->getOperand(0).getMBB() == &Succ &&
243 "Didn't start with the right target!");
244 Br->getOperand(0).setMBB(&NewMBB);
246 // If this successor was reached through a branch rather than fallthrough,
247 // we might have *broken* fallthrough and so need to inject a new
248 // unconditional branch.
250 MachineBasicBlock &OldLayoutSucc =
251 *std::next(MachineFunction::iterator(&NewMBB));
252 assert(MBB.isSuccessor(&OldLayoutSucc) &&
253 "Without an unconditional branch, the old layout successor should "
254 "be an actual successor!");
256 BuildMI(&MBB, DebugLoc(), TII.get(X86::JMP_1)).addMBB(&OldLayoutSucc);
257 // Update the unconditional branch now that we've added one.
258 UncondBr = &*BrBuilder;
261 // Insert unconditional "jump Succ" instruction in the new block if
262 // necessary.
263 if (!NewMBB.isLayoutSuccessor(&Succ)) {
264 SmallVector<MachineOperand, 4> Cond;
265 TII.insertBranch(NewMBB, &Succ, nullptr, Cond, Br->getDebugLoc());
269 "Cannot have a branchless successor and an unconditional branch!");
270 assert(NewMBB.isLayoutSuccessor(&Succ) &&
271 "A non-branch successor must have been a layout successor before "
272 "and now is a layout successor of the new block.");
275 // If this is the only edge to the successor, we can just replace it in the
276 // CFG. Otherwise we need to add a new entry in the CFG for the new
277 // successor.
278 if (SuccCount == 1) {
279 MBB.replaceSuccessor(&Succ, &NewMBB);
281 MBB.splitSuccessor(&Succ, &NewMBB);
284 // Hook up the edge from the new basic block to the old successor in the CFG.
285 NewMBB.addSuccessor(&Succ);
287 // Fix PHI nodes in Succ so they refer to NewMBB instead of MBB.
288 for (MachineInstr &MI : Succ) {
291 for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
293 MachineOperand &OpV = MI.getOperand(OpIdx);
294 MachineOperand &OpMBB = MI.getOperand(OpIdx + 1);
295 assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
296 if (OpMBB.getMBB() != &MBB)
299 // If this is the last edge to the successor, just replace MBB in the PHI.
300 if (SuccCount == 1) {
301 OpMBB.setMBB(&NewMBB);
305 // Otherwise, append a new pair of operands for the new incoming edge.
306 MI.addOperand(MF, OpV);
307 MI.addOperand(MF, MachineOperand::CreateMBB(&NewMBB));
312 // Inherit live-ins from the successor
313 for (auto &LI : Succ.liveins())
314 NewMBB.addLiveIn(LI);
316 LLVM_DEBUG(dbgs() << " Split edge from '" << MBB.getName() << "' to '"
317 << Succ.getName() << "'.\n");
321 /// Remove duplicate PHI operands to leave the PHI in a canonical and
322 /// predictable form.
324 /// FIXME: It's really frustrating that we have to do this, but SSA-form in MIR
325 /// isn't what you might expect. We may have multiple entries in PHI nodes for
326 /// a single predecessor. This makes CFG-updating extremely complex, so here we
327 /// simplify all PHI nodes to a model even simpler than the IR's model: exactly
328 /// one entry per predecessor, regardless of how many edges there are.
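/// For example (schematically), a PHI with operands
///   (%a, %bb.1), (%a, %bb.1), (%b, %bb.2)
/// carrying two entries for the single predecessor %bb.1 is rewritten to
///   (%a, %bb.1), (%b, %bb.2)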
329 static void canonicalizePHIOperands(MachineFunction &MF) {
330 SmallPtrSet<MachineBasicBlock *, 4> Preds;
331 SmallVector<int, 4> DupIndices;
333 for (auto &MI : MBB) {
337 // First we scan the operands of the PHI looking for duplicate entries
338 // for a particular predecessor. We retain the operand index of each
339 // duplicate entry found.
340 for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
342 if (!Preds.insert(MI.getOperand(OpIdx + 1).getMBB()).second)
343 DupIndices.push_back(OpIdx);
345 // Now walk the duplicate indices, removing both the block and value. Note
346 // that these are stored as a vector making this element-wise removal
348 // potentially quadratic.
350 // FIXME: It is really frustrating that we have to use a quadratic
351 // removal algorithm here. There should be a better way, but the use-def
352 // updates required make that impossible using the public API.
354 // Note that we have to process these backwards so that we don't
355 // invalidate other indices with each removal.
356 while (!DupIndices.empty()) {
357 int OpIdx = DupIndices.pop_back_val();
358 // Remove both the block and value operand, again in reverse order to
359 // preserve indices.
360 MI.RemoveOperand(OpIdx + 1);
361 MI.RemoveOperand(OpIdx);
368 /// Helper to scan a function for loads vulnerable to misspeculation that we
369 /// want to harden.
371 /// We use this to avoid making changes to functions where there is nothing we
372 /// need to do to harden against misspeculation.
373 static bool hasVulnerableLoad(MachineFunction &MF) {
374 for (MachineBasicBlock &MBB : MF) {
375 for (MachineInstr &MI : MBB) {
376 // Loads within this basic block after an LFENCE are not at risk of
377 // speculatively executing with invalid predicates from prior control
378 // flow. So break out of this block but continue scanning the function.
379 if (MI.getOpcode() == X86::LFENCE)
382 // Looking for loads only.
386 // An MFENCE is modeled as a load but isn't vulnerable to misspeculation.
387 if (MI.getOpcode() == X86::MFENCE)
399 bool X86SpeculativeLoadHardeningPass::runOnMachineFunction(
400 MachineFunction &MF) {
401 LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
404 Subtarget = &MF.getSubtarget<X86Subtarget>();
405 MRI = &MF.getRegInfo();
406 TII = Subtarget->getInstrInfo();
407 TRI = Subtarget->getRegisterInfo();
409 // FIXME: Support for 32-bit.
410 PS.emplace(MF, &X86::GR64_NOSPRegClass);
412 if (MF.begin() == MF.end())
413 // Nothing to do for a degenerate empty function...
416 // We support an alternative hardening technique based on a debug flag.
417 if (HardenEdgesWithLFENCE) {
418 hardenEdgesWithLFENCE(MF);
422 // Create a dummy debug loc to use for all the generated code here.
425 MachineBasicBlock &Entry = *MF.begin();
426 auto EntryInsertPt = Entry.SkipPHIsLabelsAndDebug(Entry.begin());
428 // Do a quick scan to see if we have any checkable loads.
429 bool HasVulnerableLoad = hasVulnerableLoad(MF);
431 // See if we have any conditional branching blocks that we will need to trace
432 // predicate state through.
433 SmallVector<BlockCondInfo, 16> Infos = collectBlockCondInfo(MF);
435 // If we have no interesting conditions or loads, nothing to do here.
436 if (!HasVulnerableLoad && Infos.empty())
439 // The poison value is required to be an all-ones value for many aspects of
440 // this mitigation.
441 const int PoisonVal = -1;
442 PS->PoisonReg = MRI->createVirtualRegister(PS->RC);
443 BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV64ri32), PS->PoisonReg)
447 // If we have loads being hardened and we've asked for call and ret edges to
448 // get a full fence-based mitigation, inject that fence.
449 if (HasVulnerableLoad && FenceCallAndRet) {
450 // We need to insert an LFENCE at the start of the function to suspend any
451 // incoming misspeculation from the caller. This helps two-fold: the caller
452 // may not have been protected as this code has been, and this code gets to
453 // not take any specific action to protect across calls.
454 // FIXME: We could skip this for functions which unconditionally return
455 // without calls.
456 BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::LFENCE));
458 ++NumLFENCEsInserted;
461 // If we guarded the entry with an LFENCE and have no conditionals to protect
462 // in blocks, then we're done.
463 if (FenceCallAndRet && Infos.empty())
464 // We may have changed the function's code at this point to insert fences.
468 if (HardenInterprocedurally && !FenceCallAndRet) {
469 // Set up the predicate state by extracting it from the incoming stack
470 // pointer so we pick up any misspeculation in our caller.
471 PS->InitialReg = extractPredStateFromSP(Entry, EntryInsertPt, Loc);
473 // Otherwise, just build the predicate state itself by zeroing a register
474 // as we don't need any initial state.
475 PS->InitialReg = MRI->createVirtualRegister(PS->RC);
476 unsigned PredStateSubReg = MRI->createVirtualRegister(&X86::GR32RegClass);
477 auto ZeroI = BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV32r0),
480 MachineOperand *ZeroEFLAGSDefOp =
481 ZeroI->findRegisterDefOperand(X86::EFLAGS);
482 assert(ZeroEFLAGSDefOp && ZeroEFLAGSDefOp->isImplicit() &&
483 "Must have an implicit def of EFLAGS!");
484 ZeroEFLAGSDefOp->setIsDead(true);
485 BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::SUBREG_TO_REG),
488 .addReg(PredStateSubReg)
489 .addImm(X86::sub_32bit);
492 // We're going to need to trace predicate state throughout the function's
493 // CFG. Prepare for this by setting up our initial state of PHIs with unique
494 // predecessor entries and all the initial predicate state.
495 canonicalizePHIOperands(MF);
497 // Track the updated values in an SSA updater to rewrite into SSA form at the
498 // end.
499 PS->SSA.Initialize(PS->InitialReg);
500 PS->SSA.AddAvailableValue(&Entry, PS->InitialReg);
502 // Trace through the CFG.
503 auto CMovs = tracePredStateThroughCFG(MF, Infos);
505 // We may also enter basic blocks in this function via exception handling
506 // control flow. Here, if we are hardening interprocedurally, we need to
507 // re-capture the predicate state from the throwing code. In the Itanium ABI,
508 // the throw will always look like a call to __cxa_throw and will have the
509 // predicate state in the stack pointer, so extract fresh predicate state from
510 // the stack pointer and make it available in SSA.
511 // FIXME: Handle non-itanium ABI EH models.
512 if (HardenInterprocedurally) {
513 for (MachineBasicBlock &MBB : MF) {
514 assert(!MBB.isEHScopeEntry() && "Only Itanium ABI EH supported!");
515 assert(!MBB.isEHFuncletEntry() && "Only Itanium ABI EH supported!");
516 assert(!MBB.isCleanupFuncletEntry() && "Only Itanium ABI EH supported!");
519 PS->SSA.AddAvailableValue(
521 extractPredStateFromSP(MBB, MBB.SkipPHIsAndLabels(MBB.begin()), Loc));
525 // If we are going to harden calls and jumps we need to unfold their memory
526 // operands immediately.
527 if (HardenIndirectCallsAndJumps)
528 unfoldCallAndJumpLoads(MF);
530 // Now that we have the predicate state available at the start of each block
531 // in the CFG, trace it through each block, hardening vulnerable instructions
532 // as we go.
533 tracePredStateThroughBlocksAndHarden(MF);
535 // Now rewrite all the uses of the pred state using the SSA updater to insert
536 // PHIs connecting the state between blocks along the CFG edges.
537 for (MachineInstr *CMovI : CMovs)
538 for (MachineOperand &Op : CMovI->operands()) {
539 if (!Op.isReg() || Op.getReg() != PS->InitialReg)
542 PS->SSA.RewriteUse(Op);
545 LLVM_DEBUG(dbgs() << "Final speculative load hardened function:\n"; MF.dump();
546 dbgs() << "\n"; MF.verify(this));
550 /// Implements the naive hardening approach of putting an LFENCE after every
551 /// potentially mis-predicted control flow construct.
553 /// We include this as an alternative mostly for the purpose of comparison. The
554 /// performance impact of this is expected to be extremely severe and not
555 /// practical for any real-world users.
556 void X86SpeculativeLoadHardeningPass::hardenEdgesWithLFENCE(
557 MachineFunction &MF) {
558 // First, we scan the function looking for blocks that are reached along edges
559 // that we might want to harden.
560 SmallSetVector<MachineBasicBlock *, 8> Blocks;
561 for (MachineBasicBlock &MBB : MF) {
562 // If there is no successor or only one, nothing to do here.
563 if (MBB.succ_size() <= 1)
566 // Skip blocks unless their terminators start with a branch. Other
567 // terminators don't seem interesting for guarding against misspeculation.
568 auto TermIt = MBB.getFirstTerminator();
569 if (TermIt == MBB.end() || !TermIt->isBranch())
572 // Add all the non-EH-pad successors to the blocks we want to harden. We
573 // skip EH pads because there isn't really a condition of interest on
574 // those edges.
575 for (MachineBasicBlock *SuccMBB : MBB.successors())
576 if (!SuccMBB->isEHPad())
577 Blocks.insert(SuccMBB);
580 for (MachineBasicBlock *MBB : Blocks) {
581 auto InsertPt = MBB->SkipPHIsAndLabels(MBB->begin());
582 BuildMI(*MBB, InsertPt, DebugLoc(), TII->get(X86::LFENCE));
584 ++NumLFENCEsInserted;
588 SmallVector<X86SpeculativeLoadHardeningPass::BlockCondInfo, 16>
589 X86SpeculativeLoadHardeningPass::collectBlockCondInfo(MachineFunction &MF) {
590 SmallVector<BlockCondInfo, 16> Infos;
592 // Walk the function and build up a summary for each block's conditions that
593 // we need to trace through.
594 for (MachineBasicBlock &MBB : MF) {
595 // If there is no successor or only one, nothing to do here.
596 if (MBB.succ_size() <= 1)
599 // We want to reliably handle any conditional branch terminators in the
600 // MBB, so we manually analyze the branch. We can handle all of the
601 // permutations here, including ones that analyzeBranch cannot.
603 // The approach is to walk backwards across the terminators, resetting at
604 // any unconditional non-indirect branch, and track all conditional edges
605 // to basic blocks as well as the fallthrough or unconditional successor
606 // edge. For each conditional edge, we track the target and the opposite
607 // condition code in order to inject a "no-op" cmov into that successor
608 // that will harden the predicate. For the fallthrough/unconditional
609 // edge, we inject a separate cmov for each conditional branch with
610 // matching condition codes. This effectively implements an "and" of the
611 // condition flags, even if there isn't a single condition flag that would
612 // directly implement that. We don't bother trying to optimize either of
613 // these cases because if such an optimization is possible, LLVM should
614 // have optimized the conditional *branches* in that way already to reduce
615 // instruction count. This late, we simply assume the minimal number of
616 // branch instructions is being emitted and use that to guide our cmov
617 // insertion.
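// For example, given `jne %bb.1` with a fallthrough to %bb.2, we will later
// insert a CMOV with the opposite condition (E) of the poison value into the
// predicate state along the edge to %bb.1, and a CMOV with the original
// condition (NE) along the fallthrough edge to %bb.2.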
619 BlockCondInfo Info = {&MBB, {}, nullptr};
621 // Now walk backwards through the terminators and build up successors they
622 // reach and the conditions.
623 for (MachineInstr &MI : llvm::reverse(MBB)) {
624 // Once we've handled all the terminators, we're done.
625 if (!MI.isTerminator())
628 // If we see a non-branch terminator, we can't handle anything so bail.
629 if (!MI.isBranch()) {
630 Info.CondBrs.clear();
634 // If we see an unconditional branch, reset our state, clear any
635 // fallthrough, and set this as the "else" successor.
636 if (MI.getOpcode() == X86::JMP_1) {
637 Info.CondBrs.clear();
642 // If we get an invalid condition, we have an indirect branch or some
643 // other unanalyzable "fallthrough" case. We model this as a nullptr for
644 // the destination so we can still guard any conditional successors.
645 // Consider code sequences like:
646 // ```
647 //   jCC L1
648 //   jmpq *%rax
649 // ```
650 // We still want to harden the edge to `L1`.
651 if (X86::getCondFromBranchOpc(MI.getOpcode()) == X86::COND_INVALID) {
652 Info.CondBrs.clear();
657 // We have a vanilla conditional branch, add it to our list.
658 Info.CondBrs.push_back(&MI);
660 if (Info.CondBrs.empty()) {
661 ++NumBranchesUntraced;
662 LLVM_DEBUG(dbgs() << "WARNING: unable to secure successors of block:\n";
667 Infos.push_back(Info);
673 /// Trace the predicate state through the CFG, instrumenting each conditional
674 /// branch such that misspeculation through an edge will poison the predicate
675 /// state.
677 /// Returns the list of inserted CMov instructions so that they can have their
678 /// uses of the predicate state rewritten into proper SSA form once it is
679 /// complete.
680 SmallVector<MachineInstr *, 16>
681 X86SpeculativeLoadHardeningPass::tracePredStateThroughCFG(
682 MachineFunction &MF, ArrayRef<BlockCondInfo> Infos) {
683 // Collect the inserted cmov instructions so we can rewrite their uses of the
684 // predicate state into SSA form.
685 SmallVector<MachineInstr *, 16> CMovs;
687 // Now walk all of the basic blocks looking for ones that end in conditional
688 // jumps where we need to update this register along each edge.
689 for (const BlockCondInfo &Info : Infos) {
690 MachineBasicBlock &MBB = *Info.MBB;
691 const SmallVectorImpl<MachineInstr *> &CondBrs = Info.CondBrs;
692 MachineInstr *UncondBr = Info.UncondBr;
694 LLVM_DEBUG(dbgs() << "Tracing predicate through block: " << MBB.getName()
696 ++NumCondBranchesTraced;
698 // Compute the non-conditional successor as either the target of any
699 // unconditional branch or the layout successor.
700 MachineBasicBlock *UncondSucc =
701 UncondBr ? (UncondBr->getOpcode() == X86::JMP_1
702 ? UncondBr->getOperand(0).getMBB()
704 : &*std::next(MachineFunction::iterator(&MBB));
706 // Count how many edges there are to any given successor.
707 SmallDenseMap<MachineBasicBlock *, int> SuccCounts;
709 ++SuccCounts[UncondSucc];
710 for (auto *CondBr : CondBrs)
711 ++SuccCounts[CondBr->getOperand(0).getMBB()];
713 // A lambda to insert cmov instructions into a block checking all of the
714 // condition codes in a sequence.
715 auto BuildCheckingBlockForSuccAndConds =
716 [&](MachineBasicBlock &MBB, MachineBasicBlock &Succ, int SuccCount,
717 MachineInstr *Br, MachineInstr *&UncondBr,
718 ArrayRef<X86::CondCode> Conds) {
719 // First, we split the edge to insert the checking block into a safe
720 // location.
722 (SuccCount == 1 && Succ.pred_size() == 1)
724 : splitEdge(MBB, Succ, SuccCount, Br, UncondBr, *TII);
726 bool LiveEFLAGS = Succ.isLiveIn(X86::EFLAGS);
728 CheckingMBB.addLiveIn(X86::EFLAGS);
730 // Now insert the cmovs to implement the checks.
731 auto InsertPt = CheckingMBB.begin();
732 assert((InsertPt == CheckingMBB.end() || !InsertPt->isPHI()) &&
733 "Should never have a PHI in the initial checking block as it "
734 "always has a single predecessor!");
736 // We will wire each cmov to each other, but need to start with the
737 // incoming pred state.
738 unsigned CurStateReg = PS->InitialReg;
740 for (X86::CondCode Cond : Conds) {
741 int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
742 auto CMovOp = X86::getCMovFromCond(Cond, PredStateSizeInBytes);
744 unsigned UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
745 // Note that we intentionally use an empty debug location so that
746 // this picks up the preceding location.
747 auto CMovI = BuildMI(CheckingMBB, InsertPt, DebugLoc(),
748 TII->get(CMovOp), UpdatedStateReg)
750 .addReg(PS->PoisonReg);
751 // If this is the last cmov and the EFLAGS weren't originally
752 // live-in, mark them as killed.
753 if (!LiveEFLAGS && Cond == Conds.back())
754 CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
757 LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump();
760 // The first one of the cmovs will be using the top level
761 // `PredStateReg` and needs to get rewritten into SSA form.
762 if (CurStateReg == PS->InitialReg)
763 CMovs.push_back(&*CMovI);
765 // The next cmov should start from this one's def.
766 CurStateReg = UpdatedStateReg;
769 // And put the last one into the available values for SSA form of our
770 // predicate state.
771 PS->SSA.AddAvailableValue(&CheckingMBB, CurStateReg);
774 std::vector<X86::CondCode> UncondCodeSeq;
775 for (auto *CondBr : CondBrs) {
776 MachineBasicBlock &Succ = *CondBr->getOperand(0).getMBB();
777 int &SuccCount = SuccCounts[&Succ];
779 X86::CondCode Cond = X86::getCondFromBranchOpc(CondBr->getOpcode());
780 X86::CondCode InvCond = X86::GetOppositeBranchCondition(Cond);
781 UncondCodeSeq.push_back(Cond);
783 BuildCheckingBlockForSuccAndConds(MBB, Succ, SuccCount, CondBr, UncondBr,
786 // Decrement the successor count now that we've split one of the edges.
787 // We need to keep the count of edges to the successor accurate in order
788 // to know above when to *replace* the successor in the CFG vs. just
789 // adding the new successor.
793 // Since we may have split edges and changed the number of successors,
794 // normalize the probabilities. This avoids doing it each time we split an
795 // edge.
796 MBB.normalizeSuccProbs();
798 // Finally, we need to insert cmovs into the "fallthrough" edge. Here, we
799 // need to intersect the other condition codes. We can do this by just
800 // doing a cmov for each one.
802 // If we have no fallthrough to protect (perhaps it is an indirect jump?)
803 // just skip this and continue.
806 assert(SuccCounts[UncondSucc] == 1 &&
807 "We should never have more than one edge to the unconditional "
808 "successor at this point because every other edge must have been "
811 // Sort and unique the codes to minimize them.
812 llvm::sort(UncondCodeSeq.begin(), UncondCodeSeq.end());
813 UncondCodeSeq.erase(std::unique(UncondCodeSeq.begin(), UncondCodeSeq.end()),
814 UncondCodeSeq.end());
816 // Build a checking version of the successor.
817 BuildCheckingBlockForSuccAndConds(MBB, *UncondSucc, /*SuccCount*/ 1,
818 UncondBr, UncondBr, UncondCodeSeq);
824 /// Compute the register class for the unfolded load.
826 /// FIXME: This should probably live in X86InstrInfo, potentially by adding
827 /// a way to unfold into a newly created vreg rather than requiring a register
828 /// input.
829 static const TargetRegisterClass *
830 getRegClassForUnfoldedLoad(MachineFunction &MF, const X86InstrInfo &TII,
833 unsigned UnfoldedOpc = TII.getOpcodeAfterMemoryUnfold(
834 Opcode, /*UnfoldLoad*/ true, /*UnfoldStore*/ false, &Index);
835 const MCInstrDesc &MCID = TII.get(UnfoldedOpc);
836 return TII.getRegClass(MCID, Index, &TII.getRegisterInfo(), MF);
839 void X86SpeculativeLoadHardeningPass::unfoldCallAndJumpLoads(
840 MachineFunction &MF) {
841 for (MachineBasicBlock &MBB : MF)
842 for (auto MII = MBB.instr_begin(), MIE = MBB.instr_end(); MII != MIE;) {
843 // Grab a reference and increment the iterator so we can remove this
844 // instruction if needed without disturbing the iteration.
845 MachineInstr &MI = *MII++;
847 // Must either be a call or a branch.
848 if (!MI.isCall() && !MI.isBranch())
850 // We only care about loading variants of these instructions.
854 switch (MI.getOpcode()) {
857 dbgs() << "ERROR: Found an unexpected loading branch or call "
859 MI.dump(); dbgs() << "\n");
860 report_fatal_error("Unexpected loading branch or call!");
863 case X86::FARCALL16m:
864 case X86::FARCALL32m:
869 // We cannot mitigate far jumps or calls, but we also don't expect them
870 // to be vulnerable to Spectre v1.2 style attacks.
874 case X86::CALL16m_NT:
876 case X86::CALL32m_NT:
878 case X86::CALL64m_NT:
885 case X86::TAILJMPm64:
886 case X86::TAILJMPm64_REX:
888 case X86::TCRETURNmi64:
889 case X86::TCRETURNmi: {
890 // Use the generic unfold logic now that we know we're dealing with
891 // expected instructions.
892 // FIXME: We don't have test coverage for all of these!
893 auto *UnfoldedRC = getRegClassForUnfoldedLoad(MF, *TII, MI.getOpcode());
896 << "ERROR: Unable to unfold load from instruction:\n";
897 MI.dump(); dbgs() << "\n");
898 report_fatal_error("Unable to unfold load!");
900 unsigned Reg = MRI->createVirtualRegister(UnfoldedRC);
901 SmallVector<MachineInstr *, 2> NewMIs;
902 // If we were able to compute an unfolded reg class, any failure here
903 // is just a programming error so just assert.
905 TII->unfoldMemoryOperand(MF, MI, Reg, /*UnfoldLoad*/ true,
906 /*UnfoldStore*/ false, NewMIs);
909 "Computed unfolded register class but failed to unfold");
910 // Now stitch the new instructions into place and erase the old one.
911 for (auto *NewMI : NewMIs)
912 MBB.insert(MI.getIterator(), NewMI);
913 MI.eraseFromParent();
915 dbgs() << "Unfolded load successfully into:\n";
916 for (auto *NewMI : NewMIs) {
924 llvm_unreachable("Escaped switch with default!");
928 /// Returns true if the instruction has no behavior (specified or otherwise)
929 /// that is based on the value of any of its register operands
931 /// A classical example of something that is inherently not data invariant is an
932 /// indirect jump -- the destination is loaded into icache based on the bits set
933 /// in the jump destination register.
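/// In contrast, something like a register-to-register ADD is expected to take
/// the same time regardless of its operand values and so is data invariant.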
935 /// FIXME: This should become part of our instruction tables.
936 static bool isDataInvariant(MachineInstr &MI) {
937 switch (MI.getOpcode()) {
939 // By default, assume that the instruction is not data invariant.
942 // Some target-independent operations that trivially lower to data-invariant
943 // instructions.
944 case TargetOpcode::COPY:
945 case TargetOpcode::INSERT_SUBREG:
946 case TargetOpcode::SUBREG_TO_REG:
949 // On x86 it is believed that imul is constant time w.r.t. the loaded data.
950 // However, they set flags and are perhaps the most surprisingly constant
951 // time operations so we call them out here separately.
953 case X86::IMUL16rri8:
956 case X86::IMUL32rri8:
959 case X86::IMUL64rri32:
960 case X86::IMUL64rri8:
962 // Bit scanning and counting instructions that are somewhat surprisingly
963 // constant time as they scan across bits and do other fairly complex
964 // operations like popcnt, but are believed to be constant time on x86.
965 // However, these set flags.
975 case X86::POPCNT16rr:
976 case X86::POPCNT32rr:
977 case X86::POPCNT64rr:
982 // Bit manipulation instructions are effectively combinations of basic
983 // arithmetic ops, and should still execute in constant time. These also
984 // set flags.
985 case X86::BLCFILL32rr:
986 case X86::BLCFILL64rr:
991 case X86::BLCMSK32rr:
992 case X86::BLCMSK64rr:
995 case X86::BLSFILL32rr:
996 case X86::BLSFILL64rr:
1000 case X86::BLSIC64rr:
1001 case X86::BLSMSK32rr:
1002 case X86::BLSMSK64rr:
1005 case X86::TZMSK32rr:
1006 case X86::TZMSK64rr:
1008 // Bit extracting and clearing instructions should execute in constant time,
1009 // and set flags.
1010 case X86::BEXTR32rr:
1011 case X86::BEXTR64rr:
1012 case X86::BEXTRI32ri:
1013 case X86::BEXTRI64ri:
1017 // Shift and rotate.
1018 case X86::ROL8r1: case X86::ROL16r1: case X86::ROL32r1: case X86::ROL64r1:
1019 case X86::ROL8rCL: case X86::ROL16rCL: case X86::ROL32rCL: case X86::ROL64rCL:
1020 case X86::ROL8ri: case X86::ROL16ri: case X86::ROL32ri: case X86::ROL64ri:
1021 case X86::ROR8r1: case X86::ROR16r1: case X86::ROR32r1: case X86::ROR64r1:
1022 case X86::ROR8rCL: case X86::ROR16rCL: case X86::ROR32rCL: case X86::ROR64rCL:
1023 case X86::ROR8ri: case X86::ROR16ri: case X86::ROR32ri: case X86::ROR64ri:
1024 case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1: case X86::SAR64r1:
1025 case X86::SAR8rCL: case X86::SAR16rCL: case X86::SAR32rCL: case X86::SAR64rCL:
1026 case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri: case X86::SAR64ri:
1027 case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1: case X86::SHL64r1:
1028 case X86::SHL8rCL: case X86::SHL16rCL: case X86::SHL32rCL: case X86::SHL64rCL:
1029 case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri: case X86::SHL64ri:
1030 case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1: case X86::SHR64r1:
1031 case X86::SHR8rCL: case X86::SHR16rCL: case X86::SHR32rCL: case X86::SHR64rCL:
1032 case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri: case X86::SHR64ri:
1033 case X86::SHLD16rrCL: case X86::SHLD32rrCL: case X86::SHLD64rrCL:
1034 case X86::SHLD16rri8: case X86::SHLD32rri8: case X86::SHLD64rri8:
1035 case X86::SHRD16rrCL: case X86::SHRD32rrCL: case X86::SHRD64rrCL:
1036 case X86::SHRD16rri8: case X86::SHRD32rri8: case X86::SHRD64rri8:
1038 // Basic arithmetic is constant time on the input but does set flags.
1039 case X86::ADC8rr: case X86::ADC8ri:
1040 case X86::ADC16rr: case X86::ADC16ri: case X86::ADC16ri8:
1041 case X86::ADC32rr: case X86::ADC32ri: case X86::ADC32ri8:
1042 case X86::ADC64rr: case X86::ADC64ri8: case X86::ADC64ri32:
1043 case X86::ADD8rr: case X86::ADD8ri:
1044 case X86::ADD16rr: case X86::ADD16ri: case X86::ADD16ri8:
1045 case X86::ADD32rr: case X86::ADD32ri: case X86::ADD32ri8:
1046 case X86::ADD64rr: case X86::ADD64ri8: case X86::ADD64ri32:
1047 case X86::AND8rr: case X86::AND8ri:
1048 case X86::AND16rr: case X86::AND16ri: case X86::AND16ri8:
1049 case X86::AND32rr: case X86::AND32ri: case X86::AND32ri8:
1050 case X86::AND64rr: case X86::AND64ri8: case X86::AND64ri32:
1051 case X86::OR8rr: case X86::OR8ri:
1052 case X86::OR16rr: case X86::OR16ri: case X86::OR16ri8:
1053 case X86::OR32rr: case X86::OR32ri: case X86::OR32ri8:
1054 case X86::OR64rr: case X86::OR64ri8: case X86::OR64ri32:
1055 case X86::SBB8rr: case X86::SBB8ri:
1056 case X86::SBB16rr: case X86::SBB16ri: case X86::SBB16ri8:
1057 case X86::SBB32rr: case X86::SBB32ri: case X86::SBB32ri8:
1058 case X86::SBB64rr: case X86::SBB64ri8: case X86::SBB64ri32:
1059 case X86::SUB8rr: case X86::SUB8ri:
1060 case X86::SUB16rr: case X86::SUB16ri: case X86::SUB16ri8:
1061 case X86::SUB32rr: case X86::SUB32ri: case X86::SUB32ri8:
1062 case X86::SUB64rr: case X86::SUB64ri8: case X86::SUB64ri32:
1063 case X86::XOR8rr: case X86::XOR8ri:
1064 case X86::XOR16rr: case X86::XOR16ri: case X86::XOR16ri8:
1065 case X86::XOR32rr: case X86::XOR32ri: case X86::XOR32ri8:
1066 case X86::XOR64rr: case X86::XOR64ri8: case X86::XOR64ri32:
1067 // Arithmetic with just 32-bit and 64-bit variants and no immediates.
1068 case X86::ADCX32rr: case X86::ADCX64rr:
1069 case X86::ADOX32rr: case X86::ADOX64rr:
1070 case X86::ANDN32rr: case X86::ANDN64rr:
1071 // Unary arithmetic operations.
1072 case X86::DEC8r: case X86::DEC16r: case X86::DEC32r: case X86::DEC64r:
1073 case X86::INC8r: case X86::INC16r: case X86::INC32r: case X86::INC64r:
1074 case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r:
1075 // Check whether the EFLAGS implicit-def is dead. We assume that this will
1076 // always find the implicit-def because this code should only be reached
1077 // for instructions that do in fact implicitly def this.
1078 if (!MI.findRegisterDefOperand(X86::EFLAGS)->isDead()) {
1079 // If we would clobber EFLAGS that are used, just bail for now.
1080 LLVM_DEBUG(dbgs() << " Unable to harden post-load due to EFLAGS: ";
1081 MI.dump(); dbgs() << "\n");
1085 // Otherwise, fallthrough to handle these the same as instructions that
1086 // don't set EFLAGS.
1089 // Unlike other arithmetic, NOT doesn't set EFLAGS.
1090 case X86::NOT8r: case X86::NOT16r: case X86::NOT32r: case X86::NOT64r:
1092 // Various move instructions used to zero or sign extend things. Note that we
1093 // intentionally don't support the _NOREX variants as we can't handle that
1094 // register constraint anyways.
1095 case X86::MOVSX16rr8:
1096 case X86::MOVSX32rr8: case X86::MOVSX32rr16:
1097 case X86::MOVSX64rr8: case X86::MOVSX64rr16: case X86::MOVSX64rr32:
1098 case X86::MOVZX16rr8:
1099 case X86::MOVZX32rr8: case X86::MOVZX32rr16:
1100 case X86::MOVZX64rr8: case X86::MOVZX64rr16:
1103 // Arithmetic instructions that are both constant time and don't set flags.
1113 // LEA doesn't actually access memory, and its arithmetic is constant time.
1116 case X86::LEA64_32r:
1122 /// Returns true if the instruction has no behavior (specified or otherwise)
1123 /// that is based on the value loaded from memory or the value of any
1124 /// non-address register operands.
1126 /// For example, if the latency of the instruction is dependent on the
1127 /// particular bits set in any of the registers *or* any of the bits loaded from
1128 /// memory.
1130 /// A classical example of something that is inherently not data invariant is an
1131 /// indirect jump -- the destination is loaded into icache based on the bits set
1132 /// in the jump destination register.
1134 /// FIXME: This should become part of our instruction tables.
1135 static bool isDataInvariantLoad(MachineInstr &MI) {
1136 switch (MI.getOpcode()) {
1138 // By default, assume that the load will immediately leak.
1141 // On x86 it is believed that imul is constant time w.r.t. the loaded data.
1142 // However, they set flags and are perhaps the most surprisingly constant
1143 // time operations so we call them out here separately.
1145 case X86::IMUL16rmi8:
1146 case X86::IMUL16rmi:
1148 case X86::IMUL32rmi8:
1149 case X86::IMUL32rmi:
1151 case X86::IMUL64rmi32:
1152 case X86::IMUL64rmi8:
1154 // Bit scanning and counting instructions that are somewhat surprisingly
1155 // constant time as they scan across bits and do other fairly complex
1156 // operations like popcnt, but are believed to be constant time on x86.
1157 // However, these set flags.
1164 case X86::LZCNT16rm:
1165 case X86::LZCNT32rm:
1166 case X86::LZCNT64rm:
1167 case X86::POPCNT16rm:
1168 case X86::POPCNT32rm:
1169 case X86::POPCNT64rm:
1170 case X86::TZCNT16rm:
1171 case X86::TZCNT32rm:
1172 case X86::TZCNT64rm:
1174 // Bit manipulation instructions are effectively combinations of basic
1175 // arithmetic ops, and should still execute in constant time. These also
1176 // set flags.
1177 case X86::BLCFILL32rm:
1178 case X86::BLCFILL64rm:
1181 case X86::BLCIC32rm:
1182 case X86::BLCIC64rm:
1183 case X86::BLCMSK32rm:
1184 case X86::BLCMSK64rm:
1187 case X86::BLSFILL32rm:
1188 case X86::BLSFILL64rm:
1191 case X86::BLSIC32rm:
1192 case X86::BLSIC64rm:
1193 case X86::BLSMSK32rm:
1194 case X86::BLSMSK64rm:
1197 case X86::TZMSK32rm:
1198 case X86::TZMSK64rm:
1200 // Bit extracting and clearing instructions should execute in constant time,
1201 // and set flags.
1202 case X86::BEXTR32rm:
1203 case X86::BEXTR64rm:
1204 case X86::BEXTRI32mi:
1205 case X86::BEXTRI64mi:
1209 // Basic arithmetic is constant time on the input but does set flags.
1244 // Check whether the EFLAGS implicit-def is dead. We assume that this will
1245 // always find the implicit-def because this code should only be reached
1246 // for instructions that do in fact implicitly def this.
1247 if (!MI.findRegisterDefOperand(X86::EFLAGS)->isDead()) {
1248 // If we would clobber EFLAGS that are used, just bail for now.
1249 LLVM_DEBUG(dbgs() << " Unable to harden post-load due to EFLAGS: ";
1250 MI.dump(); dbgs() << "\n");
1254 // Otherwise, fallthrough to handle these the same as instructions that
1255 // don't set EFLAGS.
1258 // Integer multiply w/o affecting flags is still believed to be constant
1259 // time on x86. Called out separately as this is among the most surprising
1260 // instructions to exhibit that behavior.
1264 // Arithmetic instructions that are both constant time and don't set flags.
1274 // Conversions are believed to be constant time and don't set flags.
1275 case X86::CVTTSD2SI64rm: case X86::VCVTTSD2SI64rm: case X86::VCVTTSD2SI64Zrm:
1276 case X86::CVTTSD2SIrm: case X86::VCVTTSD2SIrm: case X86::VCVTTSD2SIZrm:
1277 case X86::CVTTSS2SI64rm: case X86::VCVTTSS2SI64rm: case X86::VCVTTSS2SI64Zrm:
1278 case X86::CVTTSS2SIrm: case X86::VCVTTSS2SIrm: case X86::VCVTTSS2SIZrm:
1279 case X86::CVTSI2SDrm: case X86::VCVTSI2SDrm: case X86::VCVTSI2SDZrm:
1280 case X86::CVTSI2SSrm: case X86::VCVTSI2SSrm: case X86::VCVTSI2SSZrm:
1281 case X86::CVTSI642SDrm: case X86::VCVTSI642SDrm: case X86::VCVTSI642SDZrm:
1282 case X86::CVTSI642SSrm: case X86::VCVTSI642SSrm: case X86::VCVTSI642SSZrm:
1283 case X86::CVTSS2SDrm: case X86::VCVTSS2SDrm: case X86::VCVTSS2SDZrm:
1284 case X86::CVTSD2SSrm: case X86::VCVTSD2SSrm: case X86::VCVTSD2SSZrm:
1285 // AVX512 added unsigned integer conversions.
1286 case X86::VCVTTSD2USI64Zrm:
1287 case X86::VCVTTSD2USIZrm:
1288 case X86::VCVTTSS2USI64Zrm:
1289 case X86::VCVTTSS2USIZrm:
1290 case X86::VCVTUSI2SDZrm:
1291 case X86::VCVTUSI642SDZrm:
1292 case X86::VCVTUSI2SSZrm:
1293 case X86::VCVTUSI642SSZrm:
1295 // Loads to register don't set flags.
1297 case X86::MOV8rm_NOREX:
1301 case X86::MOVSX16rm8:
1302 case X86::MOVSX32rm16:
1303 case X86::MOVSX32rm8:
1304 case X86::MOVSX32rm8_NOREX:
1305 case X86::MOVSX64rm16:
1306 case X86::MOVSX64rm32:
1307 case X86::MOVSX64rm8:
1308 case X86::MOVZX16rm8:
1309 case X86::MOVZX32rm16:
1310 case X86::MOVZX32rm8:
1311 case X86::MOVZX32rm8_NOREX:
1312 case X86::MOVZX64rm16:
1313 case X86::MOVZX64rm8:
1318 static bool isEFLAGSLive(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
1319 const TargetRegisterInfo &TRI) {
1320 // Check if EFLAGS are alive by seeing if there is a def of them or they are
1321 // live-in, and then seeing if that def is in turn used.
1322 for (MachineInstr &MI : llvm::reverse(llvm::make_range(MBB.begin(), I))) {
1323 if (MachineOperand *DefOp = MI.findRegisterDefOperand(X86::EFLAGS)) {
1324 // If the def is dead, then EFLAGS is not live.
1325 if (DefOp->isDead())
1328 // Otherwise we've def'ed it, and it is live.
1331 // While at this instruction, also check if we use and kill EFLAGS
1332 // which means it isn't live.
1333 if (MI.killsRegister(X86::EFLAGS, &TRI))
1337 // If we didn't find anything conclusive (neither definitely alive nor
1338 // definitely dead), return whether it lives into the block.
1339 return MBB.isLiveIn(X86::EFLAGS);
1342 /// Trace the predicate state through each of the blocks in the function,
1343 /// hardening everything necessary along the way.
1345 /// We call this routine once the initial predicate state has been established
1346 /// for each basic block in the function in the SSA updater. This routine traces
1347 /// it through the instructions within each basic block, and for non-returning
1348 /// blocks informs the SSA updater about the final state that lives out of the
1349 /// block. Along the way, it hardens any vulnerable instruction using the
1350 /// currently valid predicate state. We have to do these two things together
1351 /// because the SSA updater only works across blocks. Within a block, we track
1352 /// the current predicate state directly and update it as it changes.
1354 /// This operates in two passes over each block. First, we analyze the loads in
1355 /// the block to determine which strategy will be used to harden them: hardening
1356 /// the address or hardening the loaded value when loaded into a register
1357 /// amenable to hardening. We have to process these first because the two
1358 /// strategies may interact -- later hardening may change what strategy we wish
1359 /// to use. We also will analyze data dependencies between loads and avoid
1360 /// hardening those loads that are data dependent on a load with a hardened
1361 /// address. We also skip hardening loads already behind an LFENCE as that is
1362 /// sufficient to harden them against misspeculation.
1364 /// Second, we actively trace the predicate state through the block, applying
1365 /// the hardening steps we determined necessary in the first pass as we go.
1367 /// These two passes are applied to each basic block. We operate one block at a
1368 /// time to simplify reasoning about reachability and sequencing.
1369 void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
1370 MachineFunction &MF) {
1371 SmallPtrSet<MachineInstr *, 16> HardenPostLoad;
1372 SmallPtrSet<MachineInstr *, 16> HardenLoadAddr;
1374 SmallSet<unsigned, 16> HardenedAddrRegs;
1376 SmallDenseMap<unsigned, unsigned, 32> AddrRegToHardenedReg;
1378 // Track the set of load-dependent registers through the basic block. Because
1379 // the values of these registers have an existing data dependency on a loaded
1380 // value which we would have checked, we can omit any checks on them.
1381 SparseBitVector<> LoadDepRegs;
1383 for (MachineBasicBlock &MBB : MF) {
1384 // The first pass over the block: collect all the loads which can have their
1385 // loaded value hardened and all the loads that instead need their address
1386 // hardened. During this walk we propagate load dependence for address
1387 // hardened loads and also look for LFENCE to stop hardening wherever
1388 // possible. When deciding whether or not to harden the loaded value or not,
1389 // we check to see if any registers used in the address will have been
1390 // hardened at this point and if so, harden any remaining address registers
1391 // as that often successfully re-uses hardened addresses and minimizes
1392 // instructions.
1394 // FIXME: We should consider an aggressive mode where we continue to keep as
1395 // many loads value hardened even when some address register hardening would
1396 // be free (due to reuse).
1398 // Note that we only need this pass if we are actually hardening loads.
1400 for (MachineInstr &MI : MBB) {
1401 // We naively assume that all def'ed registers of an instruction have
1402 // a data dependency on all of their operands.
1403 // FIXME: Do a more careful analysis of x86 to build a conservative
1404 // model here.
1405 if (llvm::any_of(MI.uses(), [&](MachineOperand &Op) {
1406 return Op.isReg() && LoadDepRegs.test(Op.getReg());
1408 for (MachineOperand &Def : MI.defs())
1410 LoadDepRegs.set(Def.getReg());
1412 // Both Intel and AMD are guiding that they will change the semantics of
1413 // LFENCE to be a speculation barrier, so if we see an LFENCE, there is
1414 // no more need to guard things in this block.
1415 if (MI.getOpcode() == X86::LFENCE)
1418 // If this instruction cannot load, nothing to do.
1422 // Some instructions which "load" are trivially safe or unimportant.
1423 if (MI.getOpcode() == X86::MFENCE)
1426 // Extract the memory operand information about this instruction.
1427 // FIXME: This doesn't handle loading pseudo instructions which we often
1428 // could handle with similarly generic logic. We probably need to add an
1429 // MI-layer routine similar to the MC-layer one we use here which maps
1430 // pseudos much like this maps real instructions.
1431 const MCInstrDesc &Desc = MI.getDesc();
1432 int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
1433 if (MemRefBeginIdx < 0) {
1435 << "WARNING: unable to harden loading instruction: ";
1440 MemRefBeginIdx += X86II::getOperandBias(Desc);
1442 MachineOperand &BaseMO =
1443 MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
1444 MachineOperand &IndexMO =
1445 MI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
1447 // If we have at least one (non-frame-index, non-RIP) register operand,
1448 // and neither operand is load-dependent, we need to check the load.
1449 unsigned BaseReg = 0, IndexReg = 0;
1450 if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP &&
1451 BaseMO.getReg() != X86::NoRegister)
1452 BaseReg = BaseMO.getReg();
1453 if (IndexMO.getReg() != X86::NoRegister)
1454 IndexReg = IndexMO.getReg();
1456 if (!BaseReg && !IndexReg)
1457 // No register operands!
1460 // If any register operand is dependent, this load is dependent and we
1461 // needn't check it.
1462 // FIXME: Is this true in the case where we are hardening loads after
1463 // they complete? Unclear, need to investigate.
1464 if ((BaseReg && LoadDepRegs.test(BaseReg)) ||
1465 (IndexReg && LoadDepRegs.test(IndexReg)))
1468 // If post-load hardening is enabled, this load is compatible with
1469 // post-load hardening, and we aren't already going to harden one of the
1470 // address registers, queue it up to be hardened post-load. Notably,
1471 // even once hardened this won't introduce a useful dependency that
1472 // could prune out subsequent loads.
1473 if (EnablePostLoadHardening && isDataInvariantLoad(MI) &&
1474 MI.getDesc().getNumDefs() == 1 && MI.getOperand(0).isReg() &&
1475 canHardenRegister(MI.getOperand(0).getReg()) &&
1476 !HardenedAddrRegs.count(BaseReg) &&
1477 !HardenedAddrRegs.count(IndexReg)) {
1478 HardenPostLoad.insert(&MI);
1479 HardenedAddrRegs.insert(MI.getOperand(0).getReg());
1483 // Record this instruction for address hardening and record its register
1484 // operands as being address-hardened.
1485 HardenLoadAddr.insert(&MI);
1487 HardenedAddrRegs.insert(BaseReg);
1489 HardenedAddrRegs.insert(IndexReg);
1491 for (MachineOperand &Def : MI.defs())
1493 LoadDepRegs.set(Def.getReg());
1496 // Now re-walk the instructions in the basic block, and apply whichever
1497 // hardening strategy we have elected. Note that we do this in a second
1498 // pass specifically so that we have the complete set of instructions for
1499 // which we will do post-load hardening and can defer it in certain
1500 // cases.
1502 // FIXME: This could probably be made even more effective by doing it
1503 // across the entire function. Rather than just walking the flat list
1504 // backwards here, we could walk the function in PO and each block bottom
1505 // up, allowing us to in some cases sink hardening across blocks. As
1506 // long as the in-block predicate state is used at the eventual hardening
1507 // site, this remains safe.
1508 for (MachineInstr &MI : MBB) {
1510 // We cannot both require hardening the def of a load and its address.
1511 assert(!(HardenLoadAddr.count(&MI) && HardenPostLoad.count(&MI)) &&
1512 "Requested to harden both the address and def of a load!");
1514 // Check if this is a load whose address needs to be hardened.
1515 if (HardenLoadAddr.erase(&MI)) {
1516 const MCInstrDesc &Desc = MI.getDesc();
1517 int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
1518 assert(MemRefBeginIdx >= 0 && "Cannot have an invalid index here!");
1520 MemRefBeginIdx += X86II::getOperandBias(Desc);
1522 MachineOperand &BaseMO =
1523 MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
1524 MachineOperand &IndexMO =
1525 MI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
1526 hardenLoadAddr(MI, BaseMO, IndexMO, AddrRegToHardenedReg);
1530 // Test if this instruction is one of our post load instructions (and
1531 // remove it from the set if so).
1532 if (HardenPostLoad.erase(&MI)) {
1533 assert(!MI.isCall() && "Must not try to post-load harden a call!");
1535 // If this is a data-invariant load, we want to try and sink any
1536 // hardening as far as possible.
1537 if (isDataInvariantLoad(MI)) {
1539 // Sink the instruction we'll need to harden as far as we can down the graph.
1540 MachineInstr *SunkMI = sinkPostLoadHardenedInst(MI, HardenPostLoad);
1542 // If we managed to sink this instruction, update everything so we
1543 // harden that instruction when we reach it in the instruction
1544 // sequence.
1545 if (SunkMI != &MI) {
1546 // If in sinking there was no instruction needing to be hardened,
1547 // we're done.
1551 // Otherwise, add this to the set of defs we harden.
1552 HardenPostLoad.insert(SunkMI);
1557 unsigned HardenedReg = hardenPostLoad(MI);
1559 // Mark the resulting hardened register as such so we don't re-harden.
1560 AddrRegToHardenedReg[HardenedReg] = HardenedReg;
1565 // Check for an indirect call or branch that may need its input hardened
1566 // even if we couldn't find the specific load used, or were able to
1567 // avoid hardening it for some reason. Note that here we cannot break
1568 // out afterward as we may still need to handle any call aspect of this
1569 // instruction.
1570 if ((MI.isCall() || MI.isBranch()) && HardenIndirectCallsAndJumps)
1571 hardenIndirectCallOrJumpInstr(MI, AddrRegToHardenedReg);
1574 // After we finish hardening loads we handle interprocedural hardening if
1575 // enabled and relevant for this instruction.
1576 if (!HardenInterprocedurally)
1578 if (!MI.isCall() && !MI.isReturn())
1581 // If this is a direct return (i.e., not a tail call), just directly harden
1582 // the return.
1583 if (MI.isReturn() && !MI.isCall()) {
1584 hardenReturnInstr(MI);
1588 // Otherwise we have a call. We need to handle transferring the predicate
1589 // state into a call and recovering it after the call returns unless this
1590 // is a tail call.
1591 assert(MI.isCall() && "Should only reach here for calls!");
1592 tracePredStateThroughCall(MI);
1595 HardenPostLoad.clear();
1596 HardenLoadAddr.clear();
1597 HardenedAddrRegs.clear();
1598 AddrRegToHardenedReg.clear();
1600 // Currently, we only track data-dependent loads within a basic block.
1601 // FIXME: We should see if this is necessary or if we could be more
1602 // aggressive here without opening up attack avenues.
1603 LoadDepRegs.clear();
1607 /// Save EFLAGS into the returned GPR. This can in turn be restored with
1608 /// `restoreEFLAGS`.
1610 /// Note that LLVM can only lower very simple patterns of saved and restored
1611 /// EFLAGS registers. The restore should always be within the same basic block
1612 /// as the save so that no PHI nodes are inserted.
1613 unsigned X86SpeculativeLoadHardeningPass::saveEFLAGS(
1614 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
1616 // FIXME: Hard coding this to a 32-bit register class seems weird, but matches
1617 // what instruction selection does.
1618 unsigned Reg = MRI->createVirtualRegister(&X86::GR32RegClass);
1619 // We directly copy the FLAGS register and rely on later lowering to clean
1620 // this up into the appropriate setCC instructions.
1621 BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), Reg).addReg(X86::EFLAGS);
1626 /// Restore EFLAGS from the provided GPR. This should be produced by
1627 /// `saveEFLAGS`.
1628 ///
1629 /// This must be done within the same basic block as the save in order to
1630 /// reliably lower.
1631 void X86SpeculativeLoadHardeningPass::restoreEFLAGS(
1632 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
1634 BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), X86::EFLAGS).addReg(Reg);
1638 /// Takes the current predicate state (in a register) and merges it into the
1639 /// stack pointer. The state is essentially a single bit, but we merge this in
1640 /// a way that won't form non-canonical pointers and also will be preserved
1641 /// across normal stack adjustments.
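/// Concretely, under misspeculation the all-ones state shifted into the high
/// bits and OR'ed into RSP sets the stack pointer's sign bit (without making
/// it non-canonical), while a zero (valid) state leaves RSP unchanged. Normal
/// stack adjustments only touch the low bits, so the merged state survives
/// them and can later be recovered by `extractPredStateFromSP`.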
void X86SpeculativeLoadHardeningPass::mergePredStateIntoSP(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
    unsigned PredStateReg) {
  unsigned TmpReg = MRI->createVirtualRegister(PS->RC);
  // FIXME: This hard codes a shift distance based on the number of bits needed
  // to stay canonical on 64-bit. We should compute this somehow and support
  // 32-bit as part of that.
  auto ShiftI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHL64ri), TmpReg)
                    .addReg(PredStateReg, RegState::Kill)
                    .addImm(47);
  ShiftI->addRegisterDead(X86::EFLAGS, TRI);

  auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), X86::RSP)
                 .addReg(X86::RSP)
                 .addReg(TmpReg, RegState::Kill);
  OrI->addRegisterDead(X86::EFLAGS, TRI);
}

/// Extracts the predicate state stored in the high bits of the stack pointer.
unsigned X86SpeculativeLoadHardeningPass::extractPredStateFromSP(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
    DebugLoc Loc) {
  unsigned PredStateReg = MRI->createVirtualRegister(PS->RC);
  unsigned TmpReg = MRI->createVirtualRegister(PS->RC);

  // We know that the stack pointer will have any preserved predicate state in
  // its high bit. We just want to smear this across the other bits. Turns out,
  // this is exactly what an arithmetic right shift does.
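  // For example (64-bit, illustrative): if the copy of RSP has its top bit
  // clear, shifting arithmetically by 63 yields 0; if the top bit was set by a
  // poisoned state merged into RSP in a callee, the shift yields all-ones,
  // recovering the -1 predicate state.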
  BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), TmpReg)
      .addReg(X86::RSP);
  auto ShiftI =
      BuildMI(MBB, InsertPt, Loc, TII->get(X86::SAR64ri), PredStateReg)
          .addReg(TmpReg, RegState::Kill)
          .addImm(TRI->getRegSizeInBits(*PS->RC) - 1);
  ShiftI->addRegisterDead(X86::EFLAGS, TRI);

  return PredStateReg;
}

void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
    MachineInstr &MI, MachineOperand &BaseMO, MachineOperand &IndexMO,
    SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc Loc = MI.getDebugLoc();

  // Check if EFLAGS are alive by seeing if there is a def of them or they are
  // live-in, and then seeing if that def is in turn used.
  bool EFLAGSLive = isEFLAGSLive(MBB, MI.getIterator(), *TRI);
  SmallVector<MachineOperand *, 2> HardenOpRegs;

  if (BaseMO.isFI()) {
    // A frame index is never a dynamically controllable load, so only
    // harden it if we're covering fixed address loads as well.
    LLVM_DEBUG(
        dbgs() << " Skipping hardening base of explicit stack frame load: ";
        MI.dump(); dbgs() << "\n");
  } else if (BaseMO.getReg() == X86::RIP ||
             BaseMO.getReg() == X86::NoRegister) {
    // For both RIP-relative addressed loads or absolute loads, we cannot
    // meaningfully harden them because the address being loaded has no
    // dynamic component.
    //
    // FIXME: When using a segment base (like TLS does) we end up with the
    // dynamic address being the base plus -1 because we can't mutate the
    // segment register here. This allows the signed 32-bit offset to point at
    // valid segment-relative addresses and load them successfully.
    LLVM_DEBUG(
        dbgs() << " Cannot harden base of "
               << (BaseMO.getReg() == X86::RIP ? "RIP-relative" : "no-base")
               << " address in a load!");
  } else {
    assert(BaseMO.isReg() &&
           "Only allowed to have a frame index or register base.");
    HardenOpRegs.push_back(&BaseMO);
  }
  if (IndexMO.getReg() != X86::NoRegister &&
      (HardenOpRegs.empty() ||
       HardenOpRegs.front()->getReg() != IndexMO.getReg()))
    HardenOpRegs.push_back(&IndexMO);

  assert((HardenOpRegs.size() == 1 || HardenOpRegs.size() == 2) &&
         "Should have exactly one or two registers to harden!");
  assert((HardenOpRegs.size() == 1 ||
          HardenOpRegs[0]->getReg() != HardenOpRegs[1]->getReg()) &&
         "Should not have two of the same registers!");

  // Remove any registers that have already been checked.
  llvm::erase_if(HardenOpRegs, [&](MachineOperand *Op) {
    // See if this operand's register has already been checked.
    auto It = AddrRegToHardenedReg.find(Op->getReg());
    if (It == AddrRegToHardenedReg.end())
      // Not checked, so retain this one.
      return false;

    // Otherwise, we can directly update this operand and remove it.
    Op->setReg(It->second);
    return true;
  });

  // If there are none left, we're done.
  if (HardenOpRegs.empty())
    return;

  // Compute the current predicate state.
  unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
  auto InsertPt = MI.getIterator();

  // If EFLAGS are live and we don't have access to instructions that avoid
  // clobbering EFLAGS we need to save and restore them. This in turn makes
  // the EFLAGS no longer live.
  unsigned FlagsReg = 0;
  if (EFLAGSLive && !Subtarget->hasBMI2()) {
    EFLAGSLive = false;
    FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
  }

  for (MachineOperand *Op : HardenOpRegs) {
    unsigned OpReg = Op->getReg();
    auto *OpRC = MRI->getRegClass(OpReg);
    unsigned TmpReg = MRI->createVirtualRegister(OpRC);

    // If this is a vector register, we'll need somewhat custom logic to handle
    // hardening it.
    if (!Subtarget->hasVLX() && (OpRC->hasSuperClassEq(&X86::VR128RegClass) ||
                                 OpRC->hasSuperClassEq(&X86::VR256RegClass))) {
      assert(Subtarget->hasAVX2() && "AVX2-specific register classes!");
      bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128RegClass);

      // Move our state into a vector register.
      // FIXME: We could skip this at the cost of longer encodings with AVX-512
      // but that doesn't seem likely to be worth it.
      unsigned VStateReg = MRI->createVirtualRegister(&X86::VR128RegClass);
      auto MovI =
          BuildMI(MBB, InsertPt, Loc, TII->get(X86::VMOV64toPQIrr), VStateReg)
              .addReg(StateReg);
      LLVM_DEBUG(dbgs() << " Inserting mov: "; MovI->dump(); dbgs() << "\n");

      // Broadcast it across the vector register.
      unsigned VBStateReg = MRI->createVirtualRegister(OpRC);
      auto BroadcastI = BuildMI(MBB, InsertPt, Loc,
                                TII->get(Is128Bit ? X86::VPBROADCASTQrr
                                                  : X86::VPBROADCASTQYrr),
                                VBStateReg)
                            .addReg(VStateReg);
      LLVM_DEBUG(dbgs() << " Inserting broadcast: "; BroadcastI->dump();
                 dbgs() << "\n");

      // Merge our potential poison state into the value with a vector or.
      auto OrI =
          BuildMI(MBB, InsertPt, Loc,
                  TII->get(Is128Bit ? X86::VPORrr : X86::VPORYrr), TmpReg)
              .addReg(VBStateReg)
              .addReg(OpReg);
      LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
    } else if (OpRC->hasSuperClassEq(&X86::VR128XRegClass) ||
               OpRC->hasSuperClassEq(&X86::VR256XRegClass) ||
               OpRC->hasSuperClassEq(&X86::VR512RegClass)) {
      assert(Subtarget->hasAVX512() && "AVX512-specific register classes!");
      bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128XRegClass);
      bool Is256Bit = OpRC->hasSuperClassEq(&X86::VR256XRegClass);
      if (Is128Bit || Is256Bit)
        assert(Subtarget->hasVLX() && "AVX512VL-specific register classes!");

      // Broadcast our state into a vector register.
      unsigned VStateReg = MRI->createVirtualRegister(OpRC);
      unsigned BroadcastOp =
          Is128Bit ? X86::VPBROADCASTQrZ128r
                   : Is256Bit ? X86::VPBROADCASTQrZ256r : X86::VPBROADCASTQrZr;
      auto BroadcastI =
          BuildMI(MBB, InsertPt, Loc, TII->get(BroadcastOp), VStateReg)
              .addReg(StateReg);
      LLVM_DEBUG(dbgs() << " Inserting broadcast: "; BroadcastI->dump();
                 dbgs() << "\n");

      // Merge our potential poison state into the value with a vector or.
      unsigned OrOp = Is128Bit ? X86::VPORQZ128rr
                               : Is256Bit ? X86::VPORQZ256rr : X86::VPORQZrr;
      auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOp), TmpReg)
                     .addReg(VStateReg)
                     .addReg(OpReg);
      LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
    } else {
      // FIXME: Need to support GR32 here for 32-bit code.
      assert(OpRC->hasSuperClassEq(&X86::GR64RegClass) &&
             "Not a supported register class for address hardening!");

      if (!EFLAGSLive) {
        // Merge our potential poison state into the value with an or.
        auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), TmpReg)
                       .addReg(StateReg)
                       .addReg(OpReg);
        OrI->addRegisterDead(X86::EFLAGS, TRI);
        LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
      } else {
        // We need to avoid touching EFLAGS so shift out all but the least
        // significant bit using the instruction that doesn't update flags.
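        // Illustrative note (assuming the poison state is all-ones, as this
        // pass sets up): SHRX takes its shift count modulo the operand width,
        // so a zero state shifts by 0 and leaves the address unchanged, while
        // a poisoned state shifts by 63, collapsing the attacker-controlled
        // address down to its single top bit.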
        auto ShiftI =
            BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHRX64rr), TmpReg)
                .addReg(OpReg)
                .addReg(StateReg);
        LLVM_DEBUG(dbgs() << " Inserting shrx: "; ShiftI->dump();
                   dbgs() << "\n");
      }
    }

    // Record this register as checked and update the operand.
    assert(!AddrRegToHardenedReg.count(Op->getReg()) &&
           "Should not have checked this register yet!");
    AddrRegToHardenedReg[Op->getReg()] = TmpReg;
    Op->setReg(TmpReg);

    ++NumAddrRegsHardened;
  }

  // And restore the flags if needed.
  if (FlagsReg)
    restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);
}
MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
    MachineInstr &InitialMI, SmallPtrSetImpl<MachineInstr *> &HardenedInstrs) {
  assert(isDataInvariantLoad(InitialMI) &&
         "Cannot get here with a non-invariant load!");

  // See if we can sink hardening the loaded value.
  auto SinkCheckToSingleUse =
      [&](MachineInstr &MI) -> Optional<MachineInstr *> {
    unsigned DefReg = MI.getOperand(0).getReg();

    // We need to find a single use to which we can sink the check. We can
    // primarily do this because many uses may already end up checked on their
    // own.
    MachineInstr *SingleUseMI = nullptr;
    for (MachineInstr &UseMI : MRI->use_instructions(DefReg)) {
      // If we're already going to harden this use, it is data invariant and
      // within our block.
      if (HardenedInstrs.count(&UseMI)) {
        if (!isDataInvariantLoad(UseMI)) {
          // If we've already decided to harden a non-load, we must have sunk
          // some other post-load hardened instruction to it and it must itself
          // be data-invariant.
          assert(isDataInvariant(UseMI) &&
                 "Data variant instruction being hardened!");
          continue;
        }

        // Otherwise, this is a load and the load component can't be data
        // invariant so check how this register is being used.
        const MCInstrDesc &Desc = UseMI.getDesc();
        int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
        assert(MemRefBeginIdx >= 0 &&
               "Should always have mem references here!");
        MemRefBeginIdx += X86II::getOperandBias(Desc);

        MachineOperand &BaseMO =
            UseMI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
        MachineOperand &IndexMO =
            UseMI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
        if ((BaseMO.isReg() && BaseMO.getReg() == DefReg) ||
            (IndexMO.isReg() && IndexMO.getReg() == DefReg))
          // The load uses the register as part of its address making it not
          // invariant.
          return {};

        continue;
      }

      if (SingleUseMI)
        // We already have a single use, this would make two. Bail.
        return {};

      // If this single use isn't data invariant, isn't in this block, or has
      // interfering EFLAGS, we can't sink the hardening to it.
      if (!isDataInvariant(UseMI) || UseMI.getParent() != MI.getParent())
        return {};

      // If this instruction defines multiple registers bail as we won't harden
      // all of them.
      if (UseMI.getDesc().getNumDefs() > 1)
        return {};

      // If this register isn't a virtual register we can't walk uses of sanely,
      // just bail. Also check that its register class is one of the ones we
      // can harden.
      unsigned UseDefReg = UseMI.getOperand(0).getReg();
      if (!TRI->isVirtualRegister(UseDefReg) ||
          !canHardenRegister(UseDefReg))
        return {};

      SingleUseMI = &UseMI;
    }

    // If SingleUseMI is still null, there is no use that needs its own
    // checking. Otherwise, it is the single use that needs checking.
    return {SingleUseMI};
  };

  MachineInstr *MI = &InitialMI;
  while (Optional<MachineInstr *> SingleUse = SinkCheckToSingleUse(*MI)) {
    // Update which MI we're checking now.
    MI = *SingleUse;
    if (!MI)
      break;
  }

  return MI;
}
bool X86SpeculativeLoadHardeningPass::canHardenRegister(unsigned Reg) {
  auto *RC = MRI->getRegClass(Reg);
  int RegBytes = TRI->getRegSizeInBits(*RC) / 8;
  if (RegBytes > 8)
    // We don't support post-load hardening of vectors.
    return false;

  // If this register class is explicitly constrained to a class that doesn't
  // require a REX prefix, we may not be able to satisfy that constraint when
  // emitting the hardening instructions, so bail out here.
  // FIXME: This seems like a pretty lame hack. The way this comes up is when we
  // end up both with a NOREX and REX-only register as operands to the hardening
  // instructions. It would be better to fix that code to handle this situation
  // rather than hack around it in this way.
  const TargetRegisterClass *NOREXRegClasses[] = {
      &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass,
      &X86::GR32_NOREXRegClass, &X86::GR64_NOREXRegClass};
  if (RC == NOREXRegClasses[Log2_32(RegBytes)])
    return false;

  const TargetRegisterClass *GPRRegClasses[] = {
      &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32RegClass,
      &X86::GR64RegClass};
  return RC->hasSuperClassEq(GPRRegClasses[Log2_32(RegBytes)]);
}
/// Harden a value in a register.
///
/// This is the low-level logic to fully harden a value sitting in a register
/// against leaking during speculative execution.
///
/// Unlike hardening an address that is used by a load, this routine is required
/// to hide *all* incoming bits in the register.
///
/// `Reg` must be a virtual register. Currently, it is required to be a GPR no
/// larger than the predicate state register. FIXME: We should support vector
/// registers here by broadcasting the predicate state.
///
/// The new, hardened virtual register is returned. It will have the same
/// register class as `Reg`.
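///
/// For a GPR this amounts to something like (illustrative only):
///
///   %hardened = OR64rr %reg, %state
///
/// where the predicate state may first be copied down to the width of `Reg`.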
unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
    unsigned Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
    DebugLoc Loc) {
  assert(canHardenRegister(Reg) && "Cannot harden this register!");
  assert(TRI->isVirtualRegister(Reg) && "Cannot harden a physical register!");

  auto *RC = MRI->getRegClass(Reg);
  int Bytes = TRI->getRegSizeInBits(*RC) / 8;

  unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);

  // FIXME: Need to teach this about 32-bit mode.
  if (Bytes != 8) {
    unsigned SubRegImms[] = {X86::sub_8bit, X86::sub_16bit, X86::sub_32bit};
    unsigned SubRegImm = SubRegImms[Log2_32(Bytes)];
    unsigned NarrowStateReg = MRI->createVirtualRegister(RC);
    BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), NarrowStateReg)
        .addReg(StateReg, 0, SubRegImm);
    StateReg = NarrowStateReg;
  }

  unsigned FlagsReg = 0;
  if (isEFLAGSLive(MBB, InsertPt, *TRI))
    FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);

  unsigned NewReg = MRI->createVirtualRegister(RC);
  unsigned OrOpCodes[] = {X86::OR8rr, X86::OR16rr, X86::OR32rr, X86::OR64rr};
  unsigned OrOpCode = OrOpCodes[Log2_32(Bytes)];
  auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOpCode), NewReg)
                 .addReg(StateReg)
                 .addReg(Reg);
  OrI->addRegisterDead(X86::EFLAGS, TRI);
  LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");

  if (FlagsReg)
    restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);

  return NewReg;
}

/// Harden a load by hardening the loaded value in the defined register.
///
/// We can harden a non-leaking load into a register without touching the
/// address by just hiding all of the loaded bits during misspeculation. We use
/// an `or` instruction to do this because we set up our poison value as all
/// ones. The goal is simply that the loaded bits are never exposed to
/// speculative execution with their original values; coercing them all to one
/// is sufficient.
///
/// Returns the newly hardened register.
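///
/// Illustrative sketch of the rewrite (not literal output):
///
///   %val = MOV64rm %base, ...
///
/// becomes
///
///   %unhardened = MOV64rm %base, ...
///   %val = OR64rr %unhardened, %state
///
/// so every original use of the loaded value sees the hardened register.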
unsigned X86SpeculativeLoadHardeningPass::hardenPostLoad(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc Loc = MI.getDebugLoc();

  auto &DefOp = MI.getOperand(0);
  unsigned OldDefReg = DefOp.getReg();
  auto *DefRC = MRI->getRegClass(OldDefReg);

  // Because we want to completely replace the uses of this def'ed value with
  // the hardened value, create a dedicated new register that will only be used
  // to communicate the unhardened value to the hardening.
  unsigned UnhardenedReg = MRI->createVirtualRegister(DefRC);
  DefOp.setReg(UnhardenedReg);

  // Now harden this register's value, getting a hardened reg that is safe to
  // use. Note that we insert the instructions to compute this *after* the
  // defining instruction, not before it.
  unsigned HardenedReg = hardenValueInRegister(
      UnhardenedReg, MBB, std::next(MI.getIterator()), Loc);

  // Finally, replace the old register (which now only has the uses of the
  // original def) with the hardened register.
  MRI->replaceRegWith(/*FromReg*/ OldDefReg, /*ToReg*/ HardenedReg);

  ++NumPostLoadRegsHardened;
  return HardenedReg;
}

/// Harden a return instruction.
///
/// Returns implicitly perform a load which we need to harden. Without hardening
/// this load, an attacker may speculatively write over the return address to
/// steer speculation of the return to an attacker controlled address. This is
/// called Spectre v1.1 or Bounds Check Bypass Store (BCBS) and is described in:
///
/// https://people.csail.mit.edu/vlk/spectre11.pdf
///
/// We can harden this by introducing an LFENCE that will delay any load of the
/// return address until prior instructions have retired (and thus are not being
/// speculated), or we can harden the address used by the implicit load: the
/// stack pointer.
///
/// If we are not using an LFENCE, hardening the stack pointer has an additional
/// benefit: it allows us to pass the predicate state accumulated in this
/// function back to the caller. In the absence of a BCBS attack on the return,
/// the caller will typically be resumed and speculatively executed due to the
/// Return Stack Buffer (RSB) prediction, which is very accurate and has a high
/// priority. It is possible that some code from the caller will be executed
/// speculatively even during a BCBS-attacked return until the steering takes
/// effect. Whenever this happens, the caller can recover the (poisoned)
/// predicate state from the stack pointer and continue to harden loads.
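///
/// Concretely, when the LFENCE mode is not enabled, this emits the same
/// shift-and-or sequence as `mergePredStateIntoSP` immediately before the
/// return, roughly:
///
///   shlq $47, %state
///   orq  %state, %rsp
///   retq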
void X86SpeculativeLoadHardeningPass::hardenReturnInstr(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc Loc = MI.getDebugLoc();
  auto InsertPt = MI.getIterator();

  if (FenceCallAndRet) {
    // Simply forcibly block speculation of loads out of the function by using
    // an LFENCE. This is potentially a heavy-weight mitigation strategy, but
    // should be secure, is simple from an ABI perspective, and the cost can be
    // minimized through inlining.
    //
    // FIXME: We should investigate ways to establish a strong data-dependency
    // on the return. However, poisoning the stack pointer is unlikely to work
    // because the return is *predicted* rather than relying on the load of the
    // return address to actually resolve.
    BuildMI(MBB, InsertPt, Loc, TII->get(X86::LFENCE));
    ++NumLFENCEsInserted;
    return;
  }

  // Take our predicate state, shift it to the high 17 bits (so that we keep
  // pointers canonical) and merge it into RSP. This will allow the caller to
  // extract it when we return (speculatively).
  mergePredStateIntoSP(MBB, InsertPt, Loc, PS->SSA.GetValueAtEndOfBlock(&MBB));
}

/// Trace the predicate state through a call.
///
/// There are several layers of this needed to handle the full complexity of
/// calls.
///
/// First, we need to send the predicate state into the called function. We do
/// this by merging it into the high bits of the stack pointer.
///
/// For tail calls, this is all we need to do.
///
/// For calls where we might return to control flow, we further need to extract
/// the predicate state built up within that function from the high bits of the
/// stack pointer, and make that the newly available predicate state.
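///
/// Roughly (illustrative only), for a non-tail call the resulting shape is:
///
///   shlq $47, %state
///   orq  %state, %rsp       # pass the state to the callee
///   callq callee
///   movq %rsp, %state
///   sarq $63, %state        # recover the state the callee left in RSP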
void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
    MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  auto InsertPt = MI.getIterator();
  DebugLoc Loc = MI.getDebugLoc();

  // First, we transfer the predicate state into the called function by merging
  // it into the stack pointer. This will kill the current def of the state.
  unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
  mergePredStateIntoSP(MBB, InsertPt, Loc, StateReg);

  // If this call is also a return, it is a tail call and we don't need anything
  // else to handle it so just return.
  // FIXME: We should also handle noreturn calls.
  if (MI.isReturn())
    return;

  // We need to step past the call and recover the predicate state from SP after
  // the return, and make this new state available.
  ++InsertPt;
  unsigned NewStateReg = extractPredStateFromSP(MBB, InsertPt, Loc);
  PS->SSA.AddAvailableValue(&MBB, NewStateReg);
}

/// An attacker may speculatively store over a value that is then speculatively
/// loaded and used as the target of an indirect call or jump instruction. This
/// is called Spectre v1.2 or Bounds Check Bypass Store (BCBS) and is described
/// in:
///
/// https://people.csail.mit.edu/vlk/spectre11.pdf
///
/// When this happens, the speculative execution of the call or jump will end up
/// being steered to this attacker controlled address. While most such loads
/// will be adequately hardened already, we want to ensure that they are
/// definitively treated as needing post-load hardening. While address hardening
/// is sufficient to prevent secret data from leaking to the attacker, it may
/// not be sufficient to prevent an attacker from steering speculative
/// execution. We forcibly unfolded all relevant loads above, and so will always
/// have an opportunity to post-load harden here; we just need to scan for cases
/// not already flagged and add them.
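///
/// Illustrative effect (not literal emitted text): an indirect jump such as
///
///   jmpq *%target
///
/// has its target register merged with the predicate state first, e.g.
///
///   orq  %state, %target
///   jmpq *%target
///
/// so that on a misspeculated path the target collapses to all-ones rather
/// than an attacker-chosen address.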
void X86SpeculativeLoadHardeningPass::hardenIndirectCallOrJumpInstr(
    MachineInstr &MI,
    SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
  switch (MI.getOpcode()) {
  case X86::FARCALL16m:
  case X86::FARCALL32m:
  case X86::FARCALL64:
  case X86::FARJMP16m:
  case X86::FARJMP32m:
    // We don't need to harden either far calls or far jumps as they are
    // safe from Spectre.
    return;

  default:
    break;
  }

  // We should never see a loading instruction at this point, as those should
  // have been unfolded.
  assert(!MI.mayLoad() && "Found a lingering loading instruction!");

  // If the first operand isn't a register, this is a branch or call
  // instruction with an immediate operand which doesn't need to be hardened.
  if (!MI.getOperand(0).isReg())
    return;

  // For all of these, the target register is the first operand of the
  // instruction.
  auto &TargetOp = MI.getOperand(0);
  unsigned OldTargetReg = TargetOp.getReg();

  // Try to lookup a hardened version of this register. We retain a reference
  // here as we want to update the map to track any newly computed hardened
  // register.
  unsigned &HardenedTargetReg = AddrRegToHardenedReg[OldTargetReg];

  // If we don't have a hardened register yet, compute one. Otherwise, just use
  // the already hardened register.
  //
  // FIXME: It is a little suspect that we use partially hardened registers that
  // only feed addresses. The complexity of partial hardening with SHRX
  // continues to pile up. Should definitively measure its value and consider
  // eliminating it.
  if (!HardenedTargetReg)
    HardenedTargetReg = hardenValueInRegister(
        OldTargetReg, *MI.getParent(), MI.getIterator(), MI.getDebugLoc());

  // Set the target operand to the hardened register.
  TargetOp.setReg(HardenedTargetReg);

  ++NumCallsOrJumpsHardened;
}

INITIALIZE_PASS_BEGIN(X86SpeculativeLoadHardeningPass, DEBUG_TYPE,
                      "X86 speculative load hardener", false, false)
INITIALIZE_PASS_END(X86SpeculativeLoadHardeningPass, DEBUG_TYPE,
                    "X86 speculative load hardener", false, false)

FunctionPass *llvm::createX86SpeculativeLoadHardeningPass() {
  return new X86SpeculativeLoadHardeningPass();
}