//===-- SafeStack.cpp - Safe Stack Insertion ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass splits the stack into the safe stack (kept as-is for LLVM backend)
// and the unsafe stack (explicitly allocated and managed through the runtime
// support library).
//
// http://clang.llvm.org/docs/SafeStack.html
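//
// (In Clang, this instrumentation is enabled with -fsanitize=safe-stack.)
//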
//===----------------------------------------------------------------------===//
#include "SafeStackColoring.h"
#include "SafeStackLayout.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
using namespace llvm;
using namespace llvm::safestack;

#define DEBUG_TYPE "safe-stack"
namespace llvm {

STATISTIC(NumFunctions, "Total number of functions");
STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack");
STATISTIC(NumUnsafeStackRestorePointsFunctions,
          "Number of functions that use setjmp or exceptions");

STATISTIC(NumAllocas, "Total number of allocas");
STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas");
STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas");
STATISTIC(NumUnsafeByValArguments, "Number of unsafe byval arguments");
STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads");

} // namespace llvm

namespace {
/// Rewrite an SCEV expression for a memory access address to an expression that
/// represents offset from the given alloca.
///
/// The implementation simply replaces all mentions of the alloca with zero.
class AllocaOffsetRewriter : public SCEVRewriteVisitor<AllocaOffsetRewriter> {
  const Value *AllocaPtr;

public:
  AllocaOffsetRewriter(ScalarEvolution &SE, const Value *AllocaPtr)
      : SCEVRewriteVisitor(SE), AllocaPtr(AllocaPtr) {}

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Expr->getValue() == AllocaPtr)
      return SE.getZero(Expr->getType());
    return Expr;
  }
};
/// The SafeStack pass splits the stack of each function into the safe
/// stack, which is only accessed through memory safe dereferences (as
/// determined statically), and the unsafe stack, which contains all
/// local variables that are accessed in ways that we can't prove to
/// be safe.
class SafeStack {
  Function &F;
  const TargetLoweringBase &TL;
  const DataLayout &DL;
  ScalarEvolution &SE;

  Type *StackPtrTy;
  Type *IntPtrTy;
  Type *Int32Ty;
  Type *Int8Ty;

  Value *UnsafeStackPtr = nullptr;

  /// Unsafe stack alignment. Each stack frame must ensure that the stack is
  /// aligned to this value. We need to re-align the unsafe stack if the
  /// alignment of any object on the stack exceeds this value.
  ///
  /// 16 seems like a reasonable upper bound on the alignment of objects that we
  /// might expect to appear on the stack on most common targets.
  enum { StackAlignment = 16 };
  /// \brief Return the value of the stack canary.
  Value *getStackGuard(IRBuilder<> &IRB, Function &F);

  /// \brief Load stack guard from the frame and check if it has changed.
  void checkStackGuard(IRBuilder<> &IRB, Function &F, ReturnInst &RI,
                       AllocaInst *StackGuardSlot, Value *StackGuard);

  /// \brief Find all static allocas, dynamic allocas, return instructions and
  /// stack restore points (exception unwind blocks and setjmp calls) in the
  /// given function and append them to the respective vectors.
  void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas,
                 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
                 SmallVectorImpl<Argument *> &ByValArguments,
                 SmallVectorImpl<ReturnInst *> &Returns,
                 SmallVectorImpl<Instruction *> &StackRestorePoints);

  /// \brief Calculate the allocation size of a given alloca. Returns 0 if the
  /// size can not be statically determined.
  uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI);

  /// \brief Allocate space for all static allocas in \p StaticAllocas,
  /// replace allocas with pointers into the unsafe stack and generate code to
  /// restore the stack pointer before all return instructions in \p Returns.
  ///
  /// \returns A pointer to the top of the unsafe stack after all unsafe static
  /// allocas are allocated.
  Value *moveStaticAllocasToUnsafeStack(IRBuilder<> &IRB, Function &F,
                                        ArrayRef<AllocaInst *> StaticAllocas,
                                        ArrayRef<Argument *> ByValArguments,
                                        ArrayRef<ReturnInst *> Returns,
                                        Instruction *BasePointer,
                                        AllocaInst *StackGuardSlot);

  /// \brief Generate code to restore the stack after all stack restore points
  /// in \p StackRestorePoints.
  ///
  /// \returns A local variable in which to maintain the dynamic top of the
  /// unsafe stack if needed.
  AllocaInst *
  createStackRestorePoints(IRBuilder<> &IRB, Function &F,
                           ArrayRef<Instruction *> StackRestorePoints,
                           Value *StaticTop, bool NeedDynamicTop);

  /// \brief Replace all allocas in \p DynamicAllocas with code to allocate
  /// space dynamically on the unsafe stack and store the dynamic unsafe stack
  /// top to \p DynamicTop if non-null.
  void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr,
                                       AllocaInst *DynamicTop,
                                       ArrayRef<AllocaInst *> DynamicAllocas);

  bool IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize);

  bool IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
                          const Value *AllocaPtr, uint64_t AllocaSize);
  bool IsAccessSafe(Value *Addr, uint64_t Size, const Value *AllocaPtr,
                    uint64_t AllocaSize);
public:
  SafeStack(Function &F, const TargetLoweringBase &TL, const DataLayout &DL,
            ScalarEvolution &SE)
      : F(F), TL(TL), DL(DL), SE(SE),
        StackPtrTy(Type::getInt8PtrTy(F.getContext())),
        IntPtrTy(DL.getIntPtrType(F.getContext())),
        Int32Ty(Type::getInt32Ty(F.getContext())),
        Int8Ty(Type::getInt8Ty(F.getContext())) {}

  // Run the transformation on the associated function.
  // Returns whether the function was changed.
  bool run();
};
uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) {
  uint64_t Size = DL.getTypeAllocSize(AI->getAllocatedType());
  if (AI->isArrayAllocation()) {
    auto C = dyn_cast<ConstantInt>(AI->getArraySize());
    if (!C)
      return 0;
    Size *= C->getZExtValue();
  }
  return Size;
}
bool SafeStack::IsAccessSafe(Value *Addr, uint64_t AccessSize,
                             const Value *AllocaPtr, uint64_t AllocaSize) {
  AllocaOffsetRewriter Rewriter(SE, AllocaPtr);
  const SCEV *Expr = Rewriter.visit(SE.getSCEV(Addr));

  uint64_t BitWidth = SE.getTypeSizeInBits(Expr->getType());
  ConstantRange AccessStartRange = SE.getUnsignedRange(Expr);
  ConstantRange SizeRange =
      ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AccessSize));
  ConstantRange AccessRange = AccessStartRange.add(SizeRange);
  ConstantRange AllocaRange =
      ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AllocaSize));
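  // For example, if the rewritten offset is known to lie in [0, 4] and
  // AccessSize is 4, AccessRange becomes [0, 8), which is contained in the
  // [0, 8) AllocaRange of an 8-byte alloca, so the access is safe.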
  bool Safe = AllocaRange.contains(AccessRange);

  DEBUG(dbgs() << "[SafeStack] "
               << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
               << *AllocaPtr << "\n"
               << " Access " << *Addr << "\n"
               << " SCEV " << *Expr
               << " U: " << SE.getUnsignedRange(Expr)
               << ", S: " << SE.getSignedRange(Expr) << "\n"
               << " Range " << AccessRange << "\n"
               << " AllocaRange " << AllocaRange << "\n"
               << " " << (Safe ? "safe" : "unsafe") << "\n");

  return Safe;
}
bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
                                   const Value *AllocaPtr,
                                   uint64_t AllocaSize) {
  // All MemIntrinsics have destination address in Arg0 and size in Arg2.
  if (MI->getRawDest() != U) return true;
  const auto *Len = dyn_cast<ConstantInt>(MI->getLength());
  // Non-constant size => unsafe. FIXME: try SCEV getRange.
  if (!Len) return false;
  return IsAccessSafe(U, Len->getZExtValue(), AllocaPtr, AllocaSize);
}
/// Check whether a given allocation must be put on the safe
/// stack or not. The function analyzes all uses of AI and checks whether it is
/// only accessed in a memory safe way (as decided statically).
bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
  // Go through all uses of this alloca and check whether all accesses to the
  // allocated object are statically known to be memory safe and, hence, the
  // object can be placed on the safe stack.
  SmallPtrSet<const Value *, 16> Visited;
  SmallVector<const Value *, 8> WorkList;
  WorkList.push_back(AllocaPtr);

  // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
  while (!WorkList.empty()) {
    const Value *V = WorkList.pop_back_val();
    for (const Use &UI : V->uses()) {
      auto I = cast<const Instruction>(UI.getUser());
      assert(V == UI.get());

      switch (I->getOpcode()) {
      case Instruction::Load: {
        if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getType()), AllocaPtr,
                          AllocaSize))
          return false;
        break;
      }
      case Instruction::VAArg:
        // "va-arg" from a pointer is safe.
        break;
      case Instruction::Store: {
        if (V == I->getOperand(0)) {
          // Stored the pointer - conservatively assume it may be unsafe.
          DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
                       << "\n store of address: " << *I << "\n");
          return false;
        }

        if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getOperand(0)->getType()),
                          AllocaPtr, AllocaSize))
          return false;
        break;
      }
      case Instruction::Ret: {
        // Information leak.
        return false;
      }

      case Instruction::Call:
      case Instruction::Invoke: {
        ImmutableCallSite CS(I);

        if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
          if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
              II->getIntrinsicID() == Intrinsic::lifetime_end)
            continue;
        }

        if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
          if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) {
            DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
                         << "\n unsafe memintrinsic: " << *I
                         << "\n");
            return false;
          }
          continue;
        }

        // LLVM 'nocapture' attribute is only set for arguments whose address
        // is not stored, passed around, or used in any other non-trivial way.
        // We assume that passing a pointer to an object as a 'nocapture
        // readnone' argument is safe.
        // FIXME: a more precise solution would require an interprocedural
        // analysis here, which would look at all uses of an argument inside
        // the function being called.
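        // (For example, an argument marked 'nocapture readnone' may be
        // compared against other pointers by the callee, but is never
        // dereferenced or retained, so it cannot make the alloca unsafe.)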
        ImmutableCallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
        for (ImmutableCallSite::arg_iterator A = B; A != E; ++A)
          if (A->get() == V)
            if (!(CS.doesNotCapture(A - B) && (CS.doesNotAccessMemory(A - B) ||
                                               CS.doesNotAccessMemory()))) {
              DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
                           << "\n unsafe call: " << *I << "\n");
              return false;
            }
        continue;
      }

      default:
        if (Visited.insert(I).second)
          WorkList.push_back(cast<const Instruction>(I));
      }
    }
  }

  // All uses of the alloca are safe, we can place it on the safe stack.
  return true;
}
Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) {
  Value *StackGuardVar = TL.getIRStackGuard(IRB);
  if (!StackGuardVar)
    StackGuardVar =
        F.getParent()->getOrInsertGlobal("__stack_chk_guard", StackPtrTy);
  return IRB.CreateLoad(StackGuardVar, "StackGuard");
}
void SafeStack::findInsts(Function &F,
                          SmallVectorImpl<AllocaInst *> &StaticAllocas,
                          SmallVectorImpl<AllocaInst *> &DynamicAllocas,
                          SmallVectorImpl<Argument *> &ByValArguments,
                          SmallVectorImpl<ReturnInst *> &Returns,
                          SmallVectorImpl<Instruction *> &StackRestorePoints) {
  for (Instruction &I : instructions(&F)) {
    if (auto AI = dyn_cast<AllocaInst>(&I)) {
      ++NumAllocas;

      uint64_t Size = getStaticAllocaAllocationSize(AI);
      if (IsSafeStackAlloca(AI, Size))
        continue;

      if (AI->isStaticAlloca()) {
        ++NumUnsafeStaticAllocas;
        StaticAllocas.push_back(AI);
      } else {
        ++NumUnsafeDynamicAllocas;
        DynamicAllocas.push_back(AI);
      }
    } else if (auto RI = dyn_cast<ReturnInst>(&I)) {
      Returns.push_back(RI);
    } else if (auto CI = dyn_cast<CallInst>(&I)) {
      // setjmps require stack restore.
      if (CI->getCalledFunction() && CI->canReturnTwice())
        StackRestorePoints.push_back(CI);
    } else if (auto LP = dyn_cast<LandingPadInst>(&I)) {
      // Exception landing pads require stack restore.
      StackRestorePoints.push_back(LP);
    } else if (auto II = dyn_cast<IntrinsicInst>(&I)) {
      if (II->getIntrinsicID() == Intrinsic::gcroot)
        llvm::report_fatal_error(
            "gcroot intrinsic not compatible with safestack attribute");
    }
  }

  for (Argument &Arg : F.args()) {
    if (!Arg.hasByValAttr())
      continue;
    uint64_t Size =
        DL.getTypeStoreSize(Arg.getType()->getPointerElementType());
    if (IsSafeStackAlloca(&Arg, Size))
      continue;

    ++NumUnsafeByValArguments;
    ByValArguments.push_back(&Arg);
  }
}
AllocaInst *
SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F,
                                    ArrayRef<Instruction *> StackRestorePoints,
                                    Value *StaticTop, bool NeedDynamicTop) {
  assert(StaticTop && "The stack top isn't set.");

  if (StackRestorePoints.empty())
    return nullptr;

  // We need the current value of the shadow stack pointer to restore
  // after longjmp or exception catching.
  //
  // FIXME: On some platforms this could be handled by the longjmp/exception
  // runtime itself.

  AllocaInst *DynamicTop = nullptr;
  if (NeedDynamicTop) {
    // If we also have dynamic alloca's, the stack pointer value changes
    // throughout the function. For now we store it in an alloca.
    DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr,
                                  "unsafe_stack_dynamic_ptr");
    IRB.CreateStore(StaticTop, DynamicTop);
  }

  // Restore current stack pointer after longjmp/exception catch.
  for (Instruction *I : StackRestorePoints) {
    ++NumUnsafeStackRestorePoints;

    IRB.SetInsertPoint(I->getNextNode());
    Value *CurrentTop = DynamicTop ? IRB.CreateLoad(DynamicTop) : StaticTop;
    IRB.CreateStore(CurrentTop, UnsafeStackPtr);
  }

  return DynamicTop;
}
void SafeStack::checkStackGuard(IRBuilder<> &IRB, Function &F, ReturnInst &RI,
                                AllocaInst *StackGuardSlot, Value *StackGuard) {
  Value *V = IRB.CreateLoad(StackGuardSlot);
  Value *Cmp = IRB.CreateICmpNE(StackGuard, V);

  auto SuccessProb = BranchProbabilityInfo::getBranchProbStackProtector(true);
  auto FailureProb = BranchProbabilityInfo::getBranchProbStackProtector(false);
  MDNode *Weights = MDBuilder(F.getContext())
                        .createBranchWeights(SuccessProb.getNumerator(),
                                             FailureProb.getNumerator());
  Instruction *CheckTerm =
      SplitBlockAndInsertIfThen(Cmp, &RI,
                                /* Unreachable */ true, Weights);
  IRBuilder<> IRBFail(CheckTerm);
  // FIXME: respect -fsanitize-trap / -ftrap-function here?
  Constant *StackChkFail = F.getParent()->getOrInsertFunction(
      "__stack_chk_fail", IRB.getVoidTy());
  IRBFail.CreateCall(StackChkFail, {});
}
/// We explicitly compute and set the unsafe stack layout for all unsafe
/// static alloca instructions. We save the unsafe "base pointer" in the
/// prologue into a local variable and restore it in the epilogue.
Value *SafeStack::moveStaticAllocasToUnsafeStack(
    IRBuilder<> &IRB, Function &F, ArrayRef<AllocaInst *> StaticAllocas,
    ArrayRef<Argument *> ByValArguments, ArrayRef<ReturnInst *> Returns,
    Instruction *BasePointer, AllocaInst *StackGuardSlot) {
  if (StaticAllocas.empty() && ByValArguments.empty())
    return BasePointer;

  DIBuilder DIB(*F.getParent());

  StackColoring SSC(F, StaticAllocas);
  SSC.run();
  SSC.removeAllMarkers();

  // Unsafe stack always grows down.
  StackLayout SSL(StackAlignment);
  if (StackGuardSlot) {
    Type *Ty = StackGuardSlot->getAllocatedType();
    unsigned Align =
        std::max(DL.getPrefTypeAlignment(Ty), StackGuardSlot->getAlignment());
    SSL.addObject(StackGuardSlot, getStaticAllocaAllocationSize(StackGuardSlot),
                  Align, SSC.getFullLiveRange());
  }
  for (Argument *Arg : ByValArguments) {
    Type *Ty = Arg->getType()->getPointerElementType();
    uint64_t Size = DL.getTypeStoreSize(Ty);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    // Ensure the object is properly aligned.
    unsigned Align = std::max((unsigned)DL.getPrefTypeAlignment(Ty),
                              Arg->getParamAlignment());
    SSL.addObject(Arg, Size, Align, SSC.getFullLiveRange());
  }

  for (AllocaInst *AI : StaticAllocas) {
    Type *Ty = AI->getAllocatedType();
    uint64_t Size = getStaticAllocaAllocationSize(AI);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    // Ensure the object is properly aligned.
    unsigned Align =
        std::max((unsigned)DL.getPrefTypeAlignment(Ty), AI->getAlignment());

    SSL.addObject(AI, Size, Align, SSC.getLiveRange(AI));
  }
  SSL.computeLayout();
  unsigned FrameAlignment = SSL.getFrameAlignment();

  // FIXME: tell SSL that we start at a less-then-MaxAlignment aligned location
  // (AlignmentSkew).
  if (FrameAlignment > StackAlignment) {
    // Re-align the base pointer according to the max requested alignment.
    assert(isPowerOf2_32(FrameAlignment));
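    // Masking with ~(FrameAlignment - 1) rounds the base pointer down to a
    // FrameAlignment boundary (e.g. it clears the low five bits when
    // FrameAlignment is 32); rounding down is safe because the unsafe stack
    // grows towards lower addresses.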
    IRB.SetInsertPoint(BasePointer->getNextNode());
    BasePointer = cast<Instruction>(IRB.CreateIntToPtr(
        IRB.CreateAnd(IRB.CreatePtrToInt(BasePointer, IntPtrTy),
                      ConstantInt::get(IntPtrTy, ~uint64_t(FrameAlignment - 1))),
        StackPtrTy));
  }

  IRB.SetInsertPoint(BasePointer->getNextNode());

  if (StackGuardSlot) {
    unsigned Offset = SSL.getObjectOffset(StackGuardSlot);
    Value *Off = IRB.CreateGEP(BasePointer, // BasePointer is i8*
                               ConstantInt::get(Int32Ty, -Offset));
    Value *NewAI =
        IRB.CreateBitCast(Off, StackGuardSlot->getType(), "StackGuardSlot");

    // Replace alloc with the new location.
    StackGuardSlot->replaceAllUsesWith(NewAI);
    StackGuardSlot->eraseFromParent();
  }
  for (Argument *Arg : ByValArguments) {
    unsigned Offset = SSL.getObjectOffset(Arg);
    Type *Ty = Arg->getType()->getPointerElementType();

    uint64_t Size = DL.getTypeStoreSize(Ty);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    Value *Off = IRB.CreateGEP(BasePointer, // BasePointer is i8*
                               ConstantInt::get(Int32Ty, -Offset));
    Value *NewArg = IRB.CreateBitCast(Off, Arg->getType(),
                                      Arg->getName() + ".unsafe-byval");

    // Replace alloc with the new location.
    replaceDbgDeclare(Arg, BasePointer, BasePointer->getNextNode(), DIB,
                      /*Deref=*/false, -Offset);
    Arg->replaceAllUsesWith(NewArg);
    IRB.SetInsertPoint(cast<Instruction>(NewArg)->getNextNode());
    IRB.CreateMemCpy(Off, Arg, Size, Arg->getParamAlignment());
  }
  // Allocate space for every unsafe static AllocaInst on the unsafe stack.
  for (AllocaInst *AI : StaticAllocas) {
    IRB.SetInsertPoint(AI);
    unsigned Offset = SSL.getObjectOffset(AI);

    uint64_t Size = getStaticAllocaAllocationSize(AI);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    replaceDbgDeclareForAlloca(AI, BasePointer, DIB, /*Deref=*/false, -Offset);
    replaceDbgValueForAlloca(AI, BasePointer, DIB, -Offset);

    // Replace uses of the alloca with the new location.
    // Insert address calculation close to each use to work around PR27844.
    std::string Name = std::string(AI->getName()) + ".unsafe";
    while (!AI->use_empty()) {
      Use &U = *AI->use_begin();
      Instruction *User = cast<Instruction>(U.getUser());

      Instruction *InsertBefore;
      if (auto *PHI = dyn_cast<PHINode>(User))
        InsertBefore = PHI->getIncomingBlock(U)->getTerminator();
      else
        InsertBefore = User;

      IRBuilder<> IRBUser(InsertBefore);
      Value *Off = IRBUser.CreateGEP(BasePointer, // BasePointer is i8*
                                     ConstantInt::get(Int32Ty, -Offset));
      Value *Replacement = IRBUser.CreateBitCast(Off, AI->getType(), Name);

      if (auto *PHI = dyn_cast<PHINode>(User)) {
        // PHI nodes may have multiple incoming edges from the same BB (why??),
        // all must be updated at once with the same incoming value.
        auto *BB = PHI->getIncomingBlock(U);
        for (unsigned I = 0; I < PHI->getNumIncomingValues(); ++I)
          if (PHI->getIncomingBlock(I) == BB)
            PHI->setIncomingValue(I, Replacement);
      } else {
        U.set(Replacement);
      }
    }

    AI->eraseFromParent();
  }
  // Re-align BasePointer so that our callees would see it aligned as
  // expected.
  // FIXME: no need to update BasePointer in leaf functions.
  unsigned FrameSize = alignTo(SSL.getFrameSize(), StackAlignment);
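  // For example, a frame that needs 40 bytes is padded to 48 so that the
  // static top our callees inherit stays StackAlignment (16-byte) aligned.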
  // Update shadow stack pointer in the function epilogue.
  IRB.SetInsertPoint(BasePointer->getNextNode());

  Value *StaticTop =
      IRB.CreateGEP(BasePointer, ConstantInt::get(Int32Ty, -FrameSize),
                    "unsafe_stack_static_top");
  IRB.CreateStore(StaticTop, UnsafeStackPtr);

  return StaticTop;
}
void SafeStack::moveDynamicAllocasToUnsafeStack(
    Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
    ArrayRef<AllocaInst *> DynamicAllocas) {
  DIBuilder DIB(*F.getParent());

  for (AllocaInst *AI : DynamicAllocas) {
    IRBuilder<> IRB(AI);

    // Compute the new SP value (after AI).
    Value *ArraySize = AI->getArraySize();
    if (ArraySize->getType() != IntPtrTy)
      ArraySize = IRB.CreateIntCast(ArraySize, IntPtrTy, false);

    Type *Ty = AI->getAllocatedType();
    uint64_t TySize = DL.getTypeAllocSize(Ty);
    Value *Size = IRB.CreateMul(ArraySize, ConstantInt::get(IntPtrTy, TySize));

    Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(UnsafeStackPtr), IntPtrTy);
    SP = IRB.CreateSub(SP, Size);

    // Align the SP value to satisfy the AllocaInst, type and stack alignments.
    unsigned Align = std::max(
        std::max((unsigned)DL.getPrefTypeAlignment(Ty), AI->getAlignment()),
        (unsigned)StackAlignment);

    assert(isPowerOf2_32(Align));
    Value *NewTop = IRB.CreateIntToPtr(
        IRB.CreateAnd(SP, ConstantInt::get(IntPtrTy, ~uint64_t(Align - 1))),
        StackPtrTy);

    // Save the stack pointer.
    IRB.CreateStore(NewTop, UnsafeStackPtr);
    if (DynamicTop)
      IRB.CreateStore(NewTop, DynamicTop);

    Value *NewAI = IRB.CreatePointerCast(NewTop, AI->getType());
    if (AI->hasName() && isa<Instruction>(NewAI))
      NewAI->takeName(AI);

    replaceDbgDeclareForAlloca(AI, NewAI, DIB, /*Deref=*/false);
    AI->replaceAllUsesWith(NewAI);
    AI->eraseFromParent();
  }
  if (!DynamicAllocas.empty()) {
    // Now go through the instructions again, replacing stacksave/stackrestore.
    for (inst_iterator It = inst_begin(&F), Ie = inst_end(&F); It != Ie;) {
      Instruction *I = &*(It++);
      auto II = dyn_cast<IntrinsicInst>(I);
      if (!II)
        continue;

      if (II->getIntrinsicID() == Intrinsic::stacksave) {
        IRBuilder<> IRB(II);
        Instruction *LI = IRB.CreateLoad(UnsafeStackPtr);
        LI->takeName(II);
        II->replaceAllUsesWith(LI);
        II->eraseFromParent();
      } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
        IRBuilder<> IRB(II);
        Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr);
        SI->takeName(II);
        assert(II->use_empty());
        II->eraseFromParent();
      }
    }
  }
}
bool SafeStack::run() {
  assert(F.hasFnAttribute(Attribute::SafeStack) &&
         "Can't run SafeStack on a function without the attribute");
  assert(!F.isDeclaration() && "Can't run SafeStack on a function declaration");

  ++NumFunctions;

  SmallVector<AllocaInst *, 16> StaticAllocas;
  SmallVector<AllocaInst *, 4> DynamicAllocas;
  SmallVector<Argument *, 4> ByValArguments;
  SmallVector<ReturnInst *, 4> Returns;

  // Collect all points where stack gets unwound and needs to be restored.
  // This is only necessary because the runtime (setjmp and unwind code) is
  // not aware of the unsafe stack and won't unwind/restore it properly.
  // To work around this problem without changing the runtime, we insert
  // instrumentation to restore the unsafe stack pointer when necessary.
  SmallVector<Instruction *, 4> StackRestorePoints;

  // Find all static and dynamic alloca instructions that must be moved to the
  // unsafe stack, all return instructions and stack restore points.
  findInsts(F, StaticAllocas, DynamicAllocas, ByValArguments, Returns,
            StackRestorePoints);

  if (StaticAllocas.empty() && DynamicAllocas.empty() &&
      ByValArguments.empty() && StackRestorePoints.empty())
    return false; // Nothing to do in this function.

  if (!StaticAllocas.empty() || !DynamicAllocas.empty() ||
      !ByValArguments.empty())
    ++NumUnsafeStackFunctions; // This function has the unsafe stack.

  if (!StackRestorePoints.empty())
    ++NumUnsafeStackRestorePointsFunctions;
  IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
  UnsafeStackPtr = TL.getSafeStackPointerLocation(IRB);

  // Load the current stack pointer (we'll also use it as a base pointer).
  // FIXME: use a dedicated register for it?
  Instruction *BasePointer =
      IRB.CreateLoad(UnsafeStackPtr, false, "unsafe_stack_ptr");
  assert(BasePointer->getType() == StackPtrTy);

  AllocaInst *StackGuardSlot = nullptr;
  // FIXME: implement weaker forms of stack protector.
  if (F.hasFnAttribute(Attribute::StackProtect) ||
      F.hasFnAttribute(Attribute::StackProtectStrong) ||
      F.hasFnAttribute(Attribute::StackProtectReq)) {
    Value *StackGuard = getStackGuard(IRB, F);
    StackGuardSlot = IRB.CreateAlloca(StackPtrTy, nullptr);
    IRB.CreateStore(StackGuard, StackGuardSlot);

    for (ReturnInst *RI : Returns) {
      IRBuilder<> IRBRet(RI);
      checkStackGuard(IRBRet, F, *RI, StackGuardSlot, StackGuard);
    }
  }
  // The top of the unsafe stack after all unsafe static allocas are
  // allocated.
  Value *StaticTop =
      moveStaticAllocasToUnsafeStack(IRB, F, StaticAllocas, ByValArguments,
                                     Returns, BasePointer, StackGuardSlot);

  // Safe stack object that stores the current unsafe stack top. It is updated
  // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
  // This is only needed if we need to restore stack pointer after longjmp
  // or exceptions, and we have dynamic allocations.
  // FIXME: a better alternative might be to store the unsafe stack pointer
  // before setjmp / invoke instructions.
  AllocaInst *DynamicTop = createStackRestorePoints(
      IRB, F, StackRestorePoints, StaticTop, !DynamicAllocas.empty());

  // Handle dynamic allocas.
  moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
                                  DynamicAllocas);

  // Restore the unsafe stack pointer before each return.
  for (ReturnInst *RI : Returns) {
    IRB.SetInsertPoint(RI);
    IRB.CreateStore(BasePointer, UnsafeStackPtr);
  }

  DEBUG(dbgs() << "[SafeStack] safestack applied\n");
  return true;
}
class SafeStackLegacyPass : public FunctionPass {
  const TargetMachine *TM;

public:
  static char ID; // Pass identification, replacement for typeid.
  SafeStackLegacyPass() : FunctionPass(ID), TM(nullptr) {
    initializeSafeStackLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
  }
  bool runOnFunction(Function &F) override {
    DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");

    if (!F.hasFnAttribute(Attribute::SafeStack)) {
      DEBUG(dbgs() << "[SafeStack] safestack is not requested"
                      " for this function\n");
      return false;
    }

    if (F.isDeclaration()) {
      DEBUG(dbgs() << "[SafeStack] function definition"
                      " is not available\n");
      return false;
    }

    TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    auto *TL = TM->getSubtargetImpl(F)->getTargetLowering();
    if (!TL)
      report_fatal_error("TargetLowering instance is required");

    auto *DL = &F.getParent()->getDataLayout();
    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    auto &ACT = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

    // Compute DT and LI only for functions that have the attribute.
    // This is only useful because the legacy pass manager doesn't let us
    // compute analyses lazily.
    // In the backend pipeline, nothing preserves DT before SafeStack, so we
    // would otherwise always compute it wastefully, even if there is no
    // function with the safestack attribute.
    DominatorTree DT(F);
    LoopInfo LI(DT);

    ScalarEvolution SE(F, TLI, ACT, DT, LI);

    return SafeStack(F, *TL, *DL, SE).run();
  }
};
} // anonymous namespace

char SafeStackLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(SafeStackLegacyPass, DEBUG_TYPE,
                      "Safe Stack instrumentation pass", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(SafeStackLegacyPass, DEBUG_TYPE,
                    "Safe Stack instrumentation pass", false, false)

FunctionPass *llvm::createSafeStackPass() { return new SafeStackLegacyPass(); }