1 //===- HWAddressSanitizer.cpp - detector of tagged-pointer memory bugs ---===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This file is a part of HWAddressSanitizer, an address sanity checker
12 /// based on tagged addressing.
13 //===----------------------------------------------------------------------===//
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/ADT/StringExtras.h"
17 #include "llvm/ADT/StringRef.h"
18 #include "llvm/ADT/Triple.h"
19 #include "llvm/IR/Attributes.h"
20 #include "llvm/IR/BasicBlock.h"
21 #include "llvm/IR/Constant.h"
22 #include "llvm/IR/Constants.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/Function.h"
26 #include "llvm/IR/IRBuilder.h"
27 #include "llvm/IR/InlineAsm.h"
28 #include "llvm/IR/InstVisitor.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/Instructions.h"
31 #include "llvm/IR/IntrinsicInst.h"
32 #include "llvm/IR/Intrinsics.h"
33 #include "llvm/IR/LLVMContext.h"
34 #include "llvm/IR/MDBuilder.h"
35 #include "llvm/IR/Module.h"
36 #include "llvm/IR/Type.h"
37 #include "llvm/IR/Value.h"
38 #include "llvm/Pass.h"
39 #include "llvm/Support/Casting.h"
40 #include "llvm/Support/CommandLine.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Transforms/Instrumentation.h"
44 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
45 #include "llvm/Transforms/Utils/ModuleUtils.h"
46 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
50 #define DEBUG_TYPE "hwasan"
52 static const char *const kHwasanModuleCtorName = "hwasan.module_ctor";
53 static const char *const kHwasanInitName = "__hwasan_init";
55 static const char *const kHwasanShadowMemoryDynamicAddress =
56 "__hwasan_shadow_memory_dynamic_address";
58 // Accesses sizes are powers of two: 1, 2, 4, 8, 16.
59 static const size_t kNumberOfAccessSizes = 5;
61 static const size_t kDefaultShadowScale = 4;
62 static const uint64_t kDynamicShadowSentinel =
63 std::numeric_limits<uint64_t>::max();
64 static const unsigned kPointerTagShift = 56;
66 static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
67 "hwasan-memory-access-callback-prefix",
68 cl::desc("Prefix for memory access callbacks"), cl::Hidden,
69 cl::init("__hwasan_"));
72 ClInstrumentWithCalls("hwasan-instrument-with-calls",
73 cl::desc("instrument reads and writes with callbacks"),
74 cl::Hidden, cl::init(false));
76 static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
77 cl::desc("instrument read instructions"),
78 cl::Hidden, cl::init(true));
80 static cl::opt<bool> ClInstrumentWrites(
81 "hwasan-instrument-writes", cl::desc("instrument write instructions"),
82 cl::Hidden, cl::init(true));
84 static cl::opt<bool> ClInstrumentAtomics(
85 "hwasan-instrument-atomics",
86 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
89 static cl::opt<bool> ClRecover(
91 cl::desc("Enable recovery mode (continue-after-error)."),
92 cl::Hidden, cl::init(false));
94 static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
95 cl::desc("instrument stack (allocas)"),
96 cl::Hidden, cl::init(true));
98 static cl::opt<bool> ClUARRetagToZero(
99 "hwasan-uar-retag-to-zero",
100 cl::desc("Clear alloca tags before returning from the function to allow "
101 "non-instrumented and instrumented function calls mix. When set "
102 "to false, allocas are retagged before returning from the "
103 "function to detect use after return."),
104 cl::Hidden, cl::init(true));
106 static cl::opt<bool> ClGenerateTagsWithCalls(
107 "hwasan-generate-tags-with-calls",
108 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
111 static cl::opt<int> ClMatchAllTag(
112 "hwasan-match-all-tag",
113 cl::desc("don't report bad accesses via pointers with this tag"),
114 cl::Hidden, cl::init(-1));
116 static cl::opt<bool> ClEnableKhwasan(
118 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
119 cl::Hidden, cl::init(false));
121 // These flags allow to change the shadow mapping and control how shadow memory
122 // is accessed. The shadow mapping looks like:
123 // Shadow = (Mem >> scale) + offset
125 static cl::opt<unsigned long long> ClMappingOffset(
126 "hwasan-mapping-offset",
127 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden,
132 /// An instrumentation pass implementing detection of addressability bugs
133 /// using tagged pointers.
134 class HWAddressSanitizer : public FunctionPass {
136 // Pass identification, replacement for typeid.
139 explicit HWAddressSanitizer(bool CompileKernel = false, bool Recover = false)
141 this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
142 this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0 ?
143 ClEnableKhwasan : CompileKernel;
146 StringRef getPassName() const override { return "HWAddressSanitizer"; }
148 bool runOnFunction(Function &F) override;
149 bool doInitialization(Module &M) override;
151 void initializeCallbacks(Module &M);
153 void maybeInsertDynamicShadowAtFunctionEntry(Function &F);
155 void untagPointerOperand(Instruction *I, Value *Addr);
156 Value *memToShadow(Value *Shadow, Type *Ty, IRBuilder<> &IRB);
157 void instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
158 unsigned AccessSizeIndex,
159 Instruction *InsertBefore);
160 bool instrumentMemAccess(Instruction *I);
161 Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
162 uint64_t *TypeSize, unsigned *Alignment,
165 bool isInterestingAlloca(const AllocaInst &AI);
166 bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag);
167 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
168 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
169 bool instrumentStack(SmallVectorImpl<AllocaInst *> &Allocas,
170 SmallVectorImpl<Instruction *> &RetVec);
171 Value *getNextTagWithCall(IRBuilder<> &IRB);
172 Value *getStackBaseTag(IRBuilder<> &IRB);
173 Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
175 Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);
181 /// This struct defines the shadow mapping using the rule:
182 /// shadow = (mem >> Scale) + Offset.
183 /// If InGlobal is true, then
184 /// extern char __hwasan_shadow[];
185 /// shadow = (mem >> Scale) + &__hwasan_shadow
186 struct ShadowMapping {
191 void init(Triple &TargetTriple);
192 unsigned getAllocaAlignment() const { return 1U << Scale; }
194 ShadowMapping Mapping;
202 Function *HwasanCtorFunction;
204 Function *HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
205 Function *HwasanMemoryAccessCallbackSized[2];
207 Function *HwasanTagMemoryFunc;
208 Function *HwasanGenerateTagFunc;
210 Constant *ShadowGlobal;
212 Value *LocalDynamicShadow = nullptr;
215 } // end anonymous namespace
217 char HWAddressSanitizer::ID = 0;
219 INITIALIZE_PASS_BEGIN(
220 HWAddressSanitizer, "hwasan",
221 "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
224 HWAddressSanitizer, "hwasan",
225 "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
228 FunctionPass *llvm::createHWAddressSanitizerPass(bool CompileKernel,
230 assert(!CompileKernel || Recover);
231 return new HWAddressSanitizer(CompileKernel, Recover);
234 /// Module-level initialization.
236 /// inserts a call to __hwasan_init to the module's constructor list.
237 bool HWAddressSanitizer::doInitialization(Module &M) {
238 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
239 auto &DL = M.getDataLayout();
241 TargetTriple = Triple(M.getTargetTriple());
243 Mapping.init(TargetTriple);
245 C = &(M.getContext());
247 IntptrTy = IRB.getIntPtrTy(DL);
248 Int8Ty = IRB.getInt8Ty();
250 HwasanCtorFunction = nullptr;
251 if (!CompileKernel) {
252 std::tie(HwasanCtorFunction, std::ignore) =
253 createSanitizerCtorAndInitFunctions(M, kHwasanModuleCtorName,
257 appendToGlobalCtors(M, HwasanCtorFunction, 0);
262 void HWAddressSanitizer::initializeCallbacks(Module &M) {
264 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
265 const std::string TypeStr = AccessIsWrite ? "store" : "load";
266 const std::string EndingStr = Recover ? "_noabort" : "";
268 HwasanMemoryAccessCallbackSized[AccessIsWrite] =
269 checkSanitizerInterfaceFunction(M.getOrInsertFunction(
270 ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
271 FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false)));
273 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
275 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
276 checkSanitizerInterfaceFunction(M.getOrInsertFunction(
277 ClMemoryAccessCallbackPrefix + TypeStr +
278 itostr(1ULL << AccessSizeIndex) + EndingStr,
279 FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false)));
283 HwasanTagMemoryFunc = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
284 "__hwasan_tag_memory", IRB.getVoidTy(), IntptrTy, Int8Ty, IntptrTy));
285 HwasanGenerateTagFunc = checkSanitizerInterfaceFunction(
286 M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty));
288 if (Mapping.InGlobal)
289 ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
290 ArrayType::get(IRB.getInt8Ty(), 0));
293 void HWAddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
294 // Generate code only when dynamic addressing is needed.
295 if (Mapping.Offset != kDynamicShadowSentinel)
298 IRBuilder<> IRB(&F.front().front());
299 if (Mapping.InGlobal) {
300 // An empty inline asm with input reg == output reg.
301 // An opaque pointer-to-int cast, basically.
302 InlineAsm *Asm = InlineAsm::get(
303 FunctionType::get(IntptrTy, {ShadowGlobal->getType()}, false),
304 StringRef(""), StringRef("=r,0"),
305 /*hasSideEffects=*/false);
306 LocalDynamicShadow = IRB.CreateCall(Asm, {ShadowGlobal}, ".hwasan.shadow");
308 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
309 kHwasanShadowMemoryDynamicAddress, IntptrTy);
310 LocalDynamicShadow = IRB.CreateLoad(GlobalDynamicAddress);
314 Value *HWAddressSanitizer::isInterestingMemoryAccess(Instruction *I,
319 // Skip memory accesses inserted by another instrumentation.
320 if (I->getMetadata("nosanitize")) return nullptr;
322 // Do not instrument the load fetching the dynamic shadow address.
323 if (LocalDynamicShadow == I)
326 Value *PtrOperand = nullptr;
327 const DataLayout &DL = I->getModule()->getDataLayout();
328 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
329 if (!ClInstrumentReads) return nullptr;
331 *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
332 *Alignment = LI->getAlignment();
333 PtrOperand = LI->getPointerOperand();
334 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
335 if (!ClInstrumentWrites) return nullptr;
337 *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
338 *Alignment = SI->getAlignment();
339 PtrOperand = SI->getPointerOperand();
340 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
341 if (!ClInstrumentAtomics) return nullptr;
343 *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
345 PtrOperand = RMW->getPointerOperand();
346 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
347 if (!ClInstrumentAtomics) return nullptr;
349 *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
351 PtrOperand = XCHG->getPointerOperand();
355 // Do not instrument accesses from different address spaces; we cannot deal
357 Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
358 if (PtrTy->getPointerAddressSpace() != 0)
361 // Ignore swifterror addresses.
362 // swifterror memory addresses are mem2reg promoted by instruction
363 // selection. As such they cannot have regular uses like an instrumentation
364 // function and it makes no sense to track them as memory.
365 if (PtrOperand->isSwiftError())
372 static unsigned getPointerOperandIndex(Instruction *I) {
373 if (LoadInst *LI = dyn_cast<LoadInst>(I))
374 return LI->getPointerOperandIndex();
375 if (StoreInst *SI = dyn_cast<StoreInst>(I))
376 return SI->getPointerOperandIndex();
377 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
378 return RMW->getPointerOperandIndex();
379 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
380 return XCHG->getPointerOperandIndex();
381 report_fatal_error("Unexpected instruction");
385 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
386 size_t Res = countTrailingZeros(TypeSize / 8);
387 assert(Res < kNumberOfAccessSizes);
391 void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
392 if (TargetTriple.isAArch64())
396 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
398 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
399 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
402 Value *HWAddressSanitizer::memToShadow(Value *Mem, Type *Ty, IRBuilder<> &IRB) {
404 Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
405 if (Mapping.Offset == 0)
407 // (Mem >> Scale) + Offset
409 if (LocalDynamicShadow)
410 ShadowBase = LocalDynamicShadow;
412 ShadowBase = ConstantInt::get(Ty, Mapping.Offset);
413 return IRB.CreateAdd(Shadow, ShadowBase);
416 void HWAddressSanitizer::instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
417 unsigned AccessSizeIndex,
418 Instruction *InsertBefore) {
419 IRBuilder<> IRB(InsertBefore);
420 Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, kPointerTagShift),
422 Value *AddrLong = untagPointer(IRB, PtrLong);
423 Value *ShadowLong = memToShadow(AddrLong, PtrLong->getType(), IRB);
425 IRB.CreateLoad(IRB.CreateIntToPtr(ShadowLong, IRB.getInt8PtrTy()));
426 Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);
428 int matchAllTag = ClMatchAllTag.getNumOccurrences() > 0 ?
429 ClMatchAllTag : (CompileKernel ? 0xFF : -1);
430 if (matchAllTag != -1) {
431 Value *TagNotIgnored = IRB.CreateICmpNE(PtrTag,
432 ConstantInt::get(PtrTag->getType(), matchAllTag));
433 TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
436 TerminatorInst *CheckTerm =
437 SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, !Recover,
438 MDBuilder(*C).createBranchWeights(1, 100000));
440 IRB.SetInsertPoint(CheckTerm);
441 const int64_t AccessInfo = Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex;
443 switch (TargetTriple.getArch()) {
445 // The signal handler will find the data address in rdi.
446 Asm = InlineAsm::get(
447 FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
448 "int3\nnopl " + itostr(0x40 + AccessInfo) + "(%rax)",
450 /*hasSideEffects=*/true);
452 case Triple::aarch64:
453 case Triple::aarch64_be:
454 // The signal handler will find the data address in x0.
455 Asm = InlineAsm::get(
456 FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
457 "brk #" + itostr(0x900 + AccessInfo),
459 /*hasSideEffects=*/true);
462 report_fatal_error("unsupported architecture");
464 IRB.CreateCall(Asm, PtrLong);
467 bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
468 LLVM_DEBUG(dbgs() << "Instrumenting: " << *I << "\n");
469 bool IsWrite = false;
470 unsigned Alignment = 0;
471 uint64_t TypeSize = 0;
472 Value *MaybeMask = nullptr;
474 isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);
480 return false; //FIXME
483 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
484 if (isPowerOf2_64(TypeSize) &&
485 (TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
486 (Alignment >= (1UL << Mapping.Scale) || Alignment == 0 ||
487 Alignment >= TypeSize / 8)) {
488 size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
489 if (ClInstrumentWithCalls) {
490 IRB.CreateCall(HwasanMemoryAccessCallback[IsWrite][AccessSizeIndex],
493 instrumentMemAccessInline(AddrLong, IsWrite, AccessSizeIndex, I);
496 IRB.CreateCall(HwasanMemoryAccessCallbackSized[IsWrite],
497 {AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8)});
499 untagPointerOperand(I, Addr);
504 static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
505 uint64_t ArraySize = 1;
506 if (AI.isArrayAllocation()) {
507 const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
508 assert(CI && "non-constant array size");
509 ArraySize = CI->getZExtValue();
511 Type *Ty = AI.getAllocatedType();
512 uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
513 return SizeInBytes * ArraySize;
516 bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,
518 size_t Size = (getAllocaSizeInBytes(*AI) + Mapping.getAllocaAlignment() - 1) &
519 ~(Mapping.getAllocaAlignment() - 1);
521 Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
522 if (ClInstrumentWithCalls) {
523 IRB.CreateCall(HwasanTagMemoryFunc,
524 {IRB.CreatePointerCast(AI, IntptrTy), JustTag,
525 ConstantInt::get(IntptrTy, Size)});
527 size_t ShadowSize = Size >> Mapping.Scale;
528 Value *ShadowPtr = IRB.CreateIntToPtr(
529 memToShadow(IRB.CreatePointerCast(AI, IntptrTy), AI->getType(), IRB),
531 // If this memset is not inlined, it will be intercepted in the hwasan
532 // runtime library. That's OK, because the interceptor skips the checks if
533 // the address is in the shadow region.
534 // FIXME: the interceptor is not as fast as real memset. Consider lowering
535 // llvm.memset right here into either a sequence of stores, or a call to
536 // hwasan_tag_memory.
537 IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, /*Align=*/1);
542 static unsigned RetagMask(unsigned AllocaNo) {
543 // A list of 8-bit numbers that have at most one run of non-zero bits.
544 // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
546 // The list does not include the value 255, which is used for UAR.
547 static unsigned FastMasks[] = {
548 0, 1, 2, 3, 4, 6, 7, 8, 12, 14, 15, 16, 24,
549 28, 30, 31, 32, 48, 56, 60, 62, 63, 64, 96, 112, 120,
550 124, 126, 127, 128, 192, 224, 240, 248, 252, 254};
551 return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
554 Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
555 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
558 Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
559 if (ClGenerateTagsWithCalls)
561 // FIXME: use addressofreturnaddress (but implement it in aarch64 backend
563 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
564 auto GetStackPointerFn =
565 Intrinsic::getDeclaration(M, Intrinsic::frameaddress);
566 Value *StackPointer = IRB.CreateCall(
567 GetStackPointerFn, {Constant::getNullValue(IRB.getInt32Ty())});
569 // Extract some entropy from the stack pointer for the tags.
570 // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
571 // between functions).
572 Value *StackPointerLong = IRB.CreatePointerCast(StackPointer, IntptrTy);
574 IRB.CreateXor(StackPointerLong, IRB.CreateLShr(StackPointerLong, 20),
575 "hwasan.stack.base.tag");
579 Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
580 AllocaInst *AI, unsigned AllocaNo) {
581 if (ClGenerateTagsWithCalls)
582 return getNextTagWithCall(IRB);
583 return IRB.CreateXor(StackTag,
584 ConstantInt::get(IntptrTy, RetagMask(AllocaNo)));
587 Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) {
588 if (ClUARRetagToZero)
589 return ConstantInt::get(IntptrTy, 0);
590 if (ClGenerateTagsWithCalls)
591 return getNextTagWithCall(IRB);
592 return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, 0xFFU));
595 // Add a tag to an address.
596 Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
597 Value *PtrLong, Value *Tag) {
598 Value *TaggedPtrLong;
600 // Kernel addresses have 0xFF in the most significant byte.
601 Value *ShiftedTag = IRB.CreateOr(
602 IRB.CreateShl(Tag, kPointerTagShift),
603 ConstantInt::get(IntptrTy, (1ULL << kPointerTagShift) - 1));
604 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
606 // Userspace can simply do OR (tag << 56);
607 Value *ShiftedTag = IRB.CreateShl(Tag, kPointerTagShift);
608 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
610 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
613 // Remove tag from an address.
614 Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
615 Value *UntaggedPtrLong;
617 // Kernel addresses have 0xFF in the most significant byte.
618 UntaggedPtrLong = IRB.CreateOr(PtrLong,
619 ConstantInt::get(PtrLong->getType(), 0xFFULL << kPointerTagShift));
621 // Userspace addresses have 0x00.
622 UntaggedPtrLong = IRB.CreateAnd(PtrLong,
623 ConstantInt::get(PtrLong->getType(), ~(0xFFULL << kPointerTagShift)));
625 return UntaggedPtrLong;
628 bool HWAddressSanitizer::instrumentStack(
629 SmallVectorImpl<AllocaInst *> &Allocas,
630 SmallVectorImpl<Instruction *> &RetVec) {
631 Function *F = Allocas[0]->getParent()->getParent();
632 Instruction *InsertPt = &*F->getEntryBlock().begin();
633 IRBuilder<> IRB(InsertPt);
635 Value *StackTag = getStackBaseTag(IRB);
637 // Ideally, we want to calculate tagged stack base pointer, and rewrite all
638 // alloca addresses using that. Unfortunately, offsets are not known yet
639 // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
640 // temp, shift-OR it into each alloca address and xor with the retag mask.
641 // This generates one extra instruction per alloca use.
642 for (unsigned N = 0; N < Allocas.size(); ++N) {
643 auto *AI = Allocas[N];
644 IRB.SetInsertPoint(AI->getNextNode());
646 // Replace uses of the alloca with tagged address.
647 Value *Tag = getAllocaTag(IRB, StackTag, AI, N);
648 Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
649 Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag);
651 AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
652 Replacement->setName(Name + ".hwasan");
654 for (auto UI = AI->use_begin(), UE = AI->use_end(); UI != UE;) {
656 if (U.getUser() != AILong)
660 tagAlloca(IRB, AI, Tag);
662 for (auto RI : RetVec) {
663 IRB.SetInsertPoint(RI);
665 // Re-tag alloca memory with the special UAR tag.
666 Value *Tag = getUARTag(IRB, StackTag);
667 tagAlloca(IRB, AI, Tag);
674 bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
675 return (AI.getAllocatedType()->isSized() &&
676 // FIXME: instrument dynamic allocas, too
677 AI.isStaticAlloca() &&
678 // alloca() may be called with 0 size, ignore it.
679 getAllocaSizeInBytes(AI) > 0 &&
680 // We are only interested in allocas not promotable to registers.
681 // Promotable allocas are common under -O0.
682 !isAllocaPromotable(&AI) &&
683 // inalloca allocas are not treated as static, and we don't want
684 // dynamic alloca instrumentation for them as well.
685 !AI.isUsedWithInAlloca() &&
686 // swifterror allocas are register promoted by ISel
690 bool HWAddressSanitizer::runOnFunction(Function &F) {
691 if (&F == HwasanCtorFunction)
694 if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
697 LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
699 initializeCallbacks(*F.getParent());
701 assert(!LocalDynamicShadow);
702 maybeInsertDynamicShadowAtFunctionEntry(F);
704 bool Changed = false;
705 SmallVector<Instruction*, 16> ToInstrument;
706 SmallVector<AllocaInst*, 8> AllocasToInstrument;
707 SmallVector<Instruction*, 8> RetVec;
709 for (auto &Inst : BB) {
710 if (ClInstrumentStack)
711 if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
712 // Realign all allocas. We don't want small uninteresting allocas to
713 // hide in instrumented alloca's padding.
714 if (AI->getAlignment() < Mapping.getAllocaAlignment())
715 AI->setAlignment(Mapping.getAllocaAlignment());
716 // Instrument some of them.
717 if (isInterestingAlloca(*AI))
718 AllocasToInstrument.push_back(AI);
722 if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst) ||
723 isa<CleanupReturnInst>(Inst))
724 RetVec.push_back(&Inst);
726 Value *MaybeMask = nullptr;
730 Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
731 &Alignment, &MaybeMask);
732 if (Addr || isa<MemIntrinsic>(Inst))
733 ToInstrument.push_back(&Inst);
737 if (!AllocasToInstrument.empty())
738 Changed |= instrumentStack(AllocasToInstrument, RetVec);
740 for (auto Inst : ToInstrument)
741 Changed |= instrumentMemAccess(Inst);
743 LocalDynamicShadow = nullptr;
748 void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple) {
749 const bool IsAndroid = TargetTriple.isAndroid();
750 const bool IsAndroidWithIfuncSupport =
751 IsAndroid && !TargetTriple.isAndroidVersionLT(21);
753 Scale = kDefaultShadowScale;
755 if (ClEnableKhwasan || ClInstrumentWithCalls || !IsAndroidWithIfuncSupport)
758 Offset = kDynamicShadowSentinel;
759 if (ClMappingOffset.getNumOccurrences() > 0)
760 Offset = ClMappingOffset;
762 InGlobal = IsAndroidWithIfuncSupport;