//===- HWAddressSanitizer.cpp - detector of memory access errors ----------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
10 /// This file is a part of HWAddressSanitizer, an address sanity checker
11 /// based on tagged addressing.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/ADT/StringExtras.h"
17 #include "llvm/ADT/StringRef.h"
18 #include "llvm/ADT/Triple.h"
19 #include "llvm/IR/Attributes.h"
20 #include "llvm/IR/BasicBlock.h"
21 #include "llvm/IR/Constant.h"
22 #include "llvm/IR/Constants.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/DebugInfoMetadata.h"
25 #include "llvm/IR/DerivedTypes.h"
26 #include "llvm/IR/Function.h"
27 #include "llvm/IR/IRBuilder.h"
28 #include "llvm/IR/InlineAsm.h"
29 #include "llvm/IR/InstVisitor.h"
30 #include "llvm/IR/Instruction.h"
31 #include "llvm/IR/Instructions.h"
32 #include "llvm/IR/IntrinsicInst.h"
33 #include "llvm/IR/Intrinsics.h"
34 #include "llvm/IR/LLVMContext.h"
35 #include "llvm/IR/MDBuilder.h"
36 #include "llvm/IR/Module.h"
37 #include "llvm/IR/Type.h"
38 #include "llvm/IR/Value.h"
39 #include "llvm/Pass.h"
40 #include "llvm/Support/Casting.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Support/raw_ostream.h"
44 #include "llvm/Transforms/Instrumentation.h"
45 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
46 #include "llvm/Transforms/Utils/ModuleUtils.h"
47 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
#define DEBUG_TYPE "hwasan"

// Names of the runtime entry points referenced by the instrumentation.
static const char *const kHwasanModuleCtorName = "hwasan.module_ctor";
static const char *const kHwasanInitName = "__hwasan_init";

// Global filled in by the runtime with the dynamic shadow base address.
static const char *const kHwasanShadowMemoryDynamicAddress =
    "__hwasan_shadow_memory_dynamic_address";

// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

// Default shadow granularity: one shadow byte covers 2^4 = 16 bytes of memory.
static const size_t kDefaultShadowScale = 4;
// Sentinel mapping offset meaning "compute the shadow base at runtime".
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
// Pointer tags occupy the top byte of the pointer (bits 56..63).
static const unsigned kPointerTagShift = 56;

// log2 of the alignment required for the TLS-provided shadow base.
static const unsigned kShadowBaseAlignment = 32;
70 static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
71 "hwasan-memory-access-callback-prefix",
72 cl::desc("Prefix for memory access callbacks"), cl::Hidden,
73 cl::init("__hwasan_"));
76 ClInstrumentWithCalls("hwasan-instrument-with-calls",
77 cl::desc("instrument reads and writes with callbacks"),
78 cl::Hidden, cl::init(false));
80 static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
81 cl::desc("instrument read instructions"),
82 cl::Hidden, cl::init(true));
84 static cl::opt<bool> ClInstrumentWrites(
85 "hwasan-instrument-writes", cl::desc("instrument write instructions"),
86 cl::Hidden, cl::init(true));
88 static cl::opt<bool> ClInstrumentAtomics(
89 "hwasan-instrument-atomics",
90 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
93 static cl::opt<bool> ClRecover(
95 cl::desc("Enable recovery mode (continue-after-error)."),
96 cl::Hidden, cl::init(false));
98 static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
99 cl::desc("instrument stack (allocas)"),
100 cl::Hidden, cl::init(true));
102 static cl::opt<bool> ClUARRetagToZero(
103 "hwasan-uar-retag-to-zero",
104 cl::desc("Clear alloca tags before returning from the function to allow "
105 "non-instrumented and instrumented function calls mix. When set "
106 "to false, allocas are retagged before returning from the "
107 "function to detect use after return."),
108 cl::Hidden, cl::init(true));
110 static cl::opt<bool> ClGenerateTagsWithCalls(
111 "hwasan-generate-tags-with-calls",
112 cl::desc("generate new tags with runtime library calls"), cl::Hidden,
115 static cl::opt<int> ClMatchAllTag(
116 "hwasan-match-all-tag",
117 cl::desc("don't report bad accesses via pointers with this tag"),
118 cl::Hidden, cl::init(-1));
120 static cl::opt<bool> ClEnableKhwasan(
122 cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
123 cl::Hidden, cl::init(false));
125 // These flags allow to change the shadow mapping and control how shadow memory
126 // is accessed. The shadow mapping looks like:
127 // Shadow = (Mem >> scale) + offset
129 static cl::opt<uint64_t>
130 ClMappingOffset("hwasan-mapping-offset",
131 cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
132 cl::Hidden, cl::init(0));
135 ClWithIfunc("hwasan-with-ifunc",
136 cl::desc("Access dynamic shadow through an ifunc global on "
137 "platforms that support this"),
138 cl::Hidden, cl::init(false));
140 static cl::opt<bool> ClWithTls(
142 cl::desc("Access dynamic shadow through an thread-local pointer on "
143 "platforms that support this"),
144 cl::Hidden, cl::init(true));
147 ClRecordStackHistory("hwasan-record-stack-history",
148 cl::desc("Record stack frames with tagged allocations "
149 "in a thread-local ring buffer"),
150 cl::Hidden, cl::init(true));
152 ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
153 cl::desc("instrument memory intrinsics"),
154 cl::Hidden, cl::init(true));
157 ClInstrumentLandingPads("hwasan-instrument-landing-pads",
158 cl::desc("instrument landing pads"), cl::Hidden,
161 static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
162 cl::desc("inline all checks"),
163 cl::Hidden, cl::init(false));
/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
// NOTE(review): this declaration appears truncated in this copy — access
// specifiers, several members, and the closing brace are not visible.
class HWAddressSanitizer {
  explicit HWAddressSanitizer(Module &M, bool CompileKernel = false,
                              bool Recover = false) {
    // Command-line flags, when explicitly given, override the constructor
    // arguments.
    this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
    this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0 ?
        ClEnableKhwasan : CompileKernel;

  // Instruments one function; presumably returns true if the IR was changed
  // (definition not visible in this chunk — confirm).
  bool sanitizeFunction(Function &F);
  // Per-module setup: shadow mapping, module ctor, and the TLS slot global.
  void initializeModule(Module &M);

  // Declares the __hwasan_* runtime functions this pass calls.
  void initializeCallbacks(Module &M);

  Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
  Value *getDynamicShadowNonTls(IRBuilder<> &IRB);

  void untagPointerOperand(Instruction *I, Value *Addr);

  // Translates an application address to its shadow address.
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                 unsigned AccessSizeIndex,
                                 Instruction *InsertBefore);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  bool instrumentMemAccess(Instruction *I);
  Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
                                   uint64_t *TypeSize, unsigned *Alignment,

  bool isInterestingAlloca(const AllocaInst &AI);
  bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
  Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
  Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
  bool instrumentStack(
      SmallVectorImpl<AllocaInst *> &Allocas,
      DenseMap<AllocaInst *, std::vector<DbgDeclareInst *>> &AllocaDeclareMap,
      SmallVectorImpl<Instruction *> &RetVec, Value *StackTag);
  Value *readRegister(IRBuilder<> &IRB, StringRef Name);
  bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
  // Tag generation helpers: either call the runtime or derive tags locally.
  Value *getNextTagWithCall(IRBuilder<> &IRB);
  Value *getStackBaseTag(IRBuilder<> &IRB);
  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,

  Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);

  Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
  void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);

  std::string CurModuleUniqueId;

  // Runtime replacements for the mem* intrinsics and the vfork hook.
  FunctionCallee HWAsanMemmove, HWAsanMemcpy, HWAsanMemset;
  FunctionCallee HWAsanHandleVfork;

  /// This struct defines the shadow mapping using the rule:
  ///   shadow = (mem >> Scale) + Offset.
  /// If InGlobal is true, then
  ///   extern char __hwasan_shadow[];
  ///   shadow = (mem >> Scale) + &__hwasan_shadow
  /// If InTls is true, then
  ///   extern char *__hwasan_tls;
  ///   shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
  struct ShadowMapping {
    void init(Triple &TargetTriple);
    // Allocas are padded to the shadow granule size so tags cover them fully.
    unsigned getAllocaAlignment() const { return 1U << Scale; }

  ShadowMapping Mapping;

  Function *HwasanCtorFunction;

  // [IsWrite][log2(AccessSize)] -> __hwasan_{load,store}<N>[_noabort]
  FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
  FunctionCallee HwasanMemoryAccessCallbackSized[2];

  FunctionCallee HwasanTagMemoryFunc;
  FunctionCallee HwasanGenerateTagFunc;
  FunctionCallee HwasanThreadEnterFunc;

  Constant *ShadowGlobal;

  // Per-function cached values; reset between functions.
  Value *LocalDynamicShadow = nullptr;
  Value *StackBaseTag = nullptr;
  GlobalValue *ThreadPtrGlobal = nullptr;
// Legacy pass-manager wrapper around HWAddressSanitizer.
// NOTE(review): this declaration appears truncated in this copy — access
// specifiers, the `static char ID;` member, method bodies' closing braces,
// and the config members are not visible.
class HWAddressSanitizerLegacyPass : public FunctionPass {
  // Pass identification, replacement for typeid.
  explicit HWAddressSanitizerLegacyPass(bool CompileKernel = false,
                                        bool Recover = false)
      : FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover) {}

  StringRef getPassName() const override { return "HWAddressSanitizer"; }

  // Builds the per-module sanitizer instance once, before any function runs.
  bool doInitialization(Module &M) override {
    HWASan = llvm::make_unique<HWAddressSanitizer>(M, CompileKernel, Recover);

  bool runOnFunction(Function &F) override {
    return HWASan->sanitizeFunction(F);

  bool doFinalization(Module &M) override {

  // Owns the module-lifetime sanitizer state shared by all functions.
  std::unique_ptr<HWAddressSanitizer> HWASan;

} // end anonymous namespace
302 char HWAddressSanitizerLegacyPass::ID = 0;
304 INITIALIZE_PASS_BEGIN(
305 HWAddressSanitizerLegacyPass, "hwasan",
306 "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
309 HWAddressSanitizerLegacyPass, "hwasan",
310 "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
313 FunctionPass *llvm::createHWAddressSanitizerLegacyPassPass(bool CompileKernel,
315 assert(!CompileKernel || Recover);
316 return new HWAddressSanitizerLegacyPass(CompileKernel, Recover);
319 HWAddressSanitizerPass::HWAddressSanitizerPass(bool CompileKernel, bool Recover)
320 : CompileKernel(CompileKernel), Recover(Recover) {}
322 PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
323 ModuleAnalysisManager &MAM) {
324 HWAddressSanitizer HWASan(M, CompileKernel, Recover);
325 bool Modified = false;
326 for (Function &F : M)
327 Modified |= HWASan.sanitizeFunction(F);
329 return PreservedAnalyses::none();
330 return PreservedAnalyses::all();
333 /// Module-level initialization.
335 /// inserts a call to __hwasan_init to the module's constructor list.
336 void HWAddressSanitizer::initializeModule(Module &M) {
337 LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
338 auto &DL = M.getDataLayout();
340 TargetTriple = Triple(M.getTargetTriple());
342 Mapping.init(TargetTriple);
344 C = &(M.getContext());
345 CurModuleUniqueId = getUniqueModuleId(&M);
347 IntptrTy = IRB.getIntPtrTy(DL);
348 Int8PtrTy = IRB.getInt8PtrTy();
349 Int8Ty = IRB.getInt8Ty();
350 Int32Ty = IRB.getInt32Ty();
352 HwasanCtorFunction = nullptr;
353 if (!CompileKernel) {
354 std::tie(HwasanCtorFunction, std::ignore) =
355 getOrCreateSanitizerCtorAndInitFunctions(
356 M, kHwasanModuleCtorName, kHwasanInitName,
359 // This callback is invoked when the functions are created the first
360 // time. Hook them into the global ctors list in that case:
361 [&](Function *Ctor, FunctionCallee) {
362 Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
363 Ctor->setComdat(CtorComdat);
364 appendToGlobalCtors(M, Ctor, 0, Ctor);
368 if (!TargetTriple.isAndroid()) {
369 Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
370 auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
371 GlobalValue::ExternalLinkage, nullptr,
372 "__hwasan_tls", nullptr,
373 GlobalVariable::InitialExecTLSModel);
374 appendToCompilerUsed(M, GV);
377 ThreadPtrGlobal = cast<GlobalVariable>(C);
381 void HWAddressSanitizer::initializeCallbacks(Module &M) {
383 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
384 const std::string TypeStr = AccessIsWrite ? "store" : "load";
385 const std::string EndingStr = Recover ? "_noabort" : "";
387 HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
388 ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
389 FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false));
391 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
393 HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
394 M.getOrInsertFunction(
395 ClMemoryAccessCallbackPrefix + TypeStr +
396 itostr(1ULL << AccessSizeIndex) + EndingStr,
397 FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false));
401 HwasanTagMemoryFunc = M.getOrInsertFunction(
402 "__hwasan_tag_memory", IRB.getVoidTy(), Int8PtrTy, Int8Ty, IntptrTy);
403 HwasanGenerateTagFunc =
404 M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
406 ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
407 ArrayType::get(IRB.getInt8Ty(), 0));
409 const std::string MemIntrinCallbackPrefix =
410 CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
411 HWAsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
412 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
413 IRB.getInt8PtrTy(), IntptrTy);
414 HWAsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
415 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
416 IRB.getInt8PtrTy(), IntptrTy);
417 HWAsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
418 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
419 IRB.getInt32Ty(), IntptrTy);
422 M.getOrInsertFunction("__hwasan_handle_vfork", IRB.getVoidTy(), IntptrTy);
424 HwasanThreadEnterFunc =
425 M.getOrInsertFunction("__hwasan_thread_enter", IRB.getVoidTy());
428 Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
429 // An empty inline asm with input reg == output reg.
430 // An opaque no-op cast, basically.
431 InlineAsm *Asm = InlineAsm::get(
432 FunctionType::get(Int8PtrTy, {ShadowGlobal->getType()}, false),
433 StringRef(""), StringRef("=r,0"),
434 /*hasSideEffects=*/false);
435 return IRB.CreateCall(Asm, {ShadowGlobal}, ".hwasan.shadow");
438 Value *HWAddressSanitizer::getDynamicShadowNonTls(IRBuilder<> &IRB) {
439 // Generate code only when dynamic addressing is needed.
440 if (Mapping.Offset != kDynamicShadowSentinel)
443 if (Mapping.InGlobal) {
444 return getDynamicShadowIfunc(IRB);
446 Value *GlobalDynamicAddress =
447 IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
448 kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
449 return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
453 Value *HWAddressSanitizer::isInterestingMemoryAccess(Instruction *I,
458 // Skip memory accesses inserted by another instrumentation.
459 if (I->getMetadata("nosanitize")) return nullptr;
461 // Do not instrument the load fetching the dynamic shadow address.
462 if (LocalDynamicShadow == I)
465 Value *PtrOperand = nullptr;
466 const DataLayout &DL = I->getModule()->getDataLayout();
467 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
468 if (!ClInstrumentReads) return nullptr;
470 *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
471 *Alignment = LI->getAlignment();
472 PtrOperand = LI->getPointerOperand();
473 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
474 if (!ClInstrumentWrites) return nullptr;
476 *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
477 *Alignment = SI->getAlignment();
478 PtrOperand = SI->getPointerOperand();
479 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
480 if (!ClInstrumentAtomics) return nullptr;
482 *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
484 PtrOperand = RMW->getPointerOperand();
485 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
486 if (!ClInstrumentAtomics) return nullptr;
488 *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
490 PtrOperand = XCHG->getPointerOperand();
494 // Do not instrument accesses from different address spaces; we cannot deal
496 Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
497 if (PtrTy->getPointerAddressSpace() != 0)
500 // Ignore swifterror addresses.
501 // swifterror memory addresses are mem2reg promoted by instruction
502 // selection. As such they cannot have regular uses like an instrumentation
503 // function and it makes no sense to track them as memory.
504 if (PtrOperand->isSwiftError())
511 static unsigned getPointerOperandIndex(Instruction *I) {
512 if (LoadInst *LI = dyn_cast<LoadInst>(I))
513 return LI->getPointerOperandIndex();
514 if (StoreInst *SI = dyn_cast<StoreInst>(I))
515 return SI->getPointerOperandIndex();
516 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
517 return RMW->getPointerOperandIndex();
518 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
519 return XCHG->getPointerOperandIndex();
520 report_fatal_error("Unexpected instruction");
524 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
525 size_t Res = countTrailingZeros(TypeSize / 8);
526 assert(Res < kNumberOfAccessSizes);
530 void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
531 if (TargetTriple.isAArch64())
535 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
537 IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
538 I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
541 Value *HWAddressSanitizer::shadowBase() {
542 if (LocalDynamicShadow)
543 return LocalDynamicShadow;
544 return ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, Mapping.Offset),
548 Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
550 Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
551 if (Mapping.Offset == 0)
552 return IRB.CreateIntToPtr(Shadow, Int8PtrTy);
553 // (Mem >> Scale) + Offset
554 return IRB.CreateGEP(Int8Ty, shadowBase(), Shadow);
557 void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
558 unsigned AccessSizeIndex,
559 Instruction *InsertBefore) {
560 const int64_t AccessInfo = Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex;
561 IRBuilder<> IRB(InsertBefore);
563 if (!ClInlineAllChecks && TargetTriple.isAArch64() &&
564 TargetTriple.isOSBinFormatELF() && !Recover) {
565 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
566 Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
568 Intrinsic::getDeclaration(M, Intrinsic::hwasan_check_memaccess),
569 {shadowBase(), Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
573 Value *PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
574 Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, kPointerTagShift),
576 Value *AddrLong = untagPointer(IRB, PtrLong);
577 Value *Shadow = memToShadow(AddrLong, IRB);
578 Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
579 Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);
581 int matchAllTag = ClMatchAllTag.getNumOccurrences() > 0 ?
582 ClMatchAllTag : (CompileKernel ? 0xFF : -1);
583 if (matchAllTag != -1) {
584 Value *TagNotIgnored = IRB.CreateICmpNE(PtrTag,
585 ConstantInt::get(PtrTag->getType(), matchAllTag));
586 TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
589 Instruction *CheckTerm =
590 SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, false,
591 MDBuilder(*C).createBranchWeights(1, 100000));
593 IRB.SetInsertPoint(CheckTerm);
594 Value *OutOfShortGranuleTagRange =
595 IRB.CreateICmpUGT(MemTag, ConstantInt::get(Int8Ty, 15));
596 Instruction *CheckFailTerm =
597 SplitBlockAndInsertIfThen(OutOfShortGranuleTagRange, CheckTerm, !Recover,
598 MDBuilder(*C).createBranchWeights(1, 100000));
600 IRB.SetInsertPoint(CheckTerm);
601 Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(PtrLong, 15), Int8Ty);
602 PtrLowBits = IRB.CreateAdd(
603 PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
604 Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, MemTag);
605 SplitBlockAndInsertIfThen(PtrLowBitsOOB, CheckTerm, false,
606 MDBuilder(*C).createBranchWeights(1, 100000),
607 nullptr, nullptr, CheckFailTerm->getParent());
609 IRB.SetInsertPoint(CheckTerm);
610 Value *InlineTagAddr = IRB.CreateOr(AddrLong, 15);
611 InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, Int8PtrTy);
612 Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
613 Value *InlineTagMismatch = IRB.CreateICmpNE(PtrTag, InlineTag);
614 SplitBlockAndInsertIfThen(InlineTagMismatch, CheckTerm, false,
615 MDBuilder(*C).createBranchWeights(1, 100000),
616 nullptr, nullptr, CheckFailTerm->getParent());
618 IRB.SetInsertPoint(CheckFailTerm);
620 switch (TargetTriple.getArch()) {
622 // The signal handler will find the data address in rdi.
623 Asm = InlineAsm::get(
624 FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
625 "int3\nnopl " + itostr(0x40 + AccessInfo) + "(%rax)",
627 /*hasSideEffects=*/true);
629 case Triple::aarch64:
630 case Triple::aarch64_be:
631 // The signal handler will find the data address in x0.
632 Asm = InlineAsm::get(
633 FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
634 "brk #" + itostr(0x900 + AccessInfo),
636 /*hasSideEffects=*/true);
639 report_fatal_error("unsupported architecture");
641 IRB.CreateCall(Asm, PtrLong);
643 cast<BranchInst>(CheckFailTerm)->setSuccessor(0, CheckTerm->getParent());
646 void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
648 if (isa<MemTransferInst>(MI)) {
650 isa<MemMoveInst>(MI) ? HWAsanMemmove : HWAsanMemcpy,
651 {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
652 IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
653 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
654 } else if (isa<MemSetInst>(MI)) {
657 {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
658 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
659 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
661 MI->eraseFromParent();
664 bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
665 LLVM_DEBUG(dbgs() << "Instrumenting: " << *I << "\n");
666 bool IsWrite = false;
667 unsigned Alignment = 0;
668 uint64_t TypeSize = 0;
669 Value *MaybeMask = nullptr;
671 if (ClInstrumentMemIntrinsics && isa<MemIntrinsic>(I)) {
672 instrumentMemIntrinsic(cast<MemIntrinsic>(I));
677 isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);
683 return false; //FIXME
686 if (isPowerOf2_64(TypeSize) &&
687 (TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
688 (Alignment >= (1UL << Mapping.Scale) || Alignment == 0 ||
689 Alignment >= TypeSize / 8)) {
690 size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
691 if (ClInstrumentWithCalls) {
692 IRB.CreateCall(HwasanMemoryAccessCallback[IsWrite][AccessSizeIndex],
693 IRB.CreatePointerCast(Addr, IntptrTy));
695 instrumentMemAccessInline(Addr, IsWrite, AccessSizeIndex, I);
698 IRB.CreateCall(HwasanMemoryAccessCallbackSized[IsWrite],
699 {IRB.CreatePointerCast(Addr, IntptrTy),
700 ConstantInt::get(IntptrTy, TypeSize / 8)});
702 untagPointerOperand(I, Addr);
707 static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
708 uint64_t ArraySize = 1;
709 if (AI.isArrayAllocation()) {
710 const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
711 assert(CI && "non-constant array size");
712 ArraySize = CI->getZExtValue();
714 Type *Ty = AI.getAllocatedType();
715 uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
716 return SizeInBytes * ArraySize;
719 bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,
720 Value *Tag, size_t Size) {
721 size_t AlignedSize = alignTo(Size, Mapping.getAllocaAlignment());
723 Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
724 if (ClInstrumentWithCalls) {
725 IRB.CreateCall(HwasanTagMemoryFunc,
726 {IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,
727 ConstantInt::get(IntptrTy, AlignedSize)});
729 size_t ShadowSize = Size >> Mapping.Scale;
730 Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB);
731 // If this memset is not inlined, it will be intercepted in the hwasan
732 // runtime library. That's OK, because the interceptor skips the checks if
733 // the address is in the shadow region.
734 // FIXME: the interceptor is not as fast as real memset. Consider lowering
735 // llvm.memset right here into either a sequence of stores, or a call to
736 // hwasan_tag_memory.
738 IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, /*Align=*/1);
739 if (Size != AlignedSize) {
741 ConstantInt::get(Int8Ty, Size % Mapping.getAllocaAlignment()),
742 IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
743 IRB.CreateStore(JustTag, IRB.CreateConstGEP1_32(
744 Int8Ty, IRB.CreateBitCast(AI, Int8PtrTy),
/// Returns the 8-bit XOR mask used to derive the tag of alloca number
/// \p AllocaNo from the stack base tag. Indexing wraps modulo the table size.
static unsigned RetagMask(unsigned AllocaNo) {
  // A list of 8-bit numbers that have at most one run of non-zero bits.
  // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
  // masks.
  // The list does not include the value 255, which is used for UAR.
  //
  // Because we are more likely to use earlier elements of this list than later
  // ones, it is sorted in increasing order of probability of collision with a
  // mask allocated (temporally) nearby. The program that generated this list
  // can be found at:
  // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
  static unsigned FastMasks[] = {0, 128, 64, 192, 32, 96, 224, 112, 240,
                                 48, 16, 120, 248, 56, 24, 8, 124, 252,
                                 60, 28, 12, 4, 126, 254, 62, 30, 14,
                                 6, 2, 127, 63, 31, 15, 7, 3, 1};
  return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
}
769 Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
770 return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
773 Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
774 if (ClGenerateTagsWithCalls)
775 return getNextTagWithCall(IRB);
778 // FIXME: use addressofreturnaddress (but implement it in aarch64 backend
780 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
781 auto GetStackPointerFn =
782 Intrinsic::getDeclaration(M, Intrinsic::frameaddress);
783 Value *StackPointer = IRB.CreateCall(
784 GetStackPointerFn, {Constant::getNullValue(IRB.getInt32Ty())});
786 // Extract some entropy from the stack pointer for the tags.
787 // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
788 // between functions).
789 Value *StackPointerLong = IRB.CreatePointerCast(StackPointer, IntptrTy);
791 IRB.CreateXor(StackPointerLong, IRB.CreateLShr(StackPointerLong, 20),
792 "hwasan.stack.base.tag");
796 Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
797 AllocaInst *AI, unsigned AllocaNo) {
798 if (ClGenerateTagsWithCalls)
799 return getNextTagWithCall(IRB);
800 return IRB.CreateXor(StackTag,
801 ConstantInt::get(IntptrTy, RetagMask(AllocaNo)));
804 Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) {
805 if (ClUARRetagToZero)
806 return ConstantInt::get(IntptrTy, 0);
807 if (ClGenerateTagsWithCalls)
808 return getNextTagWithCall(IRB);
809 return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, 0xFFU));
812 // Add a tag to an address.
813 Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
814 Value *PtrLong, Value *Tag) {
815 Value *TaggedPtrLong;
817 // Kernel addresses have 0xFF in the most significant byte.
818 Value *ShiftedTag = IRB.CreateOr(
819 IRB.CreateShl(Tag, kPointerTagShift),
820 ConstantInt::get(IntptrTy, (1ULL << kPointerTagShift) - 1));
821 TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
823 // Userspace can simply do OR (tag << 56);
824 Value *ShiftedTag = IRB.CreateShl(Tag, kPointerTagShift);
825 TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
827 return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
830 // Remove tag from an address.
831 Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
832 Value *UntaggedPtrLong;
834 // Kernel addresses have 0xFF in the most significant byte.
835 UntaggedPtrLong = IRB.CreateOr(PtrLong,
836 ConstantInt::get(PtrLong->getType(), 0xFFULL << kPointerTagShift));
838 // Userspace addresses have 0x00.
839 UntaggedPtrLong = IRB.CreateAnd(PtrLong,
840 ConstantInt::get(PtrLong->getType(), ~(0xFFULL << kPointerTagShift)));
842 return UntaggedPtrLong;
845 Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
846 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
847 if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
848 // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
849 // in Bionic's libc/private/bionic_tls.h.
850 Function *ThreadPointerFunc =
851 Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
852 Value *SlotPtr = IRB.CreatePointerCast(
853 IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
854 IRB.CreateCall(ThreadPointerFunc), 0x30),
855 Ty->getPointerTo(0));
859 return ThreadPtrGlobal;
865 void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
866 if (!Mapping.InTls) {
867 LocalDynamicShadow = getDynamicShadowNonTls(IRB);
871 if (!WithFrameRecord && TargetTriple.isAndroid()) {
872 LocalDynamicShadow = getDynamicShadowIfunc(IRB);
876 Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
879 Instruction *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
881 Function *F = IRB.GetInsertBlock()->getParent();
882 if (F->getFnAttribute("hwasan-abi").getValueAsString() == "interceptor") {
883 Value *ThreadLongEqZero =
884 IRB.CreateICmpEQ(ThreadLong, ConstantInt::get(IntptrTy, 0));
885 auto *Br = cast<BranchInst>(SplitBlockAndInsertIfThen(
886 ThreadLongEqZero, cast<Instruction>(ThreadLongEqZero)->getNextNode(),
887 false, MDBuilder(*C).createBranchWeights(1, 100000)));
889 IRB.SetInsertPoint(Br);
890 // FIXME: This should call a new runtime function with a custom calling
891 // convention to avoid needing to spill all arguments here.
892 IRB.CreateCall(HwasanThreadEnterFunc);
893 LoadInst *ReloadThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
895 IRB.SetInsertPoint(&*Br->getSuccessor(0)->begin());
896 PHINode *ThreadLongPhi = IRB.CreatePHI(IntptrTy, 2);
897 ThreadLongPhi->addIncoming(ThreadLong, ThreadLong->getParent());
898 ThreadLongPhi->addIncoming(ReloadThreadLong, ReloadThreadLong->getParent());
899 ThreadLong = ThreadLongPhi;
902 // Extract the address field from ThreadLong. Unnecessary on AArch64 with TBI.
903 Value *ThreadLongMaybeUntagged =
904 TargetTriple.isAArch64() ? ThreadLong : untagPointer(IRB, ThreadLong);
906 if (WithFrameRecord) {
907 StackBaseTag = IRB.CreateAShr(ThreadLong, 3);
909 // Prepare ring buffer data.
911 if (TargetTriple.getArch() == Triple::aarch64)
912 PC = readRegister(IRB, "pc");
914 PC = IRB.CreatePtrToInt(F, IntptrTy);
915 auto GetStackPointerFn =
916 Intrinsic::getDeclaration(F->getParent(), Intrinsic::frameaddress);
917 Value *SP = IRB.CreatePtrToInt(
918 IRB.CreateCall(GetStackPointerFn,
919 {Constant::getNullValue(IRB.getInt32Ty())}),
923 // PC is 0x0000PPPPPPPPPPPP (48 bits are meaningful, others are zero)
924 // SP is 0xsssssssssssSSSS0 (4 lower bits are zero)
925 // We only really need ~20 lower non-zero bits (SSSS), so we mix like this:
926 // 0xSSSSPPPPPPPPPPPP
927 SP = IRB.CreateShl(SP, 44);
929 // Store data to ring buffer.
931 IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IntptrTy->getPointerTo(0));
932 IRB.CreateStore(IRB.CreateOr(PC, SP), RecordPtr);
934 // Update the ring buffer. Top byte of ThreadLong defines the size of the
935 // buffer in pages, it must be a power of two, and the start of the buffer
936 // must be aligned by twice that much. Therefore wrap around of the ring
937 // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
938 // The use of AShr instead of LShr is due to
939 // https://bugs.llvm.org/show_bug.cgi?id=39030
940 // Runtime library makes sure not to use the highest bit.
941 Value *WrapMask = IRB.CreateXor(
942 IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
943 ConstantInt::get(IntptrTy, (uint64_t)-1));
944 Value *ThreadLongNew = IRB.CreateAnd(
945 IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
946 IRB.CreateStore(ThreadLongNew, SlotPtr);
949 // Get shadow base address by aligning RecordPtr up.
950 // Note: this is not correct if the pointer is already aligned.
951 // Runtime library will make sure this never happens.
952 LocalDynamicShadow = IRB.CreateAdd(
954 ThreadLongMaybeUntagged,
955 ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
956 ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
957 LocalDynamicShadow = IRB.CreateIntToPtr(LocalDynamicShadow, Int8PtrTy);
960 Value *HWAddressSanitizer::readRegister(IRBuilder<> &IRB, StringRef Name) {
961 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
962 Function *ReadRegister =
963 Intrinsic::getDeclaration(M, Intrinsic::read_register, IntptrTy);
964 MDNode *MD = MDNode::get(*C, {MDString::get(*C, Name)});
965 Value *Args[] = {MetadataAsValue::get(*C, MD)};
966 return IRB.CreateCall(ReadRegister, Args);
// Instrument each landing pad with a runtime call that receives the current
// stack-pointer value (read via llvm.read_register below).
// NOTE(review): the callee and the remainder of this function are elided in
// this view; presumably the runtime uses the SP value to untag stack memory
// that exception unwinding skipped over -- confirm against the full file.
969 bool HWAddressSanitizer::instrumentLandingPads(
970 SmallVectorImpl<Instruction *> &LandingPadVec) {
971 for (auto *LP : LandingPadVec) {
// Insert after the landing pad itself, keeping the pad first in its block.
972 IRBuilder<> IRB(LP->getNextNode());
// Pick the target's stack-pointer register name: "rsp" on x86_64; the
// alternative for other targets is on an elided line (likely "sp" -- verify).
975 {readRegister(IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp"
// Instrument the function's interesting allocas: give each alloca a tag,
// redirect its uses through a tagged pointer, annotate its dbg.declares with
// the tag offset, tag its shadow memory, and re-tag it with the special
// use-after-return (UAR) tag before every return-like instruction in RetVec.
// NOTE(review): several statements (the use-rewrite loop body, some closing
// braces, and the return value) are elided in this view; comments below
// describe only the visible code.
981 bool HWAddressSanitizer::instrumentStack(
982 SmallVectorImpl<AllocaInst *> &Allocas,
983 DenseMap<AllocaInst *, std::vector<DbgDeclareInst *>> &AllocaDeclareMap,
984 SmallVectorImpl<Instruction *> &RetVec, Value *StackTag) {
985 // Ideally, we want to calculate tagged stack base pointer, and rewrite all
986 // alloca addresses using that. Unfortunately, offsets are not known yet
987 // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
988 // temp, shift-OR it into each alloca address and xor with the retag mask.
989 // This generates one extra instruction per alloca use.
990 for (unsigned N = 0; N < Allocas.size(); ++N) {
991 auto *AI = Allocas[N];
// Build the tagged replacement pointer immediately after the alloca.
992 IRBuilder<> IRB(AI->getNextNode());
994 // Replace uses of the alloca with tagged address.
// The tag for alloca N is derived from the stack base tag (see getAllocaTag).
995 Value *Tag = getAllocaTag(IRB, StackTag, AI, N);
996 Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
997 Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag);
// Name the tagged pointer after the alloca (or a synthetic "alloca.N") with
// a ".hwasan" suffix, to keep the IR readable.
999 AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
1000 Replacement->setName(Name + ".hwasan");
// Walk AI's uses; skip the pointer cast we just made (its user is AILong).
// NOTE(review): the statement that actually redirects each use is elided.
1002 for (auto UI = AI->use_begin(), UE = AI->use_end(); UI != UE;) {
1004 if (U.getUser() != AILong)
// Record the tag in the debug info so the debugger can reconstruct the
// tagged address: append DW_OP_LLVM_tag_offset with this alloca's mask.
1008 for (auto *DDI : AllocaDeclareMap.lookup(AI)) {
1009 DIExpression *OldExpr = DDI->getExpression();
1010 DIExpression *NewExpr = DIExpression::append(
1011 OldExpr, {dwarf::DW_OP_LLVM_tag_offset, RetagMask(N)});
// Operand 2 of llvm.dbg.declare is the DIExpression.
1012 DDI->setArgOperand(2, MetadataAsValue::get(*C, NewExpr));
// Tag the alloca's shadow memory with its live tag.
1015 size_t Size = getAllocaSizeInBytes(*AI);
1016 tagAlloca(IRB, AI, Tag, Size);
// At each exit point, re-tag the (alignment-rounded) region with the UAR
// tag so later accesses through stale pointers fault.
1018 for (auto RI : RetVec) {
1019 IRB.SetInsertPoint(RI);
1021 // Re-tag alloca memory with the special UAR tag.
1022 Value *Tag = getUARTag(IRB, StackTag);
1023 tagAlloca(IRB, AI, Tag, alignTo(Size, Mapping.getAllocaAlignment()));
1030 bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
1031 return (AI.getAllocatedType()->isSized() &&
1032 // FIXME: instrument dynamic allocas, too
1033 AI.isStaticAlloca() &&
1034 // alloca() may be called with 0 size, ignore it.
1035 getAllocaSizeInBytes(AI) > 0 &&
1036 // We are only interested in allocas not promotable to registers.
1037 // Promotable allocas are common under -O0.
1038 !isAllocaPromotable(&AI) &&
1039 // inalloca allocas are not treated as static, and we don't want
1040 // dynamic alloca instrumentation for them as well.
1041 !AI.isUsedWithInAlloca() &&
1042 // swifterror allocas are register promoted by ISel
1043 !AI.isSwiftError());
// Per-function driver: collect instrumentation targets, emit the prologue,
// instrument stack allocas and memory accesses, then pad instrumented
// allocas and patch debug intrinsics to the padded replacements.
// NOTE(review): many lines are elided in this view (the early-return bodies,
// parts of several loops, and the final return); comments below describe
// only the visible code.
1046 bool HWAddressSanitizer::sanitizeFunction(Function &F) {
// Never instrument our own module constructor, and only instrument
// functions that carry the sanitize_hwaddress attribute.
1047 if (&F == HwasanCtorFunction)
1050 if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
1053 LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
// Worklists gathered in a single pass over the function:
1055 SmallVector<Instruction*, 16> ToInstrument;
1056 SmallVector<AllocaInst*, 8> AllocasToInstrument;
1057 SmallVector<Instruction*, 8> RetVec;
1058 SmallVector<Instruction*, 8> LandingPadVec;
1059 DenseMap<AllocaInst *, std::vector<DbgDeclareInst *>> AllocaDeclareMap;
1060 for (auto &BB : F) {
1061 for (auto &Inst : BB) {
// Interesting allocas (see isInterestingAlloca), gated by -hwasan-instrument-stack.
1062 if (ClInstrumentStack)
1063 if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
1064 if (isInterestingAlloca(*AI))
1065 AllocasToInstrument.push_back(AI);
// Every exit-like instruction, used later for UAR re-tagging.
1069 if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst) ||
1070 isa<CleanupReturnInst>(Inst))
1071 RetVec.push_back(&Inst);
// Map each alloca to the dbg.declares that describe it.
1073 if (auto *DDI = dyn_cast<DbgDeclareInst>(&Inst))
1074 if (auto *Alloca = dyn_cast_or_null<AllocaInst>(DDI->getAddress()))
1075 AllocaDeclareMap[Alloca].push_back(DDI);
1077 if (ClInstrumentLandingPads && isa<LandingPadInst>(Inst))
1078 LandingPadVec.push_back(&Inst);
// Memory accesses (loads/stores/etc.) and mem intrinsics to check.
1080 Value *MaybeMask = nullptr;
1084 Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
1085 &Alignment, &MaybeMask);
1086 if (Addr || isa<MemIntrinsic>(Inst))
1087 ToInstrument.push_back(&Inst);
1091 initializeCallbacks(*F.getParent());
1093 if (!LandingPadVec.empty())
1094 instrumentLandingPads(LandingPadVec);
// Nothing else to do if there are no allocas and no accesses.
1096 if (AllocasToInstrument.empty() && ToInstrument.empty())
// The prologue computes LocalDynamicShadow; it must not be set yet.
1099 assert(!LocalDynamicShadow);
1101 Instruction *InsertPt = &*F.getEntryBlock().begin();
1102 IRBuilder<> EntryIRB(InsertPt);
// Record a stack-history frame only when requested and when there are
// allocas worth describing.
1103 emitPrologue(EntryIRB,
1104 /*WithFrameRecord*/ ClRecordStackHistory &&
1105 !AllocasToInstrument.empty());
1107 bool Changed = false;
1108 if (!AllocasToInstrument.empty()) {
// With -hwasan-generate-tags-with-calls the stack base tag is not needed.
1110 ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB);
1111 Changed |= instrumentStack(AllocasToInstrument, AllocaDeclareMap, RetVec,
1115 // Pad and align each of the allocas that we instrumented to stop small
1116 // uninteresting allocas from hiding in instrumented alloca's padding and so
1117 // that we have enough space to store real tags for short granules.
1118 DenseMap<AllocaInst *, AllocaInst *> AllocaToPaddedAllocaMap;
1119 for (AllocaInst *AI : AllocasToInstrument) {
1120 uint64_t Size = getAllocaSizeInBytes(*AI);
1121 uint64_t AlignedSize = alignTo(Size, Mapping.getAllocaAlignment());
// Force at least 16-byte alignment on every instrumented alloca.
1122 AI->setAlignment(std::max(AI->getAlignment(), 16u));
1123 if (Size != AlignedSize) {
// Replace the alloca with a { original type, padding bytes } struct that
// rounds the size up to the alloca alignment granule.
1124 Type *AllocatedType = AI->getAllocatedType();
1125 if (AI->isArrayAllocation()) {
// Static allocas have a constant array size (checked in isInterestingAlloca).
1126 uint64_t ArraySize =
1127 cast<ConstantInt>(AI->getArraySize())->getZExtValue();
1128 AllocatedType = ArrayType::get(AllocatedType, ArraySize);
1130 Type *TypeWithPadding = StructType::get(
1131 AllocatedType, ArrayType::get(Int8Ty, AlignedSize - Size));
1132 auto *NewAI = new AllocaInst(
1133 TypeWithPadding, AI->getType()->getAddressSpace(), nullptr, "", AI);
// Carry over name, alignment, inalloca/swifterror flags and metadata so the
// padded alloca is a drop-in replacement.
1134 NewAI->takeName(AI);
1135 NewAI->setAlignment(AI->getAlignment());
1136 NewAI->setUsedWithInAlloca(AI->isUsedWithInAlloca());
1137 NewAI->setSwiftError(AI->isSwiftError());
1138 NewAI->copyMetadata(*AI);
// Users keep seeing the original pointer type via a bitcast.
1139 auto *Bitcast = new BitCastInst(NewAI, AI->getType(), "", AI);
1140 AI->replaceAllUsesWith(Bitcast);
1141 AllocaToPaddedAllocaMap[AI] = NewAI;
// Point debug-variable intrinsics at the padded allocas, then delete the
// now-unused originals.
1145 if (!AllocaToPaddedAllocaMap.empty()) {
1147 for (auto &Inst : BB)
1148 if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst))
1150 dyn_cast_or_null<AllocaInst>(DVI->getVariableLocation()))
1151 if (auto *NewAI = AllocaToPaddedAllocaMap.lookup(AI))
1153 0, MetadataAsValue::get(*C, LocalAsMetadata::get(NewAI)));
1154 for (auto &P : AllocaToPaddedAllocaMap)
1155 P.first->eraseFromParent();
1158 // If we split the entry block, move any allocas that were originally in the
1159 // entry block back into the entry block so that they aren't treated as
// (elided: "...dynamic allocas" -- static allocas must stay in the entry block)
1161 if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
1162 InsertPt = &*F.getEntryBlock().begin();
1163 for (auto II = EntryIRB.GetInsertBlock()->begin(),
1164 IE = EntryIRB.GetInsertBlock()->end();
// Advance before moving: moveBefore would otherwise invalidate II.
1166 Instruction *I = &*II++;
1167 if (auto *AI = dyn_cast<AllocaInst>(I))
1168 if (isa<ConstantInt>(AI->getArraySize()))
1169 I->moveBefore(InsertPt);
// Finally instrument the collected memory accesses.
1173 for (auto Inst : ToInstrument)
1174 Changed |= instrumentMemAccess(Inst);
// Reset per-function state for the next function.
1176 LocalDynamicShadow = nullptr;
1177 StackBaseTag = nullptr;
1182 void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple) {
1183 Scale = kDefaultShadowScale;
1184 if (ClMappingOffset.getNumOccurrences() > 0) {
1187 Offset = ClMappingOffset;
1188 } else if (ClEnableKhwasan || ClInstrumentWithCalls) {
1192 } else if (ClWithIfunc) {
1195 Offset = kDynamicShadowSentinel;
1196 } else if (ClWithTls) {
1199 Offset = kDynamicShadowSentinel;
1203 Offset = kDynamicShadowSentinel;