1 //===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 /// This file is a part of MemorySanitizer, a detector of uninitialized
13 /// The algorithm of the tool is similar to Memcheck
14 /// (http://goo.gl/QKbem). We associate a few shadow bits with every
15 /// byte of the application memory, poison the shadow of the malloc-ed
16 /// or alloca-ed memory, load the shadow bits on every memory read,
17 /// propagate the shadow bits through some of the arithmetic
18 /// instruction (including MOV), store the shadow bits on every memory
19 /// write, report a bug on some other instructions (e.g. JMP) if the
20 /// associated shadow is poisoned.
22 /// But there are differences too. The first and the major one:
23 /// compiler instrumentation instead of binary instrumentation. This
24 /// gives us much better register allocation, possible compiler
25 /// optimizations and a fast start-up. But this brings the major issue
26 /// as well: msan needs to see all program events, including system
27 /// calls and reads/writes in system libraries, so we either need to
28 /// compile *everything* with msan or use a binary translation
29 /// component (e.g. DynamoRIO) to instrument pre-built libraries.
30 /// Another difference from Memcheck is that we use 8 shadow bits per
31 /// byte of application memory and use a direct shadow mapping. This
32 /// greatly simplifies the instrumentation code and avoids races on
33 /// shadow updates (Memcheck is single-threaded so races are not a
34 /// concern there. Memcheck uses 2 shadow bits per byte with a slow
35 /// path storage that uses 8 bits per byte).
37 /// The default value of shadow is 0, which means "clean" (not poisoned).
39 /// Every module initializer should call __msan_init to ensure that the
40 /// shadow memory is ready. On error, __msan_warning is called. Since
41 /// parameters and return values may be passed via registers, we have a
42 /// specialized thread-local shadow for return values
43 /// (__msan_retval_tls) and parameters (__msan_param_tls).
47 /// MemorySanitizer can track origins (allocation points) of all uninitialized
48 /// values. This behavior is controlled with a flag (msan-track-origins) and is
49 /// disabled by default.
51 /// Origins are 4-byte values created and interpreted by the runtime library.
52 /// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
53 /// of application memory. Propagation of origins is basically a bunch of
54 /// "select" instructions that pick the origin of a dirty argument, if an
55 /// instruction has one.
57 /// Every 4 aligned, consecutive bytes of application memory have one origin
58 /// value associated with them. If these bytes contain uninitialized data
59 /// coming from 2 different allocations, the last store wins. Because of this,
60 /// MemorySanitizer reports can show unrelated origins, but this is unlikely in
63 /// Origins are meaningless for fully initialized values, so MemorySanitizer
64 /// avoids storing origin to memory when a fully initialized value is stored.
65 /// This way it avoids needless overwritting origin of the 4-byte region on
66 /// a short (i.e. 1 byte) clean store, and it is also good for performance.
70 /// Ideally, every atomic store of application value should update the
71 /// corresponding shadow location in an atomic way. Unfortunately, atomic store
72 /// of two disjoint locations can not be done without severe slowdown.
74 /// Therefore, we implement an approximation that may err on the safe side.
75 /// In this implementation, every atomically accessed location in the program
76 /// may only change from (partially) uninitialized to fully initialized, but
77 /// not the other way around. We load the shadow _after_ the application load,
78 /// and we store the shadow _before_ the app store. Also, we always store clean
79 /// shadow (if the application store is atomic). This way, if the store-load
80 /// pair constitutes a happens-before arc, shadow store and load are correctly
81 /// ordered such that the load will get either the value that was stored, or
82 /// some later value (which is always clean).
84 /// This does not work very well with Compare-And-Swap (CAS) and
85 /// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
86 /// must store the new shadow before the app operation, and load the shadow
87 /// after the app operation. Computers don't work this way. Current
88 /// implementation ignores the load aspect of CAS/RMW, always returning a clean
89 /// value. It implements the store part as a simple atomic store by storing a
92 //===----------------------------------------------------------------------===//
94 #include "llvm/ADT/DepthFirstIterator.h"
95 #include "llvm/ADT/SmallString.h"
96 #include "llvm/ADT/SmallVector.h"
97 #include "llvm/ADT/StringExtras.h"
98 #include "llvm/ADT/Triple.h"
99 #include "llvm/IR/DataLayout.h"
100 #include "llvm/IR/Function.h"
101 #include "llvm/IR/IRBuilder.h"
102 #include "llvm/IR/InlineAsm.h"
103 #include "llvm/IR/InstVisitor.h"
104 #include "llvm/IR/IntrinsicInst.h"
105 #include "llvm/IR/LLVMContext.h"
106 #include "llvm/IR/MDBuilder.h"
107 #include "llvm/IR/Module.h"
108 #include "llvm/IR/Type.h"
109 #include "llvm/IR/ValueMap.h"
110 #include "llvm/Support/CommandLine.h"
111 #include "llvm/Support/Debug.h"
112 #include "llvm/Support/raw_ostream.h"
113 #include "llvm/Transforms/Instrumentation.h"
114 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
115 #include "llvm/Transforms/Utils/Local.h"
116 #include "llvm/Transforms/Utils/ModuleUtils.h"
118 using namespace llvm;
120 #define DEBUG_TYPE "msan"
122 static const unsigned kOriginSize = 4;
123 static const unsigned kMinOriginAlignment = 4;
124 static const unsigned kShadowTLSAlignment = 8;
126 // These constants must be kept in sync with the ones in msan.h.
127 static const unsigned kParamTLSSize = 800;
128 static const unsigned kRetvalTLSSize = 800;
130 // Accesses sizes are powers of two: 1, 2, 4, 8.
131 static const size_t kNumberOfAccessSizes = 4;
133 /// \brief Track origins of uninitialized values.
135 /// Adds a section to MemorySanitizer report that points to the allocation
136 /// (stack or heap) the uninitialized bits came from originally.
137 static cl::opt<int> ClTrackOrigins("msan-track-origins",
138 cl::desc("Track origins (allocation sites) of poisoned memory"),
139 cl::Hidden, cl::init(0));
140 static cl::opt<bool> ClKeepGoing("msan-keep-going",
141 cl::desc("keep going after reporting a UMR"),
142 cl::Hidden, cl::init(false));
143 static cl::opt<bool> ClPoisonStack("msan-poison-stack",
144 cl::desc("poison uninitialized stack variables"),
145 cl::Hidden, cl::init(true));
146 static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
147 cl::desc("poison uninitialized stack variables with a call"),
148 cl::Hidden, cl::init(false));
149 static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
150 cl::desc("poison uninitialized stack variables with the given pattern"),
151 cl::Hidden, cl::init(0xff));
152 static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
153 cl::desc("poison undef temps"),
154 cl::Hidden, cl::init(true));
156 static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
157 cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
158 cl::Hidden, cl::init(true));
160 static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
161 cl::desc("exact handling of relational integer ICmp"),
162 cl::Hidden, cl::init(false));
164 // This flag controls whether we check the shadow of the address
165 // operand of load or store. Such bugs are very rare, since load from
166 // a garbage address typically results in SEGV, but still happen
167 // (e.g. only lower bits of address are garbage, or the access happens
168 // early at program startup where malloc-ed memory is more likely to
169 // be zeroed. As of 2012-08-28 this flag adds 20% slowdown.
170 static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
171 cl::desc("report accesses through a pointer which has poisoned shadow"),
172 cl::Hidden, cl::init(true));
174 static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
175 cl::desc("print out instructions with default strict semantics"),
176 cl::Hidden, cl::init(false));
178 static cl::opt<int> ClInstrumentationWithCallThreshold(
179 "msan-instrumentation-with-call-threshold",
181 "If the function being instrumented requires more than "
182 "this number of checks and origin stores, use callbacks instead of "
183 "inline checks (-1 means never use callbacks)."),
184 cl::Hidden, cl::init(3500));
186 // This is an experiment to enable handling of cases where shadow is a non-zero
187 // compile-time constant. For some unexplainable reason they were silently
188 // ignored in the instrumentation.
189 static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
190 cl::desc("Insert checks for constant shadow values"),
191 cl::Hidden, cl::init(false));
193 // This is off by default because of a bug in gold:
194 // https://sourceware.org/bugzilla/show_bug.cgi?id=19002
195 static cl::opt<bool> ClWithComdat("msan-with-comdat",
196 cl::desc("Place MSan constructors in comdat sections"),
197 cl::Hidden, cl::init(false));
199 static const char *const kMsanModuleCtorName = "msan.module_ctor";
200 static const char *const kMsanInitName = "__msan_init";
204 // Memory map parameters used in application-to-shadow address calculation.
205 // Offset = (Addr & ~AndMask) ^ XorMask
206 // Shadow = ShadowBase + Offset
207 // Origin = OriginBase + Offset
208 struct MemoryMapParams {
215 struct PlatformMemoryMapParams {
216 const MemoryMapParams *bits32;
217 const MemoryMapParams *bits64;
221 static const MemoryMapParams Linux_I386_MemoryMapParams = {
222 0x000080000000, // AndMask
223 0, // XorMask (not used)
224 0, // ShadowBase (not used)
225 0x000040000000, // OriginBase
229 static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
230 #ifdef MSAN_LINUX_X86_64_OLD_MAPPING
231 0x400000000000, // AndMask
232 0, // XorMask (not used)
233 0, // ShadowBase (not used)
234 0x200000000000, // OriginBase
236 0, // AndMask (not used)
237 0x500000000000, // XorMask
238 0, // ShadowBase (not used)
239 0x100000000000, // OriginBase
244 static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
245 0, // AndMask (not used)
246 0x008000000000, // XorMask
247 0, // ShadowBase (not used)
248 0x002000000000, // OriginBase
252 static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
253 0x200000000000, // AndMask
254 0x100000000000, // XorMask
255 0x080000000000, // ShadowBase
256 0x1C0000000000, // OriginBase
260 static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
261 0, // AndMask (not used)
262 0x06000000000, // XorMask
263 0, // ShadowBase (not used)
264 0x01000000000, // OriginBase
268 static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
269 0x000180000000, // AndMask
270 0x000040000000, // XorMask
271 0x000020000000, // ShadowBase
272 0x000700000000, // OriginBase
276 static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
277 0xc00000000000, // AndMask
278 0x200000000000, // XorMask
279 0x100000000000, // ShadowBase
280 0x380000000000, // OriginBase
283 static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
284 &Linux_I386_MemoryMapParams,
285 &Linux_X86_64_MemoryMapParams,
288 static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
290 &Linux_MIPS64_MemoryMapParams,
293 static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
295 &Linux_PowerPC64_MemoryMapParams,
298 static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
300 &Linux_AArch64_MemoryMapParams,
303 static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
304 &FreeBSD_I386_MemoryMapParams,
305 &FreeBSD_X86_64_MemoryMapParams,
308 /// \brief An instrumentation pass implementing detection of uninitialized
311 /// MemorySanitizer: instrument the code in module to find
312 /// uninitialized reads.
313 class MemorySanitizer : public FunctionPass {
315 MemorySanitizer(int TrackOrigins = 0, bool Recover = false)
317 TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
318 Recover(Recover || ClKeepGoing),
319 WarningFn(nullptr) {}
320 StringRef getPassName() const override { return "MemorySanitizer"; }
321 void getAnalysisUsage(AnalysisUsage &AU) const override {
322 AU.addRequired<TargetLibraryInfoWrapperPass>();
324 bool runOnFunction(Function &F) override;
325 bool doInitialization(Module &M) override;
326 static char ID; // Pass identification, replacement for typeid.
329 void initializeCallbacks(Module &M);
331 /// \brief Track origins (allocation points) of uninitialized values.
338 /// \brief Thread-local shadow storage for function parameters.
339 GlobalVariable *ParamTLS;
340 /// \brief Thread-local origin storage for function parameters.
341 GlobalVariable *ParamOriginTLS;
342 /// \brief Thread-local shadow storage for function return value.
343 GlobalVariable *RetvalTLS;
344 /// \brief Thread-local origin storage for function return value.
345 GlobalVariable *RetvalOriginTLS;
346 /// \brief Thread-local shadow storage for in-register va_arg function
347 /// parameters (x86_64-specific).
348 GlobalVariable *VAArgTLS;
349 /// \brief Thread-local shadow storage for va_arg overflow area
350 /// (x86_64-specific).
351 GlobalVariable *VAArgOverflowSizeTLS;
352 /// \brief Thread-local space used to pass origin value to the UMR reporting
354 GlobalVariable *OriginTLS;
356 /// \brief The run-time callback to print a warning.
358 // These arrays are indexed by log2(AccessSize).
359 Value *MaybeWarningFn[kNumberOfAccessSizes];
360 Value *MaybeStoreOriginFn[kNumberOfAccessSizes];
362 /// \brief Run-time helper that generates a new origin value for a stack
364 Value *MsanSetAllocaOrigin4Fn;
365 /// \brief Run-time helper that poisons stack on function entry.
366 Value *MsanPoisonStackFn;
367 /// \brief Run-time helper that records a store (or any event) of an
368 /// uninitialized value and returns an updated origin id encoding this info.
369 Value *MsanChainOriginFn;
370 /// \brief MSan runtime replacements for memmove, memcpy and memset.
371 Value *MemmoveFn, *MemcpyFn, *MemsetFn;
373 /// \brief Memory map parameters used in application-to-shadow calculation.
374 const MemoryMapParams *MapParams;
376 MDNode *ColdCallWeights;
377 /// \brief Branch weights for origin store.
378 MDNode *OriginStoreWeights;
379 /// \brief An empty volatile inline asm that prevents callback merge.
381 Function *MsanCtorFunction;
383 friend struct MemorySanitizerVisitor;
384 friend struct VarArgAMD64Helper;
385 friend struct VarArgMIPS64Helper;
386 friend struct VarArgAArch64Helper;
387 friend struct VarArgPowerPC64Helper;
389 } // anonymous namespace
391 char MemorySanitizer::ID = 0;
392 INITIALIZE_PASS_BEGIN(
393 MemorySanitizer, "msan",
394 "MemorySanitizer: detects uninitialized reads.", false, false)
395 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
397 MemorySanitizer, "msan",
398 "MemorySanitizer: detects uninitialized reads.", false, false)
400 FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins, bool Recover) {
401 return new MemorySanitizer(TrackOrigins, Recover);
404 /// \brief Create a non-const global initialized with the given string.
406 /// Creates a writable global for Str so that we can pass it to the
407 /// run-time lib. Runtime uses first 4 bytes of the string to store the
408 /// frame ID, so the string needs to be mutable.
409 static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
411 Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
412 return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
413 GlobalValue::PrivateLinkage, StrConst, "");
416 /// \brief Insert extern declaration of runtime-provided functions and globals.
417 void MemorySanitizer::initializeCallbacks(Module &M) {
418 // Only do this once.
423 // Create the callback.
424 // FIXME: this function should have "Cold" calling conv,
425 // which is not yet implemented.
426 StringRef WarningFnName = Recover ? "__msan_warning"
427 : "__msan_warning_noreturn";
428 WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), nullptr);
430 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
432 unsigned AccessSize = 1 << AccessSizeIndex;
433 std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
434 MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
435 FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
436 IRB.getInt32Ty(), nullptr);
438 FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
439 MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
440 FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
441 IRB.getInt8PtrTy(), IRB.getInt32Ty(), nullptr);
444 MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
445 "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
446 IRB.getInt8PtrTy(), IntptrTy, nullptr);
448 M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(),
449 IRB.getInt8PtrTy(), IntptrTy, nullptr);
450 MsanChainOriginFn = M.getOrInsertFunction(
451 "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), nullptr);
452 MemmoveFn = M.getOrInsertFunction(
453 "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
454 IRB.getInt8PtrTy(), IntptrTy, nullptr);
455 MemcpyFn = M.getOrInsertFunction(
456 "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
458 MemsetFn = M.getOrInsertFunction(
459 "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
463 RetvalTLS = new GlobalVariable(
464 M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false,
465 GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr,
466 GlobalVariable::InitialExecTLSModel);
467 RetvalOriginTLS = new GlobalVariable(
468 M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr,
469 "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
471 ParamTLS = new GlobalVariable(
472 M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
473 GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr,
474 GlobalVariable::InitialExecTLSModel);
475 ParamOriginTLS = new GlobalVariable(
476 M, ArrayType::get(OriginTy, kParamTLSSize / 4), false,
477 GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls",
478 nullptr, GlobalVariable::InitialExecTLSModel);
480 VAArgTLS = new GlobalVariable(
481 M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
482 GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr,
483 GlobalVariable::InitialExecTLSModel);
484 VAArgOverflowSizeTLS = new GlobalVariable(
485 M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
486 "__msan_va_arg_overflow_size_tls", nullptr,
487 GlobalVariable::InitialExecTLSModel);
488 OriginTLS = new GlobalVariable(
489 M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
490 "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
492 // We insert an empty inline asm after __msan_report* to avoid callback merge.
493 EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
494 StringRef(""), StringRef(""),
495 /*hasSideEffects=*/true);
498 /// \brief Module-level initialization.
500 /// inserts a call to __msan_init to the module's constructor list.
501 bool MemorySanitizer::doInitialization(Module &M) {
502 auto &DL = M.getDataLayout();
504 Triple TargetTriple(M.getTargetTriple());
505 switch (TargetTriple.getOS()) {
506 case Triple::FreeBSD:
507 switch (TargetTriple.getArch()) {
509 MapParams = FreeBSD_X86_MemoryMapParams.bits64;
512 MapParams = FreeBSD_X86_MemoryMapParams.bits32;
515 report_fatal_error("unsupported architecture");
519 switch (TargetTriple.getArch()) {
521 MapParams = Linux_X86_MemoryMapParams.bits64;
524 MapParams = Linux_X86_MemoryMapParams.bits32;
527 case Triple::mips64el:
528 MapParams = Linux_MIPS_MemoryMapParams.bits64;
531 case Triple::ppc64le:
532 MapParams = Linux_PowerPC_MemoryMapParams.bits64;
534 case Triple::aarch64:
535 case Triple::aarch64_be:
536 MapParams = Linux_ARM_MemoryMapParams.bits64;
539 report_fatal_error("unsupported architecture");
543 report_fatal_error("unsupported operating system");
546 C = &(M.getContext());
548 IntptrTy = IRB.getIntPtrTy(DL);
549 OriginTy = IRB.getInt32Ty();
551 ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
552 OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
554 std::tie(MsanCtorFunction, std::ignore) =
555 createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName, kMsanInitName,
559 Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
560 MsanCtorFunction->setComdat(MsanCtorComdat);
561 appendToGlobalCtors(M, MsanCtorFunction, 0, MsanCtorFunction);
563 appendToGlobalCtors(M, MsanCtorFunction, 0);
568 new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
569 IRB.getInt32(TrackOrigins), "__msan_track_origins");
572 new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
573 IRB.getInt32(Recover), "__msan_keep_going");
580 /// \brief A helper class that handles instrumentation of VarArg
581 /// functions on a particular platform.
583 /// Implementations are expected to insert the instrumentation
584 /// necessary to propagate argument shadow through VarArg function
585 /// calls. Visit* methods are called during an InstVisitor pass over
586 /// the function, and should avoid creating new basic blocks. A new
587 /// instance of this class is created for each instrumented function.
588 struct VarArgHelper {
589 /// \brief Visit a CallSite.
590 virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;
592 /// \brief Visit a va_start call.
593 virtual void visitVAStartInst(VAStartInst &I) = 0;
595 /// \brief Visit a va_copy call.
596 virtual void visitVACopyInst(VACopyInst &I) = 0;
598 /// \brief Finalize function instrumentation.
600 /// This method is called after visiting all interesting (see above)
601 /// instructions in a function.
602 virtual void finalizeInstrumentation() = 0;
604 virtual ~VarArgHelper() {}
607 struct MemorySanitizerVisitor;
610 CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
611 MemorySanitizerVisitor &Visitor);
613 unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
614 if (TypeSize <= 8) return 0;
615 return Log2_32_Ceil((TypeSize + 7) / 8);
618 /// This class does all the work for a given function. Store and Load
619 /// instructions store and load corresponding shadow and origin
620 /// values. Most instructions propagate shadow from arguments to their
621 /// return values. Certain instructions (most importantly, BranchInst)
622 /// test their argument shadow and print reports (with a runtime call) if it's
624 struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
627 SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
628 ValueMap<Value*, Value*> ShadowMap, OriginMap;
629 std::unique_ptr<VarArgHelper> VAHelper;
630 const TargetLibraryInfo *TLI;
632 // The following flags disable parts of MSan instrumentation based on
633 // blacklist contents and command-line options.
635 bool PropagateShadow;
638 bool CheckReturnValue;
640 struct ShadowOriginAndInsertPoint {
643 Instruction *OrigIns;
644 ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
645 : Shadow(S), Origin(O), OrigIns(I) { }
647 SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
648 SmallVector<StoreInst *, 16> StoreList;
650 MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
651 : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
652 bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
653 InsertChecks = SanitizeFunction;
654 PropagateShadow = SanitizeFunction;
655 PoisonStack = SanitizeFunction && ClPoisonStack;
656 PoisonUndef = SanitizeFunction && ClPoisonUndef;
657 // FIXME: Consider using SpecialCaseList to specify a list of functions that
658 // must always return fully initialized values. For now, we hardcode "main".
659 CheckReturnValue = SanitizeFunction && (F.getName() == "main");
660 TLI = &MS.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
662 DEBUG(if (!InsertChecks)
663 dbgs() << "MemorySanitizer is not inserting checks into '"
664 << F.getName() << "'\n");
667 Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
668 if (MS.TrackOrigins <= 1) return V;
669 return IRB.CreateCall(MS.MsanChainOriginFn, V);
672 Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
673 const DataLayout &DL = F.getParent()->getDataLayout();
674 unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
675 if (IntptrSize == kOriginSize) return Origin;
676 assert(IntptrSize == kOriginSize * 2);
677 Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
678 return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
681 /// \brief Fill memory range with the given origin value.
682 void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
683 unsigned Size, unsigned Alignment) {
684 const DataLayout &DL = F.getParent()->getDataLayout();
685 unsigned IntptrAlignment = DL.getABITypeAlignment(MS.IntptrTy);
686 unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
687 assert(IntptrAlignment >= kMinOriginAlignment);
688 assert(IntptrSize >= kOriginSize);
691 unsigned CurrentAlignment = Alignment;
692 if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
693 Value *IntptrOrigin = originToIntptr(IRB, Origin);
694 Value *IntptrOriginPtr =
695 IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
696 for (unsigned i = 0; i < Size / IntptrSize; ++i) {
697 Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
699 IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
700 Ofs += IntptrSize / kOriginSize;
701 CurrentAlignment = IntptrAlignment;
705 for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
707 i ? IRB.CreateConstGEP1_32(nullptr, OriginPtr, i) : OriginPtr;
708 IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
709 CurrentAlignment = kMinOriginAlignment;
713 void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
714 unsigned Alignment, bool AsCall) {
715 const DataLayout &DL = F.getParent()->getDataLayout();
716 unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
717 unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
718 if (Shadow->getType()->isAggregateType()) {
719 paintOrigin(IRB, updateOrigin(Origin, IRB),
720 getOriginPtr(Addr, IRB, Alignment), StoreSize,
723 Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
724 Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
725 if (ConstantShadow) {
726 if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
727 paintOrigin(IRB, updateOrigin(Origin, IRB),
728 getOriginPtr(Addr, IRB, Alignment), StoreSize,
733 unsigned TypeSizeInBits =
734 DL.getTypeSizeInBits(ConvertedShadow->getType());
735 unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
736 if (AsCall && SizeIndex < kNumberOfAccessSizes) {
737 Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
738 Value *ConvertedShadow2 = IRB.CreateZExt(
739 ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
740 IRB.CreateCall(Fn, {ConvertedShadow2,
741 IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
744 Value *Cmp = IRB.CreateICmpNE(
745 ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
746 Instruction *CheckTerm = SplitBlockAndInsertIfThen(
747 Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
748 IRBuilder<> IRBNew(CheckTerm);
749 paintOrigin(IRBNew, updateOrigin(Origin, IRBNew),
750 getOriginPtr(Addr, IRBNew, Alignment), StoreSize,
756 void materializeStores(bool InstrumentWithCalls) {
757 for (StoreInst *SI : StoreList) {
759 Value *Val = SI->getValueOperand();
760 Value *Addr = SI->getPointerOperand();
761 Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
762 Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
765 IRB.CreateAlignedStore(Shadow, ShadowPtr, SI->getAlignment());
766 DEBUG(dbgs() << " STORE: " << *NewSI << "\n");
769 if (ClCheckAccessAddress)
770 insertShadowCheck(Addr, SI);
773 SI->setOrdering(addReleaseOrdering(SI->getOrdering()));
775 if (MS.TrackOrigins && !SI->isAtomic())
776 storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI->getAlignment(),
777 InstrumentWithCalls);
781 void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
783 IRBuilder<> IRB(OrigIns);
784 DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
785 Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
786 DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
788 Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
789 if (ConstantShadow) {
790 if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
791 if (MS.TrackOrigins) {
792 IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
795 IRB.CreateCall(MS.WarningFn, {});
796 IRB.CreateCall(MS.EmptyAsm, {});
797 // FIXME: Insert UnreachableInst if !MS.Recover?
798 // This may invalidate some of the following checks and needs to be done
804 const DataLayout &DL = OrigIns->getModule()->getDataLayout();
806 unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
807 unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
808 if (AsCall && SizeIndex < kNumberOfAccessSizes) {
809 Value *Fn = MS.MaybeWarningFn[SizeIndex];
810 Value *ConvertedShadow2 =
811 IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
812 IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
814 : (Value *)IRB.getInt32(0)});
816 Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
817 getCleanShadow(ConvertedShadow), "_mscmp");
818 Instruction *CheckTerm = SplitBlockAndInsertIfThen(
820 /* Unreachable */ !MS.Recover, MS.ColdCallWeights);
822 IRB.SetInsertPoint(CheckTerm);
823 if (MS.TrackOrigins) {
824 IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
827 IRB.CreateCall(MS.WarningFn, {});
828 IRB.CreateCall(MS.EmptyAsm, {});
829 DEBUG(dbgs() << " CHECK: " << *Cmp << "\n");
833 void materializeChecks(bool InstrumentWithCalls) {
834 for (const auto &ShadowData : InstrumentationList) {
835 Instruction *OrigIns = ShadowData.OrigIns;
836 Value *Shadow = ShadowData.Shadow;
837 Value *Origin = ShadowData.Origin;
838 materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
840 DEBUG(dbgs() << "DONE:\n" << F);
843 /// \brief Add MemorySanitizer instrumentation to a function.
844 bool runOnFunction() {
845 MS.initializeCallbacks(*F.getParent());
847 // In the presence of unreachable blocks, we may see Phi nodes with
848 // incoming nodes from such blocks. Since InstVisitor skips unreachable
849 // blocks, such nodes will not have any shadow value associated with them.
850 // It's easier to remove unreachable blocks than deal with missing shadow.
851 removeUnreachableBlocks(F);
853 // Iterate all BBs in depth-first order and create shadow instructions
854 // for all instructions (where applicable).
855 // For PHI nodes we create dummy shadow PHIs which will be finalized later.
856 for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
860 // Finalize PHI nodes.
861 for (PHINode *PN : ShadowPHINodes) {
862 PHINode *PNS = cast<PHINode>(getShadow(PN));
863 PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
864 size_t NumValues = PN->getNumIncomingValues();
865 for (size_t v = 0; v < NumValues; v++) {
866 PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
867 if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
871 VAHelper->finalizeInstrumentation();
873 bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
874 InstrumentationList.size() + StoreList.size() >
875 (unsigned)ClInstrumentationWithCallThreshold;
877 // Delayed instrumentation of StoreInst.
878 // This may add new checks to be inserted later.
879 materializeStores(InstrumentWithCalls);
881 // Insert shadow value checks.
882 materializeChecks(InstrumentWithCalls);
887 /// \brief Compute the shadow type that corresponds to a given Value.
888 Type *getShadowTy(Value *V) {
889 return getShadowTy(V->getType());
892 /// \brief Compute the shadow type that corresponds to a given Type.
893 Type *getShadowTy(Type *OrigTy) {
894 if (!OrigTy->isSized()) {
897 // For integer type, shadow is the same as the original type.
898 // This may return weird-sized types like i1.
899 if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
901 const DataLayout &DL = F.getParent()->getDataLayout();
902 if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
903 uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
904 return VectorType::get(IntegerType::get(*MS.C, EltSize),
905 VT->getNumElements());
907 if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
908 return ArrayType::get(getShadowTy(AT->getElementType()),
909 AT->getNumElements());
911 if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
912 SmallVector<Type*, 4> Elements;
913 for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
914 Elements.push_back(getShadowTy(ST->getElementType(i)));
915 StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
916 DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
919 uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
920 return IntegerType::get(*MS.C, TypeSize);
923 /// \brief Flatten a vector type.
924 Type *getShadowTyNoVec(Type *ty) {
925 if (VectorType *vt = dyn_cast<VectorType>(ty))
926 return IntegerType::get(*MS.C, vt->getBitWidth());
930 /// \brief Convert a shadow value to it's flattened variant.
931 Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
932 Type *Ty = V->getType();
933 Type *NoVecTy = getShadowTyNoVec(Ty);
934 if (Ty == NoVecTy) return V;
935 return IRB.CreateBitCast(V, NoVecTy);
938 /// \brief Compute the integer shadow offset that corresponds to a given
939 /// application address.
941 /// Offset = (Addr & ~AndMask) ^ XorMask
942 Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
943 Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);
945 uint64_t AndMask = MS.MapParams->AndMask;
948 IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));
950 uint64_t XorMask = MS.MapParams->XorMask;
953 IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
957 /// \brief Compute the shadow address that corresponds to a given application
960 /// Shadow = ShadowBase + Offset
961 Value *getShadowPtr(Value *Addr, Type *ShadowTy,
963 Value *ShadowLong = getShadowPtrOffset(Addr, IRB);
964 uint64_t ShadowBase = MS.MapParams->ShadowBase;
967 IRB.CreateAdd(ShadowLong,
968 ConstantInt::get(MS.IntptrTy, ShadowBase));
969 return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
972 /// \brief Compute the origin address that corresponds to a given application
975 /// OriginAddr = (OriginBase + Offset) & ~3ULL
976 Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB, unsigned Alignment) {
977 Value *OriginLong = getShadowPtrOffset(Addr, IRB);
978 uint64_t OriginBase = MS.MapParams->OriginBase;
981 IRB.CreateAdd(OriginLong,
982 ConstantInt::get(MS.IntptrTy, OriginBase));
983 if (Alignment < kMinOriginAlignment) {
984 uint64_t Mask = kMinOriginAlignment - 1;
985 OriginLong = IRB.CreateAnd(OriginLong,
986 ConstantInt::get(MS.IntptrTy, ~Mask));
988 return IRB.CreateIntToPtr(OriginLong,
989 PointerType::get(IRB.getInt32Ty(), 0));
992 /// \brief Compute the shadow address for a given function argument.
994 /// Shadow = ParamTLS+ArgOffset.
995 Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
997 Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
998 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
999 return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
1003 /// \brief Compute the origin address for a given function argument.
1004 Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
1006 if (!MS.TrackOrigins) return nullptr;
1007 Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
1008 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1009 return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
1013 /// \brief Compute the shadow address for a retval.
1014 Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
1015 Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
1016 return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
1020 /// \brief Compute the origin address for a retval.
1021 Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
1022 // We keep a single origin for the entire retval. Might be too optimistic.
1023 return MS.RetvalOriginTLS;
1026 /// \brief Set SV to be the shadow value for V.
1027 void setShadow(Value *V, Value *SV) {
1028 assert(!ShadowMap.count(V) && "Values may only have one shadow");
1029 ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
1032 /// \brief Set Origin to be the origin value for V.
1033 void setOrigin(Value *V, Value *Origin) {
1034 if (!MS.TrackOrigins) return;
1035 assert(!OriginMap.count(V) && "Values may only have one origin");
1036 DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
1037 OriginMap[V] = Origin;
1040 /// \brief Create a clean shadow value for a given value.
1042 /// Clean shadow (all zeroes) means all bits of the value are defined
1044 Constant *getCleanShadow(Value *V) {
1045 Type *ShadowTy = getShadowTy(V);
1048 return Constant::getNullValue(ShadowTy);
1051 /// \brief Create a dirty shadow of a given shadow type.
1052 Constant *getPoisonedShadow(Type *ShadowTy) {
1054 if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1055 return Constant::getAllOnesValue(ShadowTy);
1056 if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1057 SmallVector<Constant *, 4> Vals(AT->getNumElements(),
1058 getPoisonedShadow(AT->getElementType()));
1059 return ConstantArray::get(AT, Vals);
1061 if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1062 SmallVector<Constant *, 4> Vals;
1063 for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1064 Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
1065 return ConstantStruct::get(ST, Vals);
1067 llvm_unreachable("Unexpected shadow type");
1070 /// \brief Create a dirty shadow for a given value.
1071 Constant *getPoisonedShadow(Value *V) {
1072 Type *ShadowTy = getShadowTy(V);
1075 return getPoisonedShadow(ShadowTy);
1078 /// \brief Create a clean (zero) origin.
1079 Value *getCleanOrigin() {
1080 return Constant::getNullValue(MS.OriginTy);
1083 /// \brief Get the shadow value for a given Value.
1085 /// This function either returns the value set earlier with setShadow,
1086 /// or extracts if from ParamTLS (for function arguments).
1087 Value *getShadow(Value *V) {
1088 if (!PropagateShadow) return getCleanShadow(V);
1089 if (Instruction *I = dyn_cast<Instruction>(V)) {
1090 // For instructions the shadow is already stored in the map.
1091 Value *Shadow = ShadowMap[V];
1093 DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
1095 assert(Shadow && "No shadow for a value");
1099 if (UndefValue *U = dyn_cast<UndefValue>(V)) {
1100 Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
1101 DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
1105 if (Argument *A = dyn_cast<Argument>(V)) {
1106 // For arguments we compute the shadow on demand and store it in the map.
1107 Value **ShadowPtr = &ShadowMap[V];
1110 Function *F = A->getParent();
1111 IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
1112 unsigned ArgOffset = 0;
1113 const DataLayout &DL = F->getParent()->getDataLayout();
1114 for (auto &FArg : F->args()) {
1115 if (!FArg.getType()->isSized()) {
1116 DEBUG(dbgs() << "Arg is not sized\n");
1121 ? DL.getTypeAllocSize(FArg.getType()->getPointerElementType())
1122 : DL.getTypeAllocSize(FArg.getType());
1124 bool Overflow = ArgOffset + Size > kParamTLSSize;
1125 Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
1126 if (FArg.hasByValAttr()) {
1127 // ByVal pointer itself has clean shadow. We copy the actual
1128 // argument shadow to the underlying memory.
1129 // Figure out maximal valid memcpy alignment.
1130 unsigned ArgAlign = FArg.getParamAlignment();
1131 if (ArgAlign == 0) {
1132 Type *EltType = A->getType()->getPointerElementType();
1133 ArgAlign = DL.getABITypeAlignment(EltType);
1136 // ParamTLS overflow.
1137 EntryIRB.CreateMemSet(
1138 getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB),
1139 Constant::getNullValue(EntryIRB.getInt8Ty()), Size, ArgAlign);
1141 unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
1142 Value *Cpy = EntryIRB.CreateMemCpy(
1143 getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
1145 DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");
1148 *ShadowPtr = getCleanShadow(V);
1151 // ParamTLS overflow.
1152 *ShadowPtr = getCleanShadow(V);
1155 EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
1158 DEBUG(dbgs() << " ARG: " << FArg << " ==> " <<
1159 **ShadowPtr << "\n");
1160 if (MS.TrackOrigins && !Overflow) {
1162 getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
1163 setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
1165 setOrigin(A, getCleanOrigin());
1168 ArgOffset += alignTo(Size, kShadowTLSAlignment);
1170 assert(*ShadowPtr && "Could not find shadow for an argument");
1173 // For everything else the shadow is zero.
1174 return getCleanShadow(V);
1177 /// \brief Get the shadow for i-th argument of the instruction I.
1178 Value *getShadow(Instruction *I, int i) {
1179 return getShadow(I->getOperand(i));
1182 /// \brief Get the origin for a value.
1183 Value *getOrigin(Value *V) {
1184 if (!MS.TrackOrigins) return nullptr;
1185 if (!PropagateShadow) return getCleanOrigin();
1186 if (isa<Constant>(V)) return getCleanOrigin();
1187 assert((isa<Instruction>(V) || isa<Argument>(V)) &&
1188 "Unexpected value type in getOrigin()");
1189 Value *Origin = OriginMap[V];
1190 assert(Origin && "Missing origin");
1194 /// \brief Get the origin for i-th argument of the instruction I.
1195 Value *getOrigin(Instruction *I, int i) {
1196 return getOrigin(I->getOperand(i));
1199 /// \brief Remember the place where a shadow check should be inserted.
1201 /// This location will be later instrumented with a check that will print a
1202 /// UMR warning in runtime if the shadow value is not 0.
1203 void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
1205 if (!InsertChecks) return;
1207 Type *ShadowTy = Shadow->getType();
1208 assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
1209 "Can only insert checks for integer and vector shadow types");
1211 InstrumentationList.push_back(
1212 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
1215 /// \brief Remember the place where a shadow check should be inserted.
1217 /// This location will be later instrumented with a check that will print a
1218 /// UMR warning in runtime if the value is not fully defined.
1219 void insertShadowCheck(Value *Val, Instruction *OrigIns) {
1221 Value *Shadow, *Origin;
1222 if (ClCheckConstantShadow) {
1223 Shadow = getShadow(Val);
1224 if (!Shadow) return;
1225 Origin = getOrigin(Val);
1227 Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
1228 if (!Shadow) return;
1229 Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
1231 insertShadowCheck(Shadow, Origin, OrigIns);
1234 AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
1236 case AtomicOrdering::NotAtomic:
1237 return AtomicOrdering::NotAtomic;
1238 case AtomicOrdering::Unordered:
1239 case AtomicOrdering::Monotonic:
1240 case AtomicOrdering::Release:
1241 return AtomicOrdering::Release;
1242 case AtomicOrdering::Acquire:
1243 case AtomicOrdering::AcquireRelease:
1244 return AtomicOrdering::AcquireRelease;
1245 case AtomicOrdering::SequentiallyConsistent:
1246 return AtomicOrdering::SequentiallyConsistent;
1248 llvm_unreachable("Unknown ordering");
1251 AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
1253 case AtomicOrdering::NotAtomic:
1254 return AtomicOrdering::NotAtomic;
1255 case AtomicOrdering::Unordered:
1256 case AtomicOrdering::Monotonic:
1257 case AtomicOrdering::Acquire:
1258 return AtomicOrdering::Acquire;
1259 case AtomicOrdering::Release:
1260 case AtomicOrdering::AcquireRelease:
1261 return AtomicOrdering::AcquireRelease;
1262 case AtomicOrdering::SequentiallyConsistent:
1263 return AtomicOrdering::SequentiallyConsistent;
1265 llvm_unreachable("Unknown ordering");
1268 // ------------------- Visitors.
1270 /// \brief Instrument LoadInst
1272 /// Loads the corresponding shadow and (optionally) origin.
1273 /// Optionally, checks that the load address is fully defined.
1274 void visitLoadInst(LoadInst &I) {
1275 assert(I.getType()->isSized() && "Load type must have size");
1276 IRBuilder<> IRB(I.getNextNode());
1277 Type *ShadowTy = getShadowTy(&I);
1278 Value *Addr = I.getPointerOperand();
1279 if (PropagateShadow && !I.getMetadata("nosanitize")) {
1280 Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
1282 IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
1284 setShadow(&I, getCleanShadow(&I));
1287 if (ClCheckAccessAddress)
1288 insertShadowCheck(I.getPointerOperand(), &I);
1291 I.setOrdering(addAcquireOrdering(I.getOrdering()));
1293 if (MS.TrackOrigins) {
1294 if (PropagateShadow) {
1295 unsigned Alignment = I.getAlignment();
1296 unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1297 setOrigin(&I, IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB, Alignment),
1300 setOrigin(&I, getCleanOrigin());
1305 /// \brief Instrument StoreInst
1307 /// Stores the corresponding shadow and (optionally) origin.
1308 /// Optionally, checks that the store address is fully defined.
1309 void visitStoreInst(StoreInst &I) {
1310 StoreList.push_back(&I);
1313 void handleCASOrRMW(Instruction &I) {
1314 assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
1316 IRBuilder<> IRB(&I);
1317 Value *Addr = I.getOperand(0);
1318 Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);
1320 if (ClCheckAccessAddress)
1321 insertShadowCheck(Addr, &I);
1323 // Only test the conditional argument of cmpxchg instruction.
1324 // The other argument can potentially be uninitialized, but we can not
1325 // detect this situation reliably without possible false positives.
1326 if (isa<AtomicCmpXchgInst>(I))
1327 insertShadowCheck(I.getOperand(1), &I);
1329 IRB.CreateStore(getCleanShadow(&I), ShadowPtr);
1331 setShadow(&I, getCleanShadow(&I));
1332 setOrigin(&I, getCleanOrigin());
1335 void visitAtomicRMWInst(AtomicRMWInst &I) {
1337 I.setOrdering(addReleaseOrdering(I.getOrdering()));
1340 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
1342 I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
1345 // Vector manipulation.
1346 void visitExtractElementInst(ExtractElementInst &I) {
1347 insertShadowCheck(I.getOperand(1), &I);
1348 IRBuilder<> IRB(&I);
1349 setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
1351 setOrigin(&I, getOrigin(&I, 0));
1354 void visitInsertElementInst(InsertElementInst &I) {
1355 insertShadowCheck(I.getOperand(2), &I);
1356 IRBuilder<> IRB(&I);
1357 setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
1358 I.getOperand(2), "_msprop"));
1359 setOriginForNaryOp(I);
1362 void visitShuffleVectorInst(ShuffleVectorInst &I) {
1363 insertShadowCheck(I.getOperand(2), &I);
1364 IRBuilder<> IRB(&I);
1365 setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
1366 I.getOperand(2), "_msprop"));
1367 setOriginForNaryOp(I);
1371 void visitSExtInst(SExtInst &I) {
1372 IRBuilder<> IRB(&I);
1373 setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
1374 setOrigin(&I, getOrigin(&I, 0));
1377 void visitZExtInst(ZExtInst &I) {
1378 IRBuilder<> IRB(&I);
1379 setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
1380 setOrigin(&I, getOrigin(&I, 0));
1383 void visitTruncInst(TruncInst &I) {
1384 IRBuilder<> IRB(&I);
1385 setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
1386 setOrigin(&I, getOrigin(&I, 0));
1389 void visitBitCastInst(BitCastInst &I) {
1390 // Special case: if this is the bitcast (there is exactly 1 allowed) between
1391 // a musttail call and a ret, don't instrument. New instructions are not
1392 // allowed after a musttail call.
1393 if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
1394 if (CI->isMustTailCall())
1396 IRBuilder<> IRB(&I);
1397 setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
1398 setOrigin(&I, getOrigin(&I, 0));
1401 void visitPtrToIntInst(PtrToIntInst &I) {
1402 IRBuilder<> IRB(&I);
1403 setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
1404 "_msprop_ptrtoint"));
1405 setOrigin(&I, getOrigin(&I, 0));
1408 void visitIntToPtrInst(IntToPtrInst &I) {
1409 IRBuilder<> IRB(&I);
1410 setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
1411 "_msprop_inttoptr"));
1412 setOrigin(&I, getOrigin(&I, 0));
1415 void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
1416 void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
1417 void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
1418 void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
1419 void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
1420 void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
1422 /// \brief Propagate shadow for bitwise AND.
1424 /// This code is exact, i.e. if, for example, a bit in the left argument
1425 /// is defined and 0, then neither the value not definedness of the
1426 /// corresponding bit in B don't affect the resulting shadow.
1427 void visitAnd(BinaryOperator &I) {
1428 IRBuilder<> IRB(&I);
1429 // "And" of 0 and a poisoned value results in unpoisoned value.
1430 // 1&1 => 1; 0&1 => 0; p&1 => p;
1431 // 1&0 => 0; 0&0 => 0; p&0 => 0;
1432 // 1&p => p; 0&p => 0; p&p => p;
1433 // S = (S1 & S2) | (V1 & S2) | (S1 & V2)
1434 Value *S1 = getShadow(&I, 0);
1435 Value *S2 = getShadow(&I, 1);
1436 Value *V1 = I.getOperand(0);
1437 Value *V2 = I.getOperand(1);
1438 if (V1->getType() != S1->getType()) {
1439 V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1440 V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1442 Value *S1S2 = IRB.CreateAnd(S1, S2);
1443 Value *V1S2 = IRB.CreateAnd(V1, S2);
1444 Value *S1V2 = IRB.CreateAnd(S1, V2);
1445 setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
1446 setOriginForNaryOp(I);
1449 void visitOr(BinaryOperator &I) {
1450 IRBuilder<> IRB(&I);
1451 // "Or" of 1 and a poisoned value results in unpoisoned value.
1452 // 1|1 => 1; 0|1 => 1; p|1 => 1;
1453 // 1|0 => 1; 0|0 => 0; p|0 => p;
1454 // 1|p => 1; 0|p => p; p|p => p;
1455 // S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
1456 Value *S1 = getShadow(&I, 0);
1457 Value *S2 = getShadow(&I, 1);
1458 Value *V1 = IRB.CreateNot(I.getOperand(0));
1459 Value *V2 = IRB.CreateNot(I.getOperand(1));
1460 if (V1->getType() != S1->getType()) {
1461 V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1462 V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1464 Value *S1S2 = IRB.CreateAnd(S1, S2);
1465 Value *V1S2 = IRB.CreateAnd(V1, S2);
1466 Value *S1V2 = IRB.CreateAnd(S1, V2);
1467 setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
1468 setOriginForNaryOp(I);
1471 /// \brief Default propagation of shadow and/or origin.
1473 /// This class implements the general case of shadow propagation, used in all
1474 /// cases where we don't know and/or don't care about what the operation
1475 /// actually does. It converts all input shadow values to a common type
1476 /// (extending or truncating as necessary), and bitwise OR's them.
1478 /// This is much cheaper than inserting checks (i.e. requiring inputs to be
1479 /// fully initialized), and less prone to false positives.
1481 /// This class also implements the general case of origin propagation. For a
1482 /// Nary operation, result origin is set to the origin of an argument that is
1483 /// not entirely initialized. If there is more than one such arguments, the
1484 /// rightmost of them is picked. It does not matter which one is picked if all
1485 /// arguments are initialized.
1486 template <bool CombineShadow>
1491 MemorySanitizerVisitor *MSV;
1494 Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
1495 Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {}
1497 /// \brief Add a pair of shadow and origin values to the mix.
1498 Combiner &Add(Value *OpShadow, Value *OpOrigin) {
1499 if (CombineShadow) {
1504 OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
1505 Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
1509 if (MSV->MS.TrackOrigins) {
1514 Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
1515 // No point in adding something that might result in 0 origin value.
1516 if (!ConstOrigin || !ConstOrigin->isNullValue()) {
1517 Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
1519 IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
1520 Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
1527 /// \brief Add an application value to the mix.
1528 Combiner &Add(Value *V) {
1529 Value *OpShadow = MSV->getShadow(V);
1530 Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
1531 return Add(OpShadow, OpOrigin);
1534 /// \brief Set the current combined values as the given instruction's shadow
1536 void Done(Instruction *I) {
1537 if (CombineShadow) {
1539 Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
1540 MSV->setShadow(I, Shadow);
1542 if (MSV->MS.TrackOrigins) {
1544 MSV->setOrigin(I, Origin);
1549 typedef Combiner<true> ShadowAndOriginCombiner;
1550 typedef Combiner<false> OriginCombiner;
1552 /// \brief Propagate origin for arbitrary operation.
1553 void setOriginForNaryOp(Instruction &I) {
1554 if (!MS.TrackOrigins) return;
1555 IRBuilder<> IRB(&I);
1556 OriginCombiner OC(this, IRB);
1557 for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
1562 size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
1563 assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
1564 "Vector of pointers is not a valid shadow type");
1565 return Ty->isVectorTy() ?
1566 Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
1567 Ty->getPrimitiveSizeInBits();
1570 /// \brief Cast between two shadow types, extending or truncating as
1572 Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
1573 bool Signed = false) {
1574 Type *srcTy = V->getType();
1575 if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
1576 return IRB.CreateIntCast(V, dstTy, Signed);
1577 if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
1578 dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
1579 return IRB.CreateIntCast(V, dstTy, Signed);
1580 size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
1581 size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
1582 Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
1584 IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
1585 return IRB.CreateBitCast(V2, dstTy);
1586 // TODO: handle struct types.
1589 /// \brief Cast an application value to the type of its own shadow.
1590 Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
1591 Type *ShadowTy = getShadowTy(V);
1592 if (V->getType() == ShadowTy)
1594 if (V->getType()->isPtrOrPtrVectorTy())
1595 return IRB.CreatePtrToInt(V, ShadowTy);
1597 return IRB.CreateBitCast(V, ShadowTy);
1600 /// \brief Propagate shadow for arbitrary operation.
1601 void handleShadowOr(Instruction &I) {
1602 IRBuilder<> IRB(&I);
1603 ShadowAndOriginCombiner SC(this, IRB);
1604 for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
1609 // \brief Handle multiplication by constant.
1611 // Handle a special case of multiplication by constant that may have one or
1612 // more zeros in the lower bits. This makes corresponding number of lower bits
1613 // of the result zero as well. We model it by shifting the other operand
1614 // shadow left by the required number of bits. Effectively, we transform
1615 // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
1616 // We use multiplication by 2**N instead of shift to cover the case of
1617 // multiplication by 0, which may occur in some elements of a vector operand.
1618 void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
1620 Constant *ShadowMul;
1621 Type *Ty = ConstArg->getType();
1622 if (Ty->isVectorTy()) {
1623 unsigned NumElements = Ty->getVectorNumElements();
1624 Type *EltTy = Ty->getSequentialElementType();
1625 SmallVector<Constant *, 16> Elements;
1626 for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
1627 if (ConstantInt *Elt =
1628 dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
1629 const APInt &V = Elt->getValue();
1630 APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
1631 Elements.push_back(ConstantInt::get(EltTy, V2));
1633 Elements.push_back(ConstantInt::get(EltTy, 1));
1636 ShadowMul = ConstantVector::get(Elements);
1638 if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
1639 const APInt &V = Elt->getValue();
1640 APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
1641 ShadowMul = ConstantInt::get(Ty, V2);
1643 ShadowMul = ConstantInt::get(Ty, 1);
1647 IRBuilder<> IRB(&I);
1649 IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
1650 setOrigin(&I, getOrigin(OtherArg));
1653 void visitMul(BinaryOperator &I) {
1654 Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
1655 Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
1656 if (constOp0 && !constOp1)
1657 handleMulByConstant(I, constOp0, I.getOperand(1));
1658 else if (constOp1 && !constOp0)
1659 handleMulByConstant(I, constOp1, I.getOperand(0));
1664 void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
1665 void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
1666 void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
1667 void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
1668 void visitSub(BinaryOperator &I) { handleShadowOr(I); }
1669 void visitXor(BinaryOperator &I) { handleShadowOr(I); }
1671 void handleDiv(Instruction &I) {
1672 IRBuilder<> IRB(&I);
1673 // Strict on the second argument.
1674 insertShadowCheck(I.getOperand(1), &I);
1675 setShadow(&I, getShadow(&I, 0));
1676 setOrigin(&I, getOrigin(&I, 0));
1679 void visitUDiv(BinaryOperator &I) { handleDiv(I); }
1680 void visitSDiv(BinaryOperator &I) { handleDiv(I); }
1681 void visitFDiv(BinaryOperator &I) { handleDiv(I); }
1682 void visitURem(BinaryOperator &I) { handleDiv(I); }
1683 void visitSRem(BinaryOperator &I) { handleDiv(I); }
1684 void visitFRem(BinaryOperator &I) { handleDiv(I); }
1686 /// \brief Instrument == and != comparisons.
1688 /// Sometimes the comparison result is known even if some of the bits of the
1689 /// arguments are not.
1690 void handleEqualityComparison(ICmpInst &I) {
1691 IRBuilder<> IRB(&I);
1692 Value *A = I.getOperand(0);
1693 Value *B = I.getOperand(1);
1694 Value *Sa = getShadow(A);
1695 Value *Sb = getShadow(B);
1697 // Get rid of pointers and vectors of pointers.
1698 // For ints (and vectors of ints), types of A and Sa match,
1699 // and this is a no-op.
1700 A = IRB.CreatePointerCast(A, Sa->getType());
1701 B = IRB.CreatePointerCast(B, Sb->getType());
1703 // A == B <==> (C = A^B) == 0
1704 // A != B <==> (C = A^B) != 0
1706 Value *C = IRB.CreateXor(A, B);
1707 Value *Sc = IRB.CreateOr(Sa, Sb);
1708 // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
1709 // Result is defined if one of the following is true
1710 // * there is a defined 1 bit in C
1711 // * C is fully defined
1712 // Si = !(C & ~Sc) && Sc
1713 Value *Zero = Constant::getNullValue(Sc->getType());
1714 Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
1716 IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
1718 IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
1719 Si->setName("_msprop_icmp");
1721 setOriginForNaryOp(I);
1724 /// \brief Build the lowest possible value of V, taking into account V's
1725 /// uninitialized bits.
1726 Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1729 // Split shadow into sign bit and other bits.
1730 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1731 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
1732 // Maximise the undefined shadow bit, minimize other undefined bits.
1734 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
1736 // Minimize undefined bits.
1737 return IRB.CreateAnd(A, IRB.CreateNot(Sa));
1741 /// \brief Build the highest possible value of V, taking into account V's
1742 /// uninitialized bits.
1743 Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1746 // Split shadow into sign bit and other bits.
1747 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1748 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
1749 // Minimise the undefined shadow bit, maximise other undefined bits.
1751 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
1753 // Maximize undefined bits.
1754 return IRB.CreateOr(A, Sa);
1758 /// \brief Instrument relational comparisons.
1760 /// This function does exact shadow propagation for all relational
1761 /// comparisons of integers, pointers and vectors of those.
1762 /// FIXME: output seems suboptimal when one of the operands is a constant
1763 void handleRelationalComparisonExact(ICmpInst &I) {
1764 IRBuilder<> IRB(&I);
1765 Value *A = I.getOperand(0);
1766 Value *B = I.getOperand(1);
1767 Value *Sa = getShadow(A);
1768 Value *Sb = getShadow(B);
1770 // Get rid of pointers and vectors of pointers.
1771 // For ints (and vectors of ints), types of A and Sa match,
1772 // and this is a no-op.
1773 A = IRB.CreatePointerCast(A, Sa->getType());
1774 B = IRB.CreatePointerCast(B, Sb->getType());
1776 // Let [a0, a1] be the interval of possible values of A, taking into account
1777 // its undefined bits. Let [b0, b1] be the interval of possible values of B.
1778 // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
1779 bool IsSigned = I.isSigned();
1780 Value *S1 = IRB.CreateICmp(I.getPredicate(),
1781 getLowestPossibleValue(IRB, A, Sa, IsSigned),
1782 getHighestPossibleValue(IRB, B, Sb, IsSigned));
1783 Value *S2 = IRB.CreateICmp(I.getPredicate(),
1784 getHighestPossibleValue(IRB, A, Sa, IsSigned),
1785 getLowestPossibleValue(IRB, B, Sb, IsSigned));
1786 Value *Si = IRB.CreateXor(S1, S2);
1788 setOriginForNaryOp(I);
1791 /// \brief Instrument signed relational comparisons.
1793 /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
1794 /// bit of the shadow. Everything else is delegated to handleShadowOr().
1795 void handleSignedRelationalComparison(ICmpInst &I) {
1797 Value *op = nullptr;
1798 CmpInst::Predicate pre;
1799 if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
1800 op = I.getOperand(0);
1801 pre = I.getPredicate();
1802 } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
1803 op = I.getOperand(1);
1804 pre = I.getSwappedPredicate();
1810 if ((constOp->isNullValue() &&
1811 (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
1812 (constOp->isAllOnesValue() &&
1813 (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
1814 IRBuilder<> IRB(&I);
1815 Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
1817 setShadow(&I, Shadow);
1818 setOrigin(&I, getOrigin(op));
1824 void visitICmpInst(ICmpInst &I) {
1825 if (!ClHandleICmp) {
1829 if (I.isEquality()) {
1830 handleEqualityComparison(I);
1834 assert(I.isRelational());
1835 if (ClHandleICmpExact) {
1836 handleRelationalComparisonExact(I);
1840 handleSignedRelationalComparison(I);
1844 assert(I.isUnsigned());
1845 if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
1846 handleRelationalComparisonExact(I);
1853 void visitFCmpInst(FCmpInst &I) {
1857 void handleShift(BinaryOperator &I) {
1858 IRBuilder<> IRB(&I);
1859 // If any of the S2 bits are poisoned, the whole thing is poisoned.
1860 // Otherwise perform the same shift on S1.
1861 Value *S1 = getShadow(&I, 0);
1862 Value *S2 = getShadow(&I, 1);
1863 Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
1865 Value *V2 = I.getOperand(1);
1866 Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
1867 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
1868 setOriginForNaryOp(I);
1871 void visitShl(BinaryOperator &I) { handleShift(I); }
1872 void visitAShr(BinaryOperator &I) { handleShift(I); }
1873 void visitLShr(BinaryOperator &I) { handleShift(I); }
1875 /// \brief Instrument llvm.memmove
1877 /// At this point we don't know if llvm.memmove will be inlined or not.
1878 /// If we don't instrument it and it gets inlined,
1879 /// our interceptor will not kick in and we will lose the memmove.
1880 /// If we instrument the call here, but it does not get inlined,
1881 /// we will memove the shadow twice: which is bad in case
1882 /// of overlapping regions. So, we simply lower the intrinsic to a call.
1884 /// Similar situation exists for memcpy and memset.
1885 void visitMemMoveInst(MemMoveInst &I) {
1886 IRBuilder<> IRB(&I);
1889 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1890 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1891 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1892 I.eraseFromParent();
1895 // Similar to memmove: avoid copying shadow twice.
1896 // This is somewhat unfortunate as it may slowdown small constant memcpys.
1897 // FIXME: consider doing manual inline for small constant sizes and proper
1899 void visitMemCpyInst(MemCpyInst &I) {
1900 IRBuilder<> IRB(&I);
1903 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1904 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1905 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1906 I.eraseFromParent();
1910 void visitMemSetInst(MemSetInst &I) {
1911 IRBuilder<> IRB(&I);
1914 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1915 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
1916 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1917 I.eraseFromParent();
1920 void visitVAStartInst(VAStartInst &I) {
1921 VAHelper->visitVAStartInst(I);
1924 void visitVACopyInst(VACopyInst &I) {
1925 VAHelper->visitVACopyInst(I);
1928 /// \brief Handle vector store-like intrinsics.
1930 /// Instrument intrinsics that look like a simple SIMD store: writes memory,
1931 /// has 1 pointer argument and 1 vector argument, returns void.
1932 bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
1933 IRBuilder<> IRB(&I);
1934 Value* Addr = I.getArgOperand(0);
1935 Value *Shadow = getShadow(&I, 1);
1936 Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
1938 // We don't know the pointer alignment (could be unaligned SSE store!).
1939 // Have to assume to worst case.
1940 IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
1942 if (ClCheckAccessAddress)
1943 insertShadowCheck(Addr, &I);
1945 // FIXME: use ClStoreCleanOrigin
1946 // FIXME: factor out common code from materializeStores
1947 if (MS.TrackOrigins)
1948 IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB, 1));
1952 /// \brief Handle vector load-like intrinsics.
1954 /// Instrument intrinsics that look like a simple SIMD load: reads memory,
1955 /// has 1 pointer argument, returns a vector.
1956 bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
1957 IRBuilder<> IRB(&I);
1958 Value *Addr = I.getArgOperand(0);
1960 Type *ShadowTy = getShadowTy(&I);
1961 if (PropagateShadow) {
1962 Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
1963 // We don't know the pointer alignment (could be unaligned SSE load!).
1964 // Have to assume to worst case.
1965 setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
1967 setShadow(&I, getCleanShadow(&I));
1970 if (ClCheckAccessAddress)
1971 insertShadowCheck(Addr, &I);
1973 if (MS.TrackOrigins) {
1974 if (PropagateShadow)
1975 setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB, 1)));
1977 setOrigin(&I, getCleanOrigin());
1982 /// \brief Handle (SIMD arithmetic)-like intrinsics.
1984 /// Instrument intrinsics with any number of arguments of the same type,
1985 /// equal to the return type. The type should be simple (no aggregates or
1986 /// pointers; vectors are fine).
1987 /// Caller guarantees that this intrinsic does not access memory.
1988 bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
1989 Type *RetTy = I.getType();
1990 if (!(RetTy->isIntOrIntVectorTy() ||
1991 RetTy->isFPOrFPVectorTy() ||
1992 RetTy->isX86_MMXTy()))
1995 unsigned NumArgOperands = I.getNumArgOperands();
1997 for (unsigned i = 0; i < NumArgOperands; ++i) {
1998 Type *Ty = I.getArgOperand(i)->getType();
2003 IRBuilder<> IRB(&I);
2004 ShadowAndOriginCombiner SC(this, IRB);
2005 for (unsigned i = 0; i < NumArgOperands; ++i)
2006 SC.Add(I.getArgOperand(i));
2012 /// \brief Heuristically instrument unknown intrinsics.
2014 /// The main purpose of this code is to do something reasonable with all
2015 /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
2016 /// We recognize several classes of intrinsics by their argument types and
2017 /// ModRefBehaviour and apply special intrumentation when we are reasonably
2018 /// sure that we know what the intrinsic does.
2020 /// We special-case intrinsics where this approach fails. See llvm.bswap
2021 /// handling as an example of that.
2022 bool handleUnknownIntrinsic(IntrinsicInst &I) {
2023 unsigned NumArgOperands = I.getNumArgOperands();
2024 if (NumArgOperands == 0)
2027 if (NumArgOperands == 2 &&
2028 I.getArgOperand(0)->getType()->isPointerTy() &&
2029 I.getArgOperand(1)->getType()->isVectorTy() &&
2030 I.getType()->isVoidTy() &&
2031 !I.onlyReadsMemory()) {
2032 // This looks like a vector store.
2033 return handleVectorStoreIntrinsic(I);
2036 if (NumArgOperands == 1 &&
2037 I.getArgOperand(0)->getType()->isPointerTy() &&
2038 I.getType()->isVectorTy() &&
2039 I.onlyReadsMemory()) {
2040 // This looks like a vector load.
2041 return handleVectorLoadIntrinsic(I);
2044 if (I.doesNotAccessMemory())
2045 if (maybeHandleSimpleNomemIntrinsic(I))
2048 // FIXME: detect and handle SSE maskstore/maskload
2052 void handleBswap(IntrinsicInst &I) {
2053 IRBuilder<> IRB(&I);
2054 Value *Op = I.getArgOperand(0);
2055 Type *OpType = Op->getType();
2056 Function *BswapFunc = Intrinsic::getDeclaration(
2057 F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
2058 setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
2059 setOrigin(&I, getOrigin(Op));
2062 // \brief Instrument vector convert instrinsic.
2064 // This function instruments intrinsics like cvtsi2ss:
2065 // %Out = int_xxx_cvtyyy(%ConvertOp)
2067 // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
2068 // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
2069 // number \p Out elements, and (if has 2 arguments) copies the rest of the
2070 // elements from \p CopyOp.
2071 // In most cases conversion involves floating-point value which may trigger a
2072 // hardware exception when not fully initialized. For this reason we require
2073 // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
2074 // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
2075 // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
2076 // return a fully initialized value.
2077 void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
2078 IRBuilder<> IRB(&I);
2079 Value *CopyOp, *ConvertOp;
2081 switch (I.getNumArgOperands()) {
2083 assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
2085 CopyOp = I.getArgOperand(0);
2086 ConvertOp = I.getArgOperand(1);
2089 ConvertOp = I.getArgOperand(0);
2093 llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
2096 // The first *NumUsedElements* elements of ConvertOp are converted to the
2097 // same number of output elements. The rest of the output is copied from
2098 // CopyOp, or (if not available) filled with zeroes.
2099 // Combine shadow for elements of ConvertOp that are used in this operation,
2100 // and insert a check.
2101 // FIXME: consider propagating shadow of ConvertOp, at least in the case of
2102 // int->any conversion.
2103 Value *ConvertShadow = getShadow(ConvertOp);
2104 Value *AggShadow = nullptr;
2105 if (ConvertOp->getType()->isVectorTy()) {
2106 AggShadow = IRB.CreateExtractElement(
2107 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
2108 for (int i = 1; i < NumUsedElements; ++i) {
2109 Value *MoreShadow = IRB.CreateExtractElement(
2110 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
2111 AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
2114 AggShadow = ConvertShadow;
2116 assert(AggShadow->getType()->isIntegerTy());
2117 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
2119 // Build result shadow by zero-filling parts of CopyOp shadow that come from
2122 assert(CopyOp->getType() == I.getType());
2123 assert(CopyOp->getType()->isVectorTy());
2124 Value *ResultShadow = getShadow(CopyOp);
2125 Type *EltTy = ResultShadow->getType()->getVectorElementType();
2126 for (int i = 0; i < NumUsedElements; ++i) {
2127 ResultShadow = IRB.CreateInsertElement(
2128 ResultShadow, ConstantInt::getNullValue(EltTy),
2129 ConstantInt::get(IRB.getInt32Ty(), i));
2131 setShadow(&I, ResultShadow);
2132 setOrigin(&I, getOrigin(CopyOp));
2134 setShadow(&I, getCleanShadow(&I));
2135 setOrigin(&I, getCleanOrigin());
2139 // Given a scalar or vector, extract lower 64 bits (or less), and return all
2140 // zeroes if it is zero, and all ones otherwise.
2141 Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2142 if (S->getType()->isVectorTy())
2143 S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
2144 assert(S->getType()->getPrimitiveSizeInBits() <= 64);
2145 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2146 return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2149 // Given a vector, extract its first element, and return all
2150 // zeroes if it is zero, and all ones otherwise.
2151 Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2152 Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
2153 Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
2154 return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2157 Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
2158 Type *T = S->getType();
2159 assert(T->isVectorTy());
2160 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2161 return IRB.CreateSExt(S2, T);
2164 // \brief Instrument vector shift instrinsic.
2166 // This function instruments intrinsics like int_x86_avx2_psll_w.
2167 // Intrinsic shifts %In by %ShiftSize bits.
2168 // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
2169 // size, and the rest is ignored. Behavior is defined even if shift size is
2170 // greater than register (or field) width.
2171 void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
2172 assert(I.getNumArgOperands() == 2);
2173 IRBuilder<> IRB(&I);
2174 // If any of the S2 bits are poisoned, the whole thing is poisoned.
2175 // Otherwise perform the same shift on S1.
2176 Value *S1 = getShadow(&I, 0);
2177 Value *S2 = getShadow(&I, 1);
2178 Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
2179 : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
2180 Value *V1 = I.getOperand(0);
2181 Value *V2 = I.getOperand(1);
2182 Value *Shift = IRB.CreateCall(I.getCalledValue(),
2183 {IRB.CreateBitCast(S1, V1->getType()), V2});
2184 Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
2185 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2186 setOriginForNaryOp(I);
2189 // \brief Get an X86_MMX-sized vector type.
2190 Type *getMMXVectorTy(unsigned EltSizeInBits) {
2191 const unsigned X86_MMXSizeInBits = 64;
2192 return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
2193 X86_MMXSizeInBits / EltSizeInBits);
2196 // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack
2198 Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
2200 case llvm::Intrinsic::x86_sse2_packsswb_128:
2201 case llvm::Intrinsic::x86_sse2_packuswb_128:
2202 return llvm::Intrinsic::x86_sse2_packsswb_128;
2204 case llvm::Intrinsic::x86_sse2_packssdw_128:
2205 case llvm::Intrinsic::x86_sse41_packusdw:
2206 return llvm::Intrinsic::x86_sse2_packssdw_128;
2208 case llvm::Intrinsic::x86_avx2_packsswb:
2209 case llvm::Intrinsic::x86_avx2_packuswb:
2210 return llvm::Intrinsic::x86_avx2_packsswb;
2212 case llvm::Intrinsic::x86_avx2_packssdw:
2213 case llvm::Intrinsic::x86_avx2_packusdw:
2214 return llvm::Intrinsic::x86_avx2_packssdw;
2216 case llvm::Intrinsic::x86_mmx_packsswb:
2217 case llvm::Intrinsic::x86_mmx_packuswb:
2218 return llvm::Intrinsic::x86_mmx_packsswb;
2220 case llvm::Intrinsic::x86_mmx_packssdw:
2221 return llvm::Intrinsic::x86_mmx_packssdw;
2223 llvm_unreachable("unexpected intrinsic id");
2227 // \brief Instrument vector pack instrinsic.
2229 // This function instruments intrinsics like x86_mmx_packsswb, that
2230 // packs elements of 2 input vectors into half as many bits with saturation.
2231 // Shadow is propagated with the signed variant of the same intrinsic applied
2232 // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
2233 // EltSizeInBits is used only for x86mmx arguments.
2234 void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
2235 assert(I.getNumArgOperands() == 2);
2236 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2237 IRBuilder<> IRB(&I);
2238 Value *S1 = getShadow(&I, 0);
2239 Value *S2 = getShadow(&I, 1);
2240 assert(isX86_MMX || S1->getType()->isVectorTy());
2242 // SExt and ICmpNE below must apply to individual elements of input vectors.
2243 // In case of x86mmx arguments, cast them to appropriate vector types and
2245 Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
2247 S1 = IRB.CreateBitCast(S1, T);
2248 S2 = IRB.CreateBitCast(S2, T);
2250 Value *S1_ext = IRB.CreateSExt(
2251 IRB.CreateICmpNE(S1, llvm::Constant::getNullValue(T)), T);
2252 Value *S2_ext = IRB.CreateSExt(
2253 IRB.CreateICmpNE(S2, llvm::Constant::getNullValue(T)), T);
2255 Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
2256 S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
2257 S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
2260 Function *ShadowFn = Intrinsic::getDeclaration(
2261 F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
2264 IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
2265 if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
2267 setOriginForNaryOp(I);
2270 // \brief Instrument sum-of-absolute-differencies intrinsic.
2271 void handleVectorSadIntrinsic(IntrinsicInst &I) {
2272 const unsigned SignificantBitsPerResultElement = 16;
2273 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2274 Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
2275 unsigned ZeroBitsPerResultElement =
2276 ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
2278 IRBuilder<> IRB(&I);
2279 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2280 S = IRB.CreateBitCast(S, ResTy);
2281 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2283 S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
2284 S = IRB.CreateBitCast(S, getShadowTy(&I));
2286 setOriginForNaryOp(I);
2289 // \brief Instrument multiply-add intrinsic.
2290 void handleVectorPmaddIntrinsic(IntrinsicInst &I,
2291 unsigned EltSizeInBits = 0) {
2292 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2293 Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
2294 IRBuilder<> IRB(&I);
2295 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2296 S = IRB.CreateBitCast(S, ResTy);
2297 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2299 S = IRB.CreateBitCast(S, getShadowTy(&I));
2301 setOriginForNaryOp(I);
2304 // \brief Instrument compare-packed intrinsic.
2305 // Basically, an or followed by sext(icmp ne 0) to end up with all-zeros or
2307 void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
2308 IRBuilder<> IRB(&I);
2309 Type *ResTy = getShadowTy(&I);
2310 Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2311 Value *S = IRB.CreateSExt(
2312 IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
2314 setOriginForNaryOp(I);
2317 // \brief Instrument compare-scalar intrinsic.
2318 // This handles both cmp* intrinsics which return the result in the first
2319 // element of a vector, and comi* which return the result as i32.
2320 void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
2321 IRBuilder<> IRB(&I);
2322 Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2323 Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
2325 setOriginForNaryOp(I);
2328 void visitIntrinsicInst(IntrinsicInst &I) {
2329 switch (I.getIntrinsicID()) {
2330 case llvm::Intrinsic::bswap:
2333 case llvm::Intrinsic::x86_avx512_vcvtsd2usi64:
2334 case llvm::Intrinsic::x86_avx512_vcvtsd2usi32:
2335 case llvm::Intrinsic::x86_avx512_vcvtss2usi64:
2336 case llvm::Intrinsic::x86_avx512_vcvtss2usi32:
2337 case llvm::Intrinsic::x86_avx512_cvttss2usi64:
2338 case llvm::Intrinsic::x86_avx512_cvttss2usi:
2339 case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
2340 case llvm::Intrinsic::x86_avx512_cvttsd2usi:
2341 case llvm::Intrinsic::x86_avx512_cvtusi2sd:
2342 case llvm::Intrinsic::x86_avx512_cvtusi2ss:
2343 case llvm::Intrinsic::x86_avx512_cvtusi642sd:
2344 case llvm::Intrinsic::x86_avx512_cvtusi642ss:
2345 case llvm::Intrinsic::x86_sse2_cvtsd2si64:
2346 case llvm::Intrinsic::x86_sse2_cvtsd2si:
2347 case llvm::Intrinsic::x86_sse2_cvtsd2ss:
2348 case llvm::Intrinsic::x86_sse2_cvtsi2sd:
2349 case llvm::Intrinsic::x86_sse2_cvtsi642sd:
2350 case llvm::Intrinsic::x86_sse2_cvtss2sd:
2351 case llvm::Intrinsic::x86_sse2_cvttsd2si64:
2352 case llvm::Intrinsic::x86_sse2_cvttsd2si:
2353 case llvm::Intrinsic::x86_sse_cvtsi2ss:
2354 case llvm::Intrinsic::x86_sse_cvtsi642ss:
2355 case llvm::Intrinsic::x86_sse_cvtss2si64:
2356 case llvm::Intrinsic::x86_sse_cvtss2si:
2357 case llvm::Intrinsic::x86_sse_cvttss2si64:
2358 case llvm::Intrinsic::x86_sse_cvttss2si:
2359 handleVectorConvertIntrinsic(I, 1);
2361 case llvm::Intrinsic::x86_sse_cvtps2pi:
2362 case llvm::Intrinsic::x86_sse_cvttps2pi:
2363 handleVectorConvertIntrinsic(I, 2);
2366 case llvm::Intrinsic::x86_avx512_psll_w_512:
2367 case llvm::Intrinsic::x86_avx512_psll_d_512:
2368 case llvm::Intrinsic::x86_avx512_psll_q_512:
2369 case llvm::Intrinsic::x86_avx512_pslli_w_512:
2370 case llvm::Intrinsic::x86_avx512_pslli_d_512:
2371 case llvm::Intrinsic::x86_avx512_pslli_q_512:
2372 case llvm::Intrinsic::x86_avx512_psrl_w_512:
2373 case llvm::Intrinsic::x86_avx512_psrl_d_512:
2374 case llvm::Intrinsic::x86_avx512_psrl_q_512:
2375 case llvm::Intrinsic::x86_avx512_psra_w_512:
2376 case llvm::Intrinsic::x86_avx512_psra_d_512:
2377 case llvm::Intrinsic::x86_avx512_psra_q_512:
2378 case llvm::Intrinsic::x86_avx512_psrli_w_512:
2379 case llvm::Intrinsic::x86_avx512_psrli_d_512:
2380 case llvm::Intrinsic::x86_avx512_psrli_q_512:
2381 case llvm::Intrinsic::x86_avx512_psrai_w_512:
2382 case llvm::Intrinsic::x86_avx512_psrai_d_512:
2383 case llvm::Intrinsic::x86_avx512_psrai_q_512:
2384 case llvm::Intrinsic::x86_avx512_psra_q_256:
2385 case llvm::Intrinsic::x86_avx512_psra_q_128:
2386 case llvm::Intrinsic::x86_avx512_psrai_q_256:
2387 case llvm::Intrinsic::x86_avx512_psrai_q_128:
2388 case llvm::Intrinsic::x86_avx2_psll_w:
2389 case llvm::Intrinsic::x86_avx2_psll_d:
2390 case llvm::Intrinsic::x86_avx2_psll_q:
2391 case llvm::Intrinsic::x86_avx2_pslli_w:
2392 case llvm::Intrinsic::x86_avx2_pslli_d:
2393 case llvm::Intrinsic::x86_avx2_pslli_q:
2394 case llvm::Intrinsic::x86_avx2_psrl_w:
2395 case llvm::Intrinsic::x86_avx2_psrl_d:
2396 case llvm::Intrinsic::x86_avx2_psrl_q:
2397 case llvm::Intrinsic::x86_avx2_psra_w:
2398 case llvm::Intrinsic::x86_avx2_psra_d:
2399 case llvm::Intrinsic::x86_avx2_psrli_w:
2400 case llvm::Intrinsic::x86_avx2_psrli_d:
2401 case llvm::Intrinsic::x86_avx2_psrli_q:
2402 case llvm::Intrinsic::x86_avx2_psrai_w:
2403 case llvm::Intrinsic::x86_avx2_psrai_d:
2404 case llvm::Intrinsic::x86_sse2_psll_w:
2405 case llvm::Intrinsic::x86_sse2_psll_d:
2406 case llvm::Intrinsic::x86_sse2_psll_q:
2407 case llvm::Intrinsic::x86_sse2_pslli_w:
2408 case llvm::Intrinsic::x86_sse2_pslli_d:
2409 case llvm::Intrinsic::x86_sse2_pslli_q:
2410 case llvm::Intrinsic::x86_sse2_psrl_w:
2411 case llvm::Intrinsic::x86_sse2_psrl_d:
2412 case llvm::Intrinsic::x86_sse2_psrl_q:
2413 case llvm::Intrinsic::x86_sse2_psra_w:
2414 case llvm::Intrinsic::x86_sse2_psra_d:
2415 case llvm::Intrinsic::x86_sse2_psrli_w:
2416 case llvm::Intrinsic::x86_sse2_psrli_d:
2417 case llvm::Intrinsic::x86_sse2_psrli_q:
2418 case llvm::Intrinsic::x86_sse2_psrai_w:
2419 case llvm::Intrinsic::x86_sse2_psrai_d:
2420 case llvm::Intrinsic::x86_mmx_psll_w:
2421 case llvm::Intrinsic::x86_mmx_psll_d:
2422 case llvm::Intrinsic::x86_mmx_psll_q:
2423 case llvm::Intrinsic::x86_mmx_pslli_w:
2424 case llvm::Intrinsic::x86_mmx_pslli_d:
2425 case llvm::Intrinsic::x86_mmx_pslli_q:
2426 case llvm::Intrinsic::x86_mmx_psrl_w:
2427 case llvm::Intrinsic::x86_mmx_psrl_d:
2428 case llvm::Intrinsic::x86_mmx_psrl_q:
2429 case llvm::Intrinsic::x86_mmx_psra_w:
2430 case llvm::Intrinsic::x86_mmx_psra_d:
2431 case llvm::Intrinsic::x86_mmx_psrli_w:
2432 case llvm::Intrinsic::x86_mmx_psrli_d:
2433 case llvm::Intrinsic::x86_mmx_psrli_q:
2434 case llvm::Intrinsic::x86_mmx_psrai_w:
2435 case llvm::Intrinsic::x86_mmx_psrai_d:
2436 handleVectorShiftIntrinsic(I, /* Variable */ false);
2438 case llvm::Intrinsic::x86_avx2_psllv_d:
2439 case llvm::Intrinsic::x86_avx2_psllv_d_256:
2440 case llvm::Intrinsic::x86_avx512_psllv_d_512:
2441 case llvm::Intrinsic::x86_avx2_psllv_q:
2442 case llvm::Intrinsic::x86_avx2_psllv_q_256:
2443 case llvm::Intrinsic::x86_avx512_psllv_q_512:
2444 case llvm::Intrinsic::x86_avx2_psrlv_d:
2445 case llvm::Intrinsic::x86_avx2_psrlv_d_256:
2446 case llvm::Intrinsic::x86_avx512_psrlv_d_512:
2447 case llvm::Intrinsic::x86_avx2_psrlv_q:
2448 case llvm::Intrinsic::x86_avx2_psrlv_q_256:
2449 case llvm::Intrinsic::x86_avx512_psrlv_q_512:
2450 case llvm::Intrinsic::x86_avx2_psrav_d:
2451 case llvm::Intrinsic::x86_avx2_psrav_d_256:
2452 case llvm::Intrinsic::x86_avx512_psrav_d_512:
2453 case llvm::Intrinsic::x86_avx512_psrav_q_128:
2454 case llvm::Intrinsic::x86_avx512_psrav_q_256:
2455 case llvm::Intrinsic::x86_avx512_psrav_q_512:
2456 handleVectorShiftIntrinsic(I, /* Variable */ true);
2459 case llvm::Intrinsic::x86_sse2_packsswb_128:
2460 case llvm::Intrinsic::x86_sse2_packssdw_128:
2461 case llvm::Intrinsic::x86_sse2_packuswb_128:
2462 case llvm::Intrinsic::x86_sse41_packusdw:
2463 case llvm::Intrinsic::x86_avx2_packsswb:
2464 case llvm::Intrinsic::x86_avx2_packssdw:
2465 case llvm::Intrinsic::x86_avx2_packuswb:
2466 case llvm::Intrinsic::x86_avx2_packusdw:
2467 handleVectorPackIntrinsic(I);
2470 case llvm::Intrinsic::x86_mmx_packsswb:
2471 case llvm::Intrinsic::x86_mmx_packuswb:
2472 handleVectorPackIntrinsic(I, 16);
2475 case llvm::Intrinsic::x86_mmx_packssdw:
2476 handleVectorPackIntrinsic(I, 32);
2479 case llvm::Intrinsic::x86_mmx_psad_bw:
2480 case llvm::Intrinsic::x86_sse2_psad_bw:
2481 case llvm::Intrinsic::x86_avx2_psad_bw:
2482 handleVectorSadIntrinsic(I);
2485 case llvm::Intrinsic::x86_sse2_pmadd_wd:
2486 case llvm::Intrinsic::x86_avx2_pmadd_wd:
2487 case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw_128:
2488 case llvm::Intrinsic::x86_avx2_pmadd_ub_sw:
2489 handleVectorPmaddIntrinsic(I);
2492 case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw:
2493 handleVectorPmaddIntrinsic(I, 8);
2496 case llvm::Intrinsic::x86_mmx_pmadd_wd:
2497 handleVectorPmaddIntrinsic(I, 16);
2500 case llvm::Intrinsic::x86_sse_cmp_ss:
2501 case llvm::Intrinsic::x86_sse2_cmp_sd:
2502 case llvm::Intrinsic::x86_sse_comieq_ss:
2503 case llvm::Intrinsic::x86_sse_comilt_ss:
2504 case llvm::Intrinsic::x86_sse_comile_ss:
2505 case llvm::Intrinsic::x86_sse_comigt_ss:
2506 case llvm::Intrinsic::x86_sse_comige_ss:
2507 case llvm::Intrinsic::x86_sse_comineq_ss:
2508 case llvm::Intrinsic::x86_sse_ucomieq_ss:
2509 case llvm::Intrinsic::x86_sse_ucomilt_ss:
2510 case llvm::Intrinsic::x86_sse_ucomile_ss:
2511 case llvm::Intrinsic::x86_sse_ucomigt_ss:
2512 case llvm::Intrinsic::x86_sse_ucomige_ss:
2513 case llvm::Intrinsic::x86_sse_ucomineq_ss:
2514 case llvm::Intrinsic::x86_sse2_comieq_sd:
2515 case llvm::Intrinsic::x86_sse2_comilt_sd:
2516 case llvm::Intrinsic::x86_sse2_comile_sd:
2517 case llvm::Intrinsic::x86_sse2_comigt_sd:
2518 case llvm::Intrinsic::x86_sse2_comige_sd:
2519 case llvm::Intrinsic::x86_sse2_comineq_sd:
2520 case llvm::Intrinsic::x86_sse2_ucomieq_sd:
2521 case llvm::Intrinsic::x86_sse2_ucomilt_sd:
2522 case llvm::Intrinsic::x86_sse2_ucomile_sd:
2523 case llvm::Intrinsic::x86_sse2_ucomigt_sd:
2524 case llvm::Intrinsic::x86_sse2_ucomige_sd:
2525 case llvm::Intrinsic::x86_sse2_ucomineq_sd:
2526 handleVectorCompareScalarIntrinsic(I);
2529 case llvm::Intrinsic::x86_sse_cmp_ps:
2530 case llvm::Intrinsic::x86_sse2_cmp_pd:
2531 // FIXME: For x86_avx_cmp_pd_256 and x86_avx_cmp_ps_256 this function
2532 // generates reasonably looking IR that fails in the backend with "Do not
2533 // know how to split the result of this operator!".
2534 handleVectorComparePackedIntrinsic(I);
2538 if (!handleUnknownIntrinsic(I))
2539 visitInstruction(I);
2544 void visitCallSite(CallSite CS) {
2545 Instruction &I = *CS.getInstruction();
2546 assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
2548 CallInst *Call = cast<CallInst>(&I);
2550 // For inline asm, do the usual thing: check argument shadow and mark all
2551 // outputs as clean. Note that any side effects of the inline asm that are
2552 // not immediately visible in its constraints are not handled.
2553 if (Call->isInlineAsm()) {
2554 visitInstruction(I);
2558 assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
2560 // We are going to insert code that relies on the fact that the callee
2561 // will become a non-readonly function after it is instrumented by us. To
2562 // prevent this code from being optimized out, mark that function
2563 // non-readonly in advance.
2564 if (Function *Func = Call->getCalledFunction()) {
2565 // Clear out readonly/readnone attributes.
2567 B.addAttribute(Attribute::ReadOnly)
2568 .addAttribute(Attribute::ReadNone);
2569 Func->removeAttributes(AttributeSet::FunctionIndex,
2570 AttributeSet::get(Func->getContext(),
2571 AttributeSet::FunctionIndex,
2575 maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
2577 IRBuilder<> IRB(&I);
2579 unsigned ArgOffset = 0;
2580 DEBUG(dbgs() << " CallSite: " << I << "\n");
2581 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
2582 ArgIt != End; ++ArgIt) {
2584 unsigned i = ArgIt - CS.arg_begin();
2585 if (!A->getType()->isSized()) {
2586 DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
2590 Value *Store = nullptr;
2591 // Compute the Shadow for arg even if it is ByVal, because
2592 // in that case getShadow() will copy the actual arg shadow to
2593 // __msan_param_tls.
2594 Value *ArgShadow = getShadow(A);
2595 Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
2596 DEBUG(dbgs() << " Arg#" << i << ": " << *A <<
2597 " Shadow: " << *ArgShadow << "\n");
2598 bool ArgIsInitialized = false;
2599 const DataLayout &DL = F.getParent()->getDataLayout();
2600 if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
2601 assert(A->getType()->isPointerTy() &&
2602 "ByVal argument is not a pointer!");
2603 Size = DL.getTypeAllocSize(A->getType()->getPointerElementType());
2604 if (ArgOffset + Size > kParamTLSSize) break;
2605 unsigned ParamAlignment = CS.getParamAlignment(i + 1);
2606 unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
2607 Store = IRB.CreateMemCpy(ArgShadowBase,
2608 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
2611 Size = DL.getTypeAllocSize(A->getType());
2612 if (ArgOffset + Size > kParamTLSSize) break;
2613 Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
2614 kShadowTLSAlignment);
2615 Constant *Cst = dyn_cast<Constant>(ArgShadow);
2616 if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
2618 if (MS.TrackOrigins && !ArgIsInitialized)
2619 IRB.CreateStore(getOrigin(A),
2620 getOriginPtrForArgument(A, IRB, ArgOffset));
2622 assert(Size != 0 && Store != nullptr);
2623 DEBUG(dbgs() << " Param:" << *Store << "\n");
2624 ArgOffset += alignTo(Size, 8);
2626 DEBUG(dbgs() << " done with call args\n");
2629 cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
2630 if (FT->isVarArg()) {
2631 VAHelper->visitCallSite(CS, IRB);
2634 // Now, get the shadow for the RetVal.
2635 if (!I.getType()->isSized()) return;
2636 // Don't emit the epilogue for musttail call returns.
2637 if (CS.isCall() && cast<CallInst>(&I)->isMustTailCall()) return;
2638 IRBuilder<> IRBBefore(&I);
2639 // Until we have full dynamic coverage, make sure the retval shadow is 0.
2640 Value *Base = getShadowPtrForRetval(&I, IRBBefore);
2641 IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
2642 BasicBlock::iterator NextInsn;
2644 NextInsn = ++I.getIterator();
2645 assert(NextInsn != I.getParent()->end());
2647 BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
2648 if (!NormalDest->getSinglePredecessor()) {
2649 // FIXME: this case is tricky, so we are just conservative here.
2650 // Perhaps we need to split the edge between this BB and NormalDest,
2651 // but a naive attempt to use SplitEdge leads to a crash.
2652 setShadow(&I, getCleanShadow(&I));
2653 setOrigin(&I, getCleanOrigin());
2656 NextInsn = NormalDest->getFirstInsertionPt();
2657 assert(NextInsn != NormalDest->end() &&
2658 "Could not find insertion point for retval shadow load");
2660 IRBuilder<> IRBAfter(&*NextInsn);
2661 Value *RetvalShadow =
2662 IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
2663 kShadowTLSAlignment, "_msret");
2664 setShadow(&I, RetvalShadow);
2665 if (MS.TrackOrigins)
2666 setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
2669 bool isAMustTailRetVal(Value *RetVal) {
2670 if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
2671 RetVal = I->getOperand(0);
2673 if (auto *I = dyn_cast<CallInst>(RetVal)) {
2674 return I->isMustTailCall();
2679 void visitReturnInst(ReturnInst &I) {
2680 IRBuilder<> IRB(&I);
2681 Value *RetVal = I.getReturnValue();
2682 if (!RetVal) return;
2683 // Don't emit the epilogue for musttail call returns.
2684 if (isAMustTailRetVal(RetVal)) return;
2685 Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
2686 if (CheckReturnValue) {
2687 insertShadowCheck(RetVal, &I);
2688 Value *Shadow = getCleanShadow(RetVal);
2689 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2691 Value *Shadow = getShadow(RetVal);
2692 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2693 // FIXME: make it conditional if ClStoreCleanOrigin==0
2694 if (MS.TrackOrigins)
2695 IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
2699 void visitPHINode(PHINode &I) {
2700 IRBuilder<> IRB(&I);
2701 if (!PropagateShadow) {
2702 setShadow(&I, getCleanShadow(&I));
2703 setOrigin(&I, getCleanOrigin());
2707 ShadowPHINodes.push_back(&I);
2708 setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
2710 if (MS.TrackOrigins)
2711 setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
2715 void visitAllocaInst(AllocaInst &I) {
2716 setShadow(&I, getCleanShadow(&I));
2717 setOrigin(&I, getCleanOrigin());
2718 IRBuilder<> IRB(I.getNextNode());
2719 const DataLayout &DL = F.getParent()->getDataLayout();
2720 uint64_t Size = DL.getTypeAllocSize(I.getAllocatedType());
2721 if (PoisonStack && ClPoisonStackWithCall) {
2722 IRB.CreateCall(MS.MsanPoisonStackFn,
2723 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
2724 ConstantInt::get(MS.IntptrTy, Size)});
2726 Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
2727 Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
2728 IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
2731 if (PoisonStack && MS.TrackOrigins) {
2732 SmallString<2048> StackDescriptionStorage;
2733 raw_svector_ostream StackDescription(StackDescriptionStorage);
2734 // We create a string with a description of the stack allocation and
2735 // pass it into __msan_set_alloca_origin.
2736 // It will be printed by the run-time if stack-originated UMR is found.
2737 // The first 4 bytes of the string are set to '----' and will be replaced
2738 // by __msan_va_arg_overflow_size_tls at the first call.
2739 StackDescription << "----" << I.getName() << "@" << F.getName();
2741 createPrivateNonConstGlobalForString(*F.getParent(),
2742 StackDescription.str());
2744 IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
2745 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
2746 ConstantInt::get(MS.IntptrTy, Size),
2747 IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
2748 IRB.CreatePointerCast(&F, MS.IntptrTy)});
2752 void visitSelectInst(SelectInst& I) {
2753 IRBuilder<> IRB(&I);
2754 // a = select b, c, d
2755 Value *B = I.getCondition();
2756 Value *C = I.getTrueValue();
2757 Value *D = I.getFalseValue();
2758 Value *Sb = getShadow(B);
2759 Value *Sc = getShadow(C);
2760 Value *Sd = getShadow(D);
2762 // Result shadow if condition shadow is 0.
2763 Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
2765 if (I.getType()->isAggregateType()) {
2766 // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
2767 // an extra "select". This results in much more compact IR.
2768 // Sa = select Sb, poisoned, (select b, Sc, Sd)
2769 Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
2771 // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
2772 // If Sb (condition is poisoned), look for bits in c and d that are equal
2773 // and both unpoisoned.
2774 // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
2776 // Cast arguments to shadow-compatible type.
2777 C = CreateAppToShadowCast(IRB, C);
2778 D = CreateAppToShadowCast(IRB, D);
2780 // Result shadow if condition shadow is 1.
2781 Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
2783 Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
2785 if (MS.TrackOrigins) {
2786 // Origins are always i32, so any vector conditions must be flattened.
2787 // FIXME: consider tracking vector origins for app vectors?
2788 if (B->getType()->isVectorTy()) {
2789 Type *FlatTy = getShadowTyNoVec(B->getType());
2790 B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
2791 ConstantInt::getNullValue(FlatTy));
2792 Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
2793 ConstantInt::getNullValue(FlatTy));
2795 // a = select b, c, d
2796 // Oa = Sb ? Ob : (b ? Oc : Od)
2798 &I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()),
2799 IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
2800 getOrigin(I.getFalseValue()))));
2804 void visitLandingPadInst(LandingPadInst &I) {
2806 // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
2807 setShadow(&I, getCleanShadow(&I));
2808 setOrigin(&I, getCleanOrigin());
2811 void visitCatchSwitchInst(CatchSwitchInst &I) {
2812 setShadow(&I, getCleanShadow(&I));
2813 setOrigin(&I, getCleanOrigin());
2816 void visitFuncletPadInst(FuncletPadInst &I) {
2817 setShadow(&I, getCleanShadow(&I));
2818 setOrigin(&I, getCleanOrigin());
2821 void visitGetElementPtrInst(GetElementPtrInst &I) {
2825 void visitExtractValueInst(ExtractValueInst &I) {
2826 IRBuilder<> IRB(&I);
2827 Value *Agg = I.getAggregateOperand();
2828 DEBUG(dbgs() << "ExtractValue: " << I << "\n");
2829 Value *AggShadow = getShadow(Agg);
2830 DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
2831 Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
2832 DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n");
2833 setShadow(&I, ResShadow);
2834 setOriginForNaryOp(I);
2837 void visitInsertValueInst(InsertValueInst &I) {
2838 IRBuilder<> IRB(&I);
2839 DEBUG(dbgs() << "InsertValue: " << I << "\n");
2840 Value *AggShadow = getShadow(I.getAggregateOperand());
2841 Value *InsShadow = getShadow(I.getInsertedValueOperand());
2842 DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
2843 DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n");
2844 Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
2845 DEBUG(dbgs() << " Res: " << *Res << "\n");
2847 setOriginForNaryOp(I);
2850 void dumpInst(Instruction &I) {
2851 if (CallInst *CI = dyn_cast<CallInst>(&I)) {
2852 errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
2854 errs() << "ZZZ " << I.getOpcodeName() << "\n";
2856 errs() << "QQQ " << I << "\n";
2859 void visitResumeInst(ResumeInst &I) {
2860 DEBUG(dbgs() << "Resume: " << I << "\n");
2861 // Nothing to do here.
2864 void visitCleanupReturnInst(CleanupReturnInst &CRI) {
2865 DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
2866 // Nothing to do here.
2869 void visitCatchReturnInst(CatchReturnInst &CRI) {
2870 DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
2871 // Nothing to do here.
2874 void visitInstruction(Instruction &I) {
2875 // Everything else: stop propagating and check for poisoned shadow.
2876 if (ClDumpStrictInstructions)
2878 DEBUG(dbgs() << "DEFAULT: " << I << "\n");
2879 for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
2880 insertShadowCheck(I.getOperand(i), &I);
2881 setShadow(&I, getCleanShadow(&I));
2882 setOrigin(&I, getCleanOrigin());
2886 /// \brief AMD64-specific implementation of VarArgHelper.
2887 struct VarArgAMD64Helper : public VarArgHelper {
2888 // An unfortunate workaround for asymmetric lowering of va_arg stuff.
2889 // See a comment in visitCallSite for more details.
2890 static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
2891 static const unsigned AMD64FpEndOffset = 176;
2894 MemorySanitizer &MS;
2895 MemorySanitizerVisitor &MSV;
2896 Value *VAArgTLSCopy;
2897 Value *VAArgOverflowSize;
2899 SmallVector<CallInst*, 16> VAStartInstrumentationList;
2901 VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
2902 MemorySanitizerVisitor &MSV)
2903 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
2904 VAArgOverflowSize(nullptr) {}
2906 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
2908 ArgKind classifyArgument(Value* arg) {
2909 // A very rough approximation of X86_64 argument classification rules.
2910 Type *T = arg->getType();
2911 if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
2912 return AK_FloatingPoint;
2913 if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
2914 return AK_GeneralPurpose;
2915 if (T->isPointerTy())
2916 return AK_GeneralPurpose;
2920 // For VarArg functions, store the argument shadow in an ABI-specific format
2921 // that corresponds to va_list layout.
2922 // We do this because Clang lowers va_arg in the frontend, and this pass
2923 // only sees the low level code that deals with va_list internals.
2924 // A much easier alternative (provided that Clang emits va_arg instructions)
2925 // would have been to associate each live instance of va_list with a copy of
2926 // MSanParamTLS, and extract shadow on va_arg() call in the argument list
2928 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
2929 unsigned GpOffset = 0;
2930 unsigned FpOffset = AMD64GpEndOffset;
2931 unsigned OverflowOffset = AMD64FpEndOffset;
2932 const DataLayout &DL = F.getParent()->getDataLayout();
2933 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
2934 ArgIt != End; ++ArgIt) {
2936 unsigned ArgNo = CS.getArgumentNo(ArgIt);
2937 bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
2938 bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
2940 // ByVal arguments always go to the overflow area.
2941 // Fixed arguments passed through the overflow area will be stepped
2942 // over by va_start, so don't count them towards the offset.
2945 assert(A->getType()->isPointerTy());
2946 Type *RealTy = A->getType()->getPointerElementType();
2947 uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
2948 Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
2949 OverflowOffset += alignTo(ArgSize, 8);
2950 IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
2951 ArgSize, kShadowTLSAlignment);
2953 ArgKind AK = classifyArgument(A);
2954 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
2956 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
2960 case AK_GeneralPurpose:
2961 Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
2964 case AK_FloatingPoint:
2965 Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
2971 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
2972 Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
2973 OverflowOffset += alignTo(ArgSize, 8);
2975 // Take fixed arguments into account for GpOffset and FpOffset,
2976 // but don't actually store shadows for them.
2979 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
2982 Constant *OverflowSize =
2983 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
2984 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
2987 /// \brief Compute the shadow address for a given va_arg.
2988 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
2990 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
2991 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
2992 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
2996 void visitVAStartInst(VAStartInst &I) override {
2997 if (F.getCallingConv() == CallingConv::X86_64_Win64)
2999 IRBuilder<> IRB(&I);
3000 VAStartInstrumentationList.push_back(&I);
3001 Value *VAListTag = I.getArgOperand(0);
3002 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3004 // Unpoison the whole __va_list_tag.
3005 // FIXME: magic ABI constants.
3006 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3007 /* size */24, /* alignment */8, false);
3010 void visitVACopyInst(VACopyInst &I) override {
3011 if (F.getCallingConv() == CallingConv::X86_64_Win64)
3013 IRBuilder<> IRB(&I);
3014 Value *VAListTag = I.getArgOperand(0);
3015 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3017 // Unpoison the whole __va_list_tag.
3018 // FIXME: magic ABI constants.
3019 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3020 /* size */24, /* alignment */8, false);
3023 void finalizeInstrumentation() override {
3024 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
3025 "finalizeInstrumentation called twice");
3026 if (!VAStartInstrumentationList.empty()) {
3027 // If there is a va_start in this function, make a backup copy of
3028 // va_arg_tls somewhere in the function entry block.
3029 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
3030 VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3032 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
3034 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3035 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
3038 // Instrument va_start.
3039 // Copy va_list shadow from the backup copy of the TLS contents.
3040 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3041 CallInst *OrigInst = VAStartInstrumentationList[i];
3042 IRBuilder<> IRB(OrigInst->getNextNode());
3043 Value *VAListTag = OrigInst->getArgOperand(0);
3045 Value *RegSaveAreaPtrPtr =
3047 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3048 ConstantInt::get(MS.IntptrTy, 16)),
3049 Type::getInt64PtrTy(*MS.C));
3050 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
3051 Value *RegSaveAreaShadowPtr =
3052 MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3053 IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
3054 AMD64FpEndOffset, 16);
3056 Value *OverflowArgAreaPtrPtr =
3058 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3059 ConstantInt::get(MS.IntptrTy, 8)),
3060 Type::getInt64PtrTy(*MS.C));
3061 Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
3062 Value *OverflowArgAreaShadowPtr =
3063 MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
3064 Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
3066 IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
3071 /// \brief MIPS64-specific implementation of VarArgHelper.
3072 struct VarArgMIPS64Helper : public VarArgHelper {
3074 MemorySanitizer &MS;
3075 MemorySanitizerVisitor &MSV;
3076 Value *VAArgTLSCopy;
3079 SmallVector<CallInst*, 16> VAStartInstrumentationList;
3081 VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
3082 MemorySanitizerVisitor &MSV)
3083 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
3084 VAArgSize(nullptr) {}
3086 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3087 unsigned VAArgOffset = 0;
3088 const DataLayout &DL = F.getParent()->getDataLayout();
3089 for (CallSite::arg_iterator ArgIt = CS.arg_begin() +
3090 CS.getFunctionType()->getNumParams(), End = CS.arg_end();
3091 ArgIt != End; ++ArgIt) {
3092 llvm::Triple TargetTriple(F.getParent()->getTargetTriple());
3095 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3096 if (TargetTriple.getArch() == llvm::Triple::mips64) {
3097 // Adjusting the shadow for argument with size < 8 to match the placement
3098 // of bits in big endian system
3100 VAArgOffset += (8 - ArgSize);
3102 Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset);
3103 VAArgOffset += ArgSize;
3104 VAArgOffset = alignTo(VAArgOffset, 8);
3105 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
3108 Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
3109 // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
3110 // a new class member i.e. it is the total size of all VarArgs.
3111 IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
3114 /// \brief Compute the shadow address for a given va_arg.
3115 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3117 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3118 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3119 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3123 void visitVAStartInst(VAStartInst &I) override {
3124 IRBuilder<> IRB(&I);
3125 VAStartInstrumentationList.push_back(&I);
3126 Value *VAListTag = I.getArgOperand(0);
3127 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3128 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3129 /* size */8, /* alignment */8, false);
3132 void visitVACopyInst(VACopyInst &I) override {
3133 IRBuilder<> IRB(&I);
3134 Value *VAListTag = I.getArgOperand(0);
3135 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3136 // Unpoison the whole __va_list_tag.
3137 // FIXME: magic ABI constants.
3138 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3139 /* size */8, /* alignment */8, false);
3142 void finalizeInstrumentation() override {
3143 assert(!VAArgSize && !VAArgTLSCopy &&
3144 "finalizeInstrumentation called twice");
3145 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
3146 VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3147 Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
3150 if (!VAStartInstrumentationList.empty()) {
3151 // If there is a va_start in this function, make a backup copy of
3152 // va_arg_tls somewhere in the function entry block.
3153 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3154 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
3157 // Instrument va_start.
3158 // Copy va_list shadow from the backup copy of the TLS contents.
3159 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3160 CallInst *OrigInst = VAStartInstrumentationList[i];
3161 IRBuilder<> IRB(OrigInst->getNextNode());
3162 Value *VAListTag = OrigInst->getArgOperand(0);
3163 Value *RegSaveAreaPtrPtr =
3164 IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3165 Type::getInt64PtrTy(*MS.C));
3166 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
3167 Value *RegSaveAreaShadowPtr =
3168 MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3169 IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8);
3175 /// \brief AArch64-specific implementation of VarArgHelper.
3176 struct VarArgAArch64Helper : public VarArgHelper {
3177 static const unsigned kAArch64GrArgSize = 64;
3178 static const unsigned kAArch64VrArgSize = 128;
3180 static const unsigned AArch64GrBegOffset = 0;
3181 static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
3182 // Make VR space aligned to 16 bytes.
3183 static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
3184 static const unsigned AArch64VrEndOffset = AArch64VrBegOffset
3185 + kAArch64VrArgSize;
3186 static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
3189 MemorySanitizer &MS;
3190 MemorySanitizerVisitor &MSV;
3191 Value *VAArgTLSCopy;
3192 Value *VAArgOverflowSize;
3194 SmallVector<CallInst*, 16> VAStartInstrumentationList;
3196 VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
3197 MemorySanitizerVisitor &MSV)
3198 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
3199 VAArgOverflowSize(nullptr) {}
3201 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
3203 ArgKind classifyArgument(Value* arg) {
3204 Type *T = arg->getType();
3205 if (T->isFPOrFPVectorTy())
3206 return AK_FloatingPoint;
3207 if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
3208 || (T->isPointerTy()))
3209 return AK_GeneralPurpose;
3213 // The instrumentation stores the argument shadow in a non ABI-specific
3214 // format because it does not know which argument is named (since Clang,
3215 // like x86_64 case, lowers the va_args in the frontend and this pass only
3216 // sees the low level code that deals with va_list internals).
3217 // The first seven GR registers are saved in the first 56 bytes of the
3218 // va_arg tls arra, followers by the first 8 FP/SIMD registers, and then
3219 // the remaining arguments.
3220 // Using constant offset within the va_arg TLS array allows fast copy
3221 // in the finalize instrumentation.
3222 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3223 unsigned GrOffset = AArch64GrBegOffset;
3224 unsigned VrOffset = AArch64VrBegOffset;
3225 unsigned OverflowOffset = AArch64VAEndOffset;
3227 const DataLayout &DL = F.getParent()->getDataLayout();
3228 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
3229 ArgIt != End; ++ArgIt) {
3231 unsigned ArgNo = CS.getArgumentNo(ArgIt);
3232 bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
3233 ArgKind AK = classifyArgument(A);
3234 if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
3236 if (AK == AK_FloatingPoint && VrOffset >= AArch64VrEndOffset)
3240 case AK_GeneralPurpose:
3241 Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset);
3244 case AK_FloatingPoint:
3245 Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset);
3249 // Don't count fixed arguments in the overflow area - va_start will
3250 // skip right over them.
3253 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3254 Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
3255 OverflowOffset += alignTo(ArgSize, 8);
3258 // Count Gp/Vr fixed arguments to their respective offsets, but don't
3259 // bother to actually store a shadow.
3262 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
3264 Constant *OverflowSize =
3265 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
3266 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
3269 /// Compute the shadow address for a given va_arg.
3270 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3272 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3273 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3274 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3278 void visitVAStartInst(VAStartInst &I) override {
3279 IRBuilder<> IRB(&I);
3280 VAStartInstrumentationList.push_back(&I);
3281 Value *VAListTag = I.getArgOperand(0);
3282 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3283 // Unpoison the whole __va_list_tag.
3284 // FIXME: magic ABI constants (size of va_list).
3285 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3286 /* size */32, /* alignment */8, false);
3289 void visitVACopyInst(VACopyInst &I) override {
3290 IRBuilder<> IRB(&I);
3291 Value *VAListTag = I.getArgOperand(0);
3292 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3293 // Unpoison the whole __va_list_tag.
3294 // FIXME: magic ABI constants (size of va_list).
3295 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3296 /* size */32, /* alignment */8, false);
3299 // Retrieve a va_list field of 'void*' size.
3300 Value* getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
3301 Value *SaveAreaPtrPtr =
3303 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3304 ConstantInt::get(MS.IntptrTy, offset)),
3305 Type::getInt64PtrTy(*MS.C));
3306 return IRB.CreateLoad(SaveAreaPtrPtr);
3309 // Retrieve a va_list field of 'int' size.
3310 Value* getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
3311 Value *SaveAreaPtr =
3313 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3314 ConstantInt::get(MS.IntptrTy, offset)),
3315 Type::getInt32PtrTy(*MS.C));
3316 Value *SaveArea32 = IRB.CreateLoad(SaveAreaPtr);
3317 return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
3320 void finalizeInstrumentation() override {
3321 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
3322 "finalizeInstrumentation called twice");
3323 if (!VAStartInstrumentationList.empty()) {
3324 // If there is a va_start in this function, make a backup copy of
3325 // va_arg_tls somewhere in the function entry block.
3326 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
3327 VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3329 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
3331 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3332 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
3335 Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
3336 Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
3338 // Instrument va_start, copy va_list shadow from the backup copy of
3339 // the TLS contents.
3340 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3341 CallInst *OrigInst = VAStartInstrumentationList[i];
3342 IRBuilder<> IRB(OrigInst->getNextNode());
3344 Value *VAListTag = OrigInst->getArgOperand(0);
3346 // The variadic ABI for AArch64 creates two areas to save the incoming
3347 // argument registers (one for 64-bit general register xn-x7 and another
3348 // for 128-bit FP/SIMD vn-v7).
3349 // We need then to propagate the shadow arguments on both regions
3350 // 'va::__gr_top + va::__gr_offs' and 'va::__vr_top + va::__vr_offs'.
3351 // The remaning arguments are saved on shadow for 'va::stack'.
3352 // One caveat is it requires only to propagate the non-named arguments,
3353 // however on the call site instrumentation 'all' the arguments are
3354 // saved. So to copy the shadow values from the va_arg TLS array
3355 // we need to adjust the offset for both GR and VR fields based on
3356 // the __{gr,vr}_offs value (since they are stores based on incoming
3357 // named arguments).
3359 // Read the stack pointer from the va_list.
3360 Value *StackSaveAreaPtr = getVAField64(IRB, VAListTag, 0);
3362 // Read both the __gr_top and __gr_off and add them up.
3363 Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
3364 Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
3366 Value *GrRegSaveAreaPtr = IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea);
3368 // Read both the __vr_top and __vr_off and add them up.
3369 Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
3370 Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
3372 Value *VrRegSaveAreaPtr = IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea);
3374 // It does not know how many named arguments is being used and, on the
3375 // callsite all the arguments were saved. Since __gr_off is defined as
3376 // '0 - ((8 - named_gr) * 8)', the idea is to just propagate the variadic
3377 // argument by ignoring the bytes of shadow from named arguments.
3378 Value *GrRegSaveAreaShadowPtrOff =
3379 IRB.CreateAdd(GrArgSize, GrOffSaveArea);
3381 Value *GrRegSaveAreaShadowPtr =
3382 MSV.getShadowPtr(GrRegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3384 Value *GrSrcPtr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
3385 GrRegSaveAreaShadowPtrOff);
3386 Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
3388 IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, GrSrcPtr, GrCopySize, 8);
3390 // Again, but for FP/SIMD values.
3391 Value *VrRegSaveAreaShadowPtrOff =
3392 IRB.CreateAdd(VrArgSize, VrOffSaveArea);
3394 Value *VrRegSaveAreaShadowPtr =
3395 MSV.getShadowPtr(VrRegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3397 Value *VrSrcPtr = IRB.CreateInBoundsGEP(
3399 IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
3400 IRB.getInt32(AArch64VrBegOffset)),
3401 VrRegSaveAreaShadowPtrOff);
3402 Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
3404 IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, VrSrcPtr, VrCopySize, 8);
3406 // And finally for remaining arguments.
3407 Value *StackSaveAreaShadowPtr =
3408 MSV.getShadowPtr(StackSaveAreaPtr, IRB.getInt8Ty(), IRB);
3410 Value *StackSrcPtr =
3411 IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
3412 IRB.getInt32(AArch64VAEndOffset));
3414 IRB.CreateMemCpy(StackSaveAreaShadowPtr, StackSrcPtr,
3415 VAArgOverflowSize, 16);
3420 /// \brief PowerPC64-specific implementation of VarArgHelper.
3421 struct VarArgPowerPC64Helper : public VarArgHelper {
3423 MemorySanitizer &MS;
3424 MemorySanitizerVisitor &MSV;
3425 Value *VAArgTLSCopy;
3428 SmallVector<CallInst*, 16> VAStartInstrumentationList;
3430 VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
3431 MemorySanitizerVisitor &MSV)
3432 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
3433 VAArgSize(nullptr) {}
3435 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3436 // For PowerPC, we need to deal with alignment of stack arguments -
3437 // they are mostly aligned to 8 bytes, but vectors and i128 arrays
3438 // are aligned to 16 bytes, byvals can be aligned to 8 or 16 bytes,
3439 // and QPX vectors are aligned to 32 bytes. For that reason, we
3440 // compute current offset from stack pointer (which is always properly
3441 // aligned), and offset for the first vararg, then subtract them.
3443 llvm::Triple TargetTriple(F.getParent()->getTargetTriple());
3444 // Parameter save area starts at 48 bytes from frame pointer for ABIv1,
3445 // and 32 bytes for ABIv2. This is usually determined by target
3446 // endianness, but in theory could be overriden by function attribute.
3447 // For simplicity, we ignore it here (it'd only matter for QPX vectors).
3448 if (TargetTriple.getArch() == llvm::Triple::ppc64)
3452 unsigned VAArgOffset = VAArgBase;
3453 const DataLayout &DL = F.getParent()->getDataLayout();
3454 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
3455 ArgIt != End; ++ArgIt) {
3457 unsigned ArgNo = CS.getArgumentNo(ArgIt);
3458 bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
3459 bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
3461 assert(A->getType()->isPointerTy());
3462 Type *RealTy = A->getType()->getPointerElementType();
3463 uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
3464 uint64_t ArgAlign = CS.getParamAlignment(ArgNo + 1);
3467 VAArgOffset = alignTo(VAArgOffset, ArgAlign);
3469 Value *Base = getShadowPtrForVAArgument(RealTy, IRB,
3470 VAArgOffset - VAArgBase);
3471 IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
3472 ArgSize, kShadowTLSAlignment);
3474 VAArgOffset += alignTo(ArgSize, 8);
3477 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3478 uint64_t ArgAlign = 8;
3479 if (A->getType()->isArrayTy()) {
3480 // Arrays are aligned to element size, except for long double
3481 // arrays, which are aligned to 8 bytes.
3482 Type *ElementTy = A->getType()->getArrayElementType();
3483 if (!ElementTy->isPPC_FP128Ty())
3484 ArgAlign = DL.getTypeAllocSize(ElementTy);
3485 } else if (A->getType()->isVectorTy()) {
3486 // Vectors are naturally aligned.
3487 ArgAlign = DL.getTypeAllocSize(A->getType());
3491 VAArgOffset = alignTo(VAArgOffset, ArgAlign);
3492 if (DL.isBigEndian()) {
3493 // Adjusting the shadow for argument with size < 8 to match the placement
3494 // of bits in big endian system
3496 VAArgOffset += (8 - ArgSize);
3499 Base = getShadowPtrForVAArgument(A->getType(), IRB,
3500 VAArgOffset - VAArgBase);
3501 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
3503 VAArgOffset += ArgSize;
3504 VAArgOffset = alignTo(VAArgOffset, 8);
3507 VAArgBase = VAArgOffset;
3510 Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(),
3511 VAArgOffset - VAArgBase);
3512 // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
3513 // a new class member i.e. it is the total size of all VarArgs.
3514 IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
3517 /// \brief Compute the shadow address for a given va_arg.
3518 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3520 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3521 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3522 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3526 void visitVAStartInst(VAStartInst &I) override {
3527 IRBuilder<> IRB(&I);
3528 VAStartInstrumentationList.push_back(&I);
3529 Value *VAListTag = I.getArgOperand(0);
3530 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3531 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3532 /* size */8, /* alignment */8, false);
3535 void visitVACopyInst(VACopyInst &I) override {
3536 IRBuilder<> IRB(&I);
3537 Value *VAListTag = I.getArgOperand(0);
3538 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3539 // Unpoison the whole __va_list_tag.
3540 // FIXME: magic ABI constants.
3541 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3542 /* size */8, /* alignment */8, false);
3545 void finalizeInstrumentation() override {
3546 assert(!VAArgSize && !VAArgTLSCopy &&
3547 "finalizeInstrumentation called twice");
3548 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
3549 VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3550 Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
3553 if (!VAStartInstrumentationList.empty()) {
3554 // If there is a va_start in this function, make a backup copy of
3555 // va_arg_tls somewhere in the function entry block.
3556 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3557 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
3560 // Instrument va_start.
3561 // Copy va_list shadow from the backup copy of the TLS contents.
3562 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3563 CallInst *OrigInst = VAStartInstrumentationList[i];
3564 IRBuilder<> IRB(OrigInst->getNextNode());
3565 Value *VAListTag = OrigInst->getArgOperand(0);
3566 Value *RegSaveAreaPtrPtr =
3567 IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3568 Type::getInt64PtrTy(*MS.C));
3569 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
3570 Value *RegSaveAreaShadowPtr =
3571 MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3572 IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8);
3577 /// \brief A no-op implementation of VarArgHelper.
3578 struct VarArgNoOpHelper : public VarArgHelper {
3579 VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
3580 MemorySanitizerVisitor &MSV) {}
3582 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}
3584 void visitVAStartInst(VAStartInst &I) override {}
3586 void visitVACopyInst(VACopyInst &I) override {}
3588 void finalizeInstrumentation() override {}
3591 VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
3592 MemorySanitizerVisitor &Visitor) {
3593 // VarArg handling is only implemented on AMD64. False positives are possible
3594 // on other platforms.
3595 llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
3596 if (TargetTriple.getArch() == llvm::Triple::x86_64)
3597 return new VarArgAMD64Helper(Func, Msan, Visitor);
3598 else if (TargetTriple.getArch() == llvm::Triple::mips64 ||
3599 TargetTriple.getArch() == llvm::Triple::mips64el)
3600 return new VarArgMIPS64Helper(Func, Msan, Visitor);
3601 else if (TargetTriple.getArch() == llvm::Triple::aarch64)
3602 return new VarArgAArch64Helper(Func, Msan, Visitor);
3603 else if (TargetTriple.getArch() == llvm::Triple::ppc64 ||
3604 TargetTriple.getArch() == llvm::Triple::ppc64le)
3605 return new VarArgPowerPC64Helper(Func, Msan, Visitor);
3607 return new VarArgNoOpHelper(Func, Msan, Visitor);
3610 } // anonymous namespace
3612 bool MemorySanitizer::runOnFunction(Function &F) {
3613 if (&F == MsanCtorFunction)
3615 MemorySanitizerVisitor Visitor(F, *this);
3617 // Clear out readonly/readnone attributes.
3619 B.addAttribute(Attribute::ReadOnly)
3620 .addAttribute(Attribute::ReadNone);
3621 F.removeAttributes(AttributeSet::FunctionIndex,
3622 AttributeSet::get(F.getContext(),
3623 AttributeSet::FunctionIndex, B));
3625 return Visitor.runOnFunction();