//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"

using namespace llvm;

static MaybeAlign getBaseAlign(const Value *Base, const DataLayout &DL) {
  if (const MaybeAlign PA = Base->getPointerAlignment(DL))
    return *PA;
  Type *const Ty = Base->getType()->getPointerElementType();
  if (!Ty->isSized())
    return None;
  return Align(DL.getABITypeAlignment(Ty));
}

static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
                      const DataLayout &DL) {
  if (MaybeAlign BA = getBaseAlign(Base, DL)) {
    const APInt APBaseAlign(Offset.getBitWidth(), BA->value());
    const APInt APAlign(Offset.getBitWidth(), Alignment.value());
    assert(APAlign.isPowerOf2() && "must be a power of 2!");
    // The base must be at least as aligned as the requested alignment, and the
    // offset must be a multiple of it (checked with a power-of-two mask).
    return APBaseAlign.uge(APAlign) && !(Offset & (APAlign - 1));
  }
  return false;
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Alignment,
                                              Size, DL, CtxI, DT, Visited);

  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size))
    if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT)) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      Type *Ty = V->getType();
      assert(Ty->isSized() && "must be sized");
      APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
      return isAligned(V, Offset, Alignment, DL);
    }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.
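    //
    // For example (illustrative numbers only): with Align == 8, Offset == 16
    // and Size == 4, a base that is 8-byte aligned and dereferenceable for at
    // least 20 bytes guarantees that the GEP (Base + 16) is 8-byte aligned and
    // that 4 bytes can be loaded from it.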
    //
    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, DT, Visited);
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(
        RelocateInst->getDerivedPtr(), Alignment, Size, DL, CtxI, DT, Visited);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, DT, Visited);

  if (const auto *Call = dyn_cast<CallBase>(V))
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                DT, Visited);

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Align Alignment,
                                              const APInt &Size,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT,
                                              Visited);
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                              MaybeAlign MA,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  if (!Ty->isSized())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  // Require ABI alignment for loads without alignment specification
  const Align Alignment = DL.getValueOrABITypeAlignment(MA, Ty);
  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            DT);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    const DominatorTree *DT) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align::None(), DL, CtxI, DT);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                             ScalarEvolution &SE,
                                             DominatorTree &DT) {
  auto &DL = LI->getModule()->getDataLayout();
  Value *Ptr = LI->getPointerOperand();

  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()));
  const Align Alignment = DL.getValueOrABITypeAlignment(
      MaybeAlign(LI->getAlignment()), LI->getType());

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
                                              HeaderFirstNonPHI, &DT);

  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
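  // A typical candidate (illustrative sketch only) is a pointer that advances
  // by one element per iteration:
  //   %p = phi i32* [ %base, %preheader ], [ %p.next, %loop ]
  //   %v = load i32, i32* %p
  //   %p.next = getelementptr inbounds i32, i32* %p, i32 1
  // which SCEV models as the affine AddRec {%base,+,4}.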
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;
  // TODO: generalize to access patterns which have gaps
  if (Step->getAPInt() != EltSize)
    return false;

  // TODO: If the symbolic trip count has a small bound (max count), we might
  // be able to prove safety.
  auto TC = SE.getSmallConstantTripCount(L);
  if (!TC)
    return false;
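
  // With a unit-stride recurrence (Step == EltSize) and TC iterations, the
  // loop touches exactly TC * EltSize contiguous bytes starting at the
  // addrec's start value, so checking that whole range once is sufficient.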
  const APInt AccessSize = TC * EltSize;

  auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart());
  if (!StartS)
    return false;
  assert(SE.isLoopInvariant(StartS, L) && "implied by addrec definition");
  Value *Base = StartS->getValue();

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case is found which warrants it
  if (EltSize.urem(Alignment.value()) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, &DT);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, MaybeAlign MA, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  // Zero alignment means that the load has the ABI alignment for the target
  const Align Alignment =
      DL.getValueOrABITypeAlignment(MA, V->getType()->getPointerElementType());

  // If DT is not specified we can't make a context-sensitive query
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT))
    return true;

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const uint64_t LoadSize = Size.getZExtValue();

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
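  //
  // For example (sketch only), given:
  //   store i32 0, i32* %p
  //   ...
  //   ; is it safe to insert a speculative load of %p here?
  // the earlier store would already have trapped if %p were not accessible,
  // so an extra load of %p introduces no new trap (provided nothing in
  // between may free %p, which the scan below checks for).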
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might do
    // a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    MaybeAlign MaybeAccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      MaybeAccessedAlign = MaybeAlign(LI->getAlignment());
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      MaybeAccessedAlign = MaybeAlign(SI->getAlignment());
    } else
      continue;

    Type *AccessedTy = AccessedPtr->getType()->getPointerElementType();

    // A prior access with smaller alignment cannot establish the alignment
    // required for this load.
    const Align AccessedAlign =
        DL.getValueOrABITypeAlignment(MaybeAccessedAlign, AccessedTy);
    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;
  }

  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, MaybeAlign Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty));
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, DT);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load,
                                      BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      AliasAnalysis *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  return FindAvailablePtrLoadStore(
      Load->getPointerOperand(), Load->getType(), Load->isAtomic(), ScanBB,
      ScanFrom, MaxInstsToScan, AA, IsLoad, NumScanedInst);
}

Value *llvm::FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy,
                                       bool AtLeastAtomic, BasicBlock *ScanBB,
                                       BasicBlock::iterator &ScanFrom,
                                       unsigned MaxInstsToScan,
                                       AliasAnalysis *AA, bool *IsLoadCSE,
                                       unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();
  Value *StrippedPtr = Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Restore ScanFrom to expected value in case next test succeeds
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    // We are going to keep scanning, so undo the restore above and point the
    // iterator at Inst again.
    --ScanFrom;
    // If this is a load of Ptr, the loaded value is available.
    // (This is true even if the load is volatile or atomic, although
    // those cases are unlikely.)
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      if (AreEquivalentAddressValues(
              LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (LI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = true;
        return LI;
      }

    // Try to get the store size for the type.
    auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
      // If this is a store through Ptr, the value is available!
      // (This is true even if the store is volatile or atomic, although
      // those cases are unlikely.)
      if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
                                               AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (SI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = false;
        return SI->getOperand(0);
      }

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial form
      // of alias analysis that is important for reg2mem'd code.
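      // Two distinct identified objects (e.g. two different allocas, or an
      // alloca and a global) cannot alias, so such a store cannot clobber the
      // value we are looking for.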
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      // If we have alias analysis and it says the store won't modify the loaded
      // value, ignore the store.
      if (AA && !isModSet(AA->getModRefInfo(SI, StrippedPtr, AccessSize)))
        continue;

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, StrippedPtr, AccessSize)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}