//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineMemOperand class, which is a
// description of a memory reference. It is used to help track dependencies
// in the backend.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
#define LLVM_CODEGEN_MACHINEMEMOPERAND_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/DataTypes.h"
30 class FoldingSetNodeID;
33 class MachineFunction;
34 class ModuleSlotTracker;
36 /// This class contains a discriminated union of information about pointers in
37 /// memory operands, relating them back to LLVM IR or to virtual locations (such
38 /// as frame indices) that are exposed during codegen.
39 struct MachinePointerInfo {
40 /// This is the IR pointer value for the access, or it is null if unknown.
41 /// If this is null, then the access is to a pointer in the default address
43 PointerUnion<const Value *, const PseudoSourceValue *> V;
45 /// Offset - This is an offset from the base Value*.
50 unsigned AddrSpace = 0;
52 explicit MachinePointerInfo(const Value *v, int64_t offset = 0,
54 : V(v), Offset(offset), StackID(ID) {
55 AddrSpace = v ? v->getType()->getPointerAddressSpace() : 0;
58 explicit MachinePointerInfo(const PseudoSourceValue *v, int64_t offset = 0,
60 : V(v), Offset(offset), StackID(ID) {
61 AddrSpace = v ? v->getAddressSpace() : 0;
64 explicit MachinePointerInfo(unsigned AddressSpace = 0)
65 : V((const Value *)nullptr), Offset(0), StackID(0),
66 AddrSpace(AddressSpace) {}
68 explicit MachinePointerInfo(
69 PointerUnion<const Value *, const PseudoSourceValue *> v,
72 : V(v), Offset(offset), StackID(ID) {
74 if (const auto *ValPtr = V.dyn_cast<const Value*>())
75 AddrSpace = ValPtr->getType()->getPointerAddressSpace();
77 AddrSpace = V.get<const PseudoSourceValue*>()->getAddressSpace();
81 MachinePointerInfo getWithOffset(int64_t O) const {
83 return MachinePointerInfo(AddrSpace);
84 if (V.is<const Value*>())
85 return MachinePointerInfo(V.get<const Value*>(), Offset+O, StackID);
86 return MachinePointerInfo(V.get<const PseudoSourceValue*>(), Offset+O,
90 /// Return true if memory region [V, V+Offset+Size) is known to be
92 bool isDereferenceable(unsigned Size, LLVMContext &C,
93 const DataLayout &DL) const;
95 /// Return the LLVM IR address space number that this pointer points into.
96 unsigned getAddrSpace() const;
98 /// Return a MachinePointerInfo record that refers to the constant pool.
99 static MachinePointerInfo getConstantPool(MachineFunction &MF);
101 /// Return a MachinePointerInfo record that refers to the specified
103 static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI,
106 /// Return a MachinePointerInfo record that refers to a jump table entry.
107 static MachinePointerInfo getJumpTable(MachineFunction &MF);
109 /// Return a MachinePointerInfo record that refers to a GOT entry.
110 static MachinePointerInfo getGOT(MachineFunction &MF);
112 /// Stack pointer relative access.
113 static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset,
116 /// Stack memory without other information.
117 static MachinePointerInfo getUnknownStack(MachineFunction &MF);
121 //===----------------------------------------------------------------------===//
122 /// A description of a memory reference used in the backend.
123 /// Instead of holding a StoreInst or LoadInst, this class holds the address
124 /// Value of the reference along with a byte size and offset. This allows it
125 /// to describe lowered loads and stores. Also, the special PseudoSourceValue
126 /// objects can be used to represent loads and stores to memory locations
127 /// that aren't explicit in the regular LLVM IR.
129 class MachineMemOperand {
131 /// Flags values. These may be or'd together.
132 enum Flags : uint16_t {
135 /// The memory access reads data.
137 /// The memory access writes data.
139 /// The memory access is volatile.
140 MOVolatile = 1u << 2,
141 /// The memory access is non-temporal.
142 MONonTemporal = 1u << 3,
143 /// The memory access is dereferenceable (i.e., doesn't trap).
144 MODereferenceable = 1u << 4,
145 /// The memory access always returns the same value (or traps).
146 MOInvariant = 1u << 5,
148 // Reserved for use by target-specific passes.
149 // Targets may override getSerializableMachineMemOperandTargetFlags() to
150 // enable MIR serialization/parsing of these flags. If more of these flags
151 // are added, the MIR printing/parsing code will need to be updated as well.
152 MOTargetFlag1 = 1u << 6,
153 MOTargetFlag2 = 1u << 7,
154 MOTargetFlag3 = 1u << 8,
156 LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag3)
160 /// Atomic information for this memory operation.
161 struct MachineAtomicInfo {
162 /// Synchronization scope ID for this memory operation.
163 unsigned SSID : 8; // SyncScope::ID
164 /// Atomic ordering requirements for this memory operation. For cmpxchg
165 /// atomic operations, atomic ordering requirements when store occurs.
166 unsigned Ordering : 4; // enum AtomicOrdering
167 /// For cmpxchg atomic operations, atomic ordering requirements when store
169 unsigned FailureOrdering : 4; // enum AtomicOrdering
172 MachinePointerInfo PtrInfo;
175 uint16_t BaseAlignLog2; // log_2(base_alignment) + 1
176 MachineAtomicInfo AtomicInfo;
178 const MDNode *Ranges;
181 /// Construct a MachineMemOperand object with the specified PtrInfo, flags,
182 /// size, and base alignment. For atomic operations the synchronization scope
183 /// and atomic ordering requirements must also be specified. For cmpxchg
184 /// atomic operations the atomic ordering requirements when store does not
185 /// occur must also be specified.
186 MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
187 unsigned base_alignment,
188 const AAMDNodes &AAInfo = AAMDNodes(),
189 const MDNode *Ranges = nullptr,
190 SyncScope::ID SSID = SyncScope::System,
191 AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
192 AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
194 const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }
196 /// Return the base address of the memory access. This may either be a normal
197 /// LLVM IR Value, or one of the special values used in CodeGen.
198 /// Special values are those obtained via
199 /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
200 /// other PseudoSourceValue member functions which return objects which stand
201 /// for frame/stack pointer relative references and other special references
202 /// which are not representable in the high-level IR.
203 const Value *getValue() const { return PtrInfo.V.dyn_cast<const Value*>(); }
205 const PseudoSourceValue *getPseudoValue() const {
206 return PtrInfo.V.dyn_cast<const PseudoSourceValue*>();
209 const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }
211 /// Return the raw flags of the source value, \see Flags.
212 Flags getFlags() const { return FlagVals; }
214 /// Bitwise OR the current flags with the given flags.
215 void setFlags(Flags f) { FlagVals |= f; }
217 /// For normal values, this is a byte offset added to the base address.
218 /// For PseudoSourceValue::FPRel values, this is the FrameIndex number.
219 int64_t getOffset() const { return PtrInfo.Offset; }
221 unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }
223 /// Return the size in bytes of the memory reference.
224 uint64_t getSize() const { return Size; }
226 /// Return the minimum known alignment in bytes of the actual memory
228 uint64_t getAlignment() const;
230 /// Return the minimum known alignment in bytes of the base address, without
232 uint64_t getBaseAlignment() const { return (1u << BaseAlignLog2) >> 1; }
234 /// Return the AA tags for the memory reference.
235 AAMDNodes getAAInfo() const { return AAInfo; }
237 /// Return the range tag for the memory reference.
238 const MDNode *getRanges() const { return Ranges; }
240 /// Returns the synchronization scope ID for this memory operation.
241 SyncScope::ID getSyncScopeID() const {
242 return static_cast<SyncScope::ID>(AtomicInfo.SSID);
245 /// Return the atomic ordering requirements for this memory operation. For
246 /// cmpxchg atomic operations, return the atomic ordering requirements when
248 AtomicOrdering getOrdering() const {
249 return static_cast<AtomicOrdering>(AtomicInfo.Ordering);
252 /// For cmpxchg atomic operations, return the atomic ordering requirements
253 /// when store does not occur.
254 AtomicOrdering getFailureOrdering() const {
255 return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering);
258 bool isLoad() const { return FlagVals & MOLoad; }
259 bool isStore() const { return FlagVals & MOStore; }
260 bool isVolatile() const { return FlagVals & MOVolatile; }
261 bool isNonTemporal() const { return FlagVals & MONonTemporal; }
262 bool isDereferenceable() const { return FlagVals & MODereferenceable; }
263 bool isInvariant() const { return FlagVals & MOInvariant; }
265 /// Returns true if this operation has an atomic ordering requirement of
266 /// unordered or higher, false otherwise.
267 bool isAtomic() const { return getOrdering() != AtomicOrdering::NotAtomic; }
269 /// Returns true if this memory operation doesn't have any ordering
270 /// constraints other than normal aliasing. Volatile and atomic memory
271 /// operations can't be reordered.
273 /// Currently, we don't model the difference between volatile and atomic
274 /// operations. They should retain their ordering relative to all memory
276 bool isUnordered() const { return !isVolatile(); }
278 /// Update this MachineMemOperand to reflect the alignment of MMO, if it has a
279 /// greater alignment. This must only be used when the new alignment applies
280 /// to all users of this MachineMemOperand.
281 void refineAlignment(const MachineMemOperand *MMO);
283 /// Change the SourceValue for this MachineMemOperand. This should only be
284 /// used when an object is being relocated and all references to it are being
286 void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
287 void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
288 void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }
290 /// Profile - Gather unique data for the object.
292 void Profile(FoldingSetNodeID &ID) const;
294 /// Support for operator<<.
296 void print(raw_ostream &OS) const;
297 void print(raw_ostream &OS, ModuleSlotTracker &MST) const;
300 friend bool operator==(const MachineMemOperand &LHS,
301 const MachineMemOperand &RHS) {
302 return LHS.getValue() == RHS.getValue() &&
303 LHS.getPseudoValue() == RHS.getPseudoValue() &&
304 LHS.getSize() == RHS.getSize() &&
305 LHS.getOffset() == RHS.getOffset() &&
306 LHS.getFlags() == RHS.getFlags() &&
307 LHS.getAAInfo() == RHS.getAAInfo() &&
308 LHS.getRanges() == RHS.getRanges() &&
309 LHS.getAlignment() == RHS.getAlignment() &&
310 LHS.getAddrSpace() == RHS.getAddrSpace();
313 friend bool operator!=(const MachineMemOperand &LHS,
314 const MachineMemOperand &RHS) {
315 return !(LHS == RHS);
319 inline raw_ostream &operator<<(raw_ostream &OS, const MachineMemOperand &MRO) {
324 } // End llvm namespace