//===-- Memory.cpp ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/Memory.h"
// C Includes
#include <inttypes.h>
#include <string.h>
// C++ Includes
#include <memory>
// Other libraries and framework includes
// Project includes
#include "lldb/Core/RangeMap.h"
#include "lldb/Core/State.h"
#include "lldb/Target/Process.h"
#include "lldb/Utility/DataBufferHeap.h"
#include "lldb/Utility/Log.h"

using namespace lldb;
using namespace lldb_private;

//----------------------------------------------------------------------
// MemoryCache constructor
//----------------------------------------------------------------------
MemoryCache::MemoryCache(Process &process)
    : m_mutex(), m_L1_cache(), m_L2_cache(), m_invalid_ranges(),
      m_process(process),
      m_L2_cache_line_byte_size(process.GetMemoryCacheLineSize()) {}

//----------------------------------------------------------------------
// Destructor
//----------------------------------------------------------------------
MemoryCache::~MemoryCache() {}

void MemoryCache::Clear(bool clear_invalid_ranges) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache.clear();
  m_L2_cache.clear();
  if (clear_invalid_ranges)
    m_invalid_ranges.Clear();
  m_L2_cache_line_byte_size = m_process.GetMemoryCacheLineSize();
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr, const void *src,
                                 size_t src_len) {
  // Copy the bytes into a heap buffer once; the nested
  // DataBufferHeap(DataBufferHeap(...)) construction made a redundant copy.
  AddL1CacheData(addr, DataBufferSP(new DataBufferHeap(src, src_len)));
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr,
                                 const DataBufferSP &data_buffer_sp) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache[addr] = data_buffer_sp;
}

void MemoryCache::Flush(addr_t addr, size_t size) {
  if (size == 0)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  // Erase any blocks from the L1 cache that intersect with the flush range
  if (!m_L1_cache.empty()) {
    AddrRange flush_range(addr, size);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    while (pos != m_L1_cache.end()) {
      AddrRange chunk_range(pos->first, pos->second->GetByteSize());
      if (!chunk_range.DoesIntersect(flush_range))
        break;
      pos = m_L1_cache.erase(pos);
    }
  }

  if (!m_L2_cache.empty()) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    const addr_t end_addr = (addr + size - 1);
    const addr_t first_cache_line_addr = addr - (addr % cache_line_byte_size);
    const addr_t last_cache_line_addr =
        end_addr - (end_addr % cache_line_byte_size);
    // Watch for overflow where size will cause us to go off the end of the
    // 64 bit address space
    uint32_t num_cache_lines;
    if (last_cache_line_addr >= first_cache_line_addr)
      num_cache_lines = ((last_cache_line_addr - first_cache_line_addr) /
                         cache_line_byte_size) +
                        1;
    else
      num_cache_lines =
          (UINT64_MAX - first_cache_line_addr + 1) / cache_line_byte_size;
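
    // Worked example (assuming a 512-byte cache line for illustration):
    // Flush(0x1003, 0x200) gives end_addr = 0x1202, so
    // first_cache_line_addr = 0x1000, last_cache_line_addr = 0x1200, and
    // num_cache_lines = ((0x1200 - 0x1000) / 0x200) + 1 = 2; the loop below
    // then erases the L2 entries keyed at 0x1000 and 0x1200, if present.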

    uint32_t cache_idx = 0;
    for (addr_t curr_addr = first_cache_line_addr; cache_idx < num_cache_lines;
         curr_addr += cache_line_byte_size, ++cache_idx) {
      BlockMap::iterator pos = m_L2_cache.find(curr_addr);
      if (pos != m_L2_cache.end())
        m_L2_cache.erase(pos);
    }
  }
}

void MemoryCache::AddInvalidRange(lldb::addr_t base_addr,
                                  lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    InvalidRanges::Entry range(base_addr, byte_size);
    m_invalid_ranges.Append(range);
    m_invalid_ranges.Sort();
  }
}

bool MemoryCache::RemoveInvalidRange(lldb::addr_t base_addr,
                                     lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
    if (idx != UINT32_MAX) {
      const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex(idx);
      if (entry->GetRangeBase() == base_addr &&
          entry->GetByteSize() == byte_size)
        return m_invalid_ranges.RemoveEntryAtIndex(idx);
    }
  }
  return false;
}

size_t MemoryCache::Read(addr_t addr, void *dst, size_t dst_len,
                         Status &error) {
  size_t bytes_left = dst_len;

  // Check the L1 cache for a range that contains the entire memory read.
  // If we find a range in the L1 cache that does, we use it. Else we fall
  // back to reading memory in m_L2_cache_line_byte_size byte sized chunks.
  // The L1 cache contains chunks of memory that are not required to be
  // m_L2_cache_line_byte_size bytes in size, so we don't try anything
  // tricky when reading from them (no partial reads from the L1 cache).
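
  // Illustrative scenario (addresses and sizes assumed): with a chunk
  // cached in L1 covering [0x1000, 0x1030), Read(0x1008, dst, 16) is
  // satisfied entirely from that chunk, while Read(0x1028, dst, 16) spills
  // past its end and falls through to the L2 cache-line path below.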

  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (!m_L1_cache.empty()) {
    AddrRange read_range(addr, dst_len);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    AddrRange chunk_range(pos->first, pos->second->GetByteSize());
    if (chunk_range.Contains(read_range)) {
      memcpy(dst, pos->second->GetBytes() + addr - chunk_range.GetRangeBase(),
             dst_len);
      return dst_len;
    }
  }

  // If this memory read request is larger than the cache line size, then
  // we (1) try to read as much of it at once as possible, and (2) don't
  // add the data to the memory cache. We don't want to split a big read
  // up into more separate reads than necessary, and with a large memory read
  // request, it is unlikely that the caller function will ask for the next
  // 4 bytes after the large memory read - so there's little benefit to saving
  // it in the cache.
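  //
  // For example (sizes assumed for illustration): with a 512-byte cache
  // line, a 4096-byte Read() goes to the inferior in a single
  // ReadMemoryFromInferior() call and is stored whole in the L1 cache,
  // rather than being split into eight 512-byte L2 lines.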
  if (dst && dst_len > m_L2_cache_line_byte_size) {
    size_t bytes_read =
        m_process.ReadMemoryFromInferior(addr, dst, dst_len, error);
    // Add this non block sized range to the L1 cache if we actually read
    // anything
    if (bytes_read > 0)
      AddL1CacheData(addr, dst, bytes_read);
    return bytes_read;
  }

  if (dst && bytes_left > 0) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    uint8_t *dst_buf = (uint8_t *)dst;
    addr_t curr_addr = addr - (addr % cache_line_byte_size);
    addr_t cache_offset = addr - curr_addr;
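
    // Worked example (assuming a 512-byte cache line for illustration):
    // for addr = 0x1003, curr_addr rounds down to 0x1000 and cache_offset
    // is 3, so the first copy out of a cached line below starts 3 bytes in
    // and covers at most 512 - 3 = 509 bytes.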

    while (bytes_left > 0) {
      if (m_invalid_ranges.FindEntryThatContains(curr_addr)) {
        error.SetErrorStringWithFormat("memory read failed for 0x%" PRIx64,
                                       curr_addr);
        return dst_len - bytes_left;
      }

      BlockMap::const_iterator pos = m_L2_cache.find(curr_addr);
      BlockMap::const_iterator end = m_L2_cache.end();

      if (pos != end) {
        size_t curr_read_size = cache_line_byte_size - cache_offset;
        if (curr_read_size > bytes_left)
          curr_read_size = bytes_left;

        memcpy(dst_buf + dst_len - bytes_left,
               pos->second->GetBytes() + cache_offset, curr_read_size);

        bytes_left -= curr_read_size;
        curr_addr += curr_read_size + cache_offset;
        cache_offset = 0;

        if (bytes_left > 0) {
          // Get sequential cache page hits
          for (++pos; (pos != end) && (bytes_left > 0); ++pos) {
            assert((curr_addr % cache_line_byte_size) == 0);

            if (pos->first != curr_addr)
              break;

            curr_read_size = pos->second->GetByteSize();
            if (curr_read_size > bytes_left)
              curr_read_size = bytes_left;

            memcpy(dst_buf + dst_len - bytes_left, pos->second->GetBytes(),
                   curr_read_size);

            bytes_left -= curr_read_size;
            curr_addr += curr_read_size;

            // We have a cache page that was only partially filled with
            // fewer bytes than a full cache line. If this happens, we must
            // cap off how much data we are able to read...
            if (pos->second->GetByteSize() != cache_line_byte_size)
              return dst_len - bytes_left;
          }
        }
      }

      // We need to read from the process

      if (bytes_left > 0) {
        assert((curr_addr % cache_line_byte_size) == 0);
        std::unique_ptr<DataBufferHeap> data_buffer_heap_ap(
            new DataBufferHeap(cache_line_byte_size, 0));
        size_t process_bytes_read = m_process.ReadMemoryFromInferior(
            curr_addr, data_buffer_heap_ap->GetBytes(),
            data_buffer_heap_ap->GetByteSize(), error);
        if (process_bytes_read == 0)
          return dst_len - bytes_left;

        if (process_bytes_read != cache_line_byte_size)
          data_buffer_heap_ap->SetByteSize(process_bytes_read);
        m_L2_cache[curr_addr] = DataBufferSP(data_buffer_heap_ap.release());
        // We have read data and put it into the cache; continue through the
        // loop again to get the data out of the cache...
      }
    }
  }

  return dst_len - bytes_left;
}

AllocatedBlock::AllocatedBlock(lldb::addr_t addr, uint32_t byte_size,
                               uint32_t permissions, uint32_t chunk_size)
    : m_range(addr, byte_size), m_permissions(permissions),
      m_chunk_size(chunk_size) {
  // The entire address range is free to start with.
  m_free_blocks.Append(m_range);
  assert(byte_size > chunk_size);
}

AllocatedBlock::~AllocatedBlock() {}

lldb::addr_t AllocatedBlock::ReserveBlock(uint32_t size) {
  // We must return something valid for zero bytes.
  if (size == 0)
    size = 1;
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  const size_t free_count = m_free_blocks.GetSize();
  for (size_t i = 0; i < free_count; ++i) {
    auto &free_block = m_free_blocks.GetEntryRef(i);
    const lldb::addr_t range_size = free_block.GetByteSize();
    if (range_size >= size) {
      // We found a free block that is big enough for our data. Figure out
      // how many chunks we will need and calculate the resulting block size
      // we will reserve.
      addr_t addr = free_block.GetRangeBase();
      size_t num_chunks = CalculateChunksNeededForSize(size);
      lldb::addr_t block_size = num_chunks * m_chunk_size;
      lldb::addr_t bytes_left = range_size - block_size;
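
      // Worked example (chunk size of 16, as AllocateMemory passes below):
      // a request for size = 20 needs CalculateChunksNeededForSize(20) = 2
      // chunks, so block_size = 32 bytes get reserved even though only 20
      // were requested.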
      if (bytes_left == 0) {
        // The newly allocated block will take all of the bytes in this
        // available block, so we can just add it to the allocated ranges and
        // remove the range from the free ranges.
        m_reserved_blocks.Insert(free_block, false);
        m_free_blocks.RemoveEntryAtIndex(i);
      } else {
        // Make the new allocated range and add it to the allocated ranges.
        Range<lldb::addr_t, uint32_t> reserved_block(free_block);
        reserved_block.SetByteSize(block_size);
        // Insert the reserved range and don't combine it with other blocks
        // in the reserved blocks list.
        m_reserved_blocks.Insert(reserved_block, false);
        // Adjust the free range in place since we won't change the sorted
        // ordering of the m_free_blocks list.
        free_block.SetRangeBase(reserved_block.GetRangeEnd());
        free_block.SetByteSize(bytes_left);
      }
      LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size, addr);
      return addr;
    }
  }

  LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size,
            LLDB_INVALID_ADDRESS);
  return LLDB_INVALID_ADDRESS;
}

bool AllocatedBlock::FreeBlock(addr_t addr) {
  bool success = false;
  auto entry_idx = m_reserved_blocks.FindEntryIndexThatContains(addr);
  if (entry_idx != UINT32_MAX) {
    m_free_blocks.Insert(m_reserved_blocks.GetEntryRef(entry_idx), true);
    m_reserved_blocks.RemoveEntryAtIndex(entry_idx);
    success = true;
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGV(log, "({0}) (addr = {1:x}) => {2}", this, addr, success);
  return success;
}

AllocatedMemoryCache::AllocatedMemoryCache(Process &process)
    : m_process(process), m_mutex(), m_memory_map() {}

AllocatedMemoryCache::~AllocatedMemoryCache() {}

void AllocatedMemoryCache::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (m_process.IsAlive()) {
    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    for (pos = m_memory_map.begin(); pos != end; ++pos)
      m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
  }
  m_memory_map.clear();
}

AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage(uint32_t byte_size, uint32_t permissions,
                                   uint32_t chunk_size, Status &error) {
  AllocatedBlockSP block_sp;
  const size_t page_size = 4096;
  const size_t num_pages = (byte_size + page_size - 1) / page_size;
  const size_t page_byte_size = num_pages * page_size;
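
  // For example: byte_size = 5000 rounds up to num_pages = (5000 + 4095) /
  // 4096 = 2, so page_byte_size = 8192 bytes are allocated in the inferior.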

  addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);

  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  if (log) {
    log->Printf("Process::DoAllocateMemory (byte_size = 0x%8.8" PRIx32
                ", permissions = %s) => 0x%16.16" PRIx64,
                (uint32_t)page_byte_size, GetPermissionsAsCString(permissions),
                (uint64_t)addr);
  }

  if (addr != LLDB_INVALID_ADDRESS) {
    block_sp.reset(
        new AllocatedBlock(addr, page_byte_size, permissions, chunk_size));
    m_memory_map.insert(std::make_pair(permissions, block_sp));
  }
  return block_sp;
}

lldb::addr_t AllocatedMemoryCache::AllocateMemory(size_t byte_size,
                                                  uint32_t permissions,
                                                  Status &error) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  addr_t addr = LLDB_INVALID_ADDRESS;
  std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator>
      range = m_memory_map.equal_range(permissions);

  for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second;
       ++pos) {
    addr = (*pos).second->ReserveBlock(byte_size);
    if (addr != LLDB_INVALID_ADDRESS)
      break;
  }

  if (addr == LLDB_INVALID_ADDRESS) {
    AllocatedBlockSP block_sp(AllocatePage(byte_size, permissions, 16, error));

    if (block_sp)
      addr = block_sp->ReserveBlock(byte_size);
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  if (log)
    log->Printf(
        "AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8" PRIx32
        ", permissions = %s) => 0x%16.16" PRIx64,
        (uint32_t)byte_size, GetPermissionsAsCString(permissions),
        (uint64_t)addr);
  return addr;
}
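
// Usage sketch (illustrative only, not part of the original file): callers
// such as Process typically pair AllocateMemory/DeallocateMemory like so:
//
//   Status error;
//   lldb::addr_t addr = m_allocated_memory_cache.AllocateMemory(
//       size, lldb::ePermissionsReadable | lldb::ePermissionsWritable,
//       error);
//   if (addr != LLDB_INVALID_ADDRESS) {
//     // ... use the scratch memory in the inferior ...
//     m_allocated_memory_cache.DeallocateMemory(addr);
//   }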

bool AllocatedMemoryCache::DeallocateMemory(lldb::addr_t addr) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
  bool success = false;
  for (pos = m_memory_map.begin(); pos != end; ++pos) {
    if (pos->second->Contains(addr)) {
      success = pos->second->FreeBlock(addr);
      break;
    }
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  if (log)
    log->Printf("AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16" PRIx64
                ") => %i",
                (uint64_t)addr, success);
  return success;
}