1 //===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // Created by Greg Clayton on 6/26/07.
12 //===----------------------------------------------------------------------===//
14 #include "MachVMMemory.h"
15 #include "MachVMRegion.h"
17 #include <mach/mach_vm.h>
18 #include <mach/shared_region.h>
19 #include <sys/sysctl.h>
// Sentinel for "page size not yet queried"; ~0 converts to an all-ones
// vm_size_t, which is never a real page size. PageSize() replaces it lazily.
22 static const vm_size_t kInvalidPageSize = ~0;
// Default constructor: start with the page size unknown so the first call to
// PageSize() fetches it from the kernel and caches it in m_page_size.
24 MachVMMemory::MachVMMemory() :
25 m_page_size (kInvalidPageSize),
// Destructor — no resources visible here that need explicit release.
30 MachVMMemory::~MachVMMemory()
// Return the VM page size for "task", computing it once and caching the
// result in m_page_size. On newer SDKs (TASK_VM_INFO >= 22) the per-task page
// size is taken from task_info(TASK_VM_INFO), which matters when the target
// task uses a different page size than the debugserver host (e.g. arm64
// 16K vs 4K). Otherwise falls back to host_page_size() for the host.
35 MachVMMemory::PageSize(task_t task)
// Only query the kernel the first time; kInvalidPageSize marks "unknown".
37 if (m_page_size == kInvalidPageSize)
39 #if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
// A valid task port is required for the per-task query.
40 if (task != TASK_NULL)
43 mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
44 task_vm_info_data_t vm_info;
45 kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
46 if (kr == KERN_SUCCESS)
48 DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info returned page size of 0x%x", (int) vm_info.page_size);
// Cache the task-reported page size for all later calls.
49 m_page_size = vm_info.page_size;
// task_info failed: log and fall through to the host query below.
54 DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
// Fallback: use the host's page size (may differ from the task's on
// mixed-page-size systems); the error is recorded in m_err.
58 m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
// Clamp "count" so a transfer starting at "addr" does not cross a page
// boundary in "task". Used by Read/WriteRegion to split transfers into
// per-page chunks, since a Mach VM call can fail mid-range otherwise.
66 MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
68 const nub_size_t page_size = PageSize(task);
// Offset of addr within its page, and the bytes remaining to page end.
71 nub_size_t page_offset = (addr % page_size);
72 nub_size_t bytes_left_in_page = page_size - page_offset;
73 if (count > bytes_left_in_page)
74 count = bytes_left_in_page;
// Fill *region_info with the VM region containing "address" in "task".
// When "address" is not in any mapped region, synthesize an unmapped-region
// answer: addr = address, permissions = 0, and size inferred from the next
// valid region (or the rest of the address space if there is none).
80 MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
82 MachVMRegion vmRegion(task);
// Fast path: address lies inside a mapped region — report it directly.
84 if (vmRegion.GetRegionForAddress(address))
86 region_info->addr = vmRegion.StartAddress();
87 region_info->size = vmRegion.GetByteSize();
88 region_info->permissions = vmRegion.GetDNBPermissions();
// Address was not in a mapped region; describe the hole instead.
92 region_info->addr = address;
93 region_info->size = 0;
94 if (vmRegion.GetError().Success())
96 // vmRegion.GetRegionForAddress() return false, indicating that "address"
97 // wasn't in a valid region, but the "vmRegion" info was successfully
98 // read from the task which means the info describes the next valid
99 // region from which we can infer the size of this invalid region
100 mach_vm_address_t start_addr = vmRegion.StartAddress();
101 if (address < start_addr)
102 region_info->size = start_addr - address;
104 // If we can't get any info about the size from the next region it means
105 // we asked about an address that was past all mappings, so the size
106 // of this region will take up all remaining address space.
107 if (region_info->size == 0)
108 region_info->size = INVALID_NUB_ADDRESS - region_info->addr;
110 // Not readable, writeable or executable
111 region_info->permissions = 0;
116 // For integrated graphics chip, this makes the accounting info for 'wired' memory more like top.
// Compute (once) the number of pages "stolen" from the OS by firmware /
// integrated graphics, by summing the machdep.memmap.{Reserved,Unusable,Other}
// sysctls. Result is cached in function-local statics, so the sysctl work
// happens only on the first call. NOTE(review): static caches make this
// effectively process-global; not obviously thread-safe — confirm callers.
118 MachVMMemory::GetStolenPages(task_t task)
120 static uint64_t stolenPages = 0;
121 static bool calculated = false;
122 if (calculated) return stolenPages;
// MIB arrays for the three sysctls, resolved once via sysctlnametomib()
// so later lookups avoid the name-to-MIB translation cost.
124 static int mib_reserved[CTL_MAXNAME];
125 static int mib_unusable[CTL_MAXNAME];
126 static int mib_other[CTL_MAXNAME];
127 static size_t mib_reserved_len = 0;
128 static size_t mib_unusable_len = 0;
129 static size_t mib_other_len = 0;
132 /* This can be used for testing: */
133 //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;
// First call: resolve all three sysctl names to MIBs. A zero
// mib_reserved_len doubles as the "not yet resolved" flag.
135 if(0 == mib_reserved_len)
137 mib_reserved_len = CTL_MAXNAME;
139 r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
// On failure, reset the length so this is treated as unresolved.
144 mib_reserved_len = 0;
148 mib_unusable_len = CTL_MAXNAME;
150 r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
155 mib_reserved_len = 0;
160 mib_other_len = CTL_MAXNAME;
162 r = sysctlnametomib("machdep.memmap.Other", mib_other,
167 mib_reserved_len = 0;
// Only proceed if all three MIBs resolved successfully.
172 if(mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
174 uint64_t reserved = 0, unusable = 0, other = 0;
179 reserved_len = sizeof(reserved);
180 unusable_len = sizeof(unusable);
181 other_len = sizeof(other);
183 /* These are all declared as QUAD/uint64_t sysctls in the kernel. */
185 if (sysctl (mib_reserved,
186 static_cast<u_int>(mib_reserved_len),
195 if (sysctl (mib_unusable,
196 static_cast<u_int>(mib_unusable_len),
205 if (sysctl (mib_other,
206 static_cast<u_int>(mib_other_len),
// Sanity check: each sysctl must have written exactly a uint64_t.
215 if (reserved_len == sizeof(reserved) &&
216 unusable_len == sizeof(unusable) &&
217 other_len == sizeof(other))
219 uint64_t stolen = reserved + unusable + other;
220 uint64_t mb128 = 128 * 1024 * 1024ULL;
// Round the stolen byte count down to a 128 MB boundary, then
// convert to pages using the task's page size.
224 stolen = (stolen & ~((128 * 1024 * 1024ULL) - 1)); // rounding down
225 stolenPages = stolen / PageSize (task);
// Return total physical memory in bytes from the hw.memsize sysctl,
// computed once and cached in function-local statics.
234 static uint64_t GetPhysicalMemory()
236 // This doesn't change often at all. No need to poll each time.
237 static uint64_t physical_memory = 0;
238 static bool calculated = false;
239 if (calculated) return physical_memory;
241 size_t len = sizeof(physical_memory);
// NOTE(review): return value of sysctlbyname is ignored here; on failure
// physical_memory stays 0 — confirm that is the intended fallback.
242 sysctlbyname("hw.memsize", &physical_memory, &len, NULL, 0);
245 return physical_memory;
248 // rsize and dirty_size is not adjusted for dyld shared cache and multiple __LINKEDIT segment, as in vmmap. In practice, dirty_size doesn't differ much but rsize may. There is performance penalty for the adjustment. Right now, only use the dirty_size.
// Fetch region size statistics for "task" via task_info(TASK_VM_INFO_PURGEABLE).
// Only available when the SDK defines TASK_VM_INFO >= 22; on success,
// dirty_size is set to the task's internal (dirty/anonymous) byte count.
250 MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
252 #if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
254 task_vm_info_data_t vm_info;
255 mach_msg_type_number_t info_count;
258 info_count = TASK_VM_INFO_COUNT;
259 kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
260 if (kr == KERN_SUCCESS)
// "internal" is the task's internal memory, reported as the dirty size.
261 dirty_size = vm_info.internal;
265 // Test whether the virtual address is within the architecture's shared region.
// Uses the SHARED_REGION_BASE_*/SIZE_* constants from <mach/shared_region.h>,
// selected by the target's cpu_type_t (arm64/arm/x86_64/i386 cases visible).
266 static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
268 mach_vm_address_t base = 0, size = 0;
// arm64 constants exist only on SDKs that define them.
271 #if defined (CPU_TYPE_ARM64) && defined (SHARED_REGION_BASE_ARM64)
273 base = SHARED_REGION_BASE_ARM64;
274 size = SHARED_REGION_SIZE_ARM64;
279 base = SHARED_REGION_BASE_ARM;
280 size = SHARED_REGION_SIZE_ARM;
283 case CPU_TYPE_X86_64:
284 base = SHARED_REGION_BASE_X86_64;
285 size = SHARED_REGION_SIZE_X86_64;
289 base = SHARED_REGION_BASE_I386;
290 size = SHARED_REGION_SIZE_I386;
294 // Log error abut unknown CPU type
// Half-open interval test: [base, base + size).
300 return(addr >= base && addr < (base + size));
// Walk every VM region of "task" with mach_vm_region(VM_REGION_TOP_INFO) and
// accumulate resident-private (rprvt) and virtual-private (vprvt) byte
// counts, using top-style accounting (share mode + ref count per region).
// Regions inside the architecture's shared region are mostly skipped except
// for their private resident pages.
304 MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
306 // Collecting some other info cheaply but not reporting for now.
307 mach_vm_size_t empty = 0;
308 mach_vm_size_t fw_private = 0;
310 mach_vm_size_t aliased = 0;
311 bool global_shared_text_data_mapped = false;
312 vm_size_t pagesize = PageSize (task);
// Iterate regions in address order; mach_vm_region advances addr to the
// start of each found region, and we step past it by its size.
314 for (mach_vm_address_t addr=0, size=0; ; addr += size)
316 vm_region_top_info_data_t info;
317 mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
318 mach_port_t object_name;
320 kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
// KERN_INVALID_ADDRESS (or any failure) ends the walk.
321 if (kr != KERN_SUCCESS) break;
// Shared-region (dyld shared cache) addresses get special handling.
323 if (InSharedRegion(addr, cputype))
326 fw_private += info.private_pages_resident * pagesize;
328 // Check if this process has the globally shared text and data regions mapped in. If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
329 if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
// Re-query the same range with VM_REGION_BASIC_INFO to read the
// "reserved" flag, which indicates the global shared mapping.
330 vm_region_basic_info_data_64_t b_info;
331 mach_vm_address_t b_addr = addr;
332 mach_vm_size_t b_size = size;
333 count = VM_REGION_BASIC_INFO_COUNT_64;
335 kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
336 if (kr != KERN_SUCCESS) break;
338 if (b_info.reserved) {
339 global_shared_text_data_mapped = TRUE;
343 // Short circuit the loop if this isn't a shared private region, since that's the only region type we care about within the current address range.
344 if (info.share_mode != SM_PRIVATE)
350 // Update counters according to the region type.
351 if (info.share_mode == SM_COW && info.ref_count == 1)
353 // Treat single reference SM_COW as SM_PRIVATE
354 info.share_mode = SM_PRIVATE;
357 switch (info.share_mode)
360 // Treat SM_LARGE_PAGE the same as SM_PRIVATE
361 // since they are not shareable and are wired.
// Private regions: both private and shared resident pages count
// toward rprvt (top's convention for SM_PRIVATE).
363 rprvt += info.private_pages_resident * pagesize;
364 rprvt += info.shared_pages_resident * pagesize;
377 // Treat kernel_task specially
378 if (info.share_mode == SM_COW)
380 rprvt += info.private_pages_resident * pagesize;
// Non-kernel tasks: COW regions contribute private pages to both
// resident-private and virtual-private totals.
386 if (info.share_mode == SM_COW)
388 rprvt += info.private_pages_resident * pagesize;
389 vprvt += info.private_pages_resident * pagesize;
394 // log that something is really bad.
// Fetch the task's purgeable and anonymous memory byte counts via
// task_info(TASK_VM_INFO_PURGEABLE). Only compiled when the SDK provides
// TASK_VM_INFO >= 22; on success the out-params are overwritten, otherwise
// they are left untouched.
403 GetPurgeableAndAnonymous(task_t task, uint64_t &purgeable, uint64_t &anonymous)
405 #if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
408 mach_msg_type_number_t info_count;
409 task_vm_info_data_t vm_info;
411 info_count = TASK_VM_INFO_COUNT;
412 kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
413 if (kr == KERN_SUCCESS)
415 purgeable = vm_info.purgeable_volatile_resident;
// Anonymous = internal + compressed, minus the purgeable-volatile part
// already counted separately above.
416 anonymous = vm_info.internal + vm_info.compressed - vm_info.purgeable_volatile_pmap;
// Gather a memory profile for "task" according to the scanType bitmask:
// host physical memory, host VM statistics (with stolen pages folded into
// wire_count), per-task dirty size, and purgeable/anonymous sizes.
// Two signatures: the vminfo out-param is vm_statistics64_data_t on SDKs
// with HOST_VM_INFO64, else the legacy vm_statistics_data_t.
422 #if defined (HOST_VM_INFO64_COUNT)
424 MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics64_data_t &vminfo, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
427 MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vminfo, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
430 if (scanType & eProfileHostMemory)
431 physical_memory = GetPhysicalMemory();
433 if (scanType & eProfileMemory)
// Host port is cached across calls; mach_host_self() is done once.
435 static mach_port_t localHost = mach_host_self();
436 #if defined (HOST_VM_INFO64_COUNT)
437 mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
438 host_statistics64(localHost, HOST_VM_INFO64, (host_info64_t)&vminfo, &count);
440 mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
441 host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vminfo, &count);
// Fold firmware/GPU "stolen" pages into wired count, matching top.
442 vminfo.wire_count += GetStolenPages(task);
445 /* We are no longer reporting these. Let's not waste time.
446 GetMemorySizes(task, cputype, pid, rprvt, vprvt);
447 rsize = ti.resident_size;
448 vsize = ti.virtual_size;
450 if (scanType & eProfileMemoryDirtyPage)
452 // This uses vmmap strategy. We don't use the returned rsize for now. We prefer to match top's version since that's what we do for the rest of the metrics.
453 GetRegionSizes(task, rsize, dirty_size);
457 if (scanType & eProfileMemoryAnonymous)
459 GetPurgeableAndAnonymous(task, purgeable, anonymous);
// Read data_count bytes from "address" in "task" into "data", looping one
// page at a time via mach_vm_read(). Returns the total bytes actually read
// (0 for NULL/empty requests). Each kernel-returned buffer is copied out and
// vm_deallocate()d to avoid leaking the out-of-line memory.
467 MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
// Guard against NULL destination or zero-length reads.
469 if (data == NULL || data_count == 0)
472 nub_size_t total_bytes_read = 0;
473 nub_addr_t curr_addr = address;
474 uint8_t *curr_data = (uint8_t*)data;
475 while (total_bytes_read < data_count)
// Clamp each chunk so it never crosses a page boundary.
477 mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
478 mach_msg_type_number_t curr_bytes_read = 0;
479 vm_offset_t vm_memory = 0;
// mach_vm_read returns the data out-of-line in vm_memory; result in m_err.
480 m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);
482 if (DNBLogCheckLogBit(LOG_MEMORY))
483 m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);
// A short read is logged; whatever was read is still copied below.
487 if (curr_bytes_read != curr_size)
489 if (DNBLogCheckLogBit(LOG_MEMORY))
490 m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
// Copy into the caller's buffer, then free the kernel-allocated pages.
492 ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
493 ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
494 total_bytes_read += curr_bytes_read;
495 curr_addr += curr_bytes_read;
496 curr_data += curr_bytes_read;
503 return total_bytes_read;
// Write data_count bytes from "data" to "address" in "task". Walks the
// target's VM regions: for each region covering the current address it
// temporarily sets read/write protections, then delegates the actual copy
// to WriteRegion(). Returns the total bytes written; stops early on any
// region/protection/write failure.
508 MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
510 MachVMRegion vmRegion(task);
512 nub_size_t total_bytes_written = 0;
513 nub_addr_t curr_addr = address;
514 const uint8_t *curr_data = (const uint8_t*)data;
517 while (total_bytes_written < data_count)
// Locate the region containing the current write address.
519 if (vmRegion.GetRegionForAddress(curr_addr))
521 mach_vm_size_t curr_data_count = data_count - total_bytes_written;
522 mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
// No bytes left in this region — cannot make progress.
523 if (region_bytes_left == 0)
// Clamp this chunk to what fits in the current region.
527 if (curr_data_count > region_bytes_left)
528 curr_data_count = region_bytes_left;
// Make the range writable before copying (restored by MachVMRegion;
// presumably on destruction — confirm in MachVMRegion).
530 if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
532 nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
533 if (bytes_written <= 0)
535 // Error should have already be posted by WriteRegion...
540 total_bytes_written += bytes_written;
541 curr_addr += bytes_written;
542 curr_data += bytes_written;
547 DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
553 DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
558 return total_bytes_written;
// Low-level write helper used by Write(): copies data_count bytes into the
// task one page-bounded chunk at a time with mach_vm_write(), flushing the
// instruction/data cache on non-x86 targets so newly written code (e.g.
// breakpoint opcodes) takes effect. Returns total bytes written.
563 MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
// Guard against NULL source or zero-length writes.
565 if (data == NULL || data_count == 0)
568 nub_size_t total_bytes_written = 0;
569 nub_addr_t curr_addr = address;
570 const uint8_t *curr_data = (const uint8_t*)data;
571 while (total_bytes_written < data_count)
// Clamp each chunk so it never crosses a page boundary.
573 mach_msg_type_number_t curr_data_count = static_cast<mach_msg_type_number_t>(MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written));
574 m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
575 if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
576 m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);
// x86 keeps I/D caches coherent; other architectures (e.g. ARM) need an
// explicit cache flush after modifying code in the target.
578 #if !defined (__i386__) && !defined (__x86_64__)
579 vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;
581 m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
582 if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
583 m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
588 total_bytes_written += curr_data_count;
589 curr_addr += curr_data_count;
590 curr_data += curr_data_count;
597 return total_bytes_written;