/*-
 * Copyright (c) 2006 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_amd64.c r261799
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ARM64 (AArch64) machine dependent routines for kvm and minidumps.
 */

#include <sys/param.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <vm/vm.h>
#include <kvm.h>

#include "../../sys/arm64/include/minidump.h"

#include <limits.h>

#include "kvm_private.h"
#include "kvm_aarch64.h"

#define	aarch64_round_page(x)	roundup2((kvaddr_t)(x), AARCH64_PAGE_SIZE)

struct vmstate {
	struct minidumphdr hdr;
};

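/*
 * Fetch the raw L3 page table entry at the given index from the copy of the
 * kernel page table stored in the minidump, converting it from little-endian
 * dump order to host byte order.
 */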
static aarch64_pte_t
_aarch64_pte_get(kvm_t *kd, u_long pteindex)
{
	aarch64_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));

	return (le64toh(*pte));
}

static int
_aarch64_minidump_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_AARCH64) &&
	    _kvm_is_minidump(kd));
}

static void
_aarch64_minidump_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	free(vm);
	kd->vmst = NULL;
}

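/*
 * Parse the minidump header: validate the magic and version, byte-swap the
 * header fields, then locate the dump_avail array, the page bitmap and the
 * dumped page table that follow it, and initialize the sparse-page lookup
 * table from them.
 */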
static int
_aarch64_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off, dump_avail_off, sparse_off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
	    sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}

	vmst->hdr.version = le32toh(vmst->hdr.version);
	if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
		_kvm_err(kd, kd->program, "wrong minidump version. "
		    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
	vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
	vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
	vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys);
	vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
	vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);
	/* Version 1 minidumps predate the dump_avail array. */
	vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
	    le32toh(vmst->hdr.dumpavailsize) : 0;

	/* Skip header and msgbuf */
	dump_avail_off = AARCH64_PAGE_SIZE + aarch64_round_page(vmst->hdr.msgbufsize);

	/* Skip dump_avail */
	off = dump_avail_off + aarch64_round_page(vmst->hdr.dumpavailsize);

	/* build physical address lookup table for sparse pages */
	sparse_off = off + aarch64_round_page(vmst->hdr.bitmapsize) +
	    aarch64_round_page(vmst->hdr.pmapsize);
	if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
	    vmst->hdr.bitmapsize, off, sparse_off, AARCH64_PAGE_SIZE,
	    sizeof(uint64_t)) == -1) {
		return (-1);
	}
	off += aarch64_round_page(vmst->hdr.bitmapsize);

	if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) {
		return (-1);
	}
	off += aarch64_round_page(vmst->hdr.pmapsize);

	return (0);
}

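/*
 * Translate a kernel virtual address to a location in the minidump, storing
 * the result in *pa.  Addresses in the direct map region are translated
 * using the dmap parameters from the header; other kernel addresses are
 * looked up in the dumped L3 page table.  Returns the number of bytes that
 * remain readable in the page, or 0 if the address cannot be translated.
 */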
static int
_aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	aarch64_physaddr_t offset;
	aarch64_pte_t l3;
	kvaddr_t l3_index;
	aarch64_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & AARCH64_PAGE_MASK;

	if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		a = (va - vm->hdr.dmapbase + vm->hdr.dmapphys) &
		    ~AARCH64_PAGE_MASK;
		ofs = _kvm_pt_find(kd, a, AARCH64_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
			    "direct map address 0x%jx not in minidump",
			    (uintmax_t)va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AARCH64_PAGE_SIZE - offset);
	} else if (va >= vm->hdr.kernbase) {
		l3_index = (va - vm->hdr.kernbase) >> AARCH64_L3_SHIFT;
		if (l3_index >= vm->hdr.pmapsize / sizeof(l3))
			goto invalid;
		l3 = _aarch64_pte_get(kd, l3_index);
		if ((l3 & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) {
			_kvm_err(kd, kd->program,
			    "_aarch64_minidump_vatop: pde not valid");
			goto invalid;
		}
		a = l3 & ~AARCH64_ATTR_MASK;
		ofs = _kvm_pt_find(kd, a, AARCH64_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
			    "physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AARCH64_PAGE_SIZE - offset);
	} else {
		_kvm_err(kd, kd->program,
		    "_aarch64_minidump_vatop: virtual address 0x%jx not minidumped",
		    (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

static int
_aarch64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0,
		    "_aarch64_minidump_kvatop called in live kernel!");
		return (0);
	}
	return (_aarch64_minidump_vatop(kd, va, pa));
}

static int
_aarch64_native(kvm_t *kd __unused)
{

#ifdef __aarch64__
	return (1);
#else
	return (0);
#endif
}

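/*
 * Derive VM protection bits from an L3 page table entry: a page is writable
 * unless its access-permission bits mark it read-only, and executable unless
 * the execute-never attribute is set.
 */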
static vm_prot_t
_aarch64_entry_to_prot(aarch64_pte_t pte)
{
	vm_prot_t prot = VM_PROT_READ;

	/* Source: arm64/arm64/pmap.c:pmap_protect() */
	if ((pte & AARCH64_ATTR_AP(AARCH64_ATTR_AP_RO)) == 0)
		prot |= VM_PROT_WRITE;
	if ((pte & AARCH64_ATTR_XN) == 0)
		prot |= VM_PROT_EXECUTE;
	return (prot);
}

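/*
 * Walk every page captured in the minidump: first the pages mapped by the
 * dumped kernel page table, then the remaining pages recorded in the dump
 * bitmap, which are reported through their direct map address only.
 */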
static int
_aarch64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm = kd->vmst;
	u_long nptes = vm->hdr.pmapsize / sizeof(aarch64_pte_t);
	u_long bmindex, dva, pa, pteindex, va;
	struct kvm_bitmap bm;
	vm_prot_t prot;
	int ret = 0;

	if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex))
		return (0);

	for (pteindex = 0; pteindex < nptes; pteindex++) {
		aarch64_pte_t pte = _aarch64_pte_get(kd, pteindex);

		if ((pte & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE)
			continue;

		va = vm->hdr.kernbase + (pteindex << AARCH64_L3_SHIFT);
		pa = pte & ~AARCH64_ATTR_MASK;
		dva = vm->hdr.dmapbase + pa;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    _aarch64_entry_to_prot(pte), AARCH64_PAGE_SIZE, 0)) {
			goto out;
		}
	}

	while (_kvm_bitmap_next(&bm, &bmindex)) {
		pa = _kvm_bit_id_pa(kd, bmindex, AARCH64_PAGE_SIZE);
		if (pa == _KVM_PA_INVALID)
			break;
		dva = vm->hdr.dmapbase + pa;
		if (vm->hdr.dmapend < (dva + AARCH64_PAGE_SIZE))
			break;
		va = 0;
		prot = VM_PROT_READ | VM_PROT_WRITE;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    prot, AARCH64_PAGE_SIZE, 0)) {
			goto out;
		}
	}
	ret = 1;

out:
	_kvm_bitmap_deinit(&bm);
	return (ret);
}

static struct kvm_arch kvm_aarch64_minidump = {
	.ka_probe = _aarch64_minidump_probe,
	.ka_initvtop = _aarch64_minidump_initvtop,
	.ka_freevtop = _aarch64_minidump_freevtop,
	.ka_kvatop = _aarch64_minidump_kvatop,
	.ka_native = _aarch64_native,
	.ka_walk_pages = _aarch64_minidump_walk_pages,
};

KVM_ARCH(kvm_aarch64_minidump);