2 * Copyright (c) 2006 Peter Wemm
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
30 * i386 machine dependent routines for kvm and minidumps.
33 #include <sys/param.h>
34 #include <sys/endian.h>
42 #include "../../sys/i386/include/minidump.h"
46 #include "kvm_private.h"
/* Round an address/size up to the next i386 page (4 KiB) boundary. */
49 #define i386_round_page(x) roundup2((kvaddr_t)(x), I386_PAGE_SIZE)
/*
 * NOTE(review): member of struct vmstate (the struct's opening line is
 * outside this excerpt). Holds the raw minidump header read from the
 * start of the dump file; fields are byte-swapped in place to host
 * order by _i386_minidump_initvtop().
 */
52 struct minidumphdr hdr;
/*
 * Look up the PAE (64-bit) page-table entry at index 'pteindex' in the
 * minidump's saved page-table region, via the libkvm pmap cache.
 * NOTE(review): excerpt — the conversion/return lines of this body are
 * not visible here; presumably the PTE is byte-swapped to host order
 * before being returned (cf. the le32toh() calls in initvtop).
 */
56 _i386_pte_pae_get(kvm_t *kd, u_long pteindex)
58 	i386_pte_pae_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));
/*
 * Look up the non-PAE (32-bit) page-table entry at index 'pteindex' in
 * the minidump's saved page-table region, via the libkvm pmap cache.
 * NOTE(review): excerpt — the return path of this body is not visible
 * in this view.
 */
64 _i386_pte_get(kvm_t *kd, u_long pteindex)
66 	i386_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));
/*
 * Arch-probe hook: accept this dump only if it is a 32-bit i386 ELF
 * kernel image AND carries the minidump marker.
 */
72 _i386_minidump_probe(kvm_t *kd)
75 	return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_386) &&
76 	    _kvm_is_minidump(kd));
/*
 * Arch-teardown hook: release the per-dump vmstate allocated by
 * _i386_minidump_initvtop().
 * NOTE(review): excerpt — the free/cleanup statements of this body are
 * not visible in this view.
 */
80 _i386_minidump_freevtop(kvm_t *kd)
82 	struct vmstate *vm = kd->vmst;
/*
 * Arch-init hook: read and validate the minidump header, byte-swap its
 * little-endian fields to host order, then set up the sparse-page
 * translation table (bitmap) and the saved page-table (pmap) caches.
 * The dump file layout consumed here is: header page, message buffer,
 * page bitmap, page tables, then the sparse page data itself.
 * NOTE(review): excerpt — error-return statements, braces, and the
 * final success return are elided between the visible lines.
 */
89 _i386_minidump_initvtop(kvm_t *kd)
92 	off_t off, sparse_off;
	/* Per-dump state; freed by _i386_minidump_freevtop(). */
94 	vmst = _kvm_malloc(kd, sizeof(*vmst));
96 		_kvm_err(kd, kd->program, "cannot allocate vm");
	/* The header sits at the very start of the dump file. */
100 	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
102 		_kvm_err(kd, kd->program, "cannot read dump header");
	/* Reject dumps written by another platform's minidump code. */
105 	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) {
106 		_kvm_err(kd, kd->program, "not a minidump for this platform");
	/* Header fields are stored little-endian; convert before use. */
109 	vmst->hdr.version = le32toh(vmst->hdr.version);
110 	if (vmst->hdr.version != MINIDUMP_VERSION) {
111 		_kvm_err(kd, kd->program, "wrong minidump version. expected %d got %d",
112 		    MINIDUMP_VERSION, vmst->hdr.version);
115 	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
116 	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
117 	vmst->hdr.ptesize = le32toh(vmst->hdr.ptesize);
118 	vmst->hdr.kernbase = le32toh(vmst->hdr.kernbase);
119 	vmst->hdr.paemode = le32toh(vmst->hdr.paemode);
121 	/* Skip header and msgbuf */
122 	off = I386_PAGE_SIZE + i386_round_page(vmst->hdr.msgbufsize);
	/* Sparse page data begins after the bitmap and page tables. */
124 	sparse_off = off + i386_round_page(vmst->hdr.bitmapsize) +
125 	    i386_round_page(vmst->hdr.ptesize);
126 	if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
127 	    I386_PAGE_SIZE, sizeof(uint32_t)) == -1) {
130 	off += i386_round_page(vmst->hdr.bitmapsize);
	/* Cache the saved kernel page tables for PTE lookups. */
132 	if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) {
135 	off += i386_round_page(vmst->hdr.ptesize);
/*
 * Translate kernel virtual address 'va' to a file offset (*pa) within
 * the minidump, using the saved PAE (64-bit PTE) page tables. Returns
 * the number of contiguous bytes valid at that offset (page size minus
 * the offset into the page), or an error for addresses below kernbase,
 * invalid PTEs, or pages not captured in the dump.
 * NOTE(review): excerpt — error-return statements, the *pa assignment,
 * and closing braces are elided between the visible lines.
 */
141 _i386_minidump_vatop_pae(kvm_t *kd, kvaddr_t va, off_t *pa)
144 	i386_physaddr_pae_t offset;
147 	i386_physaddr_pae_t a;
	/* Byte offset within the 4 KiB page. */
151 	offset = va & I386_PAGE_MASK;
	/* Only kernel-space addresses are mapped in a minidump. */
153 	if (va >= vm->hdr.kernbase) {
154 		pteindex = (va - vm->hdr.kernbase) >> I386_PAGE_SHIFT;
		/* Bounds-check against the saved page-table region. */
155 		if (pteindex >= vm->hdr.ptesize / sizeof(pte))
157 		pte = _i386_pte_pae_get(kd, pteindex);
158 		if ((pte & I386_PG_V) == 0) {
159 			_kvm_err(kd, kd->program,
160 			    "_i386_minidump_vatop_pae: pte not valid");
		/* Physical frame address from the PTE. */
163 		a = pte & I386_PG_FRAME_PAE;
		/* Map the physical page to its offset in the sparse dump. */
164 		ofs = _kvm_pt_find(kd, a, I386_PAGE_SIZE);
166 			_kvm_err(kd, kd->program,
167 			    "_i386_minidump_vatop_pae: physical address 0x%jx not in minidump",
172 		return (I386_PAGE_SIZE - offset);
174 		_kvm_err(kd, kd->program,
175 		    "_i386_minidump_vatop_pae: virtual address 0x%jx not minidumped",
181 	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
/*
 * Translate kernel virtual address 'va' to a file offset (*pa) within
 * the minidump, using the saved non-PAE (32-bit PTE) page tables.
 * Same contract as _i386_minidump_vatop_pae(): returns contiguous
 * bytes valid at the translated offset, or errors out for user-space
 * addresses, invalid PTEs, or pages absent from the dump.
 * NOTE(review): excerpt — error-return statements, the *pa assignment,
 * and closing braces are elided between the visible lines.
 */
186 _i386_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
189 	i386_physaddr_t offset;
	/* Byte offset within the 4 KiB page. */
196 	offset = va & I386_PAGE_MASK;
	/* Only kernel-space addresses are mapped in a minidump. */
198 	if (va >= vm->hdr.kernbase) {
199 		pteindex = (va - vm->hdr.kernbase) >> I386_PAGE_SHIFT;
		/* Bounds-check against the saved page-table region. */
200 		if (pteindex >= vm->hdr.ptesize / sizeof(pte))
202 		pte = _i386_pte_get(kd, pteindex);
203 		if ((pte & I386_PG_V) == 0) {
204 			_kvm_err(kd, kd->program,
205 			    "_i386_minidump_vatop: pte not valid");
		/* Physical frame address from the PTE. */
208 		a = pte & I386_PG_FRAME;
		/* Map the physical page to its offset in the sparse dump. */
209 		ofs = _kvm_pt_find(kd, a, I386_PAGE_SIZE);
211 			_kvm_err(kd, kd->program,
212 			    "_i386_minidump_vatop: physical address 0x%jx not in minidump",
217 		return (I386_PAGE_SIZE - offset);
219 		_kvm_err(kd, kd->program,
220 		    "_i386_minidump_vatop: virtual address 0x%jx not minidumped",
226 	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
/*
 * Arch kvatop hook: dispatch a kernel VA translation to the PAE or
 * non-PAE resolver based on the 'paemode' flag recorded in the dump
 * header. Only meaningful for dead (file-backed) kernels; the live
 * kernel case is rejected with an error.
 * NOTE(review): excerpt — the live-kernel guard's if/return lines are
 * partially elided around the visible error call.
 */
231 _i386_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
235 		_kvm_err(kd, 0, "_i386_minidump_kvatop called in live kernel!");
238 	if (kd->vmst->hdr.paemode)
239 		return (_i386_minidump_vatop_pae(kd, va, pa));
241 		return (_i386_minidump_vatop(kd, va, pa));
/*
 * Derive VM protection bits from a page-table entry: every mapping is
 * readable; writable if PG_RW is set; executable unless the NX bit is
 * set. Takes uint64_t so it serves both PAE and non-PAE PTEs.
 * NOTE(review): excerpt — the 'return (prot);' line is elided from
 * this view.
 */
245 _i386_entry_to_prot(uint64_t pte)
247 	vm_prot_t prot = VM_PROT_READ;
249 	/* Source: i386/pmap.c:pmap_protect() */
250 	if (pte & I386_PG_RW)
251 		prot |= VM_PROT_WRITE;
252 	if ((pte & I386_PG_NX) == 0)
253 		prot |= VM_PROT_EXECUTE;
/*
 * Initialize a page-table iterator over the dump's saved PTE region.
 * The entry count depends on PTE width: 8-byte entries in PAE mode,
 * 4-byte entries otherwise.
 * NOTE(review): excerpt — field zeroing/brace lines are elided between
 * the visible lines.
 */
265 _i386_iterator_init(struct i386_iter *it, kvm_t *kd)
267 	struct vmstate *vm = kd->vmst;
271 	if (vm->hdr.paemode) {
272 		it->nptes = vm->hdr.ptesize / sizeof(i386_pte_pae_t);
274 		it->nptes = vm->hdr.ptesize / sizeof(i386_pte_t);
/*
 * Advance the iterator to the next valid (PG_V) page-table entry and
 * report its physical address, kernel virtual address, and protection.
 * Skips invalid entries; selects 64-bit or 32-bit PTE decoding based
 * on the dump's paemode flag.
 * NOTE(review): excerpt — the 'found' bookkeeping, 'continue'
 * statements, *dva handling, and the function's return are elided
 * from this view.
 */
280 _i386_iterator_next(struct i386_iter *it, u_long *pa, u_long *va, u_long *dva,
283 	struct vmstate *vm = it->kd->vmst;
285 	i386_pte_pae_t pte64;
	/* Scan forward until a valid entry is found or entries run out. */
293 	for (; it->pteindex < it->nptes && found == 0; it->pteindex++) {
294 		if (vm->hdr.paemode) {
295 			pte64 = _i386_pte_pae_get(it->kd, it->pteindex);
296 			if ((pte64 & I386_PG_V) == 0)
298 			*prot = _i386_entry_to_prot(pte64);
299 			*pa = pte64 & I386_PG_FRAME_PAE;
301 			pte32 = _i386_pte_get(it->kd, it->pteindex);
302 			if ((pte32 & I386_PG_V) == 0)
304 			*prot = _i386_entry_to_prot(pte32);
305 			*pa = pte32 & I386_PG_FRAME;
		/* PTE index N maps the Nth page above the kernel base. */
307 		*va = vm->hdr.kernbase + (it->pteindex << I386_PAGE_SHIFT);
/*
 * Arch walk_pages hook: iterate every valid mapping recorded in the
 * dump's page tables and invoke the user callback for each page;
 * stops early if the callback declines to continue.
 * NOTE(review): excerpt — local declarations, the early-return inside
 * the loop, and the final return value are elided from this view.
 */
314 _i386_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
320 	_i386_iterator_init(&it, kd);
321 	while (_i386_iterator_next(&it, &pa, &va, &dva, &prot)) {
322 		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
323 		    prot, I386_PAGE_SIZE, 0)) {
/*
 * Arch-ops vtable wiring the i386 minidump handlers into libkvm's
 * backend dispatch; KVM_ARCH() registers it at load time so kvm_open()
 * can probe and select this backend for matching dump files.
 */
330 static struct kvm_arch kvm_i386_minidump = {
331 	.ka_probe = _i386_minidump_probe,
332 	.ka_initvtop = _i386_minidump_initvtop,
333 	.ka_freevtop = _i386_minidump_freevtop,
334 	.ka_kvatop = _i386_minidump_kvatop,
335 	.ka_native = _i386_native,
336 	.ka_walk_pages = _i386_minidump_walk_pages,
339 KVM_ARCH(kvm_i386_minidump);