/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Oleksandr Tymoshenko
 * Copyright (c) 2008 Semihalf, Grzegorz Bernacki
 * Copyright (c) 2006 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_arm.c r214223
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * MIPS machine dependent routines for kvm and minidumps.
 */

#include <sys/param.h>
#include <vm/vm.h>
#include <kvm.h>
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "../../sys/mips/include/cca.h"
#define _KVM_MINIDUMP
#include "../../sys/mips/include/cpuregs.h"
#include "../../sys/mips/include/minidump.h"

#include "kvm_private.h"
#include "kvm_mips.h"

#define mips_round_page(x)      roundup2((kvaddr_t)(x), MIPS_PAGE_SIZE)

struct vmstate {
        struct          minidumphdr hdr;
        int             pte_size;
};

static int
_mips_minidump_probe(kvm_t *kd)
{

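        /* Accept 32- or 64-bit MIPS ELF cores that carry a minidump header. */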
        if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS32 &&
            kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS64)
                return (0);
        if (kd->nlehdr.e_machine != EM_MIPS)
                return (0);
        return (_kvm_is_minidump(kd));
}

static void
_mips_minidump_freevtop(kvm_t *kd)
{
        struct vmstate *vm = kd->vmst;

        free(vm);
        kd->vmst = NULL;
}

static int
_mips_minidump_initvtop(kvm_t *kd)
{
        struct vmstate *vmst;
        off_t off, dump_avail_off, sparse_off;

        vmst = _kvm_malloc(kd, sizeof(*vmst));
        if (vmst == NULL) {
                _kvm_err(kd, kd->program, "cannot allocate vm");
                return (-1);
        }

        kd->vmst = vmst;

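        /*
         * n64 (64-bit ELF) and n32 (EF_MIPS_ABI2) kernels record 64-bit
         * PTEs in the dump; o32 kernels record 32-bit PTEs.
         */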
        if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64 ||
            kd->nlehdr.e_flags & EF_MIPS_ABI2)
                vmst->pte_size = 64;
        else
                vmst->pte_size = 32;

        if (pread(kd->pmfd, &vmst->hdr,
            sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) {
                _kvm_err(kd, kd->program, "cannot read dump header");
                return (-1);
        }

        if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
            sizeof(vmst->hdr.magic)) != 0) {
                _kvm_err(kd, kd->program, "not a minidump for this platform");
                return (-1);
        }
        vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version);
        if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
                _kvm_err(kd, kd->program, "wrong minidump version. "
                    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
                return (-1);
        }
        vmst->hdr.msgbufsize = _kvm32toh(kd, vmst->hdr.msgbufsize);
        vmst->hdr.bitmapsize = _kvm32toh(kd, vmst->hdr.bitmapsize);
        vmst->hdr.ptesize = _kvm32toh(kd, vmst->hdr.ptesize);
        vmst->hdr.kernbase = _kvm64toh(kd, vmst->hdr.kernbase);
        vmst->hdr.dmapbase = _kvm64toh(kd, vmst->hdr.dmapbase);
        vmst->hdr.dmapend = _kvm64toh(kd, vmst->hdr.dmapend);
        vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
            _kvm32toh(kd, vmst->hdr.dumpavailsize) : 0;

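        /*
         * On-disk layout: one page of header, then the message buffer,
         * the dump_avail array (present only in MINIDUMP_VERSION dumps),
         * the page bitmap, the kernel page table and finally the dumped
         * pages themselves, each section rounded up to a page boundary.
         */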
        /* Skip header and msgbuf */
        dump_avail_off = MIPS_PAGE_SIZE + mips_round_page(vmst->hdr.msgbufsize);

        /* Skip dump_avail */
        off = dump_avail_off + mips_round_page(vmst->hdr.dumpavailsize);

        sparse_off = off + mips_round_page(vmst->hdr.bitmapsize) +
            mips_round_page(vmst->hdr.ptesize);
        if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
            vmst->hdr.bitmapsize, off, sparse_off, MIPS_PAGE_SIZE) == -1) {
                return (-1);
        }
        off += mips_round_page(vmst->hdr.bitmapsize);

        if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) {
                return (-1);
        }
        off += mips_round_page(vmst->hdr.ptesize);

        return (0);
}

static int
_mips_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
        struct vmstate *vm;
        mips_physaddr_t offset, a;
        kvaddr_t pteindex;
        u_long valid;
        off_t ofs;
        mips32_pte_t pte32;
        mips64_pte_t pte64;

        if (ISALIVE(kd)) {
                _kvm_err(kd, 0, "_mips_minidump_kvatop called in live kernel!");
                return (0);
        }

        offset = va & MIPS_PAGE_MASK;
        /* Operate with page-aligned address */
        va &= ~MIPS_PAGE_MASK;

        vm = kd->vmst;
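        /*
         * XKPHYS and the KSEG0/KSEG1 windows are direct-mapped: the
         * physical address falls out of a simple mask, with no page
         * table lookup needed.
         */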
        if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64) {
                if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END) {
                        a = va & MIPS_XKPHYS_PHYS_MASK;
                        goto found;
                }
                if (va >= MIPS64_KSEG0_START && va < MIPS64_KSEG0_END) {
                        a = va & MIPS_KSEG0_PHYS_MASK;
                        goto found;
                }
                if (va >= MIPS64_KSEG1_START && va < MIPS64_KSEG1_END) {
                        a = va & MIPS_KSEG0_PHYS_MASK;
                        goto found;
                }
        } else {
                if (va >= MIPS32_KSEG0_START && va < MIPS32_KSEG0_END) {
                        a = va & MIPS_KSEG0_PHYS_MASK;
                        goto found;
                }
                if (va >= MIPS32_KSEG1_START && va < MIPS32_KSEG1_END) {
                        a = va & MIPS_KSEG0_PHYS_MASK;
                        goto found;
                }
        }
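        /*
         * Everything at or above kernbase is translated through the flat
         * array of kernel PTEs saved in the minidump, indexed by page
         * number relative to kernbase.
         */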
        if (va >= vm->hdr.kernbase) {
                pteindex = (va - vm->hdr.kernbase) >> MIPS_PAGE_SHIFT;
                if (vm->pte_size == 64) {
                        if (pteindex >= vm->hdr.ptesize / sizeof(pte64))
                                goto invalid;
                        pte64 = _mips64_pte_get(kd, pteindex);
                        valid = pte64 & MIPS_PTE_V;
                        if (valid)
                                a = MIPS64_PTE_TO_PA(pte64);
                } else {
                        if (pteindex >= vm->hdr.ptesize / sizeof(pte32))
                                goto invalid;
                        pte32 = _mips32_pte_get(kd, pteindex);
                        valid = pte32 & MIPS_PTE_V;
                        if (valid)
                                a = MIPS32_PTE_TO_PA(pte32);
                }
                if (!valid) {
                        _kvm_err(kd, kd->program, "_mips_minidump_kvatop: pte "
                            "not valid");
                        goto invalid;
                }
        } else {
                _kvm_err(kd, kd->program, "_mips_minidump_kvatop: virtual "
                    "address 0x%jx not minidumped", (uintmax_t)va);
                return (0);
        }

found:
        ofs = _kvm_pt_find(kd, a, MIPS_PAGE_SIZE);
        if (ofs == -1) {
                _kvm_err(kd, kd->program, "_mips_minidump_kvatop: physical "
                    "address 0x%jx not in minidump", (uintmax_t)a);
                goto invalid;
        }

        *pa = ofs + offset;
        return (MIPS_PAGE_SIZE - offset);

invalid:
        _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
        return (0);
}

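/*
 * A dump is considered native only when its ELF class, ABI flags and byte
 * order all match the MIPS ABI this libkvm was built for; on non-MIPS hosts
 * it is never native.
 */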
static int
#ifdef __mips__
_mips_native(kvm_t *kd)
#else
_mips_native(kvm_t *kd __unused)
#endif
{

#ifdef __mips__
#ifdef __mips_n64
        if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS64)
                return (0);
#else
        if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS32)
                return (0);
#ifdef __mips_n32
        if (!(kd->nlehdr.e_flags & EF_MIPS_ABI2))
                return (0);
#else
        if (kd->nlehdr.e_flags & EF_MIPS_ABI2)
                return (0);
#endif
#endif
#if _BYTE_ORDER == _LITTLE_ENDIAN
        return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB);
#else
        return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB);
#endif
#else
        return (0);
#endif
}

struct mips_iter {
        kvm_t *kd;
        u_long nptes;
        u_long pteindex;
};

static void
_mips_iterator_init(struct mips_iter *it, kvm_t *kd)
{
        struct vmstate *vm = kd->vmst;

        it->kd = kd;
        it->pteindex = 0;
        if (vm->pte_size == 64)
                it->nptes = vm->hdr.ptesize / sizeof(mips64_pte_t);
        else
                it->nptes = vm->hdr.ptesize / sizeof(mips32_pte_t);
        return;
}

static int
_mips_iterator_next(struct mips_iter *it, u_long *pa, u_long *va, u_long *dva,
    vm_prot_t *prot)
{
        struct vmstate *vm = it->kd->vmst;
        int found = 0;
        mips64_pte_t pte64;
        mips32_pte_t pte32;

        /*
         * mips/mips/pmap.c: init_pte_prot / pmap_protect indicate that all
         * pages are R|X at least.
         */
        *prot = VM_PROT_READ | VM_PROT_EXECUTE;
        *pa = 0;
        *va = 0;
        *dva = 0;
        for (; it->pteindex < it->nptes && found == 0; it->pteindex++) {
                if (vm->pte_size == 64) {
                        pte64 = _mips64_pte_get(it->kd, it->pteindex);
                        if ((pte64 & MIPS_PTE_V) == 0)
                                continue;
                        if ((pte64 & MIPS64_PTE_RO) == 0)
                                *prot |= VM_PROT_WRITE;
                        *pa = MIPS64_PTE_TO_PA(pte64);
                } else {
                        pte32 = _mips32_pte_get(it->kd, it->pteindex);
                        if ((pte32 & MIPS_PTE_V) == 0)
                                continue;
                        if ((pte32 & MIPS32_PTE_RO) == 0)
                                *prot |= VM_PROT_WRITE;
                        *pa = MIPS32_PTE_TO_PA(pte32);
                }
                *va = vm->hdr.kernbase + (it->pteindex << MIPS_PAGE_SHIFT);
                found = 1;
                /* advance pteindex regardless */
        }

        return found;
}

static int
_mips_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
        struct mips_iter it;
        u_long dva, pa, va;
        vm_prot_t prot;

        /* Generate direct mapped entries; need page entries for prot etc? */
        if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64) {
                /* MIPS_XKPHYS_START..MIPS_XKPHYS_END */
                /* MIPS64_KSEG0_START..MIPS64_KSEG0_END */
                /* MIPS64_KSEG1_START..MIPS64_KSEG1_END */
        } else {
                /* MIPS32_KSEG0_START..MIPS32_KSEG0_END */
                /* MIPS32_KSEG1_START..MIPS32_KSEG1_END */
        }

        _mips_iterator_init(&it, kd);
        while (_mips_iterator_next(&it, &pa, &va, &dva, &prot)) {
                if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
                    prot, MIPS_PAGE_SIZE, 0)) {
                        return (0);
                }
        }
        return (1);
}

static struct kvm_arch kvm_mips_minidump = {
        .ka_probe = _mips_minidump_probe,
        .ka_initvtop = _mips_minidump_initvtop,
        .ka_freevtop = _mips_minidump_freevtop,
        .ka_kvatop = _mips_minidump_kvatop,
        .ka_native = _mips_native,
        .ka_walk_pages = _mips_minidump_walk_pages,
};

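/* Register this backend so libkvm considers it when probing a core file. */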
KVM_ARCH(kvm_mips_minidump);