/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Oleksandr Tymoshenko
 * Copyright (c) 2008 Semihalf, Grzegorz Bernacki
 * Copyright (c) 2006 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_arm.c r214223
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * MIPS machine dependent routines for kvm and minidumps.
 */

#include <sys/param.h>
#include <vm/vm.h>
#include <kvm.h>
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "../../sys/mips/include/cca.h"
#define _KVM_MINIDUMP
#include "../../sys/mips/include/cpuregs.h"
#include "../../sys/mips/include/minidump.h"

#include "kvm_private.h"
#include "kvm_mips.h"

#define mips_round_page(x)      roundup2((kvaddr_t)(x), MIPS_PAGE_SIZE)

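/*
 * Per-dump state: the raw minidump header and the width of the kernel's
 * page table entries.  64-bit PTEs are used for n64 and n32 kernels
 * (ELFCLASS64 or EF_MIPS_ABI2), 32-bit PTEs for o32 kernels; the width
 * selects which PTE accessors are used below.
 */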
struct vmstate {
        struct          minidumphdr hdr;
        int             pte_size;
};

static int
_mips_minidump_probe(kvm_t *kd)
{

        if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS32 &&
            kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS64)
                return (0);
        if (kd->nlehdr.e_machine != EM_MIPS)
                return (0);
        return (_kvm_is_minidump(kd));
}

static void
_mips_minidump_freevtop(kvm_t *kd)
{
        struct vmstate *vm = kd->vmst;

        free(vm);
        kd->vmst = NULL;
}

static int
_mips_minidump_initvtop(kvm_t *kd)
{
        struct vmstate *vmst;
        off_t off, dump_avail_off, sparse_off;

        vmst = _kvm_malloc(kd, sizeof(*vmst));
        if (vmst == NULL) {
                _kvm_err(kd, kd->program, "cannot allocate vm");
                return (-1);
        }

        kd->vmst = vmst;

        if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64 ||
            kd->nlehdr.e_flags & EF_MIPS_ABI2)
                vmst->pte_size = 64;
        else
                vmst->pte_size = 32;

        if (pread(kd->pmfd, &vmst->hdr,
            sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) {
                _kvm_err(kd, kd->program, "cannot read dump header");
                return (-1);
        }

        if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
            sizeof(vmst->hdr.magic)) != 0) {
                _kvm_err(kd, kd->program, "not a minidump for this platform");
                return (-1);
        }
        vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version);
        if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
                _kvm_err(kd, kd->program, "wrong minidump version. "
                    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
                return (-1);
        }
        vmst->hdr.msgbufsize = _kvm32toh(kd, vmst->hdr.msgbufsize);
        vmst->hdr.bitmapsize = _kvm32toh(kd, vmst->hdr.bitmapsize);
        vmst->hdr.ptesize = _kvm32toh(kd, vmst->hdr.ptesize);
        vmst->hdr.kernbase = _kvm64toh(kd, vmst->hdr.kernbase);
        vmst->hdr.dmapbase = _kvm64toh(kd, vmst->hdr.dmapbase);
        vmst->hdr.dmapend = _kvm64toh(kd, vmst->hdr.dmapend);
        vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
            _kvm32toh(kd, vmst->hdr.dumpavailsize) : 0;

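        /*
         * The minidump file is laid out as: one page of header, the
         * message buffer, the dump_avail array (absent in version 1
         * dumps), the page bitmap, the kernel page table pages, and
         * finally the sparse page data selected by the bitmap, each
         * piece padded out to a page boundary.  The offsets computed
         * below walk that layout.
         */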
        /* Skip header and msgbuf */
        dump_avail_off = MIPS_PAGE_SIZE + mips_round_page(vmst->hdr.msgbufsize);

        /* Skip dump_avail */
        off = dump_avail_off + mips_round_page(vmst->hdr.dumpavailsize);

        sparse_off = off + mips_round_page(vmst->hdr.bitmapsize) +
            mips_round_page(vmst->hdr.ptesize);
        if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
            vmst->hdr.bitmapsize, off, sparse_off, MIPS_PAGE_SIZE,
            sizeof(uint32_t)) == -1) {
                return (-1);
        }
        off += mips_round_page(vmst->hdr.bitmapsize);

        if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) {
                return (-1);
        }
        off += mips_round_page(vmst->hdr.ptesize);

        return (0);
}

static int
_mips_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
        struct vmstate *vm;
        mips_physaddr_t offset, a;
        kvaddr_t pteindex;
        u_long valid;
        off_t ofs;
        mips32_pte_t pte32;
        mips64_pte_t pte64;

        if (ISALIVE(kd)) {
                _kvm_err(kd, 0, "_mips_minidump_kvatop called in live kernel!");
                return (0);
        }

        offset = va & MIPS_PAGE_MASK;
        /* Operate with page-aligned address */
        va &= ~MIPS_PAGE_MASK;

        vm = kd->vmst;
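        /*
         * Addresses in the XKPHYS and KSEG0/KSEG1 windows are direct
         * mappings of physical memory: the physical address is recovered
         * by masking, with no page table lookup.  Anything else must fall
         * within the kernel page table captured in the minidump.  On
         * success the function returns the number of bytes readable at
         * *pa before the next page boundary; on failure it returns 0.
         */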
        if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64) {
                if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END) {
                        a = va & MIPS_XKPHYS_PHYS_MASK;
                        goto found;
                }
                if (va >= MIPS64_KSEG0_START && va < MIPS64_KSEG0_END) {
                        a = va & MIPS_KSEG0_PHYS_MASK;
                        goto found;
                }
                if (va >= MIPS64_KSEG1_START && va < MIPS64_KSEG1_END) {
                        a = va & MIPS_KSEG0_PHYS_MASK;
                        goto found;
                }
        } else {
                if (va >= MIPS32_KSEG0_START && va < MIPS32_KSEG0_END) {
                        a = va & MIPS_KSEG0_PHYS_MASK;
                        goto found;
                }
                if (va >= MIPS32_KSEG1_START && va < MIPS32_KSEG1_END) {
                        a = va & MIPS_KSEG0_PHYS_MASK;
                        goto found;
                }
        }
        if (va >= vm->hdr.kernbase) {
                pteindex = (va - vm->hdr.kernbase) >> MIPS_PAGE_SHIFT;
                if (vm->pte_size == 64) {
                        valid = pteindex < vm->hdr.ptesize / sizeof(pte64);
                        if (pteindex >= vm->hdr.ptesize / sizeof(pte64))
                                goto invalid;
                        pte64 = _mips64_pte_get(kd, pteindex);
                        valid = pte64 & MIPS_PTE_V;
                        if (valid)
                                a = MIPS64_PTE_TO_PA(pte64);
                } else {
                        if (pteindex >= vm->hdr.ptesize / sizeof(pte32))
                                goto invalid;
                        pte32 = _mips32_pte_get(kd, pteindex);
                        valid = pte32 & MIPS_PTE_V;
                        if (valid)
                                a = MIPS32_PTE_TO_PA(pte32);
                }
                if (!valid) {
                        _kvm_err(kd, kd->program, "_mips_minidump_kvatop: pte "
                            "not valid");
                        goto invalid;
                }
        } else {
                _kvm_err(kd, kd->program, "_mips_minidump_kvatop: virtual "
                    "address 0x%jx not minidumped", (uintmax_t)va);
                return (0);
        }

found:
        ofs = _kvm_pt_find(kd, a, MIPS_PAGE_SIZE);
        if (ofs == -1) {
                _kvm_err(kd, kd->program, "_mips_minidump_kvatop: physical "
                    "address 0x%jx not in minidump", (uintmax_t)a);
                goto invalid;
        }

        *pa = ofs + offset;
        return (MIPS_PAGE_SIZE - offset);

invalid:
        _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
        return (0);
}

static int
#ifdef __mips__
_mips_native(kvm_t *kd)
#else
_mips_native(kvm_t *kd __unused)
#endif
{

#ifdef __mips__
#ifdef __mips_n64
        if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS64)
                return (0);
#else
        if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS32)
                return (0);
#ifdef __mips_n32
        if (!(kd->nlehdr.e_flags & EF_MIPS_ABI2))
                return (0);
#else
        if (kd->nlehdr.e_flags & EF_MIPS_ABI2)
                return (0);
#endif
#endif
#if _BYTE_ORDER == _LITTLE_ENDIAN
        return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB);
#else
        return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB);
#endif
#else
        return (0);
#endif
}

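/*
 * Iterator over the kernel page table entries stored in the minidump,
 * used to back the kvm_walk_pages() callback below.
 */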
struct mips_iter {
        kvm_t *kd;
        u_long nptes;
        u_long pteindex;
};

static void
_mips_iterator_init(struct mips_iter *it, kvm_t *kd)
{
        struct vmstate *vm = kd->vmst;

        it->kd = kd;
        it->pteindex = 0;
        if (vm->pte_size == 64)
                it->nptes = vm->hdr.ptesize / sizeof(mips64_pte_t);
        else
                it->nptes = vm->hdr.ptesize / sizeof(mips32_pte_t);
        return;
}

static int
_mips_iterator_next(struct mips_iter *it, u_long *pa, u_long *va, u_long *dva,
    vm_prot_t *prot)
{
        struct vmstate *vm = it->kd->vmst;
        int found = 0;
        mips64_pte_t pte64;
        mips32_pte_t pte32;

        /*
         * mips/mips/pmap.c: init_pte_prot / pmap_protect indicate that all
         * pages are R|X at least.
         */
        *prot = VM_PROT_READ | VM_PROT_EXECUTE;
        *pa = 0;
        *va = 0;
        *dva = 0;
        for (;it->pteindex < it->nptes && found == 0; it->pteindex++) {
                if (vm->pte_size == 64) {
                        pte64 = _mips64_pte_get(it->kd, it->pteindex);
                        if ((pte64 & MIPS_PTE_V) == 0)
                                continue;
                        if ((pte64 & MIPS64_PTE_RO) == 0)
                                *prot |= VM_PROT_WRITE;
                        *pa = MIPS64_PTE_TO_PA(pte64);
                } else {
                        pte32 = _mips32_pte_get(it->kd, it->pteindex);
                        if ((pte32 & MIPS_PTE_V) == 0)
                                continue;
                        if ((pte32 & MIPS32_PTE_RO) == 0)
                                *prot |= VM_PROT_WRITE;
                        *pa = MIPS32_PTE_TO_PA(pte32);
                }
                *va = vm->hdr.kernbase + (it->pteindex << MIPS_PAGE_SHIFT);
                found = 1;
                /* advance pteindex regardless */
        }

        return found;
}

static int
_mips_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
        struct mips_iter it;
        u_long dva, pa, va;
        vm_prot_t prot;

        /* Generate direct mapped entries; need page entries for prot etc? */
        if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64) {
                /* MIPS_XKPHYS_START..MIPS_XKPHYS_END */
                /* MIPS64_KSEG0_START..MIPS64_KSEG0_END */
                /* MIPS64_KSEG1_START..MIPS64_KSEG1_END */
        } else {
                /* MIPS32_KSEG0_START..MIPS32_KSEG0_END */
                /* MIPS32_KSEG1_START..MIPS32_KSEG1_END */
        }

        _mips_iterator_init(&it, kd);
        while (_mips_iterator_next(&it, &pa, &va, &dva, &prot)) {
                if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
                    prot, MIPS_PAGE_SIZE, 0)) {
                        return (0);
                }
        }
        return (1);
}

static struct kvm_arch kvm_mips_minidump = {
        .ka_probe = _mips_minidump_probe,
        .ka_initvtop = _mips_minidump_initvtop,
        .ka_freevtop = _mips_minidump_freevtop,
        .ka_kvatop = _mips_minidump_kvatop,
        .ka_native = _mips_native,
        .ka_walk_pages = _mips_minidump_walk_pages,
};

KVM_ARCH(kvm_mips_minidump);
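
/*
 * Example consumer (illustrative sketch, not part of this file): open a
 * MIPS minidump and its matching kernel through libkvm and enumerate the
 * dumped pages via the ka_walk_pages hook registered above.  The kernel
 * and vmcore paths are placeholders.
 *
 *	#include <sys/types.h>
 *
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <kvm.h>
 *	#include <limits.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static int
 *	print_page(struct kvm_page *p, void *arg __unused)
 *	{
 *
 *		printf("pa=%#jx va=%#jx prot=%#x len=%zu\n",
 *		    (uintmax_t)p->kp_paddr, (uintmax_t)p->kp_kmap_vaddr,
 *		    (unsigned)p->kp_prot, p->kp_len);
 *		return (1);	// non-zero: keep walking
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		char errbuf[_POSIX2_LINE_MAX];
 *		kvm_t *kd;
 *
 *		kd = kvm_open2("/path/to/kernel", "/path/to/vmcore.0",
 *		    O_RDONLY, errbuf, NULL);
 *		if (kd == NULL)
 *			errx(1, "kvm_open2: %s", errbuf);
 *		if (kvm_walk_pages(kd, print_page, NULL) == 0)
 *			warnx("kvm_walk_pages failed");
 *		kvm_close(kd);
 *		return (0);
 *	}
 */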