2 * Copyright (c) 2006 Peter Wemm
3 * Copyright (c) 2015 The FreeBSD Foundation
5 * Copyright (c) 2019 Mitchell Horne
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include "opt_watchdog.h"
34 #include <sys/param.h>
35 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/kerneldump.h>
40 #include <sys/msgbuf.h>
41 #include <sys/watchdog.h>
42 #include <sys/vmmeter.h>
45 #include <vm/vm_param.h>
46 #include <vm/vm_page.h>
47 #include <vm/vm_phys.h>
50 #include <machine/atomic.h>
51 #include <machine/elf.h>
52 #include <machine/md_var.h>
53 #include <machine/minidump.h>
/* The dump header must be exactly one 512-byte disk sector. */
55 CTASSERT(sizeof(struct kerneldumpheader) == 512);
/* Each bitmap word must be 64 bits wide (the code shifts with "1ul << bit"). */
56 CTASSERT(sizeof(*vm_page_dump) == 8);
/*
 * Bitmap of physical pages to include in the minidump, one bit per page;
 * maintained by dump_add_page()/dump_drop_page() below.
 */
58 uint64_t *vm_page_dump;
/* Size of the bitmap in bytes. */
59 int vm_page_dump_size;
/* Pre-built kernel dump header, passed to dump_start()/dump_finish(). */
61 static struct kerneldumpheader kdh;
63 /* Handle chunked writes. */
64 /* counter: bytes written since the last progress report; progress: bytes remaining. */
66 static size_t counter, progress, dumpsize;
/* One-page scratch buffer used for the minidump header and fake L3 tables. */
68 static uint64_t tmpbuffer[PAGE_SIZE / sizeof(uint64_t)];
/*
 * Per-decile progress reporting table (min_per/max_per/visited fields used
 * by report_progress()).  NOTE(review): the struct declaration and the
 * initializer entries are missing from this excerpt of the file.
 */
74 } progress_track[10] = {
/*
 * Print dump progress as a percentage.  `progress` counts bytes still to be
 * written, so percent-complete is 100 minus the remaining fraction of
 * `dumpsize`.  Each decile in progress_track is printed at most once
 * (tracked via the `visited` flag).
 * NOTE(review): several interior lines (local declarations, loop braces,
 * `continue`/`break` statements) are missing from this excerpt.
 */
88 report_progress(size_t progress, size_t dumpsize)
92 sofar = 100 - ((progress * 100) / dumpsize);
93 for (i = 0; i < nitems(progress_track); i++) {
/* Skip buckets that do not contain the current percentage. */
94 if (sofar < progress_track[i].min_per ||
95 sofar > progress_track[i].max_per)
/* Each bucket is reported only once per dump. */
97 if (progress_track[i].visited)
99 progress_track[i].visited = 1;
100 printf("..%d%%", sofar);
/*
 * Decide whether the physical page at `pa` should be written to the dump.
 * If the page has a vm_page structure, honor its PG_NODUMP flag; otherwise
 * fall back to checking whether `pa` lies inside one of the dump_avail[]
 * ranges (a zero-terminated list of start/end pairs).
 * NOTE(review): return type, local declarations and the final return
 * statements are missing from this excerpt.
 */
106 is_dumpable(vm_paddr_t pa)
111 if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
112 return ((m->flags & PG_NODUMP) == 0);
/* dump_avail[] is scanned in (start, end) pairs until a zero pair. */
114 for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
115 if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
/*
 * Flush any partially accumulated block (`fragsz` bytes at `dump_va`)
 * to the dump device via dump_append().
 * NOTE(review): the fragsz==0 early return and the reset of fragsz are
 * missing from this excerpt.
 */
122 blk_flush(struct dumperinfo *di)
129 error = dump_append(di, dump_va, 0, fragsz);
135 * Write a block of data to the dump file.
137 * Caller can provide data through a pointer (ptr) or by specifying its
138 * physical address (pa) — exactly one of the two, never both.
140 * XXX writes using pa should be no larger than PAGE_SIZE.
/*
 * NOTE(review): many interior lines (return statements, the fragment
 * accumulation path, loop braces) are missing from this excerpt.
 */
143 blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
/* Largest single I/O we may hand to the dump device. */
149 maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
150 if (maxdumpsz == 0) /* seatbelt */
151 maxdumpsz = PAGE_SIZE;
/* All writes must be whole pages. */
153 if ((sz % PAGE_SIZE) != 0) {
154 printf("size not page aligned\n");
/* Exactly one of ptr/pa may be supplied. */
157 if (ptr != NULL && pa != 0) {
158 printf("cant have both va and pa!\n");
/* Physical-address writes must start on a page boundary. */
161 if ((((uintptr_t)pa) % PAGE_SIZE) != 0) {
162 printf("address not page aligned %#lx\n", (uintptr_t)pa);
167 * If we're doing a virtual dump, flush any
168 * pre-existing pa pages.
170 error = blk_flush(di);
/* Cap each chunk at whatever room remains in the current block. */
175 len = maxdumpsz - fragsz;
/* Periodically report progress and pat the watchdog (every 4 MB). */
181 report_progress(progress, dumpsize);
182 counter &= (1 << 22) - 1;
185 wdog_kern_pat(WD_LASTVAL);
/* Pointer-supplied data goes straight to the device. */
188 error = dump_append(di, ptr, 0, len);
/* pa-supplied data is accessed through the direct map. */
194 dump_va = (void *)PHYS_TO_DMAP(pa);
198 error = blk_flush(di);
203 /* Check for user abort */
208 printf(" (CTRL-C to abort) ");
/*
 * Write a RISC-V minidump: walk the kernel pmap to size the dump, emit the
 * kernel dump header, the minidump header, the message buffer, the page
 * bitmap, the kernel page-table pages, and finally every page marked in
 * vm_page_dump.  Returns 0 on success or an errno-style error.
 * NOTE(review): many interior lines (local declarations, error-path gotos,
 * loop braces, the retry label) are missing from this excerpt.
 */
215 minidumpsys(struct dumperinfo *di)
219 struct minidumphdr mdhdr;
234 /* Build set of dumpable pages from kernel pmap */
235 for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
/* One page of (real or fake) L3 table is dumped per L2 step. */
236 pmapsize += PAGE_SIZE;
237 if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3))
240 /* We should always be using the l2 table for kvm */
244 /* l2 may be a superpage */
245 if ((*l2 & PTE_RWX) != 0) {
/* Superpage: derive the base PA from the L2 PPN (presumably 2 MB-aligned — confirm against PTE_PPN1_S/L2_SHIFT definitions). */
246 pa = (*l2 >> PTE_PPN1_S) << L2_SHIFT;
247 for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) {
/* Otherwise walk the real L3 table, skipping invalid entries. */
252 for (i = 0; i < Ln_ENTRIES; i++) {
253 if ((l3[i] & PTE_V) == 0)
255 pa = (l3[i] >> PTE_PPN0_S) * PAGE_SIZE;
262 /* Calculate dump size */
/* Fixed parts: message buffer, page bitmap, then one page per set bit. */
264 dumpsize += round_page(msgbufp->msg_size);
265 dumpsize += round_page(vm_page_dump_size);
266 for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
267 bits = vm_page_dump[i];
/* Iterate over set bits: lowest set bit gives the page index. */
269 bit = ffsl(bits) - 1;
270 pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
272 /* Clear out undumpable pages now if needed */
274 dumpsize += PAGE_SIZE;
277 bits &= ~(1ul << bit);
280 dumpsize += PAGE_SIZE;
284 /* Initialize mdhdr */
285 bzero(&mdhdr, sizeof(mdhdr));
286 strcpy(mdhdr.magic, MINIDUMP_MAGIC);
287 mdhdr.version = MINIDUMP_VERSION;
288 mdhdr.msgbufsize = msgbufp->msg_size;
289 mdhdr.bitmapsize = vm_page_dump_size;
290 mdhdr.pmapsize = pmapsize;
291 mdhdr.kernbase = KERNBASE;
292 mdhdr.dmapphys = DMAP_MIN_PHYSADDR;
293 mdhdr.dmapbase = DMAP_MIN_ADDRESS;
294 mdhdr.dmapend = DMAP_MAX_ADDRESS;
/* Build the outer kerneldump header and announce the dump to the device. */
296 dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_RISCV_VERSION,
299 error = dump_start(di, &kdh);
303 printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
304 ptoa((uintmax_t)physmem) / 1048576);
306 /* Dump minidump header */
/* The header is padded to a full page via tmpbuffer. */
307 bzero(&tmpbuffer, sizeof(tmpbuffer));
308 bcopy(&mdhdr, &tmpbuffer, sizeof(mdhdr));
309 error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
313 /* Dump msgbuf up front */
314 error = blk_write(di, (char *)msgbufp->msg_ptr, 0,
315 round_page(msgbufp->msg_size));
/* Dump the page bitmap itself so the reader can locate pages. */
320 error = blk_write(di, (char *)vm_page_dump, 0,
321 round_page(vm_page_dump_size));
325 /* Dump kernel page directory pages */
326 bzero(&tmpbuffer, sizeof(tmpbuffer));
327 for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
328 if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3)) {
329 /* We always write a page, even if it is zero */
330 error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
333 /* Flush, in case we reuse tmpbuffer in the same block */
334 error = blk_flush(di);
337 } else if ((*l2 & PTE_RWX) != 0) {
338 /* Generate fake l3 entries based on the l2 superpage */
339 for (i = 0; i < Ln_ENTRIES; i++) {
340 tmpbuffer[i] = (*l2 | (i << PTE_PPN0_S));
342 /* We always write a page, even if it is zero */
343 error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
346 /* Flush, in case we reuse tmpbuffer in the same block */
347 error = blk_flush(di);
350 bzero(&tmpbuffer, sizeof(tmpbuffer));
/* Real L3 table: dump the table page itself by physical address. */
352 pa = (*l2 >> PTE_PPN0_S) * PAGE_SIZE;
354 /* We always write a page, even if it is zero */
355 error = blk_write(di, NULL, pa, PAGE_SIZE);
361 /* Dump memory chunks */
362 /* XXX cluster it up and use blk_dump() */
363 for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
364 bits = vm_page_dump[i];
366 bit = ffsl(bits) - 1;
367 pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
369 error = blk_write(di, 0, pa, PAGE_SIZE);
372 bits &= ~(1ul << bit);
/* Push out any trailing partial block before finalizing. */
376 error = blk_flush(di);
380 error = dump_finish(di, &kdh);
384 printf("\nDump complete\n");
/* Error reporting: ENOSPC means the bitmap grew mid-dump; retry a few times. */
392 if (error == ENOSPC) {
393 printf("Dump map grown while dumping. ");
394 if (retry_count < 5) {
395 printf("Retrying...\n");
398 printf("Dump failed.\n");
400 else if (error == ECANCELED)
401 printf("Dump aborted\n");
402 else if (error == E2BIG)
403 printf("Dump failed. Partition too small.\n");
405 printf("** DUMP FAILED (ERROR %d) **\n", error);
410 * Add a page to the minidump bitmap.
/*
 * Atomically set the bit for physical address `pa` in vm_page_dump.
 * The word index is the page frame number divided by 64 (bits per word).
 * NOTE(review): the page-frame shift and `bit` computation lines are
 * missing from this excerpt.
 */
413 dump_add_page(vm_paddr_t pa)
418 idx = pa >> 6; /* 2^6 = 64 */
420 atomic_set_long(&vm_page_dump[idx], 1ul << bit);
424 * Remove page from the minidump bitmap.
/*
 * Atomically clear the bit for physical address `pa` in vm_page_dump —
 * the inverse of dump_add_page().
 * NOTE(review): the page-frame shift and `bit` computation lines are
 * missing from this excerpt.
 */
427 dump_drop_page(vm_paddr_t pa)
432 idx = pa >> 6; /* 2^6 = 64 */
434 atomic_clear_long(&vm_page_dump[idx], 1ul << bit);