2 * Copyright (c) 2006 Peter Wemm
3 * Copyright (c) 2015 The FreeBSD Foundation
5 * Copyright (c) 2019 Mitchell Horne
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include "opt_watchdog.h"
34 #include <sys/param.h>
35 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/kerneldump.h>
40 #include <sys/msgbuf.h>
41 #include <sys/watchdog.h>
42 #include <sys/vmmeter.h>
45 #include <vm/vm_param.h>
46 #include <vm/vm_page.h>
47 #include <vm/vm_phys.h>
48 #include <vm/vm_dumpset.h>
51 #include <machine/atomic.h>
52 #include <machine/elf.h>
53 #include <machine/md_var.h>
54 #include <machine/minidump.h>
/* The kernel dump header must be exactly one 512-byte disk sector. */
56 CTASSERT(sizeof(struct kerneldumpheader) == 512);
/* Dump header passed to dump_start()/dump_finish() in minidumpsys(). */
58 static struct kerneldumpheader kdh;
60 /* Handle chunked writes. */
/* counter: bytes written since the last progress report; progress: bytes
 * remaining; dumpsize: total bytes to write (all set in minidumpsys()). */
63 static size_t counter, progress, dumpsize;
/* One page of scratch space, reused for the minidump header, dump_avail,
 * and synthesized L3 page-table pages. */
65 static uint64_t tmpbuffer[PAGE_SIZE / sizeof(uint64_t)];
/* Ten percentage buckets consulted by report_progress(); each has
 * min_per/max_per bounds and a visited flag (fields defined above). */
71 } progress_track[10] = {
/*
 * Print a "..N%" progress milestone to the console.  `progress' counts
 * bytes remaining (it is decremented as the dump proceeds), so the
 * completed percentage is 100 minus the remaining fraction.  Each
 * progress_track bucket is reported at most once per dump.
 */
85 report_progress(size_t progress, size_t dumpsize)
/* progress counts down from dumpsize, so sofar is percent completed. */
89 sofar = 100 - ((progress * 100) / dumpsize);
90 for (i = 0; i < nitems(progress_track); i++) {
/* Skip buckets whose [min_per, max_per] range excludes sofar. */
91 if (sofar < progress_track[i].min_per ||
92 sofar > progress_track[i].max_per)
/* Report each bucket only once. */
94 if (progress_track[i].visited)
96 progress_track[i].visited = 1;
97 printf("..%d%%", sofar);
/*
 * Decide whether the physical page at `pa' belongs in the minidump.
 * Pages backed by a vm_page are dumpable unless PG_NODUMP is set; pages
 * with no vm_page are dumpable iff they fall within a dump_avail range.
 */
103 is_dumpable(vm_paddr_t pa)
108 if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
109 return ((m->flags & PG_NODUMP) == 0);
/* dump_avail[] is a {start, end} pair list terminated by a zero pair. */
111 for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
112 if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
/*
 * Flush the fragment accumulated by blk_write() — fragsz bytes starting
 * at dump_va — to the dump device via dump_append().
 */
119 blk_flush(struct dumperinfo *di)
126 error = dump_append(di, dump_va, 0, fragsz);
132 * Write a block of data to the dump file.
134 * Caller can provide data through a pointer or by specifying its
137 * XXX writes using pa should be no larger than PAGE_SIZE.
140 blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
/* Clamp each write to the device's max I/O size and the staging limit. */
146 maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
147 if (maxdumpsz == 0) /* seatbelt */
148 maxdumpsz = PAGE_SIZE;
/* Sanity checks: page-multiple size, exactly one of ptr/pa, aligned pa. */
150 if ((sz % PAGE_SIZE) != 0) {
151 printf("size not page aligned\n");
154 if (ptr != NULL && pa != 0) {
155 printf("cant have both va and pa!\n");
158 if ((((uintptr_t)pa) % PAGE_SIZE) != 0) {
159 printf("address not page aligned %#lx\n", (uintptr_t)pa);
164 * If we're doing a virtual dump, flush any
165 * pre-existing pa pages.
167 error = blk_flush(di);
/* Fill the remainder of the current fragment. */
172 len = maxdumpsz - fragsz;
178 report_progress(progress, dumpsize);
/* Keep the low 22 bits so a report fires about every 4 MB written. */
179 counter &= (1 << 22) - 1;
/* Pet the watchdog so a long dump does not trigger a hardware reset. */
182 wdog_kern_pat(WD_LASTVAL);
/* Pointer-based data goes straight to the device... */
185 error = dump_append(di, ptr, 0, len);
/* ...physical pages are staged through the direct map. */
191 dump_va = (void *)PHYS_TO_DMAP(pa);
195 error = blk_flush(di);
200 /* Check for user abort */
205 printf(" (CTRL-C to abort) ");
/*
 * Write a RISC-V minidump to the dump device `di': only pages mapped by
 * the kernel pmap are included, preceded by a minidump header, the
 * msgbuf, dump_avail, the vm_page_dump bitmap, and the kernel L3 page
 * tables (synthesized for L2 superpages) so the dump can be decoded.
 */
212 minidumpsys(struct dumperinfo *di)
216 struct minidumphdr mdhdr;
230 /* Build set of dumpable pages from kernel pmap */
231 for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
232 pmapsize += PAGE_SIZE;
233 if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3))
236 /* We should always be using the l2 table for kvm */
240 /* l2 may be a superpage */
241 if ((*l2 & PTE_RWX) != 0) {
/* Superpage base PA comes from the PTE's upper PPN field. */
242 pa = (*l2 >> PTE_PPN1_S) << L2_SHIFT;
/* Mark every 4K page covered by the superpage. */
243 for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) {
248 for (i = 0; i < Ln_ENTRIES; i++) {
249 if ((l3[i] & PTE_V) == 0)
/* Convert the L3 PTE's page-frame number to a physical address. */
251 pa = (l3[i] >> PTE_PPN0_S) * PAGE_SIZE;
258 /* Calculate dump size */
260 dumpsize += round_page(msgbufp->msg_size);
261 dumpsize += round_page(sizeof(dump_avail));
262 dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
263 VM_PAGE_DUMP_FOREACH(pa) {
264 /* Clear out undumpable pages now if needed */
266 dumpsize += PAGE_SIZE;
270 dumpsize += PAGE_SIZE;
274 /* Initialize mdhdr */
275 bzero(&mdhdr, sizeof(mdhdr));
276 strcpy(mdhdr.magic, MINIDUMP_MAGIC);
277 mdhdr.version = MINIDUMP_VERSION;
278 mdhdr.msgbufsize = msgbufp->msg_size;
279 mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages));
280 mdhdr.pmapsize = pmapsize;
281 mdhdr.kernbase = KERNBASE;
282 mdhdr.dmapphys = DMAP_MIN_PHYSADDR;
283 mdhdr.dmapbase = DMAP_MIN_ADDRESS;
284 mdhdr.dmapend = DMAP_MAX_ADDRESS;
285 mdhdr.dumpavailsize = round_page(sizeof(dump_avail));
287 dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_RISCV_VERSION,
290 error = dump_start(di, &kdh);
/* NOTE(review): %llu expects unsigned long long but the cast is to
 * (long long) — harmless for realistic sizes, but worth confirming. */
294 printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
295 ptoa((uintmax_t)physmem) / 1048576);
297 /* Dump minidump header */
298 bzero(&tmpbuffer, sizeof(tmpbuffer));
299 bcopy(&mdhdr, &tmpbuffer, sizeof(mdhdr));
300 error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
304 /* Dump msgbuf up front */
305 error = blk_write(di, (char *)msgbufp->msg_ptr, 0,
306 round_page(msgbufp->msg_size));
310 /* Dump dump_avail */
311 _Static_assert(sizeof(dump_avail) <= sizeof(tmpbuffer),
312 "Large dump_avail not handled");
313 bzero(tmpbuffer, sizeof(tmpbuffer));
314 memcpy(tmpbuffer, dump_avail, sizeof(dump_avail));
315 error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
/* Dump the vm_page_dump bitmap. */
320 error = blk_write(di, (char *)vm_page_dump, 0,
321 round_page(BITSET_SIZE(vm_page_dump_pages)));
325 /* Dump kernel page directory pages */
326 bzero(&tmpbuffer, sizeof(tmpbuffer));
327 for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
328 if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3)) {
329 /* We always write a page, even if it is zero */
330 error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
333 /* Flush, in case we reuse tmpbuffer in the same block */
334 error = blk_flush(di);
337 } else if ((*l2 & PTE_RWX) != 0) {
338 /* Generate fake l3 entries based on the l2 superpage */
339 for (i = 0; i < Ln_ENTRIES; i++) {
340 tmpbuffer[i] = (*l2 | (i << PTE_PPN0_S));
342 /* We always write a page, even if it is zero */
343 error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
346 /* Flush, in case we reuse tmpbuffer in the same block */
347 error = blk_flush(di);
350 bzero(&tmpbuffer, sizeof(tmpbuffer));
/* l2 references a real L3 table; dump that physical page directly. */
352 pa = (*l2 >> PTE_PPN0_S) * PAGE_SIZE;
354 /* We always write a page, even if it is zero */
355 error = blk_write(di, NULL, pa, PAGE_SIZE);
361 /* Dump memory chunks */
362 /* XXX cluster it up and use blk_dump() */
363 VM_PAGE_DUMP_FOREACH(pa) {
364 error = blk_write(di, 0, pa, PAGE_SIZE);
369 error = blk_flush(di);
373 error = dump_finish(di, &kdh);
377 printf("\nDump complete\n");
/* Failure reporting: ENOSPC can mean the dump grew mid-dump (retried up
 * to 5 times), ECANCELED is a user abort, E2BIG a too-small partition. */
385 if (error == ENOSPC) {
386 printf("Dump map grown while dumping. ");
387 if (retry_count < 5) {
388 printf("Retrying...\n");
391 printf("Dump failed.\n");
393 else if (error == ECANCELED)
394 printf("Dump aborted\n");
395 else if (error == E2BIG) {
396 printf("Dump failed. Partition too small (about %lluMB were "
397 "needed this time).\n", (long long)dumpsize >> 20);
399 printf("** DUMP FAILED (ERROR %d) **\n", error);