2 * Copyright (c) 2006 Peter Wemm
3 * Copyright (c) 2015 The FreeBSD Foundation
6 * This software was developed by Andrew Turner under
7 * sponsorship from the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include "opt_watchdog.h"
37 #include "opt_watchdog.h"
39 #include <sys/param.h>
40 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/kerneldump.h>
45 #include <sys/msgbuf.h>
46 #include <sys/watchdog.h>
49 #include <vm/vm_param.h>
50 #include <vm/vm_page.h>
51 #include <vm/vm_phys.h>
54 #include <machine/md_var.h>
55 #include <machine/pte.h>
56 #include <machine/minidump.h>
58 CTASSERT(sizeof(struct kerneldumpheader) == 512);
/*
 * NOTE(review): this excerpt is gappy — the leading integers on each line
 * are original source line numbers baked in by the extraction, and several
 * lines (including comment delimiters) are missing.  Comments below
 * describe only what the visible lines show.
 */
61 * Don't touch the first SIZEOF_METADATA bytes on the dump device. This
62 * is to protect us from metadata and to protect metadata from us.
64 #define SIZEOF_METADATA (64*1024)
/*
 * Bitmap of physical pages to include in the minidump, one bit per page
 * (64 pages per uint64_t word); size in bytes is vm_page_dump_size.
 */
66 uint64_t *vm_page_dump;
67 int vm_page_dump_size;
/* On-disk kernel dump header, written both before and after the dump data. */
69 static struct kerneldumpheader kdh;
72 /* Handle chunked writes. */
75 static size_t counter, progress, dumpsize;
/* One-page scratch buffer used to stage the minidump header and fake L3 tables. */
77 static uint64_t tmpbuffer[PAGE_SIZE / sizeof(uint64_t)];
/* The bitmap walk below assumes 64-bit words. */
79 CTASSERT(sizeof(*vm_page_dump) == 8);
/*
 * Decide whether the physical page at 'pa' should be included in the dump.
 * Managed pages honor their PG_NODUMP flag; unmanaged addresses fall back
 * to a scan of the dump_avail[] ranges.  NOTE(review): the return-type
 * line, the loop's return statements, and the closing brace are missing
 * from this excerpt.
 */
82 is_dumpable(vm_paddr_t pa)
/* If the page is managed, its flags are authoritative. */
87 	if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
88 		return ((m->flags & PG_NODUMP) == 0);
/* dump_avail[] is a zero-terminated list of (start, end) physaddr pairs. */
89 	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
90 		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
/*
 * Flush any buffered virtual-address data (dump_va / fragsz, accumulated
 * by blk_write()) to the dump device at offset 'dumplo'.  NOTE(review):
 * only the signature and the dump_write() call survive in this excerpt;
 * the fragsz reset and dumplo advance are missing.
 */
97 blk_flush(struct dumperinfo *di)
104 	error = dump_write(di, dump_va, 0, dumplo, fragsz);
/*
 * Tail of the progress_track[] table: ten {min_per, max_per, visited}
 * percentage bands used to print each 10% milestone exactly once.
 * NOTE(review): the struct definition and the element initializers are
 * missing from this excerpt.
 */
114 } progress_track[10] = {
/*
 * Print "..N%" as the dump advances.  'progress' counts bytes remaining
 * (it decreases toward zero), so the completed fraction is
 * 100 - progress*100/dumpsize.  Each band fires at most once via its
 * 'visited' flag.
 */
128 report_progress(size_t progress, size_t dumpsize)
132 	sofar = 100 - ((progress * 100) / dumpsize);
133 	for (i = 0; i < nitems(progress_track); i++) {
134 		if (sofar < progress_track[i].min_per ||
135 		    sofar > progress_track[i].max_per)
137 		if (progress_track[i].visited)
139 		progress_track[i].visited = 1;
140 		printf("..%d%%", sofar);
/*
 * Write 'sz' bytes to the dump device, given EITHER a kernel virtual
 * address 'ptr' OR a physical address 'pa' (exactly one must be set).
 * Writes are chunked to the device's max I/O size; physical pages are
 * accessed through the direct map (PHYS_TO_DMAP).  Also drives progress
 * reporting, watchdog patting, and user-abort polling.  NOTE(review):
 * many lines (locals, error returns, the main loop framing, closing
 * brace) are missing from this excerpt.
 */
146 blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
152 	maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
153 	if (maxdumpsz == 0)	/* seatbelt */
154 		maxdumpsz = PAGE_SIZE;
/* Callers must pass whole pages and exactly one of (ptr, pa). */
156 	if ((sz % PAGE_SIZE) != 0) {
157 		printf("size not page aligned\n");
160 	if (ptr != NULL && pa != 0) {
161 		printf("cant have both va and pa!\n");
164 	if ((((uintptr_t)pa) % PAGE_SIZE) != 0) {
165 		printf("address not page aligned %p\n", ptr);
170 	 * If we're doing a virtual dump, flush any
171 	 * pre-existing pa pages.
173 		error = blk_flush(di);
178 		len = maxdumpsz - fragsz;
/*
 * Progress is reported and the counter masked to 4 MiB (1 << 22), i.e.
 * presumably one report per ~4 MiB written — confirm against the missing
 * counter-increment line.
 */
184 			report_progress(progress, dumpsize);
185 			counter &= (1 << 22) - 1;
188 		wdog_kern_pat(WD_LASTVAL);
/* Virtual data is written directly; physical pages are batched via dump_va. */
191 			error = dump_write(di, ptr, 0, dumplo, len);
198 			dump_va = (void *)PHYS_TO_DMAP(pa);
202 			error = blk_flush(di);
207 		/* Check for user abort. */
212 			printf(" (CTRL-C to abort) ");
/*
 * Write an aarch64 minidump to the configured dump device:
 *   1. walk the kernel page tables to size the page-table dump (pmapsize);
 *   2. size the full dump (msgbuf + bitmap + page tables + data pages);
 *   3. write kernel dump header, minidump header, msgbuf, and bitmap;
 *   4. write one L3 page-table page per L2 slot, synthesizing fake L3
 *      entries for L1/L2 block mappings;
 *   5. write every physical page whose bit is set in vm_page_dump;
 *   6. write the trailing dump header and signal completion.
 * NOTE(review): a large number of lines (locals, error paths, the retry
 * label, closing braces) are missing from this excerpt; comments describe
 * only the visible code.
 */
219 minidumpsys(struct dumperinfo *di)
221 	pd_entry_t *l0, *l1, *l2;
230 	struct minidumphdr mdhdr;
/* Pass 1: size the page-table portion, one page per L2 (2 MiB) slot. */
237 	for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
238 		pmapsize += PAGE_SIZE;
239 		if (!pmap_get_tables(pmap_kernel(), va, &l0, &l1, &l2, &l3))
242 		/* We should always be using the l2 table for kvm */
/* An L2 block maps 2 MiB directly; mark each constituent page dumpable. */
246 		if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
247 			pa = *l2 & ~ATTR_MASK;
248 			for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) {
252 		} else if ((*l2 & ATTR_DESCR_MASK) == L2_TABLE) {
253 			for (i = 0; i < Ln_ENTRIES; i++) {
254 				if ((l3[i] & ATTR_DESCR_MASK) != L3_PAGE)
256 				pa = l3[i] & ~ATTR_MASK;
263 	/* Calculate dump size. */
265 	dumpsize += round_page(msgbufp->msg_size);
266 	dumpsize += round_page(vm_page_dump_size);
/* One page of data per set bit in the bitmap; ffsl() finds set bits. */
267 	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
268 		bits = vm_page_dump[i];
270 			bit = ffsl(bits) - 1;
271 			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
273 			/* Clear out undumpable pages now if needed */
275 				dumpsize += PAGE_SIZE;
278 			bits &= ~(1ul << bit);
281 	dumpsize += PAGE_SIZE;
283 	/* Determine dump offset on device. */
/* Keep clear of the first SIZEOF_METADATA bytes; two headers bracket the data. */
284 	if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
288 	dumplo = di->mediaoffset + di->mediasize - dumpsize;
289 	dumplo -= sizeof(kdh) * 2;
292 	/* Initialize mdhdr */
293 	bzero(&mdhdr, sizeof(mdhdr));
294 	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
295 	mdhdr.version = MINIDUMP_VERSION;
296 	mdhdr.msgbufsize = msgbufp->msg_size;
297 	mdhdr.bitmapsize = vm_page_dump_size;
298 	mdhdr.pmapsize = pmapsize;
299 	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
300 	mdhdr.dmapphys = DMAP_MIN_PHYSADDR;
301 	mdhdr.dmapbase = DMAP_MIN_ADDRESS;
302 	mdhdr.dmapend = DMAP_MAX_ADDRESS;
304 	mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_AARCH64_VERSION,
305 	    dumpsize, di->blocksize);
/*
 * NOTE(review): "%llu" paired with a (long long) cast is a
 * signed/unsigned format mismatch — confirm against upstream.
 */
307 	printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
308 	    ptoa((uintmax_t)physmem) / 1048576);
/* Leading kernel dump header. */
311 	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
314 	dumplo += sizeof(kdh);
/* Minidump header, padded to a full page via tmpbuffer. */
317 	bzero(&tmpbuffer, sizeof(tmpbuffer));
318 	bcopy(&mdhdr, &tmpbuffer, sizeof(mdhdr));
319 	error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
323 	/* Dump msgbuf up front */
324 	error = blk_write(di, (char *)msgbufp->msg_ptr, 0,
325 	    round_page(msgbufp->msg_size));
/* Dump the page-inclusion bitmap itself. */
330 	error = blk_write(di, (char *)vm_page_dump, 0,
331 	    round_page(vm_page_dump_size));
335 	/* Dump kernel page directory pages */
336 	bzero(&tmpbuffer, sizeof(tmpbuffer));
337 	for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
338 		if (!pmap_get_tables(pmap_kernel(), va, &l0, &l1, &l2, &l3)) {
339 			/* We always write a page, even if it is zero */
340 			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
343 			/* flush, in case we reuse tmpbuffer in the same block*/
344 			error = blk_flush(di);
/* L1 block (1 GiB) mapping: synthesize the L3 page it would imply. */
347 		} else if (l2 == NULL) {
348 			pa = (*l1 & ~ATTR_MASK) | (va & L1_OFFSET);
350 			/* Generate fake l3 entries based upon the l1 entry */
351 			for (i = 0; i < Ln_ENTRIES; i++) {
352 				tmpbuffer[i] = pa + (i * PAGE_SIZE) |
353 				    ATTR_DEFAULT | L3_PAGE;
355 			/* We always write a page, even if it is zero */
356 			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
359 			/* flush, in case we reuse tmpbuffer in the same block*/
360 			error = blk_flush(di);
363 			bzero(&tmpbuffer, sizeof(tmpbuffer));
/* L2 block (2 MiB) mapping: likewise synthesize a fake L3 page. */
364 		} else if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
365 			/* TODO: Handle an invalid L2 entry */
366 			pa = (*l2 & ~ATTR_MASK) | (va & L2_OFFSET);
368 			/* Generate fake l3 entries based upon the l1 entry */
369 			for (i = 0; i < Ln_ENTRIES; i++) {
370 				tmpbuffer[i] = pa + (i * PAGE_SIZE) |
371 				    ATTR_DEFAULT | L3_PAGE;
373 			/* We always write a page, even if it is zero */
374 			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
377 			/* flush, in case we reuse fakepd in the same block */
378 			error = blk_flush(di);
381 			bzero(&tmpbuffer, sizeof(tmpbuffer));
/* Real L2_TABLE entry: dump the actual L3 page by physical address. */
384 			pa = *l2 & ~ATTR_MASK;
386 			/* We always write a page, even if it is zero */
387 			error = blk_write(di, NULL, pa, PAGE_SIZE);
393 	/* Dump memory chunks */
394 	/* XXX cluster it up and use blk_dump() */
395 	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
396 		bits = vm_page_dump[i];
398 			bit = ffsl(bits) - 1;
399 			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
401 			error = blk_write(di, 0, pa, PAGE_SIZE);
404 			bits &= ~(1ul << bit);
408 	error = blk_flush(di);
/* Trailing kernel dump header (same contents as the leading one). */
413 	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
416 	dumplo += sizeof(kdh);
418 	/* Signal completion, signoff and exit stage left. */
419 	dump_write(di, NULL, 0, 0, 0);
420 	printf("\nDump complete\n");
/* Error reporting; ENOSPC means the dump map grew mid-dump and is retried. */
428 	if (error == ENOSPC) {
429 		printf("Dump map grown while dumping. ");
430 		if (retry_count < 5) {
431 			printf("Retrying...\n");
434 		printf("Dump failed.\n");
436 	else if (error == ECANCELED)
437 		printf("Dump aborted\n");
438 	else if (error == E2BIG)
439 		printf("Dump failed. Partition too small.\n");
441 		printf("** DUMP FAILED (ERROR %d) **\n", error);
/*
 * Mark the page containing physical address 'pa' for inclusion in the
 * minidump by atomically setting its bit in vm_page_dump.  NOTE(review):
 * the 'pa >>= PAGE_SHIFT;' and 'bit = pa & 63;' lines are missing from
 * this excerpt — 'idx >> 6' selects the 64-bit bitmap word.
 */
446 dump_add_page(vm_paddr_t pa)
451 	idx = pa >> 6;		/* 2^6 = 64 */
453 	atomic_set_long(&vm_page_dump[idx], 1ul << bit);
/*
 * Exclude the page containing physical address 'pa' from the minidump by
 * atomically clearing its bit in vm_page_dump (mirror of dump_add_page).
 * NOTE(review): the page-shift and 'bit = pa & 63;' lines are missing
 * from this excerpt.
 */
457 dump_drop_page(vm_paddr_t pa)
462 	idx = pa >> 6;		/* 2^6 = 64 */
464 	atomic_clear_long(&vm_page_dump[idx], 1ul << bit);