2 * Copyright (c) 2006 Peter Wemm
3 * Copyright (c) 2015 The FreeBSD Foundation
6 * This software was developed by Andrew Turner under
7 * sponsorship from the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include "opt_watchdog.h"
37 #include "opt_watchdog.h"
39 #include <sys/param.h>
40 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/kerneldump.h>
45 #include <sys/msgbuf.h>
46 #include <sys/watchdog.h>
49 #include <vm/vm_param.h>
50 #include <vm/vm_page.h>
51 #include <vm/vm_phys.h>
54 #include <machine/md_var.h>
55 #include <machine/pmap.h>
56 #include <machine/pte.h>
57 #include <machine/vmparam.h>
58 #include <machine/minidump.h>
/*
 * NOTE(review): this excerpt has lines missing (embedded original line
 * numbers are non-contiguous); confirm all of the below against the full
 * source file.
 */
/* The on-disk kernel dump header must be exactly one 512-byte sector. */
60 CTASSERT(sizeof(struct kerneldumpheader) == 512);
/*
63 * Don't touch the first SIZEOF_METADATA bytes on the dump device. This
64 * is to protect us from metadata and to protect metadata from us.
 */
66 #define SIZEOF_METADATA (64*1024)
/*
 * Bitmap with one bit per physical page; set bits select the pages that
 * are written into the minidump (see dump_add_page()/dump_drop_page()).
 */
68 uint64_t *vm_page_dump;
69 int vm_page_dump_size;
/* Dump header written to the device before and after the dump data. */
71 static struct kerneldumpheader kdh;
74 /* Handle chunked writes. */
/*
 * counter: bytes written since the last progress report / watchdog pat.
 * progress: bytes still remaining (counts down; see report_progress()).
 * dumpsize: total size of the dump in bytes.
 */
77 static size_t counter, progress, dumpsize;
/* One page of scratch space (used e.g. for fake L3 page-table pages). */
79 static uint64_t tmpbuffer[PAGE_SIZE / sizeof(uint64_t)];
/* The page bitmap code assumes 64-bit words. */
81 CTASSERT(sizeof(*vm_page_dump) == 8);
/*
 * Decide whether the physical page at 'pa' should be included in the dump.
 * A page that has a vm_page structure is dumpable unless PG_NODUMP is set;
 * otherwise fall back to scanning the dump_avail[] ranges.
 *
 * NOTE(review): the return type / storage class, local declarations, loop
 * body and final return are missing from this excerpt — this fragment does
 * not compile as-is; restore from the full source.
 */
84 is_dumpable(vm_paddr_t pa)
89 	if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
90 		return ((m->flags & PG_NODUMP) == 0);
/* dump_avail[] is a (start, end) pair list terminated by a zero pair. */
91 	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
92 		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
/*
 * Flush the partially filled block buffer accumulated by blk_write() to
 * the dump device at offset 'dumplo'.
 *
 * NOTE(review): only the signature and the dump_write() call survive in
 * this excerpt; the fragsz/dumplo bookkeeping and return are missing.
 */
99 blk_flush(struct dumperinfo *di)
106 		error = dump_write(di, dump_va, 0, dumplo, fragsz);
/*
 * NOTE(review): tail of the progress_track[] declaration; the struct head
 * (presumably min_per/max_per/visited fields) and the initializer list are
 * missing from this excerpt.
 */
116 } progress_track[10] = {
/*
 * Print "..N%" to the console as the dump proceeds, at most once per
 * progress_track[] bucket.  'progress' counts bytes still to be written,
 * so percent-complete is 100 minus the remaining fraction.
 *
 * NOTE(review): function head, local declarations, braces and 'continue'
 * statements are missing from this excerpt.
 */
130 report_progress(size_t progress, size_t dumpsize)
134 	sofar = 100 - ((progress * 100) / dumpsize);
135 	for (i = 0; i < nitems(progress_track); i++) {
/* Skip buckets that don't contain the current percentage. */
136 		if (sofar < progress_track[i].min_per ||
137 		    sofar > progress_track[i].max_per)
/* Report each bucket at most once per dump. */
139 		if (progress_track[i].visited)
141 		progress_track[i].visited = 1;
142 		printf("..%d%%", sofar);
/*
 * Buffer and write 'sz' bytes to the dump device in chunks of at most
 * maxdumpsz.  Exactly one of 'ptr' (kernel virtual address) or 'pa'
 * (physical address) may be supplied; physical pages are reached through
 * the direct map (PHYS_TO_DMAP) instead of being copied.
 *
 * NOTE(review): error returns, fragsz/progress/counter accounting, and the
 * keyboard-abort polling loop are missing from this excerpt.
 */
148 blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
154 	maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
155 	if (maxdumpsz == 0)	/* seatbelt */
156 		maxdumpsz = PAGE_SIZE;
/* Callers must pass whole pages and exactly one address kind. */
158 	if ((sz % PAGE_SIZE) != 0) {
159 		printf("size not page aligned\n");
162 	if (ptr != NULL && pa != 0) {
163 		printf("cant have both va and pa!\n");
166 	if ((((uintptr_t)pa) % PAGE_SIZE) != 0) {
/*
 * NOTE(review): the check is on 'pa' but the message prints 'ptr' (NULL on
 * this path) — looks like a wrong argument; verify against upstream.
 */
167 		printf("address not page aligned %p\n", ptr);
172 	 * If we're doing a virtual dump, flush any
173 	 * pre-existing pa pages.
175 	error = blk_flush(di);
180 	len = maxdumpsz - fragsz;
/* Periodic progress report / watchdog pat, every 4 MB (1 << 22 bytes). */
186 	report_progress(progress, dumpsize);
187 	counter &= (1 << 22) - 1;
190 	wdog_kern_pat(WD_LASTVAL);
/* Virtual data is written directly from 'ptr'... */
193 	error = dump_write(di, ptr, 0, dumplo, len);
/* ...physical pages are staged via their direct-map alias. */
200 	dump_va = (void *)PHYS_TO_DMAP(pa);
204 	error = blk_flush(di);
209 	/* Check for user abort. */
214 	printf(" (CTRL-C to abort) ");
/*
 * Write an arm64 minidump: sizing pass over the kernel page tables, then
 * headers, msgbuf, page bitmap, (possibly synthesized) L3 page-table pages,
 * and finally every physical page marked in vm_page_dump[].
 *
 * NOTE(review): this excerpt is missing many lines (locals, braces, error
 * paths, 'continue'/'goto' statements); the commentary below documents only
 * what survives — restore the rest from the full source before editing.
 */
221 minidumpsys(struct dumperinfo *di)
232 	struct minidumphdr mdhdr;
/*
 * Pass 1: walk kvm one L2 range at a time, counting one page of (real or
 * fake) L3 table per L2_SIZE of address space, and marking leaf pages
 * dumpable.
 */
239 	for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
240 		pmapsize += PAGE_SIZE;
241 		if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3))
244 		/* We should always be using the l2 table for kvm */
/* A 2MB block mapping covers Ln_ENTRIES consecutive 4K pages. */
248 		if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
249 			pa = *l2 & ~ATTR_MASK;
250 			for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) {
254 		} else if ((*l2 & ATTR_DESCR_MASK) == L2_TABLE) {
255 			for (i = 0; i < Ln_ENTRIES; i++) {
256 				if ((l3[i] & ATTR_DESCR_MASK) != L3_PAGE)
258 				pa = l3[i] & ~ATTR_MASK;
265 	/* Calculate dump size. */
267 	dumpsize += round_page(msgbufp->msg_size);
268 	dumpsize += round_page(vm_page_dump_size);
/* Count (and prune) pages from the bitmap, one set bit at a time. */
269 	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
270 		bits = vm_page_dump[i];
272 		bit = ffsl(bits) - 1;
273 		pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
275 			/* Clear out undumpable pages now if needed */
277 			dumpsize += PAGE_SIZE;
280 		bits &= ~(1ul << bit);
283 	dumpsize += PAGE_SIZE;
285 	/* Determine dump offset on device. */
/* Leave SIZEOF_METADATA untouched at the front; dump is placed at the end. */
286 	if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
290 	dumplo = di->mediaoffset + di->mediasize - dumpsize;
291 	dumplo -= sizeof(kdh) * 2;
294 	/* Initialize mdhdr */
295 	bzero(&mdhdr, sizeof(mdhdr));
296 	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
297 	mdhdr.version = MINIDUMP_VERSION;
298 	mdhdr.msgbufsize = msgbufp->msg_size;
299 	mdhdr.bitmapsize = vm_page_dump_size;
300 	mdhdr.pmapsize = pmapsize;
301 	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
302 	mdhdr.dmapphys = DMAP_MIN_PHYSADDR;
303 	mdhdr.dmapbase = DMAP_MIN_ADDRESS;
304 	mdhdr.dmapend = DMAP_MAX_ADDRESS;
306 	mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_AARCH64_VERSION,
307 	    dumpsize, di->blocksize);
/*
 * NOTE(review): "%llu" with a (long long) argument is a signed/unsigned
 * format mismatch; dumpsize is a size_t — consider %ju with a (uintmax_t)
 * cast to match the second argument's style.
 */
309 	printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
310 	    ptoa((uintmax_t)physmem) / 1048576);
/* Leading dump header. */
313 	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
316 	dumplo += sizeof(kdh);
/* Minidump header goes out as one full (zero-padded) page. */
319 	bzero(&tmpbuffer, sizeof(tmpbuffer));
320 	bcopy(&mdhdr, &tmpbuffer, sizeof(mdhdr));
321 	error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
325 	/* Dump msgbuf up front */
326 	error = blk_write(di, (char *)msgbufp->msg_ptr, 0,
327 	    round_page(msgbufp->msg_size));
/* Then the page bitmap itself, so tools can locate the data pages. */
332 	error = blk_write(di, (char *)vm_page_dump, 0,
333 	    round_page(vm_page_dump_size));
337 	/* Dump kernel page directory pages */
338 	bzero(&tmpbuffer, sizeof(tmpbuffer));
339 	for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
340 		if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3)) {
341 			/* We always write a page, even if it is zero */
342 			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
345 			/* flush, in case we reuse tmpbuffer in the same block*/
346 			error = blk_flush(di);
/* No L2: synthesize L3 entries from the covering L1 block mapping. */
349 		} else if (l2 == NULL) {
350 			pa = (*l1 & ~ATTR_MASK) | (va & L1_OFFSET);
352 			/* Generate fake l3 entries based upon the l1 entry */
353 			for (i = 0; i < Ln_ENTRIES; i++) {
354 				tmpbuffer[i] = pa + (i * PAGE_SIZE) |
355 				    ATTR_DEFAULT | L3_PAGE;
357 			/* We always write a page, even if it is zero */
358 			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
361 			/* flush, in case we reuse tmpbuffer in the same block*/
362 			error = blk_flush(di);
365 			bzero(&tmpbuffer, sizeof(tmpbuffer));
/* L2 block mapping: likewise synthesize the L3 page. */
366 		} else if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
367 			/* TODO: Handle an invalid L2 entry */
368 			pa = (*l2 & ~ATTR_MASK) | (va & L2_OFFSET);
/* NOTE(review): comment says "l1 entry" but this branch uses the l2 entry. */
370 			/* Generate fake l3 entries based upon the l1 entry */
371 			for (i = 0; i < Ln_ENTRIES; i++) {
372 				tmpbuffer[i] = pa + (i * PAGE_SIZE) |
373 				    ATTR_DEFAULT | L3_PAGE;
375 			/* We always write a page, even if it is zero */
376 			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
379 			/* flush, in case we reuse fakepd in the same block */
380 			error = blk_flush(di);
383 			bzero(&tmpbuffer, sizeof(tmpbuffer));
/* Real L2_TABLE: dump the existing L3 page by physical address. */
386 			pa = *l2 & ~ATTR_MASK;
388 			/* We always write a page, even if it is zero */
389 			error = blk_write(di, NULL, pa, PAGE_SIZE);
395 	/* Dump memory chunks */
396 	/* XXX cluster it up and use blk_dump() */
397 	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
398 		bits = vm_page_dump[i];
400 		bit = ffsl(bits) - 1;
401 		pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
403 		error = blk_write(di, 0, pa, PAGE_SIZE);
406 		bits &= ~(1ul << bit);
410 	error = blk_flush(di);
/* Trailing copy of the dump header marks the dump as complete. */
415 	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
418 	dumplo += sizeof(kdh);
420 	/* Signal completion, signoff and exit stage left. */
421 	dump_write(di, NULL, 0, 0, 0);
422 	printf("\nDump complete\n");
/* Error epilogue: translate the failure cause for the console. */
430 	if (error == ENOSPC) {
431 		printf("Dump map grown while dumping. ");
432 		if (retry_count < 5) {
433 			printf("Retrying...\n");
436 		printf("Dump failed.\n");
438 	else if (error == ECANCELED)
439 		printf("Dump aborted\n");
440 	else if (error == E2BIG)
441 		printf("Dump failed. Partition too small.\n");
443 		printf("** DUMP FAILED (ERROR %d) **\n", error);
/*
 * Mark the physical page at 'pa' for inclusion in the minidump by setting
 * its bit in vm_page_dump[].  Atomic so it is safe against concurrent
 * add/drop on the same bitmap word.
 *
 * NOTE(review): the return type, braces, and the lines deriving the page
 * frame number and 'bit' (presumably pa >>= PAGE_SHIFT; bit = pa & 63) are
 * missing from this excerpt — confirm against the full source.
 */
448 dump_add_page(vm_paddr_t pa)
453 	idx = pa >> 6;		/* 2^6 = 64 */
455 	atomic_set_long(&vm_page_dump[idx], 1ul << bit);
/*
 * Remove the physical page at 'pa' from the minidump by clearing its bit
 * in vm_page_dump[].  Mirror image of dump_add_page().
 *
 * NOTE(review): as with dump_add_page(), the function head and the 'bit'
 * computation are missing from this excerpt — confirm against the full
 * source.
 */
459 dump_drop_page(vm_paddr_t pa)
464 	idx = pa >> 6;		/* 2^6 = 64 */
466 	atomic_clear_long(&vm_page_dump[idx], 1ul << bit);