/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_pmap.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/msgbuf.h>
#include <sys/sysctl.h>
#include <sys/watchdog.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_dumpset.h>
#include <vm/pmap.h>
#include <machine/atomic.h>
#include <machine/elf.h>
#include <machine/md_var.h>
#include <machine/minidump.h>
#include <machine/vmparam.h>

CTASSERT(sizeof(struct kerneldumpheader) == 512);
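
/*
 * The dump header is written to the dump device as a raw block, so it must
 * be exactly one 512-byte disk sector; the assertion above enforces that at
 * compile time.
 */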

static struct kerneldumpheader kdh;

/* Handle chunked writes. */
static size_t fragsz;
static void *dump_va;
static size_t progress, dumpsize, wdog_next;

static int dump_retry_count = 5;
SYSCTL_INT(_machdep, OID_AUTO, dump_retry_count, CTLFLAG_RWTUN,
    &dump_retry_count, 0, "Number of times dump has to retry before bailing out");
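
/*
 * Usage example: because the OID above is CTLFLAG_RWTUN, the retry limit
 * can be adjusted at runtime with "sysctl machdep.dump_retry_count=10", or
 * preset at boot via "machdep.dump_retry_count=10" in loader.conf(5).
 */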

static int
blk_flush(struct dumperinfo *di)
{
	int error;

	if (fragsz == 0)
		return (0);

	error = dump_append(di, dump_va, 0, fragsz);
	fragsz = 0;
	return (error);
}
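
/*
 * Note on the chunking scheme: blk_flush() drains whatever fragment
 * blk_write() has accumulated so far.  Physical pages are mapped one at a
 * time into the crash dump map by pmap_kenter_temporary() and collected in
 * dump_va/fragsz until a full maxdumpsz chunk can go to dump_append() as a
 * single I/O.
 */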

/* Pat the watchdog approximately every 128MB of the dump. */
#define	WDOG_DUMP_INTERVAL	(128 * 1024 * 1024)
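
/*
 * Illustrative arithmetic: progress counts down from dumpsize and wdog_next
 * drops in WDOG_DUMP_INTERVAL steps, so a 16GB minidump pats the watchdog
 * about 16GB / 128MB = 128 times, independent of device throughput.
 */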

static int
blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
{
	size_t len;
	int error, i, c;
	u_int maxdumpsz;

	maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
	if (maxdumpsz == 0)	/* seatbelt */
		maxdumpsz = PAGE_SIZE;
	error = 0;
	if ((sz % PAGE_SIZE) != 0) {
		printf("size not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL && pa != 0) {
		printf("can't have both va and pa!\n");
		return (EINVAL);
	}
	if ((((uintptr_t)pa) % PAGE_SIZE) != 0) {
		printf("address not page aligned %#jx\n", (uintmax_t)pa);
		return (EINVAL);
	}
	if (ptr != NULL) {
		/* If we're doing a virtual dump, flush any pre-existing pa pages. */
		error = blk_flush(di);
		if (error != 0)
			return (error);
	}
	while (sz) {
		len = maxdumpsz - fragsz;
		if (len > sz)
			len = sz;

		progress -= len;
		dumpsys_pb_progress(len);
		if (progress <= wdog_next) {
			wdog_kern_pat(WD_LASTVAL);
			if (wdog_next > WDOG_DUMP_INTERVAL)
				wdog_next -= WDOG_DUMP_INTERVAL;
			else
				wdog_next = 0;
		}

		if (ptr) {
			error = dump_append(di, ptr, 0, len);
			if (error != 0)
				return (error);
			ptr += len;
			sz -= len;
		} else {
			for (i = 0; i < len; i += PAGE_SIZE)
				dump_va = pmap_kenter_temporary(pa + i,
				    (i + fragsz) >> PAGE_SHIFT);
			fragsz += len;
			pa += len;
			sz -= len;
			if (fragsz == maxdumpsz) {
				error = blk_flush(di);
				if (error != 0)
					return (error);
			}
		}

		/* Check for user abort. */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			printf(" (CTRL-C to abort) ");
	}

	return (0);
}

/* A fake page table page, to avoid having to handle both 4K and 2M pages. */
static pd_entry_t fakepd[NPDEPG];
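
/*
 * Sizing note: NPDEPG (512) entries of 8 bytes each make fakepd exactly one
 * 4KB page, so it can stand in for a real page directory page in the dump
 * and double as scratch space for the header and dump_avail below.
 */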

int
cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
{
	uint32_t pmapsize;
	vm_offset_t va, kva_end;
	int error;
	uint64_t *pml4, *pdp, *pd, *pt, pa;
	uint64_t pdpe, pde, pte;
	int ii, j, k, n;
	int retry_count;
	struct minidumphdr mdhdr;
	struct msgbuf *mbp;

	retry_count = 0;
retry:
	retry_count++;

	/* Snapshot the KVA upper bound in case it grows. */
	kva_end = MAX(KERNBASE + nkpt * NBPDR, kernel_vm_end);

	/*
	 * Walk the kernel page table pages, setting the active entries in the
	 * dump bitmap.
	 *
	 * NB: for a live dump, we may be racing with updates to the page
	 * tables, so care must be taken to read each entry only once.
	 */
	pmapsize = 0;
	for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; ) {
		/*
		 * We always write a page, even if it is zero. Each
		 * page written corresponds to 1GB of space.
		 */
		pmapsize += PAGE_SIZE;
		ii = pmap_pml4e_index(va);
		pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii;
		pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
		pdpe = atomic_load_64(&pdp[pmap_pdpe_index(va)]);
		if ((pdpe & PG_V) == 0) {
			va += NBPDP;
			continue;
		}

		/*
		 * 1GB page is represented as 512 2MB pages in a dump.
		 */
		if ((pdpe & PG_PS) != 0) {
			va += NBPDP;
			pa = pdpe & PG_PS_FRAME;
			for (n = 0; n < NPDEPG * NPTEPG; n++) {
				if (vm_phys_is_dumpable(pa))
					vm_page_dump_add(vm_page_dump, pa);
				pa += PAGE_SIZE;
			}
			continue;
		}

		pd = (uint64_t *)PHYS_TO_DMAP(pdpe & PG_FRAME);
		for (n = 0; n < NPDEPG; n++, va += NBPDR) {
			pde = atomic_load_64(&pd[pmap_pde_index(va)]);

			if ((pde & PG_V) == 0)
				continue;

			if ((pde & PG_PS) != 0) {
				/* This is an entire 2M page. */
				pa = pde & PG_PS_FRAME;
				for (k = 0; k < NPTEPG; k++) {
					if (vm_phys_is_dumpable(pa))
						vm_page_dump_add(vm_page_dump, pa);
					pa += PAGE_SIZE;
				}
				continue;
			}

			pa = pde & PG_FRAME;
			/* Set the bit for this PTE page... */
			if (vm_phys_is_dumpable(pa))
				vm_page_dump_add(vm_page_dump, pa);
			/* ...and for each valid page in this 2MB block. */
			pt = (uint64_t *)PHYS_TO_DMAP(pde & PG_FRAME);
			for (k = 0; k < NPTEPG; k++) {
				pte = atomic_load_64(&pt[k]);
				if ((pte & PG_V) == 0)
					continue;
				pa = pte & PG_FRAME;
				if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa))
					vm_page_dump_add(vm_page_dump, pa);
			}
		}
	}
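
	/*
	 * To recap the walk above: a 1GB PDP mapping is recorded as if it
	 * were 512 2MB pages, a 2MB PDE mapping marks its 512 backing 4KB
	 * frames, and each valid 4KB PTE marks a single frame; in all cases
	 * only pages accepted by vm_phys_is_dumpable() enter the bitmap.
	 */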

	/* Calculate dump size. */
	mbp = state->msgbufp;
	dumpsize = pmapsize;
	dumpsize += round_page(mbp->msg_size);
	dumpsize += round_page(sizeof(dump_avail));
	dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
	VM_PAGE_DUMP_FOREACH(pa) {
		/* Clear out undumpable pages now if needed. */
		if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) {
			dumpsize += PAGE_SIZE;
		} else {
			vm_page_dump_drop(vm_page_dump, pa);
		}
	}
	dumpsize += PAGE_SIZE;

	wdog_next = progress = dumpsize;
	dumpsys_pb_init(dumpsize);
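
	/*
	 * The sizes accumulated above fix the on-disk layout produced below:
	 * one page holding mdhdr (the extra PAGE_SIZE), the rounded-up
	 * message buffer, one page of dump_avail, the page bitmap, pmapsize
	 * bytes of page table pages, and finally every marked physical page.
	 */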

	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	mdhdr.version = MINIDUMP_VERSION;
	mdhdr.msgbufsize = mbp->msg_size;
	mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages));
	mdhdr.pmapsize = pmapsize;
	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
	mdhdr.dmapbase = DMAP_MIN_ADDRESS;
	mdhdr.dmapend = DMAP_MAX_ADDRESS;
	mdhdr.dumpavailsize = round_page(sizeof(dump_avail));
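
	/*
	 * These fields are what a minidump reader needs to rebuild the
	 * address space: consumers such as libkvm locate the bitmap, page
	 * tables, and page data from the sizes recorded here, while
	 * kernbase/dmapbase/dmapend anchor the KVA and direct-map ranges.
	 */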

	dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_AMD64_VERSION,
	    dumpsize);

	error = dump_start(di, &kdh);
	if (error != 0)
		goto fail;

	printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
	    ptoa((uintmax_t)physmem) / 1048576);

	/* Dump my header */
	bzero(&fakepd, sizeof(fakepd));
	bcopy(&mdhdr, &fakepd, sizeof(mdhdr));
	error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump msgbuf up front */
	error = blk_write(di, mbp->msg_ptr, 0, round_page(mbp->msg_size));
	if (error)
		goto fail;

	/* Dump dump_avail */
	_Static_assert(sizeof(dump_avail) <= sizeof(fakepd),
	    "Large dump_avail not handled");
	bzero(&fakepd, sizeof(fakepd));
	memcpy(fakepd, dump_avail, sizeof(dump_avail));
	error = blk_write(di, (char *)fakepd, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump bitmap */
	error = blk_write(di, (char *)vm_page_dump, 0,
	    round_page(BITSET_SIZE(vm_page_dump_pages)));
	if (error)
		goto fail;

	/* Dump kernel page directory pages */
	bzero(fakepd, sizeof(fakepd));
	for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; va += NBPDP) {
		ii = pmap_pml4e_index(va);
		pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii;
		pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
		pdpe = atomic_load_64(&pdp[pmap_pdpe_index(va)]);

		/* We always write a page, even if it is zero */
		if ((pdpe & PG_V) == 0) {
			error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse fakepd in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			continue;
		}

		/* 1GB page is represented as 512 2MB pages in a dump */
		if ((pdpe & PG_PS) != 0) {
			/* PDPE and PDP have identical layout in this case. */
			fakepd[0] = pdpe;
			for (j = 1; j < NPDEPG; j++)
				fakepd[j] = fakepd[j - 1] + NBPDR;
			error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse fakepd in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			bzero(fakepd, sizeof(fakepd));
			continue;
		}
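
		/*
		 * Illustration of the expansion above: fakepd[0] carries
		 * pdpe's flag bits (PG_V and PG_PS included), and each
		 * successive entry advances the physical address by NBPDR
		 * (2MB), so one 1GB mapping reads back as 512 consecutive,
		 * valid 2MB page directory entries.
		 */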

		pa = pdpe & PG_FRAME;
		if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) {
			pd = (uint64_t *)PHYS_TO_DMAP(pa);
			error = blk_write(di, (char *)pd, 0, PAGE_SIZE);
		} else {
			/* Malformed pa, write the zeroed fakepd. */
			error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
		}
		if (error)
			goto fail;
		error = blk_flush(di);
		if (error)
			goto fail;
	}

	/* Dump memory chunks */
	VM_PAGE_DUMP_FOREACH(pa) {
		error = blk_write(di, 0, pa, PAGE_SIZE);
		if (error)
			goto fail;
	}

	error = blk_flush(di);
	if (error)
		goto fail;

	error = dump_finish(di, &kdh);
	if (error != 0)
		goto fail;

	printf("\nDump complete\n");
	return (0);

fail:
	if (error < 0)
		error = -error;

	printf("\n");
	if (error == ENOSPC) {
		printf("Dump map grown while dumping. ");
		if (retry_count < dump_retry_count) {
			printf("Retrying...\n");
			goto retry;
		}
		printf("Dump failed.\n");
	} else if (error == ECANCELED)
		printf("Dump aborted\n");
	else if (error == E2BIG) {
		printf("Dump failed. Partition too small (about %lluMB were "
		    "needed this time).\n", (long long)dumpsize >> 20);
	} else
		printf("** DUMP FAILED (ERROR %d) **\n", error);
	return (error);
}