/*-
 * Copyright (c) 2006 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * AMD64 machine dependent routines for kvm and minidumps.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <kvm.h>

#include "../../sys/amd64/include/minidump.h"

#include <limits.h>

#include "kvm_private.h"
#include "kvm_amd64.h"

#define amd64_round_page(x)     roundup2((kvaddr_t)(x), AMD64_PAGE_SIZE)

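/*
 * Per-dump translation state: the minidump header, a hash table that maps
 * physical page addresses to their offsets in the sparse page data, and the
 * kernel page map read from the dump (an array of PTEs for version 1 dumps,
 * an array of PDEs for version 2 dumps).
 */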
struct vmstate {
        struct minidumphdr hdr;
        struct hpt hpt;
        amd64_pte_t *page_map;
};

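/*
 * Match an ELF64 x86-64 kernel image paired with a minidump core file.
 */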
static int
_amd64_minidump_probe(kvm_t *kd)
{

        return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64) &&
            _kvm_is_minidump(kd));
}

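/*
 * Release the per-dump state set up by _amd64_minidump_initvtop().
 */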
static void
_amd64_minidump_freevtop(kvm_t *kd)
{
        struct vmstate *vm = kd->vmst;

        _kvm_hpt_free(&vm->hpt);
        if (vm->page_map)
                free(vm->page_map);
        free(vm);
        kd->vmst = NULL;
}

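/*
 * Parse the minidump. The core file is laid out as the minidump header (one
 * page), the message buffer, the bitmap of dumped physical pages, the kernel
 * page map, and finally the page data itself; the header, message buffer and
 * bitmap are each padded out to a page boundary. The bitmap and the file
 * offset of the page data are handed to _kvm_hpt_init() to build the
 * physical-address hash table used for lookups.
 */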
static int
_amd64_minidump_initvtop(kvm_t *kd)
{
        struct vmstate *vmst;
        uint64_t *bitmap;
        off_t off;

        vmst = _kvm_malloc(kd, sizeof(*vmst));
        if (vmst == NULL) {
                _kvm_err(kd, kd->program, "cannot allocate vm");
                return (-1);
        }
        kd->vmst = vmst;
        if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
            sizeof(vmst->hdr)) {
                _kvm_err(kd, kd->program, "cannot read dump header");
                return (-1);
        }
        if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) {
                _kvm_err(kd, kd->program, "not a minidump for this platform");
                return (-1);
        }

        /*
         * NB: amd64 minidump header is binary compatible between version 1
         * and version 2; this may not be the case for future versions.
         */
        vmst->hdr.version = le32toh(vmst->hdr.version);
        if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
                _kvm_err(kd, kd->program, "wrong minidump version. expected %d got %d",
                    MINIDUMP_VERSION, vmst->hdr.version);
                return (-1);
        }
        vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
        vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
        vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
        vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
        vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
        vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);

        /* Skip header and msgbuf */
        off = AMD64_PAGE_SIZE + amd64_round_page(vmst->hdr.msgbufsize);

        bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize);
        if (bitmap == NULL) {
                _kvm_err(kd, kd->program, "cannot allocate %d bytes for bitmap", vmst->hdr.bitmapsize);
                return (-1);
        }
        if (pread(kd->pmfd, bitmap, vmst->hdr.bitmapsize, off) !=
            (ssize_t)vmst->hdr.bitmapsize) {
                _kvm_err(kd, kd->program, "cannot read %d bytes for page bitmap", vmst->hdr.bitmapsize);
                free(bitmap);
                return (-1);
        }
        off += amd64_round_page(vmst->hdr.bitmapsize);

        vmst->page_map = _kvm_malloc(kd, vmst->hdr.pmapsize);
        if (vmst->page_map == NULL) {
                _kvm_err(kd, kd->program, "cannot allocate %d bytes for page_map", vmst->hdr.pmapsize);
                free(bitmap);
                return (-1);
        }
        if (pread(kd->pmfd, vmst->page_map, vmst->hdr.pmapsize, off) !=
            (ssize_t)vmst->hdr.pmapsize) {
                _kvm_err(kd, kd->program, "cannot read %d bytes for page_map", vmst->hdr.pmapsize);
                free(bitmap);
                return (-1);
        }
        off += vmst->hdr.pmapsize;

        /* build physical address hash table for sparse pages */
        _kvm_hpt_init(kd, &vmst->hpt, bitmap, vmst->hdr.bitmapsize, off,
            AMD64_PAGE_SIZE, sizeof(*bitmap));
        free(bitmap);

        return (0);
}

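/*
 * Translate a kernel virtual address for a version 1 minidump. The version 1
 * page map is a flat array of 4KB PTEs covering the kernel map starting at
 * kernbase; direct map addresses translate to physical addresses without a
 * page table walk. The resulting physical page is then located in the sparse
 * dump via the hash table. Returns the number of bytes readable at *pa, or 0
 * on failure.
 */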
static int
_amd64_minidump_vatop_v1(kvm_t *kd, kvaddr_t va, off_t *pa)
{
        struct vmstate *vm;
        amd64_physaddr_t offset;
        amd64_pte_t pte;
        kvaddr_t pteindex;
        amd64_physaddr_t a;
        off_t ofs;

        vm = kd->vmst;
        offset = va & AMD64_PAGE_MASK;

        if (va >= vm->hdr.kernbase) {
                pteindex = (va - vm->hdr.kernbase) >> AMD64_PAGE_SHIFT;
                if (pteindex >= vm->hdr.pmapsize / sizeof(*vm->page_map))
                        goto invalid;
                pte = le64toh(vm->page_map[pteindex]);
                if ((pte & AMD64_PG_V) == 0) {
                        _kvm_err(kd, kd->program,
                            "_amd64_minidump_vatop_v1: pte not valid");
                        goto invalid;
                }
                a = pte & AMD64_PG_FRAME;
                ofs = _kvm_hpt_find(&vm->hpt, a);
                if (ofs == -1) {
                        _kvm_err(kd, kd->program,
            "_amd64_minidump_vatop_v1: physical address 0x%jx not in minidump",
                            (uintmax_t)a);
                        goto invalid;
                }
                *pa = ofs + offset;
                return (AMD64_PAGE_SIZE - offset);
        } else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
                a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK;
                ofs = _kvm_hpt_find(&vm->hpt, a);
                if (ofs == -1) {
                        _kvm_err(kd, kd->program,
    "_amd64_minidump_vatop_v1: direct map address 0x%jx not in minidump",
                            (uintmax_t)va);
                        goto invalid;
                }
                *pa = ofs + offset;
                return (AMD64_PAGE_SIZE - offset);
        } else {
                _kvm_err(kd, kd->program,
            "_amd64_minidump_vatop_v1: virtual address 0x%jx not minidumped",
                    (uintmax_t)va);
                goto invalid;
        }

invalid:
        _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
        return (0);
}

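/*
 * Translate a kernel virtual address for a version 2 minidump. The version 2
 * page map stores one PDE per 2MB of the kernel map: a PDE with PG_PS set
 * maps a 2MB page directly, otherwise it points to a 4KB page table page
 * that is itself part of the dump and must be read to find the final PTE.
 * Direct map addresses are handled as in version 1. Returns the number of
 * bytes readable at *pa, 0 if the address cannot be translated, or -1 if the
 * page table page cannot be read.
 */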
static int
_amd64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
        amd64_pte_t pt[AMD64_NPTEPG];
        struct vmstate *vm;
        amd64_physaddr_t offset;
        amd64_pde_t pde;
        amd64_pte_t pte;
        kvaddr_t pteindex;
        kvaddr_t pdeindex;
        amd64_physaddr_t a;
        off_t ofs;

        vm = kd->vmst;
        offset = va & AMD64_PAGE_MASK;

        if (va >= vm->hdr.kernbase) {
                pdeindex = (va - vm->hdr.kernbase) >> AMD64_PDRSHIFT;
                if (pdeindex >= vm->hdr.pmapsize / sizeof(*vm->page_map))
                        goto invalid;
                pde = le64toh(vm->page_map[pdeindex]);
                if ((pde & AMD64_PG_V) == 0) {
                        _kvm_err(kd, kd->program,
                            "_amd64_minidump_vatop: pde not valid");
                        goto invalid;
                }
                if ((pde & AMD64_PG_PS) == 0) {
                        a = pde & AMD64_PG_FRAME;
                        ofs = _kvm_hpt_find(&vm->hpt, a);
                        if (ofs == -1) {
                                _kvm_err(kd, kd->program,
            "_amd64_minidump_vatop: pt physical address 0x%jx not in minidump",
                                    (uintmax_t)a);
                                goto invalid;
                        }
                        /* TODO: Just read the single PTE */
                        if (pread(kd->pmfd, &pt, AMD64_PAGE_SIZE, ofs) !=
                            AMD64_PAGE_SIZE) {
                                _kvm_err(kd, kd->program,
                                    "cannot read %d bytes for page table",
                                    AMD64_PAGE_SIZE);
                                return (-1);
                        }
                        pteindex = (va >> AMD64_PAGE_SHIFT) &
                            (AMD64_NPTEPG - 1);
                        pte = le64toh(pt[pteindex]);
                        if ((pte & AMD64_PG_V) == 0) {
                                _kvm_err(kd, kd->program,
                                    "_amd64_minidump_vatop: pte not valid");
                                goto invalid;
                        }
                        a = pte & AMD64_PG_FRAME;
                } else {
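                        /*
                         * 2MB superpage: the dump stores each constituent
                         * 4KB page separately, so add the 2MB-relative
                         * offset with the low 12 bits cleared (they are
                         * re-added below via 'offset') to find the right
                         * 4KB frame.
                         */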
                        a = pde & AMD64_PG_PS_FRAME;
                        a += (va & AMD64_PDRMASK) ^ offset;
                }
                ofs = _kvm_hpt_find(&vm->hpt, a);
                if (ofs == -1) {
                        _kvm_err(kd, kd->program,
            "_amd64_minidump_vatop: physical address 0x%jx not in minidump",
                            (uintmax_t)a);
                        goto invalid;
                }
                *pa = ofs + offset;
                return (AMD64_PAGE_SIZE - offset);
        } else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
                a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK;
                ofs = _kvm_hpt_find(&vm->hpt, a);
                if (ofs == -1) {
                        _kvm_err(kd, kd->program,
            "_amd64_minidump_vatop: direct map address 0x%jx not in minidump",
                            (uintmax_t)va);
                        goto invalid;
                }
                *pa = ofs + offset;
                return (AMD64_PAGE_SIZE - offset);
        } else {
                _kvm_err(kd, kd->program,
            "_amd64_minidump_vatop: virtual address 0x%jx not minidumped",
                    (uintmax_t)va);
                goto invalid;
        }

invalid:
        _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
        return (0);
}

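/*
 * Entry point for kernel virtual-to-physical translation; dispatches to the
 * translator that matches the dump's version.
 */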
static int
_amd64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{

        if (ISALIVE(kd)) {
                _kvm_err(kd, 0,
                    "_amd64_minidump_kvatop called in live kernel!");
                return (0);
        }
        if (((struct vmstate *)kd->vmst)->hdr.version == 1)
                return (_amd64_minidump_vatop_v1(kd, va, pa));
        else
                return (_amd64_minidump_vatop(kd, va, pa));
}

static struct kvm_arch kvm_amd64_minidump = {
        .ka_probe = _amd64_minidump_probe,
        .ka_initvtop = _amd64_minidump_initvtop,
        .ka_freevtop = _amd64_minidump_freevtop,
        .ka_kvatop = _amd64_minidump_kvatop,
        .ka_native = _amd64_native,
};

KVM_ARCH(kvm_amd64_minidump);
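
/*
 * For illustration only, a minimal consumer sketch (not part of this file)
 * of how these routines get exercised: kvm_openfiles() selects this kvm_arch
 * via _amd64_minidump_probe(), and kvm_read() translates kernel addresses
 * through _amd64_minidump_kvatop(). The file paths and the symbol name below
 * are placeholders.
 *
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <kvm.h>
 *	#include <limits.h>
 *	#include <nlist.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		char errbuf[_POSIX2_LINE_MAX];
 *		struct nlist nl[] = { { "_ticks" }, { NULL } };
 *		kvm_t *kd;
 *		int ticks;
 *
 *		kd = kvm_openfiles("/boot/kernel/kernel", "/var/crash/vmcore.0",
 *		    NULL, O_RDONLY, errbuf);
 *		if (kd == NULL)
 *			errx(1, "kvm_openfiles: %s", errbuf);
 *		if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0)
 *			errx(1, "kvm_nlist failed");
 *		if (kvm_read(kd, nl[0].n_value, &ticks, sizeof(ticks)) !=
 *		    sizeof(ticks))
 *			errx(1, "kvm_read: %s", kvm_geterr(kd));
 *		printf("ticks: %d\n", ticks);
 *		kvm_close(kd);
 *		return (0);
 *	}
 */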