/*-
 * Copyright (c) 2006 Peter Wemm
 * Copyright (c) 2019 Leandro Lupori
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_riscv.c
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <vm/vm.h>
#include <kvm.h>

#include <stdint.h>
#include <stdlib.h>

#include "../../sys/powerpc/include/minidump.h"
#include "kvm_private.h"
#include "kvm_powerpc64.h"

/*
 * PowerPC64 HPT machine dependent routines for kvm and minidumps.
 *
 * Address Translation parameters:
 *
 * b = 12 (SLB base page size: 4 KB)
 * b = 24 (SLB base page size: 16 MB)
 * p = 12 (page size: 4 KB)
 * p = 24 (page size: 16 MB)
 * s = 28 (segment size: 256 MB)
 */

/* Large (huge) page params */
#define LP_PAGE_SHIFT   24
#define LP_PAGE_SIZE    (1ULL << LP_PAGE_SHIFT)
#define LP_PAGE_MASK    0x00ffffffULL

/* SLB */

#define SEGMENT_LENGTH  0x10000000ULL

#define round_seg(x)    roundup2((uint64_t)(x), SEGMENT_LENGTH)

/* Virtual real-mode VSID in LPARs */
#define VSID_VRMA       0x1ffffffULL

#define SLBV_L          0x0000000000000100ULL /* Large page selector */
#define SLBV_CLASS      0x0000000000000080ULL /* Class selector */
#define SLBV_LP_MASK    0x0000000000000030ULL
#define SLBV_VSID_MASK  0x3ffffffffffff000ULL /* Virtual SegID mask */
#define SLBV_VSID_SHIFT 12

#define SLBE_B_MASK     0x0000000006000000ULL
#define SLBE_B_256MB    0x0000000000000000ULL
#define SLBE_VALID      0x0000000008000000ULL /* SLB entry valid */
#define SLBE_INDEX_MASK 0x0000000000000fffULL /* SLB index mask */
#define SLBE_ESID_MASK  0xfffffffff0000000ULL /* Effective SegID mask */
#define SLBE_ESID_SHIFT 28

/* PTE */

#define LPTEH_VSID_SHIFT 12
#define LPTEH_AVPN_MASK  0xffffffffffffff80ULL
#define LPTEH_B_MASK     0xc000000000000000ULL
#define LPTEH_B_256MB    0x0000000000000000ULL
#define LPTEH_BIG        0x0000000000000004ULL /* 4KB/16MB page */
#define LPTEH_HID        0x0000000000000002ULL
#define LPTEH_VALID      0x0000000000000001ULL

#define LPTEL_RPGN      0xfffffffffffff000ULL
#define LPTEL_LP_MASK   0x00000000000ff000ULL
#define LPTEL_NOEXEC    0x0000000000000004ULL

/* Supervisor (U: RW, S: RW) */
#define LPTEL_BW        0x0000000000000002ULL

/* Both Read Only (U: RO, S: RO) */
#define LPTEL_BR        0x0000000000000003ULL

#define LPTEL_RW        LPTEL_BW
#define LPTEL_RO        LPTEL_BR

/*
 * PTE AVA field manipulation macros.
 *
 * AVA[0:54] = PTEH[2:56]
 * AVA[VSID] = AVA[0:49] = PTEH[2:51]
 * AVA[PAGE] = AVA[50:54] = PTEH[52:56]
 */
#define PTEH_AVA_VSID_MASK  0x3ffffffffffff000UL
#define PTEH_AVA_VSID_SHIFT 12
#define PTEH_AVA_VSID(p) \
        (((p) & PTEH_AVA_VSID_MASK) >> PTEH_AVA_VSID_SHIFT)

#define PTEH_AVA_PAGE_MASK  0x0000000000000f80UL
#define PTEH_AVA_PAGE_SHIFT 7
#define PTEH_AVA_PAGE(p) \
        (((p) & PTEH_AVA_PAGE_MASK) >> PTEH_AVA_PAGE_SHIFT)

/* Masks to obtain the Physical Address from PTE low 64-bit word. */
#define PTEL_PA_MASK    0x0ffffffffffff000UL
#define PTEL_LP_PA_MASK 0x0fffffffff000000UL

#define PTE_HASH_MASK   0x0000007fffffffffUL

/*
 * Number of AVA/VA page bits to shift right, in order to leave only the
 * ones that should be considered.
 *
 * q = MIN(54, 77-b)	(PowerISA v2.07B, 5.7.7.3)
 * n = q + 1 - 50	(VSID size in bits)
 * s(ava) = 5 - n
 * s(va) = (28 - b) - n
 *
 * q: bit number of lower limit of VA/AVA bits to compare
 * n: number of AVA/VA page bits to compare
 * 5: number of AVA page bits
 * 28 - b: VA page size in bits
 */
#define AVA_PAGE_SHIFT(b)   (5 - (MIN(54, 77-(b)) + 1 - 50))
#define VA_PAGE_SHIFT(b)    (28 - (b) - (MIN(54, 77-(b)) + 1 - 50))
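
/*
 * For example, b = 12 gives q = MIN(54, 65) = 54 and n = 5, so
 * AVA_PAGE_SHIFT(12) = 0 and VA_PAGE_SHIFT(12) = 11; b = 24 gives
 * q = 53 and n = 4, so AVA_PAGE_SHIFT(24) = 1 and VA_PAGE_SHIFT(24) = 0.
 */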

/* Kernel ESID -> VSID mapping */
#define KERNEL_VSID_BIT 0x0000001000000000UL /* Bit set in all kernel VSIDs */
#define KERNEL_VSID(esid) ((((((uint64_t)esid << 8) | ((uint64_t)esid >> 28)) \
                            * 0x13bbUL) & (KERNEL_VSID_BIT - 1)) | \
                            KERNEL_VSID_BIT)
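
/*
 * The rotate-and-multiply above spreads the ESID over the low 36 bits of
 * the VSID; OR'ing in KERNEL_VSID_BIT afterwards marks the result, which
 * is what walk_pages uses to tell kernel VSIDs apart.
 */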

/* Types */

typedef uint64_t ppc64_physaddr_t;

typedef struct {
        uint64_t slbv;
        uint64_t slbe;
} ppc64_slb_entry_t;

typedef struct {
        uint64_t pte_hi;
        uint64_t pte_lo;
} ppc64_pt_entry_t;

struct hpt_data {
        ppc64_slb_entry_t *slbs;
        uint32_t slbsize;
};

static void
slb_fill(ppc64_slb_entry_t *slb, uint64_t ea, uint64_t i)
{
        uint64_t esid;

        esid = ea >> SLBE_ESID_SHIFT;
        slb->slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
        slb->slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID | i;
}

static int
slb_init(kvm_t *kd)
{
        struct minidumphdr *hdr;
        struct hpt_data *data;
        ppc64_slb_entry_t *slb;
        uint32_t slbsize;
        uint64_t ea, i, maxmem;

        hdr = &kd->vmst->hdr;
        data = PPC64_MMU_DATA(kd);
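
        /*
         * The minidump bitmap has one bit per 4 KB page, so it can
         * describe at most bitmapsize * 8 pages (e.g. a 64 KB bitmap
         * covers up to 2 GiB of RAM).
         */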
        maxmem = hdr->bitmapsize * 8 * PPC64_PAGE_SIZE;
        slbsize = round_seg(hdr->kernend + 1 - hdr->kernbase + maxmem) /
            SEGMENT_LENGTH * sizeof(ppc64_slb_entry_t);
        data->slbs = _kvm_malloc(kd, slbsize);
        if (data->slbs == NULL) {
                _kvm_err(kd, kd->program, "cannot allocate slbs");
                return (-1);
        }
        data->slbsize = slbsize;

        dprintf("%s: maxmem=0x%jx, segs=%jd, slbsize=0x%jx\n",
            __func__, (uintmax_t)maxmem,
            (uintmax_t)slbsize / sizeof(ppc64_slb_entry_t), (uintmax_t)slbsize);

        /*
         * Generate needed SLB entries.
         *
         * When translating addresses from EA to VA to PA, the needed SLB
         * entry could be generated on the fly, but this is not the case
         * for the walk_pages method, which needs to search the SLB entry
         * by VSID in order to find the EA from a PTE.
         */

        /* VM area */
        for (ea = hdr->kernbase, i = 0, slb = data->slbs;
            ea < hdr->kernend; ea += SEGMENT_LENGTH, i++, slb++)
                slb_fill(slb, ea, i);

        /* DMAP area */
        for (ea = hdr->dmapbase;
            ea < MIN(hdr->dmapend, hdr->dmapbase + maxmem);
            ea += SEGMENT_LENGTH, i++, slb++) {
                slb_fill(slb, ea, i);
                /* The hardware direct map uses 16 MB (large) pages */
                if (hdr->hw_direct_map)
                        slb->slbv |= SLBV_L;
        }

        return (0);
}

static void
ppc64mmu_hpt_cleanup(kvm_t *kd)
{
        struct hpt_data *data;

        if (kd->vmst == NULL)
                return;

        data = PPC64_MMU_DATA(kd);
        free(data->slbs);
        free(data);
        PPC64_MMU_DATA(kd) = NULL;
}

static int
ppc64mmu_hpt_init(kvm_t *kd)
{
        struct hpt_data *data;

        /* Alloc MMU data */
        data = _kvm_malloc(kd, sizeof(*data));
        if (data == NULL) {
                _kvm_err(kd, kd->program, "cannot allocate MMU data");
                return (-1);
        }
        data->slbs = NULL;
        PPC64_MMU_DATA(kd) = data;

        if (slb_init(kd) == -1)
                goto failed;

        return (0);

failed:
        ppc64mmu_hpt_cleanup(kd);
        return (-1);
}

static ppc64_slb_entry_t *
slb_search(kvm_t *kd, kvaddr_t ea)
{
        struct hpt_data *data;
        ppc64_slb_entry_t *slb;
        int i, n;

        data = PPC64_MMU_DATA(kd);
        slb = data->slbs;
        n = data->slbsize / sizeof(ppc64_slb_entry_t);

        /* SLB search */
        for (i = 0; i < n; i++, slb++) {
                if ((slb->slbe & SLBE_VALID) == 0)
                        continue;

                /* Compare 36-bit ESID of EA with segment one (64-s) */
                if ((slb->slbe & SLBE_ESID_MASK) != (ea & SLBE_ESID_MASK))
                        continue;

                /* Match found */
                dprintf("SEG#%02d: slbv=0x%016jx, slbe=0x%016jx\n",
                    i, (uintmax_t)slb->slbv, (uintmax_t)slb->slbe);
                break;
        }

        /* SLB not found */
        if (i == n) {
                _kvm_err(kd, kd->program, "%s: segment not found for EA 0x%jx",
                    __func__, (uintmax_t)ea);
                return (NULL);
        }

        return (slb);
}

static ppc64_pt_entry_t
pte_get(kvm_t *kd, u_long ptex)
{
        ppc64_pt_entry_t pte, *p;

        p = _kvm_pmap_get(kd, ptex, sizeof(pte));
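        /* HPT entries are stored big-endian in the dump; convert to host order. */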
        pte.pte_hi = be64toh(p->pte_hi);
        pte.pte_lo = be64toh(p->pte_lo);
        return (pte);
}

static int
pte_search(kvm_t *kd, ppc64_slb_entry_t *slb, uint64_t hid, kvaddr_t ea,
    ppc64_pt_entry_t *p)
{
        uint64_t hash, hmask;
        uint64_t pteg, ptex;
        uint64_t va_vsid, va_page;
        int b;
        int ava_pg_shift, va_pg_shift;
        ppc64_pt_entry_t pte;

        /*
         * Get VA:
         *
         * va(78) = va_vsid(50) || va_page(s-b) || offset(b)
         *
         * va_vsid: 50-bit VSID (78-s)
         * va_page: (s-b)-bit VA page
         */
        b = (slb->slbv & SLBV_L) ? LP_PAGE_SHIFT : PPC64_PAGE_SHIFT;
        va_vsid = (slb->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
        va_page = (ea & ~SLBE_ESID_MASK) >> b;

        dprintf("%s: hid=0x%jx, ea=0x%016jx, b=%d, va_vsid=0x%010jx, "
            "va_page=0x%04jx\n",
            __func__, (uintmax_t)hid, (uintmax_t)ea, b,
            (uintmax_t)va_vsid, (uintmax_t)va_page);

        /*
         * Get hash:
         *
         * Primary hash: va_vsid(11:49) ^ va_page(s-b)
         * Secondary hash: ~primary_hash
         */
        hash = (va_vsid & PTE_HASH_MASK) ^ va_page;
        if (hid)
                hash = ~hash & PTE_HASH_MASK;
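
        /*
         * E.g. va_vsid = 0x123456789 and va_page = 0xabc hash to
         * 0x123456789 ^ 0xabc = 0x123456d35 (primary).
         */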

        /*
         * Get PTEG:
         *
         * pteg = (hash(0:38) & hmask) << 3
         *
         * hmask (hash mask): mask generated from HTABSIZE || 11*0b1
         * hmask = number_of_ptegs - 1
         */
        hmask = kd->vmst->hdr.pmapsize / (8 * sizeof(ppc64_pt_entry_t)) - 1;
        pteg = (hash & hmask) << 3;
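
        /*
         * Each 128-byte PTEG holds 8 PTEs, so e.g. a 64 MB HPT
         * (pmapsize = 0x4000000) has 0x80000 PTEGs and hmask = 0x7ffff.
         */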

        ava_pg_shift = AVA_PAGE_SHIFT(b);
        va_pg_shift = VA_PAGE_SHIFT(b);

        dprintf("%s: hash=0x%010jx, hmask=0x%010jx, (hash & hmask)=0x%010jx, "
            "pteg=0x%011jx, ava_pg_shift=%d, va_pg_shift=%d\n",
            __func__, (uintmax_t)hash, (uintmax_t)hmask,
            (uintmax_t)(hash & hmask), (uintmax_t)pteg,
            ava_pg_shift, va_pg_shift);

        /* Search PTEG */
        for (ptex = pteg; ptex < pteg + 8; ptex++) {
                pte = pte_get(kd, ptex);

                /* Check H, V and B */
                if ((pte.pte_hi & LPTEH_HID) != hid ||
                    (pte.pte_hi & LPTEH_VALID) == 0 ||
                    (pte.pte_hi & LPTEH_B_MASK) != LPTEH_B_256MB)
                        continue;

                /* Compare AVA with VA */
                if (PTEH_AVA_VSID(pte.pte_hi) != va_vsid ||
                    (PTEH_AVA_PAGE(pte.pte_hi) >> ava_pg_shift) !=
                    (va_page >> va_pg_shift))
                        continue;

                /*
                 * Check if PTE[L] matches SLBV[L].
                 *
                 * Note: this check ignores PTE[LP], as does the kernel.
                 */
                if (b == PPC64_PAGE_SHIFT) {
                        if (pte.pte_hi & LPTEH_BIG)
                                continue;
                } else if ((pte.pte_hi & LPTEH_BIG) == 0)
                        continue;

                /* Match found */
                dprintf("%s: PTE found: ptex=0x%jx, pteh=0x%016jx, "
                    "ptel=0x%016jx\n",
                    __func__, (uintmax_t)ptex, (uintmax_t)pte.pte_hi,
                    (uintmax_t)pte.pte_lo);
                break;
        }

        /* Not found? */
        if (ptex == pteg + 8) {
                /* Try secondary hash */
                if (hid == 0)
                        return (pte_search(kd, slb, LPTEH_HID, ea, p));
                _kvm_err(kd, kd->program,
                    "%s: pte not found", __func__);
                return (-1);
        }

        /* PTE found */
        *p = pte;
        return (0);
}

static int
pte_lookup(kvm_t *kd, kvaddr_t ea, ppc64_pt_entry_t *pte)
{
        ppc64_slb_entry_t *slb;

        /* First, find the SLB entry */
        if ((slb = slb_search(kd, ea)) == NULL)
                return (-1);

        /* Next, find the PTE, trying the primary hash first */
        return (pte_search(kd, slb, 0, ea, pte));
}

static int
ppc64mmu_hpt_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
        struct minidumphdr *hdr;
        ppc64_pt_entry_t pte;
        ppc64_physaddr_t pgoff, pgpa;
        off_t ptoff;
        int err;

        hdr = &kd->vmst->hdr;
        pgoff = va & PPC64_PAGE_MASK;

        dprintf("%s: va=0x%016jx\n", __func__, (uintmax_t)va);

        /*
         * A common use case of libkvm is to first find a symbol address
         * in the kernel image and then use kvatop to translate it, so
         * that its corresponding data can be fetched.
         *
         * The problem is that, on PowerPC64, the addresses of relocated
         * data won't match those in the kernel image. This is handled
         * here by adding the relocation offset to those addresses.
         */
        if (va < hdr->dmapbase)
                va += hdr->startkernel - PPC64_KERNBASE;
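
        /*
         * E.g. a kernel loaded 0x200000 bytes above PPC64_KERNBASE has
         * startkernel = PPC64_KERNBASE + 0x200000, so image addresses
         * are bumped by 0x200000 to match their runtime EAs.
         */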

        /* Handle DMAP */
        if (va >= hdr->dmapbase && va <= hdr->dmapend) {
                pgpa = (va & ~hdr->dmapbase) & ~PPC64_PAGE_MASK;
                ptoff = _kvm_pt_find(kd, pgpa, PPC64_PAGE_SIZE);
                if (ptoff == -1) {
                        _kvm_err(kd, kd->program, "%s: "
                            "direct map address 0x%jx not in minidump",
                            __func__, (uintmax_t)va);
                        goto invalid;
                }
                *pa = ptoff + pgoff;
                return (PPC64_PAGE_SIZE - pgoff);
        /* Translate VA to PA */
        } else if (va >= hdr->kernbase) {
                if ((err = pte_lookup(kd, va, &pte)) == -1) {
                        _kvm_err(kd, kd->program,
                            "%s: pte not valid", __func__);
                        goto invalid;
                }

                if (pte.pte_hi & LPTEH_BIG)
                        pgpa = (pte.pte_lo & PTEL_LP_PA_MASK) |
                            (va & ~PPC64_PAGE_MASK & LP_PAGE_MASK);
                else
                        pgpa = pte.pte_lo & PTEL_PA_MASK;
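                /*
                 * For 16 MB pages, the low 24 PA bits are taken from
                 * the EA, so pgpa remains 4 KB-granular for the page
                 * lookup below.
                 */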
                dprintf("%s: pgpa=0x%016jx\n", __func__, (uintmax_t)pgpa);

                ptoff = _kvm_pt_find(kd, pgpa, PPC64_PAGE_SIZE);
                if (ptoff == -1) {
                        _kvm_err(kd, kd->program, "%s: "
                            "physical address 0x%jx not in minidump",
                            __func__, (uintmax_t)pgpa);
                        goto invalid;
                }
                *pa = ptoff + pgoff;
                return (PPC64_PAGE_SIZE - pgoff);
        } else
                _kvm_err(kd, kd->program,
                    "%s: virtual address 0x%jx not minidumped",
                    __func__, (uintmax_t)va);

invalid:
        _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
        return (0);
}

static vm_prot_t
entry_to_prot(ppc64_pt_entry_t *pte)
{
        vm_prot_t prot = VM_PROT_READ;

        /* PP bits: LPTEL_BR (0b11) is the only read-only encoding */
        if ((pte->pte_lo & LPTEL_BR) != LPTEL_RO)
                prot |= VM_PROT_WRITE;
        /* The N bit set means no-execute */
        if ((pte->pte_lo & LPTEL_NOEXEC) == 0)
                prot |= VM_PROT_EXECUTE;
        return (prot);
}

static ppc64_slb_entry_t *
slb_vsid_search(kvm_t *kd, uint64_t vsid)
{
        struct hpt_data *data;
        ppc64_slb_entry_t *slb;
        int i, n;

        data = PPC64_MMU_DATA(kd);
        slb = data->slbs;
        n = data->slbsize / sizeof(ppc64_slb_entry_t);
        vsid <<= SLBV_VSID_SHIFT;

        /* SLB search */
        for (i = 0; i < n; i++, slb++) {
                /* Check if valid and compare VSID */
                if ((slb->slbe & SLBE_VALID) &&
                    (slb->slbv & SLBV_VSID_MASK) == vsid)
                        break;
        }

        /* SLB not found */
        if (i == n) {
                _kvm_err(kd, kd->program,
                    "%s: segment not found for VSID 0x%jx",
                    __func__, (uintmax_t)vsid >> SLBV_VSID_SHIFT);
                return (NULL);
        }

        return (slb);
}

static uint64_t
get_ea(kvm_t *kd, ppc64_pt_entry_t *pte, u_long ptex)
{
        ppc64_slb_entry_t *slb;
        uint64_t ea, hash, vsid;
        int b, shift;

        /* Find SLB */
        vsid = PTEH_AVA_VSID(pte->pte_hi);
        if ((slb = slb_vsid_search(kd, vsid)) == NULL)
                return (~0UL);

        /* Get ESID part of EA */
        ea = slb->slbe & SLBE_ESID_MASK;

        b = (slb->slbv & SLBV_L) ? LP_PAGE_SHIFT : PPC64_PAGE_SHIFT;

        /*
         * If there are fewer than 64K PTEGs (i.e. the hash fits in
         * 16 bits), the upper bits of the EA page must be obtained
         * from the PTEH's AVA.
         */
        if (kd->vmst->hdr.pmapsize / (8 * sizeof(ppc64_pt_entry_t)) <
            0x10000U) {
                /*
                 * Add 0 to 5 EA bits, right after VSID
                 * (5 bits when b == 12, 4 when b == 24).
                 */
                shift = AVA_PAGE_SHIFT(b);
                ea |= (PTEH_AVA_PAGE(pte->pte_hi) >> shift) <<
                    (SLBE_ESID_SHIFT - 5 + shift);
        }

        /* Get VA page from hash and add to EA. */
        hash = (ptex & ~7) >> 3;
        if (pte->pte_hi & LPTEH_HID)
                hash = ~hash & PTE_HASH_MASK;
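        /*
         * pte_search computes hash = vsid ^ va_page, so the VA page is
         * recovered here as va_page = hash ^ vsid.
         */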
        ea |= ((hash ^ (vsid & PTE_HASH_MASK)) << b) & ~SLBE_ESID_MASK;

        return (ea);
}

static int
ppc64mmu_hpt_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
        struct vmstate *vm;
        int ret;
        unsigned int pagesz;
        u_long dva, pa, va;
        u_long ptex, nptes;
        uint64_t vsid;

        ret = 0;
        vm = kd->vmst;
        nptes = vm->hdr.pmapsize / sizeof(ppc64_pt_entry_t);

        /* Walk through PTEs */
        for (ptex = 0; ptex < nptes; ptex++) {
                ppc64_pt_entry_t pte = pte_get(kd, ptex);

                if ((pte.pte_hi & LPTEH_VALID) == 0)
                        continue;

                /* Skip non-kernel related pages, as well as VRMA ones */
                vsid = PTEH_AVA_VSID(pte.pte_hi);
                if ((vsid & KERNEL_VSID_BIT) == 0 ||
                    (vsid >> PPC64_PAGE_SHIFT) == VSID_VRMA)
                        continue;

                /* Retrieve the page's VA (EA in PPC64 terminology) */
                if ((va = get_ea(kd, &pte, ptex)) == ~0UL)
                        continue;

                /* Get PA and page size */
                if (pte.pte_hi & LPTEH_BIG) {
                        pa = pte.pte_lo & PTEL_LP_PA_MASK;
                        pagesz = LP_PAGE_SIZE;
                } else {
                        pa = pte.pte_lo & PTEL_PA_MASK;
                        pagesz = PPC64_PAGE_SIZE;
                }

                /* Get DMAP address */
                dva = vm->hdr.dmapbase + pa;

                if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
                    entry_to_prot(&pte), pagesz, 0))
                        goto out;
        }
        ret = 1;

out:
        return (ret);
}

static struct ppc64_mmu_ops ops = {
        .init = ppc64mmu_hpt_init,
        .cleanup = ppc64mmu_hpt_cleanup,
        .kvatop = ppc64mmu_hpt_kvatop,
        .walk_pages = ppc64mmu_hpt_walk_pages,
};
struct ppc64_mmu_ops *ppc64_mmu_ops_hpt = &ops;