2 * Copyright (c) 2006 Kip Macy <kmacy@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include "opt_kstack_pages.h"
32 #include "opt_msgbuf.h"
34 #include "opt_trap_trace.h"
36 #include <sys/param.h>
37 #include <sys/kernel.h>
41 #include <sys/msgbuf.h>
42 #include <sys/mutex.h>
45 #include <sys/sched.h>
46 #include <sys/sysctl.h>
47 #include <sys/systm.h>
48 #include <sys/vmmeter.h>
50 #include <dev/ofw/openfirm.h>
53 #include <vm/vm_page.h>
54 #include <vm/vm_param.h>
55 #include <vm/vm_kern.h>
56 #include <vm/vm_map.h>
57 #include <vm/vm_object.h>
58 #include <vm/vm_extern.h>
59 #include <vm/vm_pageout.h>
60 #include <vm/vm_pager.h>
61 #include <vm/vm_phys.h>
64 #include <machine/cpu.h>
65 #include <machine/frame.h>
66 #include <machine/instr.h>
67 #include <machine/md_var.h>
68 #include <machine/metadata.h>
69 #include <machine/ofw_mem.h>
70 #include <machine/mmu.h>
71 #include <machine/smp.h>
72 #include <machine/tlb.h>
73 #include <machine/tte.h>
74 #include <machine/tte_hash.h>
75 #include <machine/pcb.h>
76 #include <machine/pstate.h>
77 #include <machine/tsb.h>
79 #include <machine/hypervisorvar.h>
80 #include <machine/hv_api.h>
83 void trap_trace_report(int);
89 #ifndef PMAP_SHPGPERPROC
90 #define PMAP_SHPGPERPROC 200
94 * Virtual and physical address of message buffer.
96 struct msgbuf *msgbufp;
97 vm_paddr_t msgbuf_phys;
100 * Map of physical memory regions.
102 vm_paddr_t phys_avail[128];
103 vm_paddr_t phys_avail_tmp[128];
104 static struct ofw_mem_region mra[128];
105 static struct ofw_map translations[128];
106 static int translations_size;
109 struct ofw_mem_region sparc64_memreg[128];
112 extern vm_paddr_t mmu_fault_status_area;
115 * First and last available kernel virtual addresses.
117 vm_offset_t virtual_avail;
118 vm_offset_t virtual_end;
119 vm_offset_t kernel_vm_end;
120 vm_offset_t vm_max_kernel_address;
122 #ifndef PMAP_SHPGPERPROC
123 #define PMAP_SHPGPERPROC 200
126 * Data for the pv entry allocation mechanism
128 static uma_zone_t pvzone;
129 static struct vm_object pvzone_obj;
130 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
132 static int pmap_debug_range = 1;
133 static int use_256M_pages = 1;
135 static struct mtx pmap_ctx_lock;
136 static uint16_t ctx_stack[PMAP_CONTEXT_MAX];
137 static int ctx_stack_top;
139 static int permanent_mappings = 0;
140 static uint64_t nucleus_memory;
141 static uint64_t nucleus_mappings[4];
145 struct pmap kernel_pmap_store;
147 hv_tsb_info_t kernel_td[MAX_TSB_INFO];
150 * This should be determined at boot time
151 * with tiny TLBS it doesn't make sense to try and selectively
152 * invalidate more than this
154 #define MAX_INVALIDATES 32
155 #define MAX_TSB_CLEARS 128
158 * Allocate physical memory for use in pmap_bootstrap.
160 static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size);
163 * If user pmap is processed with pmap_remove and the
164 * resident count drops to 0, there are no more pages to remove, so we
167 #define PMAP_REMOVE_DONE(pm) \
168 ((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0)
171 * Kernel MMU interface
173 #define curthread_pmap vmspace_pmap(curthread->td_proc->p_vmspace)
176 #define KDPRINTF if (pmap_debug) printf
178 if (curthread_pmap && (curthread_pmap->pm_context != 0) && ((PCPU_GET(cpumask) & curthread_pmap->pm_active) == 0)) \
179 panic("cpumask(0x%x) & active (0x%x) == 0 pid == %d\n", \
180 PCPU_GET(cpumask), curthread_pmap->pm_active, curthread->td_proc->p_pid); \
181 if (pmap_debug) printf
186 #define KDPRINTF(...)
190 static void free_pv_entry(pv_entry_t pv);
191 static pv_entry_t get_pv_entry(pmap_t locked_pmap);
193 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
194 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
195 static void pmap_remove_tte(pmap_t pmap, tte_t tte_data, vm_offset_t va);
196 static void pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot);
197 static void pmap_tsb_reset(pmap_t pmap);
198 static void pmap_tsb_resize(pmap_t pmap);
199 static void pmap_tte_hash_resize(pmap_t pmap);
201 void pmap_set_ctx_panic(uint64_t error, vm_paddr_t tsb_ra, pmap_t pmap);
203 struct tsb_resize_info {
204 uint64_t tri_tsbscratch;
209 * Quick sort callout for comparing memory regions.
211 static int mr_cmp(const void *a, const void *b);
212 static int om_cmp(const void *a, const void *b);
214 mr_cmp(const void *a, const void *b)
216 const struct ofw_mem_region *mra;
217 const struct ofw_mem_region *mrb;
221 if (mra->mr_start < mrb->mr_start)
223 else if (mra->mr_start > mrb->mr_start)
229 om_cmp(const void *a, const void *b)
231 const struct ofw_map *oma;
232 const struct ofw_map *omb;
236 if (oma->om_start < omb->om_start)
238 else if (oma->om_start > omb->om_start)
245 free_context(uint16_t ctx)
247 mtx_lock_spin(&pmap_ctx_lock);
248 ctx_stack[ctx_stack_top++] = ctx;
249 mtx_unlock_spin(&pmap_ctx_lock);
251 KASSERT(ctx_stack_top < PMAP_CONTEXT_MAX,
252 ("context stack overrun - system error"));
255 static __inline uint16_t
260 mtx_lock_spin(&pmap_ctx_lock);
261 ctx = ctx_stack[--ctx_stack_top];
262 mtx_unlock_spin(&pmap_ctx_lock);
264 KASSERT(ctx_stack_top > 0,
265 ("context stack underrun - need to implement context stealing"));
271 free_pv_entry(pv_entry_t pv)
274 uma_zfree(pvzone, pv);
278 * get a new pv_entry, allocating a block from the system
282 get_pv_entry(pmap_t locked_pmap)
284 static const struct timeval printinterval = { 60, 0 };
285 static struct timeval lastprint;
286 struct vpgqueues *vpq;
289 pv_entry_t allocated_pv, next_pv, pv;
293 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
294 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
295 allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
296 if (allocated_pv != NULL) {
298 if (pv_entry_count > pv_entry_high_water)
301 return (allocated_pv);
305 * Reclaim pv entries: At first, destroy mappings to inactive
306 * pages. After that, if a pv entry is still needed, destroy
307 * mappings to active pages.
309 if (ratecheck(&lastprint, &printinterval))
310 printf("Approaching the limit on PV entries, "
311 "increase the vm.pmap.shpgperproc tunable.\n");
313 vpq = &vm_page_queues[PQ_INACTIVE];
315 TAILQ_FOREACH(m, &vpq->pl, pageq) {
316 if (m->hold_count || m->busy)
318 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
321 /* Avoid deadlock and lock recursion. */
322 if (pmap > locked_pmap)
324 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
326 pmap->pm_stats.resident_count--;
328 tte_data = tte_hash_delete(pmap->pm_hash, va);
330 KASSERT((tte_data & VTD_WIRED) == 0,
331 ("get_pv_entry: wired pte %#jx", (uintmax_t)tte_data));
332 if (tte_data & VTD_REF)
333 vm_page_flag_set(m, PG_REFERENCED);
334 if (tte_data & VTD_W) {
335 KASSERT((tte_data & VTD_SW_W),
336 ("get_pv_entry: modified page not writable: va: %lx, tte: %lx",
341 pmap_invalidate_page(pmap, va, TRUE);
342 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
343 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
344 if (TAILQ_EMPTY(&m->md.pv_list))
345 vm_page_flag_clear(m, PG_WRITEABLE);
346 m->md.pv_list_count--;
348 if (pmap != locked_pmap)
350 if (allocated_pv == NULL)
356 if (allocated_pv == NULL) {
357 if (vpq == &vm_page_queues[PQ_INACTIVE]) {
358 vpq = &vm_page_queues[PQ_ACTIVE];
361 panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
363 return (allocated_pv);
367 * Allocate a physical page of memory directly from the phys_avail map.
368 * Can only be called from pmap_bootstrap before avail start and end are
372 pmap_bootstrap_alloc(vm_size_t size)
377 size = round_page(size);
379 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
380 if (phys_avail[i + 1] - phys_avail[i] < size)
383 phys_avail[i] += size;
384 pmap_scrub_pages(pa, size);
387 panic("pmap_bootstrap_alloc");
391 * Activate a user pmap. The pmap must be activated before its address space
392 * can be accessed in any way.
395 pmap_activate(struct thread *td)
397 pmap_t pmap, oldpmap;
401 pmap = vmspace_pmap(td->td_proc->p_vmspace);
402 oldpmap = PCPU_GET(curpmap);
404 atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
405 atomic_set_int(&pmap->pm_tlbactive, PCPU_GET(cpumask));
406 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
408 oldpmap->pm_active &= ~1;
409 pmap->pm_active |= 1;
410 pmap->pm_tlbactive |= 1;
413 pmap->pm_hashscratch = tte_hash_set_scratchpad_user(pmap->pm_hash, pmap->pm_context);
414 pmap->pm_tsbscratch = tsb_set_scratchpad_user(&pmap->pm_tsb);
415 pmap->pm_tsb_miss_count = pmap->pm_tsb_cap_miss_count = 0;
417 PCPU_SET(curpmap, pmap);
418 if (pmap->pm_context != 0)
419 if ((err = hv_mmu_tsb_ctxnon0(1, pmap->pm_tsb_ra)) != H_EOK)
420 panic("failed to set TSB 0x%lx - context == %ld\n",
421 pmap->pm_tsb_ra, pmap->pm_context);
422 stxa(MMU_CID_S, ASI_MMU_CONTEXTID, pmap->pm_context);
428 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
433 * Increase the starting virtual address of the given mapping if a
434 * different alignment might result in more superpage mappings.
437 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
438 vm_offset_t *addr, vm_size_t size)
443 * Bootstrap the system enough to run with virtual memory.
446 pmap_bootstrap(vm_offset_t ekva)
450 vm_paddr_t pa, tsb_8k_pa, tsb_4m_pa, kernel_hash_pa, nucleus_memory_start;
451 vm_size_t physsz, virtsz, kernel_hash_shift;
452 ihandle_t pmem, vmem;
454 uint64_t tsb_8k_size, tsb_4m_size, error, physmem_tunable, physmemstart_tunable;
455 vm_paddr_t real_phys_avail[128], tmp_phys_avail[128], bounds;
458 if ((vmem = OF_finddevice("/virtual-memory")) == -1)
459 panic("pmap_bootstrap: finddevice /virtual-memory");
460 if ((sz = OF_getproplen(vmem, "translations")) == -1)
461 panic("pmap_bootstrap: getproplen translations");
462 if (sizeof(translations) < sz)
463 panic("pmap_bootstrap: translations too small");
464 bzero(translations, sz);
465 if (OF_getprop(vmem, "translations", translations, sz) == -1)
466 panic("pmap_bootstrap: getprop /virtual-memory/translations");
467 sz /= sizeof(*translations);
468 translations_size = sz;
469 nucleus_memory_start = 0;
470 CTR0(KTR_PMAP, "pmap_bootstrap: translations");
471 qsort(translations, sz, sizeof (*translations), om_cmp);
473 for (i = 0; i < sz; i++) {
474 KDPRINTF("om_size=%ld om_start=%lx om_tte=%lx\n",
475 translations[i].om_size, translations[i].om_start,
476 translations[i].om_tte);
477 if ((translations[i].om_start >= KERNBASE) &&
478 (translations[i].om_start <= KERNBASE + 3*PAGE_SIZE_4M)) {
479 for (j = 0; j < translations[i].om_size; j += PAGE_SIZE_4M) {
480 KDPRINTF("mapping permanent translation\n");
481 pa = TTE_GET_PA(translations[i].om_tte) + j;
482 va = translations[i].om_start + j;
483 error = hv_mmu_map_perm_addr(va, KCONTEXT,
484 pa | TTE_KERNEL | VTD_4M, MAP_ITLB | MAP_DTLB);
486 panic("map_perm_addr returned error=%ld", error);
488 if ((nucleus_memory_start == 0) || (pa < nucleus_memory_start))
489 nucleus_memory_start = pa;
490 printf("nucleus_mappings[%d] = 0x%lx\n", permanent_mappings, pa);
491 nucleus_mappings[permanent_mappings++] = pa;
492 nucleus_memory += PAGE_SIZE_4M;
494 mp_add_nucleus_mapping(va, pa|TTE_KERNEL|VTD_4M);
501 * Find out what physical memory is available from the prom and
502 * initialize the phys_avail array. This must be done before
503 * pmap_bootstrap_alloc is called.
505 if ((pmem = OF_finddevice("/memory")) == -1)
506 panic("pmap_bootstrap: finddevice /memory");
507 if ((sz = OF_getproplen(pmem, "available")) == -1)
508 panic("pmap_bootstrap: getproplen /memory/available");
509 if (sizeof(vm_paddr_t)*128 < sz) /* FIXME */
510 panic("pmap_bootstrap: phys_avail too small");
511 if (sizeof(mra) < sz)
512 panic("pmap_bootstrap: mra too small");
514 if (OF_getprop(pmem, "available", mra, sz) == -1)
515 panic("pmap_bootstrap: getprop /memory/available");
518 CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
520 qsort(mra, sz, sizeof (*mra), mr_cmp);
521 physmemstart_tunable = physmem_tunable = physmem = physsz = 0;
523 if (TUNABLE_ULONG_FETCH("hw.physmemstart", &physmemstart_tunable)) {
524 KDPRINTF("desired physmemstart=0x%lx\n", physmemstart_tunable);
526 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable)) {
527 physmem = atop(physmem_tunable);
528 KDPRINTF("desired physmem=0x%lx\n", physmem_tunable);
530 if ((physmem_tunable != 0) && (physmemstart_tunable != 0))
531 physmem_tunable += physmemstart_tunable;
533 bzero(real_phys_avail, sizeof(real_phys_avail));
534 bzero(tmp_phys_avail, sizeof(tmp_phys_avail));
536 for (i = 0, j = 0; i < sz; i++) {
538 KDPRINTF("start=%#lx size=%#lx\n", mra[i].mr_start, mra[i].mr_size);
539 if (mra[i].mr_size < PAGE_SIZE_4M)
542 if ((mra[i].mr_start & PAGE_MASK_4M) || (mra[i].mr_size & PAGE_MASK_4M)) {
543 uint64_t newstart, roundup;
544 newstart = ((mra[i].mr_start + (PAGE_MASK_4M)) & ~PAGE_MASK_4M);
545 roundup = newstart - mra[i].mr_start;
546 size = (mra[i].mr_size - roundup) & ~PAGE_MASK_4M;
547 mra[i].mr_start = newstart;
548 if (size < PAGE_SIZE_4M)
550 mra[i].mr_size = size;
552 real_phys_avail[j] = mra[i].mr_start;
553 if (physmem_tunable != 0 && ((physsz + mra[i].mr_size) >= physmem_tunable)) {
554 mra[i].mr_size = physmem_tunable - physsz;
555 physsz = physmem_tunable;
556 real_phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
559 physsz += mra[i].mr_size;
560 real_phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
563 physmem = btoc(physsz - physmemstart_tunable);
566 * This is needed for versions of OFW that would allocate us memory
567 * and then forget to remove it from the available ranges ...
568 * as well as for compensating for the above move of nucleus pages
570 for (i = 0, j = 0, bounds = (1UL<<32); real_phys_avail[i] != 0; i += 2) {
571 vm_paddr_t start = real_phys_avail[i];
572 uint64_t end = real_phys_avail[i + 1];
573 CTR2(KTR_PMAP, "start=%#lx size=%#lx\n", start, end);
574 KDPRINTF("real_phys start=%#lx end=%#lx\n", start, end);
576 * Is kernel memory at the beginning of range?
578 if (nucleus_memory_start == start) {
579 start += nucleus_memory;
582 * Is kernel memory at the end of range?
584 if (nucleus_memory_start == (end - nucleus_memory))
585 end -= nucleus_memory;
587 if (physmemstart_tunable != 0 &&
588 (end < physmemstart_tunable))
591 if (physmemstart_tunable != 0 &&
592 ((start < physmemstart_tunable))) {
593 start = physmemstart_tunable;
597 * Is kernel memory in the middle somewhere?
599 if ((nucleus_memory_start > start) &&
600 (nucleus_memory_start < end)) {
601 phys_avail[j] = start;
602 phys_avail[j+1] = nucleus_memory_start;
603 start = nucleus_memory_start + nucleus_memory;
607 * Break phys_avail up on 4GB boundaries to try
608 * to work around PCI-e allocation bug
609 * we rely on the fact that kernel memory is allocated
610 * from the first 4GB of physical memory
612 while (bounds < start)
615 while (bounds < end) {
616 phys_avail[j] = start;
617 phys_avail[j + 1] = bounds;
622 phys_avail[j] = start;
623 phys_avail[j + 1] = end;
628 * Merge nucleus memory in to real_phys_avail
631 for (i = 0; real_phys_avail[i] != 0; i += 2) {
632 if (real_phys_avail[i] == nucleus_memory_start + nucleus_memory)
633 real_phys_avail[i] -= nucleus_memory;
635 if (real_phys_avail[i + 1] == nucleus_memory_start)
636 real_phys_avail[i + 1] += nucleus_memory;
638 if (real_phys_avail[i + 1] == real_phys_avail[i + 2]) {
639 real_phys_avail[i + 1] = real_phys_avail[i + 3];
640 for (k = i + 2; real_phys_avail[k] != 0; k += 2) {
641 real_phys_avail[k] = real_phys_avail[k + 2];
642 real_phys_avail[k + 1] = real_phys_avail[k + 3];
646 for (i = 0; phys_avail[i] != 0; i += 2)
647 if (pmap_debug_range || pmap_debug)
648 printf("phys_avail[%d]=0x%lx phys_avail[%d]=0x%lx\n",
649 i, phys_avail[i], i+1, phys_avail[i+1]);
652 * Shuffle the memory range containing the 256MB page with
653 * nucleus_memory to the beginning of the phys_avail array
654 * so that physical memory from that page is preferentially
657 for (j = 0; phys_avail[j] != 0; j += 2)
658 if (nucleus_memory_start < phys_avail[j])
661 * Don't shuffle unless we have a full 256M page in the range
662 * our kernel malloc appears to be horribly brittle
664 if ((phys_avail[j + 1] - phys_avail[j]) <
665 (PAGE_SIZE_256M - nucleus_memory))
668 for (i = j, k = 0; phys_avail[i] != 0; k++, i++)
669 tmp_phys_avail[k] = phys_avail[i];
670 for (i = 0; i < j; i++)
671 tmp_phys_avail[k + i] = phys_avail[i];
672 for (i = 0; i < 128; i++)
673 phys_avail[i] = tmp_phys_avail[i];
676 for (i = 0; real_phys_avail[i] != 0; i += 2)
677 if (pmap_debug_range || pmap_debug)
678 printf("real_phys_avail[%d]=0x%lx real_phys_avail[%d]=0x%lx\n",
679 i, real_phys_avail[i], i+1, real_phys_avail[i+1]);
681 for (i = 0; phys_avail[i] != 0; i += 2)
682 if (pmap_debug_range || pmap_debug)
683 printf("phys_avail[%d]=0x%lx phys_avail[%d]=0x%lx\n",
684 i, phys_avail[i], i+1, phys_avail[i+1]);
686 * Calculate the size of kernel virtual memory, and the size and mask
687 * for the kernel tsb.
689 virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
690 vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
693 * Set the start and end of kva. The kernel is loaded at the first
694 * available 4 meg super page, so round up to the end of the page.
696 virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
697 virtual_end = vm_max_kernel_address;
698 kernel_vm_end = vm_max_kernel_address;
701 * Allocate and map a 4MB page for the kernel hashtable
705 kernel_hash_shift = 10; /* PAGE_SIZE_4M*2 */
707 kernel_hash_shift = 6; /* PAGE_SIZE_8K*64 */
710 kernel_hash_pa = pmap_bootstrap_alloc((1<<(kernel_hash_shift + PAGE_SHIFT)));
711 if (kernel_hash_pa & PAGE_MASK_4M)
712 panic("pmap_bootstrap: hashtable pa unaligned\n");
714 * Set up TSB descriptors for the hypervisor
718 tsb_8k_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
720 /* avoid alignment complaints from the hypervisor */
721 tsb_8k_size = PAGE_SIZE_4M;
724 tsb_8k_pa = pmap_bootstrap_alloc(tsb_8k_size);
725 if (tsb_8k_pa & PAGE_MASK_4M)
726 panic("pmap_bootstrap: tsb unaligned\n");
727 KDPRINTF("tsb_8k_size is 0x%lx, tsb_8k_pa is 0x%lx\n", tsb_8k_size, tsb_8k_pa);
729 tsb_4m_size = (virtsz >> (PAGE_SHIFT_4M - TTE_SHIFT)) << 3;
730 tsb_4m_pa = pmap_bootstrap_alloc(tsb_4m_size);
732 kernel_td[TSB8K_INDEX].hti_idxpgsz = TTE8K;
733 kernel_td[TSB8K_INDEX].hti_assoc = 1;
734 kernel_td[TSB8K_INDEX].hti_ntte = (tsb_8k_size >> TTE_SHIFT);
735 kernel_td[TSB8K_INDEX].hti_ctx_index = 0;
736 kernel_td[TSB8K_INDEX].hti_pgszs = TSB8K;
737 kernel_td[TSB8K_INDEX].hti_rsvd = 0;
738 kernel_td[TSB8K_INDEX].hti_ra = tsb_8k_pa;
741 * Initialize kernel's private TSB from 8K page TSB
744 kernel_pmap->pm_tsb.hti_idxpgsz = TTE8K;
745 kernel_pmap->pm_tsb.hti_assoc = 1;
746 kernel_pmap->pm_tsb.hti_ntte = (tsb_8k_size >> TTE_SHIFT);
747 kernel_pmap->pm_tsb.hti_ctx_index = 0;
748 kernel_pmap->pm_tsb.hti_pgszs = TSB8K;
749 kernel_pmap->pm_tsb.hti_rsvd = 0;
750 kernel_pmap->pm_tsb.hti_ra = tsb_8k_pa;
752 kernel_pmap->pm_tsb_ra = vtophys((vm_offset_t)&kernel_pmap->pm_tsb);
753 tsb_set_scratchpad_kernel(&kernel_pmap->pm_tsb);
756 * Initialize kernel TSB for 4M pages
757 * currently (not by design) used for permanent mappings
761 KDPRINTF("tsb_4m_pa is 0x%lx tsb_4m_size is 0x%lx\n", tsb_4m_pa, tsb_4m_size);
762 kernel_td[TSB4M_INDEX].hti_idxpgsz = TTE4M;
763 kernel_td[TSB4M_INDEX].hti_assoc = 1;
764 kernel_td[TSB4M_INDEX].hti_ntte = (tsb_4m_size >> TTE_SHIFT);
765 kernel_td[TSB4M_INDEX].hti_ctx_index = 0;
766 kernel_td[TSB4M_INDEX].hti_pgszs = TSB4M|TSB256M;
767 kernel_td[TSB4M_INDEX].hti_rsvd = 0;
768 kernel_td[TSB4M_INDEX].hti_ra = tsb_4m_pa;
770 * allocate MMU fault status areas for all CPUS
772 mmu_fault_status_area = pmap_bootstrap_alloc(MMFSA_SIZE*MAXCPU);
775 * Allocate and map the dynamic per-CPU area for the BSP.
777 dpcpu0 = (void *)TLB_PHYS_TO_DIRECT(pmap_bootstrap_alloc(DPCPU_SIZE));
780 * Allocate and map the message buffer.
782 msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
783 msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(msgbuf_phys);
786 * Allocate a kernel stack with guard page for thread0 and map it into
789 pa = pmap_bootstrap_alloc(KSTACK_PAGES*PAGE_SIZE);
791 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE;
792 kstack0 = virtual_avail;
793 virtual_avail += KSTACK_PAGES * PAGE_SIZE;
794 for (i = 0; i < KSTACK_PAGES; i++) {
795 pa = kstack0_phys + i * PAGE_SIZE;
796 va = kstack0 + i * PAGE_SIZE;
797 tsb_set_tte_real(&kernel_td[TSB8K_INDEX], va, va,
798 pa | TTE_KERNEL | VTD_8K, 0);
801 * Calculate the last available physical address.
803 for (i = 0; phys_avail[i + 2] != 0; i += 2)
804 KDPRINTF("phys_avail[%d]=0x%lx phys_avail[%d]=0x%lx\n",
805 i, phys_avail[i], i+1, phys_avail[i+1]);
806 KDPRINTF("phys_avail[%d]=0x%lx phys_avail[%d]=0x%lx\n",
807 i, phys_avail[i], i+1, phys_avail[i+1]);
809 Maxmem = sparc64_btop(phys_avail[i + 1]);
812 * Add the prom mappings to the kernel tsb.
814 for (i = 0; i < sz; i++) {
816 "translation: start=%#lx size=%#lx tte=%#lx",
817 translations[i].om_start, translations[i].om_size,
818 translations[i].om_tte);
819 KDPRINTF("om_size=%ld om_start=%lx om_tte=%lx\n",
820 translations[i].om_size, translations[i].om_start,
821 translations[i].om_tte);
823 if (translations[i].om_start < VM_MIN_PROM_ADDRESS ||
824 translations[i].om_start > VM_MAX_PROM_ADDRESS)
827 for (off = 0; off < translations[i].om_size;
829 va = translations[i].om_start + off;
830 pa = TTE_GET_PA(translations[i].om_tte) + off;
831 tsb_assert_invalid(&kernel_td[TSB8K_INDEX], va);
832 tsb_set_tte_real(&kernel_td[TSB8K_INDEX], va, va, pa |
833 TTE_KERNEL | VTD_8K, 0);
837 if ((error = hv_mmu_tsb_ctx0(MAX_TSB_INFO,
838 vtophys((vm_offset_t)kernel_td))) != H_EOK)
839 panic("failed to set ctx0 TSBs error: %ld", error);
842 mp_set_tsb_desc_ra(vtophys((vm_offset_t)&kernel_td));
845 * setup direct mappings
848 for (i = 0, pa = real_phys_avail[i]; pa != 0; i += 2, pa = real_phys_avail[i]) {
849 vm_paddr_t tag_pa = 0, next_pa = 0;
850 uint64_t size_bits = VTD_4M;
851 while (pa < real_phys_avail[i + 1]) {
852 if (use_256M_pages &&
853 (pa & PAGE_MASK_256M) == 0 &&
854 ((pa + PAGE_SIZE_256M) <= real_phys_avail[i + 1])) {
856 size_bits = VTD_256M;
857 next_pa = pa + PAGE_SIZE_256M;
858 } else if (next_pa <= pa) {
862 tsb_assert_invalid(&kernel_td[TSB4M_INDEX], TLB_PHYS_TO_DIRECT(pa));
863 tsb_set_tte_real(&kernel_td[TSB4M_INDEX], TLB_PHYS_TO_DIRECT(pa),
864 TLB_PHYS_TO_DIRECT(pa),
865 tag_pa | TTE_KERNEL | size_bits, 0);
871 * Get the available physical memory ranges from /memory/reg. These
872 * are only used for kernel dumps, but it may not be wise to do prom
873 * calls in that situation.
875 if ((sz = OF_getproplen(pmem, "reg")) == -1)
876 panic("pmap_bootstrap: getproplen /memory/reg");
877 if (sizeof(sparc64_memreg) < sz)
878 panic("pmap_bootstrap: sparc64_memreg too small");
879 if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
880 panic("pmap_bootstrap: getprop /memory/reg");
881 sparc64_nmemreg = sz / sizeof(*sparc64_memreg);
885 pm->pm_tlbactive = ~0;
887 PMAP_LOCK_INIT(kernel_pmap);
889 TAILQ_INIT(&kernel_pmap->pm_pvlist);
892 * This could happen earlier - but I put it here to avoid
893 * attempts to do updates until they're legal
895 pm->pm_hash = tte_hash_kernel_create(TLB_PHYS_TO_DIRECT(kernel_hash_pa), kernel_hash_shift,
896 pmap_bootstrap_alloc(PAGE_SIZE));
897 pm->pm_hashscratch = tte_hash_set_scratchpad_kernel(pm->pm_hash);
899 for (i = 0; i < translations_size; i++) {
900 KDPRINTF("om_size=%ld om_start=%lx om_tte=%lx\n",
901 translations[i].om_size, translations[i].om_start,
902 translations[i].om_tte);
904 if (translations[i].om_start < VM_MIN_PROM_ADDRESS ||
905 translations[i].om_start > VM_MAX_PROM_ADDRESS) {
906 KDPRINTF("skipping\n");
909 for (off = 0; off < translations[i].om_size; off += PAGE_SIZE) {
910 va = translations[i].om_start + off;
911 pa = TTE_GET_PA(translations[i].om_tte) + off;
912 tte_hash_insert(pm->pm_hash, va, pa | TTE_KERNEL | VTD_8K);
914 KDPRINTF("set om_size=%ld om_start=%lx om_tte=%lx\n",
915 translations[i].om_size, translations[i].om_start,
916 translations[i].om_tte);
918 for (i = 0; i < KSTACK_PAGES; i++) {
919 pa = kstack0_phys + i * PAGE_SIZE;
920 va = kstack0 + i * PAGE_SIZE;
921 tte_hash_insert(pm->pm_hash, va, pa | TTE_KERNEL | VTD_8K);
924 * Add direct mappings to hash
928 /* hash only supports 8k pages */
929 for (pa = PAGE_SIZE_4M; pa < phys_avail[2]; pa += PAGE_SIZE_4M)
930 tte_hash_insert(pm->pm_hash, TLB_PHYS_TO_DIRECT(pa),
931 pa | TTE_KERNEL | VTD_4M);
936 printf("pmap_bootstrap done\n");
942 * Routine: pmap_change_wiring
943 * Function: Change the wiring attribute for a map/virtual-address
946 * The mapping must already exist in the pmap.
949 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
953 iswired = tte_get_virt_bit(pmap, va, VTD_WIRED);
955 if (wired && !iswired) {
956 pmap->pm_stats.wired_count++;
957 tte_set_virt_bit(pmap, va, VTD_WIRED);
958 } else if (!wired && iswired) {
959 pmap->pm_stats.wired_count--;
960 tte_clear_virt_bit(pmap, va, VTD_WIRED);
966 pmap_clear_modify(vm_page_t m)
968 KDPRINTF("pmap_clear_modify(0x%lx)\n", VM_PAGE_TO_PHYS(m));
969 tte_clear_phys_bit(m, VTD_W);
973 pmap_clear_reference(vm_page_t m)
975 KDPRINTF("pmap_clear_reference(0x%lx)\n", VM_PAGE_TO_PHYS(m));
976 tte_clear_phys_bit(m, VTD_REF);
980 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
981 vm_size_t len, vm_offset_t src_addr)
983 vm_offset_t addr, end_addr;
985 end_addr = src_addr + len;
987 * Don't let optional prefaulting of pages make us go
988 * way below the low water mark of free pages or way
989 * above high water mark of used pv entries.
991 if (cnt.v_free_count < cnt.v_free_reserved ||
992 pv_entry_count > pv_entry_high_water)
996 vm_page_lock_queues();
997 if (dst_pmap < src_pmap) {
1001 PMAP_LOCK(src_pmap);
1002 PMAP_LOCK(dst_pmap);
1004 for (addr = src_addr; addr < end_addr; addr += PAGE_SIZE) {
1008 tte_data = tte_hash_lookup(src_pmap->pm_hash, addr);
1010 if ((tte_data & VTD_MANAGED) != 0) {
1011 if (tte_hash_lookup(dst_pmap->pm_hash, addr) == 0) {
1012 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
1014 tte_hash_insert(dst_pmap->pm_hash, addr, tte_data & ~(VTD_W|VTD_REF|VTD_WIRED));
1015 dst_pmap->pm_stats.resident_count++;
1016 pmap_insert_entry(dst_pmap, addr, m);
1020 vm_page_unlock_queues();
1021 PMAP_UNLOCK(src_pmap);
1022 PMAP_UNLOCK(dst_pmap);
1026 pmap_copy_page(vm_page_t src, vm_page_t dst)
1028 vm_paddr_t srcpa, dstpa;
1029 srcpa = VM_PAGE_TO_PHYS(src);
1030 dstpa = VM_PAGE_TO_PHYS(dst);
1032 novbcopy((char *)TLB_PHYS_TO_DIRECT(srcpa), (char *)TLB_PHYS_TO_DIRECT(dstpa), PAGE_SIZE);
1037 static __inline void
1038 pmap_add_tte(pmap_t pmap, vm_offset_t va, vm_page_t m, tte_t *tte_data, int wired)
1042 pmap->pm_stats.wired_count++;
1044 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
1045 pmap_insert_entry(pmap, va, m);
1046 *tte_data |= VTD_MANAGED;
1051 * Map the given physical page at the specified virtual address in the
1052 * target pmap with the protection requested. If specified the page
1053 * will be wired down.
1056 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
1057 vm_prot_t prot, boolean_t wired)
1060 uint64_t tte_data, otte_data;
1064 if (pmap->pm_context)
1065 DPRINTF("pmap_enter(va=%lx, pa=0x%lx, prot=%x)\n", va,
1066 VM_PAGE_TO_PHYS(m), prot);
1070 vm_page_lock_queues();
1073 tte_data = pa = VM_PAGE_TO_PHYS(m);
1074 otte_data = tte_hash_delete(pmap->pm_hash, va);
1075 opa = TTE_GET_PA(otte_data);
1079 * This is a new mapping
1081 pmap->pm_stats.resident_count++;
1082 pmap_add_tte(pmap, va, m, &tte_data, wired);
1084 } else if (pa != opa) {
1086 * Mapping has changed, handle validating new mapping.
1089 if (otte_data & VTD_WIRED)
1090 pmap->pm_stats.wired_count--;
1092 if (otte_data & VTD_MANAGED) {
1093 om = PHYS_TO_VM_PAGE(opa);
1094 pmap_remove_entry(pmap, om, va);
1097 pmap_add_tte(pmap, va, m, &tte_data, wired);
1099 } else /* (pa == opa) */ {
1101 * Mapping has not changed, must be protection or wiring change.
1105 * Wiring change, just update stats. We don't worry about
1106 * wiring PT pages as they remain resident as long as there
1107 * are valid mappings in them. Hence, if a user page is wired,
1108 * the PT page will be also.
1110 if (wired && ((otte_data & VTD_WIRED) == 0))
1111 pmap->pm_stats.wired_count++;
1112 else if (!wired && (otte_data & VTD_WIRED))
1113 pmap->pm_stats.wired_count--;
1116 * We might be turning off write access to the page,
1117 * so we go ahead and sense modify status.
1119 if (otte_data & VTD_MANAGED) {
1121 tte_data |= VTD_MANAGED;
1126 * Now validate mapping with desired protection/wiring.
1128 if ((prot & VM_PROT_WRITE) != 0) {
1129 tte_data |= VTD_SW_W;
1130 vm_page_flag_set(m, PG_WRITEABLE);
1132 if ((prot & VM_PROT_EXECUTE) != 0)
1135 tte_data |= VTD_WIRED;
1136 if (pmap == kernel_pmap)
1140 if ((otte_data & ~(VTD_W|VTD_REF)) != tte_data) {
1141 if (otte_data & VTD_V) {
1142 if (otte_data & VTD_REF) {
1143 if (otte_data & VTD_MANAGED)
1144 vm_page_flag_set(om, PG_REFERENCED);
1145 if ((opa != pa) || ((opa & VTD_X) != (pa & VTD_X)))
1148 if (otte_data & VTD_W) {
1149 if (otte_data & VTD_MANAGED)
1151 if ((pa & VTD_SW_W) != 0)
1155 pmap_invalidate_page(pmap, va, TRUE);
1160 tte_hash_insert(pmap->pm_hash, va, tte_data|TTE_MINFLAGS|VTD_REF);
1162 * XXX this needs to be locked for the threaded / kernel case
1164 tsb_set_tte(&pmap->pm_tsb, va, tte_data|TTE_MINFLAGS|VTD_REF,
1167 if (tte_hash_needs_resize(pmap->pm_hash))
1168 pmap_tte_hash_resize(pmap);
1171 * 512 is an arbitrary number of tsb misses
1173 if (0 && pmap->pm_context != 0 && pmap->pm_tsb_miss_count > 512)
1174 pmap_tsb_resize(pmap);
1176 vm_page_unlock_queues();
1182 * Maps a sequence of resident pages belonging to the same object.
1183 * The sequence begins with the given page m_start. This page is
1184 * mapped at the given virtual address start. Each subsequent page is
1185 * mapped at a virtual address that is offset from start by the same
1186 * amount as the page is offset from m_start within the object. The
1187 * last page in the sequence is the page with the largest offset from
1188 * m_start that can be mapped at a virtual address less than the given
1189 * virtual address end. Not every virtual page between start and end
1190 * is mapped; only those for which a resident page exists with the
1191 * corresponding offset from m_start are mapped.
1194 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
1195 vm_page_t m_start, vm_prot_t prot)
1198 vm_pindex_t diff, psize;
1200 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
1201 psize = atop(end - start);
1204 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1205 pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot);
1206 m = TAILQ_NEXT(m, listq);
/*
 * Enter a single page mapping without faulting machinery; thin wrapper
 * around pmap_enter_quick_locked().  NOTE(review): the PMAP lock/unlock
 * lines are elided from this listing — presumably taken around the call.
 */
1212 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
1215 	pmap_enter_quick_locked(pmap, va, m, prot);
/*
 * Locked variant: install a read-only-style "quick" mapping for page m at
 * va.  Skips the work if a TTE already exists for va, links the page onto
 * the pv list when managed, and inserts the new TTE into the hash.
 */
1220 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
1225 	if (pmap->pm_context)
1226 		KDPRINTF("pmap_enter_quick(ctx=0x%lx va=%lx, pa=0x%lx prot=%x)\n",
1227 		    pmap->pm_context, va, VM_PAGE_TO_PHYS(m), prot);
1229 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
/* An existing mapping at va wins; bail out (return is elided here). */
1230 	if (tte_hash_lookup(pmap->pm_hash, va))
1233 	tte_data = VM_PAGE_TO_PHYS(m);
1235 	 * Enter on the PV list if part of our managed memory. Note that we
1236 	 * raise IPL while manipulating pv_table since pmap_enter can be
1237 	 * called at interrupt time.
1239 	if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
1240 		pmap_insert_entry(pmap, va, m);
1241 		tte_data |= VTD_MANAGED;
1244 	pmap->pm_stats.resident_count++;
/* Executable permission bit is set here; the set line is elided. */
1246 	if ((prot & VM_PROT_EXECUTE) != 0)
1249 	tte_hash_insert(pmap->pm_hash, va, tte_data | TTE_MINFLAGS);
1253 * Extract the physical page address associated with the given
1254 * map/virtual_address pair.
/*
 * Return the physical address backing va in this pmap, by TTE-hash lookup
 * plus the within-page offset.  NOTE(review): no valid-bit check is visible
 * here — presumably a zero tte_data yields pa == offset; confirm callers.
 */
1257 pmap_extract(pmap_t pmap, vm_offset_t va)
1262 	tte_data = tte_hash_lookup(pmap->pm_hash, va);
1263 	pa = TTE_GET_PA(tte_data) | (va & TTE_GET_PAGE_MASK(tte_data));
1269 * Atomically extract and hold the physical page with the given
1270 * pmap and virtual address pair if that mapping permits the given
/*
 * Atomically look up va and, if the mapping permits 'prot' (writable TTE
 * or no write requested), hold the backing vm_page.  The vm_page_hold()
 * call itself is elided from this listing — TODO confirm against the file.
 */
1274 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1280 	vm_page_lock_queues();
1282 	tte_data = tte_hash_lookup(pmap->pm_hash, va);
/* Writable check: VTD_SW_W grants write, otherwise prot must be read-only. */
1283 	if (tte_data != 0 &&
1284 	    ((tte_data & VTD_SW_W) || (prot & VM_PROT_WRITE) == 0)) {
1285 		m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
1288 	vm_page_unlock_queues();
/*
 * Allocate npages physically contiguous pages with the given alignment,
 * retrying across phys_avail[] segments (and waiting, per the printf, when
 * vm_phys_alloc_contig fails), then zero any page not already PG_ZERO and
 * return the direct-mapped KVA of the run.
 */
1295 pmap_alloc_zeroed_contig_pages(int npages, uint64_t alignment)
1303 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
/* Upper bound (1UL<<34) caps the physical address searched — why 2^34? TODO confirm. */
1304 		m = vm_phys_alloc_contig(npages, phys_avail[i],
1305 		    phys_avail[i + 1], alignment, (1UL<<34));
1310 		printf("vm_phys_alloc_contig failed - waiting to retry\n");
/* Zero only pages the allocator did not hand back pre-zeroed. */
1315 	for (i = 0, tm = m; i < npages; i++, tm++) {
1317 		if ((tm->flags & PG_ZERO) == 0)
1320 	ptr = (void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
/*
 * Free a contiguous run previously returned by
 * pmap_alloc_zeroed_contig_pages(); 'ptr' is a direct-map address.
 * Each page's wire count is dropped (per the atomic below).
 */
1326 pmap_free_contig_pages(void *ptr, int npages)
1331 	m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)ptr));
1332 	for (i = 0; i < npages; i++, m++) {
1334 		atomic_subtract_int(&cnt.v_wire_count, 1);
/*
 * Grow the kernel virtual address space to 'addr'.
 * NOTE(review): the body is elided from this listing — presumably a no-op
 * on sun4v (direct map / TSB based); confirm against the full file.
 */
1340 pmap_growkernel(vm_offset_t addr)
/*
 * NOTE(review): the function header is elided from this listing; by the
 * body this is presumably pmap_init() — set up the context-ID stack, the
 * context lock, and the UMA zone for pv entries with a high-water mark.
 */
1349 /* allocate pv_entry zones */
1350 int shpgperproc = PMAP_SHPGPERPROC;
/* Pre-fill the free-context stack with every context id except 0 (kernel). */
1352 for (ctx_stack_top = 1; ctx_stack_top < PMAP_CONTEXT_MAX; ctx_stack_top++)
1353 ctx_stack[ctx_stack_top] = ctx_stack_top;
1355 mtx_init(&pmap_ctx_lock, "ctx lock", NULL, MTX_SPIN);
1358 * Initialize the address space (zone) for the pv entries. Set a
1359 * high water mark so that the system can recover from excessive
1360 * numbers of pv entries.
1362 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
1363 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
/* Tunables may override the computed maximum before sizing the zone. */
1364 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1365 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
1366 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
/* High water at 90% of the maximum. */
1367 pv_entry_high_water = 9 * (pv_entry_max / 10);
1368 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
1375 * Create a pv entry for the page at pa for the given (pmap, va) mapping.
/*
 * Allocate a pv entry and link it onto both the pmap's pv list and the
 * page's pv list; bumps the page's pv_list_count.  Requires the pmap lock
 * and the page-queues lock (asserted below).
 */
1379 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
1383 	KDPRINTF("pmap_insert_entry(va=0x%lx, pa=0x%lx)\n", va, VM_PAGE_TO_PHYS(m));
1384 	pv = get_pv_entry(pmap);
/* pv->pv_va / pv->pv_pmap assignments are elided from this listing. */
1388 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1389 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1390 	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1391 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1392 	m->md.pv_list_count++;
/* Debug latch: dump trap traces at most once per boot on IPI timeout. */
1396 static int trap_trace_report_done;
/*
 * Send the TLB-shootdown function 'func' with two arguments to every other
 * CPU that has this pmap active, then spin until all targets acknowledge.
 * On timeout it dumps diagnostic state and eventually panics; on partial
 * coverage it retries for CPUs that became active meanwhile.  Returns the
 * mask of CPUs actually signalled.  Must run at PIL 14 (asserted below).
 */
1401 pmap_ipi(pmap_t pmap, char *func, uint64_t arg1, uint64_t arg2)
1404 int i, cpu_count, retried;
1406 cpumask_t cpumask, active, curactive;
1407 cpumask_t active_total, ackmask;
1415 cpumask = PCPU_GET(cpumask);
1416 cpulist = PCPU_GET(cpulist);
1419 if (rdpr(pil) != 14)
1420 panic("pil %ld != 14", rdpr(pil));
1422 #ifndef CPUMASK_NOT_BEING_ERRONEOUSLY_CHANGED
1423 /* by definition cpumask should have curcpu's bit set */
1424 if (cpumask != (1 << curcpu))
1425 panic("cpumask(0x%x) != (1 << curcpu) (0x%x)\n",
1426 cpumask, (1 << curcpu));
/* Nothing to do if no other CPU has the pmap's TLB entries. */
1430 if ((active_total = (pmap->pm_tlbactive & ~cpumask)) == 0)
/* User pmaps target only CPUs in pm_tlbactive; the kernel pmap targets all. */
1433 if (pmap->pm_context != 0)
1434 active_total = active = (pmap->pm_tlbactive & ~cpumask);
1437 active_total = active = PCPU_GET(other_cpus);
/* Expand the target mask into an explicit CPU-id list for cpu_ipi_selected(). */
1444 for (i = curactive = cpu_count = 0, cpus = active; i < mp_ncpus && cpus; i++, cpus = (cpus>>1)) {
1445 if ((cpus & 0x1) == 0)
1448 curactive |= (1 << i);
1449 cpulist[cpu_count] = (uint16_t)i;
1454 cpu_ipi_selected(cpu_count, cpulist, (uint64_t)func, (uint64_t)arg1,
1455 (uint64_t)arg2, (uint64_t *)&ackmask);
/* Spin for acknowledgements; the timeout counter lines are elided here. */
1457 while (ackmask != curactive) {
1465 printf("cpu with cpumask=0x%x appears to not be responding to ipis\n",
1466 curactive & ~ackmask);
1469 if (!trap_trace_report_done) {
1470 trap_trace_report_done = 1;
1471 for (j = 0; j < MAXCPU; j++)
1472 if (((1 << j) & curactive & ~ackmask) != 0) {
1473 struct pcpu *pc = pcpu_find(j);
1474 printf("pcpu pad 0x%jx 0x%jx 0x%jx 0x%jx 0x%jx 0x%jx 0x%jx\n",
1475 pc->pad[0], pc->pad[1], pc->pad[2], pc->pad[3],
1476 pc->pad[4], pc->pad[5], pc->pad[6]);
1477 trap_trace_report(j);
/* Query the hypervisor for the first unresponsive CPU's run state. */
1482 hv_cpu_state((uint64_t)ffs64(curactive & ~ackmask), &cpu_state);
1483 printf("cpu_state of %ld is %ld\n", ffs64(curactive & ~ackmask), cpu_state);
1485 printf("I'm going to send off another ipi just to confirm that it isn't a memory barrier bug\n"
1486 "and then I'm going to panic\n");
1492 panic(" ackmask=0x%x active=0x%x\n", ackmask, curactive);
1496 active_total |= curactive;
/* Retry if more CPUs activated this pmap while we were shooting down. */
1497 if ((active = ((pmap->pm_tlbactive & all_cpus) & ~(active_total|cpumask))) != 0) {
1498 printf("pmap_ipi: retrying");
1502 return (active_total);
/*
 * Invalidate the TLB entry for a single page: optionally purge the TSB
 * entry, demap locally, then (per the tl_invlpg IPI below) demap on all
 * other CPUs with this pmap active.
 */
1507 pmap_invalidate_page(pmap_t pmap, vm_offset_t va, int cleartsb)
1510 	if (cleartsb == TRUE)
1511 		tsb_clear_tte(&pmap->pm_tsb, va);
1513 	DPRINTF("pmap_invalidate_page(va=0x%lx)\n", va);
/* Local demap first, then cross-CPU shootdown via pmap_ipi(). */
1515 	invlpg(va, pmap->pm_context);
1517 	pmap_ipi(pmap, (void *)tl_invlpg, (uint64_t)va, (uint64_t)pmap->pm_context);
/*
 * Invalidate all TLB entries for [sva, eva): single-page ranges delegate to
 * pmap_invalidate_page(); small ranges are demapped page by page; larger
 * ranges flush the whole context (or the whole TLB for the kernel pmap),
 * then the matching shootdown is broadcast via pmap_ipi().
 *
 * BUGFIX(review): the small-range test read (sva - eva); with the
 * KASSERT(sva < eva) below and vm_offset_t being unsigned, that difference
 * wraps to a huge value, so the per-page path could never be taken and every
 * multi-page range degenerated into a full context/TLB flush.  Corrected to
 * (eva - sva), matching the (eva - sva) usage elsewhere in this function.
 */
1523 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int cleartsb)
1525 vm_offset_t tva, invlrngva;
/* Exactly one page: use the cheaper single-page primitive. */
1530 if ((eva - sva) == PAGE_SIZE) {
1531 pmap_invalidate_page(pmap, sva, cleartsb);
1536 KASSERT(sva < eva, ("invalidating negative or zero range sva=0x%lx eva=0x%lx", sva, eva));
1538 if (cleartsb == TRUE)
1539 tsb_clear_range(&pmap->pm_tsb, sva, eva);
/* Small ranges (< 64 pages): demap each 8K page locally. */
1542 if ((eva - sva) < PAGE_SIZE*64) {
1543 for (tva = sva; tva < eva; tva += PAGE_SIZE_8K)
1544 invlpg(tva, pmap->pm_context);
/* Larger user ranges: flush the whole context instead. */
1546 } else if (pmap->pm_context) {
1548 invlctx(pmap->pm_context);
/* Encode start + page count into one argument for the range-IPI handler. */
1555 invlrngva = sva | ((eva - sva) >> PAGE_SHIFT);
1556 active = pmap_ipi(pmap, (void *)func, pmap->pm_context, invlrngva);
/* Drop shootdown bookkeeping for CPUs that no longer run this pmap. */
1557 active &= ~pmap->pm_active;
1558 atomic_clear_int(&pmap->pm_tlbactive, active);
/*
 * Invalidate every TLB entry for a user pmap: clear the TSB, flush the
 * context locally, broadcast tl_invlctx to other active CPUs, and shrink
 * pm_tlbactive back to the CPUs currently running the pmap.
 */
1564 pmap_invalidate_all(pmap_t pmap)
1567 	KASSERT(pmap != kernel_pmap, ("invalidate_all called on kernel_pmap"));
1569 	tsb_clear(&pmap->pm_tsb);
1572 	invlctx(pmap->pm_context);
1574 	pmap_ipi(pmap, tl_invlctx, pmap->pm_context, 0);
1575 	pmap->pm_tlbactive = pmap->pm_active;
/* Return whether any mapping of m has the hardware-modified (VTD_W) bit set. */
1581 pmap_is_modified(vm_page_t m)
1584 	return (tte_get_phys_bit(m, VTD_W));
/* A va is prefaultable iff it has no TTE yet in this pmap's hash. */
1589 pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
1591 	return (tte_hash_lookup(pmap->pm_hash, va) == 0);
1595 * Extract the physical page address associated with the given kernel virtual
/*
 * Kernel-VA to physical-address translation, tried in order: the nucleus
 * 4M mappings, the kernel TSB, then the kernel TTE hash.  Returns 0 (per
 * the pa == 0 fall-throughs) when no translation exists.
 * NOTE(review): the nucleus test uses 'va > KERNBASE' — a va equal to
 * KERNBASE falls through to the TSB/hash lookups; confirm intended.
 */
1600 pmap_kextract(vm_offset_t va)
1606 	if (va > KERNBASE && va < KERNBASE + nucleus_memory) {
/* Nucleus is mapped with 4M pages; index by 4M chunk. */
1608 		offset = va - KERNBASE;
1609 		pa = nucleus_mappings[offset >> 22] | (va & PAGE_MASK_4M);
1611 	if ((pa == 0) && (tte_data = tsb_lookup_tte(va, 0)) != 0)
1612 		pa = TTE_GET_PA(tte_data) | (va & TTE_GET_PAGE_MASK(tte_data));
1614 	if ((pa == 0) && (tte_data = tte_hash_lookup(kernel_pmap->pm_hash, va)) != 0)
1615 		pa = TTE_GET_PA(tte_data) | (va & TTE_GET_PAGE_MASK(tte_data));
1621 * Map a range of physical addresses into kernel virtual address space.
1623 * The value passed in *virt is a suggested virtual address for the mapping.
1624 * Architectures which can support a direct-mapped physical to virtual region
1625 * can return the appropriate address within that region, leaving '*virt'
/*
 * Map a physical range into KVA; sun4v simply returns the direct-map
 * address of 'start', leaving *virt untouched (see block comment above).
 */
1629 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1631 	return TLB_PHYS_TO_DIRECT(start);
/*
 * mincore(2) backend.  NOTE(review): the body is elided from this listing —
 * presumably unimplemented/returns 0 on sun4v; confirm against the file.
 */
1635 pmap_mincore(pmap_t pmap, vm_offset_t addr)
/*
 * Pre-populate page-table entries for an object range.  On sun4v this only
 * logs its invocation — there is no superpage pre-mapping to do here.
 */
1641 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
1642     vm_pindex_t index, vm_size_t size)
1644 	printf("pmap_object_init_pt\n");
1649 * Returns true if the pmap's pv is one of the first
1650 * 16 pvs linked to from this page. This count may
1651 * be changed upwards or downwards in the future; it
1652 * is only necessary that true be returned for a small
1653 * subset of pmaps for proper page aging.
/*
 * Return whether 'pmap' maps page m, scanning the page's pv list (bounded
 * to the first few entries per the block comment above; the loop counter
 * lines are elided from this listing).  Fictitious pages are never mapped.
 */
1656 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
1661 	if (m->flags & PG_FICTITIOUS)
1664 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1665 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1666 		if (pv->pv_pmap == pmap) {
1677 * Initialize a vm_page's machine-dependent fields.
/* Initialize machine-dependent page fields: empty pv list, zero count. */
1680 pmap_page_init(vm_page_t m)
1683 	TAILQ_INIT(&m->md.pv_list);
1684 	m->md.pv_list_count = 0;
1688 * Return the number of managed mappings to the given physical page
/*
 * Count managed mappings of m whose TTE carries VTD_WIRED.  Fictitious
 * pages have no managed mappings.  NOTE(review): the per-pv PMAP_LOCK and
 * the counter increment are elided from this listing.
 */
1692 pmap_page_wired_mappings(vm_page_t m)
1700 	if ((m->flags & PG_FICTITIOUS) != 0)
1702 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1703 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1706 		tte_data = tte_hash_lookup(pmap->pm_hash, pv->pv_va);
1707 		if ((tte_data & VTD_WIRED) != 0)
1715 * Lower the permission for all mappings to a given page.
/*
 * Revoke write access from every mapping of m: clear both the software
 * and hardware write bits in all TTEs, then clear PG_WRITEABLE.  No-op if
 * the page was never writable.
 */
1718 pmap_remove_write(vm_page_t m)
1720 	if ((m->flags & PG_WRITEABLE) == 0)
1722 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1723 	tte_clear_phys_bit(m, VTD_SW_W|VTD_W);
1724 	vm_page_flag_clear(m, PG_WRITEABLE);
1727 * Initialize the pmap associated with process 0.
/*
 * Initialize the pmap for process 0 (proc0): context 0, all CPUs active,
 * sharing the kernel pmap's TSB and TTE hash rather than allocating its own.
 */
1730 pmap_pinit0(pmap_t pmap)
1732 	PMAP_LOCK_INIT(pmap);
1733 	pmap->pm_active = pmap->pm_tlbactive = ~0;
1734 	pmap->pm_context = 0;
/* proc0 borrows the kernel pmap's translation structures. */
1735 	pmap->pm_tsb_ra = kernel_pmap->pm_tsb_ra;
1736 	pmap->pm_hash = kernel_pmap->pm_hash;
1738 	PCPU_SET(curpmap, pmap);
1740 	TAILQ_INIT(&pmap->pm_pvlist);
1741 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1745 * Initialize a preallocated and zeroed pmap structure, such as one in a
1746 * vmspace structure.
/*
 * Initialize a preallocated, zeroed pmap: allocate a hardware context id,
 * a private TTE hash and TSB, and reset the statistics / activity state.
 */
1749 pmap_pinit(pmap_t pmap)
1753 	pmap->pm_context = get_context();
1754 	pmap->pm_tsb_ra = vtophys(&pmap->pm_tsb);
/* Hash/TSB creation allocates pages, hence the page-queues lock. */
1756 	vm_page_lock_queues();
1757 	pmap->pm_hash = tte_hash_create(pmap->pm_context, &pmap->pm_hashscratch);
1758 	tsb_init(&pmap->pm_tsb, &pmap->pm_tsbscratch, TSB_INIT_SHIFT);
1759 	vm_page_unlock_queues();
1760 	pmap->pm_tsb_miss_count = pmap->pm_tsb_cap_miss_count = 0;
1761 	pmap->pm_active = pmap->pm_tlbactive = 0;
/* No previous TSBs to free yet (see pmap_tsb_reset()). */
1762 	for (i = 0; i < TSB_MAX_RESIZE; i++)
1763 		pmap->pm_old_tsb_ra[i] = 0;
1765 	TAILQ_INIT(&pmap->pm_pvlist);
1766 	PMAP_LOCK_INIT(pmap);
1767 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1772 * Set the physical protection on the specified range of this map as requested.
/*
 * Restrict permissions on [sva, eva) to 'prot'.  Removing read access
 * removes the mappings outright; granting write+execute leaves nothing to
 * clear.  Otherwise the relevant TTE bits are cleared page by page, dirty/
 * referenced state is pushed back to the vm_page, and a single range
 * shootdown is issued at the end if anything changed.
 */
1775 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1782 	DPRINTF("pmap_protect(0x%lx, 0x%lx, %d)\n", sva, eva, prot);
/* No read permission at all -> just remove the range. */
1784 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1785 		pmap_remove(pmap, sva, eva);
/* Full write+execute retained -> nothing to revoke (early return elided). */
1789 	if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
1790 	    (VM_PROT_WRITE|VM_PROT_EXECUTE))
1793 	clearbits = anychanged = 0;
1795 	if ((prot & VM_PROT_WRITE) == 0)
1796 		clearbits |= (VTD_W|VTD_SW_W);
/* The execute-bit addition to clearbits is elided from this listing. */
1797 	if ((prot & VM_PROT_EXECUTE) == 0)
1800 	vm_page_lock_queues();
1802 	for (tva = sva; tva < eva; tva += PAGE_SIZE) {
1806 		if ((otte_data = tte_hash_clear_bits(pmap->pm_hash, tva,
1810 		 * XXX technically we should do a shootdown if it
1811 		 * was referenced and was executable - but is not now
1813 		if (!anychanged && (otte_data & VTD_W))
/* Push REF/MOD state from the old TTE back onto the vm_page. */
1816 		if (otte_data & VTD_MANAGED) {
1819 			if (otte_data & VTD_REF) {
1820 				m = PHYS_TO_VM_PAGE(TTE_GET_PA(otte_data));
1821 				vm_page_flag_set(m, PG_REFERENCED);
1823 			if (otte_data & VTD_W) {
1824 				m = PHYS_TO_VM_PAGE(TTE_GET_PA(otte_data));
1830 	vm_page_unlock_queues();
1832 	pmap_invalidate_range(pmap, sva, eva, TRUE);
1837 * Map a list of wired pages into kernel virtual address space. This is
1838 * intended for temporary mappings which do not need page modification or
1839 * references recorded. Existing mappings in the region are overwritten.
/*
 * Enter 'count' wired kernel mappings starting at sva, overwriting any
 * existing ones; only issues a shootdown if some replaced TTE had been
 * referenced (otherwise no stale TLB entry can exist).
 */
1842 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
1849 	while (count-- > 0) {
/* Accumulate old-TTE bits across the whole run to decide on invalidation. */
1850 		otte |= tte_hash_update(kernel_pmap->pm_hash, va,
1851 		    VM_PAGE_TO_PHYS(*m) | TTE_KERNEL | VTD_8K);
/* The 'm++; va += PAGE_SIZE;' advance lines are elided from this listing. */
1855 	if ((otte & VTD_REF) != 0)
1856 		pmap_invalidate_range(kernel_pmap, sva, va, FALSE);
1860 * Remove page mappings from kernel virtual address space. Intended for
1861 * temporary mappings entered by pmap_qenter.
/*
 * Remove 'count' temporary kernel mappings starting at sva (the inverse of
 * pmap_qenter); shootdown only when a removed TTE had been referenced.
 */
1864 pmap_qremove(vm_offset_t sva, int count)
1872 	while (count-- > 0) {
1873 		otte |= tte_hash_delete(kernel_pmap->pm_hash, va);
/* The 'va += PAGE_SIZE;' advance line is elided from this listing. */
1876 	if ((otte & VTD_REF) != 0)
1877 		pmap_invalidate_range(kernel_pmap, sva, va, TRUE);
1881 * Release any resources held by the given physical map.
1882 * Called when a pmap initialized by pmap_pinit is being released.
1883 * Should only be called if the map contains no valid mappings.
/*
 * Release all resources of a pmap created by pmap_pinit(): TSB, TTE hash,
 * hardware context id, and the pmap lock.  The pmap must already be empty
 * (asserted via resident_count).
 */
1886 pmap_release(pmap_t pmap)
1888 	KASSERT(pmap->pm_stats.resident_count == 0,
1889 	    ("pmap_release: pmap resident count %ld != 0",
1890 	    pmap->pm_stats.resident_count));
1892 	tsb_deinit(&pmap->pm_tsb);
1893 	tte_hash_destroy(pmap->pm_hash);
1894 	free_context(pmap->pm_context);
1895 	PMAP_LOCK_DESTROY(pmap);
1899 * Remove the given range of addresses from the specified map.
/*
 * Remove all mappings in [start, end): delete each TTE from the hash,
 * tear down pv/statistics state via pmap_remove_tte(), and finish with one
 * range shootdown if any removed TTE had REF/MOD state.
 */
1902 pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
1908 	 * Perform an unsynchronized read. This is, however, safe.
/* Empty pmap: nothing to remove (early return elided from this listing). */
1910 	if (pmap->pm_stats.resident_count == 0)
1913 	DPRINTF("pmap_remove(start=0x%lx, end=0x%lx)\n",
1916 	vm_page_lock_queues();
1918 	for (tva = start; tva < end; tva += PAGE_SIZE) {
1919 		if ((tte_data = tte_hash_delete(pmap->pm_hash, tva)) == 0)
1921 		pmap_remove_tte(pmap, tte_data, tva);
/* Track whether any TTE was live in the TLB (flag set line elided). */
1922 		if (tte_data & (VTD_REF|VTD_W))
1925 	vm_page_unlock_queues();
1927 	pmap_invalidate_range(pmap, start, end, TRUE);
1932 * Routine: pmap_remove_all
1934 * Removes this physical page from
1935 * all physical maps in which it resides.
1936 * Reflects back modify bits to the pager.
1939 * Original versions of this routine were very
1940 * inefficient because they iteratively called
1941 * pmap_remove (slow...)
/*
 * Remove every mapping of page m from every pmap (see block comment
 * above): for each pv entry, delete the TTE, reflect wired/referenced/
 * modified state back to the statistics and the vm_page, invalidate the
 * page, and unlink the pv from both lists.  Finally the page can no
 * longer be writable.
 */
1945 pmap_remove_all(vm_page_t m)
1949 	DPRINTF("pmap_remove_all 0x%lx\n", VM_PAGE_TO_PHYS(m));
1951 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1952 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1953 		PMAP_LOCK(pv->pv_pmap);
1954 		pv->pv_pmap->pm_stats.resident_count--;
1956 		tte_data = tte_hash_delete(pv->pv_pmap->pm_hash, pv->pv_va);
1958 		if (tte_data & VTD_WIRED)
1959 			pv->pv_pmap->pm_stats.wired_count--;
1960 		if (tte_data & VTD_REF)
1961 			vm_page_flag_set(m, PG_REFERENCED);
1964 		 * Update the vm_page_t clean and reference bits.
/* A modified page must have been software-writable; dirty it (line elided). */
1966 		if (tte_data & VTD_W) {
1967 			KASSERT((tte_data & VTD_SW_W),
1968 			    ("pmap_remove_all: modified page not writable: va: %lx, tte: %lx",
1969 			    pv->pv_va, tte_data));
1973 		pmap_invalidate_page(pv->pv_pmap, pv->pv_va, TRUE);
1974 		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
1975 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1976 		m->md.pv_list_count--;
1977 		PMAP_UNLOCK(pv->pv_pmap);
/* The free_pv_entry() call is elided from this listing. */
1980 	vm_page_flag_clear(m, PG_WRITEABLE);
/*
 * Find and unlink the pv entry for (pmap, va) on page m.  Searches the
 * shorter of the page's pv list and the pmap's pv list; clears
 * PG_WRITEABLE when the page loses its last mapping.
 */
1984 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
1987 	if (pmap != kernel_pmap)
1988 		DPRINTF("pmap_remove_entry(va=0x%lx, pa=0x%lx)\n", va, VM_PAGE_TO_PHYS(m));
1989 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1990 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
/* Search whichever pv list is expected to be shorter. */
1991 	if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
1992 		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1993 			if (pmap == pv->pv_pmap && va == pv->pv_va)
1997 		TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
1998 			if (va == pv->pv_va)
2002 	KASSERT(pv != NULL, ("pmap_remove_entry: pv not found va=0x%lx pa=0x%lx", va, VM_PAGE_TO_PHYS(m)));
2003 	TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2004 	m->md.pv_list_count--;
2005 	if (TAILQ_EMPTY(&m->md.pv_list))
2006 		vm_page_flag_clear(m, PG_WRITEABLE);
2007 	TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
/*
 * Tear down all (unwired) mappings of an exiting process in one pass over
 * the pmap's pv list, then reset the TTE hash and TSB wholesale and issue
 * a single full invalidation — much cheaper than per-page pmap_remove().
 */
2013 pmap_remove_pages(pmap_t pmap)
2020 	DPRINTF("pmap_remove_pages(ctx=0x%lx)\n", pmap->pm_context);
2021 	vm_page_lock_queues();
2023 	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
2024 		tte_data = tte_hash_delete(pmap->pm_hash, pv->pv_va);
/* A pv entry without a TTE indicates corruption; log and (elided) skip. */
2026 		if (tte_data == 0) {
2027 			printf("TTE IS ZERO @ VA %016lx\n", pv->pv_va);
/* Wired pages should not appear here; the decrement below is dead code. */
2030 		if (tte_data & VTD_WIRED) {
2031 			panic("wired page in process not handled correctly");
2032 			pmap->pm_stats.wired_count--;
2034 		m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
2036 		pmap->pm_stats.resident_count--;
/* Modified: dirty the page (vm_page_dirty call elided from this listing). */
2038 		if (tte_data & VTD_W) {
2042 		npv = TAILQ_NEXT(pv, pv_plist);
2043 		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
2045 		m->md.pv_list_count--;
2046 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2047 		if (TAILQ_EMPTY(&m->md.pv_list))
2048 			vm_page_flag_clear(m, PG_WRITEABLE);
/* Bulk-reset translation structures instead of deleting entry by entry. */
2052 	pmap->pm_hash = tte_hash_reset(pmap->pm_hash, &pmap->pm_hashscratch);
2054 	pmap_tsb_reset(pmap);
2056 	vm_page_unlock_queues();
2057 	pmap_invalidate_all(pmap);
/*
 * Undo any TSB growth: free every enlarged TSB recorded in pm_old_tsb_ra[]
 * and restore the pmap to its original TSB_INIT_SHIFT-sized TSB.
 */
2062 pmap_tsb_reset(pmap_t pmap)
/* Free the intermediate (doubled) TSBs, largest bookkeeping slots first kept in order. */
2066 	for (i = 1; i < TSB_MAX_RESIZE && pmap->pm_old_tsb_ra[i]; i++) {
2067 		pmap_free_contig_pages((void *)TLB_PHYS_TO_DIRECT(pmap->pm_old_tsb_ra[i]),
2068 		    (1 << (TSB_INIT_SHIFT + i)));
2069 		pmap->pm_old_tsb_ra[i] = 0;
/* Slot 0 holds the original TSB: swap it back in and free the current one. */
2071 	if (pmap->pm_old_tsb_ra[0] != 0) {
2072 		vm_paddr_t tsb_pa = pmap->pm_tsb.hti_ra;
2073 		int size = tsb_size(&pmap->pm_tsb);
2074 		pmap->pm_tsb.hti_ntte = (1 << (TSB_INIT_SHIFT + PAGE_SHIFT - TTE_SHIFT));
2075 		pmap->pm_tsb.hti_ra = pmap->pm_old_tsb_ra[0];
2076 		pmap_free_contig_pages((void *)TLB_PHYS_TO_DIRECT(tsb_pa), size);
2077 		pmap->pm_tsbscratch = pmap->pm_tsb.hti_ra | (uint64_t)TSB_INIT_SHIFT;
2078 		pmap->pm_old_tsb_ra[0] = 0;
/*
 * Zero a physical range via the hypervisor's memory-scrub service;
 * hv_mem_scrub may zero less than requested, so loop (the enclosing
 * loop/advance lines are elided from this listing) until size is consumed.
 */
2083 pmap_scrub_pages(vm_paddr_t pa, int64_t size)
2085 	uint64_t bytes_zeroed;
2087 	hv_mem_scrub(pa, size, &bytes_zeroed);
2089 	size -= bytes_zeroed;
/*
 * Bookkeeping after a TTE has been deleted for (pmap, va): adjust wired/
 * resident counts and, for managed pages, reflect modified/referenced
 * state onto the vm_page and remove the pv entry.
 */
2094 pmap_remove_tte(pmap_t pmap, tte_t tte_data, vm_offset_t va)
2099 	if (pmap != kernel_pmap)
2100 		DPRINTF("pmap_remove_tte(va=0x%lx, pa=0x%lx)\n", va, TTE_GET_PA(tte_data));
2102 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2103 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2104 	if (tte_data & VTD_WIRED)
2105 		pmap->pm_stats.wired_count--;
2107 	pmap->pm_stats.resident_count--;
2109 	if (tte_data & VTD_MANAGED) {
2110 		m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
/* Modified: dirty the page (vm_page_dirty call elided from this listing). */
2111 		if (tte_data & VTD_W) {
2114 		if (tte_data & VTD_REF)
2115 			vm_page_flag_set(m, PG_REFERENCED);
2116 		pmap_remove_entry(pmap, m, va);
2120 /* resize the tsb if the number of capacity misses is greater than 1/4 of
/*
 * Double the current pmap's TSB when capacity misses dominate (more than
 * half of all recorded misses here) and the maximum size has not been
 * reached; otherwise just re-register the existing TSB with the
 * hypervisor.  Other CPUs running this pmap are updated via tl_tsbupdate.
 * Must be called on the pmap currently installed on this CPU (asserted).
 */
2124 pmap_tsb_resize(pmap_t pmap)
2126 	uint32_t miss_count;
2127 	uint32_t cap_miss_count;
2128 	struct tsb_resize_info info;
2129 	hv_tsb_info_t hvtsb;
2130 	uint64_t tsbscratch;
2132 	KASSERT(pmap == curthread_pmap, ("operating on non-current pmap"));
2133 	miss_count = pmap->pm_tsb_miss_count;
2134 	cap_miss_count = pmap->pm_tsb_cap_miss_count;
2135 	int npages_shift = tsb_page_shift(pmap);
2137 	if (npages_shift < (TSB_INIT_SHIFT + TSB_MAX_RESIZE) &&
2138 	    cap_miss_count > (miss_count >> 1)) {
2139 		DPRINTF("resizing tsb for proc=%s pid=%d\n",
2140 		    curthread->td_proc->p_comm, curthread->td_proc->p_pid);
/* Remember the outgoing TSB so pmap_tsb_reset() can free it later. */
2141 		pmap->pm_old_tsb_ra[npages_shift - TSB_INIT_SHIFT] = pmap->pm_tsb.hti_ra;
2143 		/* double TSB size */
2144 		tsb_init(&hvtsb, &tsbscratch, npages_shift + 1);
2148 		bcopy(&hvtsb, &pmap->pm_tsb, sizeof(hv_tsb_info_t));
2149 		pmap->pm_tsbscratch = tsb_set_scratchpad_user(&pmap->pm_tsb);
/* Register the new TSB with the hypervisor for non-zero contexts. */
2151 		if (hv_mmu_tsb_ctxnon0(1, pmap->pm_tsb_ra) != H_EOK)
2152 			panic("failed to set TSB 0x%lx - context == %ld\n",
2153 			    pmap->pm_tsb_ra, pmap->pm_context);
/* Tell the other CPUs running this pmap about the new TSB location. */
2154 		info.tri_tsbscratch = pmap->pm_tsbscratch;
2155 		info.tri_tsb_ra = pmap->pm_tsb_ra;
2156 		pmap_ipi(pmap, tl_tsbupdate, pmap->pm_context, vtophys(&info));
2157 		pmap->pm_tlbactive = pmap->pm_active;
/* Not resizing: re-install the current TSB as-is. */
2160 		bcopy(&hvtsb, &pmap->pm_tsb, sizeof(hvtsb));
2161 		if (hv_mmu_tsb_ctxnon0(1, pmap->pm_tsb_ra) != H_EOK)
2162 			panic("failed to set TSB 0x%lx - context == %ld\n",
2163 			    pmap->pm_tsb_ra, pmap->pm_context);
2164 		pmap->pm_tsbscratch = tsb_set_scratchpad_user(&pmap->pm_tsb);
/* Restart miss accounting for the next resize decision. */
2167 	pmap->pm_tsb_miss_count = 0;
2168 	pmap->pm_tsb_cap_miss_count = 0;
/*
 * Grow the pmap's TTE hash: build the resized hash, propagate the new
 * scratchpad to sibling threads' CPUs via IPI when the process is
 * multithreaded, then destroy the old hash.
 */
2172 pmap_tte_hash_resize(pmap_t pmap)
2174 	tte_hash_t old_th = pmap->pm_hash;
2176 	pmap->pm_hash = tte_hash_resize(pmap->pm_hash);
2178 	if (curthread->td_proc->p_numthreads != 1)
2179 		pmap_ipi(pmap, tl_ttehashupdate, pmap->pm_context, pmap->pm_hashscratch);
2181 	pmap->pm_hashscratch = tte_hash_set_scratchpad_user(pmap->pm_hash, pmap->pm_context);
2183 	tte_hash_destroy(old_th);
2187 * pmap_ts_referenced:
2189 * Return a count of reference bits for a page, clearing those bits.
2190 * It is not necessary for every reference bit to be cleared, but it
2191 * is necessary that 0 only be returned when there are truly no
2192 * reference bits set.
2194 * XXX: The exact number of bits to check and clear is a matter that
2195 * should be tested and standardized at some point in the future for
2196 * optimal aging of shared pages.
/*
 * Count and clear reference bits on mappings of m (bounded sample — see
 * block comment above).  Rotates each examined pv to the list tail so
 * successive calls sample different mappings; pvf marks where we started.
 */
2200 pmap_ts_referenced(vm_page_t m)
2204 	pv_entry_t pv, pvf, pvn;
2209 	if (m->flags & PG_FICTITIOUS)
2212 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2213 	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2218 		pvn = TAILQ_NEXT(pv, pv_list);
/* Move this pv to the tail so the next call starts with a different one. */
2220 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2222 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2226 		otte_data = tte_hash_clear_bits(pmap->pm_hash, pv->pv_va, VTD_REF);
/* Referenced: shoot down so the hardware re-sets VTD_REF on next access. */
2227 		if ((otte_data & VTD_REF) != 0) {
2228 			pmap_invalidate_page(pmap, pv->pv_va, TRUE);
/* Stop once we wrap around to the first pv we examined. */
2238 	} while ((pv = pvn) != NULL && pv != pvf);
/* Zero a full page through the direct map using the block-clear primitive. */
2244 pmap_zero_page(vm_page_t m)
2246 	hwblkclr((void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)), PAGE_SIZE);
/*
 * Zero 'size' bytes at offset 'off' within page m: whole-page requests use
 * the fast block-clear; partial areas fall back to bzero via the direct map.
 */
2250 pmap_zero_page_area(vm_page_t m, int off, int size)
2255 	pa = VM_PAGE_TO_PHYS(m);
2256 	va = TLB_PHYS_TO_DIRECT(pa);
2257 	if (off == 0 && size == PAGE_SIZE)
2258 		hwblkclr((void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)), PAGE_SIZE);
2260 		bzero((char *)(va + off), size);
/* Idle-loop page zeroing; identical to pmap_zero_page() on sun4v. */
2265 pmap_zero_page_idle(vm_page_t m)
2267 	hwblkclr((void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)), PAGE_SIZE);
2271 pmap_set_ctx_panic(uint64_t error, vm_paddr_t tsb_ra, pmap_t pmap)
2273 panic("setting ctxnon0 failed ctx=0x%lx hvtsb_ra=0x%lx tsbscratch=0x%lx error=0x%lx",
2274 pmap->pm_context, tsb_ra, pmap->pm_tsbscratch, error);