/*
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1994 David Greenman
 * Copyright (c) 1998,2000 Doug Rabson
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
 * from: i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
 *	with some ideas from NetBSD's alpha pmap
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pageout.h>

#include <machine/bootinfo.h>
#include <machine/efi.h>
#include <machine/md_var.h>
#include <machine/pal.h>
/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps. These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time. However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary. This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */
/*
 * Following the Linux model, region IDs are allocated in groups of
 * eight so that a single region ID can be used for as many RRs as we
 * want by encoding the RR number into the low bits of the ID.
 *
 * We reserve region ID 0 for the kernel and allocate the remaining
 * IDs for user pmaps.
 *
 * Region 0-3:	User virtually mapped
 * Region 4:	PBVM and special mappings
 * Region 5:	Kernel virtual memory
 * Region 6:	Direct-mapped uncacheable
 * Region 7:	Direct-mapped cacheable
 */
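
/*
 * A quick sketch of how a VA selects its region: the top three bits
 * of the 64-bit address (va >> 61) name the region, and the matching
 * region register supplies the RID used for TLB and VHPT lookups.
 * For example, 0xe000000000000000 has va >> 61 == 7, making it a
 * direct-mapped cacheable address.
 */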
/* XXX move to a header. */
extern uint64_t ia64_gateway_page[];

#ifndef PMAP_SHPGPERPROC
#define	PMAP_SHPGPERPROC 200
#endif

#if !defined(DIAGNOSTIC)
#define	PMAP_INLINE __inline
#else
#define	PMAP_INLINE
#endif
#define	pmap_accessed(lpte)		((lpte)->pte & PTE_ACCESSED)
#define	pmap_dirty(lpte)		((lpte)->pte & PTE_DIRTY)
#define	pmap_exec(lpte)			((lpte)->pte & PTE_AR_RX)
#define	pmap_managed(lpte)		((lpte)->pte & PTE_MANAGED)
#define	pmap_ppn(lpte)			((lpte)->pte & PTE_PPN_MASK)
#define	pmap_present(lpte)		((lpte)->pte & PTE_PRESENT)
#define	pmap_prot(lpte)			(((lpte)->pte & PTE_PROT_MASK) >> 56)
#define	pmap_wired(lpte)		((lpte)->pte & PTE_WIRED)
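
/*
 * Note: PTE_WIRED, PTE_MANAGED and the protection field are software
 * state kept in the high, hardware-ignored bits of the PTE; the >> 56
 * shift in pmap_prot() recovers the VM_PROT_* value that
 * pmap_pte_prot() stashes there.
 */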
#define	pmap_clear_accessed(lpte)	(lpte)->pte &= ~PTE_ACCESSED
#define	pmap_clear_dirty(lpte)		(lpte)->pte &= ~PTE_DIRTY
#define	pmap_clear_present(lpte)	(lpte)->pte &= ~PTE_PRESENT
#define	pmap_clear_wired(lpte)		(lpte)->pte &= ~PTE_WIRED

#define	pmap_set_wired(lpte)		(lpte)->pte |= PTE_WIRED
/*
 * The VHPT bucket head structure.
 */

/*
 * Statically allocated kernel pmap
 */
struct pmap kernel_pmap_store;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

/*
 * Kernel virtual memory management.
 */
extern struct ia64_lpte ***ia64_kptdir;

#define	KPTE_DIR0_INDEX(va) \
	(((va) >> (3*PAGE_SHIFT-8)) & ((1<<(PAGE_SHIFT-3))-1))
#define	KPTE_DIR1_INDEX(va) \
	(((va) >> (2*PAGE_SHIFT-5)) & ((1<<(PAGE_SHIFT-3))-1))
#define	KPTE_PTE_INDEX(va) \
	(((va) >> PAGE_SHIFT) & ((1<<(PAGE_SHIFT-5))-1))
#define	NKPTEPG		(PAGE_SIZE / sizeof(struct ia64_lpte))
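
/*
 * A worked example, assuming the usual 8KB ia64 page (PAGE_SHIFT == 13)
 * and the 32-byte struct ia64_lpte: each directory page then holds
 * 1 << (13 - 3) == 1024 pointers, each leaf page holds NKPTEPG == 256
 * PTEs, one leaf page maps 256 * 8KB == 2MB of KVA, and one first-level
 * directory entry covers 1024 * 2MB == 2GB.
 */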
vm_offset_t kernel_vm_end;

/* Values for ptc.e. XXX values for SKI. */
static uint64_t pmap_ptc_e_base = 0x100000000;
static uint64_t pmap_ptc_e_count1 = 3;
static uint64_t pmap_ptc_e_count2 = 2;
static uint64_t pmap_ptc_e_stride1 = 0x2000;
static uint64_t pmap_ptc_e_stride2 = 0x100000000;

struct mtx pmap_ptc_mutex;
/*
 * Data for the RID allocator
 */
static int pmap_ridcount;
static int pmap_rididx;
static int pmap_ridmapsz;
static int pmap_ridmax;
static uint64_t *pmap_ridmap;
struct mtx pmap_ridmutex;
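
/*
 * The RID map is a plain bitmap: RID r is in use iff bit (r & 63) of
 * pmap_ridmap[r >> 6] is set. pmap_allocate_rid() and pmap_free_rid()
 * below rely on exactly this encoding.
 */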
/*
 * Data for the pv entry allocation mechanism
 */
static uma_zone_t pvzone;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

/*
 * Data for allocating PTEs for user processes.
 */
static uma_zone_t ptezone;

/*
 * Virtual Hash Page Table (VHPT) data.
 */
/* SYSCTL_DECL(_machdep); */
SYSCTL_NODE(_machdep, OID_AUTO, vhpt, CTLFLAG_RD, 0, "");

struct ia64_bucket *pmap_vhpt_bucket;

int pmap_vhpt_nbuckets;
SYSCTL_INT(_machdep_vhpt, OID_AUTO, nbuckets, CTLFLAG_RD,
    &pmap_vhpt_nbuckets, 0, "");

int pmap_vhpt_log2size = 0;
TUNABLE_INT("machdep.vhpt.log2size", &pmap_vhpt_log2size);
SYSCTL_INT(_machdep_vhpt, OID_AUTO, log2size, CTLFLAG_RD,
    &pmap_vhpt_log2size, 0, "");

static int pmap_vhpt_inserts;
SYSCTL_INT(_machdep_vhpt, OID_AUTO, inserts, CTLFLAG_RD,
    &pmap_vhpt_inserts, 0, "");

static int pmap_vhpt_population(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_machdep_vhpt, OID_AUTO, population, CTLTYPE_INT | CTLFLAG_RD,
    NULL, 0, pmap_vhpt_population, "I", "");
static struct ia64_lpte *pmap_find_vhpt(vm_offset_t va);

static PMAP_INLINE void	free_pv_entry(pv_entry_t pv);
static pv_entry_t	get_pv_entry(pmap_t locked_pmap);

static void	pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
		    vm_page_t m, vm_prot_t prot);
static void	pmap_free_pte(struct ia64_lpte *pte, vm_offset_t va);
static void	pmap_invalidate_all(void);
static int	pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte,
		    vm_offset_t va, pv_entry_t pv, int freepte);
static int	pmap_remove_vhpt(vm_offset_t va);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
		    vm_page_t m);
static vm_offset_t
pmap_steal_memory(vm_size_t size)
	vm_offset_t va;
	vm_size_t bank_size;

	size = round_page(size);

	bank_size = phys_avail[1] - phys_avail[0];
	while (size > bank_size) {
		for (i = 0; phys_avail[i+2]; i+= 2) {
			phys_avail[i] = phys_avail[i+2];
			phys_avail[i+1] = phys_avail[i+3];
		}
		if (!phys_avail[0])
			panic("pmap_steal_memory: out of memory");
		bank_size = phys_avail[1] - phys_avail[0];
	}

	pa = phys_avail[0];
	phys_avail[0] += size;

	va = IA64_PHYS_TO_RR7(pa);
	bzero((caddr_t) va, size);
	return (va);
static void
pmap_initialize_vhpt(vm_offset_t vhpt)
	struct ia64_lpte *pte;
	u_int i;

	pte = (struct ia64_lpte *)vhpt;
	for (i = 0; i < pmap_vhpt_nbuckets; i++) {
		pte[i].pte = 0;
		pte[i].itir = 0;
		pte[i].tag = 1UL << 63;	/* Invalid tag */
		pte[i].chain = (uintptr_t)(pmap_vhpt_bucket + i);
	}

MALLOC_DECLARE(M_SMP);

vm_offset_t
pmap_alloc_vhpt(void)
	vm_offset_t vhpt;
	vm_size_t size;

	size = 1UL << pmap_vhpt_log2size;
	vhpt = (uintptr_t)contigmalloc(size, M_SMP, 0, 0UL, ~0UL, size, 0UL);
	if (vhpt != 0) {
		vhpt = IA64_PHYS_TO_RR7(ia64_tpa(vhpt));
		pmap_initialize_vhpt(vhpt);
	}
	return (vhpt);
/*
 * Bootstrap the system enough to run with virtual memory.
 */
void
pmap_bootstrap(void)
	struct ia64_pal_result res;
	int i, j, count, ridbits;

	/*
	 * Query the PAL Code to find the loop parameters for the
	 * ptc.e instruction.
	 */
	res = ia64_call_pal_static(PAL_PTCE_INFO, 0, 0, 0);
	if (res.pal_status != 0)
		panic("Can't configure ptc.e parameters");
	pmap_ptc_e_base = res.pal_result[0];
	pmap_ptc_e_count1 = res.pal_result[1] >> 32;
	pmap_ptc_e_count2 = res.pal_result[1] & ((1L<<32) - 1);
	pmap_ptc_e_stride1 = res.pal_result[2] >> 32;
	pmap_ptc_e_stride2 = res.pal_result[2] & ((1L<<32) - 1);

	if (bootverbose)
		printf("ptc.e base=0x%lx, count1=%ld, count2=%ld, "
		       "stride1=0x%lx, stride2=0x%lx\n",
		       pmap_ptc_e_base, pmap_ptc_e_count1, pmap_ptc_e_count2,
		       pmap_ptc_e_stride1, pmap_ptc_e_stride2);

	mtx_init(&pmap_ptc_mutex, "PTC.G mutex", NULL, MTX_SPIN);
	/*
	 * Setup RIDs. RIDs 0..7 are reserved for the kernel.
	 *
	 * We currently need at least 19 bits in the RID because PID_MAX
	 * can only be encoded in 17 bits and we need RIDs for 4 regions
	 * per process. With PID_MAX equalling 99999 this means that we
	 * need to be able to encode 399996 (=4*PID_MAX).
	 * The Itanium processor only has 18 bits and the architected
	 * minimum is exactly that. So, we cannot use a PID based scheme
	 * in those cases. Enter pmap_ridmap...
	 * We should avoid the map when running on a processor that has
	 * implemented enough bits. This means that we should pass the
	 * process/thread ID to pmap. This we currently don't do, so we
	 * use the map anyway. However, we don't want to allocate a map
	 * that is large enough to cover the range dictated by the number
	 * of bits in the RID, because that may result in a RID map of
	 * 2MB in size for a 24-bit RID. A 64KB map is enough.
	 * The bottom line: we create a 32KB map when the processor only
	 * implements 18 bits (or when we can't figure it out). Otherwise
	 * we create a 64KB map.
	 */
	res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0);
	if (res.pal_status != 0) {
		printf("Can't read VM Summary - assuming 18 Region ID bits\n");
		ridbits = 18; /* guaranteed minimum */
	} else {
		ridbits = (res.pal_result[1] >> 8) & 0xff;
		printf("Processor supports %d Region ID bits\n",
		    ridbits);
	}
	if (ridbits > 19)
		ridbits = 19;

	pmap_ridmax = (1 << ridbits);
	pmap_ridmapsz = pmap_ridmax / 64;
	pmap_ridmap = (uint64_t *)pmap_steal_memory(pmap_ridmax / 8);
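	/*
	 * The low 64 RIDs live in word 0 of the map; setting the low
	 * eight bits below marks RIDs 0..7 as allocated, since they are
	 * reserved for the kernel.
	 */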
	pmap_ridmap[0] |= 0xff;
	mtx_init(&pmap_ridmutex, "RID allocator lock", NULL, MTX_DEF);

	/*
	 * Allocate some memory for initial kernel 'page tables'.
	 */
	ia64_kptdir = (void *)pmap_steal_memory(PAGE_SIZE);
	kernel_vm_end = VM_MIN_KERNEL_ADDRESS;

	count = 0;
	for (i = 0; phys_avail[i+2]; i+= 2)
		count++;

	/*
	 * Determine a valid (mappable) VHPT size.
	 */
	TUNABLE_INT_FETCH("machdep.vhpt.log2size", &pmap_vhpt_log2size);
	if (pmap_vhpt_log2size == 0)
		pmap_vhpt_log2size = 20;
	else if (pmap_vhpt_log2size < 16)
		pmap_vhpt_log2size = 16;
	else if (pmap_vhpt_log2size > 28)
		pmap_vhpt_log2size = 28;
	if (pmap_vhpt_log2size & 1)
		pmap_vhpt_log2size--;
	size = 1UL << pmap_vhpt_log2size;
	for (i = 0; i < count; i += 2) {
		base = (phys_avail[i] + size - 1) & ~(size - 1);
		if (base + size <= phys_avail[i+1])
			break;
	}
	if (!phys_avail[i])
		panic("Unable to allocate VHPT");

	if (base != phys_avail[i]) {
		/* Split this region. */
		for (j = count; j > i; j -= 2) {
			phys_avail[j] = phys_avail[j-2];
			phys_avail[j+1] = phys_avail[j-2+1];
		}
		phys_avail[i+1] = base;
		phys_avail[i+2] = base + size;
	} else
		phys_avail[i] = base + size;

	base = IA64_PHYS_TO_RR7(base);
	PCPU_SET(md.vhpt, base);
	if (bootverbose)
		printf("VHPT: address=%#lx, size=%#lx\n", base, size);

	pmap_vhpt_nbuckets = size / sizeof(struct ia64_lpte);
	pmap_vhpt_bucket = (void *)pmap_steal_memory(pmap_vhpt_nbuckets *
	    sizeof(struct ia64_bucket));
	for (i = 0; i < pmap_vhpt_nbuckets; i++) {
		/* Stolen memory is zeroed. */
		mtx_init(&pmap_vhpt_bucket[i].mutex, "VHPT bucket lock", NULL,
		    MTX_NOWITNESS | MTX_SPIN);
	}

	pmap_initialize_vhpt(base);
	ia64_set_pta(base + (1 << 8) + (pmap_vhpt_log2size << 2) + 1);
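	/*
	 * A sketch of the PTA encoding behind the expression above: bit 0
	 * enables the VHPT walker, bits 7:2 hold the log2 table size,
	 * bit 8 selects the long format, and the upper bits hold the
	 * table base; hence base + (1 << 8) + (log2size << 2) + 1.
	 */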
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
		kernel_pmap->pm_rid[i] = 0;
	TAILQ_INIT(&kernel_pmap->pm_pvlist);
	PCPU_SET(md.current_pmap, kernel_pmap);

	/* Region 5 is mapped via the VHPT. */
	ia64_set_rr(IA64_RR_BASE(5), (5 << 8) | (PAGE_SHIFT << 2) | 1);
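	/*
	 * Region register encoding: the RID sits in bits 31:8, the
	 * preferred page size (log2) in bits 7:2, and bit 0 enables the
	 * VHPT walker for the region, giving (rid << 8) |
	 * (PAGE_SHIFT << 2) | 1.
	 */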
	/*
	 * Clear out any random TLB entries left over from booting.
	 */
	pmap_invalidate_all();

static int
pmap_vhpt_population(SYSCTL_HANDLER_ARGS)
	int count, error, i;

	count = 0;
	for (i = 0; i < pmap_vhpt_nbuckets; i++)
		count += pmap_vhpt_bucket[i].length;

	error = SYSCTL_OUT(req, &count, sizeof(count));
	return (error);
static vm_offset_t
pmap_page_to_va(vm_page_t m)
	vm_paddr_t pa;
	vm_offset_t va;

	pa = VM_PAGE_TO_PHYS(m);
	va = (m->md.memattr == VM_MEMATTR_UNCACHEABLE) ? IA64_PHYS_TO_RR6(pa) :
	    IA64_PHYS_TO_RR7(pa);
	return (va);

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
	TAILQ_INIT(&m->md.pv_list);
	m->md.pv_list_count = 0;
	m->md.memattr = VM_MEMATTR_DEFAULT;
/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
void
pmap_init(void)
	int shpgperproc = PMAP_SHPGPERPROC;

	/*
	 * Initialize the address space (zone) for the pv entries. Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	ptezone = uma_zcreate("PT ENTRY", sizeof (struct ia64_lpte),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
/***************************************************
 * Manipulate TLBs for a pmap
 ***************************************************/

static void
pmap_invalidate_page(vm_offset_t va)
	struct ia64_lpte *pte;
	struct pcpu *pc;
	uint64_t tag, vhpt_ofs;

	vhpt_ofs = ia64_thash(va) - PCPU_GET(md.vhpt);
	tag = ia64_ttag(va);
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		pte = (struct ia64_lpte *)(pc->pc_md.vhpt + vhpt_ofs);
		atomic_cmpset_64(&pte->tag, tag, 1UL << 63);
	}
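		/*
		 * The compare-and-set only clobbers a VHPT entry when the
		 * tag still matches, so a translation that was concurrently
		 * re-inserted into this slot is left intact.
		 */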
	mtx_lock_spin(&pmap_ptc_mutex);
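	/*
	 * ptc.ga purges the translation from every TLB in the coherence
	 * domain, and the architecture allows only one such global purge
	 * to be in flight at a time; the global spin mutex serializes it.
	 */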
	ia64_ptc_ga(va, PAGE_SHIFT << 2);
	mtx_unlock_spin(&pmap_ptc_mutex);
static void
pmap_invalidate_all_1(void *arg)
	uint64_t addr;
	int i, j;

	addr = pmap_ptc_e_base;
	for (i = 0; i < pmap_ptc_e_count1; i++) {
		for (j = 0; j < pmap_ptc_e_count2; j++) {
			ia64_ptc_e(addr);
			addr += pmap_ptc_e_stride2;
		}
		addr += pmap_ptc_e_stride1;
	}

static void
pmap_invalidate_all(void)
#ifdef SMP
	smp_rendezvous(NULL, pmap_invalidate_all_1, NULL, NULL);
#else
	pmap_invalidate_all_1(NULL);
#endif
static uint32_t
pmap_allocate_rid(void)
	uint64_t bit, bits;
	int rid;

	mtx_lock(&pmap_ridmutex);
	if (pmap_ridcount == pmap_ridmax)
		panic("pmap_allocate_rid: All Region IDs used");

	/* Find an index with a free bit. */
	while ((bits = pmap_ridmap[pmap_rididx]) == ~0UL) {
		pmap_rididx++;
		if (pmap_rididx == pmap_ridmapsz)
			pmap_rididx = 0;
	}
	rid = pmap_rididx * 64;

	/* Find a free bit. */
	bit = 1UL;
	while (bits & bit) {
		rid++;
		bit <<= 1;
	}

	pmap_ridmap[pmap_rididx] |= bit;
	mtx_unlock(&pmap_ridmutex);
	return (rid);

static void
pmap_free_rid(uint32_t rid)
	uint64_t bit;
	int idx;

	idx = rid / 64;
	bit = ~(1UL << (rid & 63));

	mtx_lock(&pmap_ridmutex);
	pmap_ridmap[idx] &= bit;
	mtx_unlock(&pmap_ridmutex);
/***************************************************
 * Page table page management routines.....
 ***************************************************/

void
pmap_pinit0(struct pmap *pmap)
	/* kernel_pmap is the same as any other pmap. */
	pmap_pinit(pmap);

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
int
pmap_pinit(struct pmap *pmap)
	int i;

	PMAP_LOCK_INIT(pmap);
	for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
		pmap->pm_rid[i] = pmap_allocate_rid();
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
	return (1);
/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
	int i;

	for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
		pmap_free_rid(pmap->pm_rid[i]);
	PMAP_LOCK_DESTROY(pmap);
/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
	struct ia64_lpte **dir1;
	struct ia64_lpte *leaf;
	vm_page_t nkpg;

	while (kernel_vm_end <= addr) {
		if (nkpt == PAGE_SIZE/8 + PAGE_SIZE*PAGE_SIZE/64)
			panic("%s: out of kernel address space", __func__);

		dir1 = ia64_kptdir[KPTE_DIR0_INDEX(kernel_vm_end)];
		if (dir1 == NULL) {
			nkpg = vm_page_alloc(NULL, nkpt++,
			    VM_ALLOC_NOOBJ|VM_ALLOC_INTERRUPT|VM_ALLOC_WIRED);
			if (nkpg == NULL)
				panic("%s: cannot add dir. page", __func__);

			dir1 = (struct ia64_lpte **)pmap_page_to_va(nkpg);
			bzero(dir1, PAGE_SIZE);
			ia64_kptdir[KPTE_DIR0_INDEX(kernel_vm_end)] = dir1;
		}

		nkpg = vm_page_alloc(NULL, nkpt++,
		    VM_ALLOC_NOOBJ|VM_ALLOC_INTERRUPT|VM_ALLOC_WIRED);
		if (nkpg == NULL)
			panic("%s: cannot add PTE page", __func__);

		leaf = (struct ia64_lpte *)pmap_page_to_va(nkpg);
		bzero(leaf, PAGE_SIZE);
		dir1[KPTE_DIR1_INDEX(kernel_vm_end)] = leaf;

		kernel_vm_end += PAGE_SIZE * NKPTEPG;
	}
/***************************************************
 * page management routines.
 ***************************************************/

/*
 * free the pv_entry back to the free list
 */
static PMAP_INLINE void
free_pv_entry(pv_entry_t pv)
	uma_zfree(pvzone, pv);
/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 */
static pv_entry_t
get_pv_entry(pmap_t locked_pmap)
	static const struct timeval printinterval = { 60, 0 };
	static struct timeval lastprint;
	struct vpgqueues *vpq;
	struct ia64_lpte *pte;
	pmap_t oldpmap, pmap;
	pv_entry_t allocated_pv, next_pv, pv;
	vm_offset_t va;
	vm_page_t m;

	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
	if (allocated_pv != NULL) {
		pv_entry_count++;
		if (pv_entry_count > pv_entry_high_water)
			pagedaemon_wakeup();
		else
			return (allocated_pv);
	}

	/*
	 * Reclaim pv entries: At first, destroy mappings to inactive
	 * pages. After that, if a pv entry is still needed, destroy
	 * mappings to active pages.
	 */
	if (ratecheck(&lastprint, &printinterval))
		printf("Approaching the limit on PV entries, "
		    "increase the vm.pmap.shpgperproc tunable.\n");
	vpq = &vm_page_queues[PQ_INACTIVE];
retry:
	TAILQ_FOREACH(m, &vpq->pl, pageq) {
		if ((m->flags & PG_MARKER) != 0 || m->hold_count || m->busy)
			continue;
		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
			va = pv->pv_va;
			pmap = pv->pv_pmap;
			/* Avoid deadlock and lock recursion. */
			if (pmap > locked_pmap)
				PMAP_LOCK(pmap);
			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
				continue;
			pmap->pm_stats.resident_count--;
			oldpmap = pmap_switch(pmap);
			pte = pmap_find_vhpt(va);
			KASSERT(pte != NULL, ("pte"));
			pmap_remove_vhpt(va);
			pmap_invalidate_page(va);
			pmap_switch(oldpmap);
			if (pmap_accessed(pte))
				vm_page_aflag_set(m, PGA_REFERENCED);
			if (pmap_dirty(pte))
				vm_page_dirty(m);
			pmap_free_pte(pte, va);
			TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
			m->md.pv_list_count--;
			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
			if (pmap != locked_pmap)
				PMAP_UNLOCK(pmap);
			if (allocated_pv == NULL)
				allocated_pv = pv;
			else
				free_pv_entry(pv);
		}
		if (TAILQ_EMPTY(&m->md.pv_list))
			vm_page_aflag_clear(m, PGA_WRITEABLE);
	}
	if (allocated_pv == NULL) {
		if (vpq == &vm_page_queues[PQ_INACTIVE]) {
			vpq = &vm_page_queues[PQ_ACTIVE];
			goto retry;
		}
		panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
	}
	return (allocated_pv);
/*
 * Conditionally create a pv entry.
 */
static boolean_t
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
	pv_entry_t pv;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (pv_entry_count < pv_entry_high_water &&
	    (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) {
		pv_entry_count++;
		pv->pv_va = va;
		pv->pv_pmap = pmap;
		TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
		m->md.pv_list_count++;
		return (TRUE);
	} else
		return (FALSE);
/*
 * Add an ia64_lpte to the VHPT.
 */
static void
pmap_enter_vhpt(struct ia64_lpte *pte, vm_offset_t va)
	struct ia64_bucket *bckt;
	struct ia64_lpte *vhpte;
	uint64_t pte_pa;

	/* Can fault, so get it out of the way. */
	pte_pa = ia64_tpa((vm_offset_t)pte);

	vhpte = (struct ia64_lpte *)ia64_thash(va);
	bckt = (struct ia64_bucket *)vhpte->chain;
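	/*
	 * The chain field of the VHPT entry itself points back at the
	 * bucket head (set up in pmap_initialize_vhpt()); the chain
	 * fields of the ia64_lptes hanging off the bucket form the
	 * collision chain proper.
	 */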
	mtx_lock_spin(&bckt->mutex);
	pte->chain = bckt->chain;
	ia64_mf();
	bckt->chain = pte_pa;

	pmap_vhpt_inserts++;
	bckt->length++;
	mtx_unlock_spin(&bckt->mutex);
/*
 * Remove the ia64_lpte matching va from the VHPT. Return zero if it
 * worked or an appropriate error code otherwise.
 */
static int
pmap_remove_vhpt(vm_offset_t va)
	struct ia64_bucket *bckt;
	struct ia64_lpte *pte;
	struct ia64_lpte *lpte;
	struct ia64_lpte *vhpte;
	uint64_t chain, tag;

	tag = ia64_ttag(va);
	vhpte = (struct ia64_lpte *)ia64_thash(va);
	bckt = (struct ia64_bucket *)vhpte->chain;

	lpte = NULL;
	mtx_lock_spin(&bckt->mutex);
	chain = bckt->chain;
	pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
	while (chain != 0 && pte->tag != tag) {
		lpte = pte;
		chain = pte->chain;
		pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
	}
	if (chain == 0) {
		mtx_unlock_spin(&bckt->mutex);
		return (ENOENT);
	}

	/* Snip this pv_entry out of the collision chain. */
	if (lpte == NULL)
		bckt->chain = pte->chain;
	else
		lpte->chain = pte->chain;
	ia64_mf();

	bckt->length--;
	mtx_unlock_spin(&bckt->mutex);
	return (0);
/*
 * Find the ia64_lpte for the given va, if any.
 */
static struct ia64_lpte *
pmap_find_vhpt(vm_offset_t va)
	struct ia64_bucket *bckt;
	struct ia64_lpte *pte;
	uint64_t chain, tag;

	tag = ia64_ttag(va);
	pte = (struct ia64_lpte *)ia64_thash(va);
	bckt = (struct ia64_bucket *)pte->chain;

	mtx_lock_spin(&bckt->mutex);
	chain = bckt->chain;
	pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
	while (chain != 0 && pte->tag != tag) {
		chain = pte->chain;
		pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
	}
	mtx_unlock_spin(&bckt->mutex);
	return ((chain != 0) ? pte : NULL);
/*
 * Remove an entry from the list of managed mappings.
 */
static int
pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va, pv_entry_t pv)
	if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
			if (pmap == pv->pv_pmap && va == pv->pv_va)
				break;
		}
	} else {
		TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
			if (va == pv->pv_va)
				break;
		}
	}

	TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
	m->md.pv_list_count--;
	if (TAILQ_FIRST(&m->md.pv_list) == NULL)
		vm_page_aflag_clear(m, PGA_WRITEABLE);

	TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
/*
 * Create a pv entry for page at pa for
 * (pmap, va).
 */
static void
pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
	pv_entry_t pv;

	pv = get_pv_entry(pmap);
	pv->pv_pmap = pmap;
	pv->pv_va = va;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
	m->md.pv_list_count++;
/*
 * Routine:	pmap_extract
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
	struct ia64_lpte *pte;

	pa = 0;
	PMAP_LOCK(pmap);
	oldpmap = pmap_switch(pmap);
	pte = pmap_find_vhpt(va);
	if (pte != NULL && pmap_present(pte))
		pa = pmap_ppn(pte);
	pmap_switch(oldpmap);
	PMAP_UNLOCK(pmap);
	return (pa);
/*
 * Routine:	pmap_extract_and_hold
 *	Atomically extract and hold the physical page
 *	with the given pmap and virtual address pair
 *	if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
	struct ia64_lpte *pte;

	PMAP_LOCK(pmap);
	oldpmap = pmap_switch(pmap);
retry:
	pte = pmap_find_vhpt(va);
	if (pte != NULL && pmap_present(pte) &&
	    (pmap_prot(pte) & prot) == prot) {
		m = PHYS_TO_VM_PAGE(pmap_ppn(pte));
		if (vm_page_pa_tryrelock(pmap, pmap_ppn(pte), &pa))
			goto retry;
		vm_page_hold(m);
	}
	pmap_switch(oldpmap);
/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Find the kernel lpte for mapping the given virtual address, which
 * must be in the part of region 5 which we can cover with our kernel
 * 'page tables'.
 */
static struct ia64_lpte *
pmap_find_kpte(vm_offset_t va)
	struct ia64_lpte **dir1;
	struct ia64_lpte *leaf;

	KASSERT((va >> 61) == 5,
	    ("kernel mapping 0x%lx not in region 5", va));
	KASSERT(va < kernel_vm_end,
	    ("kernel mapping 0x%lx out of range", va));

	dir1 = ia64_kptdir[KPTE_DIR0_INDEX(va)];
	leaf = dir1[KPTE_DIR1_INDEX(va)];
	return (&leaf[KPTE_PTE_INDEX(va)]);
/*
 * Find a pte suitable for mapping a user-space address. If one exists
 * in the VHPT, that one will be returned, otherwise a new pte is
 * allocated.
 */
static struct ia64_lpte *
pmap_find_pte(vm_offset_t va)
	struct ia64_lpte *pte;

	if (va >= VM_MAXUSER_ADDRESS)
		return pmap_find_kpte(va);

	pte = pmap_find_vhpt(va);
	if (pte == NULL) {
		pte = uma_zalloc(ptezone, M_NOWAIT | M_ZERO);
		pte->tag = 1UL << 63;
	}
	return (pte);
/*
 * Free a pte which is now unused. This simply returns it to the zone
 * allocator if it is a user mapping. For kernel mappings, clear the
 * valid bit to make it clear that the mapping is not currently used.
 */
static void
pmap_free_pte(struct ia64_lpte *pte, vm_offset_t va)
	if (va < VM_MAXUSER_ADDRESS)
		uma_zfree(ptezone, pte);
	else
		pmap_clear_present(pte);
static PMAP_INLINE void
pmap_pte_prot(pmap_t pm, struct ia64_lpte *pte, vm_prot_t prot)
	static long prot2ar[4] = {
		PTE_AR_R,		/* VM_PROT_NONE */
		PTE_AR_RW,		/* VM_PROT_WRITE */
		PTE_AR_RX|PTE_ED,	/* VM_PROT_EXECUTE */
		PTE_AR_RWX|PTE_ED	/* VM_PROT_WRITE|VM_PROT_EXECUTE */
	};

	pte->pte &= ~(PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK | PTE_ED);
	pte->pte |= (uint64_t)(prot & VM_PROT_ALL) << 56;
	pte->pte |= (prot == VM_PROT_NONE || pm == kernel_pmap)
	    ? PTE_PL_KERN : PTE_PL_USER;
	pte->pte |= prot2ar[(prot & VM_PROT_ALL) >> 1];
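	/*
	 * prot2ar is indexed by (prot & VM_PROT_ALL) >> 1, so VM_PROT_READ
	 * (0x1) shares slot 0 with VM_PROT_NONE: read permission is
	 * implied by every access-rights value used above.
	 */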
static PMAP_INLINE void
pmap_pte_attr(struct ia64_lpte *pte, vm_memattr_t ma)
	pte->pte &= ~PTE_MA_MASK;
	pte->pte |= (ma & PTE_MA_MASK);
/*
 * Set a pte to contain a valid mapping and enter it in the VHPT. If
 * the pte was originally valid, then it's assumed to already be in the
 * VHPT.
 * This function does not set the protection bits. It's expected
 * that those have been set correctly prior to calling this function.
 */
static void
pmap_set_pte(struct ia64_lpte *pte, vm_offset_t va, vm_offset_t pa,
    boolean_t wired, boolean_t managed)
	pte->pte &= PTE_PROT_MASK | PTE_MA_MASK | PTE_PL_MASK |
	    PTE_AR_MASK | PTE_ED;
	pte->pte |= PTE_PRESENT;
	pte->pte |= (managed) ? PTE_MANAGED : (PTE_DIRTY | PTE_ACCESSED);
	pte->pte |= (wired) ? PTE_WIRED : 0;
	pte->pte |= pa & PTE_PPN_MASK;

	pte->itir = PAGE_SHIFT << 2;
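	/* The ITIR takes the log2 page size in bits 7:2; the key is 0. */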
	pte->tag = ia64_ttag(va);

/*
 * Remove the (possibly managed) mapping represented by pte from the
 * given pmap.
 */
static int
pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vm_offset_t va,
    pv_entry_t pv, int freepte)
	int error;
	vm_page_t m;

	/*
	 * First remove from the VHPT.
	 */
	error = pmap_remove_vhpt(va);

	pmap_invalidate_page(va);

	if (pmap_wired(pte))
		pmap->pm_stats.wired_count -= 1;

	pmap->pm_stats.resident_count -= 1;
	if (pmap_managed(pte)) {
		m = PHYS_TO_VM_PAGE(pmap_ppn(pte));
		if (pmap_dirty(pte))
			vm_page_dirty(m);
		if (pmap_accessed(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		error = pmap_remove_entry(pmap, m, va, pv);
	}
	if (freepte)
		pmap_free_pte(pte, va);

	return (error);
/*
 * Extract the physical page address associated with a kernel
 * virtual address.
 */
vm_paddr_t
pmap_kextract(vm_offset_t va)
	struct ia64_lpte *pte;
	uint64_t *pbvm_pgtbl;
	vm_paddr_t pa;
	u_int idx;

	KASSERT(va >= VM_MAXUSER_ADDRESS, ("Must be kernel VA"));

	/* Regions 6 and 7 are direct mapped. */
	if (va >= IA64_RR_BASE(6)) {
		pa = IA64_RR_MASK(va);
		goto out;
	}

	/* Region 5 is our KVA. Bail out if the VA is beyond our limits. */
	if (va >= kernel_vm_end)
		goto err_out;
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		pte = pmap_find_kpte(va);
		pa = pmap_present(pte) ? pmap_ppn(pte) | (va & PAGE_MASK) : 0;
		goto out;
	}

	/* The PBVM page table. */
	if (va >= IA64_PBVM_PGTBL + bootinfo->bi_pbvm_pgtblsz)
		goto err_out;
	if (va >= IA64_PBVM_PGTBL) {
		pa = (va - IA64_PBVM_PGTBL) + bootinfo->bi_pbvm_pgtbl;
		goto out;
	}

	/* The PBVM itself. */
	if (va >= IA64_PBVM_BASE) {
		pbvm_pgtbl = (void *)IA64_PBVM_PGTBL;
		idx = (va - IA64_PBVM_BASE) >> IA64_PBVM_PAGE_SHIFT;
		if (idx >= (bootinfo->bi_pbvm_pgtblsz >> 3))
			goto err_out;
		if ((pbvm_pgtbl[idx] & PTE_PRESENT) == 0)
			goto err_out;
		pa = (pbvm_pgtbl[idx] & PTE_PPN_MASK) +
		    (va & IA64_PBVM_PAGE_MASK);
		goto out;
	}

 err_out:
	printf("XXX: %s: va=%#lx is invalid\n", __func__, va);
	pa = 0;

 out:
	return (pa);
/*
 * Add a list of wired pages to the kva. This routine is only used for
 * temporary kernel mappings that do not need to have page modification
 * or references recorded. Note that old mappings are simply written
 * over. The page is effectively wired, but it's customary to not have
 * the PTE reflect that, nor update statistics.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
	struct ia64_lpte *pte;
	int i;

	for (i = 0; i < count; i++) {
		pte = pmap_find_kpte(va);
		if (pmap_present(pte))
			pmap_invalidate_page(va);
		else
			pmap_enter_vhpt(pte, va);
		pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
		pmap_pte_attr(pte, m[i]->md.memattr);
		pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m[i]), FALSE, FALSE);
		va += PAGE_SIZE;
	}
/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
	struct ia64_lpte *pte;
	int i;

	for (i = 0; i < count; i++) {
		pte = pmap_find_kpte(va);
		if (pmap_present(pte)) {
			pmap_remove_vhpt(va);
			pmap_invalidate_page(va);
			pmap_clear_present(pte);
		}
		va += PAGE_SIZE;
	}
/*
 * Add a wired page to the kva. As for pmap_qenter(), it's customary
 * to not have the PTE reflect that, nor update statistics.
 */
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
	struct ia64_lpte *pte;

	pte = pmap_find_kpte(va);
	if (pmap_present(pte))
		pmap_invalidate_page(va);
	else
		pmap_enter_vhpt(pte, va);
	pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
	pmap_pte_attr(pte, VM_MEMATTR_DEFAULT);
	pmap_set_pte(pte, va, pa, FALSE, FALSE);
/*
 * Remove a page from the kva
 */
void
pmap_kremove(vm_offset_t va)
	struct ia64_lpte *pte;

	pte = pmap_find_kpte(va);
	if (pmap_present(pte)) {
		pmap_remove_vhpt(va);
		pmap_invalidate_page(va);
		pmap_clear_present(pte);
	}
/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * The value passed in '*virt' is a suggested virtual address for
 * the mapping. Architectures which can support a direct-mapped
 * physical to virtual region can return the appropriate address
 * within that region, leaving '*virt' unchanged. Other
 * architectures should map the pages starting at '*virt' and
 * update '*virt' with the first usable address after the mapped
 * region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
	return IA64_PHYS_TO_RR7(start);
/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
	pmap_t oldpmap;
	vm_offset_t va;
	pv_entry_t npv, pv;
	struct ia64_lpte *pte;

	if (pmap->pm_stats.resident_count == 0)
		return;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	oldpmap = pmap_switch(pmap);

	/*
	 * special handling of removing one page. a very
	 * common operation and easy to short circuit some
	 * code.
	 */
	if (sva + PAGE_SIZE == eva) {
		pte = pmap_find_vhpt(sva);
		if (pte != NULL)
			pmap_remove_pte(pmap, pte, sva, 0, 1);
		goto out;
	}

	if (pmap->pm_stats.resident_count < ((eva - sva) >> PAGE_SHIFT)) {
		TAILQ_FOREACH_SAFE(pv, &pmap->pm_pvlist, pv_plist, npv) {
			va = pv->pv_va;
			if (va >= sva && va < eva) {
				pte = pmap_find_vhpt(va);
				KASSERT(pte != NULL, ("pte"));
				pmap_remove_pte(pmap, pte, va, pv, 1);
			}
		}
	} else {
		for (va = sva; va < eva; va += PAGE_SIZE) {
			pte = pmap_find_vhpt(va);
			if (pte != NULL)
				pmap_remove_pte(pmap, pte, va, 0, 1);
		}
	}

out:
	vm_page_unlock_queues();
	pmap_switch(oldpmap);
	PMAP_UNLOCK(pmap);
/*
 * Routine:	pmap_remove_all
 *
 *	Removes this physical page from
 *	all physical maps in which it resides.
 *	Reflects back modify bits to the pager.
 *
 * Notes:
 *	Original versions of this routine were very
 *	inefficient because they iteratively called
 *	pmap_remove (slow...)
 */
void
pmap_remove_all(vm_page_t m)
	pmap_t oldpmap;
	pv_entry_t pv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_all: page %p is not managed", m));
	vm_page_lock_queues();
	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		struct ia64_lpte *pte;
		pmap_t pmap = pv->pv_pmap;
		vm_offset_t va = pv->pv_va;

		PMAP_LOCK(pmap);
		oldpmap = pmap_switch(pmap);
		pte = pmap_find_vhpt(va);
		KASSERT(pte != NULL, ("pte"));
		if (pmap_ppn(pte) != VM_PAGE_TO_PHYS(m))
			panic("pmap_remove_all: pv_table for %lx is inconsistent",
			    VM_PAGE_TO_PHYS(m));
		pmap_remove_pte(pmap, pte, va, pv, 1);
		pmap_switch(oldpmap);
		PMAP_UNLOCK(pmap);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
	pmap_t oldpmap;
	struct ia64_lpte *pte;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
	    (VM_PROT_WRITE|VM_PROT_EXECUTE))
		return;

	if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
		panic("pmap_protect: unaligned addresses");

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	oldpmap = pmap_switch(pmap);
	for ( ; sva < eva; sva += PAGE_SIZE) {
		/* If page is invalid, skip this page */
		pte = pmap_find_vhpt(sva);
		if (pte == NULL)
			continue;

		/* If there's no change, skip it too */
		if (pmap_prot(pte) == prot)
			continue;

		if ((prot & VM_PROT_WRITE) == 0 &&
		    pmap_managed(pte) && pmap_dirty(pte)) {
			vm_paddr_t pa = pmap_ppn(pte);
			vm_page_t m = PHYS_TO_VM_PAGE(pa);

			vm_page_dirty(m);
			pmap_clear_dirty(pte);
		}

		if (prot & VM_PROT_EXECUTE)
			ia64_sync_icache(sva, PAGE_SIZE);

		pmap_pte_prot(pmap, pte, prot);
		pmap_invalidate_page(sva);
	}
	vm_page_unlock_queues();
	pmap_switch(oldpmap);
	PMAP_UNLOCK(pmap);
/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:	This is the only routine which MAY NOT lazy-evaluate
 *	or lose information. That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
	pmap_t oldpmap;
	vm_offset_t opa, pa;
	struct ia64_lpte origpte;
	struct ia64_lpte *pte;
	boolean_t icache_inval, managed;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	oldpmap = pmap_switch(pmap);

	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
	    ("pmap_enter: page %p is not busy", m));

	/*
	 * Find (or create) a pte for the given mapping.
	 */
	while ((pte = pmap_find_pte(va)) == NULL) {
		pmap_switch(oldpmap);
		PMAP_UNLOCK(pmap);
		vm_page_unlock_queues();
		VM_WAIT;
		vm_page_lock_queues();
		PMAP_LOCK(pmap);
		oldpmap = pmap_switch(pmap);
	}

	origpte = *pte;
	if (!pmap_present(pte)) {
		opa = ~0UL;
		pmap_enter_vhpt(pte, va);
	} else
		opa = pmap_ppn(pte);

	managed = FALSE;
	pa = VM_PAGE_TO_PHYS(m);

	icache_inval = (prot & VM_PROT_EXECUTE) ? TRUE : FALSE;

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (opa == pa) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is wired,
		 * the PT page will be also.
		 */
		if (wired && !pmap_wired(&origpte))
			pmap->pm_stats.wired_count++;
		else if (!wired && pmap_wired(&origpte))
			pmap->pm_stats.wired_count--;

		managed = (pmap_managed(&origpte)) ? TRUE : FALSE;

		/*
		 * We might be turning off write access to the page,
		 * so we go ahead and sense modify status. Otherwise,
		 * we can avoid I-cache invalidation if the page
		 * already allowed execution.
		 */
		if (managed && pmap_dirty(&origpte))
			vm_page_dirty(m);
		else if (pmap_exec(&origpte))
			icache_inval = FALSE;

		pmap_invalidate_page(va);
		goto validate;
	}

	/*
	 * Mapping has changed, invalidate old range and fall
	 * through to handle validating new mapping.
	 */
	if (opa != ~0UL) {
		pmap_remove_pte(pmap, pte, va, 0, 0);
		pmap_enter_vhpt(pte, va);
	}

	/*
	 * Enter on the PV list if part of our managed memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
		    ("pmap_enter: managed mapping within the clean submap"));
		pmap_insert_entry(pmap, va, m);
		managed = TRUE;
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring. This
	 * adds the pte to the VHPT if necessary.
	 */
	pmap_pte_prot(pmap, pte, prot);
	pmap_pte_attr(pte, m->md.memattr);
	pmap_set_pte(pte, va, pa, wired, managed);

	/* Invalidate the I-cache when needed. */
	if (icache_inval)
		ia64_sync_icache(va, PAGE_SIZE);

	if ((prot & VM_PROT_WRITE) != 0 && managed)
		vm_page_aflag_set(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
	pmap_switch(oldpmap);
	PMAP_UNLOCK(pmap);
/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start. Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end. Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
	pmap_t oldpmap;
	vm_page_t m;
	vm_pindex_t diff, psize;

	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
	psize = atop(end - start);
	m = m_start;
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	oldpmap = pmap_switch(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot);
		m = TAILQ_NEXT(m, listq);
	}
	vm_page_unlock_queues();
	pmap_switch(oldpmap);
	PMAP_UNLOCK(pmap);
/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * but is *MUCH* faster than pmap_enter...
 */
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
	pmap_t oldpmap;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	oldpmap = pmap_switch(pmap);
	pmap_enter_quick_locked(pmap, va, m, prot);
	vm_page_unlock_queues();
	pmap_switch(oldpmap);
	PMAP_UNLOCK(pmap);
static void
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
	struct ia64_lpte *pte;
	boolean_t managed;

	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
	    (m->oflags & VPO_UNMANAGED) != 0,
	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	if ((pte = pmap_find_pte(va)) == NULL)
		return;

	if (!pmap_present(pte)) {
		/* Enter on the PV list if the page is managed. */
		if ((m->oflags & VPO_UNMANAGED) == 0) {
			if (!pmap_try_insert_pv_entry(pmap, va, m)) {
				pmap_free_pte(pte, va);
				return;
			}
			managed = TRUE;
		} else
			managed = FALSE;

		/* Increment counters. */
		pmap->pm_stats.resident_count++;

		/* Initialise with R/O protection and enter into VHPT. */
		pmap_enter_vhpt(pte, va);
		pmap_pte_prot(pmap, pte,
		    prot & (VM_PROT_READ | VM_PROT_EXECUTE));
		pmap_pte_attr(pte, m->md.memattr);
		pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed);

		if (prot & VM_PROT_EXECUTE)
			ia64_sync_icache(va, PAGE_SIZE);
	}
/*
 * pmap_object_init_pt preloads the ptes for a given object
 * into the specified pmap. This eliminates the blast of soft
 * faults on process startup and immediately after an mmap.
 */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex,
    vm_size_t size)
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
	    ("pmap_object_init_pt: non-device object"));
/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	pmap_t oldpmap;
	struct ia64_lpte *pte;

	PMAP_LOCK(pmap);
	oldpmap = pmap_switch(pmap);

	pte = pmap_find_vhpt(va);
	KASSERT(pte != NULL, ("pte"));
	if (wired && !pmap_wired(pte)) {
		pmap->pm_stats.wired_count++;
		pmap_set_wired(pte);
	} else if (!wired && pmap_wired(pte)) {
		pmap->pm_stats.wired_count--;
		pmap_clear_wired(pte);
	}

	pmap_switch(oldpmap);
	PMAP_UNLOCK(pmap);
/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
/*
 * pmap_zero_page zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 */
void
pmap_zero_page(vm_page_t m)
	p = (void *)pmap_page_to_va(m);
	bzero(p, PAGE_SIZE);

/*
 * pmap_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
	p = (void *)pmap_page_to_va(m);
	bzero(p + off, size);

/*
 * pmap_zero_page_idle zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents. This is for the vm_idlezero process.
 */
void
pmap_zero_page_idle(vm_page_t m)
	p = (void *)pmap_page_to_va(m);
	bzero(p, PAGE_SIZE);

/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 */
void
pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
	src = (void *)pmap_page_to_va(msrc);
	dst = (void *)pmap_page_to_va(mdst);
	bcopy(src, dst, PAGE_SIZE);
/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page. This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_page_exists_quick: page %p is not managed", m));
	rv = FALSE;
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		if (pv->pv_pmap == pmap) {
			rv = TRUE;
			break;
		}
		loops++;
		if (loops >= 16)
			break;
	}
	vm_page_unlock_queues();
	return (rv);
/*
 * pmap_page_wired_mappings:
 *
 *	Return the number of managed mappings to the given physical page
 *	that are wired.
 */
int
pmap_page_wired_mappings(vm_page_t m)
	struct ia64_lpte *pte;
	pmap_t oldpmap, pmap;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = pv->pv_pmap;
		PMAP_LOCK(pmap);
		oldpmap = pmap_switch(pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		KASSERT(pte != NULL, ("pte"));
		if (pmap_wired(pte))
			count++;
		pmap_switch(oldpmap);
		PMAP_UNLOCK(pmap);
	}
	vm_page_unlock_queues();
	return (count);
/*
 * Remove all pages from specified address space
 * this aids process exit speeds. Also, this code
 * is special cased for current process only, but
 * can have the more generic (and slightly slower)
 * mode enabled. This is much faster than pmap_remove
 * in the case of running down an entire address space.
 */
void
pmap_remove_pages(pmap_t pmap)
	pmap_t oldpmap;
	pv_entry_t pv, npv;

	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
		printf("warning: %s called with non-current pmap\n",
		    __func__);
		return;
	}

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	oldpmap = pmap_switch(pmap);

	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
		struct ia64_lpte *pte;

		npv = TAILQ_NEXT(pv, pv_plist);

		pte = pmap_find_vhpt(pv->pv_va);
		KASSERT(pte != NULL, ("pte"));
		if (!pmap_wired(pte))
			pmap_remove_pte(pmap, pte, pv->pv_va, pv, 1);
	}

	pmap_switch(oldpmap);
	PMAP_UNLOCK(pmap);
	vm_page_unlock_queues();
/*
 * pmap_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
pmap_ts_referenced(vm_page_t m)
	struct ia64_lpte *pte;
	pmap_t oldpmap;
	int count = 0;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_ts_referenced: page %p is not managed", m));
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		oldpmap = pmap_switch(pv->pv_pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		KASSERT(pte != NULL, ("pte"));
		if (pmap_accessed(pte)) {
			count++;
			pmap_clear_accessed(pte);
			pmap_invalidate_page(pv->pv_va);
		}
		pmap_switch(oldpmap);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();
	return (count);
/*
 * pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
	struct ia64_lpte *pte;
	pmap_t oldpmap;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_modified: page %p is not managed", m));
	rv = FALSE;

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can be dirty.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return (rv);
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		oldpmap = pmap_switch(pv->pv_pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		pmap_switch(oldpmap);
		KASSERT(pte != NULL, ("pte"));
		rv = pmap_dirty(pte) ? TRUE : FALSE;
		PMAP_UNLOCK(pv->pv_pmap);
		if (rv)
			break;
	}
	vm_page_unlock_queues();
	return (rv);
/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
	struct ia64_lpte *pte;

	pte = pmap_find_vhpt(addr);
	if (pte != NULL && pmap_present(pte))
		return (FALSE);
	return (TRUE);
/*
 * pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
	struct ia64_lpte *pte;
	pmap_t oldpmap;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_referenced: page %p is not managed", m));
	rv = FALSE;
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		oldpmap = pmap_switch(pv->pv_pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		pmap_switch(oldpmap);
		KASSERT(pte != NULL, ("pte"));
		rv = pmap_accessed(pte) ? TRUE : FALSE;
		PMAP_UNLOCK(pv->pv_pmap);
		if (rv)
			break;
	}
	vm_page_unlock_queues();
	return (rv);
/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
	struct ia64_lpte *pte;
	pmap_t oldpmap;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_modify: page %p is not managed", m));
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("pmap_clear_modify: page %p is busy", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
	 * If the object containing the page is locked and the page is not
	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		oldpmap = pmap_switch(pv->pv_pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		KASSERT(pte != NULL, ("pte"));
		if (pmap_dirty(pte)) {
			pmap_clear_dirty(pte);
			pmap_invalidate_page(pv->pv_va);
		}
		pmap_switch(oldpmap);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();
/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
	struct ia64_lpte *pte;
	pmap_t oldpmap;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_reference: page %p is not managed", m));
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		oldpmap = pmap_switch(pv->pv_pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		KASSERT(pte != NULL, ("pte"));
		if (pmap_accessed(pte)) {
			pmap_clear_accessed(pte);
			pmap_invalidate_page(pv->pv_va);
		}
		pmap_switch(oldpmap);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();
/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
pmap_remove_write(vm_page_t m)
	struct ia64_lpte *pte;
	pmap_t oldpmap, pmap;
	vm_prot_t prot;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
	 * another thread while the object is locked. Thus, if PGA_WRITEABLE
	 * is clear, no page table entries need updating.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = pv->pv_pmap;
		PMAP_LOCK(pmap);
		oldpmap = pmap_switch(pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		KASSERT(pte != NULL, ("pte"));
		prot = pmap_prot(pte);
		if ((prot & VM_PROT_WRITE) != 0) {
			if (pmap_dirty(pte)) {
				vm_page_dirty(m);
				pmap_clear_dirty(pte);
			}
			prot &= ~VM_PROT_WRITE;
			pmap_pte_prot(pmap, pte, prot);
			pmap_pte_attr(pte, m->md.memattr);
			pmap_invalidate_page(pv->pv_va);
		}
		pmap_switch(oldpmap);
		PMAP_UNLOCK(pmap);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t sz)
	static void *last_va = NULL;
	static vm_paddr_t last_pa = 0;
	static vm_size_t last_sz = 0;
	struct efi_md *md;
	vm_offset_t va;

	if (pa == last_pa && sz == last_sz)
		return (last_va);

	md = efi_md_find(pa);
	if (md == NULL) {
		printf("%s: [%#lx..%#lx] not covered by memory descriptor\n",
		    __func__, pa, pa + sz - 1);
		return (NULL);
	}

	if (md->md_type == EFI_MD_TYPE_FREE) {
		printf("%s: [%#lx..%#lx] is in DRAM\n", __func__, pa,
		    pa + sz - 1);
		return (NULL);
	}

	va = (md->md_attr & EFI_MD_ATTR_WB) ? IA64_PHYS_TO_RR7(pa) :
	    IA64_PHYS_TO_RR6(pa);

	last_va = (void *)va;
	last_pa = pa;
	last_sz = sz;
	return (last_va);
/*
 * 'Unmap' a range mapped by pmap_mapdev().
 */
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
/*
 * Sets the memory attribute for the specified page.
 */
static void
pmap_page_set_memattr_1(void *arg)
	struct ia64_pal_result res;
	register_t is;
	uintptr_t pp = (uintptr_t)arg;

	is = intr_disable();
	res = ia64_call_pal_static(pp, 0, 0, 0);
	intr_restore(is);
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
	struct ia64_lpte *pte;
	pmap_t oldpmap;
	void *va;

	vm_page_lock_queues();
	m->md.memattr = ma;
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		oldpmap = pmap_switch(pv->pv_pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		KASSERT(pte != NULL, ("pte"));
		pmap_pte_attr(pte, ma);
		pmap_invalidate_page(pv->pv_va);
		pmap_switch(oldpmap);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();

	if (ma == VM_MEMATTR_UNCACHEABLE) {
#ifdef SMP
		smp_rendezvous(NULL, pmap_page_set_memattr_1, NULL,
		    (void *)PAL_PREFETCH_VISIBILITY);
#else
		pmap_page_set_memattr_1((void *)PAL_PREFETCH_VISIBILITY);
#endif
		va = (void *)pmap_page_to_va(m);
		cpu_flush_dcache(va, PAGE_SIZE);

#ifdef SMP
		smp_rendezvous(NULL, pmap_page_set_memattr_1, NULL,
		    (void *)PAL_MC_DRAIN);
#else
		pmap_page_set_memattr_1((void *)PAL_MC_DRAIN);
#endif
	}
/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
	struct ia64_lpte *pte, tpte;
	pmap_t oldpmap;

	PMAP_LOCK(pmap);
retry:
	oldpmap = pmap_switch(pmap);
	pte = pmap_find_vhpt(addr);
	if (pte != NULL) {
		tpte = *pte;
		pte = &tpte;
	}
	pmap_switch(oldpmap);
	if (pte == NULL || !pmap_present(pte)) {
		val = 0;
		goto out;
	}
	val = MINCORE_INCORE;
	if (pmap_dirty(pte))
		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
	if (pmap_accessed(pte))
		val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
	    pmap_managed(pte)) {
		pa = pmap_ppn(pte);
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
out:
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
void
pmap_activate(struct thread *td)
	pmap_switch(vmspace_pmap(td->td_proc->p_vmspace));

pmap_t
pmap_switch(pmap_t pm)
	pmap_t prevpm;
	int i;

	prevpm = PCPU_GET(md.current_pmap);
	if (prevpm == pm)
		goto out;
	if (pm == NULL) {
		for (i = 0; i < IA64_VM_MINKERN_REGION; i++) {
			ia64_set_rr(IA64_RR_BASE(i),
			    (i << 8)|(PAGE_SHIFT << 2)|1);
		}
	} else {
		for (i = 0; i < IA64_VM_MINKERN_REGION; i++) {
			ia64_set_rr(IA64_RR_BASE(i),
			    (pm->pm_rid[i] << 8)|(PAGE_SHIFT << 2)|1);
		}
	}
	PCPU_SET(md.current_pmap, pm);
out:
	return (prevpm);
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
	struct ia64_lpte *pte;

	sz += va & 31;
	va &= ~31;
	sz = (sz + 31) & ~31;

	oldpm = pmap_switch(pm);
	while (sz > 0) {
		lim = round_page(va);
		len = MIN(lim - va, sz);
		pte = pmap_find_vhpt(va);
		if (pte != NULL && pmap_present(pte))
			ia64_sync_icache(va, len);
		va += len;
		sz -= len;
	}
	pmap_switch(oldpm);
/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)

#include "opt_ddb.h"

#ifdef DDB

#include <ddb/ddb.h>
static const char* psnames[] = {
	"1B",	"2B",	"4B",	"8B",
	"16B",	"32B",	"64B",	"128B",
	"256B",	"512B",	"1K",	"2K",
	"4K",	"8K",	"16K",	"32K",
	"64K",	"128K",	"256K",	"512K",
	"1M",	"2M",	"4M",	"8M",
	"16M",	"32M",	"64M",	"128M",
	"256M",	"512M",	"1G",	"2G"
};
static void
print_trs(int type)
	struct ia64_pal_result res;
	int i, maxtr;
	static const char *manames[] = {
		"WB",	"bad",	"bad",	"bad",
		"UC",	"UCE",	"WC",	"NaT",
	};

	res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0);
	if (res.pal_status != 0) {
		db_printf("Can't get VM summary\n");
		return;
	}

	if (type == 0)
		maxtr = (res.pal_result[0] >> 40) & 0xff;
	else
		maxtr = (res.pal_result[0] >> 32) & 0xff;

	db_printf("V RID Virtual Page Physical Page PgSz ED AR PL D A MA P KEY\n");
	for (i = 0; i <= maxtr; i++) {
		bzero(&buf, sizeof(buf));
		res = ia64_pal_physical(PAL_VM_TR_READ, i, type,
		    ia64_tpa((uint64_t)&buf));
		if (!(res.pal_result[0] & 1))
			buf.pte &= ~PTE_AR_MASK;
		if (!(res.pal_result[0] & 2))
			buf.pte &= ~PTE_PL_MASK;
		if (!(res.pal_result[0] & 4))
			pmap_clear_dirty(&buf);
		if (!(res.pal_result[0] & 8))
			buf.pte &= ~PTE_MA_MASK;
		db_printf("%d %06x %013lx %013lx %4s %d %d %d %d %d %-3s "
		    "%d %06x\n", (int)buf.ifa & 1, buf.rr.rr_rid,
		    buf.ifa >> 12, (buf.pte & PTE_PPN_MASK) >> 12,
		    psnames[(buf.itir & ITIR_PS_MASK) >> 2],
		    (buf.pte & PTE_ED) ? 1 : 0,
		    (int)(buf.pte & PTE_AR_MASK) >> 9,
		    (int)(buf.pte & PTE_PL_MASK) >> 7,
		    (pmap_dirty(&buf)) ? 1 : 0,
		    (pmap_accessed(&buf)) ? 1 : 0,
		    manames[(buf.pte & PTE_MA_MASK) >> 2],
		    (pmap_present(&buf)) ? 1 : 0,
		    (int)((buf.itir & ITIR_KEY_MASK) >> 8));
	}
DB_COMMAND(itr, db_itr)

DB_COMMAND(dtr, db_dtr)

DB_COMMAND(rr, db_rr)
{
	struct ia64_rr rr;
	uint64_t t;
	int i;

	printf("RR RID PgSz VE\n");
	for (i = 0; i < 8; i++) {
		__asm __volatile ("mov %0=rr[%1]"
		    : "=r"(t)
		    : "r"(IA64_RR_BASE(i)));
		*(uint64_t *) &rr = t;
		printf("%d %06x %4s %d\n",
		    i, rr.rr_rid, psnames[rr.rr_ps], rr.rr_ve);
	}
}

DB_COMMAND(thash, db_thash)
{
	db_printf("%p\n", (void *) ia64_thash(addr));
}

DB_COMMAND(ttag, db_ttag)
{
	db_printf("0x%lx\n", ia64_ttag(addr));
}
DB_COMMAND(kpte, db_kpte)
{
	struct ia64_lpte *pte;

	if (!have_addr) {
		db_printf("usage: kpte <kva>\n");
		return;
	}
	if (addr < VM_MIN_KERNEL_ADDRESS) {
		db_printf("kpte: error: invalid <kva>\n");
		return;
	}
	pte = pmap_find_kpte(addr);
	db_printf("kpte at %p:\n", pte);
	db_printf(" pte =%016lx\n", pte->pte);
	db_printf(" itir =%016lx\n", pte->itir);
	db_printf(" tag =%016lx\n", pte->tag);
	db_printf(" chain=%016lx\n", pte->chain);
}

#endif