2 * Copyright (c) 1991 Regents of the University of California.
4 * Copyright (c) 1994 John S. Dyson
6 * Copyright (c) 1994 David Greenman
8 * Copyright (c) 1998,2000 Doug Rabson
11 * This code is derived from software contributed to Berkeley by
12 * the Systems Programming Group of the University of Utah Computer
13 * Science Department and William Jolitz of UUNET Technologies Inc.
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. All advertising materials mentioning features or use of this software
24 * must display the following acknowledgement:
25 * This product includes software developed by the University of
26 * California, Berkeley and its contributors.
27 * 4. Neither the name of the University nor the names of its contributors
28 * may be used to endorse or promote products derived from this software
29 * without specific prior written permission.
31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
44 * from: i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
45 * with some ideas from NetBSD's alpha pmap
48 #include <sys/cdefs.h>
49 __FBSDID("$FreeBSD$");
51 #include <sys/param.h>
52 #include <sys/kernel.h>
55 #include <sys/mutex.h>
58 #include <sys/sysctl.h>
59 #include <sys/systm.h>
62 #include <vm/vm_param.h>
63 #include <vm/vm_page.h>
64 #include <vm/vm_map.h>
65 #include <vm/vm_object.h>
66 #include <vm/vm_pageout.h>
69 #include <machine/bootinfo.h>
70 #include <machine/efi.h>
71 #include <machine/md_var.h>
72 #include <machine/pal.h>
75 * Manages physical address maps.
77 * In addition to hardware address maps, this
78 * module is called upon to provide software-use-only
79 * maps which may or may not be stored in the same
80 * form as hardware maps. These pseudo-maps are
81 * used to store intermediate results from copy
82 * operations to and from address spaces.
84 * Since the information managed by this module is
85 * also stored by the logical address mapping module,
86 * this module may throw away valid virtual-to-physical
87 * mappings at almost any time. However, invalidations
88 * of virtual-to-physical mappings must be done as
91 * In order to cope with hardware architectures which
92 * make virtual-to-physical map invalidates expensive,
93 * this module may delay invalidate or reduced protection
94 * operations until such time as they are actually
95 * necessary. This module is given full information as
96 * to which processors are currently using which maps,
97 * and to when physical maps must be made correct.
101 * Following the Linux model, region IDs are allocated in groups of
102 * eight so that a single region ID can be used for as many RRs as we
103 * want by encoding the RR number into the low bits of the ID.
105 * We reserve region ID 0 for the kernel and allocate the remaining
106 * IDs for user pmaps.
108 * Region 0-3: User virtually mapped
109 * Region 4: PBVM and special mappings
110 * Region 5: Kernel virtual memory
111 * Region 6: Direct-mapped uncacheable
112 * Region 7: Direct-mapped cacheable
115 /* XXX move to a header. */
116 extern uint64_t ia64_gateway_page[];
118 #ifndef PMAP_SHPGPERPROC
119 #define PMAP_SHPGPERPROC 200
122 #if !defined(DIAGNOSTIC)
123 #define PMAP_INLINE __inline
128 #define pmap_accessed(lpte) ((lpte)->pte & PTE_ACCESSED)
129 #define pmap_dirty(lpte) ((lpte)->pte & PTE_DIRTY)
130 #define pmap_exec(lpte) ((lpte)->pte & PTE_AR_RX)
131 #define pmap_managed(lpte) ((lpte)->pte & PTE_MANAGED)
132 #define pmap_ppn(lpte) ((lpte)->pte & PTE_PPN_MASK)
133 #define pmap_present(lpte) ((lpte)->pte & PTE_PRESENT)
134 #define pmap_prot(lpte) (((lpte)->pte & PTE_PROT_MASK) >> 56)
135 #define pmap_wired(lpte) ((lpte)->pte & PTE_WIRED)
137 #define pmap_clear_accessed(lpte) (lpte)->pte &= ~PTE_ACCESSED
138 #define pmap_clear_dirty(lpte) (lpte)->pte &= ~PTE_DIRTY
139 #define pmap_clear_present(lpte) (lpte)->pte &= ~PTE_PRESENT
140 #define pmap_clear_wired(lpte) (lpte)->pte &= ~PTE_WIRED
142 #define pmap_set_wired(lpte) (lpte)->pte |= PTE_WIRED
145 * The VHPT bucket head structure.
154 * Statically allocated kernel pmap
156 struct pmap kernel_pmap_store;
158 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
159 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
162 * Kernel virtual memory management.
165 extern struct ia64_lpte ***ia64_kptdir;
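/*
 * Kernel mappings in region 5 are reached through a three-level
 * structure rooted at ia64_kptdir: a directory page of pointers to
 * second-level directory pages, which in turn point to pages of
 * ia64_lpte entries.  The macros below extract the three indices
 * from a region 5 virtual address (8-byte pointers, 32-byte PTEs).
 */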
167 #define KPTE_DIR0_INDEX(va) \
168 (((va) >> (3*PAGE_SHIFT-8)) & ((1<<(PAGE_SHIFT-3))-1))
169 #define KPTE_DIR1_INDEX(va) \
170 (((va) >> (2*PAGE_SHIFT-5)) & ((1<<(PAGE_SHIFT-3))-1))
171 #define KPTE_PTE_INDEX(va) \
172 (((va) >> PAGE_SHIFT) & ((1<<(PAGE_SHIFT-5))-1))
173 #define NKPTEPG (PAGE_SIZE / sizeof(struct ia64_lpte))
175 vm_offset_t kernel_vm_end;
177 /* Values for ptc.e. XXX values for SKI. */
178 static uint64_t pmap_ptc_e_base = 0x100000000;
179 static uint64_t pmap_ptc_e_count1 = 3;
180 static uint64_t pmap_ptc_e_count2 = 2;
181 static uint64_t pmap_ptc_e_stride1 = 0x2000;
182 static uint64_t pmap_ptc_e_stride2 = 0x100000000;
184 struct mtx pmap_ptc_mutex;
187 * Data for the RID allocator
189 static int pmap_ridcount;
190 static int pmap_rididx;
191 static int pmap_ridmapsz;
192 static int pmap_ridmax;
193 static uint64_t *pmap_ridmap;
194 struct mtx pmap_ridmutex;
197 * Data for the pv entry allocation mechanism
199 static uma_zone_t pvzone;
200 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
203 * Data for allocating PTEs for user processes.
205 static uma_zone_t ptezone;
208 * Virtual Hash Page Table (VHPT) data.
210 /* SYSCTL_DECL(_machdep); */
211 static SYSCTL_NODE(_machdep, OID_AUTO, vhpt, CTLFLAG_RD, 0, "");
213 struct ia64_bucket *pmap_vhpt_bucket;
215 int pmap_vhpt_nbuckets;
216 SYSCTL_INT(_machdep_vhpt, OID_AUTO, nbuckets, CTLFLAG_RD,
217 &pmap_vhpt_nbuckets, 0, "");
219 int pmap_vhpt_log2size = 0;
220 TUNABLE_INT("machdep.vhpt.log2size", &pmap_vhpt_log2size);
221 SYSCTL_INT(_machdep_vhpt, OID_AUTO, log2size, CTLFLAG_RD,
222 &pmap_vhpt_log2size, 0, "");
224 static int pmap_vhpt_inserts;
225 SYSCTL_INT(_machdep_vhpt, OID_AUTO, inserts, CTLFLAG_RD,
226 &pmap_vhpt_inserts, 0, "");
228 static int pmap_vhpt_population(SYSCTL_HANDLER_ARGS);
229 SYSCTL_PROC(_machdep_vhpt, OID_AUTO, population, CTLTYPE_INT | CTLFLAG_RD,
230 NULL, 0, pmap_vhpt_population, "I", "");
232 static struct ia64_lpte *pmap_find_vhpt(vm_offset_t va);
234 static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
235 static pv_entry_t get_pv_entry(pmap_t locked_pmap);
237 static void pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
238 vm_page_t m, vm_prot_t prot);
239 static void pmap_free_pte(struct ia64_lpte *pte, vm_offset_t va);
240 static void pmap_invalidate_all(void);
241 static int pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte,
242 vm_offset_t va, pv_entry_t pv, int freepte);
243 static int pmap_remove_vhpt(vm_offset_t va);
244 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
248 pmap_initialize_vhpt(vm_offset_t vhpt)
250 struct ia64_lpte *pte;
253 pte = (struct ia64_lpte *)vhpt;
254 for (i = 0; i < pmap_vhpt_nbuckets; i++) {
257 pte[i].tag = 1UL << 63; /* Invalid tag */
258 pte[i].chain = (uintptr_t)(pmap_vhpt_bucket + i);
263 MALLOC_DECLARE(M_SMP);
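/*
 * Allocate and initialize a VHPT for an additional CPU.  The table must
 * be naturally aligned, so contigmalloc() is asked for memory whose
 * alignment equals its size; the region 7 address of the table is
 * returned.
 */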
266 pmap_alloc_vhpt(void)
271 size = 1UL << pmap_vhpt_log2size;
272 vhpt = (uintptr_t)contigmalloc(size, M_SMP, 0, 0UL, ~0UL, size, 0UL);
274 vhpt = IA64_PHYS_TO_RR7(ia64_tpa(vhpt));
275 pmap_initialize_vhpt(vhpt);
282 * Bootstrap the system enough to run with virtual memory.
287 struct ia64_pal_result res;
293 * Query the PAL Code to find the loop parameters for the
296 res = ia64_call_pal_static(PAL_PTCE_INFO, 0, 0, 0);
297 if (res.pal_status != 0)
298 panic("Can't configure ptc.e parameters");
299 pmap_ptc_e_base = res.pal_result[0];
300 pmap_ptc_e_count1 = res.pal_result[1] >> 32;
301 pmap_ptc_e_count2 = res.pal_result[1] & ((1L<<32) - 1);
302 pmap_ptc_e_stride1 = res.pal_result[2] >> 32;
303 pmap_ptc_e_stride2 = res.pal_result[2] & ((1L<<32) - 1);
305 printf("ptc.e base=0x%lx, count1=%ld, count2=%ld, "
306 "stride1=0x%lx, stride2=0x%lx\n",
313 mtx_init(&pmap_ptc_mutex, "PTC.G mutex", NULL, MTX_SPIN);
316 * Set up RIDs. RIDs 0..7 are reserved for the kernel.
318 * We currently need at least 19 bits in the RID because PID_MAX
319 * can only be encoded in 17 bits and we need RIDs for 4 regions
320 * per process. With PID_MAX equalling 99999 this means that we
321 * need to be able to encode 399996 (=4*PID_MAX).
322 * The Itanium processor only has 18 bits and the architected
323 * minimum is exactly that. So, we cannot use a PID based scheme
324 * in those cases. Enter pmap_ridmap...
325 * We should avoid the map when running on a processor that has
326 * implemented enough bits. This means that we should pass the
327 * process/thread ID to pmap. This we currently don't do, so we
328 * use the map anyway. However, we don't want to allocate a map
329 * that is large enough to cover the range dictated by the number
330 * of bits in the RID, because that may result in a RID map of
331 * 2MB in size for a 24-bit RID. A 64KB map is enough.
332 * The bottom line: we create a 32KB map when the processor only
333 * implements 18 bits (or when we can't figure it out). Otherwise
334 * we create a 64KB map.
336 res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0);
337 if (res.pal_status != 0) {
339 printf("Can't read VM Summary - assuming 18 Region ID bits\n");
340 ridbits = 18; /* guaranteed minimum */
342 ridbits = (res.pal_result[1] >> 8) & 0xff;
344 printf("Processor supports %d Region ID bits\n",
350 pmap_ridmax = (1 << ridbits);
351 pmap_ridmapsz = pmap_ridmax / 64;
352 pmap_ridmap = ia64_physmem_alloc(pmap_ridmax / 8, PAGE_SIZE);
353 pmap_ridmap[0] |= 0xff;
356 mtx_init(&pmap_ridmutex, "RID allocator lock", NULL, MTX_DEF);
359 * Allocate some memory for initial kernel 'page tables'.
361 ia64_kptdir = ia64_physmem_alloc(PAGE_SIZE, PAGE_SIZE);
363 kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
366 * Determine a valid (mappable) VHPT size.
368 TUNABLE_INT_FETCH("machdep.vhpt.log2size", &pmap_vhpt_log2size);
369 if (pmap_vhpt_log2size == 0)
370 pmap_vhpt_log2size = 20;
371 else if (pmap_vhpt_log2size < 16)
372 pmap_vhpt_log2size = 16;
373 else if (pmap_vhpt_log2size > 28)
374 pmap_vhpt_log2size = 28;
375 if (pmap_vhpt_log2size & 1)
376 pmap_vhpt_log2size--;
378 size = 1UL << pmap_vhpt_log2size;
379 base = (uintptr_t)ia64_physmem_alloc(size, size);
381 panic("Unable to allocate VHPT");
383 PCPU_SET(md.vhpt, base);
385 printf("VHPT: address=%#lx, size=%#lx\n", base, size);
387 pmap_vhpt_nbuckets = size / sizeof(struct ia64_lpte);
388 pmap_vhpt_bucket = ia64_physmem_alloc(pmap_vhpt_nbuckets *
389 sizeof(struct ia64_bucket), PAGE_SIZE);
390 for (i = 0; i < pmap_vhpt_nbuckets; i++) {
391 /* Stolen memory is zeroed. */
392 mtx_init(&pmap_vhpt_bucket[i].mutex, "VHPT bucket lock", NULL,
393 MTX_NOWITNESS | MTX_SPIN);
396 pmap_initialize_vhpt(base);
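/*
 * Program the PTA register so the hardware walker finds our VHPT:
 * the table base, vf=1 (bit 8) selecting the long format, the size
 * as log2 in bits 7:2, and ve=1 (bit 0) enabling the walker.
 */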
398 ia64_set_pta(base + (1 << 8) + (pmap_vhpt_log2size << 2) + 1);
401 virtual_avail = VM_MIN_KERNEL_ADDRESS;
402 virtual_end = VM_MAX_KERNEL_ADDRESS;
405 * Initialize the kernel pmap (which is statically allocated).
407 PMAP_LOCK_INIT(kernel_pmap);
408 for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
409 kernel_pmap->pm_rid[i] = 0;
410 TAILQ_INIT(&kernel_pmap->pm_pvlist);
411 PCPU_SET(md.current_pmap, kernel_pmap);
413 /* Region 5 is mapped via the VHPT. */
414 ia64_set_rr(IA64_RR_BASE(5), (5 << 8) | (PAGE_SHIFT << 2) | 1);
417 * Clear out any random TLB entries left over from booting.
419 pmap_invalidate_all();
425 pmap_vhpt_population(SYSCTL_HANDLER_ARGS)
430 for (i = 0; i < pmap_vhpt_nbuckets; i++)
431 count += pmap_vhpt_bucket[i].length;
433 error = SYSCTL_OUT(req, &count, sizeof(count));
438 pmap_page_to_va(vm_page_t m)
443 pa = VM_PAGE_TO_PHYS(m);
444 va = (m->md.memattr == VM_MEMATTR_UNCACHEABLE) ? IA64_PHYS_TO_RR6(pa) :
445 IA64_PHYS_TO_RR7(pa);
450 * Initialize a vm_page's machine-dependent fields.
453 pmap_page_init(vm_page_t m)
456 TAILQ_INIT(&m->md.pv_list);
457 m->md.pv_list_count = 0;
458 m->md.memattr = VM_MEMATTR_DEFAULT;
462 * Initialize the pmap module.
463 * Called by vm_init, to initialize any structures that the pmap
464 * system needs to map virtual memory.
469 int shpgperproc = PMAP_SHPGPERPROC;
472 * Initialize the address space (zone) for the pv entries. Set a
473 * high water mark so that the system can recover from excessive
474 * numbers of pv entries.
476 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
477 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
478 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
479 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
480 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
481 pv_entry_high_water = 9 * (pv_entry_max / 10);
483 ptezone = uma_zcreate("PT ENTRY", sizeof (struct ia64_lpte),
484 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
488 /***************************************************
489 * Manipulate TLBs for a pmap
490 ***************************************************/
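/*
 * Invalidate the translation for a single page: replace the tag of the
 * matching VHPT entry on every CPU with an invalid one, then purge the
 * TLBs with a global ptc.ga.  The PTC mutex serializes global purges,
 * since only one may be outstanding at a time.
 */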
493 pmap_invalidate_page(vm_offset_t va)
495 struct ia64_lpte *pte;
502 vhpt_ofs = ia64_thash(va) - PCPU_GET(md.vhpt);
504 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
505 pte = (struct ia64_lpte *)(pc->pc_md.vhpt + vhpt_ofs);
506 atomic_cmpset_64(&pte->tag, tag, 1UL << 63);
509 mtx_lock_spin(&pmap_ptc_mutex);
511 ia64_ptc_ga(va, PAGE_SHIFT << 2);
515 mtx_unlock_spin(&pmap_ptc_mutex);
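/*
 * Purge the entire local TLB with a ptc.e loop, using the base, counts
 * and strides obtained from PAL_PTCE_INFO during bootstrap.
 */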
523 pmap_invalidate_all_1(void *arg)
529 addr = pmap_ptc_e_base;
530 for (i = 0; i < pmap_ptc_e_count1; i++) {
531 for (j = 0; j < pmap_ptc_e_count2; j++) {
533 addr += pmap_ptc_e_stride2;
535 addr += pmap_ptc_e_stride1;
541 pmap_invalidate_all(void)
546 smp_rendezvous(NULL, pmap_invalidate_all_1, NULL, NULL);
550 pmap_invalidate_all_1(NULL);
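/*
 * Allocate an unused region ID from the bitmap, scanning onward from
 * the last successful index.  Panics when every ID is in use.
 */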
554 pmap_allocate_rid(void)
559 mtx_lock(&pmap_ridmutex);
560 if (pmap_ridcount == pmap_ridmax)
561 panic("pmap_allocate_rid: All Region IDs used");
563 /* Find an index with a free bit. */
564 while ((bits = pmap_ridmap[pmap_rididx]) == ~0UL) {
566 if (pmap_rididx == pmap_ridmapsz)
569 rid = pmap_rididx * 64;
571 /* Find a free bit. */
578 pmap_ridmap[pmap_rididx] |= bit;
580 mtx_unlock(&pmap_ridmutex);
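/*
 * Return a region ID to the allocator by clearing its bit in the map.
 */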
586 pmap_free_rid(uint32_t rid)
592 bit = ~(1UL << (rid & 63));
594 mtx_lock(&pmap_ridmutex);
595 pmap_ridmap[idx] &= bit;
597 mtx_unlock(&pmap_ridmutex);
600 /***************************************************
601 * Page table page management routines.....
602 ***************************************************/
605 pmap_pinit0(struct pmap *pmap)
607 /* kernel_pmap is the same as any other pmap. */
612 * Initialize a preallocated and zeroed pmap structure,
613 * such as one in a vmspace structure.
616 pmap_pinit(struct pmap *pmap)
620 PMAP_LOCK_INIT(pmap);
621 for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
622 pmap->pm_rid[i] = pmap_allocate_rid();
623 TAILQ_INIT(&pmap->pm_pvlist);
624 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
628 /***************************************************
629 * Pmap allocation/deallocation routines.
630 ***************************************************/
633 * Release any resources held by the given physical map.
634 * Called when a pmap initialized by pmap_pinit is being released.
635 * Should only be called if the map contains no valid mappings.
638 pmap_release(pmap_t pmap)
642 for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
644 pmap_free_rid(pmap->pm_rid[i]);
645 PMAP_LOCK_DESTROY(pmap);
649 * grow the number of kernel page table entries, if needed
652 pmap_growkernel(vm_offset_t addr)
654 struct ia64_lpte **dir1;
655 struct ia64_lpte *leaf;
658 while (kernel_vm_end <= addr) {
659 if (nkpt == PAGE_SIZE/8 + PAGE_SIZE*PAGE_SIZE/64)
660 panic("%s: out of kernel address space", __func__);
662 dir1 = ia64_kptdir[KPTE_DIR0_INDEX(kernel_vm_end)];
664 nkpg = vm_page_alloc(NULL, nkpt++,
665 VM_ALLOC_NOOBJ|VM_ALLOC_INTERRUPT|VM_ALLOC_WIRED);
667 panic("%s: cannot add dir. page", __func__);
669 dir1 = (struct ia64_lpte **)pmap_page_to_va(nkpg);
670 bzero(dir1, PAGE_SIZE);
671 ia64_kptdir[KPTE_DIR0_INDEX(kernel_vm_end)] = dir1;
674 nkpg = vm_page_alloc(NULL, nkpt++,
675 VM_ALLOC_NOOBJ|VM_ALLOC_INTERRUPT|VM_ALLOC_WIRED);
677 panic("%s: cannot add PTE page", __func__);
679 leaf = (struct ia64_lpte *)pmap_page_to_va(nkpg);
680 bzero(leaf, PAGE_SIZE);
681 dir1[KPTE_DIR1_INDEX(kernel_vm_end)] = leaf;
683 kernel_vm_end += PAGE_SIZE * NKPTEPG;
687 /***************************************************
688 * page management routines.
689 ***************************************************/
692 * free the pv_entry back to the free list
694 static PMAP_INLINE void
695 free_pv_entry(pv_entry_t pv)
698 uma_zfree(pvzone, pv);
702 * get a new pv_entry, allocating a block from the system
706 get_pv_entry(pmap_t locked_pmap)
708 static const struct timeval printinterval = { 60, 0 };
709 static struct timeval lastprint;
710 struct vpgqueues *vpq;
711 struct ia64_lpte *pte;
712 pmap_t oldpmap, pmap;
713 pv_entry_t allocated_pv, next_pv, pv;
717 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
718 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
719 allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
720 if (allocated_pv != NULL) {
722 if (pv_entry_count > pv_entry_high_water)
725 return (allocated_pv);
729 * Reclaim pv entries: At first, destroy mappings to inactive
730 * pages. After that, if a pv entry is still needed, destroy
731 * mappings to active pages.
733 if (ratecheck(&lastprint, &printinterval))
734 printf("Approaching the limit on PV entries, "
735 "increase the vm.pmap.shpgperproc tunable.\n");
736 vpq = &vm_page_queues[PQ_INACTIVE];
738 TAILQ_FOREACH(m, &vpq->pl, pageq) {
739 if ((m->flags & PG_MARKER) != 0 || m->hold_count || m->busy)
741 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
744 /* Avoid deadlock and lock recursion. */
745 if (pmap > locked_pmap)
747 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
749 pmap->pm_stats.resident_count--;
750 oldpmap = pmap_switch(pmap);
751 pte = pmap_find_vhpt(va);
752 KASSERT(pte != NULL, ("pte"));
753 pmap_remove_vhpt(va);
754 pmap_invalidate_page(va);
755 pmap_switch(oldpmap);
756 if (pmap_accessed(pte))
757 vm_page_aflag_set(m, PGA_REFERENCED);
760 pmap_free_pte(pte, va);
761 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
762 m->md.pv_list_count--;
763 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
764 if (pmap != locked_pmap)
766 if (allocated_pv == NULL)
771 if (TAILQ_EMPTY(&m->md.pv_list))
772 vm_page_aflag_clear(m, PGA_WRITEABLE);
774 if (allocated_pv == NULL) {
775 if (vpq == &vm_page_queues[PQ_INACTIVE]) {
776 vpq = &vm_page_queues[PQ_ACTIVE];
779 panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
781 return (allocated_pv);
785 * Conditionally create a pv entry.
788 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
792 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
793 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
794 if (pv_entry_count < pv_entry_high_water &&
795 (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) {
799 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
800 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
801 m->md.pv_list_count++;
808 * Add an ia64_lpte to the VHPT.
811 pmap_enter_vhpt(struct ia64_lpte *pte, vm_offset_t va)
813 struct ia64_bucket *bckt;
814 struct ia64_lpte *vhpte;
817 /* Can fault, so get it out of the way. */
818 pte_pa = ia64_tpa((vm_offset_t)pte);
820 vhpte = (struct ia64_lpte *)ia64_thash(va);
821 bckt = (struct ia64_bucket *)vhpte->chain;
823 mtx_lock_spin(&bckt->mutex);
824 pte->chain = bckt->chain;
826 bckt->chain = pte_pa;
830 mtx_unlock_spin(&bckt->mutex);
834 * Remove the ia64_lpte matching va from the VHPT. Return zero if it
835 * worked or an appropriate error code otherwise.
838 pmap_remove_vhpt(vm_offset_t va)
840 struct ia64_bucket *bckt;
841 struct ia64_lpte *pte;
842 struct ia64_lpte *lpte;
843 struct ia64_lpte *vhpte;
847 vhpte = (struct ia64_lpte *)ia64_thash(va);
848 bckt = (struct ia64_bucket *)vhpte->chain;
851 mtx_lock_spin(&bckt->mutex);
853 pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
854 while (chain != 0 && pte->tag != tag) {
857 pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
860 mtx_unlock_spin(&bckt->mutex);
864 /* Snip this pv_entry out of the collision chain. */
866 bckt->chain = pte->chain;
868 lpte->chain = pte->chain;
872 mtx_unlock_spin(&bckt->mutex);
877 * Find the ia64_lpte for the given va, if any.
879 static struct ia64_lpte *
880 pmap_find_vhpt(vm_offset_t va)
882 struct ia64_bucket *bckt;
883 struct ia64_lpte *pte;
887 pte = (struct ia64_lpte *)ia64_thash(va);
888 bckt = (struct ia64_bucket *)pte->chain;
890 mtx_lock_spin(&bckt->mutex);
892 pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
893 while (chain != 0 && pte->tag != tag) {
895 pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
897 mtx_unlock_spin(&bckt->mutex);
898 return ((chain != 0) ? pte : NULL);
902 * Remove an entry from the list of managed mappings.
905 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va, pv_entry_t pv)
908 if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
909 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
910 if (pmap == pv->pv_pmap && va == pv->pv_va)
914 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
922 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
923 m->md.pv_list_count--;
924 if (TAILQ_FIRST(&m->md.pv_list) == NULL)
925 vm_page_aflag_clear(m, PGA_WRITEABLE);
927 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
936 * Create a pv entry for page at pa for
940 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
944 pv = get_pv_entry(pmap);
948 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
949 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
950 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
951 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
952 m->md.pv_list_count++;
956 * Routine: pmap_extract
958 * Extract the physical page address associated
959 * with the given map/virtual_address pair.
962 pmap_extract(pmap_t pmap, vm_offset_t va)
964 struct ia64_lpte *pte;
970 oldpmap = pmap_switch(pmap);
971 pte = pmap_find_vhpt(va);
972 if (pte != NULL && pmap_present(pte))
974 pmap_switch(oldpmap);
980 * Routine: pmap_extract_and_hold
982 * Atomically extract and hold the physical page
983 * with the given pmap and virtual address pair
984 * if that mapping permits the given protection.
987 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
989 struct ia64_lpte *pte;
997 oldpmap = pmap_switch(pmap);
999 pte = pmap_find_vhpt(va);
1000 if (pte != NULL && pmap_present(pte) &&
1001 (pmap_prot(pte) & prot) == prot) {
1002 m = PHYS_TO_VM_PAGE(pmap_ppn(pte));
1003 if (vm_page_pa_tryrelock(pmap, pmap_ppn(pte), &pa))
1008 pmap_switch(oldpmap);
1013 /***************************************************
1014 * Low level mapping routines.....
1015 ***************************************************/
1018 * Find the kernel lpte for mapping the given virtual address, which
1019 * must be in the part of region 5 which we can cover with our kernel
1022 static struct ia64_lpte *
1023 pmap_find_kpte(vm_offset_t va)
1025 struct ia64_lpte **dir1;
1026 struct ia64_lpte *leaf;
1028 KASSERT((va >> 61) == 5,
1029 ("kernel mapping 0x%lx not in region 5", va));
1030 KASSERT(va < kernel_vm_end,
1031 ("kernel mapping 0x%lx out of range", va));
1033 dir1 = ia64_kptdir[KPTE_DIR0_INDEX(va)];
1034 leaf = dir1[KPTE_DIR1_INDEX(va)];
1035 return (&leaf[KPTE_PTE_INDEX(va)]);
1039 * Find a pte suitable for mapping a user-space address. If one exists
1040 * in the VHPT, that one will be returned, otherwise a new pte is
1043 static struct ia64_lpte *
1044 pmap_find_pte(vm_offset_t va)
1046 struct ia64_lpte *pte;
1048 if (va >= VM_MAXUSER_ADDRESS)
1049 return pmap_find_kpte(va);
1051 pte = pmap_find_vhpt(va);
1053 pte = uma_zalloc(ptezone, M_NOWAIT | M_ZERO);
1054 pte->tag = 1UL << 63;
1060 * Free a pte which is now unused. This simply returns it to the zone
1061 * allocator if it is a user mapping. For kernel mappings, clear the
1062 * valid bit to make it clear that the mapping is not currently used.
1065 pmap_free_pte(struct ia64_lpte *pte, vm_offset_t va)
1067 if (va < VM_MAXUSER_ADDRESS)
1068 uma_zfree(ptezone, pte);
1070 pmap_clear_present(pte);
1073 static PMAP_INLINE void
1074 pmap_pte_prot(pmap_t pm, struct ia64_lpte *pte, vm_prot_t prot)
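/*
 * prot2ar[] is indexed by (prot & VM_PROT_ALL) >> 1, i.e. bit 0 is
 * VM_PROT_WRITE and bit 1 is VM_PROT_EXECUTE; read-only and no access
 * both select PTE_AR_R, with the privilege level set below making the
 * actual distinction.
 */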
1076 static long prot2ar[4] = {
1077 PTE_AR_R, /* VM_PROT_NONE */
1078 PTE_AR_RW, /* VM_PROT_WRITE */
1079 PTE_AR_RX|PTE_ED, /* VM_PROT_EXECUTE */
1080 PTE_AR_RWX|PTE_ED /* VM_PROT_WRITE|VM_PROT_EXECUTE */
1083 pte->pte &= ~(PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK | PTE_ED);
1084 pte->pte |= (uint64_t)(prot & VM_PROT_ALL) << 56;
1085 pte->pte |= (prot == VM_PROT_NONE || pm == kernel_pmap)
1086 ? PTE_PL_KERN : PTE_PL_USER;
1087 pte->pte |= prot2ar[(prot & VM_PROT_ALL) >> 1];
1090 static PMAP_INLINE void
1091 pmap_pte_attr(struct ia64_lpte *pte, vm_memattr_t ma)
1094 pte->pte &= ~PTE_MA_MASK;
1095 pte->pte |= (ma & PTE_MA_MASK);
1099 * Set a pte to contain a valid mapping and enter it in the VHPT. If
1100 * the pte was originally valid, then it's assumed to already be in the
1102 * This function does not set the protection bits. It's expected
1103 * that those have been set correctly prior to calling this function.
1106 pmap_set_pte(struct ia64_lpte *pte, vm_offset_t va, vm_offset_t pa,
1107 boolean_t wired, boolean_t managed)
1110 pte->pte &= PTE_PROT_MASK | PTE_MA_MASK | PTE_PL_MASK |
1111 PTE_AR_MASK | PTE_ED;
1112 pte->pte |= PTE_PRESENT;
1113 pte->pte |= (managed) ? PTE_MANAGED : (PTE_DIRTY | PTE_ACCESSED);
1114 pte->pte |= (wired) ? PTE_WIRED : 0;
1115 pte->pte |= pa & PTE_PPN_MASK;
1117 pte->itir = PAGE_SHIFT << 2;
1119 pte->tag = ia64_ttag(va);
1123 * Remove the (possibly managed) mapping represented by pte from the
1127 pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vm_offset_t va,
1128 pv_entry_t pv, int freepte)
1134 * First remove from the VHPT.
1136 error = pmap_remove_vhpt(va);
1140 pmap_invalidate_page(va);
1142 if (pmap_wired(pte))
1143 pmap->pm_stats.wired_count -= 1;
1145 pmap->pm_stats.resident_count -= 1;
1146 if (pmap_managed(pte)) {
1147 m = PHYS_TO_VM_PAGE(pmap_ppn(pte));
1148 if (pmap_dirty(pte))
1150 if (pmap_accessed(pte))
1151 vm_page_aflag_set(m, PGA_REFERENCED);
1153 error = pmap_remove_entry(pmap, m, va, pv);
1156 pmap_free_pte(pte, va);
1162 * Extract the physical page address associated with a kernel
1166 pmap_kextract(vm_offset_t va)
1168 struct ia64_lpte *pte;
1169 uint64_t *pbvm_pgtbl;
1173 KASSERT(va >= VM_MAXUSER_ADDRESS, ("Must be kernel VA"));
1175 /* Regions 6 and 7 are direct mapped. */
1176 if (va >= IA64_RR_BASE(6)) {
1177 pa = IA64_RR_MASK(va);
1181 /* Region 5 is our KVA. Bail out if the VA is beyond our limits. */
1182 if (va >= kernel_vm_end)
1184 if (va >= VM_MIN_KERNEL_ADDRESS) {
1185 pte = pmap_find_kpte(va);
1186 pa = pmap_present(pte) ? pmap_ppn(pte) | (va & PAGE_MASK) : 0;
1190 /* The PBVM page table. */
1191 if (va >= IA64_PBVM_PGTBL + bootinfo->bi_pbvm_pgtblsz)
1193 if (va >= IA64_PBVM_PGTBL) {
1194 pa = (va - IA64_PBVM_PGTBL) + bootinfo->bi_pbvm_pgtbl;
1198 /* The PBVM itself. */
1199 if (va >= IA64_PBVM_BASE) {
1200 pbvm_pgtbl = (void *)IA64_PBVM_PGTBL;
1201 idx = (va - IA64_PBVM_BASE) >> IA64_PBVM_PAGE_SHIFT;
1202 if (idx >= (bootinfo->bi_pbvm_pgtblsz >> 3))
1204 if ((pbvm_pgtbl[idx] & PTE_PRESENT) == 0)
1206 pa = (pbvm_pgtbl[idx] & PTE_PPN_MASK) +
1207 (va & IA64_PBVM_PAGE_MASK);
1212 printf("XXX: %s: va=%#lx is invalid\n", __func__, va);
1221 * Add a list of wired pages to the kva. This routine is only used for
1222 * temporary kernel mappings that do not need to have page modification
1223 * or references recorded. Note that old mappings are simply written
1224 * over. The page is effectively wired, but it's customary to not have
1225 * the PTE reflect that, nor update statistics.
1228 pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
1230 struct ia64_lpte *pte;
1233 for (i = 0; i < count; i++) {
1234 pte = pmap_find_kpte(va);
1235 if (pmap_present(pte))
1236 pmap_invalidate_page(va);
1238 pmap_enter_vhpt(pte, va);
1239 pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
1240 pmap_pte_attr(pte, m[i]->md.memattr);
1241 pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m[i]), FALSE, FALSE);
1247 * this routine jerks page mappings from the
1248 * kernel -- it is meant only for temporary mappings.
1251 pmap_qremove(vm_offset_t va, int count)
1253 struct ia64_lpte *pte;
1256 for (i = 0; i < count; i++) {
1257 pte = pmap_find_kpte(va);
1258 if (pmap_present(pte)) {
1259 pmap_remove_vhpt(va);
1260 pmap_invalidate_page(va);
1261 pmap_clear_present(pte);
1268 * Add a wired page to the kva. As for pmap_qenter(), it's customary
1269 * to not have the PTE reflect that, nor update statistics.
1272 pmap_kenter(vm_offset_t va, vm_offset_t pa)
1274 struct ia64_lpte *pte;
1276 pte = pmap_find_kpte(va);
1277 if (pmap_present(pte))
1278 pmap_invalidate_page(va);
1280 pmap_enter_vhpt(pte, va);
1281 pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
1282 pmap_pte_attr(pte, VM_MEMATTR_DEFAULT);
1283 pmap_set_pte(pte, va, pa, FALSE, FALSE);
1287 * Remove a page from the kva
1290 pmap_kremove(vm_offset_t va)
1292 struct ia64_lpte *pte;
1294 pte = pmap_find_kpte(va);
1295 if (pmap_present(pte)) {
1296 pmap_remove_vhpt(va);
1297 pmap_invalidate_page(va);
1298 pmap_clear_present(pte);
1303 * Used to map a range of physical addresses into kernel
1304 * virtual address space.
1306 * The value passed in '*virt' is a suggested virtual address for
1307 * the mapping. Architectures which can support a direct-mapped
1308 * physical to virtual region can return the appropriate address
1309 * within that region, leaving '*virt' unchanged. Other
1310 * architectures should map the pages starting at '*virt' and
1311 * update '*virt' with the first usable address after the mapped
1315 pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
1317 return IA64_PHYS_TO_RR7(start);
1321 * Remove the given range of addresses from the specified map.
1323 * It is assumed that the start and end are properly
1324 * rounded to the page size.
1327 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1332 struct ia64_lpte *pte;
1334 if (pmap->pm_stats.resident_count == 0)
1337 vm_page_lock_queues();
1339 oldpmap = pmap_switch(pmap);
1342 * Special handling of removing one page: a very
1343 * common operation and easy to short-circuit some
1346 if (sva + PAGE_SIZE == eva) {
1347 pte = pmap_find_vhpt(sva);
1349 pmap_remove_pte(pmap, pte, sva, 0, 1);
1353 if (pmap->pm_stats.resident_count < ((eva - sva) >> PAGE_SHIFT)) {
1354 TAILQ_FOREACH_SAFE(pv, &pmap->pm_pvlist, pv_plist, npv) {
1356 if (va >= sva && va < eva) {
1357 pte = pmap_find_vhpt(va);
1358 KASSERT(pte != NULL, ("pte"));
1359 pmap_remove_pte(pmap, pte, va, pv, 1);
1363 for (va = sva; va < eva; va += PAGE_SIZE) {
1364 pte = pmap_find_vhpt(va);
1366 pmap_remove_pte(pmap, pte, va, 0, 1);
1371 vm_page_unlock_queues();
1372 pmap_switch(oldpmap);
1377 * Routine: pmap_remove_all
1379 * Removes this physical page from
1380 * all physical maps in which it resides.
1381 * Reflects back modify bits to the pager.
1384 * Original versions of this routine were very
1385 * inefficient because they iteratively called
1386 * pmap_remove (slow...)
1390 pmap_remove_all(vm_page_t m)
1395 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1396 ("pmap_remove_all: page %p is not managed", m));
1397 vm_page_lock_queues();
1398 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1399 struct ia64_lpte *pte;
1400 pmap_t pmap = pv->pv_pmap;
1401 vm_offset_t va = pv->pv_va;
1404 oldpmap = pmap_switch(pmap);
1405 pte = pmap_find_vhpt(va);
1406 KASSERT(pte != NULL, ("pte"));
1407 if (pmap_ppn(pte) != VM_PAGE_TO_PHYS(m))
1408 panic("pmap_remove_all: pv_table for %lx is inconsistent", VM_PAGE_TO_PHYS(m));
1409 pmap_remove_pte(pmap, pte, va, pv, 1);
1410 pmap_switch(oldpmap);
1413 vm_page_aflag_clear(m, PGA_WRITEABLE);
1414 vm_page_unlock_queues();
1418 * Set the physical protection on the
1419 * specified range of this map as requested.
1422 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1425 struct ia64_lpte *pte;
1427 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1428 pmap_remove(pmap, sva, eva);
1432 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
1433 (VM_PROT_WRITE|VM_PROT_EXECUTE))
1436 if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
1437 panic("pmap_protect: unaligned addresses");
1440 oldpmap = pmap_switch(pmap);
1441 for ( ; sva < eva; sva += PAGE_SIZE) {
1442 /* If page is invalid, skip this page */
1443 pte = pmap_find_vhpt(sva);
1447 /* If there's no change, skip it too */
1448 if (pmap_prot(pte) == prot)
1451 if ((prot & VM_PROT_WRITE) == 0 &&
1452 pmap_managed(pte) && pmap_dirty(pte)) {
1453 vm_paddr_t pa = pmap_ppn(pte);
1454 vm_page_t m = PHYS_TO_VM_PAGE(pa);
1457 pmap_clear_dirty(pte);
1460 if (prot & VM_PROT_EXECUTE)
1461 ia64_sync_icache(sva, PAGE_SIZE);
1463 pmap_pte_prot(pmap, pte, prot);
1464 pmap_invalidate_page(sva);
1466 pmap_switch(oldpmap);
1471 * Insert the given physical page (p) at
1472 * the specified virtual address (v) in the
1473 * target physical map with the protection requested.
1475 * If specified, the page will be wired down, meaning
1476 * that the related pte can not be reclaimed.
1478 * NB: This is the only routine which MAY NOT lazy-evaluate
1479 * or lose information. That is, this routine must actually
1480 * insert this page into the given map NOW.
1483 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
1484 vm_prot_t prot, boolean_t wired)
1489 struct ia64_lpte origpte;
1490 struct ia64_lpte *pte;
1491 boolean_t icache_inval, managed;
1493 vm_page_lock_queues();
1495 oldpmap = pmap_switch(pmap);
1498 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
1499 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
1500 ("pmap_enter: page %p is not busy", m));
1503 * Find (or create) a pte for the given mapping.
1505 while ((pte = pmap_find_pte(va)) == NULL) {
1506 pmap_switch(oldpmap);
1508 vm_page_unlock_queues();
1510 vm_page_lock_queues();
1512 oldpmap = pmap_switch(pmap);
1515 if (!pmap_present(pte)) {
1517 pmap_enter_vhpt(pte, va);
1519 opa = pmap_ppn(pte);
1521 pa = VM_PAGE_TO_PHYS(m);
1523 icache_inval = (prot & VM_PROT_EXECUTE) ? TRUE : FALSE;
1526 * Mapping has not changed, must be protection or wiring change.
1530 * Wiring change, just update stats. We don't worry about
1531 * wiring PT pages as they remain resident as long as there
1532 * are valid mappings in them. Hence, if a user page is wired,
1533 * the PT page will be also.
1535 if (wired && !pmap_wired(&origpte))
1536 pmap->pm_stats.wired_count++;
1537 else if (!wired && pmap_wired(&origpte))
1538 pmap->pm_stats.wired_count--;
1540 managed = (pmap_managed(&origpte)) ? TRUE : FALSE;
1543 * We might be turning off write access to the page,
1544 * so we go ahead and sense modify status. Otherwise,
1545 * we can avoid I-cache invalidation if the page
1546 * already allowed execution.
1548 if (managed && pmap_dirty(&origpte))
1550 else if (pmap_exec(&origpte))
1551 icache_inval = FALSE;
1553 pmap_invalidate_page(va);
1558 * Mapping has changed, invalidate old range and fall
1559 * through to handle validating new mapping.
1562 pmap_remove_pte(pmap, pte, va, 0, 0);
1563 pmap_enter_vhpt(pte, va);
1567 * Enter on the PV list if part of our managed memory.
1569 if ((m->oflags & VPO_UNMANAGED) == 0) {
1570 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
1571 ("pmap_enter: managed mapping within the clean submap"));
1572 pmap_insert_entry(pmap, va, m);
1577 * Increment counters
1579 pmap->pm_stats.resident_count++;
1581 pmap->pm_stats.wired_count++;
1586 * Now validate mapping with desired protection/wiring. This
1587 * adds the pte to the VHPT if necessary.
1589 pmap_pte_prot(pmap, pte, prot);
1590 pmap_pte_attr(pte, m->md.memattr);
1591 pmap_set_pte(pte, va, pa, wired, managed);
1593 /* Invalidate the I-cache when needed. */
1595 ia64_sync_icache(va, PAGE_SIZE);
1597 if ((prot & VM_PROT_WRITE) != 0 && managed)
1598 vm_page_aflag_set(m, PGA_WRITEABLE);
1599 vm_page_unlock_queues();
1600 pmap_switch(oldpmap);
1605 * Maps a sequence of resident pages belonging to the same object.
1606 * The sequence begins with the given page m_start. This page is
1607 * mapped at the given virtual address start. Each subsequent page is
1608 * mapped at a virtual address that is offset from start by the same
1609 * amount as the page is offset from m_start within the object. The
1610 * last page in the sequence is the page with the largest offset from
1611 * m_start that can be mapped at a virtual address less than the given
1612 * virtual address end. Not every virtual page between start and end
1613 * is mapped; only those for which a resident page exists with the
1614 * corresponding offset from m_start are mapped.
1617 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
1618 vm_page_t m_start, vm_prot_t prot)
1622 vm_pindex_t diff, psize;
1624 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
1625 psize = atop(end - start);
1627 vm_page_lock_queues();
1629 oldpmap = pmap_switch(pmap);
1630 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1631 pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot);
1632 m = TAILQ_NEXT(m, listq);
1634 vm_page_unlock_queues();
1635 pmap_switch(oldpmap);
1640 * this code makes some *MAJOR* assumptions:
1641 * 1. Current pmap & pmap exists.
1644 * 4. No page table pages.
1645 * but is *MUCH* faster than pmap_enter...
1649 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
1653 vm_page_lock_queues();
1655 oldpmap = pmap_switch(pmap);
1656 pmap_enter_quick_locked(pmap, va, m, prot);
1657 vm_page_unlock_queues();
1658 pmap_switch(oldpmap);
1663 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
1666 struct ia64_lpte *pte;
1669 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
1670 (m->oflags & VPO_UNMANAGED) != 0,
1671 ("pmap_enter_quick_locked: managed mapping within the clean submap"));
1672 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1673 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1675 if ((pte = pmap_find_pte(va)) == NULL)
1678 if (!pmap_present(pte)) {
1679 /* Enter on the PV list if the page is managed. */
1680 if ((m->oflags & VPO_UNMANAGED) == 0) {
1681 if (!pmap_try_insert_pv_entry(pmap, va, m)) {
1682 pmap_free_pte(pte, va);
1689 /* Increment counters. */
1690 pmap->pm_stats.resident_count++;
1692 /* Initialise with R/O protection and enter into VHPT. */
1693 pmap_enter_vhpt(pte, va);
1694 pmap_pte_prot(pmap, pte,
1695 prot & (VM_PROT_READ | VM_PROT_EXECUTE));
1696 pmap_pte_attr(pte, m->md.memattr);
1697 pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed);
1699 if (prot & VM_PROT_EXECUTE)
1700 ia64_sync_icache(va, PAGE_SIZE);
1705 * pmap_object_init_pt preloads the ptes for a given object
1706 * into the specified pmap. This eliminates the blast of soft
1707 * faults on process startup and immediately after an mmap.
1710 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
1711 vm_object_t object, vm_pindex_t pindex,
1715 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1716 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
1717 ("pmap_object_init_pt: non-device object"));
1721 * Routine: pmap_change_wiring
1722 * Function: Change the wiring attribute for a map/virtual-address
1724 * In/out conditions:
1725 * The mapping must already exist in the pmap.
1728 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
1734 struct ia64_lpte *pte;
1737 oldpmap = pmap_switch(pmap);
1739 pte = pmap_find_vhpt(va);
1740 KASSERT(pte != NULL, ("pte"));
1741 if (wired && !pmap_wired(pte)) {
1742 pmap->pm_stats.wired_count++;
1743 pmap_set_wired(pte);
1744 } else if (!wired && pmap_wired(pte)) {
1745 pmap->pm_stats.wired_count--;
1746 pmap_clear_wired(pte);
1749 pmap_switch(oldpmap);
1756 * Copy the range specified by src_addr/len
1757 * from the source map to the range dst_addr/len
1758 * in the destination map.
1760 * This routine is only advisory and need not do anything.
1764 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
1765 vm_offset_t src_addr)
1771 * pmap_zero_page zeros the specified hardware page by
1772 * mapping it into virtual memory and using bzero to clear
1777 pmap_zero_page(vm_page_t m)
1781 p = (void *)pmap_page_to_va(m);
1782 bzero(p, PAGE_SIZE);
1787 * pmap_zero_page_area zeros the specified hardware page by
1788 * mapping it into virtual memory and using bzero to clear
1791 * off and size must reside within a single page.
1795 pmap_zero_page_area(vm_page_t m, int off, int size)
1799 p = (void *)pmap_page_to_va(m);
1800 bzero(p + off, size);
1805 * pmap_zero_page_idle zeros the specified hardware page by
1806 * mapping it into virtual memory and using bzero to clear
1807 * its contents. This is for the vm_idlezero process.
1811 pmap_zero_page_idle(vm_page_t m)
1815 p = (void *)pmap_page_to_va(m);
1816 bzero(p, PAGE_SIZE);
1821 * pmap_copy_page copies the specified (machine independent)
1822 * page by mapping the page into virtual memory and using
1823 * bcopy to copy the page, one machine dependent page at a
1827 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
1831 src = (void *)pmap_page_to_va(msrc);
1832 dst = (void *)pmap_page_to_va(mdst);
1833 bcopy(src, dst, PAGE_SIZE);
1836 int unmapped_buf_allowed;
1839 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
1840 vm_offset_t b_offset, int xfersize)
1843 vm_offset_t a_pg_offset, b_pg_offset;
1846 while (xfersize > 0) {
1847 a_pg_offset = a_offset & PAGE_MASK;
1848 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1849 a_cp = (char *)pmap_page_to_va(ma[a_offset >> PAGE_SHIFT]) +
1851 b_pg_offset = b_offset & PAGE_MASK;
1852 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1853 b_cp = (char *)pmap_page_to_va(mb[b_offset >> PAGE_SHIFT]) +
1855 bcopy(a_cp, b_cp, cnt);
1863 * Returns true if the pmap's pv is one of the first
1864 * 16 pvs linked to from this page. This count may
1865 * be changed upwards or downwards in the future; it
1866 * is only necessary that true be returned for a small
1867 * subset of pmaps for proper page aging.
1870 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
1876 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1877 ("pmap_page_exists_quick: page %p is not managed", m));
1879 vm_page_lock_queues();
1880 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1881 if (pv->pv_pmap == pmap) {
1889 vm_page_unlock_queues();
1894 * pmap_page_wired_mappings:
1896 * Return the number of managed mappings to the given physical page
1900 pmap_page_wired_mappings(vm_page_t m)
1902 struct ia64_lpte *pte;
1903 pmap_t oldpmap, pmap;
1908 if ((m->oflags & VPO_UNMANAGED) != 0)
1910 vm_page_lock_queues();
1911 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1914 oldpmap = pmap_switch(pmap);
1915 pte = pmap_find_vhpt(pv->pv_va);
1916 KASSERT(pte != NULL, ("pte"));
1917 if (pmap_wired(pte))
1919 pmap_switch(oldpmap);
1922 vm_page_unlock_queues();
1927 * Remove all pages from the specified address space;
1928 * this aids process exit speeds. Also, this code
1929 * is special cased for current process only, but
1930 * can have the more generic (and slightly slower)
1931 * mode enabled. This is much faster than pmap_remove
1932 * in the case of running down an entire address space.
1935 pmap_remove_pages(pmap_t pmap)
1940 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
1941 printf("warning: %s called with non-current pmap\n",
1946 vm_page_lock_queues();
1948 oldpmap = pmap_switch(pmap);
1950 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
1951 struct ia64_lpte *pte;
1953 npv = TAILQ_NEXT(pv, pv_plist);
1955 pte = pmap_find_vhpt(pv->pv_va);
1956 KASSERT(pte != NULL, ("pte"));
1957 if (!pmap_wired(pte))
1958 pmap_remove_pte(pmap, pte, pv->pv_va, pv, 1);
1961 pmap_switch(oldpmap);
1963 vm_page_unlock_queues();
1967 * pmap_ts_referenced:
1969 * Return a count of reference bits for a page, clearing those bits.
1970 * It is not necessary for every reference bit to be cleared, but it
1971 * is necessary that 0 only be returned when there are truly no
1972 * reference bits set.
1974 * XXX: The exact number of bits to check and clear is a matter that
1975 * should be tested and standardized at some point in the future for
1976 * optimal aging of shared pages.
1979 pmap_ts_referenced(vm_page_t m)
1981 struct ia64_lpte *pte;
1986 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1987 ("pmap_ts_referenced: page %p is not managed", m));
1988 vm_page_lock_queues();
1989 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1990 PMAP_LOCK(pv->pv_pmap);
1991 oldpmap = pmap_switch(pv->pv_pmap);
1992 pte = pmap_find_vhpt(pv->pv_va);
1993 KASSERT(pte != NULL, ("pte"));
1994 if (pmap_accessed(pte)) {
1996 pmap_clear_accessed(pte);
1997 pmap_invalidate_page(pv->pv_va);
1999 pmap_switch(oldpmap);
2000 PMAP_UNLOCK(pv->pv_pmap);
2002 vm_page_unlock_queues();
2009 * Return whether or not the specified physical page was modified
2010 * in any physical maps.
2013 pmap_is_modified(vm_page_t m)
2015 struct ia64_lpte *pte;
2020 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2021 ("pmap_is_modified: page %p is not managed", m));
2025 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
2026 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
2027 * is clear, no PTEs can be dirty.
2029 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2030 if ((m->oflags & VPO_BUSY) == 0 &&
2031 (m->aflags & PGA_WRITEABLE) == 0)
2033 vm_page_lock_queues();
2034 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2035 PMAP_LOCK(pv->pv_pmap);
2036 oldpmap = pmap_switch(pv->pv_pmap);
2037 pte = pmap_find_vhpt(pv->pv_va);
2038 pmap_switch(oldpmap);
2039 KASSERT(pte != NULL, ("pte"));
2040 rv = pmap_dirty(pte) ? TRUE : FALSE;
2041 PMAP_UNLOCK(pv->pv_pmap);
2045 vm_page_unlock_queues();
2050 * pmap_is_prefaultable:
2052 * Return whether or not the specified virtual address is eligible
2056 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2058 struct ia64_lpte *pte;
2060 pte = pmap_find_vhpt(addr);
2061 if (pte != NULL && pmap_present(pte))
2067 * pmap_is_referenced:
2069 * Return whether or not the specified physical page was referenced
2070 * in any physical maps.
2073 pmap_is_referenced(vm_page_t m)
2075 struct ia64_lpte *pte;
2080 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2081 ("pmap_is_referenced: page %p is not managed", m));
2083 vm_page_lock_queues();
2084 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2085 PMAP_LOCK(pv->pv_pmap);
2086 oldpmap = pmap_switch(pv->pv_pmap);
2087 pte = pmap_find_vhpt(pv->pv_va);
2088 pmap_switch(oldpmap);
2089 KASSERT(pte != NULL, ("pte"));
2090 rv = pmap_accessed(pte) ? TRUE : FALSE;
2091 PMAP_UNLOCK(pv->pv_pmap);
2095 vm_page_unlock_queues();
2100 * Clear the modify bits on the specified physical page.
2103 pmap_clear_modify(vm_page_t m)
2105 struct ia64_lpte *pte;
2109 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2110 ("pmap_clear_modify: page %p is not managed", m));
2111 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2112 KASSERT((m->oflags & VPO_BUSY) == 0,
2113 ("pmap_clear_modify: page %p is busy", m));
2116 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
2117 * If the object containing the page is locked and the page is not
2118 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
2120 if ((m->aflags & PGA_WRITEABLE) == 0)
2122 vm_page_lock_queues();
2123 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2124 PMAP_LOCK(pv->pv_pmap);
2125 oldpmap = pmap_switch(pv->pv_pmap);
2126 pte = pmap_find_vhpt(pv->pv_va);
2127 KASSERT(pte != NULL, ("pte"));
2128 if (pmap_dirty(pte)) {
2129 pmap_clear_dirty(pte);
2130 pmap_invalidate_page(pv->pv_va);
2132 pmap_switch(oldpmap);
2133 PMAP_UNLOCK(pv->pv_pmap);
2135 vm_page_unlock_queues();
2139 * pmap_clear_reference:
2141 * Clear the reference bit on the specified physical page.
2144 pmap_clear_reference(vm_page_t m)
2146 struct ia64_lpte *pte;
2150 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2151 ("pmap_clear_reference: page %p is not managed", m));
2152 vm_page_lock_queues();
2153 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2154 PMAP_LOCK(pv->pv_pmap);
2155 oldpmap = pmap_switch(pv->pv_pmap);
2156 pte = pmap_find_vhpt(pv->pv_va);
2157 KASSERT(pte != NULL, ("pte"));
2158 if (pmap_accessed(pte)) {
2159 pmap_clear_accessed(pte);
2160 pmap_invalidate_page(pv->pv_va);
2162 pmap_switch(oldpmap);
2163 PMAP_UNLOCK(pv->pv_pmap);
2165 vm_page_unlock_queues();
2169 * Clear the write and modified bits in each of the given page's mappings.
2172 pmap_remove_write(vm_page_t m)
2174 struct ia64_lpte *pte;
2175 pmap_t oldpmap, pmap;
2179 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2180 ("pmap_remove_write: page %p is not managed", m));
2183 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
2184 * another thread while the object is locked. Thus, if PGA_WRITEABLE
2185 * is clear, no page table entries need updating.
2187 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2188 if ((m->oflags & VPO_BUSY) == 0 &&
2189 (m->aflags & PGA_WRITEABLE) == 0)
2191 vm_page_lock_queues();
2192 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2195 oldpmap = pmap_switch(pmap);
2196 pte = pmap_find_vhpt(pv->pv_va);
2197 KASSERT(pte != NULL, ("pte"));
2198 prot = pmap_prot(pte);
2199 if ((prot & VM_PROT_WRITE) != 0) {
2200 if (pmap_dirty(pte)) {
2202 pmap_clear_dirty(pte);
2204 prot &= ~VM_PROT_WRITE;
2205 pmap_pte_prot(pmap, pte, prot);
2206 pmap_pte_attr(pte, m->md.memattr);
2207 pmap_invalidate_page(pv->pv_va);
2209 pmap_switch(oldpmap);
2212 vm_page_aflag_clear(m, PGA_WRITEABLE);
2213 vm_page_unlock_queues();
2217 * Map a set of physical memory pages into the kernel virtual
2218 * address space. Return a pointer to where it is mapped. This
2219 * routine is intended to be used for mapping device memory,
2223 pmap_mapdev(vm_paddr_t pa, vm_size_t sz)
2225 static void *last_va = NULL;
2226 static vm_paddr_t last_pa = 0;
2227 static vm_size_t last_sz = 0;
2231 if (pa == last_pa && sz == last_sz)
2234 md = efi_md_find(pa);
2236 printf("%s: [%#lx..%#lx] not covered by memory descriptor\n",
2237 __func__, pa, pa + sz - 1);
2241 if (md->md_type == EFI_MD_TYPE_FREE) {
2242 printf("%s: [%#lx..%#lx] is in DRAM\n", __func__, pa,
2247 va = (md->md_attr & EFI_MD_ATTR_WB) ? IA64_PHYS_TO_RR7(pa) :
2248 IA64_PHYS_TO_RR6(pa);
2250 last_va = (void *)va;
2257 * 'Unmap' a range mapped by pmap_mapdev().
2260 pmap_unmapdev(vm_offset_t va, vm_size_t size)
2265 * Sets the memory attribute for the specified page.
2268 pmap_page_set_memattr_1(void *arg)
2270 struct ia64_pal_result res;
2272 uintptr_t pp = (uintptr_t)arg;
2274 is = intr_disable();
2275 res = ia64_call_pal_static(pp, 0, 0, 0);
2280 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
2282 struct ia64_lpte *pte;
2287 vm_page_lock_queues();
2289 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2290 PMAP_LOCK(pv->pv_pmap);
2291 oldpmap = pmap_switch(pv->pv_pmap);
2292 pte = pmap_find_vhpt(pv->pv_va);
2293 KASSERT(pte != NULL, ("pte"));
2294 pmap_pte_attr(pte, ma);
2295 pmap_invalidate_page(pv->pv_va);
2296 pmap_switch(oldpmap);
2297 PMAP_UNLOCK(pv->pv_pmap);
2299 vm_page_unlock_queues();
2301 if (ma == VM_MEMATTR_UNCACHEABLE) {
2303 smp_rendezvous(NULL, pmap_page_set_memattr_1, NULL,
2304 (void *)PAL_PREFETCH_VISIBILITY);
2306 pmap_page_set_memattr_1((void *)PAL_PREFETCH_VISIBILITY);
2308 va = (void *)pmap_page_to_va(m);
2310 cpu_flush_dcache(va, PAGE_SIZE);
2313 smp_rendezvous(NULL, pmap_page_set_memattr_1, NULL,
2314 (void *)PAL_MC_DRAIN);
2316 pmap_page_set_memattr_1((void *)PAL_MC_DRAIN);
2322 * perform the pmap work for mincore
2325 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
2328 struct ia64_lpte *pte, tpte;
2334 oldpmap = pmap_switch(pmap);
2335 pte = pmap_find_vhpt(addr);
2340 pmap_switch(oldpmap);
2341 if (pte == NULL || !pmap_present(pte)) {
2345 val = MINCORE_INCORE;
2346 if (pmap_dirty(pte))
2347 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
2348 if (pmap_accessed(pte))
2349 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
2350 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
2351 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
2352 pmap_managed(pte)) {
2354 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
2355 if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
2359 PA_UNLOCK_COND(*locked_pa);
2365 pmap_activate(struct thread *td)
2367 pmap_switch(vmspace_pmap(td->td_proc->p_vmspace));
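/*
 * Make the given pmap the active address space on this CPU by loading
 * its region IDs into the user region registers, and return the pmap
 * that was previously active.
 */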
2371 pmap_switch(pmap_t pm)
2377 prevpm = PCPU_GET(md.current_pmap);
2381 for (i = 0; i < IA64_VM_MINKERN_REGION; i++) {
2382 ia64_set_rr(IA64_RR_BASE(i),
2383 (i << 8)|(PAGE_SHIFT << 2)|1);
2386 for (i = 0; i < IA64_VM_MINKERN_REGION; i++) {
2387 ia64_set_rr(IA64_RR_BASE(i),
2388 (pm->pm_rid[i] << 8)|(PAGE_SHIFT << 2)|1);
2391 PCPU_SET(md.current_pmap, pm);
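/*
 * Make the instruction cache coherent with recent stores to the given
 * range: round the length up to 32-byte units and synchronize the
 * I-cache for every page in the range that is actually mapped.
 */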
2400 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
2403 struct ia64_lpte *pte;
2409 sz = (sz + 31) & ~31;
2412 oldpm = pmap_switch(pm);
2414 lim = round_page(va);
2415 len = MIN(lim - va, sz);
2416 pte = pmap_find_vhpt(va);
2417 if (pte != NULL && pmap_present(pte))
2418 ia64_sync_icache(va, len);
2427 * Increase the starting virtual address of the given mapping if a
2428 * different alignment might result in more superpage mappings.
2431 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
2432 vm_offset_t *addr, vm_size_t size)
2436 #include "opt_ddb.h"
2440 #include <ddb/ddb.h>
2442 static const char* psnames[] = {
2443 "1B", "2B", "4B", "8B",
2444 "16B", "32B", "64B", "128B",
2445 "256B", "512B", "1K", "2K",
2446 "4K", "8K", "16K", "32K",
2447 "64K", "128K", "256K", "512K",
2448 "1M", "2M", "4M", "8M",
2449 "16M", "32M", "64M", "128M",
2450 "256M", "512M", "1G", "2G"
2456 struct ia64_pal_result res;
2464 static const char *manames[] = {
2465 "WB", "bad", "bad", "bad",
2466 "UC", "UCE", "WC", "NaT",
2469 res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0);
2470 if (res.pal_status != 0) {
2471 db_printf("Can't get VM summary\n");
2476 maxtr = (res.pal_result[0] >> 40) & 0xff;
2478 maxtr = (res.pal_result[0] >> 32) & 0xff;
2480 db_printf("V RID Virtual Page Physical Page PgSz ED AR PL D A MA P KEY\n");
2481 for (i = 0; i <= maxtr; i++) {
2482 bzero(&buf, sizeof(buf));
2483 res = ia64_pal_physical(PAL_VM_TR_READ, i, type,
2484 ia64_tpa((uint64_t)&buf));
2485 if (!(res.pal_result[0] & 1))
2486 buf.pte &= ~PTE_AR_MASK;
2487 if (!(res.pal_result[0] & 2))
2488 buf.pte &= ~PTE_PL_MASK;
2489 if (!(res.pal_result[0] & 4))
2490 pmap_clear_dirty(&buf);
2491 if (!(res.pal_result[0] & 8))
2492 buf.pte &= ~PTE_MA_MASK;
2493 db_printf("%d %06x %013lx %013lx %4s %d %d %d %d %d %-3s "
2494 "%d %06x\n", (int)buf.ifa & 1, buf.rr.rr_rid,
2495 buf.ifa >> 12, (buf.pte & PTE_PPN_MASK) >> 12,
2496 psnames[(buf.itir & ITIR_PS_MASK) >> 2],
2497 (buf.pte & PTE_ED) ? 1 : 0,
2498 (int)(buf.pte & PTE_AR_MASK) >> 9,
2499 (int)(buf.pte & PTE_PL_MASK) >> 7,
2500 (pmap_dirty(&buf)) ? 1 : 0,
2501 (pmap_accessed(&buf)) ? 1 : 0,
2502 manames[(buf.pte & PTE_MA_MASK) >> 2],
2503 (pmap_present(&buf)) ? 1 : 0,
2504 (int)((buf.itir & ITIR_KEY_MASK) >> 8));
2508 DB_COMMAND(itr, db_itr)
2513 DB_COMMAND(dtr, db_dtr)
2518 DB_COMMAND(rr, db_rr)
2524 printf("RR RID PgSz VE\n");
2525 for (i = 0; i < 8; i++) {
2526 __asm __volatile ("mov %0=rr[%1]"
2528 : "r"(IA64_RR_BASE(i)));
2529 *(uint64_t *) &rr = t;
2530 printf("%d %06x %4s %d\n",
2531 i, rr.rr_rid, psnames[rr.rr_ps], rr.rr_ve);
2535 DB_COMMAND(thash, db_thash)
2540 db_printf("%p\n", (void *) ia64_thash(addr));
2543 DB_COMMAND(ttag, db_ttag)
2548 db_printf("0x%lx\n", ia64_ttag(addr));
2551 DB_COMMAND(kpte, db_kpte)
2553 struct ia64_lpte *pte;
2556 db_printf("usage: kpte <kva>\n");
2559 if (addr < VM_MIN_KERNEL_ADDRESS) {
2560 db_printf("kpte: error: invalid <kva>\n");
2563 pte = pmap_find_kpte(addr);
2564 db_printf("kpte at %p:\n", pte);
2565 db_printf(" pte =%016lx\n", pte->pte);
2566 db_printf(" itir =%016lx\n", pte->itir);
2567 db_printf(" tag =%016lx\n", pte->tag);
2568 db_printf(" chain=%016lx\n", pte->chain);