/*
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1994 David Greenman
 * Copyright (c) 1998,2000 Doug Rabson
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
 * from: i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
 *       with some ideas from NetBSD's alpha pmap
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pageout.h>

#include <machine/bootinfo.h>
#include <machine/efi.h>
#include <machine/md_var.h>
#include <machine/pal.h>

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced-protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and of when physical maps must be made correct.
 */

/*
 * Following the Linux model, region IDs are allocated in groups of
 * eight so that a single region ID can be used for as many RRs as we
 * want by encoding the RR number into the low bits of the ID.
 *
 * We reserve region ID 0 for the kernel and allocate the remaining
 * IDs for user pmaps.
 *
 * Region 0-3:	User virtually mapped
 * Region 4:	PBVM and special mappings
 * Region 5:	Kernel virtual memory
 * Region 6:	Direct-mapped uncacheable
 * Region 7:	Direct-mapped cacheable
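 *
 * (The direct-mapped regions are what the IA64_PHYS_TO_RR7() and
 * IA64_PHYS_TO_RR6() translations used throughout this file resolve
 * to; the kernel VA managed by pmap_growkernel() and pmap_find_kpte()
 * below lives entirely in region 5.)
 */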
/* XXX move to a header. */
extern uint64_t ia64_gateway_page[];

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#if !defined(DIAGNOSTIC)
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif

#define pmap_accessed(lpte)		((lpte)->pte & PTE_ACCESSED)
#define pmap_dirty(lpte)		((lpte)->pte & PTE_DIRTY)
#define pmap_exec(lpte)			((lpte)->pte & PTE_AR_RX)
#define pmap_managed(lpte)		((lpte)->pte & PTE_MANAGED)
#define pmap_ppn(lpte)			((lpte)->pte & PTE_PPN_MASK)
#define pmap_present(lpte)		((lpte)->pte & PTE_PRESENT)
#define pmap_prot(lpte)			(((lpte)->pte & PTE_PROT_MASK) >> 56)
#define pmap_wired(lpte)		((lpte)->pte & PTE_WIRED)

#define pmap_clear_accessed(lpte)	(lpte)->pte &= ~PTE_ACCESSED
#define pmap_clear_dirty(lpte)		(lpte)->pte &= ~PTE_DIRTY
#define pmap_clear_present(lpte)	(lpte)->pte &= ~PTE_PRESENT
#define pmap_clear_wired(lpte)		(lpte)->pte &= ~PTE_WIRED

#define pmap_set_wired(lpte)		(lpte)->pte |= PTE_WIRED
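
/*
 * The accessors above all operate on the 64-bit 'pte' word of struct
 * ia64_lpte; the structure's other words (itir, tag and chain) are
 * managed separately by the VHPT routines below.
 */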
/*
 * The VHPT bucket head structure.
 */

/*
 * Statically allocated kernel pmap
 */
struct pmap kernel_pmap_store;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

/*
 * Kernel virtual memory management.
 */
extern struct ia64_lpte ***ia64_kptdir;

#define KPTE_DIR0_INDEX(va) \
	(((va) >> (3*PAGE_SHIFT-8)) & ((1<<(PAGE_SHIFT-3))-1))
#define KPTE_DIR1_INDEX(va) \
	(((va) >> (2*PAGE_SHIFT-5)) & ((1<<(PAGE_SHIFT-3))-1))
#define KPTE_PTE_INDEX(va) \
	(((va) >> PAGE_SHIFT) & ((1<<(PAGE_SHIFT-5))-1))
#define NKPTEPG		(PAGE_SIZE / sizeof(struct ia64_lpte))
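
/*
 * The kernel page tables form a three-level tree rooted at ia64_kptdir:
 * KPTE_DIR0_INDEX() selects a page of dir1 pointers, KPTE_DIR1_INDEX()
 * selects the leaf page of PTEs within it, and KPTE_PTE_INDEX() selects
 * the PTE itself.  As a worked example (assuming 8KB pages, i.e.
 * PAGE_SHIFT == 13, and the 32-byte struct ia64_lpte): NKPTEPG =
 * 8192/32 = 256 PTEs per leaf page, so one leaf page maps 256 * 8KB =
 * 2MB of KVA, and each directory page holds 8192/8 = 1024 pointers.
 */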
vm_offset_t kernel_vm_end;

/* Values for ptc.e. XXX values for SKI. */
static uint64_t pmap_ptc_e_base = 0x100000000;
static uint64_t pmap_ptc_e_count1 = 3;
static uint64_t pmap_ptc_e_count2 = 2;
static uint64_t pmap_ptc_e_stride1 = 0x2000;
static uint64_t pmap_ptc_e_stride2 = 0x100000000;

struct mtx pmap_ptc_mutex;

/*
 * Data for the RID allocator
 */
static int pmap_ridcount;
static int pmap_rididx;
static int pmap_ridmapsz;
static int pmap_ridmax;
static uint64_t *pmap_ridmap;
struct mtx pmap_ridmutex;

/*
 * Data for the pv entry allocation mechanism
 */
static uma_zone_t pvzone;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

/*
 * Data for allocating PTEs for user processes.
 */
static uma_zone_t ptezone;

/*
 * Virtual Hash Page Table (VHPT) data.
 */
/* SYSCTL_DECL(_machdep); */
static SYSCTL_NODE(_machdep, OID_AUTO, vhpt, CTLFLAG_RD, 0, "");

struct ia64_bucket *pmap_vhpt_bucket;

int pmap_vhpt_nbuckets;
SYSCTL_INT(_machdep_vhpt, OID_AUTO, nbuckets, CTLFLAG_RD,
    &pmap_vhpt_nbuckets, 0, "");

int pmap_vhpt_log2size = 0;
TUNABLE_INT("machdep.vhpt.log2size", &pmap_vhpt_log2size);
SYSCTL_INT(_machdep_vhpt, OID_AUTO, log2size, CTLFLAG_RD,
    &pmap_vhpt_log2size, 0, "");

static int pmap_vhpt_inserts;
SYSCTL_INT(_machdep_vhpt, OID_AUTO, inserts, CTLFLAG_RD,
    &pmap_vhpt_inserts, 0, "");

static int pmap_vhpt_population(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_machdep_vhpt, OID_AUTO, population, CTLTYPE_INT | CTLFLAG_RD,
    NULL, 0, pmap_vhpt_population, "I", "");

static struct ia64_lpte *pmap_find_vhpt(vm_offset_t va);

static PMAP_INLINE void	free_pv_entry(pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap);

static void	pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
		    vm_page_t m, vm_prot_t prot);
static void	pmap_free_pte(struct ia64_lpte *pte, vm_offset_t va);
static void	pmap_invalidate_all(void);
static int	pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte,
		    vm_offset_t va, pv_entry_t pv, int freepte);
static int	pmap_remove_vhpt(vm_offset_t va);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
		    vm_page_t m);

pmap_steal_memory(vm_size_t size)

	size = round_page(size);

	bank_size = phys_avail[1] - phys_avail[0];
	while (size > bank_size) {
		for (i = 0; phys_avail[i+2]; i += 2) {
			phys_avail[i] = phys_avail[i+2];
			phys_avail[i+1] = phys_avail[i+3];
		}

		panic("pmap_steal_memory: out of memory");
		bank_size = phys_avail[1] - phys_avail[0];
	}

	phys_avail[0] += size;

	va = IA64_PHYS_TO_RR7(pa);
	bzero((caddr_t) va, size);

pmap_initialize_vhpt(vm_offset_t vhpt)

	struct ia64_lpte *pte;

	pte = (struct ia64_lpte *)vhpt;
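	/*
	 * Mark every VHPT entry invalid (tag bit 63 set) and point its
	 * chain word at the corresponding bucket head, so collision
	 * chain walks always terminate at the bucket.
	 */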
	for (i = 0; i < pmap_vhpt_nbuckets; i++) {
		pte[i].tag = 1UL << 63;	/* Invalid tag */
		pte[i].chain = (uintptr_t)(pmap_vhpt_bucket + i);
	}

MALLOC_DECLARE(M_SMP);

pmap_alloc_vhpt(void)

	size = 1UL << pmap_vhpt_log2size;
	vhpt = (uintptr_t)contigmalloc(size, M_SMP, 0, 0UL, ~0UL, size, 0UL);

	vhpt = IA64_PHYS_TO_RR7(ia64_tpa(vhpt));
	pmap_initialize_vhpt(vhpt);

/*
 * Bootstrap the system enough to run with virtual memory.
 */

	struct ia64_pal_result res;
	int i, j, count, ridbits;

	/*
	 * Query the PAL Code to find the loop parameters for the
	 * ptc.e instruction.
	 */
	res = ia64_call_pal_static(PAL_PTCE_INFO, 0, 0, 0);
	if (res.pal_status != 0)
		panic("Can't configure ptc.e parameters");
	pmap_ptc_e_base = res.pal_result[0];
	pmap_ptc_e_count1 = res.pal_result[1] >> 32;
	pmap_ptc_e_count2 = res.pal_result[1] & ((1L<<32) - 1);
	pmap_ptc_e_stride1 = res.pal_result[2] >> 32;
	pmap_ptc_e_stride2 = res.pal_result[2] & ((1L<<32) - 1);

	printf("ptc.e base=0x%lx, count1=%ld, count2=%ld, "
	    "stride1=0x%lx, stride2=0x%lx\n",

	mtx_init(&pmap_ptc_mutex, "PTC.G mutex", NULL, MTX_SPIN);

	/*
	 * Setup RIDs.  RIDs 0..7 are reserved for the kernel.
	 *
	 * We currently need at least 19 bits in the RID because PID_MAX
	 * can only be encoded in 17 bits and we need RIDs for 4 regions
	 * per process.  With PID_MAX equalling 99999 this means that we
	 * need to be able to encode 399996 (=4*PID_MAX).
	 *
	 * The Itanium processor only has 18 bits and the architected
	 * minimum is exactly that.  So, we cannot use a PID-based scheme
	 * in those cases.  Enter pmap_ridmap...
	 *
	 * We should avoid the map when running on a processor that has
	 * implemented enough bits.  This means that we should pass the
	 * process/thread ID to pmap.  This we currently don't do, so we
	 * use the map anyway.  However, we don't want to allocate a map
	 * that is large enough to cover the range dictated by the number
	 * of bits in the RID, because that may result in a RID map of
	 * 2MB in size for a 24-bit RID.  A 64KB map is enough.
	 *
	 * The bottom line: we create a 32KB map when the processor only
	 * implements 18 bits (or when we can't figure it out).  Otherwise
	 * we create a 64KB map.
	 */
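	/*
	 * (Sizing arithmetic: the map holds one bit per RID, so 2^18
	 * RIDs need 2^18/8 bytes = 32KB and 2^19 RIDs need 64KB, while
	 * a 24-bit RID space would need 2^24/8 bytes = 2MB -- the case
	 * the comment above declines to support.)
	 */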
	res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0);
	if (res.pal_status != 0) {
		printf("Can't read VM Summary - assuming 18 Region ID bits\n");
		ridbits = 18; /* guaranteed minimum */
	} else {
		ridbits = (res.pal_result[1] >> 8) & 0xff;

		printf("Processor supports %d Region ID bits\n",
	}

	pmap_ridmax = (1 << ridbits);
	pmap_ridmapsz = pmap_ridmax / 64;
	pmap_ridmap = (uint64_t *)pmap_steal_memory(pmap_ridmax / 8);
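	/*
	 * Setting the low eight bits of the first map word marks RIDs
	 * 0..7 as in use, implementing the kernel reservation noted
	 * above.
	 */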
	pmap_ridmap[0] |= 0xff;

	mtx_init(&pmap_ridmutex, "RID allocator lock", NULL, MTX_DEF);

	/*
	 * Allocate some memory for initial kernel 'page tables'.
	 */
	ia64_kptdir = (void *)pmap_steal_memory(PAGE_SIZE);

	kernel_vm_end = VM_MIN_KERNEL_ADDRESS;

	for (i = 0; phys_avail[i+2]; i += 2)

	/*
	 * Determine a valid (mappable) VHPT size.
	 */
	TUNABLE_INT_FETCH("machdep.vhpt.log2size", &pmap_vhpt_log2size);
	if (pmap_vhpt_log2size == 0)
		pmap_vhpt_log2size = 20;
	else if (pmap_vhpt_log2size < 16)
		pmap_vhpt_log2size = 16;
	else if (pmap_vhpt_log2size > 28)
		pmap_vhpt_log2size = 28;
	if (pmap_vhpt_log2size & 1)
		pmap_vhpt_log2size--;

	size = 1UL << pmap_vhpt_log2size;
	for (i = 0; i < count; i += 2) {
		base = (phys_avail[i] + size - 1) & ~(size - 1);
		if (base + size <= phys_avail[i+1])
			break;
	}

		panic("Unable to allocate VHPT");

	if (base != phys_avail[i]) {
		/* Split this region. */
		for (j = count; j > i; j -= 2) {
			phys_avail[j] = phys_avail[j-2];
			phys_avail[j+1] = phys_avail[j-2+1];
		}
		phys_avail[i+1] = base;
		phys_avail[i+2] = base + size;
	} else
		phys_avail[i] = base + size;

	base = IA64_PHYS_TO_RR7(base);
	PCPU_SET(md.vhpt, base);

	printf("VHPT: address=%#lx, size=%#lx\n", base, size);

	pmap_vhpt_nbuckets = size / sizeof(struct ia64_lpte);
	pmap_vhpt_bucket = (void *)pmap_steal_memory(pmap_vhpt_nbuckets *
	    sizeof(struct ia64_bucket));
	for (i = 0; i < pmap_vhpt_nbuckets; i++) {
		/* Stolen memory is zeroed. */
		mtx_init(&pmap_vhpt_bucket[i].mutex, "VHPT bucket lock", NULL,
		    MTX_NOWITNESS | MTX_SPIN);
	}

	pmap_initialize_vhpt(base);

	ia64_set_pta(base + (1 << 8) + (pmap_vhpt_log2size << 2) + 1);
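	/*
	 * The PTA value programmed above packs several fields: bit 0
	 * enables the VHPT walker, bits 7:2 hold the table size as
	 * log2(bytes), bit 8 selects the long (32-byte entry) format
	 * used here, and the high bits carry the table's base address
	 * (per the Itanium PTA register layout).
	 */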
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
		kernel_pmap->pm_rid[i] = 0;
	TAILQ_INIT(&kernel_pmap->pm_pvlist);
	PCPU_SET(md.current_pmap, kernel_pmap);

	/* Region 5 is mapped via the VHPT. */
	ia64_set_rr(IA64_RR_BASE(5), (5 << 8) | (PAGE_SHIFT << 2) | 1);
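	/*
	 * A region register value uses a similar encoding: bit 0 is the
	 * VHPT-enable bit, bits 7:2 give the preferred page size as
	 * log2(bytes), and bits 8 and up hold the region ID -- here
	 * RID 5 with PAGE_SHIFT-sized pages and the VHPT walker
	 * enabled.  pmap_switch() near the bottom of this file programs
	 * the user regions with the same encoding.
	 */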
	/*
	 * Clear out any random TLB entries left over from booting.
	 */
	pmap_invalidate_all();

pmap_vhpt_population(SYSCTL_HANDLER_ARGS)

	for (i = 0; i < pmap_vhpt_nbuckets; i++)
		count += pmap_vhpt_bucket[i].length;

	error = SYSCTL_OUT(req, &count, sizeof(count));

pmap_page_to_va(vm_page_t m)

	pa = VM_PAGE_TO_PHYS(m);
	va = (m->md.memattr == VM_MEMATTR_UNCACHEABLE) ? IA64_PHYS_TO_RR6(pa) :
	    IA64_PHYS_TO_RR7(pa);

/*
 * Initialize a vm_page's machine-dependent fields.
 */
pmap_page_init(vm_page_t m)

	TAILQ_INIT(&m->md.pv_list);
	m->md.pv_list_count = 0;
	m->md.memattr = VM_MEMATTR_DEFAULT;

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */

	int shpgperproc = PMAP_SHPGPERPROC;

	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	ptezone = uma_zcreate("PT ENTRY", sizeof (struct ia64_lpte),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);

/***************************************************
 * Manipulate TLBs for a pmap
 ***************************************************/

pmap_invalidate_page(vm_offset_t va)

	struct ia64_lpte *pte;

	vhpt_ofs = ia64_thash(va) - PCPU_GET(md.vhpt);

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		pte = (struct ia64_lpte *)(pc->pc_md.vhpt + vhpt_ofs);
		atomic_cmpset_64(&pte->tag, tag, 1UL << 63);
	}
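	/*
	 * Knocking the entry out of each CPU's VHPT (above) is not
	 * enough: the translation may still be cached in a TLB, so a
	 * global ptc.ga purge is issued as well, serialized by the PTC
	 * mutex.
	 */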
	mtx_lock_spin(&pmap_ptc_mutex);

	ia64_ptc_ga(va, PAGE_SHIFT << 2);

	mtx_unlock_spin(&pmap_ptc_mutex);

pmap_invalidate_all_1(void *arg)

	addr = pmap_ptc_e_base;
	for (i = 0; i < pmap_ptc_e_count1; i++) {
		for (j = 0; j < pmap_ptc_e_count2; j++) {

			addr += pmap_ptc_e_stride2;
		}
		addr += pmap_ptc_e_stride1;
	}

pmap_invalidate_all(void)

	smp_rendezvous(NULL, pmap_invalidate_all_1, NULL, NULL);

	pmap_invalidate_all_1(NULL);

pmap_allocate_rid(void)

	mtx_lock(&pmap_ridmutex);
	if (pmap_ridcount == pmap_ridmax)
		panic("pmap_allocate_rid: All Region IDs used");

	/* Find an index with a free bit. */
	while ((bits = pmap_ridmap[pmap_rididx]) == ~0UL) {

		if (pmap_rididx == pmap_ridmapsz)

	}

	rid = pmap_rididx * 64;

	/* Find a free bit. */

	pmap_ridmap[pmap_rididx] |= bit;

	mtx_unlock(&pmap_ridmutex);

pmap_free_rid(uint32_t rid)

	bit = ~(1UL << (rid & 63));

	mtx_lock(&pmap_ridmutex);
	pmap_ridmap[idx] &= bit;

	mtx_unlock(&pmap_ridmutex);
/***************************************************
 * Page table page management routines.....
 ***************************************************/

pmap_pinit0(struct pmap *pmap)

	/* kernel_pmap is the same as any other pmap. */

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
pmap_pinit(struct pmap *pmap)

	PMAP_LOCK_INIT(pmap);
	for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
		pmap->pm_rid[i] = pmap_allocate_rid();
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);

/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
pmap_release(pmap_t pmap)

	for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
		if (pmap->pm_rid[i])
			pmap_free_rid(pmap->pm_rid[i]);
	PMAP_LOCK_DESTROY(pmap);

/*
 * grow the number of kernel page table entries, if needed
 */
pmap_growkernel(vm_offset_t addr)

	struct ia64_lpte **dir1;
	struct ia64_lpte *leaf;

	while (kernel_vm_end <= addr) {
		if (nkpt == PAGE_SIZE/8 + PAGE_SIZE*PAGE_SIZE/64)
			panic("%s: out of kernel address space", __func__);

		dir1 = ia64_kptdir[KPTE_DIR0_INDEX(kernel_vm_end)];
		if (dir1 == NULL) {
			nkpg = vm_page_alloc(NULL, nkpt++,
			    VM_ALLOC_NOOBJ|VM_ALLOC_INTERRUPT|VM_ALLOC_WIRED);
			if (nkpg == NULL)
				panic("%s: cannot add dir. page", __func__);

			dir1 = (struct ia64_lpte **)pmap_page_to_va(nkpg);
			bzero(dir1, PAGE_SIZE);
			ia64_kptdir[KPTE_DIR0_INDEX(kernel_vm_end)] = dir1;
		}

		nkpg = vm_page_alloc(NULL, nkpt++,
		    VM_ALLOC_NOOBJ|VM_ALLOC_INTERRUPT|VM_ALLOC_WIRED);
		if (nkpg == NULL)
			panic("%s: cannot add PTE page", __func__);

		leaf = (struct ia64_lpte *)pmap_page_to_va(nkpg);
		bzero(leaf, PAGE_SIZE);
		dir1[KPTE_DIR1_INDEX(kernel_vm_end)] = leaf;

		kernel_vm_end += PAGE_SIZE * NKPTEPG;
	}
/***************************************************
 * page management routines.
 ***************************************************/

/*
 * free the pv_entry back to the free list
 */
static PMAP_INLINE void
free_pv_entry(pv_entry_t pv)

	uma_zfree(pvzone, pv);

/*
 * get a new pv_entry, allocating a block from the system
 */
get_pv_entry(pmap_t locked_pmap)

	static const struct timeval printinterval = { 60, 0 };
	static struct timeval lastprint;
	struct vpgqueues *vpq;
	struct ia64_lpte *pte;
	pmap_t oldpmap, pmap;
	pv_entry_t allocated_pv, next_pv, pv;

	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
	if (allocated_pv != NULL) {

		if (pv_entry_count > pv_entry_high_water)

			return (allocated_pv);

	/*
	 * Reclaim pv entries: At first, destroy mappings to inactive
	 * pages.  After that, if a pv entry is still needed, destroy
	 * mappings to active pages.
	 */
	if (ratecheck(&lastprint, &printinterval))
		printf("Approaching the limit on PV entries, "
		    "increase the vm.pmap.shpgperproc tunable.\n");
	vpq = &vm_page_queues[PQ_INACTIVE];

	TAILQ_FOREACH(m, &vpq->pl, pageq) {
		if ((m->flags & PG_MARKER) != 0 || m->hold_count || m->busy)

		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {

			/* Avoid deadlock and lock recursion. */
			if (pmap > locked_pmap)

			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))

			pmap->pm_stats.resident_count--;
			oldpmap = pmap_switch(pmap);
			pte = pmap_find_vhpt(va);
			KASSERT(pte != NULL, ("pte"));
			pmap_remove_vhpt(va);
			pmap_invalidate_page(va);
			pmap_switch(oldpmap);
			if (pmap_accessed(pte))
				vm_page_aflag_set(m, PGA_REFERENCED);

			pmap_free_pte(pte, va);
			TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
			m->md.pv_list_count--;
			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
			if (pmap != locked_pmap)

			if (allocated_pv == NULL)

		if (TAILQ_EMPTY(&m->md.pv_list))
			vm_page_aflag_clear(m, PGA_WRITEABLE);

	if (allocated_pv == NULL) {
		if (vpq == &vm_page_queues[PQ_INACTIVE]) {
			vpq = &vm_page_queues[PQ_ACTIVE];

		panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");

	return (allocated_pv);
/*
 * Conditionally create a pv entry.
 */
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (pv_entry_count < pv_entry_high_water &&
	    (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) {

		TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
		m->md.pv_list_count++;

/*
 * Add an ia64_lpte to the VHPT.
 */
pmap_enter_vhpt(struct ia64_lpte *pte, vm_offset_t va)

	struct ia64_bucket *bckt;
	struct ia64_lpte *vhpte;

	/* Can fault, so get it out of the way. */
	pte_pa = ia64_tpa((vm_offset_t)pte);

	vhpte = (struct ia64_lpte *)ia64_thash(va);
	bckt = (struct ia64_bucket *)vhpte->chain;

	mtx_lock_spin(&bckt->mutex);
	pte->chain = bckt->chain;

	bckt->chain = pte_pa;

	mtx_unlock_spin(&bckt->mutex);

/*
 * Remove the ia64_lpte matching va from the VHPT.  Return zero if it
 * worked or an appropriate error code otherwise.
 */
pmap_remove_vhpt(vm_offset_t va)

	struct ia64_bucket *bckt;
	struct ia64_lpte *pte;
	struct ia64_lpte *lpte;
	struct ia64_lpte *vhpte;

	vhpte = (struct ia64_lpte *)ia64_thash(va);
	bckt = (struct ia64_bucket *)vhpte->chain;

	mtx_lock_spin(&bckt->mutex);

	pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
	while (chain != 0 && pte->tag != tag) {

		pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);

	mtx_unlock_spin(&bckt->mutex);

	/* Snip this pv_entry out of the collision chain. */

	bckt->chain = pte->chain;

	lpte->chain = pte->chain;

	mtx_unlock_spin(&bckt->mutex);
/*
 * Find the ia64_lpte for the given va, if any.
 */
static struct ia64_lpte *
pmap_find_vhpt(vm_offset_t va)

	struct ia64_bucket *bckt;
	struct ia64_lpte *pte;

	pte = (struct ia64_lpte *)ia64_thash(va);
	bckt = (struct ia64_bucket *)pte->chain;

	mtx_lock_spin(&bckt->mutex);

	pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
	while (chain != 0 && pte->tag != tag) {

		pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);

	mtx_unlock_spin(&bckt->mutex);
	return ((chain != 0) ? pte : NULL);

/*
 * Remove an entry from the list of managed mappings.
 */
pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va, pv_entry_t pv)

	if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
			if (pmap == pv->pv_pmap && va == pv->pv_va)

		TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {

	TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
	m->md.pv_list_count--;
	if (TAILQ_FIRST(&m->md.pv_list) == NULL)
		vm_page_aflag_clear(m, PGA_WRITEABLE);

	TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);

/*
 * Create a pv entry for page at pa for
 * (pmap, va).
 */
pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)

	pv = get_pv_entry(pmap);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
	m->md.pv_list_count++;

/*
 * Routine:	pmap_extract
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */
pmap_extract(pmap_t pmap, vm_offset_t va)

	struct ia64_lpte *pte;

	oldpmap = pmap_switch(pmap);
	pte = pmap_find_vhpt(va);
	if (pte != NULL && pmap_present(pte))
		pa = pmap_ppn(pte) | (va & PAGE_MASK);
	pmap_switch(oldpmap);

/*
 * Routine:	pmap_extract_and_hold
 *	Atomically extract and hold the physical page
 *	with the given pmap and virtual address pair
 *	if that mapping permits the given protection.
 */
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)

	struct ia64_lpte *pte;

	oldpmap = pmap_switch(pmap);

	pte = pmap_find_vhpt(va);
	if (pte != NULL && pmap_present(pte) &&
	    (pmap_prot(pte) & prot) == prot) {
		m = PHYS_TO_VM_PAGE(pmap_ppn(pte));
		if (vm_page_pa_tryrelock(pmap, pmap_ppn(pte), &pa))

	pmap_switch(oldpmap);

/***************************************************
 * Low level mapping routines.....
 ***************************************************/
/*
 * Find the kernel lpte for mapping the given virtual address, which
 * must be in the part of region 5 that we can cover with our kernel
 * page tables.
 */
static struct ia64_lpte *
pmap_find_kpte(vm_offset_t va)

	struct ia64_lpte **dir1;
	struct ia64_lpte *leaf;

	KASSERT((va >> 61) == 5,
	    ("kernel mapping 0x%lx not in region 5", va));
	KASSERT(va < kernel_vm_end,
	    ("kernel mapping 0x%lx out of range", va));

	dir1 = ia64_kptdir[KPTE_DIR0_INDEX(va)];
	leaf = dir1[KPTE_DIR1_INDEX(va)];
	return (&leaf[KPTE_PTE_INDEX(va)]);

/*
 * Find a pte suitable for mapping a user-space address.  If one exists
 * in the VHPT, that one will be returned, otherwise a new pte is
 * allocated.
 */
static struct ia64_lpte *
pmap_find_pte(vm_offset_t va)

	struct ia64_lpte *pte;

	if (va >= VM_MAXUSER_ADDRESS)
		return pmap_find_kpte(va);

	pte = pmap_find_vhpt(va);

	pte = uma_zalloc(ptezone, M_NOWAIT | M_ZERO);
	pte->tag = 1UL << 63;

/*
 * Free a pte which is now unused.  This simply returns it to the zone
 * allocator if it is a user mapping.  For kernel mappings, clear the
 * valid bit to make it clear that the mapping is not currently used.
 */
pmap_free_pte(struct ia64_lpte *pte, vm_offset_t va)

	if (va < VM_MAXUSER_ADDRESS)
		uma_zfree(ptezone, pte);
	else
		pmap_clear_present(pte);

static PMAP_INLINE void
pmap_pte_prot(pmap_t pm, struct ia64_lpte *pte, vm_prot_t prot)

	static long prot2ar[4] = {
		PTE_AR_R,		/* VM_PROT_NONE */
		PTE_AR_RW,		/* VM_PROT_WRITE */
		PTE_AR_RX|PTE_ED,	/* VM_PROT_EXECUTE */
		PTE_AR_RWX|PTE_ED	/* VM_PROT_WRITE|VM_PROT_EXECUTE */
	};

	pte->pte &= ~(PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK | PTE_ED);
	pte->pte |= (uint64_t)(prot & VM_PROT_ALL) << 56;
	pte->pte |= (prot == VM_PROT_NONE || pm == kernel_pmap)
	    ? PTE_PL_KERN : PTE_PL_USER;
	pte->pte |= prot2ar[(prot & VM_PROT_ALL) >> 1];
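
/*
 * Note on the prot2ar[] indexing above: VM_PROT_READ is bit 0, so
 * shifting (prot & VM_PROT_ALL) right by one indexes the table by the
 * WRITE and EXECUTE bits alone -- index 0 maps to read-only, 1 to
 * read/write, 2 to read/execute and 3 to read/write/execute, with
 * read permission implied in every entry.
 */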
static PMAP_INLINE void
pmap_pte_attr(struct ia64_lpte *pte, vm_memattr_t ma)

	pte->pte &= ~PTE_MA_MASK;
	pte->pte |= (ma & PTE_MA_MASK);

/*
 * Set a pte to contain a valid mapping and enter it in the VHPT.  If
 * the pte was originally valid, then it's assumed to already be in the
 * VHPT.
 * This function does not set the protection bits.  It's expected
 * that those have been set correctly prior to calling this function.
 */
pmap_set_pte(struct ia64_lpte *pte, vm_offset_t va, vm_offset_t pa,
    boolean_t wired, boolean_t managed)

	pte->pte &= PTE_PROT_MASK | PTE_MA_MASK | PTE_PL_MASK |
	    PTE_AR_MASK | PTE_ED;
	pte->pte |= PTE_PRESENT;
	pte->pte |= (managed) ? PTE_MANAGED : (PTE_DIRTY | PTE_ACCESSED);
	pte->pte |= (wired) ? PTE_WIRED : 0;
	pte->pte |= pa & PTE_PPN_MASK;

	pte->itir = PAGE_SHIFT << 2;

	pte->tag = ia64_ttag(va);
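	/*
	 * The itir word carries the translation's page size (the ps
	 * field occupies bits 7:2, hence PAGE_SHIFT << 2), and the tag
	 * computed by ia64_ttag() is what a VHPT lookup compares
	 * against for va.
	 */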
/*
 * Remove the (possibly managed) mapping represented by pte from the
 * given pmap.
 */
pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vm_offset_t va,
    pv_entry_t pv, int freepte)

	/*
	 * First remove from the VHPT.
	 */
	error = pmap_remove_vhpt(va);

	pmap_invalidate_page(va);

	if (pmap_wired(pte))
		pmap->pm_stats.wired_count -= 1;

	pmap->pm_stats.resident_count -= 1;
	if (pmap_managed(pte)) {
		m = PHYS_TO_VM_PAGE(pmap_ppn(pte));
		if (pmap_dirty(pte))
			vm_page_dirty(m);
		if (pmap_accessed(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		error = pmap_remove_entry(pmap, m, va, pv);
	}
	if (freepte)
		pmap_free_pte(pte, va);

/*
 * Extract the physical page address associated with a kernel
 * virtual address.
 */
pmap_kextract(vm_offset_t va)

	struct ia64_lpte *pte;
	uint64_t *pbvm_pgtbl;

	KASSERT(va >= VM_MAXUSER_ADDRESS, ("Must be kernel VA"));

	/* Regions 6 and 7 are direct mapped. */
	if (va >= IA64_RR_BASE(6)) {
		pa = IA64_RR_MASK(va);

	/* Region 5 is our KVA. Bail out if the VA is beyond our limits. */
	if (va >= kernel_vm_end)

	if (va >= VM_MIN_KERNEL_ADDRESS) {
		pte = pmap_find_kpte(va);
		pa = pmap_present(pte) ? pmap_ppn(pte) | (va & PAGE_MASK) : 0;

	/* The PBVM page table. */
	if (va >= IA64_PBVM_PGTBL + bootinfo->bi_pbvm_pgtblsz)

	if (va >= IA64_PBVM_PGTBL) {
		pa = (va - IA64_PBVM_PGTBL) + bootinfo->bi_pbvm_pgtbl;

	/* The PBVM itself. */
	if (va >= IA64_PBVM_BASE) {
		pbvm_pgtbl = (void *)IA64_PBVM_PGTBL;
		idx = (va - IA64_PBVM_BASE) >> IA64_PBVM_PAGE_SHIFT;
		if (idx >= (bootinfo->bi_pbvm_pgtblsz >> 3))

		if ((pbvm_pgtbl[idx] & PTE_PRESENT) == 0)

		pa = (pbvm_pgtbl[idx] & PTE_PPN_MASK) +
		    (va & IA64_PBVM_PAGE_MASK);

	printf("XXX: %s: va=%#lx is invalid\n", __func__, va);
/*
 * Add a list of wired pages to the kva.  This routine is only used for
 * temporary kernel mappings that do not need to have page modification
 * or references recorded.  Note that old mappings are simply written
 * over.  The page is effectively wired, but it's customary to not have
 * the PTE reflect that, nor update statistics.
 */
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)

	struct ia64_lpte *pte;

	for (i = 0; i < count; i++) {
		pte = pmap_find_kpte(va);
		if (pmap_present(pte))
			pmap_invalidate_page(va);
		else
			pmap_enter_vhpt(pte, va);
		pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
		pmap_pte_attr(pte, m[i]->md.memattr);
		pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m[i]), FALSE, FALSE);
		va += PAGE_SIZE;
	}

/*
 * This routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
pmap_qremove(vm_offset_t va, int count)

	struct ia64_lpte *pte;

	for (i = 0; i < count; i++) {
		pte = pmap_find_kpte(va);
		if (pmap_present(pte)) {
			pmap_remove_vhpt(va);
			pmap_invalidate_page(va);
			pmap_clear_present(pte);
		}
		va += PAGE_SIZE;
	}

/*
 * Add a wired page to the kva.  As for pmap_qenter(), it's customary
 * to not have the PTE reflect that, nor update statistics.
 */
pmap_kenter(vm_offset_t va, vm_offset_t pa)

	struct ia64_lpte *pte;

	pte = pmap_find_kpte(va);
	if (pmap_present(pte))
		pmap_invalidate_page(va);
	else
		pmap_enter_vhpt(pte, va);
	pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
	pmap_pte_attr(pte, VM_MEMATTR_DEFAULT);
	pmap_set_pte(pte, va, pa, FALSE, FALSE);

/*
 * Remove a page from the kva
 */
pmap_kremove(vm_offset_t va)

	struct ia64_lpte *pte;

	pte = pmap_find_kpte(va);
	if (pmap_present(pte)) {
		pmap_remove_vhpt(va);
		pmap_invalidate_page(va);
		pmap_clear_present(pte);
	}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * The value passed in '*virt' is a suggested virtual address for
 * the mapping.  Architectures which can support a direct-mapped
 * physical to virtual region can return the appropriate address
 * within that region, leaving '*virt' unchanged.  Other
 * architectures should map the pages starting at '*virt' and
 * update '*virt' with the first usable address after the mapped
 * region.
 */
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)

	return IA64_PHYS_TO_RR7(start);
1368 return IA64_PHYS_TO_RR7(start);
1372 * Remove the given range of addresses from the specified map.
1374 * It is assumed that the start and end are properly
1375 * rounded to the page size.
1378 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1383 struct ia64_lpte *pte;
1385 if (pmap->pm_stats.resident_count == 0)
1388 vm_page_lock_queues();
1390 oldpmap = pmap_switch(pmap);
1393 * special handling of removing one page. a very
1394 * common operation and easy to short circuit some
1397 if (sva + PAGE_SIZE == eva) {
1398 pte = pmap_find_vhpt(sva);
1400 pmap_remove_pte(pmap, pte, sva, 0, 1);
1404 if (pmap->pm_stats.resident_count < ((eva - sva) >> PAGE_SHIFT)) {
1405 TAILQ_FOREACH_SAFE(pv, &pmap->pm_pvlist, pv_plist, npv) {
1407 if (va >= sva && va < eva) {
1408 pte = pmap_find_vhpt(va);
1409 KASSERT(pte != NULL, ("pte"));
1410 pmap_remove_pte(pmap, pte, va, pv, 1);
1414 for (va = sva; va < eva; va += PAGE_SIZE) {
1415 pte = pmap_find_vhpt(va);
1417 pmap_remove_pte(pmap, pte, va, 0, 1);
1422 vm_page_unlock_queues();
1423 pmap_switch(oldpmap);
1428 * Routine: pmap_remove_all
1430 * Removes this physical page from
1431 * all physical maps in which it resides.
1432 * Reflects back modify bits to the pager.
1435 * Original versions of this routine were very
1436 * inefficient because they iteratively called
1437 * pmap_remove (slow...)
1441 pmap_remove_all(vm_page_t m)
1446 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1447 ("pmap_remove_all: page %p is not managed", m));
1448 vm_page_lock_queues();
1449 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1450 struct ia64_lpte *pte;
1451 pmap_t pmap = pv->pv_pmap;
1452 vm_offset_t va = pv->pv_va;
1455 oldpmap = pmap_switch(pmap);
1456 pte = pmap_find_vhpt(va);
1457 KASSERT(pte != NULL, ("pte"));
1458 if (pmap_ppn(pte) != VM_PAGE_TO_PHYS(m))
1459 panic("pmap_remove_all: pv_table for %lx is inconsistent", VM_PAGE_TO_PHYS(m));
1460 pmap_remove_pte(pmap, pte, va, pv, 1);
1461 pmap_switch(oldpmap);
1464 vm_page_aflag_clear(m, PGA_WRITEABLE);
1465 vm_page_unlock_queues();
/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)

	struct ia64_lpte *pte;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
	    (VM_PROT_WRITE|VM_PROT_EXECUTE))
		return;

	if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
		panic("pmap_protect: unaligned addresses");

	oldpmap = pmap_switch(pmap);
	for ( ; sva < eva; sva += PAGE_SIZE) {
		/* If page is invalid, skip this page */
		pte = pmap_find_vhpt(sva);

		/* If there's no change, skip it too */
		if (pmap_prot(pte) == prot)

		if ((prot & VM_PROT_WRITE) == 0 &&
		    pmap_managed(pte) && pmap_dirty(pte)) {
			vm_paddr_t pa = pmap_ppn(pte);
			vm_page_t m = PHYS_TO_VM_PAGE(pa);

			vm_page_dirty(m);
			pmap_clear_dirty(pte);
		}

		if (prot & VM_PROT_EXECUTE)
			ia64_sync_icache(sva, PAGE_SIZE);

		pmap_pte_prot(pmap, pte, prot);
		pmap_invalidate_page(sva);
	}
	pmap_switch(oldpmap);

/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
    vm_prot_t prot, boolean_t wired)

	struct ia64_lpte origpte;
	struct ia64_lpte *pte;
	boolean_t icache_inval, managed;

	vm_page_lock_queues();

	oldpmap = pmap_switch(pmap);

	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
	    ("pmap_enter: page %p is not busy", m));

	/*
	 * Find (or create) a pte for the given mapping.
	 */
	while ((pte = pmap_find_pte(va)) == NULL) {
		pmap_switch(oldpmap);

		vm_page_unlock_queues();

		vm_page_lock_queues();

		oldpmap = pmap_switch(pmap);
	}

	if (!pmap_present(pte)) {

		pmap_enter_vhpt(pte, va);
	} else
		opa = pmap_ppn(pte);

	pa = VM_PAGE_TO_PHYS(m);

	icache_inval = (prot & VM_PROT_EXECUTE) ? TRUE : FALSE;

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */

	/*
	 * Wiring change, just update stats. We don't worry about
	 * wiring PT pages as they remain resident as long as there
	 * are valid mappings in them. Hence, if a user page is wired,
	 * the PT page will be also.
	 */
	if (wired && !pmap_wired(&origpte))
		pmap->pm_stats.wired_count++;
	else if (!wired && pmap_wired(&origpte))
		pmap->pm_stats.wired_count--;

	managed = (pmap_managed(&origpte)) ? TRUE : FALSE;

	/*
	 * We might be turning off write access to the page,
	 * so we go ahead and sense modify status. Otherwise,
	 * we can avoid I-cache invalidation if the page
	 * already allowed execution.
	 */
	if (managed && pmap_dirty(&origpte))
		vm_page_dirty(m);
	else if (pmap_exec(&origpte))
		icache_inval = FALSE;

	pmap_invalidate_page(va);

	/*
	 * Mapping has changed, invalidate old range and fall
	 * through to handle validating new mapping.
	 */

	pmap_remove_pte(pmap, pte, va, 0, 0);
	pmap_enter_vhpt(pte, va);

	/*
	 * Enter on the PV list if part of our managed memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
		    ("pmap_enter: managed mapping within the clean submap"));
		pmap_insert_entry(pmap, va, m);

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

	/*
	 * Now validate mapping with desired protection/wiring. This
	 * adds the pte to the VHPT if necessary.
	 */
	pmap_pte_prot(pmap, pte, prot);
	pmap_pte_attr(pte, m->md.memattr);
	pmap_set_pte(pte, va, pa, wired, managed);

	/* Invalidate the I-cache when needed. */
	if (icache_inval)
		ia64_sync_icache(va, PAGE_SIZE);

	if ((prot & VM_PROT_WRITE) != 0 && managed)
		vm_page_aflag_set(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
	pmap_switch(oldpmap);
/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)

	vm_pindex_t diff, psize;

	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
	psize = atop(end - start);

	vm_page_lock_queues();

	oldpmap = pmap_switch(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot);
		m = TAILQ_NEXT(m, listq);
	}
	vm_page_unlock_queues();
	pmap_switch(oldpmap);

/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * but is *MUCH* faster than pmap_enter...
 */
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)

	vm_page_lock_queues();

	oldpmap = pmap_switch(pmap);
	pmap_enter_quick_locked(pmap, va, m, prot);
	vm_page_unlock_queues();
	pmap_switch(oldpmap);

static void
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)

	struct ia64_lpte *pte;

	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
	    (m->oflags & VPO_UNMANAGED) != 0,
	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	if ((pte = pmap_find_pte(va)) == NULL)
		return;

	if (!pmap_present(pte)) {
		/* Enter on the PV list if the page is managed. */
		if ((m->oflags & VPO_UNMANAGED) == 0) {
			if (!pmap_try_insert_pv_entry(pmap, va, m)) {
				pmap_free_pte(pte, va);

		/* Increment counters. */
		pmap->pm_stats.resident_count++;

		/* Initialize with R/O protection and enter into VHPT. */
		pmap_enter_vhpt(pte, va);
		pmap_pte_prot(pmap, pte,
		    prot & (VM_PROT_READ | VM_PROT_EXECUTE));
		pmap_pte_attr(pte, m->md.memattr);
		pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed);

		if (prot & VM_PROT_EXECUTE)
			ia64_sync_icache(va, PAGE_SIZE);
/*
 * pmap_object_init_pt preloads the ptes for a given object
 * into the specified pmap.  This eliminates the blast of soft
 * faults on process startup and immediately after an mmap.
 */
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex,
    vm_size_t size)

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
	    ("pmap_object_init_pt: non-device object"));

/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)

	struct ia64_lpte *pte;

	oldpmap = pmap_switch(pmap);

	pte = pmap_find_vhpt(va);
	KASSERT(pte != NULL, ("pte"));
	if (wired && !pmap_wired(pte)) {
		pmap->pm_stats.wired_count++;
		pmap_set_wired(pte);
	} else if (!wired && pmap_wired(pte)) {
		pmap->pm_stats.wired_count--;
		pmap_clear_wired(pte);
	}

	pmap_switch(oldpmap);

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)

/*
 * pmap_zero_page zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 */
pmap_zero_page(vm_page_t m)

	p = (void *)pmap_page_to_va(m);
	bzero(p, PAGE_SIZE);

/*
 * pmap_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
pmap_zero_page_area(vm_page_t m, int off, int size)

	p = (void *)pmap_page_to_va(m);
	bzero(p + off, size);

/*
 * pmap_zero_page_idle zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.  This is for the vm_idlezero process.
 */
pmap_zero_page_idle(vm_page_t m)

	p = (void *)pmap_page_to_va(m);
	bzero(p, PAGE_SIZE);

/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 */
pmap_copy_page(vm_page_t msrc, vm_page_t mdst)

	src = (void *)pmap_page_to_va(msrc);
	dst = (void *)pmap_page_to_va(mdst);
	bcopy(src, dst, PAGE_SIZE);

pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)

	vm_offset_t a_pg_offset, b_pg_offset;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		a_cp = (char *)pmap_page_to_va(ma[a_offset >> PAGE_SHIFT]) +
		    a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		b_cp = (char *)pmap_page_to_va(mb[b_offset >> PAGE_SHIFT]) +
		    b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_page_exists_quick: page %p is not managed", m));

	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		if (pv->pv_pmap == pmap) {

	vm_page_unlock_queues();

/*
 * pmap_page_wired_mappings:
 *
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
pmap_page_wired_mappings(vm_page_t m)

	struct ia64_lpte *pte;
	pmap_t oldpmap, pmap;

	if ((m->oflags & VPO_UNMANAGED) != 0)

	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {

		oldpmap = pmap_switch(pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		KASSERT(pte != NULL, ("pte"));
		if (pmap_wired(pte))
			count++;
		pmap_switch(oldpmap);

	vm_page_unlock_queues();

/*
 * Remove all pages from the specified address space.  This aids process
 * exit speeds.  Also, this code is special cased for the current process
 * only, but can have the more generic (and slightly slower) mode enabled.
 * This is much faster than pmap_remove in the case of running down an
 * entire address space.
 */
pmap_remove_pages(pmap_t pmap)

	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
		printf("warning: %s called with non-current pmap\n",
		    __func__);
		return;
	}

	vm_page_lock_queues();

	oldpmap = pmap_switch(pmap);

	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
		struct ia64_lpte *pte;

		npv = TAILQ_NEXT(pv, pv_plist);

		pte = pmap_find_vhpt(pv->pv_va);
		KASSERT(pte != NULL, ("pte"));
		if (!pmap_wired(pte))
			pmap_remove_pte(pmap, pte, pv->pv_va, pv, 1);
	}

	pmap_switch(oldpmap);

	vm_page_unlock_queues();
/*
 * pmap_ts_referenced:
 *
 * Return a count of reference bits for a page, clearing those bits.
 * It is not necessary for every reference bit to be cleared, but it
 * is necessary that 0 only be returned when there are truly no
 * reference bits set.
 *
 * XXX: The exact number of bits to check and clear is a matter that
 * should be tested and standardized at some point in the future for
 * optimal aging of shared pages.
 */
pmap_ts_referenced(vm_page_t m)

	struct ia64_lpte *pte;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_ts_referenced: page %p is not managed", m));
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		oldpmap = pmap_switch(pv->pv_pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		KASSERT(pte != NULL, ("pte"));
		if (pmap_accessed(pte)) {
			count++;
			pmap_clear_accessed(pte);
			pmap_invalidate_page(pv->pv_va);
		}
		pmap_switch(oldpmap);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();
	return (count);

/*
 * pmap_is_modified:
 *
 * Return whether or not the specified physical page was modified
 * in any physical maps.
 */
pmap_is_modified(vm_page_t m)

	struct ia64_lpte *pte;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_modified: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can be dirty.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return (rv);
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		oldpmap = pmap_switch(pv->pv_pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		pmap_switch(oldpmap);
		KASSERT(pte != NULL, ("pte"));
		rv = pmap_dirty(pte) ? TRUE : FALSE;
		PMAP_UNLOCK(pv->pv_pmap);

	vm_page_unlock_queues();

/*
 * pmap_is_prefaultable:
 *
 * Return whether or not the specified virtual address is eligible
 * for prefault.
 */
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)

	struct ia64_lpte *pte;

	pte = pmap_find_vhpt(addr);
	if (pte != NULL && pmap_present(pte))
		return (FALSE);
	return (TRUE);

/*
 * pmap_is_referenced:
 *
 * Return whether or not the specified physical page was referenced
 * in any physical maps.
 */
pmap_is_referenced(vm_page_t m)

	struct ia64_lpte *pte;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_referenced: page %p is not managed", m));

	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		oldpmap = pmap_switch(pv->pv_pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		pmap_switch(oldpmap);
		KASSERT(pte != NULL, ("pte"));
		rv = pmap_accessed(pte) ? TRUE : FALSE;
		PMAP_UNLOCK(pv->pv_pmap);

	vm_page_unlock_queues();
/*
 * pmap_clear_modify:
 *
 * Clear the modify bits on the specified physical page.
 */
pmap_clear_modify(vm_page_t m)

	struct ia64_lpte *pte;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_modify: page %p is not managed", m));
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("pmap_clear_modify: page %p is busy", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
	 * If the object containing the page is locked and the page is not
	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		oldpmap = pmap_switch(pv->pv_pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		KASSERT(pte != NULL, ("pte"));
		if (pmap_dirty(pte)) {
			pmap_clear_dirty(pte);
			pmap_invalidate_page(pv->pv_va);
		}
		pmap_switch(oldpmap);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();

/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */
pmap_clear_reference(vm_page_t m)

	struct ia64_lpte *pte;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_reference: page %p is not managed", m));
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		oldpmap = pmap_switch(pv->pv_pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		KASSERT(pte != NULL, ("pte"));
		if (pmap_accessed(pte)) {
			pmap_clear_accessed(pte);
			pmap_invalidate_page(pv->pv_va);
		}
		pmap_switch(oldpmap);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
pmap_remove_write(vm_page_t m)

	struct ia64_lpte *pte;
	pmap_t oldpmap, pmap;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no page table entries need updating.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {

		oldpmap = pmap_switch(pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		KASSERT(pte != NULL, ("pte"));
		prot = pmap_prot(pte);
		if ((prot & VM_PROT_WRITE) != 0) {
			if (pmap_dirty(pte)) {
				vm_page_dirty(m);
				pmap_clear_dirty(pte);
			}
			prot &= ~VM_PROT_WRITE;
			pmap_pte_prot(pmap, pte, prot);
			pmap_pte_attr(pte, m->md.memattr);
			pmap_invalidate_page(pv->pv_va);
		}
		pmap_switch(oldpmap);

	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
pmap_mapdev(vm_paddr_t pa, vm_size_t sz)

	static void *last_va = NULL;
	static vm_paddr_t last_pa = 0;
	static vm_size_t last_sz = 0;

	if (pa == last_pa && sz == last_sz)
		return (last_va);

	md = efi_md_find(pa);
	if (md == NULL) {
		printf("%s: [%#lx..%#lx] not covered by memory descriptor\n",
		    __func__, pa, pa + sz - 1);

	if (md->md_type == EFI_MD_TYPE_FREE) {
		printf("%s: [%#lx..%#lx] is in DRAM\n", __func__, pa,
		    pa + sz - 1);

	va = (md->md_attr & EFI_MD_ATTR_WB) ? IA64_PHYS_TO_RR7(pa) :
	    IA64_PHYS_TO_RR6(pa);

	last_va = (void *)va;
	last_pa = pa;
	last_sz = sz;
	return (last_va);

/*
 * 'Unmap' a range mapped by pmap_mapdev().
 */
pmap_unmapdev(vm_offset_t va, vm_size_t size)

/*
 * Sets the memory attribute for the specified page.
 */
pmap_page_set_memattr_1(void *arg)

	struct ia64_pal_result res;
	register_t is;
	uintptr_t pp = (uintptr_t)arg;

	is = intr_disable();
	res = ia64_call_pal_static(pp, 0, 0, 0);
	intr_restore(is);

pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)

	struct ia64_lpte *pte;

	vm_page_lock_queues();

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		oldpmap = pmap_switch(pv->pv_pmap);
		pte = pmap_find_vhpt(pv->pv_va);
		KASSERT(pte != NULL, ("pte"));
		pmap_pte_attr(pte, ma);
		pmap_invalidate_page(pv->pv_va);
		pmap_switch(oldpmap);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();

	if (ma == VM_MEMATTR_UNCACHEABLE) {
#ifdef SMP
		smp_rendezvous(NULL, pmap_page_set_memattr_1, NULL,
		    (void *)PAL_PREFETCH_VISIBILITY);
#else
		pmap_page_set_memattr_1((void *)PAL_PREFETCH_VISIBILITY);
#endif
		va = (void *)pmap_page_to_va(m);

		cpu_flush_dcache(va, PAGE_SIZE);
#ifdef SMP
		smp_rendezvous(NULL, pmap_page_set_memattr_1, NULL,
		    (void *)PAL_MC_DRAIN);
#else
		pmap_page_set_memattr_1((void *)PAL_MC_DRAIN);
#endif
	}
/*
 * perform the pmap work for mincore
 */
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)

	struct ia64_lpte *pte, tpte;

	oldpmap = pmap_switch(pmap);
	pte = pmap_find_vhpt(addr);

	pmap_switch(oldpmap);
	if (pte == NULL || !pmap_present(pte)) {

	val = MINCORE_INCORE;
	if (pmap_dirty(pte))
		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
	if (pmap_accessed(pte))
		val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
	    pmap_managed(pte)) {
		pa = pmap_ppn(pte);
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))

	PA_UNLOCK_COND(*locked_pa);

pmap_activate(struct thread *td)

	pmap_switch(vmspace_pmap(td->td_proc->p_vmspace));

pmap_switch(pmap_t pm)

	prevpm = PCPU_GET(md.current_pmap);

	if (pm == NULL) {
		for (i = 0; i < IA64_VM_MINKERN_REGION; i++) {
			ia64_set_rr(IA64_RR_BASE(i),
			    (i << 8)|(PAGE_SHIFT << 2)|1);
		}
	} else if (pm != prevpm) {
		for (i = 0; i < IA64_VM_MINKERN_REGION; i++) {
			ia64_set_rr(IA64_RR_BASE(i),
			    (pm->pm_rid[i] << 8)|(PAGE_SHIFT << 2)|1);
		}
	}
	PCPU_SET(md.current_pmap, pm);

	return (prevpm);

pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)

	struct ia64_lpte *pte;
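
	/*
	 * Round the size up to a multiple of 32 bytes (the stride the
	 * icache sync is assumed to operate on) and walk the range
	 * page by page, syncing only pages that are actually mapped.
	 */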
	sz = (sz + 31) & ~31;

	oldpm = pmap_switch(pm);
	while (sz > 0) {
		lim = round_page(va);
		len = MIN(lim - va, sz);
		pte = pmap_find_vhpt(va);
		if (pte != NULL && pmap_present(pte))
			ia64_sync_icache(va, len);
		va += len;
		sz -= len;
	}
	pmap_switch(oldpm);

/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)

#include "opt_ddb.h"

#ifdef DDB

#include <ddb/ddb.h>

static const char* psnames[] = {
	"1B",	"2B",	"4B",	"8B",
	"16B",	"32B",	"64B",	"128B",
	"256B",	"512B",	"1K",	"2K",
	"4K",	"8K",	"16K",	"32K",
	"64K",	"128K",	"256K",	"512K",
	"1M",	"2M",	"4M",	"8M",
	"16M",	"32M",	"64M",	"128M",
	"256M",	"512M",	"1G",	"2G"
};

	struct ia64_pal_result res;

	static const char *manames[] = {
		"WB",	"bad",	"bad",	"bad",
		"UC",	"UCE",	"WC",	"NaT",
	};

	res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0);
	if (res.pal_status != 0) {
		db_printf("Can't get VM summary\n");
		return;
	}

	if (type == 0)
		maxtr = (res.pal_result[0] >> 40) & 0xff;
	else
		maxtr = (res.pal_result[0] >> 32) & 0xff;

	db_printf("V RID    Virtual Page  Physical Page PgSz ED AR PL D A MA  P KEY\n");
	for (i = 0; i <= maxtr; i++) {
		bzero(&buf, sizeof(buf));
		res = ia64_pal_physical(PAL_VM_TR_READ, i, type,
		    ia64_tpa((uint64_t)&buf));
		if (!(res.pal_result[0] & 1))
			buf.pte &= ~PTE_AR_MASK;
		if (!(res.pal_result[0] & 2))
			buf.pte &= ~PTE_PL_MASK;
		if (!(res.pal_result[0] & 4))
			pmap_clear_dirty(&buf);
		if (!(res.pal_result[0] & 8))
			buf.pte &= ~PTE_MA_MASK;
		db_printf("%d %06x %013lx %013lx %4s %d  %d  %d  %d %d %-3s "
		    "%d %06x\n", (int)buf.ifa & 1, buf.rr.rr_rid,
		    buf.ifa >> 12, (buf.pte & PTE_PPN_MASK) >> 12,
		    psnames[(buf.itir & ITIR_PS_MASK) >> 2],
		    (buf.pte & PTE_ED) ? 1 : 0,
		    (int)(buf.pte & PTE_AR_MASK) >> 9,
		    (int)(buf.pte & PTE_PL_MASK) >> 7,
		    (pmap_dirty(&buf)) ? 1 : 0,
		    (pmap_accessed(&buf)) ? 1 : 0,
		    manames[(buf.pte & PTE_MA_MASK) >> 2],
		    (pmap_present(&buf)) ? 1 : 0,
		    (int)((buf.itir & ITIR_KEY_MASK) >> 8));
	}

DB_COMMAND(itr, db_itr)

DB_COMMAND(dtr, db_dtr)

DB_COMMAND(rr, db_rr)

	printf("RR RID    PgSz VE\n");
	for (i = 0; i < 8; i++) {
		__asm __volatile ("mov %0=rr[%1]"
		    : "=r"(t)
		    : "r"(IA64_RR_BASE(i)));
		*(uint64_t *) &rr = t;
		printf("%d  %06x %4s %d\n",
		    i, rr.rr_rid, psnames[rr.rr_ps], rr.rr_ve);
	}

DB_COMMAND(thash, db_thash)

	db_printf("%p\n", (void *) ia64_thash(addr));

DB_COMMAND(ttag, db_ttag)

	db_printf("0x%lx\n", ia64_ttag(addr));

DB_COMMAND(kpte, db_kpte)

	struct ia64_lpte *pte;

	if (!have_addr) {
		db_printf("usage: kpte <kva>\n");
		return;
	}
	if (addr < VM_MIN_KERNEL_ADDRESS) {
		db_printf("kpte: error: invalid <kva>\n");
		return;
	}
	pte = pmap_find_kpte(addr);
	db_printf("kpte at %p:\n", pte);
	db_printf("  pte  =%016lx\n", pte->pte);
	db_printf("  itir =%016lx\n", pte->itir);
	db_printf("  tag  =%016lx\n", pte->tag);
	db_printf("  chain=%016lx\n", pte->chain);