/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler; this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory of lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 *	from: src/sys/i386/include/pmap.h,v 1.65.2.2 2000/11/30 01:54:42 peter
 *	JNPR: pmap.h,v 1.7.2.1 2007/09/10 07:44:12 girish
 */
#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <machine/vmparam.h>
/*
 * Pte related macros
 */
#define	VADDR(pdi, pti)	((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
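/*
 * Worked example (illustrative, with assumed values): with 4K pages
 * (PAGE_SHIFT == 12) and PDRSHIFT == 22, VADDR(1, 2) ==
 * (1 << 22) | (2 << 12) == 0x402000.
 */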
#define	NKPT		120	/* actual number of kernel page tables */

#ifndef NKPDE
#define	NKPDE		255	/* addressable number of page tables/pde's */
#endif

#define	KPTDI		(VM_MIN_KERNEL_ADDRESS >> SEGSHIFT)
#define	NUSERPGTBLS	(VM_MAXUSER_ADDRESS >> SEGSHIFT)
#ifndef	LOCORE

#include <sys/queue.h>
#include <machine/pte.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
/*
 * Pmap stuff
 */
struct pv_entry;

struct md_page {
	int pv_list_count;
	int pv_flags;
	TAILQ_HEAD(, pv_entry) pv_list;
};

#define	PV_TABLE_MOD		0x01	/* modified */
#define	PV_TABLE_REF		0x02	/* referenced */
#define	ASID_BITS		8
#define	ASIDGEN_BITS		(32 - ASID_BITS)
#define	ASIDGEN_MASK		((1 << ASIDGEN_BITS) - 1)
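/*
 * Worked example (illustrative): with ASID_BITS == 8, ASIDGEN_BITS is
 * 24 and ASIDGEN_MASK is 0xffffff, so an asid/gen pair packs exactly
 * into one 32-bit word.
 */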
struct pmap {
	pd_entry_t *pm_segtab;			/* KVA of segment table */
	TAILQ_HEAD(, pv_entry) pm_pvlist;	/* list of mappings in
						 * pmap */
	int pm_active;				/* active on cpus */
	struct {
		u_int32_t asid:ASID_BITS;	/* TLB address space tag */
		u_int32_t gen:ASIDGEN_BITS;	/* its generation number */
	} pm_asid[MAXSMPCPU];
	struct pmap_statistics pm_stats;	/* pmap statistics */
	struct vm_page *pm_ptphint;		/* pmap ptp hint */
	struct mtx pm_mtx;
};

typedef struct pmap *pmap_t;
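/*
 * Background note (illustrative, not from the original source): MIPS TLB
 * entries are tagged with an ASID, so several address spaces can coexist
 * in the TLB without a flush on every context switch.  The gen field lets
 * the kernel notice a stale ASID lazily: when the per-CPU ASID allocator
 * wraps, the generation number is bumped, and a pmap whose recorded gen
 * no longer matches is simply handed a fresh ASID on its next activation.
 */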
#ifdef	_KERNEL

#include <sys/lock.h>
#include <sys/proc.h>
#include <vm/vm_map.h>
pt_entry_t *pmap_pte(pmap_t, vm_offset_t);
pd_entry_t pmap_segmap(pmap_t pmap, vm_offset_t va);
vm_offset_t pmap_kextract(vm_offset_t va);

extern pmap_t kernel_pmap;

#define	vtophys(va)	pmap_kextract(((vm_offset_t)(va)))
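/*
 * Example (illustrative, not part of the original header): vtophys()
 * resolves a kernel virtual address to its physical address, e.g.
 *
 *	vm_paddr_t pa = (vm_paddr_t)vtophys(kernel_buf);
 *
 * where kernel_buf is any address backed by the kernel pmap (the name
 * is hypothetical).
 */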
#define	PMAP_LOCK(pmap)			mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type)	mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)		mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)		mtx_init(&(pmap)->pm_mtx, "pmap", \
					    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)		mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)			(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)		mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)		mtx_unlock(&(pmap)->pm_mtx)
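/*
 * Typical usage (illustrative sketch, not from the original source):
 * page-table manipulation is bracketed by the pmap mutex, e.g.
 *
 *	PMAP_LOCK(pmap);
 *	pte = pmap_pte(pmap, va);
 *	if (pte != NULL)
 *		pmap_update_page(pmap, va, *pte);
 *	PMAP_UNLOCK(pmap);
 *
 * PMAP_LOCK_ASSERT(pmap, MA_OWNED) is the usual sanity check inside
 * helpers that require the lock to be held on entry.
 */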
#define	PMAP_LGMEM_LOCK_INIT(sysmap)	mtx_init(&(sysmap)->lock, "pmap-lgmem", \
					    "per-cpu-map", (MTX_DEF | MTX_DUPOK))
#define	PMAP_LGMEM_LOCK(sysmap)		mtx_lock(&(sysmap)->lock)
#define	PMAP_LGMEM_UNLOCK(sysmap)	mtx_unlock(&(sysmap)->lock)
#define	PMAP_LGMEM_DESTROY(sysmap)	mtx_destroy(&(sysmap)->lock)
/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t pv_pmap;		/* pmap where mapping lies */
	vm_offset_t pv_va;	/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry) pv_list;
	TAILQ_ENTRY(pv_entry) pv_plist;
	vm_page_t pv_ptem;	/* VM page for pte */
	boolean_t pv_wired;	/* whether this entry is wired */
} *pv_entry_t;
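/*
 * Illustrative sketch (not from the original source): every mapping of a
 * managed page can be visited through the page's pv_list, and every
 * mapping in a pmap through pm_pvlist via pv_plist, e.g.
 *
 *	pv_entry_t pv;
 *
 *	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
 *		printf("va %p in pmap %p, wired=%d\n",
 *		    (void *)pv->pv_va, pv->pv_pmap, pv->pv_wired);
 */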
#if defined(DIAGNOSTIC)
#define	PMAP_DIAGNOSTIC
#endif

#if !defined(PMAP_DIAGNOSTIC)
#define	PMAP_INLINE	__inline
#else
#define	PMAP_INLINE
#endif
extern vm_offset_t avail_end;
extern vm_offset_t avail_start;
extern vm_offset_t phys_avail[];
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
extern pd_entry_t *segbase;

extern vm_paddr_t mips_wired_tlb_physmem_start;
extern vm_paddr_t mips_wired_tlb_physmem_end;
extern u_int need_wired_tlb_page_pool;

#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#define	pmap_kernel()		kernel_pmap
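/*
 * Example (illustrative, not from the original source): a cheap guard
 * before doing per-mapping work on a page, e.g.
 *
 *	if (pmap_page_is_mapped(m))
 *		pmap_remove_all(m);
 *
 * pmap_remove_all() is the machine-independent pmap interface that
 * strips every mapping of a managed page.
 */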
void pmap_bootstrap(void);
void *pmap_mapdev(vm_offset_t, vm_size_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
vm_offset_t pmap_steal_memory(vm_size_t size);
void pmap_set_modified(vm_offset_t pa);
int page_is_managed(vm_offset_t pa);
void pmap_page_is_free(vm_page_t m);
void pmap_kushmem_reattach(struct proc *);
/* PMAP_INLINE */ void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
/* PMAP_INLINE */ void pmap_kremove(vm_offset_t va);
void *pmap_kenter_temporary(vm_paddr_t pa, int i);
void pmap_kenter_temporary_free(vm_paddr_t pa);
int pmap_compute_pages_to_dump(void);
void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
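/*
 * Illustrative usage (sketch, not from the original source): a wired
 * kernel mapping of one page is entered and later torn down with
 *
 *	pmap_kenter(va, pa);
 *	... access the page through va ...
 *	pmap_kremove(va);
 *
 * pmap_kenter_temporary() serves the same purpose for short-lived
 * mappings and returns the KVA to use; the coredump path pairs it with
 * pmap_kenter_temporary_free().
 */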
/*
 * floating virtual pages (FPAGES)
 *
 * These are the reserved virtual memory areas which can be
 * mapped to any physical memory.
 */
#define	FPAGES			2
#define	FPAGES_SHARED		2
#define	FSPACE			((FPAGES * MAXCPU + FPAGES_SHARED) * PAGE_SIZE)
#define	PMAP_FPAGE1		0x00	/* Used by pmap_zero_page &
					 * pmap_copy_page */
#define	PMAP_FPAGE2		0x01	/* Used by pmap_copy_page */

#define	PMAP_FPAGE3		0x00	/* Used by pmap_zero_page_idle */
#define	PMAP_FPAGE_KENTER_TEMP	0x01	/* Used by coredump */
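/*
 * Worked example (illustrative, with assumed values): with FPAGES == 2,
 * FPAGES_SHARED == 2, PAGE_SIZE == 4096 and a hypothetical MAXCPU of 4,
 * FSPACE == (2 * 4 + 2) * 4096 == 40960 bytes, i.e. ten reserved pages
 * of kernel virtual address space.
 */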
struct fpage {
	vm_offset_t kva;
	u_int state;
};

struct sysmaps {
	struct mtx lock;
	struct fpage fp[FPAGES];
};

vm_offset_t pmap_map_fpage(vm_paddr_t pa, struct fpage *fp,
    boolean_t check_unmaped);
void pmap_unmap_fpage(vm_paddr_t pa, struct fpage *fp);
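/*
 * Illustrative sketch (not from the original source): pmap_zero_page()-
 * style code maps the target physical page at a floating page, touches
 * it through the returned KVA, and unmaps it again, e.g.
 *
 *	va = pmap_map_fpage(pa, &sysm->fp[PMAP_FPAGE1], FALSE);
 *	bzero((caddr_t)va, PAGE_SIZE);
 *	pmap_unmap_fpage(pa, &sysm->fp[PMAP_FPAGE1]);
 *
 * where sysm points at the current CPU's struct sysmaps (variable name
 * assumed) and is held across the sequence with PMAP_LGMEM_LOCK().
 */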
#endif	/* _KERNEL */

#endif	/* !LOCORE */

#endif	/* !_MACHINE_PMAP_H_ */