/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1991 Regents of the University of California.
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_
/*
 * Page-directory and page-table entries follow this format, with a few
 * of the fields not present here and there, depending on a lot of things.
 */
/* ---- Intel Nomenclature ---- */
#define X86_PG_V 0x001 /* P Valid */
#define X86_PG_RW 0x002 /* R/W Read/Write */
#define X86_PG_U 0x004 /* U/S User/Supervisor */
#define X86_PG_NC_PWT 0x008 /* PWT Write through */
#define X86_PG_NC_PCD 0x010 /* PCD Cache disable */
#define X86_PG_A 0x020 /* A Accessed */
#define X86_PG_M 0x040 /* D Dirty */
#define X86_PG_PS 0x080 /* PS Page size (0=4k,1=2M) */
#define X86_PG_PTE_PAT 0x080 /* PAT PAT index */
#define X86_PG_G 0x100 /* G Global */
#define X86_PG_AVAIL1 0x200 /* / Available for system */
#define X86_PG_AVAIL2 0x400 /* < programmers use */
#define X86_PG_AVAIL3 0x800 /* \ */
#define X86_PG_PDE_PAT 0x1000 /* PAT PAT index */
#define X86_PG_NX (1ul<<63) /* No-execute */
#define X86_PG_AVAIL(x) (1ul << (x))

/* Page level cache control fields used to determine the PAT type */
#define X86_PG_PDE_CACHE (X86_PG_PDE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)
#define X86_PG_PTE_CACHE (X86_PG_PTE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)
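
/*
 * Illustrative sketch (not part of the original header): how the X86_PG_*
 * bits above combine into a leaf PTE.  The helper name and its use of plain
 * uint64_t are assumptions made for the example only.
 */
#if 0	/* example only */
static __inline uint64_t
example_kernel_pte(uint64_t pa)
{

	/* Valid, writable, global kernel mapping with default (WB) caching. */
	return (pa | X86_PG_V | X86_PG_RW | X86_PG_G);
}
#endif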
/*
 * Intel extended page table (EPT) bit definitions.
 */
#define EPT_PG_READ 0x001 /* R Read */
#define EPT_PG_WRITE 0x002 /* W Write */
#define EPT_PG_EXECUTE 0x004 /* X Execute */
#define EPT_PG_IGNORE_PAT 0x040 /* IPAT Ignore PAT */
#define EPT_PG_PS 0x080 /* PS Page size */
#define EPT_PG_A 0x100 /* A Accessed */
#define EPT_PG_M 0x200 /* D Dirty */
#define EPT_PG_MEMORY_TYPE(x) ((x) << 3) /* MT Memory Type */
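
/*
 * Illustrative sketch (not part of the original header): an EPT leaf entry
 * allowing read/write/execute access with write-back memory (type 6),
 * composed from the EPT_PG_* bits above.  The helper name is hypothetical.
 */
#if 0	/* example only */
static __inline uint64_t
example_ept_leaf(uint64_t pa)
{

	/* 6 is the x86 write-back memory type, encoded into bits 5:3. */
	return (pa | EPT_PG_READ | EPT_PG_WRITE | EPT_PG_EXECUTE |
	    EPT_PG_MEMORY_TYPE(6));
}
#endif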
/*
 * Define the PG_xx macros in terms of the bits on x86 PTEs.
 */
#define PG_RW X86_PG_RW
#define PG_NC_PWT X86_PG_NC_PWT
#define PG_NC_PCD X86_PG_NC_PCD
#define PG_PS X86_PG_PS
#define PG_PTE_PAT X86_PG_PTE_PAT
#define PG_AVAIL1 X86_PG_AVAIL1
#define PG_AVAIL2 X86_PG_AVAIL2
#define PG_AVAIL3 X86_PG_AVAIL3
#define PG_PDE_PAT X86_PG_PDE_PAT
#define PG_NX X86_PG_NX
#define PG_PDE_CACHE X86_PG_PDE_CACHE
#define PG_PTE_CACHE X86_PG_PTE_CACHE

/* Our various interpretations of the above */
#define PG_W X86_PG_AVAIL3 /* "Wired" pseudoflag */
#define PG_MANAGED X86_PG_AVAIL2
#define EPT_PG_EMUL_V X86_PG_AVAIL(52)
#define EPT_PG_EMUL_RW X86_PG_AVAIL(53)
#define PG_PROMOTED X86_PG_AVAIL(54) /* PDE only */
#define PG_FRAME (0x000ffffffffff000ul)
#define PG_PS_FRAME (0x000fffffffe00000ul)
/*
 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
 * (PTE) page mappings have identical settings for the following fields:
 */
#define PG_PTE_PROMOTE (PG_NX | PG_MANAGED | PG_W | PG_G | PG_PTE_CACHE | \
	    PG_M | PG_A | PG_U | PG_RW | PG_V)
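
/*
 * Illustrative sketch (not part of the original header): the rule above
 * means all 512 constituent PTEs must agree on the PG_PTE_PROMOTE bits
 * before a 2MB promotion can be attempted.  This simplified check ignores
 * the additional requirements (e.g. physically contiguous frames) that the
 * real promotion code in pmap.c also enforces; the helper is hypothetical.
 */
#if 0	/* example only */
static __inline int
example_promotable(const uint64_t *firstpte)
{
	uint64_t proto;
	int i;

	proto = firstpte[0] & PG_PTE_PROMOTE;
	for (i = 1; i < 512; i++) {		/* 512 == NPTEPG */
		if ((firstpte[i] & PG_PTE_PROMOTE) != proto)
			return (0);	/* attribute mismatch; no promotion */
	}
	return (1);
}
#endif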
/*
 * Page Protection Exception bits
 */
#define PGEX_P 0x01 /* Protection violation vs. not present */
#define PGEX_W 0x02 /* during a Write cycle */
#define PGEX_U 0x04 /* access from User mode (UPL) */
#define PGEX_RSV 0x08 /* reserved PTE field is non-zero */
#define PGEX_I 0x10 /* during an instruction fetch */
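
/*
 * Illustrative sketch (not part of the original header): classifying a
 * page-fault error code with the PGEX_* bits above.  The function name is
 * hypothetical; the kernel's real handling lives in trap.c.
 */
#if 0	/* example only */
static __inline const char *
example_pgex_describe(int err)
{

	if (err & PGEX_RSV)
		return ("reserved PTE bits set");
	if (err & PGEX_I)
		return ("instruction fetch fault");
	if ((err & PGEX_P) == 0)
		return ((err & PGEX_W) ? "write to non-present page" :
		    "read from non-present page");
	return ((err & PGEX_W) ? "write protection violation" :
	    "read protection violation");
}
#endif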
/*
 * undef the PG_xx macros that define bits in the regular x86 PTEs that
 * have a different position in nested PTEs. This is done when compiling
 * code that needs to be aware of the differences between regular x86 and
 * nested PTEs.
 *
 * The appropriate bitmask will be calculated at runtime based on the pmap
 * type.
 */
#ifdef AMD64_NPT_AWARE
#undef PG_AVAIL1 /* X86_PG_AVAIL1 aliases with EPT_PG_M */
/*
 * Pte related macros. This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
#define UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
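
/*
 * Illustrative worked example (not part of the original header): the
 * "-1 << 47" term in KVADDR() supplies the sign-extension bits 48..63 that
 * make a kernel address canonical.  Working the macros by hand:
 *
 *	KVADDR(256, 0, 0, 0)       == 0xffff800000000000
 *	    (first address of the upper, kernel half of the address space)
 *	UVADDR(255, 511, 511, 511) == 0x00007ffffffff000
 *	    (last user page; no sign extension, so bit 47 stays clear)
 */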
/*
 * Number of kernel PML4 slots. Can be anywhere from 1 to 64 or so,
 * but setting it larger than NDMPML4E makes no sense.
 *
 * Each slot provides .5 TB of kernel virtual space.
 */
#define NUPML4E (NPML4EPG/2) /* number of userland PML4 pages */
#define NUPDPE (NUPML4E*NPDPEPG) /* number of userland PDP pages */
#define NUPDE (NUPDPE*NPDEPG) /* number of userland PD entries */
/*
 * NDMPML4E is the maximum number of PML4 entries that will be
 * used to implement the direct map. It must be a power of two,
 * and should generally exceed NKPML4E. The maximum possible
 * value is 64; using 128 will make the direct map intrude into
 * the recursive page table map.
 */
/*
 * These values control the layout of virtual memory. The starting address
 * of the direct map, which is controlled by DMPML4I, must be a multiple of
 * its size. (See the PHYS_TO_DMAP() and DMAP_TO_PHYS() macros.)
 *
 * Note: KPML4I is the index of the (single) level 4 page that maps
 * the KVA that holds KERNBASE, while KPML4BASE is the index of the
 * first level 4 page that maps VM_MIN_KERNEL_ADDRESS. If NKPML4E
 * is 1, these are the same, otherwise KPML4BASE < KPML4I and extra
 * level 4 PDEs are needed to map from VM_MIN_KERNEL_ADDRESS up to
 * KERNBASE.
 *
 * (KPML4I combines with KPDPI to choose where KERNBASE starts.
 * Or, in other words, KPML4I provides bits 39..47 of KERNBASE,
 * and KPDPI provides bits 30..38.)
 */
#define PML4PML4I (NPML4EPG/2) /* Index of recursive pml4 mapping */

#define KPML4BASE (NPML4EPG-NKPML4E) /* KVM at highest addresses */
#define DMPML4I rounddown(KPML4BASE-NDMPML4E, NDMPML4E) /* Below KVM */

#define KPML4I (NPML4EPG-1)
#define KPDPI (NPDPEPG-2) /* kernbase at -2GB */
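
/*
 * Illustrative worked example (not part of the original header): with
 * NPML4EPG == NPDPEPG == 512, KPML4I == 511 and KPDPI == 510, so
 *
 *	KVADDR(KPML4I, KPDPI, 0, 0)
 *	    == 0xffff800000000000 | (511UL << 39) | (510UL << 30)
 *	    == 0xffffffff80000000
 *
 * which is -2GB, matching the "kernbase at -2GB" comment above.
 */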
/*
 * XXX doesn't really belong here I guess...
 */
#define ISA_HOLE_START 0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)
#define PMAP_PCID_NONE 0xffffffff
#define PMAP_PCID_KERN 0
#define PMAP_PCID_OVERMAX 0x1000
#define PMAP_PCID_OVERMAX_KERN 0x800
#define PMAP_PCID_USER_PT 0x800

#define PMAP_NO_CR3 (~0UL)
#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#include <vm/_vm_radix.h>

typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
typedef u_int64_t pdp_entry_t;
typedef u_int64_t pml4_entry_t;
/*
 * Address of current address space page table maps and directories.
 */
#define addr_PTmap (KVADDR(PML4PML4I, 0, 0, 0))
#define addr_PDmap (KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define addr_PDPmap (KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define addr_PML4map (KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define addr_PML4pml4e (addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define PTmap ((pt_entry_t *)(addr_PTmap))
#define PDmap ((pd_entry_t *)(addr_PDmap))
#define PDPmap ((pd_entry_t *)(addr_PDPmap))
#define PML4map ((pd_entry_t *)(addr_PML4map))
#define PML4pml4e ((pd_entry_t *)(addr_PML4pml4e))

extern int nkpt; /* Initial number of kernel page tables */
extern u_int64_t KPDPphys; /* physical address of kernel level 3 */
extern u_int64_t KPML4phys; /* physical address of kernel level 4 */
/*
 * virtual address to page table entry and
 * to physical address.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
pt_entry_t *vtopte(vm_offset_t);
#define vtophys(va) pmap_kextract(((vm_offset_t) (va)))
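
/*
 * Illustrative sketch (not part of the original header): how the recursive
 * map lets vtopte() be expressed as simple pointer arithmetic into the
 * PTmap window.  The real implementation is in pmap.c; this version omits
 * its assertion that the address is not a user VA.
 */
#if 0	/* example only */
static __inline pt_entry_t *
example_vtopte(vm_offset_t va)
{
	uint64_t mask;

	/* Keep the 4 x 9 index bits of the virtual page number. */
	mask = (1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT +
	    NPML4EPGSHIFT)) - 1;
	return (PTmap + ((va >> PAGE_SHIFT) & mask));
}
#endif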
#define pte_load_store(ptep, pte) atomic_swap_long(ptep, pte)
#define pte_load_clear(ptep) atomic_swap_long(ptep, 0)
#define pte_store(ptep, pte) do { \
	*(u_long *)(ptep) = (u_long)(pte); \
} while (0)
#define pte_clear(ptep) pte_store(ptep, 0)

#define pde_store(pdep, pde) pte_store(pdep, pde)
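
/*
 * Illustrative sketch (not part of the original header): because
 * pte_load_clear() is an atomic swap, a caller can tear down a mapping and
 * still see the accessed/dirty bits the MMU set.  The helper below is
 * hypothetical.
 */
#if 0	/* example only */
static __inline int
example_clear_and_test_dirty(pt_entry_t *ptep)
{
	pt_entry_t oldpte;

	oldpte = pte_load_clear(ptep);	/* atomically fetch and zero */
	return ((oldpte & X86_PG_M) != 0);
}
#endif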
extern pt_entry_t pg_nx;
	TAILQ_HEAD(, pv_entry) pv_list; /* (p) */
	int pv_gen; /* (p) */

	PT_X86, /* regular x86 page tables */
	PT_EPT, /* Intel's nested page tables */
	PT_RVI, /* AMD's nested page tables */
/*
 * The kernel virtual address (KVA) of the level 4 page table page is always
 * within the direct map (DMAP) region.
 */
	pml4_entry_t *pm_pml4; /* KVA of level 4 page table */
	pml4_entry_t *pm_pml4u; /* KVA of user l4 page table */
	TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */
	cpuset_t pm_active; /* active on cpus */
	enum pmap_type pm_type; /* regular or nested tables */
	struct pmap_statistics pm_stats; /* pmap statistics */
	struct vm_radix pm_root; /* spare page table pages */
	long pm_eptgen; /* EPT pmap generation id */
	struct pmap_pcids pm_pcids[MAXCPU];
#define PMAP_NESTED_IPIMASK 0xff
#define PMAP_PDE_SUPERPAGE (1 << 8) /* supports 2MB superpages */
#define PMAP_EMULATE_AD_BITS (1 << 9) /* needs A/D bits emulation */
#define PMAP_SUPPORTS_EXEC_ONLY (1 << 10) /* execute only mappings ok */

typedef struct pmap *pmap_t;

extern struct pmap kernel_pmap_store;
#define kernel_pmap (&kernel_pmap_store)
#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
#define PMAP_LOCK_ASSERT(pmap, type) \
	mtx_assert(&(pmap)->pm_mtx, (type))
#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \
	NULL, MTX_DEF | MTX_DUPOK)
#define PMAP_LOCKED(pmap) mtx_owned(&(pmap)->pm_mtx)
#define PMAP_MTX(pmap) (&(pmap)->pm_mtx)
#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
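
/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * for the locking macros above; "example_pmap_op" is a hypothetical caller.
 */
#if 0	/* example only */
static void
example_pmap_op(pmap_t pmap)
{

	PMAP_LOCK(pmap);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/* ... inspect or modify this pmap's page tables ... */
	PMAP_UNLOCK(pmap);
}
#endif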
int pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags);
int pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype);
/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page. An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t pv_va; /* virtual address for mapping */
	TAILQ_ENTRY(pv_entry) pv_next;
} *pv_entry_t;
/*
 * pv_entries are allocated in chunks per-process. This avoids the
 * need to track per-pmap assignments.
 */
#define PV_CHUNK_HEADER \
	TAILQ_ENTRY(pv_chunk) pc_list; \
	uint64_t pc_map[_NPCM]; /* bitmap; 1 = free */ \
	TAILQ_ENTRY(pv_chunk) pc_lru;
struct pv_chunk_header {
	PV_CHUNK_HEADER
};

struct pv_chunk {
	PV_CHUNK_HEADER
	struct pv_entry pc_pventry[_NPCPV];
};
extern caddr_t CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
extern vm_paddr_t dmaplimit;
extern int pmap_pcid_enabled;
extern int invpcid_works;

#define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode)
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))
void pmap_activate_boot(pmap_t pmap);
void pmap_activate_sw(struct thread *);
void pmap_allow_2m_x_ept_recalculate(void);
void pmap_bootstrap(vm_paddr_t *);
int pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde);
int pmap_change_attr(vm_offset_t, vm_size_t, int);
void pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
void pmap_init_pat(void);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void *pmap_kenter_temporary(vm_paddr_t pa, int i);
vm_paddr_t pmap_kextract(vm_offset_t);
void pmap_kremove(vm_offset_t);
void *pmap_mapbios(vm_paddr_t, vm_size_t);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void pmap_pinit_pml4(vm_page_t);
bool pmap_ps_enabled(pmap_t pmap);
void pmap_unmapdev(vm_offset_t, vm_size_t);
void pmap_invalidate_page(pmap_t, vm_offset_t);
void pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void pmap_invalidate_all(pmap_t);
void pmap_invalidate_cache(void);
void pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
	    boolean_t force);
void pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
void pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
void pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec);
void pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva);
void pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va);
void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
	    vm_offset_t eva);
/* Return various clipped indexes for a given VA */
static __inline vm_pindex_t
pmap_pte_index(vm_offset_t va)
{

	return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
}
static __inline vm_pindex_t
pmap_pde_index(vm_offset_t va)
{

	return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
}
static __inline vm_pindex_t
pmap_pdpe_index(vm_offset_t va)
{

	return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
}
static __inline vm_pindex_t
pmap_pml4e_index(vm_offset_t va)
{

	return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
}
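
/*
 * Illustrative sketch (not part of the original header): the index helpers
 * above split a VA into its page-table walk.  For KERNBASE
 * (0xffffffff80000000) they return 511, 510, 0 and 0, matching KPML4I and
 * KPDPI.  The helper name is hypothetical.
 */
#if 0	/* example only */
static __inline void
example_va_indexes(vm_offset_t va, vm_pindex_t idx[4])
{

	idx[0] = pmap_pml4e_index(va);	/* level 4 (PML4) slot */
	idx[1] = pmap_pdpe_index(va);	/* level 3 (PDP) slot */
	idx[2] = pmap_pde_index(va);	/* level 2 (PD) slot */
	idx[3] = pmap_pte_index(va);	/* level 1 (PT) slot */
}
#endif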
#endif /* !_MACHINE_PMAP_H_ */