/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 *	from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 */

#ifdef ARM_NEW_PMAP
#include <machine/pmap-v6.h>
#else /* ARM_NEW_PMAP */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include <machine/pte.h>
#include <machine/cpuconf.h>

#if ARM_ARCH_6 || ARM_ARCH_7A
#define PTE_PAGETABLE	6
#else
#define PTE_DEVICE	PTE_NOCACHE
#define PTE_PAGETABLE	3
#endif

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#define PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#define vtophys(va)	pmap_kextract((vm_offset_t)(va))

#define pmap_page_get_memattr(m)	((m)->md.pv_memattr)
#define pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
boolean_t pmap_page_is_mapped(vm_page_t);
#else
#define pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#endif
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by bootstrap code
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	vm_offset_t	pv_va;
	vm_paddr_t	pv_pa;
};

struct md_page {
	vm_memattr_t	pv_memattr;
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
	vm_offset_t	pv_kva;		/* first kernel VA mapping */
#endif
	TAILQ_HEAD(,pv_entry)	pv_list;
};

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define L2_BUCKET_LOG2	4
#define L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
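
/*
 * For illustration, assuming the usual 1MB granularity of an L1 section
 * (L1_S_SHIFT == 20): each L2 descriptor table covers one 1MB L1 slot,
 * so with L2_BUCKET_LOG2 == 4 a single l2_dtable tracks L2_BUCKET_SIZE
 * == 16 of them, i.e. the 16MB of contiguous virtual address space
 * mentioned above.
 */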

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define L2_SIZE		(1 << L2_LOG2)
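
/*
 * Under the same assumption (L1_S_SHIFT == 20), L2_LOG2 works out to
 * (32 - 20) - 4 == 8, so L2_SIZE == 256 l2_dtable slots per pmap; at
 * 16MB apiece that spans the full 4GB an L1 translation table can map.
 */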

struct pmap {
	struct mtx		pm_mtx;
	struct l1_ttable	*pm_l1;
	struct l2_dtable	*pm_l2[L2_SIZE];
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
#else
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
#endif
};

typedef struct pmap *pmap_t;

extern struct pmap kernel_pmap_store;
#define kernel_pmap	(&kernel_pmap_store)
#define pmap_kernel()	kernel_pmap

#define PMAP_ASSERT_LOCKED(pmap) \
				mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
#define PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define PMAP_OWNED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
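
/*
 * A minimal usage sketch of the locking macros above (not compiled;
 * "pm" stands for any pmap_t the caller holds a valid reference to):
 */
#if 0
	PMAP_LOCK(pm);
	/* ... inspect or modify pm's page tables and statistics ... */
	PMAP_UNLOCK(pm);
#endif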

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page. An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	int		pv_flags;	/* flags (wired, etc...) */
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	TAILQ_ENTRY(pv_entry)	pv_plist;
#endif
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process. This avoids the
 * need to track per-pmap assignments.
 */
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	uint32_t		pc_dummy[3];	/* aligns pv_chunk to 4KB */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};

boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);

/*
 * virtual address to page table entry and
 * to physical address. Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */

/*
 * The current top of kernel VM.
 */
extern vm_offset_t pmap_curmaxkvaddr;

void pmap_set_pcb_pagedir(pmap_t, struct pcb *);

/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vm_offset_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}
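
/*
 * Illustrative sketch only (not compiled): translating a wired kernel
 * virtual address by hand with vtopte() and the l2pte_* helpers defined
 * further down in this file.
 */
#if 0
	pt_entry_t *ptep = vtopte(va);

	if (ptep != NULL && l2pte_valid(*ptep))
		pa = l2pte_pa(*ptep) | (va & PAGE_MASK);
#endif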

extern vm_paddr_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt);
int pmap_change_attr(vm_offset_t, vm_size_t, int);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
void pmap_kremove_device(vm_offset_t, vm_size_t);
void *pmap_kenter_temporary(vm_paddr_t pa, int i);
void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
vm_paddr_t pmap_kextract(vm_offset_t va);
void pmap_kremove(vm_offset_t);
void *pmap_mapdev(vm_offset_t, vm_size_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
vm_page_t pmap_use_pt(pmap_t, vm_offset_t);
void pmap_debug(int);
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
#endif
void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int);
void
pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
    int cache);
int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);

/*
 * Definitions for MMU domains
 */
#define PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define PMAP_DOMAIN_KERNEL	0	/* The kernel uses domain #0 */

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode. So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\
				L1_S_XSCALE_TEX(TEX_XSCALE_T))

#define L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \
				L2_XSCALE_L_TEX(TEX_XSCALE_T))

#define L2_S_PROT_U_generic	(L2_AP(AP_U))
#define L2_S_PROT_W_generic	(L2_AP(AP_W))
#define L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \
				L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define L1_S_PROTO_xscale	(L1_TYPE_S)

#define L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define L1_C_PROTO_xscale	(L1_TYPE_C)

#define L2_L_PROTO		(L2_TYPE_L)

#define L2_S_PROTO_generic	(L2_TYPE_S)
#define L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
#define L2_AP(x)	(L2_AP0(x))
#else
#define L2_AP(x)	(L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))
#endif

#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
/*
 * AP[2:1] access permissions model:
 *
 * AP[2](APX)	- Write Disable
 * AP[1]	- User Enable
 * AP[0]	- Reference Flag
 *
 * AP[2]	AP[1]	Kernel	User
 *  0		 0	 R/W	 N
 *  0		 1	 R/W	 R/W
 *  1		 0	 R	 N
 *  1		 1	 R	 R
 */
#define L2_S_PROT_R		(0)		/* kernel read */
#define L2_S_PROT_U		(L2_AP0(2))	/* user read */
#define L2_S_REF		(L2_AP0(1))	/* reference flag */

#define L2_S_PROT_MASK		(L2_S_PROT_U|L2_S_PROT_R|L2_APX)
#define L2_S_EXECUTABLE(pte)	(!(pte & L2_XN))
#define L2_S_WRITABLE(pte)	(!(pte & L2_APX))
#define L2_S_REFERENCED(pte)	(!!(pte & L2_S_REF))
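
/*
 * Sketch of how the predicates above are meant to be read (not compiled;
 * "pte" is any ARMv6/v7 L2 small-page entry):
 */
#if 0
	if (L2_S_REFERENCED(pte) && L2_S_WRITABLE(pte))
		/* the mapping has been accessed and permits writes */;
#endif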

#ifndef SMP
#define L1_S_CACHE_MASK		(L1_S_TEX_MASK|L1_S_B|L1_S_C)
#define L2_L_CACHE_MASK		(L2_L_TEX_MASK|L2_B|L2_C)
#define L2_S_CACHE_MASK		(L2_S_TEX_MASK|L2_B|L2_C)
#else
#define L1_S_CACHE_MASK		(L1_S_TEX_MASK|L1_S_B|L1_S_C|L1_SHARED)
#define L2_L_CACHE_MASK		(L2_L_TEX_MASK|L2_B|L2_C|L2_SHARED)
#define L2_S_CACHE_MASK		(L2_S_TEX_MASK|L2_B|L2_C|L2_SHARED)
#endif /* SMP */

#define L1_S_PROTO		(L1_TYPE_S)
#define L1_C_PROTO		(L1_TYPE_C)
#define L2_S_PROTO		(L2_TYPE_S)

/*
 * Promotion to a 1MB (SECTION) mapping requires that the corresponding
 * 4KB (SMALL) page mappings have identical settings for the following fields:
 */
#define L2_S_PROMOTE		(L2_S_REF | L2_SHARED | L2_S_PROT_MASK | \
				 L2_XN)

/*
 * In order to compare 1MB (SECTION) entry settings with the 4KB (SMALL)
 * page mapping it is necessary to read and shift appropriate bits from
 * L1 entry to positions of the corresponding bits in the L2 entry.
 */
#define L1_S_DEMOTE(l1pd)	((((l1pd) & L1_S_PROTO) >> 0) | \
				(((l1pd) & L1_SHARED) >> 6) | \
				(((l1pd) & L1_S_REF) >> 6) | \
				(((l1pd) & L1_S_PROT_MASK) >> 6) | \
				(((l1pd) & L1_S_XN) >> 4))
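
/*
 * The shift counts above follow from the short-descriptor bit layout:
 * the shareable bit sits at bit 16 of an L1 section entry (L1_SHARED)
 * but at bit 10 of an L2 small page (L2_SHARED), hence ">> 6"; APX and
 * the AP bits move down by the same six positions, while XN moves from
 * bit 4 (L1_S_XN) to bit 0 (L2_XN), hence ">> 4".
 */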

#ifndef SMP
#define ARM_L1S_STRONG_ORD	(0)
#define ARM_L1S_DEVICE_NOSHARE	(L1_S_TEX(2))
#define ARM_L1S_DEVICE_SHARE	(L1_S_B)
#define ARM_L1S_NRML_NOCACHE	(L1_S_TEX(1))
#define ARM_L1S_NRML_IWT_OWT	(L1_S_C)
#define ARM_L1S_NRML_IWB_OWB	(L1_S_C|L1_S_B)
#define ARM_L1S_NRML_IWBA_OWBA	(L1_S_TEX(1)|L1_S_C|L1_S_B)

#define ARM_L2L_STRONG_ORD	(0)
#define ARM_L2L_DEVICE_NOSHARE	(L2_L_TEX(2))
#define ARM_L2L_DEVICE_SHARE	(L2_B)
#define ARM_L2L_NRML_NOCACHE	(L2_L_TEX(1))
#define ARM_L2L_NRML_IWT_OWT	(L2_C)
#define ARM_L2L_NRML_IWB_OWB	(L2_C|L2_B)
#define ARM_L2L_NRML_IWBA_OWBA	(L2_L_TEX(1)|L2_C|L2_B)

#define ARM_L2S_STRONG_ORD	(0)
#define ARM_L2S_DEVICE_NOSHARE	(L2_S_TEX(2))
#define ARM_L2S_DEVICE_SHARE	(L2_B)
#define ARM_L2S_NRML_NOCACHE	(L2_S_TEX(1))
#define ARM_L2S_NRML_IWT_OWT	(L2_C)
#define ARM_L2S_NRML_IWB_OWB	(L2_C|L2_B)
#define ARM_L2S_NRML_IWBA_OWBA	(L2_S_TEX(1)|L2_C|L2_B)
#else
#define ARM_L1S_STRONG_ORD	(0)
#define ARM_L1S_DEVICE_NOSHARE	(L1_S_TEX(2))
#define ARM_L1S_DEVICE_SHARE	(L1_S_B)
#define ARM_L1S_NRML_NOCACHE	(L1_S_TEX(1)|L1_SHARED)
#define ARM_L1S_NRML_IWT_OWT	(L1_S_C|L1_SHARED)
#define ARM_L1S_NRML_IWB_OWB	(L1_S_C|L1_S_B|L1_SHARED)
#define ARM_L1S_NRML_IWBA_OWBA	(L1_S_TEX(1)|L1_S_C|L1_S_B|L1_SHARED)

#define ARM_L2L_STRONG_ORD	(0)
#define ARM_L2L_DEVICE_NOSHARE	(L2_L_TEX(2))
#define ARM_L2L_DEVICE_SHARE	(L2_B)
#define ARM_L2L_NRML_NOCACHE	(L2_L_TEX(1)|L2_SHARED)
#define ARM_L2L_NRML_IWT_OWT	(L2_C|L2_SHARED)
#define ARM_L2L_NRML_IWB_OWB	(L2_C|L2_B|L2_SHARED)
#define ARM_L2L_NRML_IWBA_OWBA	(L2_L_TEX(1)|L2_C|L2_B|L2_SHARED)

#define ARM_L2S_STRONG_ORD	(0)
#define ARM_L2S_DEVICE_NOSHARE	(L2_S_TEX(2))
#define ARM_L2S_DEVICE_SHARE	(L2_B)
#define ARM_L2S_NRML_NOCACHE	(L2_S_TEX(1)|L2_SHARED)
#define ARM_L2S_NRML_IWT_OWT	(L2_C|L2_SHARED)
#define ARM_L2S_NRML_IWB_OWB	(L2_C|L2_B|L2_SHARED)
#define ARM_L2S_NRML_IWBA_OWBA	(L2_S_TEX(1)|L2_C|L2_B|L2_SHARED)
#endif /* SMP */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define L2_S_PROT_U		pte_l2_s_prot_u
#define L2_S_PROT_W		pte_l2_s_prot_w
#define L2_S_PROT_MASK		pte_l2_s_prot_mask

#define L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define L1_S_PROTO		pte_l1_s_proto
#define L1_C_PROTO		pte_l1_c_proto
#define L2_S_PROTO		pte_l2_s_proto

#elif ARM_MMU_GENERIC != 0
#define L2_S_PROT_U		L2_S_PROT_U_generic
#define L2_S_PROT_W		L2_S_PROT_W_generic
#define L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define L1_S_PROTO		L1_S_PROTO_generic
#define L1_C_PROTO		L1_C_PROTO_generic
#define L2_S_PROTO		L2_S_PROTO_generic

#elif ARM_MMU_XSCALE == 1
#define L2_S_PROT_U		L2_S_PROT_U_xscale
#define L2_S_PROT_W		L2_S_PROT_W_xscale
#define L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define L1_S_PROTO		L1_S_PROTO_xscale
#define L1_C_PROTO		L1_C_PROTO_xscale
#define L2_S_PROTO		L2_S_PROTO_xscale

#endif /* ARM_NMMUS > 1 */

#if defined(CPU_XSCALE_81342) || ARM_ARCH_6 || ARM_ARCH_7A
#define PMAP_NEEDS_PTE_SYNC	1
#define PMAP_INCLUDE_PTE_SYNC
#else
#define PMAP_NEEDS_PTE_SYNC	0
#endif

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0

#define L1_S_PROT_U		(L1_S_AP(AP_U))
#define L1_S_PROT_W		(L1_S_AP(AP_W))
#define L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)
#define L1_S_WRITABLE(pd)	((pd) & L1_S_PROT_W)

#define L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define L2_L_PROT_U		(L2_AP(AP_U))
#define L2_L_PROT_W		(L2_AP(AP_W))
#define L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))

#else

#define L1_S_PROT_U		(L1_S_AP(AP_U))
#define L1_S_PROT_W		(L1_S_APX)		/* Write disable */
#define L1_S_PROT_MASK		(L1_S_PROT_W|L1_S_PROT_U)
#define L1_S_REF		(L1_S_AP(AP_REF))	/* Reference flag */
#define L1_S_WRITABLE(pd)	(!((pd) & L1_S_PROT_W))
#define L1_S_EXECUTABLE(pd)	(!((pd) & L1_S_XN))
#define L1_S_REFERENCED(pd)	((pd) & L1_S_REF)

#define L1_S_PROT(ku, pr)	(((((ku) == PTE_KERNEL) ? 0 : L1_S_PROT_U) | \
				 (((pr) & VM_PROT_WRITE) ? 0 : L1_S_PROT_W) | \
				 (((pr) & VM_PROT_EXECUTE) ? 0 : L1_S_XN)))

#define L2_L_PROT_MASK		(L2_APX|L2_AP0(0x3))
#define L2_L_PROT(ku, pr)	(L2_L_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_APX : 0)))

#define L2_S_PROT(ku, pr)	(L2_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_APX : 0)))

#endif

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define L1_S_MAPPABLE_P(va, pa, size) \
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define L2_L_MAPPABLE_P(va, pa, size) \
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
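
/*
 * Sketch (not compiled) of the size-selection logic these predicates
 * support when static mappings are laid out: prefer the largest mapping
 * whose alignment and length requirements are met.
 */
#if 0
	if (L1_S_MAPPABLE_P(va, pa, size))
		/* use a 1MB section mapping */;
	else if (L2_L_MAPPABLE_P(va, pa, size))
		/* use a 64KB large-page mapping */;
	else
		/* fall back to 4KB small pages */;
#endif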

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define PMAP_INCLUDE_PTE_SYNC
#endif

#ifdef ARM_L2_PIPT
#define _sync_l2(pte, size)	cpu_l2cache_wb_range(vtophys(pte), size)
#else
#define _sync_l2(pte, size)	cpu_l2cache_wb_range(pte, size)
#endif

#define PTE_SYNC(pte) \
do { \
	if (PMAP_NEEDS_PTE_SYNC) { \
		cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
		cpu_drain_writebuf(); \
		_sync_l2((vm_offset_t)(pte), sizeof(pt_entry_t));\
	} else \
		cpu_drain_writebuf(); \
} while (/*CONSTCOND*/0)

#define PTE_SYNC_RANGE(pte, cnt) \
do { \
	if (PMAP_NEEDS_PTE_SYNC) { \
		cpu_dcache_wb_range((vm_offset_t)(pte), \
		    (cnt) << 2); /* * sizeof(pt_entry_t) */ \
		cpu_drain_writebuf(); \
		_sync_l2((vm_offset_t)(pte), \
		    (cnt) << 2); /* * sizeof(pt_entry_t) */ \
	} else \
		cpu_drain_writebuf(); \
} while (/*CONSTCOND*/0)
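
/*
 * Typical usage sketch (not compiled): publish a new PTE value, then make
 * sure the hardware table walker can see it before relying on the mapping.
 * "ptep" and "pa" are assumed to be supplied by the caller; the protection
 * and cache-mode symbols are the ones declared in this file.
 */
#if 0
	*ptep = L2_S_PROTO | pa |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE) |
	    pte_l2_s_cache_mode;
	PTE_SYNC(ptep);
#endif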

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
    vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);

#if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7) != 0 || defined(CPU_XSCALE_81342)
void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
void pmap_zero_page_generic(vm_paddr_t, int, int);

void pmap_pte_init_generic(void);
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
void pmap_pte_init_mmu_v6(void);
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) != 0 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7) != 0 */

#if ARM_MMU_XSCALE == 1
void pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
void pmap_zero_page_xscale(vm_paddr_t, int, int);

void pmap_pte_init_xscale(void);

void xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);

void pmap_use_minicache(vm_offset_t, vm_size_t);
#endif /* ARM_MMU_XSCALE == 1 */

#if defined(CPU_XSCALE_81342)
#define ARM_HAVE_SUPERSECTIONS
#endif

#define l1pte_valid(pde)	((pde) != 0)
#define l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define l2pte_valid(pte)	((pte) != 0)
#define l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define l2pte_minidata(pte)	(((pte) & \
				(L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				== (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define pmap_pte_v(pte)		l2pte_valid(*(pte))
#define pmap_pte_pa(pte)	l2pte_pa(*(pte))

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page. They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define PVF_MOD		0x01	/* page is modified */
#define PVF_REF		0x02	/* page is referenced */
#define PVF_WIRED	0x04	/* mapping is wired */
#define PVF_WRITE	0x08	/* mapping is writable */
#define PVF_EXEC	0x10	/* mapping is executable */
#define PVF_NC		0x20	/* mapping is non-cacheable */
#define PVF_MWC		0x40	/* mapping is used multiple times in userland */
#define PVF_UNMAN	0x80	/* mapping is unmanaged */
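
/*
 * Because the flags share one namespace, several mapping attributes can
 * be updated in a single operation; a sketch (not compiled, with "pv"
 * assumed to be a pv_entry for the mapping in question):
 */
#if 0
	pv->pv_flags &= ~(PVF_WRITE | PVF_NC);	/* clear both at once */
#endif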

void vector_page_setprot(int);

#define SECTION_CACHE	0x1
#define SECTION_PT	0x2
void pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags);
#ifdef ARM_HAVE_SUPERSECTIONS
void pmap_kenter_supersection(vm_offset_t, uint64_t, int flags);
#endif

void pmap_postinit(void);

extern vm_paddr_t dump_avail[];

#endif /* !_MACHINE_PMAP_H_ */
#endif /* !ARM_NEW_PMAP */