2 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
3 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #ifndef _MACHINE_PMAP_VAR_H_
31 #define _MACHINE_PMAP_VAR_H_
33 #include <machine/cpu-v6.h>
34 #include <machine/pte-v6.h>
36 * Various PMAP defines, exports, and inline functions
37 * definitions also usable in other MD code.
/*
 * Derived page-table geometry constants.  NB_IN_PT1, NB_IN_PT2 and
 * NPTE2_IN_PT2 (byte sizes / entry counts) are defined elsewhere,
 * presumably in <machine/pte-v6.h> included above -- TODO confirm.
 */
40 /* A number of pages in L1 page table. */
41 #define NPG_IN_PT1 (NB_IN_PT1 / PAGE_SIZE)
43 /* A number of L2 page tables in a page. */
44 #define NPT2_IN_PG (PAGE_SIZE / NB_IN_PT2)
46 /* A number of L2 page table entries in a page. */
47 #define NPTE2_IN_PG (NPT2_IN_PG * NPTE2_IN_PT2)
52 * A L2 page tables page contains NPT2_IN_PG L2 page tables. Masking of
53 * pte1_idx by PT2PG_MASK gives us an index to associated L2 page table
54 * in a page. The PT2PG_SHIFT definition depends on NPT2_IN_PG strictly.
55 * I.e., (1 << PT2PG_SHIFT) == NPT2_IN_PG must be fulfilled.
/*
 * NOTE(review): the #define of PT2PG_SHIFT itself is not visible in this
 * chunk (extraction gap between source lines 55 and 58) -- it must satisfy
 * the invariant stated above; verify against the full file.
 */
58 #define PT2PG_MASK ((1 << PT2PG_SHIFT) - 1)
61 * A PT2TAB holds all allocated L2 page table pages in a pmap.
62 * Right shifting of virtual address by PT2TAB_SHIFT gives us an index
63 * to L2 page table page in PT2TAB which holds the address mapping.
/* Number of entries: one per page of L2 tables covering the whole PT1. */
65 #define PT2TAB_ENTRIES (NPTE1_IN_PT1 / NPT2_IN_PG)
/* Shift folds the per-page table index into the pte1 index (see PT2PG). */
66 #define PT2TAB_SHIFT (PTE1_SHIFT + PT2PG_SHIFT)
69 * All allocated L2 page table pages in a pmap are mapped into PT2MAP space.
70 * A virtual address right shifting by PT2MAP_SHIFT gives us an index to PTE2
71 * which maps the address.
/* Total bytes needed to map every L2 table for a full PT1. */
73 #define PT2MAP_SIZE (NPTE1_IN_PT1 * NB_IN_PT2)
/* One PTE2 per page, so the index shift is simply PTE2_SHIFT. */
74 #define PT2MAP_SHIFT PTE2_SHIFT
/* Kernel L1 page table; used by kern_pte1() below. */
76 extern pt1_entry_t *kern_pt1;
/* Kernel PT2TAB; used by kern_pt2tab_entry() below. */
77 extern pt2_entry_t *kern_pt2tab;
/* Base of the PT2MAP mapping region; indexed by pt2map_entry() below. */
78 extern pt2_entry_t *PT2MAP;
81 * Virtual interface for L1 page table management.
/* Index of the L1 entry translating va (va >> PTE1_SHIFT). */
85 pte1_index(vm_offset_t va)
88 return (va >> PTE1_SHIFT);
/* Address of the L1 entry for va within the given table pt1. */
91 static __inline pt1_entry_t *
92 pte1_ptr(pt1_entry_t *pt1, vm_offset_t va)
95 return (pt1 + pte1_index(va));
/* Round va down to a PTE1_FRAME boundary. */
98 static __inline vm_offset_t
99 pte1_trunc(vm_offset_t va)
102 return (va & PTE1_FRAME);
/* Round va up to the next PTE1_FRAME boundary (va + offset, then mask). */
105 static __inline vm_offset_t
106 pte1_roundup(vm_offset_t va)
109 return ((va + PTE1_OFFSET) & PTE1_FRAME);
113 * Virtual interface for L1 page table entries management.
115 * XXX: Some of the following functions now with a synchronization barrier
116 * are called in a loop, so it could be useful to have two versions of them.
117 * One with the barrier and one without the barrier. In this case, pure
118 * barrier pte1_sync() should be implemented as well.
/*
 * Write back the cache line holding *pte1p so the hardware table walker
 * sees it, but only when the walker is not cache-coherent.  With
 * PMAP_PTE_NOCACHE defined, PTEs are mapped uncacheable and no clean is
 * needed.
 */
121 pte1_sync(pt1_entry_t *pte1p)
125 #ifndef PMAP_PTE_NOCACHE
126 if (!cpuinfo.coherent_walk)
127 dcache_wb_pou((vm_offset_t)pte1p, sizeof(*pte1p));
/* Same as pte1_sync() but for an arbitrary byte range of L1 entries. */
132 pte1_sync_range(pt1_entry_t *pte1p, vm_size_t size)
136 #ifndef PMAP_PTE_NOCACHE
137 if (!cpuinfo.coherent_walk)
138 dcache_wb_pou((vm_offset_t)pte1p, size);
/*
 * Store a new L1 entry with release semantics.
 * NOTE(review): the embedded source line numbers jump here (146 -> 151),
 * so statements after the store (likely a pte1_sync() call) are missing
 * from this extraction -- confirm against the full file.
 */
143 pte1_store(pt1_entry_t *pte1p, pt1_entry_t pte1)
146 atomic_store_rel_int(pte1p, pte1);
/* Invalidate the entry by storing zero (goes through pte1_store). */
151 pte1_clear(pt1_entry_t *pte1p)
154 pte1_store(pte1p, 0);
/* Atomically clear bit(s) in an L1 entry. */
158 pte1_clear_bit(pt1_entry_t *pte1p, uint32_t bit)
161 atomic_clear_int(pte1p, bit);
/*
 * Compare-and-set on an L1 entry; on success the entry is synced for the
 * table walker.  Returns the atomic_cmpset_int() result.
 */
165 static __inline boolean_t
166 pte1_cmpset(pt1_entry_t *pte1p, pt1_entry_t opte1, pt1_entry_t npte1)
170 ret = atomic_cmpset_int(pte1p, opte1, npte1);
171 if (ret) pte1_sync(pte1p);
/* True if the L1 entry is a link (coarse table pointer, L1_TYPE_C). */
176 static __inline boolean_t
177 pte1_is_link(pt1_entry_t pte1)
180 return ((pte1 & L1_TYPE_MASK) == L1_TYPE_C);
/* True if the L1 entry maps a section directly (L1_TYPE_S). */
184 pte1_is_section(pt1_entry_t pte1)
187 return ((pte1 & L1_TYPE_MASK) == L1_TYPE_S);
/* Dirty == writable and already modified: neither no-modify nor read-only. */
190 static __inline boolean_t
191 pte1_is_dirty(pt1_entry_t pte1)
194 return ((pte1 & (PTE1_NM | PTE1_RO)) == 0);
/* Global mapping when the not-global (nG) bit is clear. */
197 static __inline boolean_t
198 pte1_is_global(pt1_entry_t pte1)
201 return ((pte1 & PTE1_NG) == 0);
/* Valid when the entry is either a link or a section. */
204 static __inline boolean_t
205 pte1_is_valid(pt1_entry_t pte1)
209 l1_type = pte1 & L1_TYPE_MASK;
210 return ((l1_type == L1_TYPE_C) || (l1_type == L1_TYPE_S));
/* Wired (unevictable) when the software PTE1_W bit is set. */
213 static __inline boolean_t
214 pte1_is_wired(pt1_entry_t pte1)
217 return (pte1 & PTE1_W);
/*
 * Read an L1 entry.  NOTE(review): the function body is not visible in
 * this extraction (source lines 222-228 missing) -- do not infer its
 * exact load semantics from here.
 */
220 static __inline pt1_entry_t
221 pte1_load(pt1_entry_t *pte1p)
/* Atomically read and zero an L1 entry; returns the old value (return
 * statement not visible in this extraction). */
229 static __inline pt1_entry_t
230 pte1_load_clear(pt1_entry_t *pte1p)
234 opte1 = atomic_readandclear_int(pte1p);
/* Atomically set bit(s) in an L1 entry. */
240 pte1_set_bit(pt1_entry_t *pte1p, uint32_t bit)
243 atomic_set_int(pte1p, bit);
/* Physical address of the section mapped by a section entry. */
247 static __inline vm_paddr_t
248 pte1_pa(pt1_entry_t pte1)
251 return ((vm_paddr_t)(pte1 & PTE1_FRAME));
/* Physical address of the L2 table referenced by a link (coarse) entry. */
254 static __inline vm_paddr_t
255 pte1_link_pa(pt1_entry_t pte1)
258 return ((vm_paddr_t)(pte1 & L1_C_ADDR_MASK));
262 * Virtual interface for L2 page table entries management.
264 * XXX: Some of the following functions now with a synchronization barrier
265 * are called in a loop, so it could be useful to have two versions of them.
266 * One with the barrier and one without the barrier.
/*
 * Write back the cache line holding *pte2p for a non-coherent table
 * walker; no-op when PTEs are mapped uncacheable (PMAP_PTE_NOCACHE).
 * Mirrors pte1_sync() above.
 */
270 pte2_sync(pt2_entry_t *pte2p)
274 #ifndef PMAP_PTE_NOCACHE
275 if (!cpuinfo.coherent_walk)
276 dcache_wb_pou((vm_offset_t)pte2p, sizeof(*pte2p));
/* Range form of pte2_sync(). */
281 pte2_sync_range(pt2_entry_t *pte2p, vm_size_t size)
285 #ifndef PMAP_PTE_NOCACHE
286 if (!cpuinfo.coherent_walk)
287 dcache_wb_pou((vm_offset_t)pte2p, size);
/*
 * Store a new L2 entry with release semantics.
 * NOTE(review): source line numbers jump (295 -> 300), so a following
 * pte2_sync() call may be missing from this extraction -- confirm.
 */
292 pte2_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
295 atomic_store_rel_int(pte2p, pte2);
/* Invalidate the entry by storing zero (goes through pte2_store). */
300 pte2_clear(pt2_entry_t *pte2p)
303 pte2_store(pte2p, 0);
/* Atomically clear bit(s) in an L2 entry. */
307 pte2_clear_bit(pt2_entry_t *pte2p, uint32_t bit)
310 atomic_clear_int(pte2p, bit);
/* Compare-and-set on an L2 entry; sync for the walker on success. */
314 static __inline boolean_t
315 pte2_cmpset(pt2_entry_t *pte2p, pt2_entry_t opte2, pt2_entry_t npte2)
319 ret = atomic_cmpset_int(pte2p, opte2, npte2);
320 if (ret) pte2_sync(pte2p);
/* Dirty == writable and already modified: neither no-modify nor read-only. */
325 static __inline boolean_t
326 pte2_is_dirty(pt2_entry_t pte2)
329 return ((pte2 & (PTE2_NM | PTE2_RO)) == 0);
/* Global mapping when the not-global (nG) bit is clear. */
332 static __inline boolean_t
333 pte2_is_global(pt2_entry_t pte2)
336 return ((pte2 & PTE2_NG) == 0);
/* Valid when the PTE2_V bit is set. */
339 static __inline boolean_t
340 pte2_is_valid(pt2_entry_t pte2)
343 return (pte2 & PTE2_V);
/* Wired (unevictable) when the software PTE2_W bit is set. */
346 static __inline boolean_t
347 pte2_is_wired(pt2_entry_t pte2)
350 return (pte2 & PTE2_W);
/*
 * Read an L2 entry.  NOTE(review): the function body is not visible in
 * this extraction (source lines 355-361 missing).
 */
353 static __inline pt2_entry_t
354 pte2_load(pt2_entry_t *pte2p)
/* Atomically read and zero an L2 entry; returns the old value (return
 * statement not visible in this extraction). */
362 static __inline pt2_entry_t
363 pte2_load_clear(pt2_entry_t *pte2p)
367 opte2 = atomic_readandclear_int(pte2p);
/* Atomically set bit(s) in an L2 entry. */
373 pte2_set_bit(pt2_entry_t *pte2p, uint32_t bit)
376 atomic_set_int(pte2p, bit);
/*
 * Set or clear the software wired bit.  The if/else framing around the
 * two atomics is not visible in this extraction (line-number gaps) --
 * presumably set when 'wired' is true, clear otherwise; confirm.
 */
381 pte2_set_wired(pt2_entry_t *pte2p, boolean_t wired)
385 * Wired bit is transparent for page table walk,
386 * so pte2_sync() is not needed.
389 atomic_set_int(pte2p, PTE2_W);
391 atomic_clear_int(pte2p, PTE2_W);
/* Physical address of the page frame mapped by the entry. */
394 static __inline vm_paddr_t
395 pte2_pa(pt2_entry_t pte2)
398 return ((vm_paddr_t)(pte2 & PTE2_FRAME));
/* Memory-attribute bits of the entry (PTE2_ATTR_MASK selection). */
401 static __inline u_int
402 pte2_attr(pt2_entry_t pte2)
405 return ((u_int)(pte2 & PTE2_ATTR_MASK));
409 * Virtual interface for L2 page tables mapping management.
/* Index into a PT2TAB for va (va >> PT2TAB_SHIFT). */
412 static __inline u_int
413 pt2tab_index(vm_offset_t va)
416 return (va >> PT2TAB_SHIFT);
/* Address of the PT2TAB entry covering va. */
419 static __inline pt2_entry_t *
420 pt2tab_entry(pt2_entry_t *pt2tab, vm_offset_t va)
423 return (pt2tab + pt2tab_index(va));
/* PT2TAB entries are pt2_entry_t, so store/load reuse the pte2 helpers. */
427 pt2tab_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
430 pte2_store(pte2p,pte2);
/* Thin wrapper over pte2_load() for PT2TAB entries. */
433 static __inline pt2_entry_t
434 pt2tab_load(pt2_entry_t *pte2p)
437 return (pte2_load(pte2p));
/* Thin wrapper over pte2_load_clear() for PT2TAB entries. */
440 static __inline pt2_entry_t
441 pt2tab_load_clear(pt2_entry_t *pte2p)
444 return (pte2_load_clear(pte2p));
/* Index into PT2MAP for va (va >> PT2MAP_SHIFT). */
447 static __inline u_int
448 pt2map_index(vm_offset_t va)
451 return (va >> PT2MAP_SHIFT);
/* Address of the PTE2 in PT2MAP space that maps va. */
454 static __inline pt2_entry_t *
455 pt2map_entry(vm_offset_t va)
458 return (PT2MAP + pt2map_index(va));
462 * Virtual interface for pmap structure & kernel shortcuts.
/* L1 entry for va in the given pmap (pmap->pm_pt1). */
465 static __inline pt1_entry_t *
466 pmap_pte1(pmap_t pmap, vm_offset_t va)
469 return (pte1_ptr(pmap->pm_pt1, va));
/* L1 entry for va in the kernel page table. */
472 static __inline pt1_entry_t *
473 kern_pte1(vm_offset_t va)
476 return (pte1_ptr(kern_pt1, va));
/* PT2TAB entry for va in the given pmap (pmap->pm_pt2tab). */
479 static __inline pt2_entry_t *
480 pmap_pt2tab_entry(pmap_t pmap, vm_offset_t va)
483 return (pt2tab_entry(pmap->pm_pt2tab, va));
/* PT2TAB entry for va in the kernel PT2TAB. */
486 static __inline pt2_entry_t *
487 kern_pt2tab_entry(vm_offset_t va)
490 return (pt2tab_entry(kern_pt2tab, va));
/*
 * vm_page backing the L2 table page that covers va in the given pmap:
 * read the PT2TAB entry and convert its frame address to a vm_page.
 */
493 static __inline vm_page_t
494 pmap_pt2_page(pmap_t pmap, vm_offset_t va)
498 pte2 = pte2_load(pmap_pt2tab_entry(pmap, va));
499 return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
/* Same as pmap_pt2_page() but for the kernel PT2TAB. */
502 static __inline vm_page_t
503 kern_pt2_page(vm_offset_t va)
507 pte2 = pte2_load(kern_pt2tab_entry(va));
508 return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
512 #endif /* !_MACHINE_PMAP_VAR_H_ */