2 * Copyright (C) 2007 Semihalf, Rafal Jaworowski <raj@semihalf.com>
3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
22 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
24 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Some hw specific parts of this pmap were derived or influenced
29 * by NetBSD's ibm4xx pmap module. More generic code is shared with
30 * a few other pmap modules from the FreeBSD tree.
36 * Kernel and user threads run within one common virtual address space
39 * Virtual address space layout:
40 * -----------------------------
41 * 0x0000_0000 - 0xbfff_efff : user process
42 * 0xc000_0000 - 0xc1ff_ffff : kernel reserved
43 * 0xc000_0000 - kernelend : kernel code & data
44 * 0xc1ff_c000 - 0xc200_0000 : kstack0
45 * 0xc200_0000 - 0xffef_ffff : KVA
46 * 0xc200_0000 - 0xc200_3fff : reserved for page zero/copy
47 * 0xc200_4000 - ptbl buf end: reserved for ptbl bufs
48 * ptbl buf end- 0xffef_ffff : actual free KVA space
49 * 0xfff0_0000 - 0xffff_ffff : I/O devices region
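 * (Derived from the layout above, for orientation only: the kernel reserved
 * region is 32 MB, kstack0 takes the 16 KB just below 0xc200_0000, and the
 * I/O devices region at the top of the address space is 1 MB.)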
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
55 #include <sys/types.h>
56 #include <sys/param.h>
57 #include <sys/malloc.h>
60 #include <sys/queue.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/msgbuf.h>
65 #include <sys/mutex.h>
66 #include <sys/vmmeter.h>
69 #include <vm/vm_page.h>
70 #include <vm/vm_kern.h>
71 #include <vm/vm_pageout.h>
72 #include <vm/vm_extern.h>
73 #include <vm/vm_object.h>
74 #include <vm/vm_param.h>
75 #include <vm/vm_map.h>
76 #include <vm/vm_pager.h>
79 #include <machine/cpu.h>
80 #include <machine/pcb.h>
81 #include <machine/powerpc.h>
83 #include <machine/tlb.h>
84 #include <machine/spr.h>
85 #include <machine/vmparam.h>
86 #include <machine/md_var.h>
87 #include <machine/mmuvar.h>
88 #include <machine/pmap.h>
89 #include <machine/pte.h>
97 #define debugf(fmt, args...) printf(fmt, ##args)
99 #define debugf(fmt, args...)
102 #define TODO panic("%s: not implemented", __func__);
103 #define memmove(d, s, l) bcopy(s, d, l)
105 #include "opt_sched.h"
107 #error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
109 extern struct mtx sched_lock;
111 /* Kernel physical load address. */
112 extern uint32_t kernload;
114 struct mem_region availmem_regions[MEM_REGIONS];
115 int availmem_regions_sz;
117 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
118 static vm_offset_t zero_page_va;
119 static struct mtx zero_page_mutex;
122 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
123 * by the idle thread only, no lock required.
125 static vm_offset_t zero_page_idle_va;
127 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
128 static vm_offset_t copy_page_src_va;
129 static vm_offset_t copy_page_dst_va;
130 static struct mtx copy_page_mutex;
132 /**************************************************************************/
134 /**************************************************************************/
136 static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
137 vm_prot_t, boolean_t);
139 unsigned int kptbl_min; /* Index of the first kernel ptbl. */
140 unsigned int kernel_ptbls; /* Number of KVA ptbls. */
142 static int pagedaemon_waken;
145 * If a user pmap is processed with mmu_booke_remove and the resident count
146 * drops to 0, there are no more pages to remove, so we need not continue.
148 #define PMAP_REMOVE_DONE(pmap) \
149 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
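/*
 * PMAP_REMOVE_DONE is used by mmu_booke_remove() below to short-circuit a
 * bulk removal once a user pmap has no resident pages left.
 */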
151 extern void load_pid0(tlbtid_t);
153 /**************************************************************************/
154 /* TLB and TID handling */
155 /**************************************************************************/
157 /* Translation ID busy table */
158 static volatile pmap_t tidbusy[TID_MAX + 1];
161 * Actual maximum number of TLB0 entries.
162 * This number differs between e500 core revisions.
165 u_int32_t tlb0_nways;
166 u_int32_t tlb0_nentries_per_way;
168 #define TLB0_SIZE (tlb0_size)
169 #define TLB0_NWAYS (tlb0_nways)
170 #define TLB0_ENTRIES_PER_WAY (tlb0_nentries_per_way)
172 /* Pointer to kernel tlb0 table, allocated in mmu_booke_bootstrap() */
176 * Spinlock to ensure proper locking between threads and
177 * between the TLB miss handler and the kernel.
179 static struct mtx tlb0_mutex;
183 /* In-ram copy of the TLB1 */
184 static tlb_entry_t tlb1[TLB1_SIZE];
186 /* Next free entry in the TLB1 */
187 static unsigned int tlb1_idx;
189 static tlbtid_t tid_alloc(struct pmap *);
190 static void tid_flush(tlbtid_t);
192 extern void tlb1_inval_va(vm_offset_t);
193 extern void tlb0_inval_va(vm_offset_t);
195 static void tlb_print_entry(int, u_int32_t, u_int32_t, u_int32_t, u_int32_t);
197 static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, u_int32_t);
198 static void __tlb1_set_entry(unsigned int, vm_offset_t, vm_offset_t,
199 vm_size_t, u_int32_t, unsigned int, unsigned int);
200 static void tlb1_write_entry(unsigned int);
201 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
202 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);
204 static vm_size_t tsize2size(unsigned int);
205 static unsigned int size2tsize(vm_size_t);
206 static unsigned int ilog2(unsigned int);
208 static void set_mas4_defaults(void);
210 static void tlb0_inval_entry(vm_offset_t, unsigned int);
211 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
212 static void tlb0_write_entry(unsigned int, unsigned int);
213 static void tlb0_flush_entry(pmap_t, vm_offset_t);
214 static void tlb0_init(void);
216 /**************************************************************************/
217 /* Page table management */
218 /**************************************************************************/
220 /* Data for the pv entry allocation mechanism */
221 static uma_zone_t pvzone;
222 static struct vm_object pvzone_obj;
223 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
225 #define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */
227 #ifndef PMAP_SHPGPERPROC
228 #define PMAP_SHPGPERPROC 200
231 static void ptbl_init(void);
232 static struct ptbl_buf *ptbl_buf_alloc(void);
233 static void ptbl_buf_free(struct ptbl_buf *);
234 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
236 static void ptbl_alloc(mmu_t, pmap_t, unsigned int);
237 static void ptbl_free(mmu_t, pmap_t, unsigned int);
238 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
239 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
241 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
242 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
243 void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, u_int32_t);
244 static int pte_remove(mmu_t, pmap_t, vm_offset_t, u_int8_t);
246 pv_entry_t pv_alloc(void);
247 static void pv_free(pv_entry_t);
248 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
249 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
251 /* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
252 #define PTBL_BUFS (128 * 16)
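/*
 * Sizing note: with PTBL_BUFS = 2048, the KVA reserved for ptbl bufs below is
 * PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; assuming PTBL_PAGES = 2 and 4 KB pages
 * (an assumption; see pte.h for the real values) that is 16 MB of KVA.
 */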
255 TAILQ_ENTRY(ptbl_buf) link; /* list link */
256 vm_offset_t kva; /* va of mapping */
259 /* ptbl free list and a lock used for access synchronization. */
260 static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
261 static struct mtx ptbl_buf_freelist_lock;
263 /* Base address of kva space allocated for ptbl bufs. */
264 static vm_offset_t ptbl_buf_pool_vabase;
266 /* Pointer to ptbl_buf structures. */
267 static struct ptbl_buf *ptbl_bufs;
270 * Kernel MMU interface
272 static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
273 static void mmu_booke_clear_modify(mmu_t, vm_page_t);
274 static void mmu_booke_clear_reference(mmu_t, vm_page_t);
275 static void mmu_booke_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t,
277 static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
278 static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
279 vm_prot_t, boolean_t);
280 static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
281 vm_page_t, vm_prot_t);
282 static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
284 static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
285 static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
287 static void mmu_booke_init(mmu_t);
288 static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
289 static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
290 static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t);
291 static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
293 static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
294 static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
295 vm_object_t, vm_pindex_t, vm_size_t);
296 static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
297 static void mmu_booke_page_init(mmu_t, vm_page_t);
298 static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
299 static void mmu_booke_pinit(mmu_t, pmap_t);
300 static void mmu_booke_pinit0(mmu_t, pmap_t);
301 static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
303 static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
304 static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
305 static void mmu_booke_release(mmu_t, pmap_t);
306 static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
307 static void mmu_booke_remove_all(mmu_t, vm_page_t);
308 static void mmu_booke_remove_write(mmu_t, vm_page_t);
309 static void mmu_booke_zero_page(mmu_t, vm_page_t);
310 static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
311 static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
312 static void mmu_booke_activate(mmu_t, struct thread *);
313 static void mmu_booke_deactivate(mmu_t, struct thread *);
314 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
315 static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
316 static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
317 static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t);
318 static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
319 static void mmu_booke_kremove(mmu_t, vm_offset_t);
320 static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
321 static boolean_t mmu_booke_page_executable(mmu_t, vm_page_t);
323 static mmu_method_t mmu_booke_methods[] = {
324 /* pmap dispatcher interface */
325 MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring),
326 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
327 MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference),
328 MMUMETHOD(mmu_copy, mmu_booke_copy),
329 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
330 MMUMETHOD(mmu_enter, mmu_booke_enter),
331 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
332 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
333 MMUMETHOD(mmu_extract, mmu_booke_extract),
334 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
335 MMUMETHOD(mmu_init, mmu_booke_init),
336 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
337 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
338 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
339 MMUMETHOD(mmu_map, mmu_booke_map),
340 MMUMETHOD(mmu_mincore, mmu_booke_mincore),
341 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt),
342 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
343 MMUMETHOD(mmu_page_init, mmu_booke_page_init),
344 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
345 MMUMETHOD(mmu_pinit, mmu_booke_pinit),
346 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0),
347 MMUMETHOD(mmu_protect, mmu_booke_protect),
348 MMUMETHOD(mmu_qenter, mmu_booke_qenter),
349 MMUMETHOD(mmu_qremove, mmu_booke_qremove),
350 MMUMETHOD(mmu_release, mmu_booke_release),
351 MMUMETHOD(mmu_remove, mmu_booke_remove),
352 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
353 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
354 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
355 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
356 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle),
357 MMUMETHOD(mmu_activate, mmu_booke_activate),
358 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),
360 /* Internal interfaces */
361 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
362 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
363 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
364 MMUMETHOD(mmu_kenter, mmu_booke_kenter),
365 MMUMETHOD(mmu_kextract, mmu_booke_kextract),
366 /* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */
367 MMUMETHOD(mmu_page_executable, mmu_booke_page_executable),
368 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
373 static mmu_def_t booke_mmu = {
380 /* Return number of entries in TLB0. */
382 tlb0_get_tlbconf(void)
386 tlb0_cfg = mfspr(SPR_TLB0CFG);
387 tlb0_size = tlb0_cfg & TLBCFG_NENTRY_MASK;
388 tlb0_nways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
389 tlb0_nentries_per_way = tlb0_size/tlb0_nways;
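/*
 * Illustrative example: on an e500v2-style core with a 512-entry, 4-way TLB0
 * (an assumption), the above yields tlb0_size = 512, tlb0_nways = 4 and
 * tlb0_nentries_per_way = 128.
 */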
392 /* Initialize pool of kva ptbl buffers. */
398 //debugf("ptbl_init: s (ptbl_bufs = 0x%08x size 0x%08x)\n",
399 // (u_int32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
400 //debugf("ptbl_init: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)\n",
401 // ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
403 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
404 TAILQ_INIT(&ptbl_buf_freelist);
406 for (i = 0; i < PTBL_BUFS; i++) {
407 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
408 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
411 //debugf("ptbl_init: e\n");
414 /* Get a ptbl_buf from the freelist. */
415 static struct ptbl_buf *
418 struct ptbl_buf *buf;
420 //debugf("ptbl_buf_alloc: s\n");
422 mtx_lock(&ptbl_buf_freelist_lock);
423 buf = TAILQ_FIRST(&ptbl_buf_freelist);
425 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
426 mtx_unlock(&ptbl_buf_freelist_lock);
428 //debugf("ptbl_buf_alloc: e (buf = 0x%08x)\n", (u_int32_t)buf);
432 /* Return ptbl buff to free pool. */
434 ptbl_buf_free(struct ptbl_buf *buf)
437 //debugf("ptbl_buf_free: s (buf = 0x%08x)\n", (u_int32_t)buf);
439 mtx_lock(&ptbl_buf_freelist_lock);
440 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
441 mtx_unlock(&ptbl_buf_freelist_lock);
443 //debugf("ptbl_buf_free: e\n");
447 * Search the pmap's list of allocated ptbl bufs for the buf that maps
448 * the given ptbl, then remove it from the list and free it.
451 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
453 struct ptbl_buf *pbuf;
455 //debugf("ptbl_free_pmap_ptbl: s (pmap = 0x%08x ptbl = 0x%08x)\n",
456 // (u_int32_t)pmap, (u_int32_t)ptbl);
458 TAILQ_FOREACH(pbuf, &pmap->ptbl_list, link) {
459 if (pbuf->kva == (vm_offset_t)ptbl) {
460 /* Remove from pmap ptbl buf list. */
461 TAILQ_REMOVE(&pmap->ptbl_list, pbuf, link);
463 /* Free corresponding ptbl buf. */
470 //debugf("ptbl_free_pmap_ptbl: e\n");
473 /* Allocate page table. */
475 ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
477 vm_page_t mtbl[PTBL_PAGES];
479 struct ptbl_buf *pbuf;
483 //int su = (pmap == kernel_pmap);
484 //debugf("ptbl_alloc: s (pmap = 0x%08x su = %d pdir_idx = %d)\n", (u_int32_t)pmap, su, pdir_idx);
486 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
487 ("ptbl_alloc: invalid pdir_idx"));
488 KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
489 ("pte_alloc: valid ptbl entry exists!"));
491 pbuf = ptbl_buf_alloc();
493 panic("pte_alloc: couldn't alloc kernel virtual memory");
494 pmap->pm_pdir[pdir_idx] = (pte_t *)pbuf->kva;
495 //debugf("ptbl_alloc: kva = 0x%08x\n", (u_int32_t)pmap->pm_pdir[pdir_idx]);
497 /* Allocate ptbl pages, this will sleep! */
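/*
 * vm_page_alloc() may fail under memory pressure; the loop below drops the
 * page queues lock, waits for more pages to become free and then retries.
 */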
498 for (i = 0; i < PTBL_PAGES; i++) {
499 pidx = (PTBL_PAGES * pdir_idx) + i;
500 while ((m = vm_page_alloc(NULL, pidx, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
502 vm_page_unlock_queues();
504 vm_page_lock_queues();
510 /* Map in allocated pages into kernel_pmap. */
511 mmu_booke_qenter(mmu, (vm_offset_t)pmap->pm_pdir[pdir_idx], mtbl, PTBL_PAGES);
513 /* Zero whole ptbl. */
514 bzero((caddr_t)pmap->pm_pdir[pdir_idx], PTBL_PAGES * PAGE_SIZE);
516 /* Add pbuf to the pmap ptbl bufs list. */
517 TAILQ_INSERT_TAIL(&pmap->ptbl_list, pbuf, link);
519 //debugf("ptbl_alloc: e\n");
522 /* Free ptbl pages and invalidate pdir entry. */
524 ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
532 //int su = (pmap == kernel_pmap);
533 //debugf("ptbl_free: s (pmap = 0x%08x su = %d pdir_idx = %d)\n", (u_int32_t)pmap, su, pdir_idx);
535 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
536 ("ptbl_free: invalid pdir_idx"));
538 ptbl = pmap->pm_pdir[pdir_idx];
540 //debugf("ptbl_free: ptbl = 0x%08x\n", (u_int32_t)ptbl);
541 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
543 for (i = 0; i < PTBL_PAGES; i++) {
544 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
545 pa = pte_vatopa(mmu, kernel_pmap, va);
546 m = PHYS_TO_VM_PAGE(pa);
547 vm_page_free_zero(m);
548 atomic_subtract_int(&cnt.v_wire_count, 1);
549 mmu_booke_kremove(mmu, va);
552 ptbl_free_pmap_ptbl(pmap, ptbl);
553 pmap->pm_pdir[pdir_idx] = NULL;
555 //debugf("ptbl_free: e\n");
559 * Decrement ptbl pages hold count and attempt to free ptbl pages.
560 * Called when removing pte entry from ptbl.
562 * Return 1 if ptbl pages were freed.
565 ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
572 //int su = (pmap == kernel_pmap);
573 //debugf("ptbl_unhold: s (pmap = %08x su = %d pdir_idx = %d)\n",
574 // (u_int32_t)pmap, su, pdir_idx);
576 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
577 ("ptbl_unhold: invalid pdir_idx"));
578 KASSERT((pmap != kernel_pmap),
579 ("ptbl_unhold: unholding kernel ptbl!"));
581 ptbl = pmap->pm_pdir[pdir_idx];
583 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
584 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
585 ("ptbl_unhold: non kva ptbl"));
587 /* decrement hold count */
588 for (i = 0; i < PTBL_PAGES; i++) {
589 pa = pte_vatopa(mmu, kernel_pmap, (vm_offset_t)ptbl + (i * PAGE_SIZE));
590 m = PHYS_TO_VM_PAGE(pa);
595 * Free ptbl pages if there are no pte entries in this ptbl.
596 * wire_count has the same value for all ptbl pages, so check the last page.
599 if (m->wire_count == 0) {
600 ptbl_free(mmu, pmap, pdir_idx);
602 //debugf("ptbl_unhold: e (freed ptbl)\n");
606 //debugf("ptbl_unhold: e\n");
611 * Increment hold count for ptbl pages. This routine is used when a
612 * new pte entry is being inserted into the ptbl.
615 ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
622 //debugf("ptbl_hold: s (pmap = 0x%08x pdir_idx = %d)\n", (u_int32_t)pmap, pdir_idx);
624 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
625 ("ptbl_hold: invalid pdir_idx"));
626 KASSERT((pmap != kernel_pmap),
627 ("ptbl_hold: holding kernel ptbl!"));
629 ptbl = pmap->pm_pdir[pdir_idx];
631 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
633 for (i = 0; i < PTBL_PAGES; i++) {
634 pa = pte_vatopa(mmu, kernel_pmap, (vm_offset_t)ptbl + (i * PAGE_SIZE));
635 m = PHYS_TO_VM_PAGE(pa);
639 //debugf("ptbl_hold: e\n");
642 /* Allocate pv_entry structure. */
648 debugf("pv_alloc: s\n");
651 if ((pv_entry_count > pv_entry_high_water) && (pagedaemon_waken == 0)) {
652 pagedaemon_waken = 1;
653 wakeup (&vm_pages_needed);
655 pv = uma_zalloc(pvzone, M_NOWAIT);
657 debugf("pv_alloc: e\n");
661 /* Free pv_entry structure. */
663 pv_free(pv_entry_t pve)
665 //debugf("pv_free: s\n");
668 uma_zfree(pvzone, pve);
670 //debugf("pv_free: e\n");
674 /* Allocate and initialize pv_entry structure. */
676 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
680 //int su = (pmap == kernel_pmap);
681 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
682 // (u_int32_t)pmap, va, (u_int32_t)m);
686 panic("pv_insert: no pv entries!");
692 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
693 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
695 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
697 //debugf("pv_insert: e\n");
700 /* Destroy pv entry. */
702 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
706 //int su = (pmap == kernel_pmap);
707 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
709 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
710 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
713 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
714 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
715 /* remove from pv_list */
716 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
717 if (TAILQ_EMPTY(&m->md.pv_list))
718 vm_page_flag_clear(m, PG_WRITEABLE);
720 /* free pv entry struct */
727 //debugf("pv_remove: e\n");
731 * Clean pte entry, try to free page table page if requested.
733 * Return 1 if ptbl pages were freed, otherwise return 0.
736 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
738 unsigned int pdir_idx = PDIR_IDX(va);
739 unsigned int ptbl_idx = PTBL_IDX(va);
744 //int su = (pmap == kernel_pmap);
745 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
746 // su, (u_int32_t)pmap, va, flags);
748 ptbl = pmap->pm_pdir[pdir_idx];
749 KASSERT(ptbl, ("pte_remove: null ptbl"));
751 pte = &ptbl[ptbl_idx];
753 if (pte == NULL || !PTE_ISVALID(pte))
756 /* Get vm_page_t for mapped pte. */
757 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
759 if (PTE_ISWIRED(pte))
760 pmap->pm_stats.wired_count--;
762 if (!PTE_ISFAKE(pte)) {
763 /* Handle managed entry. */
764 if (PTE_ISMANAGED(pte)) {
766 /* Handle modified pages. */
767 if (PTE_ISMODIFIED(pte))
770 /* Referenced pages. */
771 if (PTE_ISREFERENCED(pte))
772 vm_page_flag_set(m, PG_REFERENCED);
774 /* Remove pv_entry from pv_list. */
775 pv_remove(pmap, va, m);
781 pmap->pm_stats.resident_count--;
783 if (flags & PTBL_UNHOLD) {
784 //debugf("pte_remove: e (unhold)\n");
785 return (ptbl_unhold(mmu, pmap, pdir_idx));
788 //debugf("pte_remove: e\n");
793 * Insert PTE for a given page and virtual address.
796 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, u_int32_t flags)
798 unsigned int pdir_idx = PDIR_IDX(va);
799 unsigned int ptbl_idx = PTBL_IDX(va);
803 //int su = (pmap == kernel_pmap);
804 //debugf("pte_enter: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
806 /* Get the page table pointer. */
807 ptbl = pmap->pm_pdir[pdir_idx];
811 * Check if there is a valid mapping for the requested
812 * va; if there is, remove it.
814 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
815 if (PTE_ISVALID(pte)) {
816 pte_remove(mmu, pmap, va, PTBL_HOLD);
819 * pte is not used, increment hold count
822 if (pmap != kernel_pmap)
823 ptbl_hold(mmu, pmap, pdir_idx);
826 /* Allocate page table pages. */
827 ptbl_alloc(mmu, pmap, pdir_idx);
830 /* Flush entry from TLB. */
831 tlb0_flush_entry(pmap, va);
833 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
836 * Insert pv_entry into pv_list for mapped page
837 * if part of managed memory.
839 if ((m->flags & PG_FICTITIOUS) == 0) {
840 if ((m->flags & PG_UNMANAGED) == 0) {
841 pte->flags |= PTE_MANAGED;
843 /* Create and insert pv entry. */
844 pv_insert(pmap, va, m);
847 pte->flags |= PTE_FAKE;
850 pmap->pm_stats.resident_count++;
851 pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
852 pte->flags |= (PTE_VALID | flags);
854 //debugf("pte_enter: e\n");
857 /* Return the pa for the given pmap/va. */
859 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
864 pte = pte_find(mmu, pmap, va);
865 if ((pte != NULL) && PTE_ISVALID(pte))
866 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
870 /* Get a pointer to a PTE in a page table. */
872 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
874 unsigned int pdir_idx = PDIR_IDX(va);
875 unsigned int ptbl_idx = PTBL_IDX(va);
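/*
 * Illustrative split, assuming 4 KB pages and 1024-entry pdir/ptbls (see
 * pte.h for the authoritative macros): for va = 0xc200_3000 this gives
 * pdir_idx = va >> 22 = 0x308 and ptbl_idx = (va >> 12) & 0x3ff = 0x3.
 */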
877 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
879 if (pmap->pm_pdir[pdir_idx])
880 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
885 /**************************************************************************/
887 /**************************************************************************/
890 * This is called during e500_init, before the system is really initialized.
893 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend)
895 vm_offset_t phys_kernelend;
896 struct mem_region *mp, *mp1;
899 u_int phys_avail_count;
900 vm_size_t physsz, hwphyssz, kstack0_sz;
901 vm_offset_t kernel_pdir, kstack0;
902 vm_paddr_t kstack0_phys;
904 debugf("mmu_booke_bootstrap: entered\n");
906 /* Align kernel start and end address (kernel image). */
907 kernelstart = trunc_page(kernelstart);
908 kernelend = round_page(kernelend);
910 /* Allocate space for the message buffer. */
911 msgbufp = (struct msgbuf *)kernelend;
912 kernelend += MSGBUF_SIZE;
913 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (u_int32_t)msgbufp,
916 kernelend = round_page(kernelend);
918 /* Allocate space for tlb0 table. */
919 tlb0_get_tlbconf(); /* Read TLB0 size and associativity. */
920 tlb0 = (tlb_entry_t *)kernelend;
921 kernelend += sizeof(tlb_entry_t) * tlb0_size;
922 debugf(" tlb0 at 0x%08x end = 0x%08x\n", (u_int32_t)tlb0, kernelend);
924 kernelend = round_page(kernelend);
926 /* Allocate space for ptbl_bufs. */
927 ptbl_bufs = (struct ptbl_buf *)kernelend;
928 kernelend += sizeof(struct ptbl_buf) * PTBL_BUFS;
929 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (u_int32_t)ptbl_bufs,
932 kernelend = round_page(kernelend);
934 /* Allocate PTE tables for kernel KVA. */
935 kernel_pdir = kernelend;
936 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
937 PDIR_SIZE - 1) / PDIR_SIZE;
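/*
 * Rough sizing, for illustration: with the ~1 GB kernel VA range from the
 * layout comment at the top and PDIR_SIZE = 4 MB (an assumption), this comes
 * to roughly 256 kernel ptbls.
 */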
938 kernelend += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
939 debugf(" kernel ptbls: %d\n", kernel_ptbls);
940 debugf(" kernel pdir at 0x%08x\n", kernel_pdir);
942 if (kernelend - kernelstart > 0x1000000) {
943 kernelend = (kernelend + 0x3fffff) & ~0x3fffff;
944 tlb1_mapin_region(kernelstart + 0x1000000,
945 kernload + 0x1000000, kernelend - kernelstart - 0x1000000);
947 kernelend = (kernelend + 0xffffff) & ~0xffffff;
950 * Clear the structures - note we can only do it safely after the
951 * possible additional TLB1 translations are in place so that
952 * the whole range up to the currently calculated 'kernelend' is covered.
954 memset((void *)tlb0, 0, sizeof(tlb_entry_t) * tlb0_size);
955 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
956 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
958 /*******************************************************/
959 /* Set the start and end of kva. */
960 /*******************************************************/
961 virtual_avail = kernelend;
962 virtual_end = VM_MAX_KERNEL_ADDRESS;
964 /* Allocate KVA space for page zero/copy operations. */
965 zero_page_va = virtual_avail;
966 virtual_avail += PAGE_SIZE;
967 zero_page_idle_va = virtual_avail;
968 virtual_avail += PAGE_SIZE;
969 copy_page_src_va = virtual_avail;
970 virtual_avail += PAGE_SIZE;
971 copy_page_dst_va = virtual_avail;
972 virtual_avail += PAGE_SIZE;
974 /* Initialize page zero/copy mutexes. */
975 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
976 mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
978 /* Initialize tlb0 table mutex. */
979 mtx_init(&tlb0_mutex, "tlb0", NULL, MTX_SPIN | MTX_RECURSE);
981 /* Allocate KVA space for ptbl bufs. */
982 ptbl_buf_pool_vabase = virtual_avail;
983 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
985 debugf("ptbl_buf_pool_vabase = 0x%08x\n", ptbl_buf_pool_vabase);
986 debugf("virtual_avail = %08x\n", virtual_avail);
987 debugf("virtual_end = %08x\n", virtual_end);
989 /* Calculate corresponding physical addresses for the kernel region. */
990 phys_kernelend = kernload + (kernelend - kernelstart);
992 debugf("kernel image and allocated data:\n");
993 debugf(" kernload = 0x%08x\n", kernload);
994 debugf(" kernelstart = 0x%08x\n", kernelstart);
995 debugf(" kernelend = 0x%08x\n", kernelend);
996 debugf(" kernel size = 0x%08x\n", kernelend - kernelstart);
998 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
999 panic("mmu_booke_bootstrap: phys_avail too small");
1002 * Remove the kernel physical address range from the avail
1003 * regions list. Page align all regions.
1004 * Non-page aligned memory isn't very interesting to us.
1005 * Also, sort the entries for ascending addresses.
1008 cnt = availmem_regions_sz;
1009 debugf("processing avail regions:\n");
1010 for (mp = availmem_regions; mp->mr_size; mp++) {
1012 e = mp->mr_start + mp->mr_size;
1013 debugf(" %08x-%08x -> ", s, e);
1014 /* Check whether this region holds all of the kernel. */
1015 if (s < kernload && e > phys_kernelend) {
1016 availmem_regions[cnt].mr_start = phys_kernelend;
1017 availmem_regions[cnt++].mr_size = e - phys_kernelend;
1020 /* Look whether this region starts within the kernel. */
1021 if (s >= kernload && s < phys_kernelend) {
1022 if (e <= phys_kernelend)
1026 /* Now look whether this region ends within the kernel. */
1027 if (e > kernload && e <= phys_kernelend) {
1032 /* Now page align the start and size of the region. */
1038 debugf("%08x-%08x = %x\n", s, e, sz);
1040 /* Check whether some memory is left here. */
1044 (cnt - (mp - availmem_regions)) * sizeof(*mp));
1050 /* Do an insertion sort. */
1051 for (mp1 = availmem_regions; mp1 < mp; mp1++)
1052 if (s < mp1->mr_start)
1055 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1063 availmem_regions_sz = cnt;
1065 /*******************************************************/
1066 /* Steal physical memory for kernel stack from the end */
1067 /* of the first avail region */
1068 /*******************************************************/
1069 kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
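/* E.g. with KSTACK_PAGES = 4 and 4 KB pages (typical, not verified here), 16 KB. */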
1070 kstack0_phys = availmem_regions[0].mr_start +
1071 availmem_regions[0].mr_size;
1072 kstack0_phys -= kstack0_sz;
1073 availmem_regions[0].mr_size -= kstack0_sz;
1075 /*******************************************************/
1076 /* Fill in phys_avail table, based on availmem_regions */
1077 /*******************************************************/
1078 phys_avail_count = 0;
1081 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1083 debugf("fill in phys_avail:\n");
1084 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
1086 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
1087 availmem_regions[i].mr_start,
1088 availmem_regions[i].mr_start + availmem_regions[i].mr_size,
1089 availmem_regions[i].mr_size);
1091 if (hwphyssz != 0 &&
1092 (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
1093 debugf(" hw.physmem adjust\n");
1094 if (physsz < hwphyssz) {
1095 phys_avail[j] = availmem_regions[i].mr_start;
1097 availmem_regions[i].mr_start +
1105 phys_avail[j] = availmem_regions[i].mr_start;
1106 phys_avail[j + 1] = availmem_regions[i].mr_start +
1107 availmem_regions[i].mr_size;
1109 physsz += availmem_regions[i].mr_size;
1111 physmem = btoc(physsz);
1113 /* Calculate the last available physical address. */
1114 for (i = 0; phys_avail[i + 2] != 0; i += 2)
1116 Maxmem = powerpc_btop(phys_avail[i + 1]);
1118 debugf("Maxmem = 0x%08lx\n", Maxmem);
1119 debugf("phys_avail_count = %d\n", phys_avail_count);
1120 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, physmem);
1122 /*******************************************************/
1123 /* Initialize (statically allocated) kernel pmap. */
1124 /*******************************************************/
1125 PMAP_LOCK_INIT(kernel_pmap);
1126 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
1128 debugf("kernel_pmap = 0x%08x\n", (u_int32_t)kernel_pmap);
1129 debugf("kptbl_min = %d, kernel_kptbls = %d\n", kptbl_min, kernel_ptbls);
1130 debugf("kernel pdir range: 0x%08x - 0x%08x\n",
1131 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);
1133 /* Initialize kernel pdir */
1134 for (i = 0; i < kernel_ptbls; i++)
1135 kernel_pmap->pm_pdir[kptbl_min + i] =
1136 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));
1138 kernel_pmap->pm_tid = KERNEL_TID;
1139 kernel_pmap->pm_active = ~0;
1141 /* Initialize tidbusy with kernel_pmap entry. */
1142 tidbusy[0] = kernel_pmap;
1144 /*******************************************************/
1146 /*******************************************************/
1147 /* Enter kstack0 into kernel map, provide guard page */
1148 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
1149 thread0.td_kstack = kstack0;
1150 thread0.td_kstack_pages = KSTACK_PAGES;
1152 debugf("kstack_sz = 0x%08x\n", kstack0_sz);
1153 debugf("kstack0_phys at 0x%08x - 0x%08x\n",
1154 kstack0_phys, kstack0_phys + kstack0_sz);
1155 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);
1157 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
1158 for (i = 0; i < KSTACK_PAGES; i++) {
1159 mmu_booke_kenter(mmu, kstack0, kstack0_phys);
1160 kstack0 += PAGE_SIZE;
1161 kstack0_phys += PAGE_SIZE;
1164 /* Initialize TLB0 handling. */
1167 debugf("mmu_booke_bootstrap: exit\n");
1171 * Get the physical page address for the given pmap/virtual address.
1174 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1179 pa = pte_vatopa(mmu, pmap, va);
1186 * Extract the physical page address associated with the given
1187 * kernel virtual address.
1190 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
1193 return (pte_vatopa(mmu, kernel_pmap, va));
1197 * Initialize the pmap module.
1198 * Called by vm_init, to initialize any structures that the pmap
1199 * system needs to map virtual memory.
1202 mmu_booke_init(mmu_t mmu)
1204 int shpgperproc = PMAP_SHPGPERPROC;
1206 //debugf("mmu_booke_init: s\n");
1209 * Initialize the address space (zone) for the pv entries. Set a
1210 * high water mark so that the system can recover from excessive
1211 * numbers of pv entries.
1213 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
1214 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1216 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1217 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
1219 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1220 pv_entry_high_water = 9 * (pv_entry_max / 10);
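/*
 * Example with illustrative numbers only: shpgperproc = 200, maxproc = 1000
 * and 64K managed pages give pv_entry_max ~= 265k and a high water mark of
 * roughly 239k pv entries.
 */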
1222 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
1224 /* Pre-fill pvzone with initial number of pv entries. */
1225 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
1227 /* Initialize ptbl allocation. */
1230 //debugf("mmu_booke_init: e\n");
1234 * Map a list of wired pages into kernel virtual address space. This is
1235 * intended for temporary mappings which do not need page modification or
1236 * references recorded. Existing mappings in the region are overwritten.
1239 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
1243 //debugf("mmu_booke_qenter: s (sva = 0x%08x count = %d)\n", sva, count);
1246 while (count-- > 0) {
1247 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1252 //debugf("mmu_booke_qenter: e\n");
1256 * Remove page mappings from kernel virtual address space. Intended for
1257 * temporary mappings entered by mmu_booke_qenter.
1260 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
1264 //debugf("mmu_booke_qremove: s (sva = 0x%08x count = %d)\n", sva, count);
1267 while (count-- > 0) {
1268 mmu_booke_kremove(mmu, va);
1272 //debugf("mmu_booke_qremove: e\n");
1276 * Map a wired page into kernel virtual address space.
1279 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
1281 unsigned int pdir_idx = PDIR_IDX(va);
1282 unsigned int ptbl_idx = PTBL_IDX(va);
1286 //debugf("mmu_booke_kenter: s (pdir_idx = %d ptbl_idx = %d va=0x%08x pa=0x%08x)\n",
1287 // pdir_idx, ptbl_idx, va, pa);
1289 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)),
1290 ("mmu_booke_kenter: invalid va"));
1293 /* assume IO mapping, set I, G bits */
1294 flags = (PTE_G | PTE_I | PTE_FAKE);
1296 /* if mapping is within system memory, do not set I, G bits */
1297 for (i = 0; i < totalmem_regions_sz; i++) {
1298 if ((pa >= totalmem_regions[i].mr_start) &&
1299 (pa < (totalmem_regions[i].mr_start +
1300 totalmem_regions[i].mr_size))) {
1301 flags &= ~(PTE_I | PTE_G | PTE_FAKE);
1309 flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
1311 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1313 if (PTE_ISVALID(pte)) {
1314 //debugf("mmu_booke_kenter: replacing entry!\n");
1316 /* Flush entry from TLB0 */
1317 tlb0_flush_entry(kernel_pmap, va);
1320 pte->rpn = pa & ~PTE_PA_MASK;
1323 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
1324 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
1325 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
1327 /* Flush the real memory from the instruction cache. */
1328 if ((flags & (PTE_I | PTE_G)) == 0) {
1329 __syncicache((void *)va, PAGE_SIZE);
1332 //debugf("mmu_booke_kenter: e\n");
1336 * Remove a page from kernel page table.
1339 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
1341 unsigned int pdir_idx = PDIR_IDX(va);
1342 unsigned int ptbl_idx = PTBL_IDX(va);
1345 //debugf("mmu_booke_kremove: s (va = 0x%08x)\n", va);
1347 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)),
1348 ("mmu_booke_kremove: invalid va"));
1350 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1352 if (!PTE_ISVALID(pte)) {
1353 //debugf("mmu_booke_kremove: e (invalid pte)\n");
1357 /* Invalidate entry in TLB0. */
1358 tlb0_flush_entry(kernel_pmap, va);
1363 //debugf("mmu_booke_kremove: e\n");
1367 * Initialize pmap associated with process 0.
1370 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
1372 //debugf("mmu_booke_pinit0: s (pmap = 0x%08x)\n", (u_int32_t)pmap);
1373 mmu_booke_pinit(mmu, pmap);
1374 PCPU_SET(curpmap, pmap);
1375 //debugf("mmu_booke_pinit0: e\n");
1379 * Initialize a preallocated and zeroed pmap structure,
1380 * such as one in a vmspace structure.
1383 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
1386 //struct thread *td;
1389 //td = PCPU_GET(curthread);
1391 //debugf("mmu_booke_pinit: s (pmap = 0x%08x)\n", (u_int32_t)pmap);
1392 //printf("mmu_booke_pinit: proc %d '%s'\n", p->p_pid, p->p_comm);
1394 KASSERT((pmap != kernel_pmap), ("mmu_booke_pinit: initializing kernel_pmap"));
1396 PMAP_LOCK_INIT(pmap);
1398 pmap->pm_active = 0;
1399 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1400 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
1402 TAILQ_INIT(&pmap->ptbl_list);
1404 //debugf("mmu_booke_pinit: e\n");
1408 * Release any resources held by the given physical map.
1409 * Called when a pmap initialized by mmu_booke_pinit is being released.
1410 * Should only be called if the map contains no valid mappings.
1413 mmu_booke_release(mmu_t mmu, pmap_t pmap)
1416 //debugf("mmu_booke_release: s\n");
1418 PMAP_LOCK_DESTROY(pmap);
1420 //debugf("mmu_booke_release: e\n");
1424 /* Not needed, kernel page tables are statically allocated. */
1426 mmu_booke_growkernel(vm_offset_t maxkvaddr)
1432 * Insert the given physical page at the specified virtual address in the
1433 * target physical map with the protection requested. If specified the page
1434 * will be wired down.
1437 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1438 vm_prot_t prot, boolean_t wired)
1440 vm_page_lock_queues();
1442 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
1443 vm_page_unlock_queues();
1448 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1449 vm_prot_t prot, boolean_t wired)
1456 pa = VM_PAGE_TO_PHYS(m);
1457 su = (pmap == kernel_pmap);
1460 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
1461 // "pa=0x%08x prot=0x%08x wired=%d)\n",
1462 // (u_int32_t)pmap, su, pmap->pm_tid,
1463 // (u_int32_t)m, va, pa, prot, wired);
1466 KASSERT(((va >= virtual_avail) && (va <= VM_MAX_KERNEL_ADDRESS)),
1467 ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
1469 KASSERT((va <= VM_MAXUSER_ADDRESS),
1470 ("mmu_booke_enter_locked: user pmap, non user va"));
1473 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1476 * If there is an existing mapping, and the physical address has not
1477 * changed, this must be a protection or wiring change.
1479 if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
1480 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
1482 //debugf("mmu_booke_enter_locked: update\n");
1484 /* Wiring change, just update stats. */
1486 if (!PTE_ISWIRED(pte)) {
1487 pte->flags |= PTE_WIRED;
1488 pmap->pm_stats.wired_count++;
1491 if (PTE_ISWIRED(pte)) {
1492 pte->flags &= ~PTE_WIRED;
1493 pmap->pm_stats.wired_count--;
1497 /* Save the old bits and clear the ones we're interested in. */
1499 pte->flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
1501 if (prot & VM_PROT_WRITE) {
1502 /* Add write permissions. */
1503 pte->flags |= PTE_SW;
1505 pte->flags |= PTE_UW;
1507 /* Handle modified pages, sense modify status. */
1508 if (PTE_ISMODIFIED(pte))
1512 /* If we're turning on execute permissions, flush the icache. */
1513 if (prot & VM_PROT_EXECUTE) {
1514 pte->flags |= PTE_SX;
1516 pte->flags |= PTE_UX;
1518 if ((flags & (PTE_UX | PTE_SX)) == 0)
1522 /* Flush the old mapping from TLB0. */
1523 pte->flags &= ~PTE_REFERENCED;
1524 tlb0_flush_entry(pmap, va);
1527 * If there is an existing mapping, but it is for a different
1528 * physical address, pte_enter() will delete the old mapping.
1530 //if ((pte != NULL) && PTE_ISVALID(pte))
1531 // debugf("mmu_booke_enter_locked: replace\n");
1533 // debugf("mmu_booke_enter_locked: new\n");
1535 /* Now set up the flags and install the new mapping. */
1536 flags = (PTE_SR | PTE_VALID);
1541 if (prot & VM_PROT_WRITE) {
1547 if (prot & VM_PROT_EXECUTE) {
1553 /* If it's wired, update stats. */
1555 pmap->pm_stats.wired_count++;
1559 pte_enter(mmu, pmap, m, va, flags);
1561 /* Flush the real memory from the instruction cache. */
1562 if (prot & VM_PROT_EXECUTE)
1566 if (sync && (su || pmap == PCPU_GET(curpmap))) {
1567 __syncicache((void *)va, PAGE_SIZE);
1572 /* Create a temporary mapping. */
1573 pmap = PCPU_GET(curpmap);
1576 pte = pte_find(mmu, pmap, va);
1577 KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__));
1579 flags = PTE_SR | PTE_VALID | PTE_UR;
1580 pte_enter(mmu, pmap, m, va, flags);
1581 __syncicache((void *)va, PAGE_SIZE);
1582 pte_remove(mmu, pmap, va, PTBL_UNHOLD);
1585 //debugf("mmu_booke_enter_locked: e\n");
1589 * Maps a sequence of resident pages belonging to the same object.
1590 * The sequence begins with the given page m_start. This page is
1591 * mapped at the given virtual address start. Each subsequent page is
1592 * mapped at a virtual address that is offset from start by the same
1593 * amount as the page is offset from m_start within the object. The
1594 * last page in the sequence is the page with the largest offset from
1595 * m_start that can be mapped at a virtual address less than the given
1596 * virtual address end. Not every virtual page between start and end
1597 * is mapped; only those for which a resident page exists with the
1598 * corresponding offset from m_start are mapped.
1601 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
1602 vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
1605 vm_pindex_t diff, psize;
1607 psize = atop(end - start);
1610 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1611 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, prot &
1612 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1613 m = TAILQ_NEXT(m, listq);
1619 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1623 //debugf("mmu_booke_enter_quick: s\n");
1626 mmu_booke_enter_locked(mmu, pmap, va, m,
1627 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1630 //debugf("mmu_booke_enter_quick e\n");
1634 * Remove the given range of addresses from the specified map.
1636 * It is assumed that the start and end are properly rounded to the page size.
1639 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
1644 int su = (pmap == kernel_pmap);
1646 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
1647 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
1650 KASSERT(((va >= virtual_avail) && (va <= VM_MAX_KERNEL_ADDRESS)),
1651 ("mmu_booke_enter: kernel pmap, non kernel va"));
1653 KASSERT((va <= VM_MAXUSER_ADDRESS),
1654 ("mmu_booke_enter: user pmap, non user va"));
1657 if (PMAP_REMOVE_DONE(pmap)) {
1658 //debugf("mmu_booke_remove: e (empty)\n");
1662 hold_flag = PTBL_HOLD_FLAG(pmap);
1663 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
1665 vm_page_lock_queues();
1667 for (; va < endva; va += PAGE_SIZE) {
1668 pte = pte_find(mmu, pmap, va);
1669 if ((pte != NULL) && PTE_ISVALID(pte)) {
1670 pte_remove(mmu, pmap, va, hold_flag);
1672 /* Flush mapping from TLB0. */
1673 tlb0_flush_entry(pmap, va);
1677 vm_page_unlock_queues();
1679 //debugf("mmu_booke_remove: e\n");
1683 * Remove physical page from all pmaps in which it resides.
1686 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
1691 //debugf("mmu_booke_remove_all: s\n");
1693 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1695 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
1696 pvn = TAILQ_NEXT(pv, pv_link);
1698 PMAP_LOCK(pv->pv_pmap);
1699 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
1700 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
1702 /* Flush mapping from TLB0. */
1703 tlb0_flush_entry(pv->pv_pmap, pv->pv_va);
1704 PMAP_UNLOCK(pv->pv_pmap);
1706 vm_page_flag_clear(m, PG_WRITEABLE);
1708 //debugf("mmu_booke_remove_all: e\n");
1712 * Map a range of physical addresses into kernel virtual address space.
1714 * The value passed in *virt is a suggested virtual address for the mapping.
1715 * Architectures which can support a direct-mapped physical to virtual region
1716 * can return the appropriate address within that region, leaving '*virt'
1717 * unchanged. We cannot and therefore do not; *virt is updated with the
1718 * first usable address after the mapped region.
1721 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1722 vm_offset_t pa_end, int prot)
1724 vm_offset_t sva = *virt;
1725 vm_offset_t va = sva;
1727 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
1728 // sva, pa_start, pa_end);
1730 while (pa_start < pa_end) {
1731 mmu_booke_kenter(mmu, va, pa_start);
1733 pa_start += PAGE_SIZE;
1737 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
1742 * The pmap must be activated before its address space can be accessed in any
1746 mmu_booke_activate(mmu_t mmu, struct thread *td)
1750 pmap = &td->td_proc->p_vmspace->vm_pmap;
1752 //debugf("mmu_booke_activate: s (proc = '%s', id = %d, pmap = 0x%08x)\n",
1753 // td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1755 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
1757 mtx_lock_spin(&sched_lock);
1759 pmap->pm_active |= PCPU_GET(cpumask);
1760 PCPU_SET(curpmap, pmap);
1765 /* Load PID0 register with pmap tid value. */
1766 load_pid0(pmap->pm_tid);
1768 mtx_unlock_spin(&sched_lock);
1770 //debugf("mmu_booke_activate: e (tid = %d for '%s')\n", pmap->pm_tid,
1771 // td->td_proc->p_comm);
1775 * Deactivate the specified process's address space.
1778 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
1782 pmap = &td->td_proc->p_vmspace->vm_pmap;
1783 pmap->pm_active &= ~(PCPU_GET(cpumask));
1784 PCPU_SET(curpmap, NULL);
1788 * Copy the range specified by src_addr/len
1789 * from the source map to the range dst_addr/len
1790 * in the destination map.
1792 * This routine is only advisory and need not do anything.
1795 mmu_booke_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
1796 vm_size_t len, vm_offset_t src_addr)
1802 * Set the physical protection on the specified range of this map as requested.
1805 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1812 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1813 mmu_booke_remove(mmu, pmap, sva, eva);
1817 if (prot & VM_PROT_WRITE)
1820 vm_page_lock_queues();
1822 for (va = sva; va < eva; va += PAGE_SIZE) {
1823 if ((pte = pte_find(mmu, pmap, va)) != NULL) {
1824 if (PTE_ISVALID(pte)) {
1825 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1827 /* Handle modified pages. */
1828 if (PTE_ISMODIFIED(pte))
1831 /* Referenced pages. */
1832 if (PTE_ISREFERENCED(pte))
1833 vm_page_flag_set(m, PG_REFERENCED);
1835 /* Flush mapping from TLB0. */
1836 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
1838 tlb0_flush_entry(pmap, va);
1843 vm_page_unlock_queues();
1847 * Clear the write and modified bits in each of the given page's mappings.
1850 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
1855 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1856 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
1857 (m->flags & PG_WRITEABLE) == 0)
1860 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1861 PMAP_LOCK(pv->pv_pmap);
1862 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
1863 if (PTE_ISVALID(pte)) {
1864 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1866 /* Handle modified pages. */
1867 if (PTE_ISMODIFIED(pte))
1870 /* Referenced pages. */
1871 if (PTE_ISREFERENCED(pte))
1872 vm_page_flag_set(m, PG_REFERENCED);
1874 /* Flush mapping from TLB0. */
1875 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
1877 tlb0_flush_entry(pv->pv_pmap, pv->pv_va);
1880 PMAP_UNLOCK(pv->pv_pmap);
1882 vm_page_flag_clear(m, PG_WRITEABLE);
1886 mmu_booke_page_executable(mmu_t mmu, vm_page_t m)
1890 boolean_t executable;
1893 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1894 PMAP_LOCK(pv->pv_pmap);
1895 pte = pte_find(mmu, pv->pv_pmap, pv->pv_va);
1896 if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX))
1898 PMAP_UNLOCK(pv->pv_pmap);
1903 return (executable);
1907 * Atomically extract and hold the physical page with the given
1908 * pmap and virtual address pair if that mapping permits the given
1912 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
1920 vm_page_lock_queues();
1922 pte = pte_find(mmu, pmap, va);
1924 if ((pte != NULL) && PTE_ISVALID(pte)) {
1925 if (pmap == kernel_pmap)
1930 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
1931 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1936 vm_page_unlock_queues();
1942 * Initialize a vm_page's machine-dependent fields.
1945 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
1948 TAILQ_INIT(&m->md.pv_list);
1952 * mmu_booke_zero_page_area zeros the specified hardware page by
1953 * mapping it into virtual memory and using bzero to clear
1956 * off and size must reside within a single page.
1959 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1963 //debugf("mmu_booke_zero_page_area: s\n");
1965 mtx_lock(&zero_page_mutex);
1968 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
1969 bzero((caddr_t)va + off, size);
1970 mmu_booke_kremove(mmu, va);
1972 mtx_unlock(&zero_page_mutex);
1974 //debugf("mmu_booke_zero_page_area: e\n");
1978 * mmu_booke_zero_page zeros the specified hardware page.
1981 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
1984 //debugf("mmu_booke_zero_page: s\n");
1985 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
1986 //debugf("mmu_booke_zero_page: e\n");
1990 * mmu_booke_copy_page copies the specified (machine independent) page by
1991 * mapping the page into virtual memory and using memcpy to copy the page,
1992 * one machine dependent page at a time.
1995 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
1997 vm_offset_t sva, dva;
1999 //debugf("mmu_booke_copy_page: s\n");
2001 mtx_lock(&copy_page_mutex);
2002 sva = copy_page_src_va;
2003 dva = copy_page_dst_va;
2005 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2006 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2007 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2008 mmu_booke_kremove(mmu, dva);
2009 mmu_booke_kremove(mmu, sva);
2011 mtx_unlock(&copy_page_mutex);
2013 //debugf("mmu_booke_copy_page: e\n");
2018 * Remove all pages from the specified address space; this aids process exit
2019 * speed. This is much faster than mmu_booke_remove in the case of running
2020 * down an entire address space. Only works for the current pmap.
2023 mmu_booke_remove_pages(pmap_t pmap)
2029 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2030 * into virtual memory and using bzero to clear its contents. This is intended
2031 * to be called from the vm_pagezero process only and outside of Giant. No
2035 mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2039 //debugf("mmu_booke_zero_page_idle: s\n");
2041 va = zero_page_idle_va;
2042 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2043 bzero((caddr_t)va, PAGE_SIZE);
2044 mmu_booke_kremove(mmu, va);
2046 //debugf("mmu_booke_zero_page_idle: e\n");
2050 * Return whether or not the specified physical page was modified
2051 * in any of the physical maps.
2054 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
2059 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2060 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2063 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2064 PMAP_LOCK(pv->pv_pmap);
2065 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2066 if (!PTE_ISVALID(pte))
2067 goto make_sure_to_unlock;
2069 if (PTE_ISMODIFIED(pte)) {
2070 PMAP_UNLOCK(pv->pv_pmap);
2074 make_sure_to_unlock:
2075 PMAP_UNLOCK(pv->pv_pmap);
2081 * Return whether or not the specified virtual address is eligible
2085 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2092 * Clear the modify bits on the specified physical page.
2095 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
2100 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2101 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2104 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2105 PMAP_LOCK(pv->pv_pmap);
2106 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2107 if (!PTE_ISVALID(pte))
2108 goto make_sure_to_unlock;
2110 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
2111 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
2113 tlb0_flush_entry(pv->pv_pmap, pv->pv_va);
2116 make_sure_to_unlock:
2117 PMAP_UNLOCK(pv->pv_pmap);
2122 * Return a count of reference bits for a page, clearing those bits.
2123 * It is not necessary for every reference bit to be cleared, but it
2124 * is necessary that 0 only be returned when there are truly no
2125 * reference bits set.
2127 * XXX: The exact number of bits to check and clear is a matter that
2128 * should be tested and standardized at some point in the future for
2129 * optimal aging of shared pages.
2132 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
2138 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2139 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2143 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2144 PMAP_LOCK(pv->pv_pmap);
2145 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2146 if (!PTE_ISVALID(pte))
2147 goto make_sure_to_unlock;
2149 if (PTE_ISREFERENCED(pte)) {
2150 pte->flags &= ~PTE_REFERENCED;
2151 tlb0_flush_entry(pv->pv_pmap, pv->pv_va);
2154 PMAP_UNLOCK(pv->pv_pmap);
2159 make_sure_to_unlock:
2160 PMAP_UNLOCK(pv->pv_pmap);
2166 * Clear the reference bit on the specified physical page.
2169 mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
2174 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2175 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2178 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2179 PMAP_LOCK(pv->pv_pmap);
2180 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2181 if (!PTE_ISVALID(pte))
2182 goto make_sure_to_unlock;
2184 if (PTE_ISREFERENCED(pte)) {
2185 pte->flags &= ~PTE_REFERENCED;
2186 tlb0_flush_entry(pv->pv_pmap, pv->pv_va);
2189 make_sure_to_unlock:
2190 PMAP_UNLOCK(pv->pv_pmap);
2195 * Change wiring attribute for a map/virtual-address pair.
2198 mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
2203 if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2205 if (!PTE_ISWIRED(pte)) {
2206 pte->flags |= PTE_WIRED;
2207 pmap->pm_stats.wired_count++;
2210 if (PTE_ISWIRED(pte)) {
2211 pte->flags &= ~PTE_WIRED;
2212 pmap->pm_stats.wired_count--;
2220 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
2221 * page. This count may be changed upwards or downwards in the future; it is
2222 * only necessary that true be returned for a small subset of pmaps for proper page aging.
2226 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2231 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2232 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2236 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2238 if (pv->pv_pmap == pmap)
2248 * Return the number of managed mappings to the given physical page that are wired.
2252 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
2258 if ((m->flags & PG_FICTITIOUS) != 0)
2260 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2262 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2263 PMAP_LOCK(pv->pv_pmap);
2264 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
2265 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
2267 PMAP_UNLOCK(pv->pv_pmap);
2274 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2280 * This currently does not work for entries that
2281 * overlap TLB1 entries.
2283 for (i = 0; i < tlb1_idx; i ++) {
2284 if (tlb1_iomapped(i, pa, size, &va) == 0)
2292 * Map a set of physical memory pages into the kernel virtual address space.
2293 * Return a pointer to where it is mapped. This routine is intended to be used
2294 * for mapping device memory, NOT real memory.
2297 mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2301 va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
2303 printf("Wiring VA=%x to PA=%x (size=%x), using TLB1[%d]\n",
2304 va, pa, size, tlb1_idx);
2305 tlb1_set_entry(va, pa, size, _TLB_ENTRY_IO);
2306 return ((void *)va);
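/*
 * Illustrative use (hypothetical addresses): callers normally reach this
 * routine through the pmap_mapdev() front end, e.g.
 *
 *	regs = pmap_mapdev(0xfe000000, 0x100000);
 *
 * Since pa >= 0x80000000 the region is mapped 1:1 (va == pa) by a fresh
 * TLB1 entry with the _TLB_ENTRY_IO (cache-inhibited, guarded) attributes;
 * smaller physical addresses are offset into the 0xe2000000 window instead.
 */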
2310 * 'Unmap' a range mapped by mmu_booke_mapdev().
2313 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2315 vm_offset_t base, offset;
2317 //debugf("mmu_booke_unmapdev: s (va = 0x%08x)\n", va);
2320 * Unmap only if this is inside kernel virtual space.
2322 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2323 base = trunc_page(va);
2324 offset = va & PAGE_MASK;
2325 size = roundup(offset + size, PAGE_SIZE);
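/*
 * Rounding example (illustrative values): va = 0xfe100123, size = 0x200
 * gives base = 0xfe100000, offset = 0x123 and a page-rounded size of
 * 0x1000, so the whole page backing the mapping is released below.
 */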
2326 kmem_free(kernel_map, base, size);
2329 //debugf("mmu_booke_unmapdev: e\n");
2333 * mmu_booke_object_init_pt preloads the ptes for a given object
2334 * into the specified pmap. This eliminates the blast of soft
2335 * faults on process startup and immediately after an mmap.
2338 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2339 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2341 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2342 KASSERT(object->type == OBJT_DEVICE,
2343 ("mmu_booke_object_init_pt: non-device object"));
2347 * Perform the pmap work for mincore.
2350 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2357 /**************************************************************************/
2359 /**************************************************************************/
2361 * Flush all entries from TLB0 matching given tid.
2364 tid_flush(tlbtid_t tid)
2366 int i, entryidx, way;
2368 //debugf("tid_flush: s (tid = %d)\n", tid);
2370 mtx_lock_spin(&tlb0_mutex);
2372 for (i = 0; i < TLB0_SIZE; i++) {
2373 if (MAS1_GETTID(tlb0[i].mas1) == tid) {
2374 way = i / TLB0_ENTRIES_PER_WAY;
2375 entryidx = i - (way * TLB0_ENTRIES_PER_WAY);
2377 //debugf("tid_flush: inval tlb0 entry %d\n", i);
2378 tlb0_inval_entry(entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT, way);
2382 mtx_unlock_spin(&tlb0_mutex);
2384 //debugf("tid_flush: e\n");
2388 * Allocate a TID. If necessary, steal one from someone else.
2389 * The new TID is flushed from the TLB before returning.
2392 tid_alloc(pmap_t pmap)
2395 static tlbtid_t next_tid = TID_MIN;
2397 //struct thread *td;
2400 //td = PCPU_GET(curthread);
2402 //debugf("tid_alloc: s (pmap = 0x%08x)\n", (u_int32_t)pmap);
2403 //printf("tid_alloc: proc %d '%s'\n", p->p_pid, p->p_comm);
2405 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
2408 * Find a likely TID, allocate unused if possible,
2409 * skip reserved entries.
2412 while (tidbusy[tid] != NULL) {
2413 if (tid == next_tid)
2423 /* Now clean it out */
2426 /* If we are stealing pmap then clear its tid */
2428 //debugf("warning: stealing tid %d\n", tid);
2429 tidbusy[tid]->pm_tid = 0;
2432 /* Calculate next tid */
2438 tidbusy[tid] = pmap;
2441 //debugf("tid_alloc: e (%02d next = %02d)\n", tid, next_tid);
2447 * Free this pmap's TID.
2450 tid_free(pmap_t pmap)
2454 oldtid = pmap->pm_tid;
2457 panic("tid_free: freeing kernel tid");
2461 if (tidbusy[oldtid] == 0)
2462 debugf("tid_free: freeing free tid %d\n", oldtid);
2463 if (tidbusy[oldtid] != pmap) {
2464 debugf("tid_free: freeing someone esle's tid\n "
2465 "tidbusy[%d] = 0x%08x pmap = 0x%08x\n",
2466 oldtid, (u_int32_t)tidbusy[oldtid], (u_int32_t)pmap);
2470 tidbusy[oldtid] = NULL;
2478 tid_print_busy(void)
2482 for (i = 0; i < TID_MAX; i++) {
2483 debugf("tid %d = pmap 0x%08x", i, (u_int32_t)tidbusy[i]);
2485 debugf(" pmap->tid = %d", tidbusy[i]->pm_tid);
2493 /**************************************************************************/
2495 /**************************************************************************/
2498 tlb_print_entry(int i, u_int32_t mas1, u_int32_t mas2, u_int32_t mas3, u_int32_t mas7)
2507 if (mas1 & MAS1_VALID)
2512 if (mas1 & MAS1_IPROT)
2517 as = (mas1 & MAS1_TS) ? 1 : 0;
2518 tid = MAS1_GETTID(mas1);
2520 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2523 size = tsize2size(tsize);
2525 debugf("%3d: (%s) [AS=%d] "
2526 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
2527 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
2528 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
2531 /* Convert TLB0 va and way number to tlb0[] table index. */
2532 static inline unsigned int
2533 tlb0_tableidx(vm_offset_t va, unsigned int way)
2537 idx = (way * TLB0_ENTRIES_PER_WAY);
2538 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
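/*
 * Illustrative example, assuming 128 entries per way and 4 KB pages
 * (set index taken from the VA bits just above the page offset):
 * va = 0xc0005000 selects set (0xc0005000 >> 12) & 0x7f = 5, so
 * way 1 yields table index 128 + 5 = 133.
 */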
2543 * Write given entry to TLB0 hardware.
2544 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
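 * (Where implemented, MAS7 carries the high-order bits of the real page
 * number used for physical addresses above 4 GB; clearing it keeps all
 * mappings within the low 4 GB.)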
2547 tlb0_write_entry(unsigned int idx, unsigned int way)
2549 u_int32_t mas0, mas7, nv;
2551 /* Clear high order RPN bits. */
2555 mas0 = mfspr(SPR_MAS0);
2556 nv = mas0 & (TLB0_NWAYS - 1);
2559 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way) | nv;
2561 //debugf("tlb0_write_entry: s (idx=%d way=%d mas0=0x%08x "
2562 // "mas1=0x%08x mas2=0x%08x mas3=0x%08x)\n",
2563 // idx, way, mas0, tlb0[idx].mas1,
2564 // tlb0[idx].mas2, tlb0[idx].mas3);
2566 mtspr(SPR_MAS0, mas0);
2567 __asm volatile("isync");
2568 mtspr(SPR_MAS1, tlb0[idx].mas1);
2569 __asm volatile("isync");
2570 mtspr(SPR_MAS2, tlb0[idx].mas2);
2571 __asm volatile("isync");
2572 mtspr(SPR_MAS3, tlb0[idx].mas3);
2573 __asm volatile("isync");
2574 mtspr(SPR_MAS7, mas7);
2575 __asm volatile("isync; tlbwe; isync; msync");
2577 //debugf("tlb0_write_entry: e\n");
2581 * Invalidate TLB0 entry, clear the corresponding tlb0 table element.
2584 tlb0_inval_entry(vm_offset_t va, unsigned int way)
2586 int idx = tlb0_tableidx(va, way);
2588 //debugf("tlb0_inval_entry: s (va=0x%08x way=%d idx=%d)\n",
2591 tlb0[idx].mas1 = 1 << MAS1_TSIZE_SHIFT; /* !MAS1_VALID */
2592 tlb0[idx].mas2 = va & MAS2_EPN;
2595 tlb0_write_entry(idx, way);
2597 //debugf("tlb0_inval_entry: e\n");
2601 * Invalidate TLB0 entry that corresponds to pmap/va.
2604 tlb0_flush_entry(pmap_t pmap, vm_offset_t va)
2608 //debugf("tlb0_flush_entry: s (pmap=0x%08x va=0x%08x)\n",
2609 // (u_int32_t)pmap, va);
2611 mtx_lock_spin(&tlb0_mutex);
2613 /* Check all TLB0 ways. */
2614 for (way = 0; way < TLB0_NWAYS; way ++) {
2615 idx = tlb0_tableidx(va, way);
2617 /* Invalidate only if entry matches va and pmap tid. */
2618 if (((MAS1_GETTID(tlb0[idx].mas1) == pmap->pm_tid) &&
2619 ((tlb0[idx].mas2 & MAS2_EPN) == va))) {
2620 tlb0_inval_entry(va, way);
2624 mtx_unlock_spin(&tlb0_mutex);
2626 //debugf("tlb0_flush_entry: e\n");
2629 /* Clean TLB0 hardware and tlb0[] table. */
2635 debugf("tlb0_init: TLB0_SIZE = %d TLB0_NWAYS = %d\n",
2636 TLB0_SIZE, TLB0_NWAYS);
2638 mtx_lock_spin(&tlb0_mutex);
2640 for (way = 0; way < TLB0_NWAYS; way ++) {
2641 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2642 tlb0_inval_entry(entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT, way);
2646 mtx_unlock_spin(&tlb0_mutex);
2651 /* Print out tlb0 entries for given va. */
2653 tlb0_print_tlbentries_va(vm_offset_t va)
2655 u_int32_t mas0, mas1, mas2, mas3, mas7;
2658 debugf("TLB0 entries for va = 0x%08x:\n", va);
2659 for (way = 0; way < TLB0_NWAYS; way ++) {
2660 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2661 mtspr(SPR_MAS0, mas0);
2662 __asm volatile("isync");
2664 mas2 = va & MAS2_EPN;
2665 mtspr(SPR_MAS2, mas2);
2666 __asm volatile("isync; tlbre");
2668 mas1 = mfspr(SPR_MAS1);
2669 mas2 = mfspr(SPR_MAS2);
2670 mas3 = mfspr(SPR_MAS3);
2671 mas7 = mfspr(SPR_MAS7);
2673 idx = tlb0_tableidx(va, way);
2674 tlb_print_entry(idx, mas1, mas2, mas3, mas7);
2678 /* Print out contents of the MAS registers for each TLB0 entry */
2680 tlb0_print_tlbentries(void)
2682 u_int32_t mas0, mas1, mas2, mas3, mas7;
2683 int entryidx, way, idx;
2685 debugf("TLB0 entries:\n");
2686 for (way = 0; way < TLB0_NWAYS; way ++) {
2687 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2689 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2690 mtspr(SPR_MAS0, mas0);
2691 __asm volatile("isync");
2693 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
2694 mtspr(SPR_MAS2, mas2);
2696 __asm volatile("isync; tlbre");
2698 mas1 = mfspr(SPR_MAS1);
2699 mas2 = mfspr(SPR_MAS2);
2700 mas3 = mfspr(SPR_MAS3);
2701 mas7 = mfspr(SPR_MAS7);
2703 idx = tlb0_tableidx(mas2, way);
2704 tlb_print_entry(idx, mas1, mas2, mas3, mas7);
2709 /* Print out kernel tlb0[] table. */
2711 tlb0_print_entries(void)
2715 debugf("tlb0[] table entries:\n");
2716 for (i = 0; i < TLB0_SIZE; i++) {
2717 tlb_print_entry(i, tlb0[i].mas1,
2718 tlb0[i].mas2, tlb0[i].mas3, 0);
2724 /**************************************************************************/
2726 /**************************************************************************/
2728 * Write given entry to TLB1 hardware.
2729 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
2732 tlb1_write_entry(unsigned int idx)
2734 u_int32_t mas0, mas7;
2736 //debugf("tlb1_write_entry: s\n");
2738 /* Clear high order RPN bits */
2742 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2743 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
2745 mtspr(SPR_MAS0, mas0);
2746 __asm volatile("isync");
2747 mtspr(SPR_MAS1, tlb1[idx].mas1);
2748 __asm volatile("isync");
2749 mtspr(SPR_MAS2, tlb1[idx].mas2);
2750 __asm volatile("isync");
2751 mtspr(SPR_MAS3, tlb1[idx].mas3);
2752 __asm volatile("isync");
2753 mtspr(SPR_MAS7, mas7);
2754 __asm volatile("isync; tlbwe; isync; msync");
2756 //debugf("tlb1_write_entry: e\n");;
2760 * Return the largest uint value log such that 2^log <= num.
2763 ilog2(unsigned int num)
2767 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
2772 * Convert TLB TSIZE value to mapped region size.
2775 tsize2size(unsigned int tsize)
2780 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
2783 return ((1 << (2 * tsize)) * 1024);
2787 * Convert region size (must be power of 4) to TLB TSIZE value.
2790 size2tsize(vm_size_t size)
2794 * tsize = log2(size) / 2 - 5
2797 return (ilog2(size) / 2 - 5);
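/*
 * Worked example: size = 16 MB = 2^24 gives ilog2(size) = 24 and
 * tsize = 24 / 2 - 5 = 7, matching tsize2size(7) above.
 */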
2801 * Setup entry in a sw tlb1 table, write entry to TLB1 hardware.
2802 * This routine is used for low level operations on the TLB1,
2803 * for creating temporary as well as permanent mappings (tlb1_set_entry).
2805 * We assume kernel mappings only, thus all entries created have supervisor
2806 * permission bits set and user permission bits cleared.
2808 * Provided mapping size must be a power of 4.
2809 * Mapping flags must be a combination of MAS2_[WIMG].
2810 * Entry TID is set to _tid, which must not exceed an 8-bit value.
2811 * Entry TS is set to either 0 or MAS1_TS based on provided _ts.
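 *
 * Illustrative call (hypothetical addresses): a temporary 1 MB supervisor
 * I/O mapping could be created with
 *	__tlb1_set_entry(idx, 0xfef00000, 0xfef00000, 0x100000,
 *	    _TLB_ENTRY_IO, KERNEL_TID, 0);
 * where 1 MB satisfies the power-of-4 size requirement (tsize = 5).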
2814 __tlb1_set_entry(unsigned int idx, vm_offset_t va, vm_offset_t pa,
2815 vm_size_t size, u_int32_t flags, unsigned int _tid, unsigned int _ts)
2820 //debugf("__tlb1_set_entry: s (idx = %d va = 0x%08x pa = 0x%08x "
2821 // "size = 0x%08x flags = 0x%08x _tid = %d _ts = %d\n",
2822 // idx, va, pa, size, flags, _tid, _ts);
2824 /* Convert size to TSIZE */
2825 tsize = size2tsize(size);
2826 //debugf("__tlb1_set_entry: tsize = %d\n", tsize);
2828 tid = (_tid << MAS1_TID_SHIFT) & MAS1_TID_MASK;
2829 ts = (_ts) ? MAS1_TS : 0;
2830 tlb1[idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
2831 tlb1[idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
2833 tlb1[idx].mas2 = (va & MAS2_EPN) | flags;
2835 /* Set supervisor rwx permission bits */
2836 tlb1[idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
2838 //debugf("__tlb1_set_entry: mas1 = %08x mas2 = %08x mas3 = 0x%08x\n",
2839 // tlb1[idx].mas1, tlb1[idx].mas2, tlb1[idx].mas3);
2841 tlb1_write_entry(idx);
2842 //debugf("__tlb1_set_entry: e\n");
2846 * Register permanent kernel mapping in TLB1.
2848 * Entries are created starting from index 0 (current free entry is
2849 * kept in tlb1_idx) and are not supposed to be invalidated.
2852 tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, u_int32_t flags)
2854 //debugf("tlb1_set_entry: s (tlb1_idx = %d va = 0x%08x pa = 0x%08x "
2855 // "size = 0x%08x flags = 0x%08x\n",
2856 // tlb1_idx, va, pa, size, flags);
2858 if (tlb1_idx >= TLB1_SIZE) {
2859 //debugf("tlb1_set_entry: e (tlb1 full!)\n");
2863 /* TS = 0, TID = 0 */
2864 __tlb1_set_entry(tlb1_idx++, va, pa, size, flags, KERNEL_TID, 0);
2865 //debugf("tlb1_set_entry: e\n");
2870 * Invalidate TLB1 entry, clear the corresponding tlb1 table element.
2871 * This routine is used to clear temporary entries created
2872 * early in locore.S or through the use of __tlb1_set_entry().
2875 tlb1_inval_entry(unsigned int idx)
2879 va = tlb1[idx].mas2 & MAS2_EPN;
2881 tlb1[idx].mas1 = 0; /* !MAS1_VALID */
2885 tlb1_write_entry(idx);
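/*
 * qsort() comparator for the entry sizes computed in tlb1_mapin_region();
 * judging from the alignment note there, it orders the sizes largest-first
 * so the biggest (most alignment-constrained) entries are mapped first.
 */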
2889 tlb1_entry_size_cmp(const void *a, const void *b)
2891 const vm_size_t *sza;
2892 const vm_size_t *szb;
2898 else if (*sza < *szb)
2905 * Map in a contiguous RAM region into TLB1 using a maximum of
2906 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
2908 * If necessary, round up the last entry size and return the total size
2909 * used by all allocated entries.
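 *
 * Worked example (illustrative): an 80 MB region is covered by a 64 MB
 * entry followed by a 16 MB entry, each entry being the largest power of
 * 4 that still fits the remaining size; only when the entry budget runs
 * out is the final entry rounded up, wasting some space.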
2912 tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size)
2914 vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES];
2915 vm_size_t mapped_size, sz, esz;
2919 debugf("tlb1_mapin_region:\n");
2920 debugf(" region size = 0x%08x va = 0x%08x pa = 0x%08x\n", size, va, pa);
2924 memset(entry_size, 0, sizeof(entry_size));
2926 /* Calculate entry sizes. */
2927 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) {
2929 /* Largest region that is a power of 4 and fits within size */
2931 esz = 1 << (2 * log);
2933 /* Minimum region size is 4KB */
2934 if (esz < (1 << 12))
2937 /* If this is the last entry, cover the remaining size. */
2938 if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) {
2943 entry_size[i] = esz;
2951 /* Sort entry sizes, required to get proper entry address alignment. */
2952 qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES,
2953 sizeof(vm_size_t), tlb1_entry_size_cmp);
2955 /* Load TLB1 entries. */
2956 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) {
2957 esz = entry_size[i];
2960 debugf(" entry %d: sz = 0x%08x (va = 0x%08x pa = 0x%08x)\n",
2961 tlb1_idx, esz, va, pa);
2962 tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM);
2968 debugf(" mapped size 0x%08x (wasted space 0x%08x)\n",
2969 mapped_size, mapped_size - size);
2971 return (mapped_size);
2975 * TLB1 initialization routine, to be called after the very first
2976 * assembler level setup done in locore.S.
2979 tlb1_init(vm_offset_t ccsrbar)
2983 /* TLB1[1] is used to map the kernel. Save that entry. */
2984 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
2985 mtspr(SPR_MAS0, mas0);
2986 __asm __volatile("isync; tlbre");
2988 tlb1[1].mas1 = mfspr(SPR_MAS1);
2989 tlb1[1].mas2 = mfspr(SPR_MAS2);
2990 tlb1[1].mas3 = mfspr(SPR_MAS3);
2992 /* Map in CCSRBAR in TLB1[0] */
2993 __tlb1_set_entry(0, CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE,
2994 _TLB_ENTRY_IO, KERNEL_TID, 0);
2996 /* Setup TLB miss defaults */
2997 set_mas4_defaults();
2999 /* Reset next available TLB1 entry index. */
3004 * Setup MAS4 defaults.
3005 * These values are loaded to MAS0-2 on a TLB miss.
3008 set_mas4_defaults(void)
3012 /* Defaults: TLB0, PID0, TSIZED=4K */
3013 mas4 = MAS4_TLBSELD0;
3014 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
3016 mtspr(SPR_MAS4, mas4);
3017 __asm volatile("isync");
3021 * Print out contents of the MAS registers for each TLB1 entry
3024 tlb1_print_tlbentries(void)
3026 u_int32_t mas0, mas1, mas2, mas3, mas7;
3029 debugf("TLB1 entries:\n");
3030 for (i = 0; i < TLB1_SIZE; i++) {
3032 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3033 mtspr(SPR_MAS0, mas0);
3035 __asm volatile("isync; tlbre");
3037 mas1 = mfspr(SPR_MAS1);
3038 mas2 = mfspr(SPR_MAS2);
3039 mas3 = mfspr(SPR_MAS3);
3040 mas7 = mfspr(SPR_MAS7);
3042 tlb_print_entry(i, mas1, mas2, mas3, mas7);
3047 * Print out contents of the in-ram tlb1 table.
3050 tlb1_print_entries(void)
3054 debugf("tlb1[] table entries:\n");
3055 for (i = 0; i < TLB1_SIZE; i++)
3056 tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
3060 * Return 0 if the physical IO range is encompassed by one of
3061 * the TLB1 entries, otherwise return a related error code.
3064 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
3067 vm_paddr_t pa_start;
3069 unsigned int entry_tsize;
3070 vm_size_t entry_size;
3072 *va = (vm_offset_t)NULL;
3074 /* Skip invalid entries */
3075 if (!(tlb1[i].mas1 & MAS1_VALID))
3079 * The entry must be cache-inhibited, guarded, and r/w
3080 * so it can function as an i/o page
3082 prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
3083 if (prot != (MAS2_I | MAS2_G))
3086 prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
3087 if (prot != (MAS3_SR | MAS3_SW))
3090 /* The address should be within the entry range. */
3091 entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3092 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
3094 entry_size = tsize2size(entry_tsize);
3095 pa_start = tlb1[i].mas3 & MAS3_RPN;
3096 pa_end = pa_start + entry_size - 1;
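/*
 * Example (illustrative): a 16 MB entry (tsize 7) with pa_start =
 * 0xfe000000 covers [0xfe000000, 0xfeffffff], so a request for
 * pa = 0xfe110000, size = 0x1000 lies inside it and *va is derived
 * from the entry's EPN plus the offset into the entry.
 */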
3098 if ((pa < pa_start) || ((pa + size) > pa_end))
3101 /* Return virtual address of this mapping. */
3102 *va = (tlb1[i].mas2 & MAS2_EPN) + (pa - pa_start);