2 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * Some hw specific parts of this pmap were derived or influenced
27 * by NetBSD's ibm4xx pmap module. More generic code is shared with
28 * a few other pmap modules from the FreeBSD tree.
34 * Kernel and user threads run within one common virtual address space
37 * Virtual address space layout:
38 * -----------------------------
39 * 0x0000_0000 - 0xafff_ffff : user process
40 * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.)
41 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved
42 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc.
43 * 0xc100_0000 - 0xfeef_ffff : KVA
44 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
45 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
46 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0
47 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space
48 * 0xfef0_0000 - 0xffff_ffff : I/O devices region
51 #include <sys/cdefs.h>
52 __FBSDID("$FreeBSD$");
54 #include <sys/types.h>
55 #include <sys/param.h>
56 #include <sys/malloc.h>
60 #include <sys/queue.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/msgbuf.h>
65 #include <sys/mutex.h>
67 #include <sys/vmmeter.h>
70 #include <vm/vm_page.h>
71 #include <vm/vm_kern.h>
72 #include <vm/vm_pageout.h>
73 #include <vm/vm_extern.h>
74 #include <vm/vm_object.h>
75 #include <vm/vm_param.h>
76 #include <vm/vm_map.h>
77 #include <vm/vm_pager.h>
80 #include <machine/bootinfo.h>
81 #include <machine/cpu.h>
82 #include <machine/pcb.h>
83 #include <machine/platform.h>
85 #include <machine/tlb.h>
86 #include <machine/spr.h>
87 #include <machine/vmparam.h>
88 #include <machine/md_var.h>
89 #include <machine/mmuvar.h>
90 #include <machine/pmap.h>
91 #include <machine/pte.h>
99 #define debugf(fmt, args...) printf(fmt, ##args)
101 #define debugf(fmt, args...)
104 #define TODO panic("%s: not implemented", __func__);
106 #include "opt_sched.h"
108 #error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
110 extern struct mtx sched_lock;
112 extern int dumpsys_minidump;
114 extern unsigned char _etext[];
115 extern unsigned char _end[];
117 /* Kernel physical load address. */
118 extern uint32_t kernload;
119 vm_offset_t kernstart;
122 /* Message buffer and tables. */
123 static vm_offset_t data_start;
124 static vm_size_t data_end;
126 /* Phys/avail memory regions. */
127 static struct mem_region *availmem_regions;
128 static int availmem_regions_sz;
129 static struct mem_region *physmem_regions;
130 static int physmem_regions_sz;
132 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
133 static vm_offset_t zero_page_va;
134 static struct mtx zero_page_mutex;
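/* Mutex used to serialize TLB invalidation (tlbivax) sequences across CPUs. */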
136 static struct mtx tlbivax_mutex;
139 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
140 * by the idle thread only, no lock required.
142 static vm_offset_t zero_page_idle_va;
144 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
145 static vm_offset_t copy_page_src_va;
146 static vm_offset_t copy_page_dst_va;
147 static struct mtx copy_page_mutex;
149 /**************************************************************************/
151 /**************************************************************************/
153 static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
154 vm_prot_t, boolean_t);
156 unsigned int kptbl_min; /* Index of the first kernel ptbl. */
157 unsigned int kernel_ptbls; /* Number of KVA ptbls. */
160 * If user pmap is processed with mmu_booke_remove and the resident count
161 * drops to 0, there are no more pages to remove, so we need not continue.
163 #define PMAP_REMOVE_DONE(pmap) \
164 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
166 extern void tlb_lock(uint32_t *);
167 extern void tlb_unlock(uint32_t *);
168 extern void tid_flush(tlbtid_t);
170 /**************************************************************************/
171 /* TLB and TID handling */
172 /**************************************************************************/
174 /* Translation ID busy table */
175 static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
178 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
179 * core revisions and should be read from h/w registers during early config.
181 uint32_t tlb0_entries;
183 uint32_t tlb0_entries_per_way;
185 #define TLB0_ENTRIES (tlb0_entries)
186 #define TLB0_WAYS (tlb0_ways)
187 #define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way)
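/* TLB1 on e500 cores has a fixed 16 entries, used for static mappings. */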
189 #define TLB1_ENTRIES 16
191 /* In-ram copy of the TLB1 */
192 static tlb_entry_t tlb1[TLB1_ENTRIES];
194 /* Next free entry in the TLB1 */
195 static unsigned int tlb1_idx;
197 static tlbtid_t tid_alloc(struct pmap *);
199 static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
201 static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
202 static void tlb1_write_entry(unsigned int);
203 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
204 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);
206 static vm_size_t tsize2size(unsigned int);
207 static unsigned int size2tsize(vm_size_t);
208 static unsigned int ilog2(unsigned int);
210 static void set_mas4_defaults(void);
212 static inline void tlb0_flush_entry(vm_offset_t);
213 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
215 /**************************************************************************/
216 /* Page table management */
217 /**************************************************************************/
219 /* Data for the pv entry allocation mechanism */
220 static uma_zone_t pvzone;
221 static struct vm_object pvzone_obj;
222 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
224 #define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */
226 #ifndef PMAP_SHPGPERPROC
227 #define PMAP_SHPGPERPROC 200
230 static void ptbl_init(void);
231 static struct ptbl_buf *ptbl_buf_alloc(void);
232 static void ptbl_buf_free(struct ptbl_buf *);
233 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
235 static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
236 static void ptbl_free(mmu_t, pmap_t, unsigned int);
237 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
238 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
240 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
241 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
242 static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
243 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
245 static pv_entry_t pv_alloc(void);
246 static void pv_free(pv_entry_t);
247 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
248 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
250 /* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
251 #define PTBL_BUFS (128 * 16)
254 TAILQ_ENTRY(ptbl_buf) link; /* list link */
255 vm_offset_t kva; /* va of mapping */
258 /* ptbl free list and a lock used for access synchronization. */
259 static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
260 static struct mtx ptbl_buf_freelist_lock;
262 /* Base address of kva space allocated for ptbl bufs. */
263 static vm_offset_t ptbl_buf_pool_vabase;
265 /* Pointer to ptbl_buf structures. */
266 static struct ptbl_buf *ptbl_bufs;
268 void pmap_bootstrap_ap(volatile uint32_t *);
271 * Kernel MMU interface
273 static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
274 static void mmu_booke_clear_modify(mmu_t, vm_page_t);
275 static void mmu_booke_clear_reference(mmu_t, vm_page_t);
276 static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
277 vm_size_t, vm_offset_t);
278 static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
279 static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
280 vm_prot_t, boolean_t);
281 static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
282 vm_page_t, vm_prot_t);
283 static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
285 static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
286 static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
288 static void mmu_booke_init(mmu_t);
289 static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
290 static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
291 static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t);
292 static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
294 static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
295 static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
296 vm_object_t, vm_pindex_t, vm_size_t);
297 static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
298 static void mmu_booke_page_init(mmu_t, vm_page_t);
299 static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
300 static void mmu_booke_pinit(mmu_t, pmap_t);
301 static void mmu_booke_pinit0(mmu_t, pmap_t);
302 static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
304 static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
305 static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
306 static void mmu_booke_release(mmu_t, pmap_t);
307 static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
308 static void mmu_booke_remove_all(mmu_t, vm_page_t);
309 static void mmu_booke_remove_write(mmu_t, vm_page_t);
310 static void mmu_booke_zero_page(mmu_t, vm_page_t);
311 static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
312 static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
313 static void mmu_booke_activate(mmu_t, struct thread *);
314 static void mmu_booke_deactivate(mmu_t, struct thread *);
315 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
316 static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
317 static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
318 static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t);
319 static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
320 static void mmu_booke_kremove(mmu_t, vm_offset_t);
321 static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
322 static boolean_t mmu_booke_page_executable(mmu_t, vm_page_t);
323 static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
324 vm_size_t, vm_size_t *);
325 static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
326 vm_size_t, vm_offset_t);
327 static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *);
329 static mmu_method_t mmu_booke_methods[] = {
330 /* pmap dispatcher interface */
331 MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring),
332 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
333 MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference),
334 MMUMETHOD(mmu_copy, mmu_booke_copy),
335 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
336 MMUMETHOD(mmu_enter, mmu_booke_enter),
337 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
338 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
339 MMUMETHOD(mmu_extract, mmu_booke_extract),
340 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
341 MMUMETHOD(mmu_init, mmu_booke_init),
342 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
343 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
344 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
345 MMUMETHOD(mmu_map, mmu_booke_map),
346 MMUMETHOD(mmu_mincore, mmu_booke_mincore),
347 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt),
348 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
349 MMUMETHOD(mmu_page_init, mmu_booke_page_init),
350 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
351 MMUMETHOD(mmu_pinit, mmu_booke_pinit),
352 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0),
353 MMUMETHOD(mmu_protect, mmu_booke_protect),
354 MMUMETHOD(mmu_qenter, mmu_booke_qenter),
355 MMUMETHOD(mmu_qremove, mmu_booke_qremove),
356 MMUMETHOD(mmu_release, mmu_booke_release),
357 MMUMETHOD(mmu_remove, mmu_booke_remove),
358 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
359 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
360 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
361 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
362 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle),
363 MMUMETHOD(mmu_activate, mmu_booke_activate),
364 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),
366 /* Internal interfaces */
367 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
368 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
369 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
370 MMUMETHOD(mmu_kenter, mmu_booke_kenter),
371 MMUMETHOD(mmu_kextract, mmu_booke_kextract),
372 /* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */
373 MMUMETHOD(mmu_page_executable, mmu_booke_page_executable),
374 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
376 /* dumpsys() support */
377 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
378 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap),
379 MMUMETHOD(mmu_scan_md, mmu_booke_scan_md),
384 static mmu_def_t booke_mmu = {
400 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
403 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
404 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock);
406 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
407 ("tlb_miss_lock: tried to lock self"));
409 tlb_lock(pc->pc_booke_tlb_lock);
411 CTR1(KTR_PMAP, "%s: locked", __func__);
418 tlb_miss_unlock(void)
426 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
428 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
429 __func__, pc->pc_cpuid);
431 tlb_unlock(pc->pc_booke_tlb_lock);
433 CTR1(KTR_PMAP, "%s: unlocked", __func__);
439 /* Read TLB0 geometry (number of entries and associativity) from TLB0CFG. */
441 tlb0_get_tlbconf(void)
445 tlb0_cfg = mfspr(SPR_TLB0CFG);
446 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
447 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
448 tlb0_entries_per_way = tlb0_entries / tlb0_ways;
451 /* Initialize pool of kva ptbl buffers. */
457 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
458 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
459 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
460 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
462 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
463 TAILQ_INIT(&ptbl_buf_freelist);
465 for (i = 0; i < PTBL_BUFS; i++) {
466 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
467 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
471 /* Get a ptbl_buf from the freelist. */
472 static struct ptbl_buf *
475 struct ptbl_buf *buf;
477 mtx_lock(&ptbl_buf_freelist_lock);
478 buf = TAILQ_FIRST(&ptbl_buf_freelist);
480 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
481 mtx_unlock(&ptbl_buf_freelist_lock);
483 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
488 /* Return ptbl buf to the free pool. */
490 ptbl_buf_free(struct ptbl_buf *buf)
493 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
495 mtx_lock(&ptbl_buf_freelist_lock);
496 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
497 mtx_unlock(&ptbl_buf_freelist_lock);
501 * Search the pmap's list of allocated ptbl bufs and free the buf that maps the given ptbl.
504 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
506 struct ptbl_buf *pbuf;
508 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
510 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
512 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
513 if (pbuf->kva == (vm_offset_t)ptbl) {
514 /* Remove from pmap ptbl buf list. */
515 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
517 /* Free corresponding ptbl buf. */
523 /* Allocate page table. */
525 ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
527 vm_page_t mtbl[PTBL_PAGES];
529 struct ptbl_buf *pbuf;
534 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
535 (pmap == kernel_pmap), pdir_idx);
537 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
538 ("ptbl_alloc: invalid pdir_idx"));
539 KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
540 ("ptbl_alloc: valid ptbl entry exists!"));
542 pbuf = ptbl_buf_alloc();
544 panic("ptbl_alloc: couldn't alloc kernel virtual memory");
546 ptbl = (pte_t *)pbuf->kva;
548 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);
550 /* Allocate ptbl pages, this will sleep! */
551 for (i = 0; i < PTBL_PAGES; i++) {
552 pidx = (PTBL_PAGES * pdir_idx) + i;
553 while ((m = vm_page_alloc(NULL, pidx,
554 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
557 vm_page_unlock_queues();
559 vm_page_lock_queues();
565 /* Map allocated pages into kernel_pmap. */
566 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
568 /* Zero whole ptbl. */
569 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
571 /* Add pbuf to the pmap ptbl bufs list. */
572 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
577 /* Free ptbl pages and invalidate pdir entry. */
579 ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
587 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
588 (pmap == kernel_pmap), pdir_idx);
590 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
591 ("ptbl_free: invalid pdir_idx"));
593 ptbl = pmap->pm_pdir[pdir_idx];
595 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
597 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
600 * Invalidate the pdir entry as soon as possible, so that other CPUs
601 * don't attempt to look up the page tables we are releasing.
603 mtx_lock_spin(&tlbivax_mutex);
606 pmap->pm_pdir[pdir_idx] = NULL;
609 mtx_unlock_spin(&tlbivax_mutex);
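/* Free the physical pages backing the ptbl and remove their kernel mappings. */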
611 for (i = 0; i < PTBL_PAGES; i++) {
612 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
613 pa = pte_vatopa(mmu, kernel_pmap, va);
614 m = PHYS_TO_VM_PAGE(pa);
615 vm_page_free_zero(m);
616 atomic_subtract_int(&cnt.v_wire_count, 1);
617 mmu_booke_kremove(mmu, va);
620 ptbl_free_pmap_ptbl(pmap, ptbl);
624 * Decrement ptbl pages hold count and attempt to free ptbl pages.
625 * Called when removing pte entry from ptbl.
627 * Return 1 if ptbl pages were freed.
630 ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
637 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
638 (pmap == kernel_pmap), pdir_idx);
640 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
641 ("ptbl_unhold: invalid pdir_idx"));
642 KASSERT((pmap != kernel_pmap),
643 ("ptbl_unhold: unholding kernel ptbl!"));
645 ptbl = pmap->pm_pdir[pdir_idx];
647 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
648 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
649 ("ptbl_unhold: non kva ptbl"));
651 /* decrement hold count */
652 for (i = 0; i < PTBL_PAGES; i++) {
653 pa = pte_vatopa(mmu, kernel_pmap,
654 (vm_offset_t)ptbl + (i * PAGE_SIZE));
655 m = PHYS_TO_VM_PAGE(pa);
660 * Free ptbl pages if there are no pte entries in this ptbl.
661 * wire_count has the same value for all ptbl pages, so checking the last page suffices.
664 if (m->wire_count == 0) {
665 ptbl_free(mmu, pmap, pdir_idx);
667 //debugf("ptbl_unhold: e (freed ptbl)\n");
675 * Increment hold count for ptbl pages. This routine is used when a new pte
676 * entry is being inserted into the ptbl.
679 ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
686 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
689 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
690 ("ptbl_hold: invalid pdir_idx"));
691 KASSERT((pmap != kernel_pmap),
692 ("ptbl_hold: holding kernel ptbl!"));
694 ptbl = pmap->pm_pdir[pdir_idx];
696 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
698 for (i = 0; i < PTBL_PAGES; i++) {
699 pa = pte_vatopa(mmu, kernel_pmap,
700 (vm_offset_t)ptbl + (i * PAGE_SIZE));
701 m = PHYS_TO_VM_PAGE(pa);
706 /* Allocate pv_entry structure. */
713 if (pv_entry_count > pv_entry_high_water)
715 pv = uma_zalloc(pvzone, M_NOWAIT);
720 /* Free pv_entry structure. */
722 pv_free(pv_entry_t pve)
726 uma_zfree(pvzone, pve);
730 /* Allocate and initialize pv_entry structure. */
732 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
736 //int su = (pmap == kernel_pmap);
737 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
738 // (u_int32_t)pmap, va, (u_int32_t)m);
742 panic("pv_insert: no pv entries!");
748 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
749 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
751 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
753 //debugf("pv_insert: e\n");
756 /* Destroy pv entry. */
758 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
762 //int su = (pmap == kernel_pmap);
763 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
765 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
766 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
769 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
770 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
771 /* remove from pv_list */
772 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
773 if (TAILQ_EMPTY(&m->md.pv_list))
774 vm_page_flag_clear(m, PG_WRITEABLE);
776 /* free pv entry struct */
782 //debugf("pv_remove: e\n");
786 * Clean pte entry, try to free page table page if requested.
788 * Return 1 if ptbl pages were freed, otherwise return 0.
791 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
793 unsigned int pdir_idx = PDIR_IDX(va);
794 unsigned int ptbl_idx = PTBL_IDX(va);
799 //int su = (pmap == kernel_pmap);
800 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
801 // su, (u_int32_t)pmap, va, flags);
803 ptbl = pmap->pm_pdir[pdir_idx];
804 KASSERT(ptbl, ("pte_remove: null ptbl"));
806 pte = &ptbl[ptbl_idx];
808 if (pte == NULL || !PTE_ISVALID(pte))
811 if (PTE_ISWIRED(pte))
812 pmap->pm_stats.wired_count--;
814 /* Handle managed entry. */
815 if (PTE_ISMANAGED(pte)) {
816 /* Get vm_page_t for mapped pte. */
817 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
819 if (PTE_ISMODIFIED(pte))
822 if (PTE_ISREFERENCED(pte))
823 vm_page_flag_set(m, PG_REFERENCED);
825 pv_remove(pmap, va, m);
828 mtx_lock_spin(&tlbivax_mutex);
831 tlb0_flush_entry(va);
836 mtx_unlock_spin(&tlbivax_mutex);
838 pmap->pm_stats.resident_count--;
840 if (flags & PTBL_UNHOLD) {
841 //debugf("pte_remove: e (unhold)\n");
842 return (ptbl_unhold(mmu, pmap, pdir_idx));
845 //debugf("pte_remove: e\n");
850 * Insert PTE for a given page and virtual address.
853 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
855 unsigned int pdir_idx = PDIR_IDX(va);
856 unsigned int ptbl_idx = PTBL_IDX(va);
859 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
860 pmap == kernel_pmap, pmap, va);
862 /* Get the page table pointer. */
863 ptbl = pmap->pm_pdir[pdir_idx];
866 /* Allocate page table pages. */
867 ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
870 * Check if there is a valid mapping for the requested
871 * va; if there is, remove it.
873 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
874 if (PTE_ISVALID(pte)) {
875 pte_remove(mmu, pmap, va, PTBL_HOLD);
878 * pte is not used, increment hold count
881 if (pmap != kernel_pmap)
882 ptbl_hold(mmu, pmap, pdir_idx);
887 * Insert pv_entry into pv_list for mapped page if part of managed memory.
890 if ((m->flags & PG_FICTITIOUS) == 0) {
891 if ((m->flags & PG_UNMANAGED) == 0) {
892 flags |= PTE_MANAGED;
894 /* Create and insert pv entry. */
895 pv_insert(pmap, va, m);
899 pmap->pm_stats.resident_count++;
901 mtx_lock_spin(&tlbivax_mutex);
904 tlb0_flush_entry(va);
905 if (pmap->pm_pdir[pdir_idx] == NULL) {
907 * If we just allocated a new page table, hook it in
910 pmap->pm_pdir[pdir_idx] = ptbl;
912 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
913 pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
914 pte->flags |= (PTE_VALID | flags);
917 mtx_unlock_spin(&tlbivax_mutex);
920 /* Return the pa for the given pmap/va. */
922 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
927 pte = pte_find(mmu, pmap, va);
928 if ((pte != NULL) && PTE_ISVALID(pte))
929 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
933 /* Get a pointer to a PTE in a page table. */
935 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
937 unsigned int pdir_idx = PDIR_IDX(va);
938 unsigned int ptbl_idx = PTBL_IDX(va);
940 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
942 if (pmap->pm_pdir[pdir_idx])
943 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
948 /**************************************************************************/
950 /**************************************************************************/
953 * This is called during e500_init, before the system is really initialized.
956 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
958 vm_offset_t phys_kernelend;
959 struct mem_region *mp, *mp1;
962 u_int phys_avail_count;
963 vm_size_t physsz, hwphyssz, kstack0_sz;
964 vm_offset_t kernel_pdir, kstack0, va;
965 vm_paddr_t kstack0_phys;
969 debugf("mmu_booke_bootstrap: entered\n");
971 /* Initialize invalidation mutex */
972 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
974 /* Read TLB0 size and associativity. */
977 /* Align kernel start and end address (kernel image). */
978 kernstart = trunc_page(start);
979 data_start = round_page(kernelend);
980 kernsize = data_start - kernstart;
982 data_end = data_start;
984 /* Allocate space for the message buffer. */
985 msgbufp = (struct msgbuf *)data_end;
986 data_end += MSGBUF_SIZE;
987 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
990 data_end = round_page(data_end);
992 /* Allocate the dynamic per-cpu area. */
993 dpcpu = (void *)data_end;
994 data_end += DPCPU_SIZE;
995 dpcpu_init(dpcpu, 0);
997 /* Allocate space for ptbl_bufs. */
998 ptbl_bufs = (struct ptbl_buf *)data_end;
999 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
1000 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
1003 data_end = round_page(data_end);
1005 /* Allocate PTE tables for kernel KVA. */
1006 kernel_pdir = data_end;
1007 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
1008 PDIR_SIZE - 1) / PDIR_SIZE;
1009 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
1010 debugf(" kernel ptbls: %d\n", kernel_ptbls);
1011 debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);
1013 debugf(" data_end: 0x%08x\n", data_end);
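/*
 * If the kernel image plus bootstrap data extends past the initial 16 MB
 * kernel mapping, round data_end up to a 4 MB boundary and cover the excess
 * with additional TLB1 entries; otherwise round data_end up to 16 MB.
 */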
1014 if (data_end - kernstart > 0x1000000) {
1015 data_end = (data_end + 0x3fffff) & ~0x3fffff;
1016 tlb1_mapin_region(kernstart + 0x1000000,
1017 kernload + 0x1000000, data_end - kernstart - 0x1000000);
1019 data_end = (data_end + 0xffffff) & ~0xffffff;
1021 debugf(" updated data_end: 0x%08x\n", data_end);
1023 kernsize += data_end - data_start;
1026 * Clear the structures - note we can only do it safely after the
1027 * possible additional TLB1 translations are in place (above) so that
1028 * the whole range up to the currently calculated 'data_end' is covered.
1030 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
1031 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1033 /*******************************************************/
1034 /* Set the start and end of kva. */
1035 /*******************************************************/
1036 virtual_avail = round_page(data_end);
1037 virtual_end = VM_MAX_KERNEL_ADDRESS;
1039 /* Allocate KVA space for page zero/copy operations. */
1040 zero_page_va = virtual_avail;
1041 virtual_avail += PAGE_SIZE;
1042 zero_page_idle_va = virtual_avail;
1043 virtual_avail += PAGE_SIZE;
1044 copy_page_src_va = virtual_avail;
1045 virtual_avail += PAGE_SIZE;
1046 copy_page_dst_va = virtual_avail;
1047 virtual_avail += PAGE_SIZE;
1048 debugf("zero_page_va = 0x%08x\n", zero_page_va);
1049 debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
1050 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
1051 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);
1053 /* Initialize page zero/copy mutexes. */
1054 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
1055 mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
1057 /* Allocate KVA space for ptbl bufs. */
1058 ptbl_buf_pool_vabase = virtual_avail;
1059 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
1060 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
1061 ptbl_buf_pool_vabase, virtual_avail);
1063 /* Calculate corresponding physical addresses for the kernel region. */
1064 phys_kernelend = kernload + kernsize;
1065 debugf("kernel image and allocated data:\n");
1066 debugf(" kernload = 0x%08x\n", kernload);
1067 debugf(" kernstart = 0x%08x\n", kernstart);
1068 debugf(" kernsize = 0x%08x\n", kernsize);
1070 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
1071 panic("mmu_booke_bootstrap: phys_avail too small");
1074 * Remove kernel physical address range from avail regions list. Page
1075 * align all regions. Non-page aligned memory isn't very interesting
1076 * to us. Also, sort the entries for ascending addresses.
1079 /* Retrieve phys/avail mem regions */
1080 mem_regions(&physmem_regions, &physmem_regions_sz,
1081 &availmem_regions, &availmem_regions_sz);
1083 cnt = availmem_regions_sz;
1084 debugf("processing avail regions:\n");
1085 for (mp = availmem_regions; mp->mr_size; mp++) {
1087 e = mp->mr_start + mp->mr_size;
1088 debugf(" %08x-%08x -> ", s, e);
1089 /* Check whether this region holds all of the kernel. */
1090 if (s < kernload && e > phys_kernelend) {
1091 availmem_regions[cnt].mr_start = phys_kernelend;
1092 availmem_regions[cnt++].mr_size = e - phys_kernelend;
1095 /* Look whether this region starts within the kernel. */
1096 if (s >= kernload && s < phys_kernelend) {
1097 if (e <= phys_kernelend)
1101 /* Now look whether this region ends within the kernel. */
1102 if (e > kernload && e <= phys_kernelend) {
1107 /* Now page align the start and size of the region. */
1113 debugf("%08x-%08x = %x\n", s, e, sz);
1115 /* Check whether some memory is left here. */
1119 (cnt - (mp - availmem_regions)) * sizeof(*mp));
1125 /* Do an insertion sort. */
1126 for (mp1 = availmem_regions; mp1 < mp; mp1++)
1127 if (s < mp1->mr_start)
1130 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1138 availmem_regions_sz = cnt;
1140 /*******************************************************/
1141 /* Steal physical memory for kernel stack from the end */
1142 /* of the first avail region */
1143 /*******************************************************/
1144 kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
1145 kstack0_phys = availmem_regions[0].mr_start +
1146 availmem_regions[0].mr_size;
1147 kstack0_phys -= kstack0_sz;
1148 availmem_regions[0].mr_size -= kstack0_sz;
1150 /*******************************************************/
1151 /* Fill in phys_avail table, based on availmem_regions */
1152 /*******************************************************/
1153 phys_avail_count = 0;
1156 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1158 debugf("fill in phys_avail:\n");
1159 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
1161 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
1162 availmem_regions[i].mr_start,
1163 availmem_regions[i].mr_start +
1164 availmem_regions[i].mr_size,
1165 availmem_regions[i].mr_size);
1167 if (hwphyssz != 0 &&
1168 (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
1169 debugf(" hw.physmem adjust\n");
1170 if (physsz < hwphyssz) {
1171 phys_avail[j] = availmem_regions[i].mr_start;
1173 availmem_regions[i].mr_start +
1181 phys_avail[j] = availmem_regions[i].mr_start;
1182 phys_avail[j + 1] = availmem_regions[i].mr_start +
1183 availmem_regions[i].mr_size;
1185 physsz += availmem_regions[i].mr_size;
1187 physmem = btoc(physsz);
1189 /* Calculate the last available physical address. */
1190 for (i = 0; phys_avail[i + 2] != 0; i += 2)
1192 Maxmem = powerpc_btop(phys_avail[i + 1]);
1194 debugf("Maxmem = 0x%08lx\n", Maxmem);
1195 debugf("phys_avail_count = %d\n", phys_avail_count);
1196 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
1199 /*******************************************************/
1200 /* Initialize (statically allocated) kernel pmap. */
1201 /*******************************************************/
1202 PMAP_LOCK_INIT(kernel_pmap);
1203 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
1205 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
1206 debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
1207 debugf("kernel pdir range: 0x%08x - 0x%08x\n",
1208 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);
1210 /* Initialize kernel pdir */
1211 for (i = 0; i < kernel_ptbls; i++)
1212 kernel_pmap->pm_pdir[kptbl_min + i] =
1213 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));
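/* The kernel pmap always uses the reserved TID_KERNEL translation ID. */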
1215 for (i = 0; i < MAXCPU; i++) {
1216 kernel_pmap->pm_tid[i] = TID_KERNEL;
1218 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
1219 tidbusy[i][0] = kernel_pmap;
1223 * Fill in PTEs covering kernel code and data. They are not required
1224 * for address translation, as this area is covered by static TLB1
1225 * entries, but for pte_vatopa() to work correctly with kernel area addresses.
1228 for (va = KERNBASE; va < data_end; va += PAGE_SIZE) {
1229 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
1230 pte->rpn = kernload + (va - KERNBASE);
1231 pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1234 /* Mark kernel_pmap active on all CPUs */
1235 kernel_pmap->pm_active = ~0;
1237 /*******************************************************/
1239 /*******************************************************/
1241 /* Enter kstack0 into kernel map, provide guard page */
1242 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
1243 thread0.td_kstack = kstack0;
1244 thread0.td_kstack_pages = KSTACK_PAGES;
1246 debugf("kstack_sz = 0x%08x\n", kstack0_sz);
1247 debugf("kstack0_phys at 0x%08x - 0x%08x\n",
1248 kstack0_phys, kstack0_phys + kstack0_sz);
1249 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);
1251 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
1252 for (i = 0; i < KSTACK_PAGES; i++) {
1253 mmu_booke_kenter(mmu, kstack0, kstack0_phys);
1254 kstack0 += PAGE_SIZE;
1255 kstack0_phys += PAGE_SIZE;
1258 debugf("virtual_avail = %08x\n", virtual_avail);
1259 debugf("virtual_end = %08x\n", virtual_end);
1261 debugf("mmu_booke_bootstrap: exit\n");
1265 pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
1270 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
1271 * have the snapshot of its contents in the s/w tlb1[] table, so use
1272 * these values directly to (re)program AP's TLB1 hardware.
1274 for (i = 0; i < tlb1_idx; i ++) {
1275 /* Skip invalid entries */
1276 if (!(tlb1[i].mas1 & MAS1_VALID))
1279 tlb1_write_entry(i);
1282 set_mas4_defaults();
1286 * Get the physical page address for the given pmap/virtual address.
1289 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1294 pa = pte_vatopa(mmu, pmap, va);
1301 * Extract the physical page address associated with the given
1302 * kernel virtual address.
1305 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
1308 return (pte_vatopa(mmu, kernel_pmap, va));
1312 * Initialize the pmap module.
1313 * Called by vm_init, to initialize any structures that the pmap
1314 * system needs to map virtual memory.
1317 mmu_booke_init(mmu_t mmu)
1319 int shpgperproc = PMAP_SHPGPERPROC;
1322 * Initialize the address space (zone) for the pv entries. Set a
1323 * high water mark so that the system can recover from excessive
1324 * numbers of pv entries.
1326 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
1327 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1329 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1330 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
1332 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1333 pv_entry_high_water = 9 * (pv_entry_max / 10);
1335 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
1337 /* Pre-fill pvzone with initial number of pv entries. */
1338 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
1340 /* Initialize ptbl allocation. */
1345 * Map a list of wired pages into kernel virtual address space. This is
1346 * intended for temporary mappings which do not need page modification or
1347 * references recorded. Existing mappings in the region are overwritten.
1350 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
1355 while (count-- > 0) {
1356 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1363 * Remove page mappings from kernel virtual address space. Intended for
1364 * temporary mappings entered by mmu_booke_qenter.
1367 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
1372 while (count-- > 0) {
1373 mmu_booke_kremove(mmu, va);
1379 * Map a wired page into kernel virtual address space.
1382 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
1384 unsigned int pdir_idx = PDIR_IDX(va);
1385 unsigned int ptbl_idx = PTBL_IDX(va);
1389 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1390 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
1393 flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
1396 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1398 mtx_lock_spin(&tlbivax_mutex);
1401 if (PTE_ISVALID(pte)) {
1403 CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
1405 /* Flush entry from TLB0 */
1406 tlb0_flush_entry(va);
1409 pte->rpn = pa & ~PTE_PA_MASK;
1412 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
1413 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
1414 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
1416 /* Flush the real memory from the instruction cache. */
1417 if ((flags & (PTE_I | PTE_G)) == 0) {
1418 __syncicache((void *)va, PAGE_SIZE);
1422 mtx_unlock_spin(&tlbivax_mutex);
1426 * Remove a page from the kernel page table.
1429 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
1431 unsigned int pdir_idx = PDIR_IDX(va);
1432 unsigned int ptbl_idx = PTBL_IDX(va);
1435 // CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va));
1437 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1438 (va <= VM_MAX_KERNEL_ADDRESS)),
1439 ("mmu_booke_kremove: invalid va"));
1441 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1443 if (!PTE_ISVALID(pte)) {
1445 CTR1(KTR_PMAP, "%s: invalid pte", __func__);
1450 mtx_lock_spin(&tlbivax_mutex);
1453 /* Invalidate entry in TLB0, update PTE. */
1454 tlb0_flush_entry(va);
1459 mtx_unlock_spin(&tlbivax_mutex);
1463 * Initialize pmap associated with process 0.
1466 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
1469 mmu_booke_pinit(mmu, pmap);
1470 PCPU_SET(curpmap, pmap);
1474 * Initialize a preallocated and zeroed pmap structure,
1475 * such as one in a vmspace structure.
1478 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
1482 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
1483 curthread->td_proc->p_pid, curthread->td_proc->p_comm);
1485 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
1487 PMAP_LOCK_INIT(pmap);
1488 for (i = 0; i < MAXCPU; i++)
1489 pmap->pm_tid[i] = TID_NONE;
1490 pmap->pm_active = 0;
1491 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1492 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
1493 TAILQ_INIT(&pmap->pm_ptbl_list);
1497 * Release any resources held by the given physical map.
1498 * Called when a pmap initialized by mmu_booke_pinit is being released.
1499 * Should only be called if the map contains no valid mappings.
1502 mmu_booke_release(mmu_t mmu, pmap_t pmap)
1505 printf("mmu_booke_release: s\n");
1507 KASSERT(pmap->pm_stats.resident_count == 0,
1508 ("pmap_release: pmap resident count %ld != 0",
1509 pmap->pm_stats.resident_count));
1511 PMAP_LOCK_DESTROY(pmap);
1515 * Insert the given physical page at the specified virtual address in the
1516 * target physical map with the protection requested. If specified the page
1517 * will be wired down.
1520 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1521 vm_prot_t prot, boolean_t wired)
1524 vm_page_lock_queues();
1526 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
1527 vm_page_unlock_queues();
1532 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1533 vm_prot_t prot, boolean_t wired)
1540 pa = VM_PAGE_TO_PHYS(m);
1541 su = (pmap == kernel_pmap);
1544 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
1545 // "pa=0x%08x prot=0x%08x wired=%d)\n",
1546 // (u_int32_t)pmap, su, pmap->pm_tid,
1547 // (u_int32_t)m, va, pa, prot, wired);
1550 KASSERT(((va >= virtual_avail) &&
1551 (va <= VM_MAX_KERNEL_ADDRESS)),
1552 ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
1554 KASSERT((va <= VM_MAXUSER_ADDRESS),
1555 ("mmu_booke_enter_locked: user pmap, non user va"));
1558 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1561 * If there is an existing mapping, and the physical address has not
1562 * changed, this must be a protection or wiring change.
1564 if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
1565 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
1568 * Before actually updating pte->flags we calculate and
1569 * prepare its new value in a helper var.
1572 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
1574 /* Wiring change, just update stats. */
1576 if (!PTE_ISWIRED(pte)) {
1578 pmap->pm_stats.wired_count++;
1581 if (PTE_ISWIRED(pte)) {
1582 flags &= ~PTE_WIRED;
1583 pmap->pm_stats.wired_count--;
1587 if (prot & VM_PROT_WRITE) {
1588 /* Add write permissions. */
1593 vm_page_flag_set(m, PG_WRITEABLE);
1595 /* Handle modified pages, sense modify status. */
1598 * The PTE_MODIFIED flag could be set by underlying
1599 * TLB misses since we last read it (above), possibly
1600 * other CPUs could update it so we check in the PTE
1601 * directly rather than rely on that saved local flags value.
1604 if (PTE_ISMODIFIED(pte))
1608 if (prot & VM_PROT_EXECUTE) {
1614 * Check existing flags for execute permissions: if we
1615 * are turning execute permissions on, the icache should be flushed.
1618 if ((flags & (PTE_UX | PTE_SX)) == 0)
1622 flags &= ~PTE_REFERENCED;
1625 * The new flags value is all calculated -- only now actually update the PTE.
1628 mtx_lock_spin(&tlbivax_mutex);
1631 tlb0_flush_entry(va);
1635 mtx_unlock_spin(&tlbivax_mutex);
1639 * If there is an existing mapping, but it's for a different
1640 * physical address, pte_enter() will delete the old mapping.
1642 //if ((pte != NULL) && PTE_ISVALID(pte))
1643 // debugf("mmu_booke_enter_locked: replace\n");
1645 // debugf("mmu_booke_enter_locked: new\n");
1647 /* Now set up the flags and install the new mapping. */
1648 flags = (PTE_SR | PTE_VALID);
1654 if (prot & VM_PROT_WRITE) {
1659 vm_page_flag_set(m, PG_WRITEABLE);
1662 if (prot & VM_PROT_EXECUTE) {
1668 /* If it's wired, update stats. */
1670 pmap->pm_stats.wired_count++;
1674 pte_enter(mmu, pmap, m, va, flags);
1676 /* Flush the real memory from the instruction cache. */
1677 if (prot & VM_PROT_EXECUTE)
1681 if (sync && (su || pmap == PCPU_GET(curpmap))) {
1682 __syncicache((void *)va, PAGE_SIZE);
1687 /* Create a temporary mapping. */
1688 pmap = PCPU_GET(curpmap);
1691 pte = pte_find(mmu, pmap, va);
1692 KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__));
1694 flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M;
1696 pte_enter(mmu, pmap, m, va, flags);
1697 __syncicache((void *)va, PAGE_SIZE);
1698 pte_remove(mmu, pmap, va, PTBL_UNHOLD);
1703 * Maps a sequence of resident pages belonging to the same object.
1704 * The sequence begins with the given page m_start. This page is
1705 * mapped at the given virtual address start. Each subsequent page is
1706 * mapped at a virtual address that is offset from start by the same
1707 * amount as the page is offset from m_start within the object. The
1708 * last page in the sequence is the page with the largest offset from
1709 * m_start that can be mapped at a virtual address less than the given
1710 * virtual address end. Not every virtual page between start and end
1711 * is mapped; only those for which a resident page exists with the
1712 * corresponding offset from m_start are mapped.
1715 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
1716 vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
1719 vm_pindex_t diff, psize;
1721 psize = atop(end - start);
1724 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1725 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
1726 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1727 m = TAILQ_NEXT(m, listq);
1733 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1738 mmu_booke_enter_locked(mmu, pmap, va, m,
1739 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1744 * Remove the given range of addresses from the specified map.
1746 * It is assumed that the start and end are properly rounded to the page size.
1749 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
1754 int su = (pmap == kernel_pmap);
1756 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
1757 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
1760 KASSERT(((va >= virtual_avail) &&
1761 (va <= VM_MAX_KERNEL_ADDRESS)),
1762 ("mmu_booke_remove: kernel pmap, non kernel va"));
1764 KASSERT((va <= VM_MAXUSER_ADDRESS),
1765 ("mmu_booke_remove: user pmap, non user va"));
1768 if (PMAP_REMOVE_DONE(pmap)) {
1769 //debugf("mmu_booke_remove: e (empty)\n");
1773 hold_flag = PTBL_HOLD_FLAG(pmap);
1774 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
1776 vm_page_lock_queues();
1778 for (; va < endva; va += PAGE_SIZE) {
1779 pte = pte_find(mmu, pmap, va);
1780 if ((pte != NULL) && PTE_ISVALID(pte))
1781 pte_remove(mmu, pmap, va, hold_flag);
1784 vm_page_unlock_queues();
1786 //debugf("mmu_booke_remove: e\n");
1790 * Remove physical page from all pmaps in which it resides.
1793 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
1798 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1800 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
1801 pvn = TAILQ_NEXT(pv, pv_link);
1803 PMAP_LOCK(pv->pv_pmap);
1804 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
1805 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
1806 PMAP_UNLOCK(pv->pv_pmap);
1808 vm_page_flag_clear(m, PG_WRITEABLE);
1812 * Map a range of physical addresses into kernel virtual address space.
1815 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1816 vm_offset_t pa_end, int prot)
1818 vm_offset_t sva = *virt;
1819 vm_offset_t va = sva;
1821 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
1822 // sva, pa_start, pa_end);
1824 while (pa_start < pa_end) {
1825 mmu_booke_kenter(mmu, va, pa_start);
1827 pa_start += PAGE_SIZE;
1831 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
1836 * The pmap must be activated before its address space can be accessed in any way.
1840 mmu_booke_activate(mmu_t mmu, struct thread *td)
1844 pmap = &td->td_proc->p_vmspace->vm_pmap;
1846 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
1847 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1849 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
1851 mtx_lock_spin(&sched_lock);
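/* Mark the pmap active on this CPU and make it the current pmap. */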
1853 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
1854 PCPU_SET(curpmap, pmap);
1856 if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE)
1859 /* Load PID0 register with pmap tid value. */
1860 mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]);
1861 __asm __volatile("isync");
1863 mtx_unlock_spin(&sched_lock);
1865 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
1866 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
1870 * Deactivate the specified process's address space.
1873 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
1877 pmap = &td->td_proc->p_vmspace->vm_pmap;
1879 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
1880 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1882 atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask));
1883 PCPU_SET(curpmap, NULL);
1887 * Copy the range specified by src_addr/len
1888 * from the source map to the range dst_addr/len
1889 * in the destination map.
1891 * This routine is only advisory and need not do anything.
1894 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
1895 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
1901 * Set the physical protection on the specified range of this map as requested.
1904 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1911 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1912 mmu_booke_remove(mmu, pmap, sva, eva);
1916 if (prot & VM_PROT_WRITE)
1919 vm_page_lock_queues();
1921 for (va = sva; va < eva; va += PAGE_SIZE) {
1922 if ((pte = pte_find(mmu, pmap, va)) != NULL) {
1923 if (PTE_ISVALID(pte)) {
1924 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1926 mtx_lock_spin(&tlbivax_mutex);
1929 /* Handle modified pages. */
1930 if (PTE_ISMODIFIED(pte))
1933 /* Referenced pages. */
1934 if (PTE_ISREFERENCED(pte))
1935 vm_page_flag_set(m, PG_REFERENCED);
1937 tlb0_flush_entry(va);
1938 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
1942 mtx_unlock_spin(&tlbivax_mutex);
1947 vm_page_unlock_queues();
1951 * Clear the write and modified bits in each of the given page's mappings.
1954 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
1959 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
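/* Nothing to do if the page is fictitious/unmanaged or has no writable mappings. */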
1960 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
1961 (m->flags & PG_WRITEABLE) == 0)
1964 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1965 PMAP_LOCK(pv->pv_pmap);
1966 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
1967 if (PTE_ISVALID(pte)) {
1968 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1970 mtx_lock_spin(&tlbivax_mutex);
1973 /* Handle modified pages. */
1974 if (PTE_ISMODIFIED(pte))
1977 /* Referenced pages. */
1978 if (PTE_ISREFERENCED(pte))
1979 vm_page_flag_set(m, PG_REFERENCED);
1981 /* Flush mapping from TLB0. */
1982 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
1986 mtx_unlock_spin(&tlbivax_mutex);
1989 PMAP_UNLOCK(pv->pv_pmap);
1991 vm_page_flag_clear(m, PG_WRITEABLE);
1995 mmu_booke_page_executable(mmu_t mmu, vm_page_t m)
1999 boolean_t executable;
2002 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2003 PMAP_LOCK(pv->pv_pmap);
2004 pte = pte_find(mmu, pv->pv_pmap, pv->pv_va);
2005 if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX))
2007 PMAP_UNLOCK(pv->pv_pmap);
2012 return (executable);
2016 * Atomically extract and hold the physical page with the given
2017 * pmap and virtual address pair if that mapping permits the given protection.
2021 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2029 vm_page_lock_queues();
2032 pte = pte_find(mmu, pmap, va);
2033 if ((pte != NULL) && PTE_ISVALID(pte)) {
2034 if (pmap == kernel_pmap)
2039 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2040 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2045 vm_page_unlock_queues();
2051 * Initialize a vm_page's machine-dependent fields.
2054 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2057 TAILQ_INIT(&m->md.pv_list);
2061 * mmu_booke_zero_page_area zeros the specified hardware page by
2062 * mapping it into virtual memory and using bzero to clear its contents.
2065 * off and size must reside within a single page.
2068 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2072 /* XXX KASSERT off and size are within a single page? */
2074 mtx_lock(&zero_page_mutex);
2077 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2078 bzero((caddr_t)va + off, size);
2079 mmu_booke_kremove(mmu, va);
2081 mtx_unlock(&zero_page_mutex);
2085 * mmu_booke_zero_page zeros the specified hardware page.
2088 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2091 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
2095 * mmu_booke_copy_page copies the specified (machine independent) page by
2096 * mapping the page into virtual memory and using memcpy to copy the page,
2097 * one machine dependent page at a time.
2100 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
2102 vm_offset_t sva, dva;
2104 sva = copy_page_src_va;
2105 dva = copy_page_dst_va;
2107 mtx_lock(&copy_page_mutex);
2108 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2109 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2110 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2111 mmu_booke_kremove(mmu, dva);
2112 mmu_booke_kremove(mmu, sva);
2113 mtx_unlock(&copy_page_mutex);
2117 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2118 * into virtual memory and using bzero to clear its contents. This is intended
2119 * to be called from the vm_pagezero process only and outside of Giant. No lock is required.
2123 mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2127 va = zero_page_idle_va;
2128 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2129 bzero((caddr_t)va, PAGE_SIZE);
2130 mmu_booke_kremove(mmu, va);
2134 * Return whether or not the specified physical page was modified
2135 * in any of the physical maps.
2138 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
2143 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2144 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2147 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2148 PMAP_LOCK(pv->pv_pmap);
2149 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2150 if (!PTE_ISVALID(pte))
2151 goto make_sure_to_unlock;
2153 if (PTE_ISMODIFIED(pte)) {
2154 PMAP_UNLOCK(pv->pv_pmap);
2158 make_sure_to_unlock:
2159 PMAP_UNLOCK(pv->pv_pmap);
2165 * Return whether or not the specified virtual address is eligible for prefault.
2169 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2176 * Clear the modify bits on the specified physical page.
2179 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
2184 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2185 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2188 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2189 PMAP_LOCK(pv->pv_pmap);
2190 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2191 if (!PTE_ISVALID(pte))
2192 goto make_sure_to_unlock;
2194 mtx_lock_spin(&tlbivax_mutex);
2197 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
2198 tlb0_flush_entry(pv->pv_va);
2199 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
2204 mtx_unlock_spin(&tlbivax_mutex);
2206 make_sure_to_unlock:
2207 PMAP_UNLOCK(pv->pv_pmap);
2212 * Return a count of reference bits for a page, clearing those bits.
2213 * It is not necessary for every reference bit to be cleared, but it
2214 * is necessary that 0 only be returned when there are truly no
2215 * reference bits set.
2217 * XXX: The exact number of bits to check and clear is a matter that
2218 * should be tested and standardized at some point in the future for
2219 * optimal aging of shared pages.
2222 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
2228 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2229 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2233 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2234 PMAP_LOCK(pv->pv_pmap);
2235 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2236 if (!PTE_ISVALID(pte))
2237 goto make_sure_to_unlock;
2239 if (PTE_ISREFERENCED(pte)) {
2240 mtx_lock_spin(&tlbivax_mutex);
2243 tlb0_flush_entry(pv->pv_va);
2244 pte->flags &= ~PTE_REFERENCED;
2247 mtx_unlock_spin(&tlbivax_mutex);
2250 PMAP_UNLOCK(pv->pv_pmap);
2255 make_sure_to_unlock:
2256 PMAP_UNLOCK(pv->pv_pmap);
2262 * Clear the reference bit on the specified physical page.
2265 mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
2270 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2271 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2274 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2275 PMAP_LOCK(pv->pv_pmap);
2276 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2277 if (!PTE_ISVALID(pte))
2278 goto make_sure_to_unlock;
2280 if (PTE_ISREFERENCED(pte)) {
2281 mtx_lock_spin(&tlbivax_mutex);
2284 tlb0_flush_entry(pv->pv_va);
2285 pte->flags &= ~PTE_REFERENCED;
2288 mtx_unlock_spin(&tlbivax_mutex);
2291 make_sure_to_unlock:
2292 PMAP_UNLOCK(pv->pv_pmap);
2297 * Change wiring attribute for a map/virtual-address pair.
2300 mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
2305 if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2307 if (!PTE_ISWIRED(pte)) {
2308 pte->flags |= PTE_WIRED;
2309 pmap->pm_stats.wired_count++;
2312 if (PTE_ISWIRED(pte)) {
2313 pte->flags &= ~PTE_WIRED;
2314 pmap->pm_stats.wired_count--;
2322 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
2323 * page. This count may be changed upwards or downwards in the future; it is
2324 * only necessary that true be returned for a small subset of pmaps for proper page aging.
2328 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2333 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2334 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2338 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2339 if (pv->pv_pmap == pmap)
2349 * Return the number of managed mappings to the given physical page that are wired.
2353 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
2359 if ((m->flags & PG_FICTITIOUS) != 0)
2361 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2363 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2364 PMAP_LOCK(pv->pv_pmap);
2365 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
2366 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
2368 PMAP_UNLOCK(pv->pv_pmap);
2375 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2381 * This currently does not work for entries that
2382 * overlap TLB1 entries.
2384 for (i = 0; i < tlb1_idx; i ++) {
2385 if (tlb1_iomapped(i, pa, size, &va) == 0)
2393 mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2400 /* Raw physical memory dumps don't have a virtual address. */
2401 if (md->md_vaddr == ~0UL) {
2402 /* We always map a 256MB page at 256M. */
2403 gran = 256 * 1024 * 1024;
2404 pa = md->md_paddr + ofs;
2405 ppa = pa & ~(gran - 1);
2408 tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO);
2409 if (*sz > (gran - ofs))
2414 /* Minidumps are based on virtual memory addresses. */
2415 va = md->md_vaddr + ofs;
2416 if (va >= kernstart + kernsize) {
2417 gran = PAGE_SIZE - (va & PAGE_MASK);
2425 mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2429 /* Raw physical memory dumps don't have a virtual address. */
2430 if (md->md_vaddr == ~0UL) {
2432 tlb1[tlb1_idx].mas1 = 0;
2433 tlb1[tlb1_idx].mas2 = 0;
2434 tlb1[tlb1_idx].mas3 = 0;
2435 tlb1_write_entry(tlb1_idx);
2439 /* Minidumps are based on virtual memory addresses. */
2440 /* Nothing to do... */
2444 mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
2446 static struct pmap_md md;
2447 struct bi_mem_region *mr;
2451 if (dumpsys_minidump) {
2452 md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */
2454 /* 1st: kernel .data and .bss. */
2456 md.md_vaddr = trunc_page((uintptr_t)_etext);
2457 md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
2460 switch (prev->md_index) {
2462 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2464 md.md_vaddr = data_start;
2465 md.md_size = data_end - data_start;
2468 /* 3rd: kernel VM. */
2469 va = prev->md_vaddr + prev->md_size;
2470 /* Find start of next chunk (from va). */
2471 while (va < virtual_end) {
2472 /* Don't dump the buffer cache. */
2473 if (va >= kmi.buffer_sva &&
2474 va < kmi.buffer_eva) {
2475 va = kmi.buffer_eva;
2478 pte = pte_find(mmu, kernel_pmap, va);
2479 if (pte != NULL && PTE_ISVALID(pte))
2483 if (va < virtual_end) {
2486 /* Find last page in chunk. */
2487 while (va < virtual_end) {
2488 /* Don't run into the buffer cache. */
2489 if (va == kmi.buffer_sva)
2491 pte = pte_find(mmu, kernel_pmap, va);
2492 if (pte == NULL || !PTE_ISVALID(pte))
2496 md.md_size = va - md.md_vaddr;
2504 } else { /* Full (physical) memory dumps. */
2507 /* first physical chunk. */
2508 md.md_paddr = mr->mem_base;
2509 md.md_size = mr->mem_size;
2512 } else if (md.md_index < bootinfo->bi_mem_reg_no) {
2513 md.md_paddr = mr[md.md_index].mem_base;
2514 md.md_size = mr[md.md_index].mem_size;
2518 /* There's no next physical chunk. */
2527 * Map a set of physical memory pages into the kernel virtual address space.
2528 * Return a pointer to where it is mapped. This routine is intended to be used
2529 * for mapping device memory, NOT real memory.
2532 mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2538 va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
2542 sz = 1 << (ilog2(size) & ~1);
2544 printf("Wiring VA=%x to PA=%x (size=%x), "
2545 "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
2546 tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
2556 * 'Unmap' a range mapped by mmu_booke_mapdev().
2559 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2561 vm_offset_t base, offset;
2564 * Unmap only if this is inside kernel virtual space.
2566 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2567 base = trunc_page(va);
2568 offset = va & PAGE_MASK;
2569 size = roundup(offset + size, PAGE_SIZE);
2570 kmem_free(kernel_map, base, size);
2575 * mmu_booke_object_init_pt preloads the ptes for a given object into the
2576 * specified pmap. This eliminates the blast of soft faults on process startup
2577 * and immediately after an mmap.
2580 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2581 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2584 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2585 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2586 ("mmu_booke_object_init_pt: non-device object"));
2590 * Perform the pmap work for mincore.
2593 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2600 /**************************************************************************/
2602 /**************************************************************************/
2605 * Allocate a TID. If necessary, steal one from someone else.
2606 * The new TID is flushed from the TLB before returning.
2609 tid_alloc(pmap_t pmap)
2614 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
2616 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
2618 thiscpu = PCPU_GET(cpuid);
2620 tid = PCPU_GET(tid_next);
2623 PCPU_SET(tid_next, tid + 1);
2625 /* If we are stealing a TID, clear the relevant pmap's field. */
2626 if (tidbusy[thiscpu][tid] != NULL) {
2628 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
2630 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
2632 /* Flush all entries from TLB0 matching this TID. */
2636 tidbusy[thiscpu][tid] = pmap;
2637 pmap->pm_tid[thiscpu] = tid;
2638 __asm __volatile("msync; isync");
2640 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
2641 PCPU_GET(tid_next));
2646 /**************************************************************************/
2648 /**************************************************************************/
2651 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
2661 if (mas1 & MAS1_VALID)
2666 if (mas1 & MAS1_IPROT)
2671 as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
2672 tid = MAS1_GETTID(mas1);
2674 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2677 size = tsize2size(tsize);
2679 debugf("%3d: (%s) [AS=%d] "
2680 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
2681 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
2682 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
2685 /* Convert TLB0 va and way number to tlb0[] table index. */
2686 static inline unsigned int
2687 tlb0_tableidx(vm_offset_t va, unsigned int way)
2691 idx = (way * TLB0_ENTRIES_PER_WAY);
2692 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
2697 * Invalidate TLB0 entry.
2700 tlb0_flush_entry(vm_offset_t va)
2703 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
2705 mtx_assert(&tlbivax_mutex, MA_OWNED);
2707 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
2708 __asm __volatile("isync; msync");
2709 __asm __volatile("tlbsync; msync");
2711 CTR1(KTR_PMAP, "%s: e", __func__);
2714 /* Print out contents of the MAS registers for each TLB0 entry */
2716 tlb0_print_tlbentries(void)
2718 uint32_t mas0, mas1, mas2, mas3, mas7;
2719 int entryidx, way, idx;
2721 debugf("TLB0 entries:\n");
2722 for (way = 0; way < TLB0_WAYS; way ++)
2723 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2725 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2726 mtspr(SPR_MAS0, mas0);
2727 __asm __volatile("isync");
2729 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
2730 mtspr(SPR_MAS2, mas2);
2732 __asm __volatile("isync; tlbre");
2734 mas1 = mfspr(SPR_MAS1);
2735 mas2 = mfspr(SPR_MAS2);
2736 mas3 = mfspr(SPR_MAS3);
2737 mas7 = mfspr(SPR_MAS7);
2739 idx = tlb0_tableidx(mas2, way);
2740 tlb_print_entry(idx, mas1, mas2, mas3, mas7);
2744 /**************************************************************************/
2746 /**************************************************************************/
2749 * TLB1 mapping notes:
2752 * TLB1[1] Kernel text and data.
2753 * TLB1[2-15] Additional kernel text and data mappings (if required), PCI
2754 * windows, other device mappings.
2758 * Write the given entry to the TLB1 hardware.
2759 * Only a 32-bit PA is used, so the 4 high-order bits of the RPN (MAS7) are cleared.
2762 tlb1_write_entry(unsigned int idx)
2764 uint32_t mas0, mas7;
2766 //debugf("tlb1_write_entry: s\n");
2768 /* Clear high order RPN bits */
2772 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2773 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
2775 mtspr(SPR_MAS0, mas0);
2776 __asm __volatile("isync");
2777 mtspr(SPR_MAS1, tlb1[idx].mas1);
2778 __asm __volatile("isync");
2779 mtspr(SPR_MAS2, tlb1[idx].mas2);
2780 __asm __volatile("isync");
2781 mtspr(SPR_MAS3, tlb1[idx].mas3);
2782 __asm __volatile("isync");
2783 mtspr(SPR_MAS7, mas7);
2784 __asm __volatile("isync; tlbwe; isync; msync");
2786 //debugf("tlb1_write_entry: e\n");
2790 * Return the largest uint value log such that 2^log <= num.
2793 ilog2(unsigned int num)
2797 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
2802 * Convert TLB TSIZE value to mapped region size.
2805 tsize2size(unsigned int tsize)
2810 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
2813 return ((1 << (2 * tsize)) * 1024);
2817 * Convert region size (must be a power of 4) to TLB TSIZE value.
2820 size2tsize(vm_size_t size)
2823 return (ilog2(size) / 2 - 5);
2827 * Register a permanent kernel mapping in TLB1.
2829 * Entries are created starting from index 0 (the current free entry is
2830 * kept in tlb1_idx) and are not supposed to be invalidated.
2833 tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
2839 if (tlb1_idx >= TLB1_ENTRIES) {
2840 printf("tlb1_set_entry: TLB1 full!\n");
2844 /* Convert size to TSIZE */
2845 tsize = size2tsize(size);
2847 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
2848 /* XXX TS is hard-coded to 0 for now as we only use a single address space */
2849 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
2851 /* XXX LOCK tlb1[] */
2853 tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
2854 tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
2855 tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags;
2857 /* Set supervisor RWX permission bits */
2858 tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
2860 tlb1_write_entry(tlb1_idx++);
2862 /* XXX UNLOCK tlb1[] */
2865 * XXX in general TLB1 updates should be propagated between CPUs,
2866 * since the current design assumes the same TLB1 set-up on all cores.
2873 tlb1_entry_size_cmp(const void *a, const void *b)
2875 const vm_size_t *sza;
2876 const vm_size_t *szb;
2882 else if (*sza < *szb)
2889 * Map a contiguous RAM region into TLB1 using at most
2890 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
2892 * If necessary, round up the size of the last entry and return the total
2893 * size used by all allocated entries.
2896 tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size)
2898 vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES];
2899 vm_size_t mapped_size, sz, esz;
2903 CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x",
2904 __func__, size, va, pa);
2908 memset(entry_size, 0, sizeof(entry_size));
2910 /* Calculate entry sizes. */
2911 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) {
2913 /* Largest region that is a power of 4 and fits within the remaining size */
2914 log = ilog2(sz) / 2;
2915 esz = 1 << (2 * log);
2917 /* If this is the last entry, cover the remaining size. */
2918 if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) {
2923 entry_size[i] = esz;
2931 /* Sort entry sizes, required to get proper entry address alignment. */
2932 qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES,
2933 sizeof(vm_size_t), tlb1_entry_size_cmp);
2935 /* Load TLB1 entries. */
2936 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) {
2937 esz = entry_size[i];
2941 CTR5(KTR_PMAP, "%s: entry %d: sz = 0x%08x (va = 0x%08x "
2942 "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa);
2944 tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM);
2950 CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)",
2951 __func__, mapped_size, mapped_size - size);
2953 return (mapped_size);
2957 * TLB1 initialization routine, to be called after the very first
2958 * assembler level setup done in locore.S.
2961 tlb1_init(vm_offset_t ccsrbar)
2965 /* TLB1[1] is used to map the kernel. Save that entry. */
2966 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
2967 mtspr(SPR_MAS0, mas0);
2968 __asm __volatile("isync; tlbre");
2970 tlb1[1].mas1 = mfspr(SPR_MAS1);
2971 tlb1[1].mas2 = mfspr(SPR_MAS2);
2972 tlb1[1].mas3 = mfspr(SPR_MAS3);
2974 /* Map in CCSRBAR in TLB1[0] */
2976 tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);
2978 * Set the next available TLB1 entry index. Note that TLB1[1] is reserved
2979 * for the initial mapping of kernel text+data, which was set up early in
2980 * locore, so we need to skip this [busy] entry.
2984 /* Setup TLB miss defaults */
2985 set_mas4_defaults();
2989 * Set up MAS4 defaults.
2990 * These values are loaded into MAS0-MAS2 on a TLB miss.
2993 set_mas4_defaults(void)
2997 /* Defaults: TLB0, PID0, TSIZED=4K */
2998 mas4 = MAS4_TLBSELD0;
2999 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
3003 mtspr(SPR_MAS4, mas4);
3004 __asm __volatile("isync");
3008 * Print out contents of the MAS registers for each TLB1 entry
3011 tlb1_print_tlbentries(void)
3013 uint32_t mas0, mas1, mas2, mas3, mas7;
3016 debugf("TLB1 entries:\n");
3017 for (i = 0; i < TLB1_ENTRIES; i++) {
3019 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3020 mtspr(SPR_MAS0, mas0);
3022 __asm __volatile("isync; tlbre");
3024 mas1 = mfspr(SPR_MAS1);
3025 mas2 = mfspr(SPR_MAS2);
3026 mas3 = mfspr(SPR_MAS3);
3027 mas7 = mfspr(SPR_MAS7);
3029 tlb_print_entry(i, mas1, mas2, mas3, mas7);
3034 * Print out contents of the in-ram tlb1 table.
3037 tlb1_print_entries(void)
3041 debugf("tlb1[] table entries:\n");
3042 for (i = 0; i < TLB1_ENTRIES; i++)
3043 tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
3047 * Return 0 if the physical I/O range is encompassed by one of the
3048 * TLB1 entries; otherwise return a related error code.
3051 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
3054 vm_paddr_t pa_start;
3056 unsigned int entry_tsize;
3057 vm_size_t entry_size;
3059 *va = (vm_offset_t)NULL;
3061 /* Skip invalid entries */
3062 if (!(tlb1[i].mas1 & MAS1_VALID))
3066 * The entry must be cache-inhibited, guarded, and r/w
3067 * so it can function as an I/O page.
3069 prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
3070 if (prot != (MAS2_I | MAS2_G))
3073 prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
3074 if (prot != (MAS3_SR | MAS3_SW))
3077 /* The address should be within the entry range. */
3078 entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3079 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
3081 entry_size = tsize2size(entry_tsize);
3082 pa_start = tlb1[i].mas3 & MAS3_RPN;
3083 pa_end = pa_start + entry_size - 1;
3085 if ((pa < pa_start) || ((pa + size) > pa_end))
3088 /* Return virtual address of this mapping. */
3089 *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);