2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
5 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
22 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
24 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Some hw specific parts of this pmap were derived from or influenced
29 * by NetBSD's ibm4xx pmap module. More generic code is shared with
30 * a few other pmap modules from the FreeBSD tree.
36 * Kernel and user threads run within one common virtual address space
40 * Virtual address space layout:
41 * -----------------------------
42 * 0x0000_0000 - 0x7fff_ffff : user process
43 * 0x8000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.)
44 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved
45 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc.
46 * 0xc100_0000 - 0xffff_ffff : KVA
47 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
48 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
49 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0
50 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space
53 * Virtual address space layout:
54 * -----------------------------
55 * 0x0000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : user process
56 * 0x0000_0000_0000_0000 - 0x8fff_ffff_ffff_ffff : text, data, heap, maps, libraries
57 * 0x9000_0000_0000_0000 - 0xafff_ffff_ffff_ffff : mmio region
58 * 0xb000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : stack
59 * 0xc000_0000_0000_0000 - 0xcfff_ffff_ffff_ffff : kernel reserved
60 * 0xc000_0000_0000_0000 - endkernel-1 : kernel code & data
61 * endkernel - msgbufp-1 : flat device tree
62 * msgbufp - ptbl_bufs-1 : message buffer
63 * ptbl_bufs - kernel_pdir-1 : kernel page tables
64 * kernel_pdir - kernel_pp2d-1 : kernel page directory
65 * kernel_pp2d - . : kernel pointers to page directory
66 * pmap_zero_copy_min - crashdumpmap-1 : reserved for page zero/copy
67 * crashdumpmap - ptbl_buf_pool_vabase-1 : reserved for ptbl bufs
68 * ptbl_buf_pool_vabase - virtual_avail-1 : user page directories and page tables
69 * virtual_avail - 0xcfff_ffff_ffff_ffff : actual free KVA space
70 * 0xd000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : coprocessor region
71 * 0xe000_0000_0000_0000 - 0xefff_ffff_ffff_ffff : mmio region
72 * 0xf000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : direct map
73 * 0xf000_0000_0000_0000 - +Maxmem : physmem map
74 * - 0xffff_ffff_ffff_ffff : device direct map
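 *
 * Illustrative note (a sketch, not a definition made in this file): in the
 * 64-bit three-level layout above, looking up the PTE for a virtual address
 * walks pp2d -> pdir -> ptbl, roughly
 *
 *	pte = &pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)];
 *
 * using the PP2D_IDX/PDIR_IDX/PTBL_IDX macros relied on throughout this
 * file (see pte_find() and kernel_pte_alloc() below).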
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD$");
80 #include "opt_kstack_pages.h"
82 #include <sys/param.h>
84 #include <sys/malloc.h>
88 #include <sys/queue.h>
89 #include <sys/systm.h>
90 #include <sys/kernel.h>
91 #include <sys/kerneldump.h>
92 #include <sys/linker.h>
93 #include <sys/msgbuf.h>
95 #include <sys/mutex.h>
96 #include <sys/rwlock.h>
97 #include <sys/sched.h>
99 #include <sys/vmmeter.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_kern.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_extern.h>
106 #include <vm/vm_object.h>
107 #include <vm/vm_param.h>
108 #include <vm/vm_map.h>
109 #include <vm/vm_pager.h>
110 #include <vm/vm_phys.h>
111 #include <vm/vm_pagequeue.h>
114 #include <machine/_inttypes.h>
115 #include <machine/cpu.h>
116 #include <machine/pcb.h>
117 #include <machine/platform.h>
119 #include <machine/tlb.h>
120 #include <machine/spr.h>
121 #include <machine/md_var.h>
122 #include <machine/mmuvar.h>
123 #include <machine/pmap.h>
124 #include <machine/pte.h>
128 #define SPARSE_MAPDEV
130 #define debugf(fmt, args...) printf(fmt, ##args)
132 #define debugf(fmt, args...)
136 #define PRI0ptrX "016lx"
138 #define PRI0ptrX "08x"
141 #define TODO panic("%s: not implemented", __func__);
143 extern unsigned char _etext[];
144 extern unsigned char _end[];
146 extern uint32_t *bootinfo;
149 vm_offset_t kernstart;
152 /* Message buffer and tables. */
153 static vm_offset_t data_start;
154 static vm_size_t data_end;
156 /* Phys/avail memory regions. */
157 static struct mem_region *availmem_regions;
158 static int availmem_regions_sz;
159 static struct mem_region *physmem_regions;
160 static int physmem_regions_sz;
162 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
163 static vm_offset_t zero_page_va;
164 static struct mtx zero_page_mutex;
166 static struct mtx tlbivax_mutex;
168 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
169 static vm_offset_t copy_page_src_va;
170 static vm_offset_t copy_page_dst_va;
171 static struct mtx copy_page_mutex;
173 /**************************************************************************/
175 /**************************************************************************/
177 static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
178 vm_prot_t, u_int flags, int8_t psind);
180 unsigned int kptbl_min; /* Index of the first kernel ptbl. */
181 unsigned int kernel_ptbls; /* Number of KVA ptbls. */
183 unsigned int kernel_pdirs;
187 * If a user pmap is processed with mmu_booke_remove and the resident count
188 * drops to 0, there are no more pages to remove, so we need not continue.
190 #define PMAP_REMOVE_DONE(pmap) \
191 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
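/*
 * A minimal usage sketch (an assumption about the caller's shape, not the
 * verbatim mmu_booke_remove() body): the check lets the removal loop over a
 * user pmap stop as soon as no resident pages remain.
 */
#if 0
	for (va = sva; va < eva; va += PAGE_SIZE) {
		pte_remove(mmu, pmap, va, PTBL_UNHOLD);
		if (PMAP_REMOVE_DONE(pmap))
			break;
	}
#endif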
193 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
194 extern int elf32_nxstack;
197 /**************************************************************************/
198 /* TLB and TID handling */
199 /**************************************************************************/
201 /* Translation ID busy table */
202 static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
205 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
206 * core revisions and should be read from h/w registers during early config.
208 uint32_t tlb0_entries;
210 uint32_t tlb0_entries_per_way;
211 uint32_t tlb1_entries;
213 #define TLB0_ENTRIES (tlb0_entries)
214 #define TLB0_WAYS (tlb0_ways)
215 #define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way)
217 #define TLB1_ENTRIES (tlb1_entries)
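/*
 * Worked example (illustrative figures, not values taken from this file):
 * an e500v2-class core reports a 512-entry, 4-way set-associative TLB0 in
 * TLB0CFG, so tlb0_get_tlbconf() below would leave tlb0_entries = 512,
 * tlb0_ways = 4 and TLB0_ENTRIES_PER_WAY = 512 / 4 = 128.
 */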
219 static vm_offset_t tlb1_map_base = VM_MAXUSER_ADDRESS + PAGE_SIZE;
221 static tlbtid_t tid_alloc(struct pmap *);
222 static void tid_flush(tlbtid_t tid);
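/*
 * Allocation sketch (an assumption about tid_alloc() below, not a verbatim
 * copy): TIDs are handed out per-CPU in round-robin fashion from tidbusy[];
 * when a busy TID is recycled, the previous owner's pm_tid slot is cleared
 * and its stale translations are invalidated with tid_flush().
 */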
225 static void tlb_print_entry(int, uint32_t, uint64_t, uint32_t, uint32_t);
227 static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
230 static void tlb1_read_entry(tlb_entry_t *, unsigned int);
231 static void tlb1_write_entry(tlb_entry_t *, unsigned int);
232 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
233 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);
235 static vm_size_t tsize2size(unsigned int);
236 static unsigned int size2tsize(vm_size_t);
237 static unsigned int ilog2(unsigned long);
239 static void set_mas4_defaults(void);
241 static inline void tlb0_flush_entry(vm_offset_t);
242 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
244 /**************************************************************************/
245 /* Page table management */
246 /**************************************************************************/
248 static struct rwlock_padalign pvh_global_lock;
250 /* Data for the pv entry allocation mechanism */
251 static uma_zone_t pvzone;
252 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
254 #define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */
256 #ifndef PMAP_SHPGPERPROC
257 #define PMAP_SHPGPERPROC 200
260 static void ptbl_init(void);
261 static struct ptbl_buf *ptbl_buf_alloc(void);
262 static void ptbl_buf_free(struct ptbl_buf *);
263 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
266 static pte_t *ptbl_alloc(mmu_t, pmap_t, pte_t **,
267 unsigned int, boolean_t);
268 static void ptbl_free(mmu_t, pmap_t, pte_t **, unsigned int);
269 static void ptbl_hold(mmu_t, pmap_t, pte_t **, unsigned int);
270 static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
272 static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
273 static void ptbl_free(mmu_t, pmap_t, unsigned int);
274 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
275 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
278 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
279 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
280 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
281 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
282 static void kernel_pte_alloc(vm_offset_t, vm_offset_t, vm_offset_t);
284 static pv_entry_t pv_alloc(void);
285 static void pv_free(pv_entry_t);
286 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
287 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
289 static void booke_pmap_init_qpages(void);
291 /* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
293 #define PTBL_BUFS (16UL * 16 * 16)
295 #define PTBL_BUFS (128 * 16)
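/*
 * Worked size check (assuming PTBL_PAGES == 2 and 4KB pages, which this
 * excerpt does not spell out): the 32-bit pool is (128 * 16) * 2 * 4KB =
 * 16MB of KVA, matching the 0xc100_4000 - 0xc200_3fff "reserved for ptbl
 * bufs" window in the layout comment at the top of this file.
 */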
299 TAILQ_ENTRY(ptbl_buf) link; /* list link */
300 vm_offset_t kva; /* va of mapping */
303 /* ptbl free list and a lock used for access synchronization. */
304 static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
305 static struct mtx ptbl_buf_freelist_lock;
307 /* Base address of kva space allocated for ptbl bufs. */
308 static vm_offset_t ptbl_buf_pool_vabase;
310 /* Pointer to ptbl_buf structures. */
311 static struct ptbl_buf *ptbl_bufs;
314 extern tlb_entry_t __boot_tlb1[];
315 void pmap_bootstrap_ap(volatile uint32_t *);
319 * Kernel MMU interface
321 static void mmu_booke_clear_modify(mmu_t, vm_page_t);
322 static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
323 vm_size_t, vm_offset_t);
324 static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
325 static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
326 vm_offset_t, vm_page_t *, vm_offset_t, int);
327 static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
328 vm_prot_t, u_int flags, int8_t psind);
329 static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
330 vm_page_t, vm_prot_t);
331 static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
333 static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
334 static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
336 static void mmu_booke_init(mmu_t);
337 static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
338 static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
339 static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
340 static int mmu_booke_ts_referenced(mmu_t, vm_page_t);
341 static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
343 static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
345 static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
346 vm_object_t, vm_pindex_t, vm_size_t);
347 static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
348 static void mmu_booke_page_init(mmu_t, vm_page_t);
349 static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
350 static void mmu_booke_pinit(mmu_t, pmap_t);
351 static void mmu_booke_pinit0(mmu_t, pmap_t);
352 static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
354 static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
355 static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
356 static void mmu_booke_release(mmu_t, pmap_t);
357 static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
358 static void mmu_booke_remove_all(mmu_t, vm_page_t);
359 static void mmu_booke_remove_write(mmu_t, vm_page_t);
360 static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
361 static void mmu_booke_zero_page(mmu_t, vm_page_t);
362 static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
363 static void mmu_booke_activate(mmu_t, struct thread *);
364 static void mmu_booke_deactivate(mmu_t, struct thread *);
365 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
366 static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
367 static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
368 static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
369 static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
370 static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
371 static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
372 static void mmu_booke_kremove(mmu_t, vm_offset_t);
373 static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
374 static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
376 static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
378 static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
380 static void mmu_booke_scan_init(mmu_t);
381 static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
382 static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
383 static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
384 vm_size_t sz, vm_memattr_t mode);
385 static int mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm,
386 volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
387 static int mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
388 int *is_user, vm_offset_t *decoded_addr);
391 static mmu_method_t mmu_booke_methods[] = {
392 /* pmap dispatcher interface */
393 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
394 MMUMETHOD(mmu_copy, mmu_booke_copy),
395 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
396 MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),
397 MMUMETHOD(mmu_enter, mmu_booke_enter),
398 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
399 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
400 MMUMETHOD(mmu_extract, mmu_booke_extract),
401 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
402 MMUMETHOD(mmu_init, mmu_booke_init),
403 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
404 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
405 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced),
406 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
407 MMUMETHOD(mmu_map, mmu_booke_map),
408 MMUMETHOD(mmu_mincore, mmu_booke_mincore),
409 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt),
410 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
411 MMUMETHOD(mmu_page_init, mmu_booke_page_init),
412 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
413 MMUMETHOD(mmu_pinit, mmu_booke_pinit),
414 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0),
415 MMUMETHOD(mmu_protect, mmu_booke_protect),
416 MMUMETHOD(mmu_qenter, mmu_booke_qenter),
417 MMUMETHOD(mmu_qremove, mmu_booke_qremove),
418 MMUMETHOD(mmu_release, mmu_booke_release),
419 MMUMETHOD(mmu_remove, mmu_booke_remove),
420 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
421 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
422 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
423 MMUMETHOD(mmu_unwire, mmu_booke_unwire),
424 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
425 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
426 MMUMETHOD(mmu_activate, mmu_booke_activate),
427 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),
428 MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
429 MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),
431 /* Internal interfaces */
432 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
433 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
434 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
435 MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr),
436 MMUMETHOD(mmu_kenter, mmu_booke_kenter),
437 MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr),
438 MMUMETHOD(mmu_kextract, mmu_booke_kextract),
439 MMUMETHOD(mmu_kremove, mmu_booke_kremove),
440 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
441 MMUMETHOD(mmu_change_attr, mmu_booke_change_attr),
442 MMUMETHOD(mmu_map_user_ptr, mmu_booke_map_user_ptr),
443 MMUMETHOD(mmu_decode_kernel_ptr, mmu_booke_decode_kernel_ptr),
445 /* dumpsys() support */
446 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
447 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap),
448 MMUMETHOD(mmu_scan_init, mmu_booke_scan_init),
453 MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
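/*
 * Dispatch sketch (an assumption about the machine-independent layer, not
 * code from this file): the MMUMETHOD table registered by MMU_DEF() above
 * is consumed by the kobj-based pmap dispatcher, so an MI call such as
 * pmap_enter() reaches mmu_booke_enter() roughly like this:
 */
#if 0
int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind)
{

	return (MMU_ENTER(mmu_obj, pmap, va, m, prot, flags, psind));
}
#endif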
455 static __inline uint32_t
456 tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
461 if (ma != VM_MEMATTR_DEFAULT) {
463 case VM_MEMATTR_UNCACHEABLE:
464 return (MAS2_I | MAS2_G);
465 case VM_MEMATTR_WRITE_COMBINING:
466 case VM_MEMATTR_WRITE_BACK:
467 case VM_MEMATTR_PREFETCHABLE:
469 case VM_MEMATTR_WRITE_THROUGH:
470 return (MAS2_W | MAS2_M);
471 case VM_MEMATTR_CACHEABLE:
477 * Assume the page is cache inhibited and access is guarded unless
478 * it's in our available memory array.
480 attrib = _TLB_ENTRY_IO;
481 for (i = 0; i < physmem_regions_sz; i++) {
482 if ((pa >= physmem_regions[i].mr_start) &&
483 (pa < (physmem_regions[i].mr_start +
484 physmem_regions[i].mr_size))) {
485 attrib = _TLB_ENTRY_MEM;
502 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
505 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
506 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke.tlb_lock);
508 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
509 ("tlb_miss_lock: tried to lock self"));
511 tlb_lock(pc->pc_booke.tlb_lock);
513 CTR1(KTR_PMAP, "%s: locked", __func__);
520 tlb_miss_unlock(void)
528 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
530 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
531 __func__, pc->pc_cpuid);
533 tlb_unlock(pc->pc_booke.tlb_lock);
535 CTR1(KTR_PMAP, "%s: unlocked", __func__);
541 /* Read TLB0 geometry (number of entries and ways) from the TLB0CFG SPR. */
543 tlb0_get_tlbconf(void)
547 tlb0_cfg = mfspr(SPR_TLB0CFG);
548 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
549 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
550 tlb0_entries_per_way = tlb0_entries / tlb0_ways;
553 /* Read the number of TLB1 entries from the TLB1CFG SPR. */
555 tlb1_get_tlbconf(void)
559 tlb1_cfg = mfspr(SPR_TLB1CFG);
560 tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK;
563 /**************************************************************************/
564 /* Page table related */
565 /**************************************************************************/
568 /* Initialize pool of kva ptbl buffers. */
574 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
575 TAILQ_INIT(&ptbl_buf_freelist);
577 for (i = 0; i < PTBL_BUFS; i++) {
578 ptbl_bufs[i].kva = ptbl_buf_pool_vabase +
579 i * MAX(PTBL_PAGES,PDIR_PAGES) * PAGE_SIZE;
580 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
584 /* Get a ptbl_buf from the freelist. */
585 static struct ptbl_buf *
588 struct ptbl_buf *buf;
590 mtx_lock(&ptbl_buf_freelist_lock);
591 buf = TAILQ_FIRST(&ptbl_buf_freelist);
593 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
594 mtx_unlock(&ptbl_buf_freelist_lock);
599 /* Return ptbl buf to the free pool. */
601 ptbl_buf_free(struct ptbl_buf *buf)
603 mtx_lock(&ptbl_buf_freelist_lock);
604 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
605 mtx_unlock(&ptbl_buf_freelist_lock);
609 * Search the pmap's list of allocated ptbl bufs for the one mapping the given ptbl and release it.
612 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t * ptbl)
614 struct ptbl_buf *pbuf;
616 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) {
617 if (pbuf->kva == (vm_offset_t) ptbl) {
618 /* Remove from pmap ptbl buf list. */
619 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
621 /* Free corresponding ptbl buf. */
629 /* Get a pointer to a PTE in a page table. */
630 static __inline pte_t *
631 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
636 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
638 pdir = pmap->pm_pp2d[PP2D_IDX(va)];
641 ptbl = pdir[PDIR_IDX(va)];
642 return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
646 * Search the pmap's list of allocated pdir bufs for the one mapping the given pdir and release it.
649 ptbl_free_pmap_pdir(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
651 struct ptbl_buf *pbuf;
653 TAILQ_FOREACH(pbuf, &pmap->pm_pdir_list, link) {
654 if (pbuf->kva == (vm_offset_t) pdir) {
655 /* Remove from pmap ptbl buf list. */
656 TAILQ_REMOVE(&pmap->pm_pdir_list, pbuf, link);
658 /* Free corresponding pdir buf. */
665 /* Free pdir pages and invalidate pdir entry. */
667 pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx)
675 pdir = pmap->pm_pp2d[pp2d_idx];
677 KASSERT((pdir != NULL), ("pdir_free: null pdir"));
679 pmap->pm_pp2d[pp2d_idx] = NULL;
681 for (i = 0; i < PDIR_PAGES; i++) {
682 va = ((vm_offset_t) pdir + (i * PAGE_SIZE));
683 pa = pte_vatopa(mmu, kernel_pmap, va);
684 m = PHYS_TO_VM_PAGE(pa);
685 vm_page_free_zero(m);
690 ptbl_free_pmap_pdir(mmu, pmap, pdir);
694 * Decrement pdir pages hold count and attempt to free pdir pages. Called
695 * when removing directory entry from pdir.
697 * Return 1 if pdir pages were freed.
700 pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
707 KASSERT((pmap != kernel_pmap),
708 ("pdir_unhold: unholding kernel pdir!"));
710 pdir = pmap->pm_pp2d[pp2d_idx];
712 KASSERT(((vm_offset_t) pdir >= VM_MIN_KERNEL_ADDRESS),
713 ("pdir_unhold: non kva pdir"));
715 /* decrement hold count */
716 for (i = 0; i < PDIR_PAGES; i++) {
717 pa = pte_vatopa(mmu, kernel_pmap,
718 (vm_offset_t) pdir + (i * PAGE_SIZE));
719 m = PHYS_TO_VM_PAGE(pa);
724 * Free pdir pages if there are no dir entries in this pdir.
725 * wire_count has the same value for all ptbl pages, so check the
728 if (m->wire_count == 0) {
729 pdir_free(mmu, pmap, pp2d_idx);
736 * Increment hold count for pdir pages. This routine is used when a new ptbl
737 * entry is being inserted into the pdir.
740 pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
746 KASSERT((pmap != kernel_pmap),
747 ("pdir_hold: holding kernel pdir!"));
749 KASSERT((pdir != NULL), ("pdir_hold: null pdir"));
751 for (i = 0; i < PDIR_PAGES; i++) {
752 pa = pte_vatopa(mmu, kernel_pmap,
753 (vm_offset_t) pdir + (i * PAGE_SIZE));
754 m = PHYS_TO_VM_PAGE(pa);
759 /* Allocate page table. */
761 ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx,
764 vm_page_t mtbl [PTBL_PAGES];
766 struct ptbl_buf *pbuf;
772 KASSERT((pdir[pdir_idx] == NULL),
773 ("%s: valid ptbl entry exists!", __func__));
775 pbuf = ptbl_buf_alloc();
777 panic("%s: couldn't alloc kernel virtual memory", __func__);
779 ptbl = (pte_t *) pbuf->kva;
781 for (i = 0; i < PTBL_PAGES; i++) {
782 pidx = (PTBL_PAGES * pdir_idx) + i;
783 req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
784 while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
786 rw_wunlock(&pvh_global_lock);
788 ptbl_free_pmap_ptbl(pmap, ptbl);
789 for (j = 0; j < i; j++)
790 vm_page_free(mtbl[j]);
795 rw_wlock(&pvh_global_lock);
801 /* Map the allocated pages into kernel_pmap. */
802 mmu_booke_qenter(mmu, (vm_offset_t) ptbl, mtbl, PTBL_PAGES);
803 /* Zero whole ptbl. */
804 bzero((caddr_t) ptbl, PTBL_PAGES * PAGE_SIZE);
806 /* Add pbuf to the pmap ptbl bufs list. */
807 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
812 /* Free ptbl pages and invalidate pdir entry. */
814 ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
822 ptbl = pdir[pdir_idx];
824 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
826 pdir[pdir_idx] = NULL;
828 for (i = 0; i < PTBL_PAGES; i++) {
829 va = ((vm_offset_t) ptbl + (i * PAGE_SIZE));
830 pa = pte_vatopa(mmu, kernel_pmap, va);
831 m = PHYS_TO_VM_PAGE(pa);
832 vm_page_free_zero(m);
837 ptbl_free_pmap_ptbl(pmap, ptbl);
841 * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
842 * when removing pte entry from ptbl.
844 * Return 1 if ptbl pages were freed.
847 ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
857 pp2d_idx = PP2D_IDX(va);
858 pdir_idx = PDIR_IDX(va);
860 KASSERT((pmap != kernel_pmap),
861 ("ptbl_unhold: unholding kernel ptbl!"));
863 pdir = pmap->pm_pp2d[pp2d_idx];
864 ptbl = pdir[pdir_idx];
866 KASSERT(((vm_offset_t) ptbl >= VM_MIN_KERNEL_ADDRESS),
867 ("ptbl_unhold: non kva ptbl"));
869 /* decrement hold count */
870 for (i = 0; i < PTBL_PAGES; i++) {
871 pa = pte_vatopa(mmu, kernel_pmap,
872 (vm_offset_t) ptbl + (i * PAGE_SIZE));
873 m = PHYS_TO_VM_PAGE(pa);
878 * Free ptbl pages if there are no pte entries in this ptbl.
879 * wire_count has the same value for all ptbl pages, so check the
882 if (m->wire_count == 0) {
883 /* A pair of indirect entries might point to this ptbl page */
885 tlb_flush_entry(pmap, va & ~((2UL * PAGE_SIZE_1M) - 1),
886 TLB_SIZE_1M, MAS6_SIND);
887 tlb_flush_entry(pmap, (va & ~((2UL * PAGE_SIZE_1M) - 1)) | PAGE_SIZE_1M,
888 TLB_SIZE_1M, MAS6_SIND);
890 ptbl_free(mmu, pmap, pdir, pdir_idx);
891 pdir_unhold(mmu, pmap, pp2d_idx);
898 * Increment hold count for ptbl pages. This routine is used when a new pte
899 * entry is being inserted into the ptbl.
902 ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
909 KASSERT((pmap != kernel_pmap),
910 ("ptbl_hold: holding kernel ptbl!"));
912 ptbl = pdir[pdir_idx];
914 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
916 for (i = 0; i < PTBL_PAGES; i++) {
917 pa = pte_vatopa(mmu, kernel_pmap,
918 (vm_offset_t) ptbl + (i * PAGE_SIZE));
919 m = PHYS_TO_VM_PAGE(pa);
925 /* Initialize pool of kva ptbl buffers. */
931 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
932 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
933 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
934 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
936 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
937 TAILQ_INIT(&ptbl_buf_freelist);
939 for (i = 0; i < PTBL_BUFS; i++) {
941 ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
942 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
946 /* Get a ptbl_buf from the freelist. */
947 static struct ptbl_buf *
950 struct ptbl_buf *buf;
952 mtx_lock(&ptbl_buf_freelist_lock);
953 buf = TAILQ_FIRST(&ptbl_buf_freelist);
955 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
956 mtx_unlock(&ptbl_buf_freelist_lock);
958 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
963 /* Return ptbl buf to the free pool. */
965 ptbl_buf_free(struct ptbl_buf *buf)
968 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
970 mtx_lock(&ptbl_buf_freelist_lock);
971 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
972 mtx_unlock(&ptbl_buf_freelist_lock);
976 * Search the pmap's list of allocated ptbl bufs for the one mapping the given ptbl and release it.
979 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
981 struct ptbl_buf *pbuf;
983 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
985 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
987 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
988 if (pbuf->kva == (vm_offset_t)ptbl) {
989 /* Remove from pmap ptbl buf list. */
990 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
992 /* Free corresponding ptbl buf. */
998 /* Allocate page table. */
1000 ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
1002 vm_page_t mtbl[PTBL_PAGES];
1004 struct ptbl_buf *pbuf;
1009 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1010 (pmap == kernel_pmap), pdir_idx);
1012 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1013 ("ptbl_alloc: invalid pdir_idx"));
1014 KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
1015 ("pte_alloc: valid ptbl entry exists!"));
1017 pbuf = ptbl_buf_alloc();
1019 panic("pte_alloc: couldn't alloc kernel virtual memory");
1021 ptbl = (pte_t *)pbuf->kva;
1023 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);
1025 for (i = 0; i < PTBL_PAGES; i++) {
1026 pidx = (PTBL_PAGES * pdir_idx) + i;
1027 while ((m = vm_page_alloc(NULL, pidx,
1028 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
1030 rw_wunlock(&pvh_global_lock);
1032 ptbl_free_pmap_ptbl(pmap, ptbl);
1033 for (j = 0; j < i; j++)
1034 vm_page_free(mtbl[j]);
1039 rw_wlock(&pvh_global_lock);
1045 /* Map allocated pages into kernel_pmap. */
1046 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
1048 /* Zero whole ptbl. */
1049 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
1051 /* Add pbuf to the pmap ptbl bufs list. */
1052 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
1057 /* Free ptbl pages and invalidate pdir entry. */
1059 ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1067 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1068 (pmap == kernel_pmap), pdir_idx);
1070 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1071 ("ptbl_free: invalid pdir_idx"));
1073 ptbl = pmap->pm_pdir[pdir_idx];
1075 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
1077 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
1080 * Invalidate the pdir entry as soon as possible, so that other CPUs
1081 * don't attempt to look up the page tables we are releasing.
1083 mtx_lock_spin(&tlbivax_mutex);
1086 pmap->pm_pdir[pdir_idx] = NULL;
1089 mtx_unlock_spin(&tlbivax_mutex);
1091 for (i = 0; i < PTBL_PAGES; i++) {
1092 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
1093 pa = pte_vatopa(mmu, kernel_pmap, va);
1094 m = PHYS_TO_VM_PAGE(pa);
1095 vm_page_free_zero(m);
1097 mmu_booke_kremove(mmu, va);
1100 ptbl_free_pmap_ptbl(pmap, ptbl);
1104 * Decrement ptbl pages hold count and attempt to free ptbl pages.
1105 * Called when removing pte entry from ptbl.
1107 * Return 1 if ptbl pages were freed.
1110 ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1117 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1118 (pmap == kernel_pmap), pdir_idx);
1120 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1121 ("ptbl_unhold: invalid pdir_idx"));
1122 KASSERT((pmap != kernel_pmap),
1123 ("ptbl_unhold: unholding kernel ptbl!"));
1125 ptbl = pmap->pm_pdir[pdir_idx];
1127 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
1128 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
1129 ("ptbl_unhold: non kva ptbl"));
1131 /* decrement hold count */
1132 for (i = 0; i < PTBL_PAGES; i++) {
1133 pa = pte_vatopa(mmu, kernel_pmap,
1134 (vm_offset_t)ptbl + (i * PAGE_SIZE));
1135 m = PHYS_TO_VM_PAGE(pa);
1140 * Free ptbl pages if there are no pte entries in this ptbl.
1141 * wire_count has the same value for all ptbl pages, so check the last
1144 if (m->wire_count == 0) {
1145 ptbl_free(mmu, pmap, pdir_idx);
1147 //debugf("ptbl_unhold: e (freed ptbl)\n");
1155 * Increment hold count for ptbl pages. This routine is used when a new pte
1156 * entry is being inserted into the ptbl.
1159 ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1166 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
1169 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1170 ("ptbl_hold: invalid pdir_idx"));
1171 KASSERT((pmap != kernel_pmap),
1172 ("ptbl_hold: holding kernel ptbl!"));
1174 ptbl = pmap->pm_pdir[pdir_idx];
1176 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
1178 for (i = 0; i < PTBL_PAGES; i++) {
1179 pa = pte_vatopa(mmu, kernel_pmap,
1180 (vm_offset_t)ptbl + (i * PAGE_SIZE));
1181 m = PHYS_TO_VM_PAGE(pa);
1187 /* Allocate pv_entry structure. */
1194 if (pv_entry_count > pv_entry_high_water)
1195 pagedaemon_wakeup(0); /* XXX powerpc NUMA */
1196 pv = uma_zalloc(pvzone, M_NOWAIT);
1201 /* Free pv_entry structure. */
1202 static __inline void
1203 pv_free(pv_entry_t pve)
1207 uma_zfree(pvzone, pve);
1211 /* Allocate and initialize pv_entry structure. */
1213 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
1217 //int su = (pmap == kernel_pmap);
1218 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
1219 // (u_int32_t)pmap, va, (u_int32_t)m);
1223 panic("pv_insert: no pv entries!");
1225 pve->pv_pmap = pmap;
1228 /* add to pv_list */
1229 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1230 rw_assert(&pvh_global_lock, RA_WLOCKED);
1232 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
1234 //debugf("pv_insert: e\n");
1237 /* Destroy pv entry. */
1239 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
1243 //int su = (pmap == kernel_pmap);
1244 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
1246 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1247 rw_assert(&pvh_global_lock, RA_WLOCKED);
1250 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
1251 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
1252 /* remove from pv_list */
1253 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
1254 if (TAILQ_EMPTY(&m->md.pv_list))
1255 vm_page_aflag_clear(m, PGA_WRITEABLE);
1257 /* free pv entry struct */
1263 //debugf("pv_remove: e\n");
1266 #ifdef __powerpc64__
1268 * Clean pte entry, try to free page table page if requested.
1270 * Return 1 if ptbl pages were freed, otherwise return 0.
1273 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
1278 pte = pte_find(mmu, pmap, va);
1279 KASSERT(pte != NULL, ("%s: NULL pte", __func__));
1281 if (!PTE_ISVALID(pte))
1284 /* Get vm_page_t for mapped pte. */
1285 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1287 if (PTE_ISWIRED(pte))
1288 pmap->pm_stats.wired_count--;
1290 /* Handle managed entry. */
1291 if (PTE_ISMANAGED(pte)) {
1293 /* Handle modified pages. */
1294 if (PTE_ISMODIFIED(pte))
1297 /* Referenced pages. */
1298 if (PTE_ISREFERENCED(pte))
1299 vm_page_aflag_set(m, PGA_REFERENCED);
1301 /* Remove pv_entry from pv_list. */
1302 pv_remove(pmap, va, m);
1303 } else if (m->md.pv_tracked) {
1304 pv_remove(pmap, va, m);
1305 if (TAILQ_EMPTY(&m->md.pv_list))
1306 m->md.pv_tracked = false;
1308 mtx_lock_spin(&tlbivax_mutex);
1311 tlb0_flush_entry(va);
1315 mtx_unlock_spin(&tlbivax_mutex);
1317 pmap->pm_stats.resident_count--;
1319 if (flags & PTBL_UNHOLD) {
1320 return (ptbl_unhold(mmu, pmap, va));
1326 * allocate a page of pointers to page directories, do not preallocate the
1330 pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep)
1332 vm_page_t mtbl [PDIR_PAGES];
1334 struct ptbl_buf *pbuf;
1340 pbuf = ptbl_buf_alloc();
1343 panic("%s: couldn't alloc kernel virtual memory", __func__);
1345 /* Allocate pdir pages, this will sleep! */
1346 for (i = 0; i < PDIR_PAGES; i++) {
1347 pidx = (PDIR_PAGES * pp2d_idx) + i;
1348 req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
1349 while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
1357 /* Map the allocated pages into kernel_pmap. */
1358 pdir = (pte_t **) pbuf->kva;
1359 pmap_qenter((vm_offset_t) pdir, mtbl, PDIR_PAGES);
1361 /* Zero whole pdir. */
1362 bzero((caddr_t) pdir, PDIR_PAGES * PAGE_SIZE);
1364 /* Add pdir to the pmap pdir bufs list. */
1365 TAILQ_INSERT_TAIL(&pmap->pm_pdir_list, pbuf, link);
1371 * Insert PTE for a given page and virtual address.
1374 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
1377 unsigned int pp2d_idx = PP2D_IDX(va);
1378 unsigned int pdir_idx = PDIR_IDX(va);
1379 unsigned int ptbl_idx = PTBL_IDX(va);
1383 /* Get the page directory pointer. */
1384 pdir = pmap->pm_pp2d[pp2d_idx];
1386 pdir = pdir_alloc(mmu, pmap, pp2d_idx, nosleep);
1388 /* Get the page table pointer. */
1389 ptbl = pdir[pdir_idx];
1392 /* Allocate page table pages. */
1393 ptbl = ptbl_alloc(mmu, pmap, pdir, pdir_idx, nosleep);
1395 KASSERT(nosleep, ("nosleep and NULL ptbl"));
1400 * Check if there is a valid mapping for the requested va; if there is, remove it.
1403 pte = &pdir[pdir_idx][ptbl_idx];
1404 if (PTE_ISVALID(pte)) {
1405 pte_remove(mmu, pmap, va, PTBL_HOLD);
1408 * pte is not used, increment hold count for ptbl
1411 if (pmap != kernel_pmap)
1412 ptbl_hold(mmu, pmap, pdir, pdir_idx);
1416 if (pdir[pdir_idx] == NULL) {
1417 if (pmap != kernel_pmap && pmap->pm_pp2d[pp2d_idx] != NULL)
1418 pdir_hold(mmu, pmap, pdir);
1419 pdir[pdir_idx] = ptbl;
1421 if (pmap->pm_pp2d[pp2d_idx] == NULL)
1422 pmap->pm_pp2d[pp2d_idx] = pdir;
1425 * Insert pv_entry into pv_list for mapped page if part of managed
1428 if ((m->oflags & VPO_UNMANAGED) == 0) {
1429 flags |= PTE_MANAGED;
1431 /* Create and insert pv entry. */
1432 pv_insert(pmap, va, m);
1435 mtx_lock_spin(&tlbivax_mutex);
1438 tlb0_flush_entry(va);
1439 pmap->pm_stats.resident_count++;
1440 pte = &pdir[pdir_idx][ptbl_idx];
1441 *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
1442 *pte |= (PTE_VALID | flags);
1445 mtx_unlock_spin(&tlbivax_mutex);
1450 /* Return the pa for the given pmap/va. */
1452 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1457 pte = pte_find(mmu, pmap, va);
1458 if ((pte != NULL) && PTE_ISVALID(pte))
1459 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
1464 /* Set up kernel page tables covering the range from addr through data_end. */
1466 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
1473 /* Initialize kernel pdir */
1474 for (i = 0; i < kernel_pdirs; i++) {
1475 kernel_pmap->pm_pp2d[i + PP2D_IDX(va)] =
1476 (pte_t **)(pdir + (i * PAGE_SIZE * PDIR_PAGES));
1477 for (j = PDIR_IDX(va + (i * PAGE_SIZE * PDIR_NENTRIES * PTBL_NENTRIES));
1478 j < PDIR_NENTRIES; j++) {
1479 kernel_pmap->pm_pp2d[i + PP2D_IDX(va)][j] =
1480 (pte_t *)(pdir + (kernel_pdirs * PAGE_SIZE * PDIR_PAGES) +
1481 (((i * PDIR_NENTRIES) + j) * PAGE_SIZE * PTBL_PAGES));
1486 * Fill in PTEs covering kernel code and data. They are not required
1487 * for address translation, as this area is covered by static TLB1
1488 * entries, but for pte_vatopa() to work correctly with kernel area
1491 for (va = addr; va < data_end; va += PAGE_SIZE) {
1492 pte = &(kernel_pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
1493 *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
1494 *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1495 PTE_VALID | PTE_PS_4KB;
1500 * Clean pte entry, try to free page table page if requested.
1502 * Return 1 if ptbl pages were freed, otherwise return 0.
1505 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
1507 unsigned int pdir_idx = PDIR_IDX(va);
1508 unsigned int ptbl_idx = PTBL_IDX(va);
1513 //int su = (pmap == kernel_pmap);
1514 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
1515 // su, (u_int32_t)pmap, va, flags);
1517 ptbl = pmap->pm_pdir[pdir_idx];
1518 KASSERT(ptbl, ("pte_remove: null ptbl"));
1520 pte = &ptbl[ptbl_idx];
1522 if (pte == NULL || !PTE_ISVALID(pte))
1525 if (PTE_ISWIRED(pte))
1526 pmap->pm_stats.wired_count--;
1528 /* Get vm_page_t for mapped pte. */
1529 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1531 /* Handle managed entry. */
1532 if (PTE_ISMANAGED(pte)) {
1534 if (PTE_ISMODIFIED(pte))
1537 if (PTE_ISREFERENCED(pte))
1538 vm_page_aflag_set(m, PGA_REFERENCED);
1540 pv_remove(pmap, va, m);
1541 } else if (m->md.pv_tracked) {
1543 * Always pv_insert()/pv_remove() on MPC85XX, in case DPAA is
1544 * used. This is needed by the NCSW support code for fast
1545 * VA<->PA translation.
1547 pv_remove(pmap, va, m);
1548 if (TAILQ_EMPTY(&m->md.pv_list))
1549 m->md.pv_tracked = false;
1552 mtx_lock_spin(&tlbivax_mutex);
1555 tlb0_flush_entry(va);
1559 mtx_unlock_spin(&tlbivax_mutex);
1561 pmap->pm_stats.resident_count--;
1563 if (flags & PTBL_UNHOLD) {
1564 //debugf("pte_remove: e (unhold)\n");
1565 return (ptbl_unhold(mmu, pmap, pdir_idx));
1568 //debugf("pte_remove: e\n");
1573 * Insert PTE for a given page and virtual address.
1576 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
1579 unsigned int pdir_idx = PDIR_IDX(va);
1580 unsigned int ptbl_idx = PTBL_IDX(va);
1583 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
1584 pmap == kernel_pmap, pmap, va);
1586 /* Get the page table pointer. */
1587 ptbl = pmap->pm_pdir[pdir_idx];
1590 /* Allocate page table pages. */
1591 ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
1593 KASSERT(nosleep, ("nosleep and NULL ptbl"));
1598 * Check if there is a valid mapping for the requested
1599 * va; if there is, remove it.
1601 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
1602 if (PTE_ISVALID(pte)) {
1603 pte_remove(mmu, pmap, va, PTBL_HOLD);
1606 * pte is not used, increment hold count
1609 if (pmap != kernel_pmap)
1610 ptbl_hold(mmu, pmap, pdir_idx);
1615 * Insert pv_entry into pv_list for mapped page if part of managed
1618 if ((m->oflags & VPO_UNMANAGED) == 0) {
1619 flags |= PTE_MANAGED;
1621 /* Create and insert pv entry. */
1622 pv_insert(pmap, va, m);
1625 pmap->pm_stats.resident_count++;
1627 mtx_lock_spin(&tlbivax_mutex);
1630 tlb0_flush_entry(va);
1631 if (pmap->pm_pdir[pdir_idx] == NULL) {
1633 * If we just allocated a new page table, hook it in
1636 pmap->pm_pdir[pdir_idx] = ptbl;
1638 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
1639 *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
1640 *pte |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */
1643 mtx_unlock_spin(&tlbivax_mutex);
1647 /* Return the pa for the given pmap/va. */
1649 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1654 pte = pte_find(mmu, pmap, va);
1655 if ((pte != NULL) && PTE_ISVALID(pte))
1656 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
1660 /* Get a pointer to a PTE in a page table. */
1662 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1664 unsigned int pdir_idx = PDIR_IDX(va);
1665 unsigned int ptbl_idx = PTBL_IDX(va);
1667 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
1669 if (pmap->pm_pdir[pdir_idx])
1670 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
1675 /* Set up kernel page tables. */
1677 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
1683 /* Initialize kernel pdir */
1684 for (i = 0; i < kernel_ptbls; i++)
1685 kernel_pmap->pm_pdir[kptbl_min + i] =
1686 (pte_t *)(pdir + (i * PAGE_SIZE * PTBL_PAGES));
1689 * Fill in PTEs covering kernel code and data. They are not required
1690 * for address translation, as this area is covered by static TLB1
1691 * entries, but for pte_vatopa() to work correctly with kernel area
1694 for (va = addr; va < data_end; va += PAGE_SIZE) {
1695 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
1696 *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
1697 *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1698 PTE_VALID | PTE_PS_4KB;
1703 /**************************************************************************/
1705 /**************************************************************************/
1708 * This is called during booke_init, before the system is really initialized.
1711 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
1713 vm_paddr_t phys_kernelend;
1714 struct mem_region *mp, *mp1;
1716 vm_paddr_t s, e, sz;
1717 vm_paddr_t physsz, hwphyssz;
1718 u_int phys_avail_count;
1719 vm_size_t kstack0_sz;
1720 vm_offset_t kernel_pdir, kstack0;
1721 vm_paddr_t kstack0_phys;
1724 debugf("mmu_booke_bootstrap: entered\n");
1726 /* Set interesting system properties */
1727 #ifdef __powerpc64__
1732 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
1736 /* Initialize invalidation mutex */
1737 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
1739 /* Read TLB0 size and associativity. */
1743 * Align kernel start and end address (kernel image).
1744 * Note that kernel end does not necessarily relate to kernsize.
1745 * kernsize is the size of the kernel that is actually mapped.
1747 kernstart = trunc_page(start);
1748 data_start = round_page(kernelend);
1749 data_end = data_start;
1752 * Addresses of preloaded modules (like file systems) use
1753 * physical addresses. Make sure we relocate those into
1754 * virtual addresses.
1756 preload_addr_relocate = kernstart - kernload;
1758 /* Allocate the dynamic per-cpu area. */
1759 dpcpu = (void *)data_end;
1760 data_end += DPCPU_SIZE;
1762 /* Allocate space for the message buffer. */
1763 msgbufp = (struct msgbuf *)data_end;
1764 data_end += msgbufsize;
1765 debugf(" msgbufp at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1766 (uintptr_t)msgbufp, data_end);
1768 data_end = round_page(data_end);
1770 /* Allocate space for ptbl_bufs. */
1771 ptbl_bufs = (struct ptbl_buf *)data_end;
1772 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
1773 debugf(" ptbl_bufs at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1774 (uintptr_t)ptbl_bufs, data_end);
1776 data_end = round_page(data_end);
1778 /* Allocate PTE tables for kernel KVA. */
1779 kernel_pdir = data_end;
1780 kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
1782 #ifdef __powerpc64__
1783 kernel_pdirs = howmany(kernel_ptbls, PDIR_NENTRIES);
1784 data_end += kernel_pdirs * PDIR_PAGES * PAGE_SIZE;
1786 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
1787 debugf(" kernel ptbls: %d\n", kernel_ptbls);
1788 debugf(" kernel pdir at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1789 kernel_pdir, data_end);
1791 debugf(" data_end: 0x%"PRI0ptrX"\n", data_end);
1792 if (data_end - kernstart > kernsize) {
1793 kernsize += tlb1_mapin_region(kernstart + kernsize,
1794 kernload + kernsize, (data_end - kernstart) - kernsize);
1796 data_end = kernstart + kernsize;
1797 debugf(" updated data_end: 0x%"PRI0ptrX"\n", data_end);
1800 * Clear the structures - note we can only do it safely after the
1801 * possible additional TLB1 translations are in place (above) so that
1802 * the whole range up to the currently calculated 'data_end' is covered.
1804 dpcpu_init(dpcpu, 0);
1805 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
1806 #ifdef __powerpc64__
1807 memset((void *)kernel_pdir, 0,
1808 kernel_pdirs * PDIR_PAGES * PAGE_SIZE +
1809 kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1811 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1814 /*******************************************************/
1815 /* Set the start and end of kva. */
1816 /*******************************************************/
1817 virtual_avail = round_page(data_end);
1818 virtual_end = VM_MAX_KERNEL_ADDRESS;
1820 /* Allocate KVA space for page zero/copy operations. */
1821 zero_page_va = virtual_avail;
1822 virtual_avail += PAGE_SIZE;
1823 copy_page_src_va = virtual_avail;
1824 virtual_avail += PAGE_SIZE;
1825 copy_page_dst_va = virtual_avail;
1826 virtual_avail += PAGE_SIZE;
1827 debugf("zero_page_va = 0x%"PRI0ptrX"\n", zero_page_va);
1828 debugf("copy_page_src_va = 0x%"PRI0ptrX"\n", copy_page_src_va);
1829 debugf("copy_page_dst_va = 0x%"PRI0ptrX"\n", copy_page_dst_va);
1831 /* Initialize page zero/copy mutexes. */
1832 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
1833 mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
1835 /* Allocate KVA space for ptbl bufs. */
1836 ptbl_buf_pool_vabase = virtual_avail;
1837 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
1838 debugf("ptbl_buf_pool_vabase = 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1839 ptbl_buf_pool_vabase, virtual_avail);
1841 /* Calculate corresponding physical addresses for the kernel region. */
1842 phys_kernelend = kernload + kernsize;
1843 debugf("kernel image and allocated data:\n");
1844 debugf(" kernload = 0x%09llx\n", (uint64_t)kernload);
1845 debugf(" kernstart = 0x%"PRI0ptrX"\n", kernstart);
1846 debugf(" kernsize = 0x%"PRI0ptrX"\n", kernsize);
1848 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
1849 panic("mmu_booke_bootstrap: phys_avail too small");
1852 * Remove kernel physical address range from avail regions list. Page
1853 * align all regions. Non-page aligned memory isn't very interesting
1854 * to us. Also, sort the entries for ascending addresses.
1857 /* Retrieve phys/avail mem regions */
1858 mem_regions(&physmem_regions, &physmem_regions_sz,
1859 &availmem_regions, &availmem_regions_sz);
1861 cnt = availmem_regions_sz;
1862 debugf("processing avail regions:\n");
1863 for (mp = availmem_regions; mp->mr_size; mp++) {
1865 e = mp->mr_start + mp->mr_size;
1866 debugf(" %09jx-%09jx -> ", (uintmax_t)s, (uintmax_t)e);
1867 /* Check whether this region holds all of the kernel. */
1868 if (s < kernload && e > phys_kernelend) {
1869 availmem_regions[cnt].mr_start = phys_kernelend;
1870 availmem_regions[cnt++].mr_size = e - phys_kernelend;
1873 /* Look whether this region starts within the kernel. */
1874 if (s >= kernload && s < phys_kernelend) {
1875 if (e <= phys_kernelend)
1879 /* Now look whether this region ends within the kernel. */
1880 if (e > kernload && e <= phys_kernelend) {
1885 /* Now page align the start and size of the region. */
1891 debugf("%09jx-%09jx = %jx\n",
1892 (uintmax_t)s, (uintmax_t)e, (uintmax_t)sz);
1894 /* Check whether some memory is left here. */
1898 (cnt - (mp - availmem_regions)) * sizeof(*mp));
1904 /* Do an insertion sort. */
1905 for (mp1 = availmem_regions; mp1 < mp; mp1++)
1906 if (s < mp1->mr_start)
1909 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1917 availmem_regions_sz = cnt;
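	/*
	 * Worked example (hypothetical numbers): if an avail region spans
	 * 0x0000_0000 - 0x7fff_ffff and the kernel image plus bootstrap data
	 * occupy 0x0100_0000 - 0x02ff_ffff, the loop above trims the region
	 * to end at 0x00ff_ffff and appends a new entry starting at
	 * 0x0300_0000, so the kernel range never appears in phys_avail[].
	 */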
1919 /*******************************************************/
1920 /* Steal physical memory for kernel stack from the end */
1921 /* of the first avail region */
1922 /*******************************************************/
1923 kstack0_sz = kstack_pages * PAGE_SIZE;
1924 kstack0_phys = availmem_regions[0].mr_start +
1925 availmem_regions[0].mr_size;
1926 kstack0_phys -= kstack0_sz;
1927 availmem_regions[0].mr_size -= kstack0_sz;
1929 /*******************************************************/
1930 /* Fill in phys_avail table, based on availmem_regions */
1931 /*******************************************************/
1932 phys_avail_count = 0;
1935 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1937 debugf("fill in phys_avail:\n");
1938 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
1940 debugf(" region: 0x%jx - 0x%jx (0x%jx)\n",
1941 (uintmax_t)availmem_regions[i].mr_start,
1942 (uintmax_t)availmem_regions[i].mr_start +
1943 availmem_regions[i].mr_size,
1944 (uintmax_t)availmem_regions[i].mr_size);
1946 if (hwphyssz != 0 &&
1947 (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
1948 debugf(" hw.physmem adjust\n");
1949 if (physsz < hwphyssz) {
1950 phys_avail[j] = availmem_regions[i].mr_start;
1952 availmem_regions[i].mr_start +
1960 phys_avail[j] = availmem_regions[i].mr_start;
1961 phys_avail[j + 1] = availmem_regions[i].mr_start +
1962 availmem_regions[i].mr_size;
1964 physsz += availmem_regions[i].mr_size;
1966 physmem = btoc(physsz);
1968 /* Calculate the last available physical address. */
1969 for (i = 0; phys_avail[i + 2] != 0; i += 2)
1971 Maxmem = powerpc_btop(phys_avail[i + 1]);
1973 debugf("Maxmem = 0x%08lx\n", Maxmem);
1974 debugf("phys_avail_count = %d\n", phys_avail_count);
1975 debugf("physsz = 0x%09jx physmem = %jd (0x%09jx)\n",
1976 (uintmax_t)physsz, (uintmax_t)physmem, (uintmax_t)physmem);
1978 #ifdef __powerpc64__
1980 * Map the physical memory contiguously in TLB1.
1981 * Round so it fits into a single mapping.
1983 tlb1_mapin_region(DMAP_BASE_ADDRESS, 0,
1987 /*******************************************************/
1988 /* Initialize (statically allocated) kernel pmap. */
1989 /*******************************************************/
1990 PMAP_LOCK_INIT(kernel_pmap);
1991 #ifndef __powerpc64__
1992 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
1995 debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap);
1996 kernel_pte_alloc(virtual_avail, kernstart, kernel_pdir);
1997 for (i = 0; i < MAXCPU; i++) {
1998 kernel_pmap->pm_tid[i] = TID_KERNEL;
2000 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
2001 tidbusy[i][TID_KERNEL] = kernel_pmap;
2004 /* Mark kernel_pmap active on all CPUs */
2005 CPU_FILL(&kernel_pmap->pm_active);
2008 * Initialize the global pv list lock.
2010 rw_init(&pvh_global_lock, "pmap pv global");
2012 /*******************************************************/
2014 /*******************************************************/
2016 /* Enter kstack0 into kernel map, provide guard page */
2017 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
2018 thread0.td_kstack = kstack0;
2019 thread0.td_kstack_pages = kstack_pages;
2021 debugf("kstack_sz = 0x%08x\n", kstack0_sz);
2022 debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
2023 kstack0_phys, kstack0_phys + kstack0_sz);
2024 debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n",
2025 kstack0, kstack0 + kstack0_sz);
2027 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
2028 for (i = 0; i < kstack_pages; i++) {
2029 mmu_booke_kenter(mmu, kstack0, kstack0_phys);
2030 kstack0 += PAGE_SIZE;
2031 kstack0_phys += PAGE_SIZE;
2034 pmap_bootstrapped = 1;
2036 debugf("virtual_avail = %"PRI0ptrX"\n", virtual_avail);
2037 debugf("virtual_end = %"PRI0ptrX"\n", virtual_end);
2039 debugf("mmu_booke_bootstrap: exit\n");
2046 tlb_entry_t *e, tmp;
2049 /* Prepare TLB1 image for AP processors */
2051 for (i = 0; i < TLB1_ENTRIES; i++) {
2052 tlb1_read_entry(&tmp, i);
2054 if ((tmp.mas1 & MAS1_VALID) && (tmp.mas2 & _TLB_ENTRY_SHARED))
2055 memcpy(e++, &tmp, sizeof(tmp));
2060 pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
2065 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
2066 * have the snapshot of its contents in the s/w __boot_tlb1[] table
2067 * created by tlb1_ap_prep(), so use these values directly to
2068 * (re)program AP's TLB1 hardware.
2070 * Start at index 1 because index 0 has the kernel map.
2072 for (i = 1; i < TLB1_ENTRIES; i++) {
2073 if (__boot_tlb1[i].mas1 & MAS1_VALID)
2074 tlb1_write_entry(&__boot_tlb1[i], i);
2077 set_mas4_defaults();
2082 booke_pmap_init_qpages(void)
2089 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
2090 if (pc->pc_qmap_addr == 0)
2091 panic("pmap_init_qpages: unable to allocate KVA");
2095 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
2098 * Get the physical page address for the given pmap/virtual address.
2101 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
2106 pa = pte_vatopa(mmu, pmap, va);
2113 * Extract the physical page address associated with the given
2114 * kernel virtual address.
2117 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
2123 if (va >= VM_MIN_KERNEL_ADDRESS && va <= VM_MAX_KERNEL_ADDRESS)
2124 p = pte_vatopa(mmu, kernel_pmap, va);
2127 /* Check TLB1 mappings */
2128 for (i = 0; i < TLB1_ENTRIES; i++) {
2129 tlb1_read_entry(&e, i);
2130 if (!(e.mas1 & MAS1_VALID))
2132 if (va >= e.virt && va < e.virt + e.size)
2133 return (e.phys + (va - e.virt));
2141 * Initialize the pmap module.
2142 * Called by vm_init, to initialize any structures that the pmap
2143 * system needs to map virtual memory.
2146 mmu_booke_init(mmu_t mmu)
2148 int shpgperproc = PMAP_SHPGPERPROC;
2151 * Initialize the address space (zone) for the pv entries. Set a
2152 * high water mark so that the system can recover from excessive
2153 * numbers of pv entries.
2155 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
2156 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
2158 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
2159 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
2161 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
2162 pv_entry_high_water = 9 * (pv_entry_max / 10);
2164 uma_zone_reserve_kva(pvzone, pv_entry_max);
2166 /* Pre-fill pvzone with initial number of pv entries. */
2167 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
2169 /* Initialize ptbl allocation. */
2174 * Map a list of wired pages into kernel virtual address space. This is
2175 * intended for temporary mappings which do not need page modification or
2176 * references recorded. Existing mappings in the region are overwritten.
2179 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
2184 while (count-- > 0) {
2185 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2192 * Remove page mappings from kernel virtual address space. Intended for
2193 * temporary mappings entered by mmu_booke_qenter.
2196 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
2201 while (count-- > 0) {
2202 mmu_booke_kremove(mmu, va);
2208 * Map a wired page into kernel virtual address space.
2211 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
2214 mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
2218 mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
2223 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
2224 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
2226 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
2227 flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT;
2228 flags |= PTE_PS_4KB;
2230 pte = pte_find(mmu, kernel_pmap, va);
2231 KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va. NULL PTE"));
2233 mtx_lock_spin(&tlbivax_mutex);
2236 if (PTE_ISVALID(pte)) {
2238 CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
2240 /* Flush entry from TLB0 */
2241 tlb0_flush_entry(va);
2244 *pte = PTE_RPN_FROM_PA(pa) | flags;
2246 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
2247 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
2248 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
2250 /* Flush the real memory from the instruction cache. */
2251 if ((flags & (PTE_I | PTE_G)) == 0)
2252 __syncicache((void *)va, PAGE_SIZE);
2255 mtx_unlock_spin(&tlbivax_mutex);
2259 * Remove a page from the kernel page table.
2262 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
2266 CTR2(KTR_PMAP,"%s: s (va = 0x"PRI0ptrX")\n", __func__, va);
2268 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
2269 (va <= VM_MAX_KERNEL_ADDRESS)),
2270 ("mmu_booke_kremove: invalid va"));
2272 pte = pte_find(mmu, kernel_pmap, va);
2274 if (!PTE_ISVALID(pte)) {
2276 CTR1(KTR_PMAP, "%s: invalid pte", __func__);
2281 mtx_lock_spin(&tlbivax_mutex);
2284 /* Invalidate entry in TLB0, update PTE. */
2285 tlb0_flush_entry(va);
2289 mtx_unlock_spin(&tlbivax_mutex);
2293 * Provide a kernel pointer corresponding to a given userland pointer.
2294 * The returned pointer is valid until the next time this function is
2295 * called in this thread. This is used internally in copyin/copyout.
2298 mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
2299 void **kaddr, size_t ulen, size_t *klen)
2302 if ((uintptr_t)uaddr + ulen > VM_MAXUSER_ADDRESS + PAGE_SIZE)
2305 *kaddr = (void *)(uintptr_t)uaddr;
2313 * Figure out where a given kernel pointer (usually in a fault) points
2314 * to from the VM's perspective, potentially remapping into userland's address space.
2318 mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
2319 vm_offset_t *decoded_addr)
2322 if (addr < VM_MAXUSER_ADDRESS)
2327 *decoded_addr = addr;
2332 * Initialize pmap associated with process 0.
2335 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
2338 PMAP_LOCK_INIT(pmap);
2339 mmu_booke_pinit(mmu, pmap);
2340 PCPU_SET(curpmap, pmap);
2344 * Initialize a preallocated and zeroed pmap structure,
2345 * such as one in a vmspace structure.
2348 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
2352 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
2353 curthread->td_proc->p_pid, curthread->td_proc->p_comm);
2355 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
2357 for (i = 0; i < MAXCPU; i++)
2358 pmap->pm_tid[i] = TID_NONE;
2359 CPU_ZERO(&kernel_pmap->pm_active);
2360 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
2361 #ifdef __powerpc64__
2362 bzero(&pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES);
2363 TAILQ_INIT(&pmap->pm_pdir_list);
2365 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
2367 TAILQ_INIT(&pmap->pm_ptbl_list);
2371 * Release any resources held by the given physical map.
2372 * Called when a pmap initialized by mmu_booke_pinit is being released.
2373 * Should only be called if the map contains no valid mappings.
2376 mmu_booke_release(mmu_t mmu, pmap_t pmap)
2379 KASSERT(pmap->pm_stats.resident_count == 0,
2380 ("pmap_release: pmap resident count %ld != 0",
2381 pmap->pm_stats.resident_count));
2385 * Insert the given physical page at the specified virtual address in the
2386 * target physical map with the protection requested. If specified, the page
2387 * will be wired down.
2390 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2391 vm_prot_t prot, u_int flags, int8_t psind)
2395 rw_wlock(&pvh_global_lock);
2397 error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
2399 rw_wunlock(&pvh_global_lock);
2404 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2405 vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
2410 int error, su, sync;
2412 pa = VM_PAGE_TO_PHYS(m);
2413 su = (pmap == kernel_pmap);
2416 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
2417 // "pa=0x%08x prot=0x%08x flags=%#x)\n",
2418 // (u_int32_t)pmap, su, pmap->pm_tid,
2419 // (u_int32_t)m, va, pa, prot, flags);
2422 KASSERT(((va >= virtual_avail) &&
2423 (va <= VM_MAX_KERNEL_ADDRESS)),
2424 ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
2426 KASSERT((va <= VM_MAXUSER_ADDRESS),
2427 ("mmu_booke_enter_locked: user pmap, non user va"));
2429 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
2430 VM_OBJECT_ASSERT_LOCKED(m->object);
2432 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2435 * If there is an existing mapping, and the physical address has not
2436 * changed, it must be a protection or wiring change.
2438 if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
2439 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
2442 * Before actually updating pte->flags we calculate and
2443 * prepare its new value in a helper var.
2446 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
2448 /* Wiring change, just update stats. */
2449 if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
2450 if (!PTE_ISWIRED(pte)) {
2452 pmap->pm_stats.wired_count++;
2455 if (PTE_ISWIRED(pte)) {
2456 flags &= ~PTE_WIRED;
2457 pmap->pm_stats.wired_count--;
2461 if (prot & VM_PROT_WRITE) {
2462 /* Add write permissions. */
2467 if ((flags & PTE_MANAGED) != 0)
2468 vm_page_aflag_set(m, PGA_WRITEABLE);
2470 /* Handle modified pages, sense modify status. */
2473 * The PTE_MODIFIED flag could be set by underlying
2474 * TLB misses since we last read it (above); other CPUs
2475 * could also update it, so we check the PTE directly
2476 * rather than rely on that saved local flags copy.
2479 if (PTE_ISMODIFIED(pte))
2483 if (prot & VM_PROT_EXECUTE) {
2489 * Check existing flags for execute permissions: if we
2490 * are turning execute permissions on, the icache should be flushed.
2493 if ((*pte & (PTE_UX | PTE_SX)) == 0)
2497 flags &= ~PTE_REFERENCED;
2500 * The new flags value is all calculated -- only now actually
2503 mtx_lock_spin(&tlbivax_mutex);
2506 tlb0_flush_entry(va);
2507 *pte &= ~PTE_FLAGS_MASK;
2511 mtx_unlock_spin(&tlbivax_mutex);
2515 * If there is an existing mapping, but it's for a different
2516 * physical address, pte_enter() will delete the old mapping.
2518 //if ((pte != NULL) && PTE_ISVALID(pte))
2519 // debugf("mmu_booke_enter_locked: replace\n");
2521 // debugf("mmu_booke_enter_locked: new\n");
2523 /* Now set up the flags and install the new mapping. */
2524 flags = (PTE_SR | PTE_VALID);
2530 if (prot & VM_PROT_WRITE) {
2535 if ((m->oflags & VPO_UNMANAGED) == 0)
2536 vm_page_aflag_set(m, PGA_WRITEABLE);
2539 if (prot & VM_PROT_EXECUTE) {
2545 /* If it is wired, update stats. */
2546 if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
2549 error = pte_enter(mmu, pmap, m, va, flags,
2550 (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
2552 return (KERN_RESOURCE_SHORTAGE);
2554 if ((flags & PMAP_ENTER_WIRED) != 0)
2555 pmap->pm_stats.wired_count++;
2557 /* Flush the real memory from the instruction cache. */
2558 if (prot & VM_PROT_EXECUTE)
2562 if (sync && (su || pmap == PCPU_GET(curpmap))) {
2563 __syncicache((void *)va, PAGE_SIZE);
2567 return (KERN_SUCCESS);
2571 * Maps a sequence of resident pages belonging to the same object.
2572 * The sequence begins with the given page m_start. This page is
2573 * mapped at the given virtual address start. Each subsequent page is
2574 * mapped at a virtual address that is offset from start by the same
2575 * amount as the page is offset from m_start within the object. The
2576 * last page in the sequence is the page with the largest offset from
2577 * m_start that can be mapped at a virtual address less than the given
2578 * virtual address end. Not every virtual page between start and end
2579 * is mapped; only those for which a resident page exists with the
2580 * corresponding offset from m_start are mapped.
2583 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
2584 vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
2587 vm_pindex_t diff, psize;
2589 VM_OBJECT_ASSERT_LOCKED(m_start->object);
2591 psize = atop(end - start);
2593 rw_wlock(&pvh_global_lock);
2595 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2596 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
2597 prot & (VM_PROT_READ | VM_PROT_EXECUTE),
2598 PMAP_ENTER_NOSLEEP, 0);
2599 m = TAILQ_NEXT(m, listq);
2601 rw_wunlock(&pvh_global_lock);
2606 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2610 rw_wlock(&pvh_global_lock);
2612 mmu_booke_enter_locked(mmu, pmap, va, m,
2613 prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
2615 rw_wunlock(&pvh_global_lock);
2620 * Remove the given range of addresses from the specified map.
2622 * It is assumed that the start and end are properly rounded to the page size.
2625 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
2630 int su = (pmap == kernel_pmap);
2632 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
2633 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
2636 KASSERT(((va >= virtual_avail) &&
2637 (va <= VM_MAX_KERNEL_ADDRESS)),
2638 ("mmu_booke_remove: kernel pmap, non kernel va"));
2640 KASSERT((va <= VM_MAXUSER_ADDRESS),
2641 ("mmu_booke_remove: user pmap, non user va"));
2644 if (PMAP_REMOVE_DONE(pmap)) {
2645 //debugf("mmu_booke_remove: e (empty)\n");
2649 hold_flag = PTBL_HOLD_FLAG(pmap);
2650 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
2652 rw_wlock(&pvh_global_lock);
2654 for (; va < endva; va += PAGE_SIZE) {
2655 pte = pte_find(mmu, pmap, va);
2656 if ((pte != NULL) && PTE_ISVALID(pte))
2657 pte_remove(mmu, pmap, va, hold_flag);
2660 rw_wunlock(&pvh_global_lock);
2662 //debugf("mmu_booke_remove: e\n");
2666 * Remove physical page from all pmaps in which it resides.
2669 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
2674 rw_wlock(&pvh_global_lock);
2675 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
2676 pvn = TAILQ_NEXT(pv, pv_link);
2678 PMAP_LOCK(pv->pv_pmap);
2679 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
2680 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
2681 PMAP_UNLOCK(pv->pv_pmap);
2683 vm_page_aflag_clear(m, PGA_WRITEABLE);
2684 rw_wunlock(&pvh_global_lock);
2688 * Map a range of physical addresses into kernel virtual address space.
2691 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
2692 vm_paddr_t pa_end, int prot)
2694 vm_offset_t sva = *virt;
2695 vm_offset_t va = sva;
2697 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
2698 // sva, pa_start, pa_end);
2700 while (pa_start < pa_end) {
2701 mmu_booke_kenter(mmu, va, pa_start);
2703 pa_start += PAGE_SIZE;
2707 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
2712 * The pmap must be activated before its address space can be accessed in any way.
2716 mmu_booke_activate(mmu_t mmu, struct thread *td)
2721 pmap = &td->td_proc->p_vmspace->vm_pmap;
2723 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x"PRI0ptrX")",
2724 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2726 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
2730 cpuid = PCPU_GET(cpuid);
2731 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
2732 PCPU_SET(curpmap, pmap);
2734 if (pmap->pm_tid[cpuid] == TID_NONE)
2737 /* Load PID0 register with pmap tid value. */
2738 mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
2739 __asm __volatile("isync");
2741 mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0);
2745 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
2746 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
2750 * Deactivate the specified process's address space.
2753 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
2757 pmap = &td->td_proc->p_vmspace->vm_pmap;
2759 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x"PRI0ptrX,
2760 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2762 td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0);
2764 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
2765 PCPU_SET(curpmap, NULL);
2769 * Copy the range specified by src_addr/len
2770 * from the source map to the range dst_addr/len
2771 * in the destination map.
2773 * This routine is only advisory and need not do anything.
2776 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
2777 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
2783 * Set the physical protection on the specified range of this map as requested.
2786 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2793 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2794 mmu_booke_remove(mmu, pmap, sva, eva);
2798 if (prot & VM_PROT_WRITE)
2802 for (va = sva; va < eva; va += PAGE_SIZE) {
2803 if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2804 if (PTE_ISVALID(pte)) {
2805 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2807 mtx_lock_spin(&tlbivax_mutex);
2810 /* Handle modified pages. */
2811 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
2814 tlb0_flush_entry(va);
2815 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2818 mtx_unlock_spin(&tlbivax_mutex);
2826 * Clear the write and modified bits in each of the given page's mappings.
2829 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
2834 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2835 ("mmu_booke_remove_write: page %p is not managed", m));
2838 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2839 * set by another thread while the object is locked. Thus,
2840 * if PGA_WRITEABLE is clear, no page table entries need updating.
2842 VM_OBJECT_ASSERT_WLOCKED(m->object);
2843 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2845 rw_wlock(&pvh_global_lock);
2846 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2847 PMAP_LOCK(pv->pv_pmap);
2848 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2849 if (PTE_ISVALID(pte)) {
2850 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2852 mtx_lock_spin(&tlbivax_mutex);
2855 /* Handle modified pages. */
2856 if (PTE_ISMODIFIED(pte))
2859 /* Flush mapping from TLB0. */
2860 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2863 mtx_unlock_spin(&tlbivax_mutex);
2866 PMAP_UNLOCK(pv->pv_pmap);
2868 vm_page_aflag_clear(m, PGA_WRITEABLE);
2869 rw_wunlock(&pvh_global_lock);
2873 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2882 va = trunc_page(va);
2883 sz = round_page(sz);
2885 rw_wlock(&pvh_global_lock);
2886 pmap = PCPU_GET(curpmap);
2887 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
2890 pte = pte_find(mmu, pm, va);
2891 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
2897 /* Create a mapping in the active pmap. */
2899 m = PHYS_TO_VM_PAGE(pa);
2901 pte_enter(mmu, pmap, m, addr,
2902 PTE_SR | PTE_VALID | PTE_UR, FALSE);
2903 __syncicache((void *)addr, PAGE_SIZE);
2904 pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
2907 __syncicache((void *)va, PAGE_SIZE);
2912 rw_wunlock(&pvh_global_lock);
2916 * Atomically extract and hold the physical page with the given
2917 * pmap and virtual address pair if that mapping permits the given
2921 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2933 pte = pte_find(mmu, pmap, va);
2934 if ((pte != NULL) && PTE_ISVALID(pte)) {
2935 if (pmap == kernel_pmap)
2940 if ((*pte & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2941 if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2943 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2954 * Initialize a vm_page's machine-dependent fields.
2957 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2960 m->md.pv_tracked = 0;
2961 TAILQ_INIT(&m->md.pv_list);
2965 * mmu_booke_zero_page_area zeros the specified hardware page by
2966 * mapping it into virtual memory and using bzero to clear its contents.
2969 * off and size must reside within a single page.
2972 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2976 /* XXX KASSERT off and size are within a single page? */
2978 mtx_lock(&zero_page_mutex);
2981 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2982 bzero((caddr_t)va + off, size);
2983 mmu_booke_kremove(mmu, va);
2985 mtx_unlock(&zero_page_mutex);
2989 * mmu_booke_zero_page zeros the specified hardware page.
2992 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2994 vm_offset_t off, va;
2996 mtx_lock(&zero_page_mutex);
2999 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
3000 for (off = 0; off < PAGE_SIZE; off += cacheline_size)
3001 __asm __volatile("dcbz 0,%0" :: "r"(va + off));
3002 mmu_booke_kremove(mmu, va);
3004 mtx_unlock(&zero_page_mutex);
3008 * mmu_booke_copy_page copies the specified (machine independent) page by
3009 * mapping the page into virtual memory and using memcpy to copy the page,
3010 * one machine dependent page at a time.
3013 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
3015 vm_offset_t sva, dva;
3017 sva = copy_page_src_va;
3018 dva = copy_page_dst_va;
3020 mtx_lock(©_page_mutex);
3021 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
3022 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
3023 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
3024 mmu_booke_kremove(mmu, dva);
3025 mmu_booke_kremove(mmu, sva);
3026 mtx_unlock(©_page_mutex);
3030 mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
3031 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
3034 vm_offset_t a_pg_offset, b_pg_offset;
3037 mtx_lock(©_page_mutex);
3038 while (xfersize > 0) {
3039 a_pg_offset = a_offset & PAGE_MASK;
3040 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
3041 mmu_booke_kenter(mmu, copy_page_src_va,
3042 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
3043 a_cp = (char *)copy_page_src_va + a_pg_offset;
3044 b_pg_offset = b_offset & PAGE_MASK;
3045 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
3046 mmu_booke_kenter(mmu, copy_page_dst_va,
3047 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
3048 b_cp = (char *)copy_page_dst_va + b_pg_offset;
3049 bcopy(a_cp, b_cp, cnt);
3050 mmu_booke_kremove(mmu, copy_page_dst_va);
3051 mmu_booke_kremove(mmu, copy_page_src_va);
3056 mtx_unlock(©_page_mutex);
3060 mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
3067 paddr = VM_PAGE_TO_PHYS(m);
3069 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
3070 flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;
3071 flags |= PTE_PS_4KB;
3074 qaddr = PCPU_GET(qmap_addr);
3076 pte = pte_find(mmu, kernel_pmap, qaddr);
3078 KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy"));
3081 * XXX: tlbivax is broadcast to other cores, but qaddr should
3082 * not be present in other TLBs. Is there a better instruction
3083 * sequence to use? Or just forget it & use mmu_booke_kenter()...
3085 __asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
3086 __asm __volatile("isync; msync");
3088 *pte = PTE_RPN_FROM_PA(paddr) | flags;
3090 /* Flush the real memory from the instruction cache. */
3091 if ((flags & (PTE_I | PTE_G)) == 0)
3092 __syncicache((void *)qaddr, PAGE_SIZE);
3098 mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
3102 pte = pte_find(mmu, kernel_pmap, addr);
3104 KASSERT(PCPU_GET(qmap_addr) == addr,
3105 ("mmu_booke_quick_remove_page: invalid address"));
3107 ("mmu_booke_quick_remove_page: PTE not in use"));
3114 * Return whether or not the specified physical page was modified
3115 * in any physical map.
3118 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
3124 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3125 ("mmu_booke_is_modified: page %p is not managed", m));
3129 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3130 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
3131 * is clear, no PTEs can be modified.
3133 VM_OBJECT_ASSERT_WLOCKED(m->object);
3134 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3136 rw_wlock(&pvh_global_lock);
3137 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3138 PMAP_LOCK(pv->pv_pmap);
3139 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3141 if (PTE_ISMODIFIED(pte))
3144 PMAP_UNLOCK(pv->pv_pmap);
3148 rw_wunlock(&pvh_global_lock);
3153 * Return whether or not the specified virtual address is eligible for prefault.
3157 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
3164 * Return whether or not the specified physical page was referenced
3165 * in any physical map.
3168 mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
3174 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3175 ("mmu_booke_is_referenced: page %p is not managed", m));
3177 rw_wlock(&pvh_global_lock);
3178 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3179 PMAP_LOCK(pv->pv_pmap);
3180 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3182 if (PTE_ISREFERENCED(pte))
3185 PMAP_UNLOCK(pv->pv_pmap);
3189 rw_wunlock(&pvh_global_lock);
3194 * Clear the modify bits on the specified physical page.
3197 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
3202 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3203 ("mmu_booke_clear_modify: page %p is not managed", m));
3204 VM_OBJECT_ASSERT_WLOCKED(m->object);
3205 KASSERT(!vm_page_xbusied(m),
3206 ("mmu_booke_clear_modify: page %p is exclusive busied", m));
3209 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
3210 * If the object containing the page is locked and the page is not
3211 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
3213 if ((m->aflags & PGA_WRITEABLE) == 0)
3215 rw_wlock(&pvh_global_lock);
3216 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3217 PMAP_LOCK(pv->pv_pmap);
3218 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3220 mtx_lock_spin(&tlbivax_mutex);
3223 if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
3224 tlb0_flush_entry(pv->pv_va);
3225 *pte &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
3230 mtx_unlock_spin(&tlbivax_mutex);
3232 PMAP_UNLOCK(pv->pv_pmap);
3234 rw_wunlock(&pvh_global_lock);
3238 * Return a count of reference bits for a page, clearing those bits.
3239 * It is not necessary for every reference bit to be cleared, but it
3240 * is necessary that 0 only be returned when there are truly no
3241 * reference bits set.
3243 * As an optimization, update the page's dirty field if a modified bit is
3244 * found while counting reference bits. This opportunistic update can be
3245 * performed at low cost and can eliminate the need for some future calls
3246 * to pmap_is_modified(). However, since this function stops after
3247 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
3248 * dirty pages. Those dirty pages will only be detected by a future call
3249 * to pmap_is_modified().
3252 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
3258 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3259 ("mmu_booke_ts_referenced: page %p is not managed", m));
3261 rw_wlock(&pvh_global_lock);
3262 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3263 PMAP_LOCK(pv->pv_pmap);
3264 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3266 if (PTE_ISMODIFIED(pte))
3268 if (PTE_ISREFERENCED(pte)) {
3269 mtx_lock_spin(&tlbivax_mutex);
3272 tlb0_flush_entry(pv->pv_va);
3273 *pte &= ~PTE_REFERENCED;
3276 mtx_unlock_spin(&tlbivax_mutex);
3278 if (++count >= PMAP_TS_REFERENCED_MAX) {
3279 PMAP_UNLOCK(pv->pv_pmap);
3284 PMAP_UNLOCK(pv->pv_pmap);
3286 rw_wunlock(&pvh_global_lock);
3291 * Clear the wired attribute from the mappings for the specified range of
3292 * addresses in the given pmap. Every valid mapping within that range must
3293 * have the wired attribute set. In contrast, invalid mappings cannot have
3294 * the wired attribute set, so they are ignored.
3296 * The wired attribute of the page table entry is not a hardware feature, so
3297 * there is no need to invalidate any TLB entries.
3300 mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3306 for (va = sva; va < eva; va += PAGE_SIZE) {
3307 if ((pte = pte_find(mmu, pmap, va)) != NULL &&
3309 if (!PTE_ISWIRED(pte))
3310 panic("mmu_booke_unwire: pte %p isn't wired",
3313 pmap->pm_stats.wired_count--;
3321 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
3322 * page. This count may be changed upwards or downwards in the future; it is
3323 * only necessary that true be returned for a small subset of pmaps for proper page aging.
3327 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
3333 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3334 ("mmu_booke_page_exists_quick: page %p is not managed", m));
3337 rw_wlock(&pvh_global_lock);
3338 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3339 if (pv->pv_pmap == pmap) {
3346 rw_wunlock(&pvh_global_lock);
3351 * Return the number of managed mappings to the given physical page that are wired.
3355 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
3361 if ((m->oflags & VPO_UNMANAGED) != 0)
3363 rw_wlock(&pvh_global_lock);
3364 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3365 PMAP_LOCK(pv->pv_pmap);
3366 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
3367 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
3369 PMAP_UNLOCK(pv->pv_pmap);
3371 rw_wunlock(&pvh_global_lock);
3376 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3382 * This currently does not work for entries that
3383 * overlap TLB1 entries.
3385 for (i = 0; i < TLB1_ENTRIES; i ++) {
3386 if (tlb1_iomapped(i, pa, size, &va) == 0)
3394 mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
3400 /* Minidumps are based on virtual memory addresses. */
3402 *va = (void *)(vm_offset_t)pa;
3406 /* Raw physical memory dumps don't have a virtual address. */
3407 /* We always map 256MB-aligned, 256MB pages. */
3408 gran = 256 * 1024 * 1024;
3409 ppa = rounddown2(pa, gran);
3412 tlb1_set_entry((vm_offset_t)va, ppa, gran, _TLB_ENTRY_IO);
3414 if (sz > (gran - ofs))
3415 tlb1_set_entry((vm_offset_t)(va + gran), ppa + gran, gran,
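/*
 * Worked example (editor's note, not part of the original source), with
 * made-up numbers: for pa = 0x11f000000 and gran = 256MB, ppa becomes
 * 0x110000000 and the offset into that window is 0x0f000000 (240MB).
 * A chunk of sz = 32MB does not fit in the remaining 16MB
 * (sz > gran - ofs), so a second 256MB entry is wired at ppa + gran to
 * cover the tail.
 */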
3420 mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
3428 /* Minidumps are based on virtual memory addresses. */
3429 /* Nothing to do... */
3433 for (i = 0; i < TLB1_ENTRIES; i++) {
3434 tlb1_read_entry(&e, i);
3435 if (!(e.mas1 & MAS1_VALID))
3439 /* Raw physical memory dumps don't have a virtual address. */
3444 tlb1_write_entry(&e, i);
3446 gran = 256 * 1024 * 1024;
3447 ppa = rounddown2(pa, gran);
3449 if (sz > (gran - ofs)) {
3454 tlb1_write_entry(&e, i);
3458 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
3461 mmu_booke_scan_init(mmu_t mmu)
3468 /* Initialize phys. segments for dumpsys(). */
3469 memset(&dump_map, 0, sizeof(dump_map));
3470 mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions,
3471 &availmem_regions_sz);
3472 for (i = 0; i < physmem_regions_sz; i++) {
3473 dump_map[i].pa_start = physmem_regions[i].mr_start;
3474 dump_map[i].pa_size = physmem_regions[i].mr_size;
3479 /* Virtual segments for minidumps: */
3480 memset(&dump_map, 0, sizeof(dump_map));
3482 /* 1st: kernel .data and .bss. */
3483 dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
3484 dump_map[0].pa_size =
3485 round_page((uintptr_t)_end) - dump_map[0].pa_start;
3487 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
3488 dump_map[1].pa_start = data_start;
3489 dump_map[1].pa_size = data_end - data_start;
3491 /* 3rd: kernel VM. */
3492 va = dump_map[1].pa_start + dump_map[1].pa_size;
3493 /* Find start of next chunk (from va). */
3494 while (va < virtual_end) {
3495 /* Don't dump the buffer cache. */
3496 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
3497 va = kmi.buffer_eva;
3500 pte = pte_find(mmu, kernel_pmap, va);
3501 if (pte != NULL && PTE_ISVALID(pte))
3505 if (va < virtual_end) {
3506 dump_map[2].pa_start = va;
3508 /* Find last page in chunk. */
3509 while (va < virtual_end) {
3510 /* Don't run into the buffer cache. */
3511 if (va == kmi.buffer_sva)
3513 pte = pte_find(mmu, kernel_pmap, va);
3514 if (pte == NULL || !PTE_ISVALID(pte))
3518 dump_map[2].pa_size = va - dump_map[2].pa_start;
3523 * Map a set of physical memory pages into the kernel virtual address space.
3524 * Return a pointer to where it is mapped. This routine is intended to be used
3525 * for mapping device memory, NOT real memory.
3528 mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3531 return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
3535 mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
3539 uintptr_t va, tmpva;
3544 * Check if this is premapped in TLB1. Note: this should probably also
3545 * check whether a sequence of TLB1 entries exists that matches the
3546 * requirement, but for now it only checks the easy case.
3548 for (i = 0; i < TLB1_ENTRIES; i++) {
3549 tlb1_read_entry(&e, i);
3550 if (!(e.mas1 & MAS1_VALID))
3553 (pa + size) <= (e.phys + e.size) &&
3554 (ma == VM_MEMATTR_DEFAULT ||
3555 tlb_calc_wimg(pa, ma) ==
3556 (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED))))
3557 return (void *)(e.virt +
3558 (vm_offset_t)(pa - e.phys));
3561 size = roundup(size, PAGE_SIZE);
3564 * The device mapping area is between VM_MAXUSER_ADDRESS and
3565 * VM_MIN_KERNEL_ADDRESS. This gives 1GB of device addressing.
3567 #ifdef SPARSE_MAPDEV
3569 * With a sparse mapdev, align to the largest starting region. This
3570 * could feasibly be optimized for a 'best-fit' alignment, but that
3571 * calculation could be very costly.
3572 * Align to the smaller of:
3573 * - first set bit in overlap of (pa & size mask)
3574 * - largest size envelope
3576 * It's possible the device mapping may start at a PA that's not larger
3577 * than the size mask, so we need to offset in to maximize the TLB entry
3578 * range and minimize the number of used TLB entries.
3581 tmpva = tlb1_map_base;
3582 sz = ffsl(((1 << flsl(size-1)) - 1) & pa);
3583 sz = sz ? min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1;
3584 va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa);
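/*
 * Worked example (editor's note, not part of the original source), with
 * made-up numbers: for size = 1MB and pa = 0xffe05000 the size mask is
 * 0xfffff, so pa & mask = 0x5000 and ffsl() of that is 13.  Rounding
 * 13 + 3 up to a multiple of 4 gives sz = 16, within the flsl(size) - 1
 * = 20 cap, so va is aligned to 1 << 16 (64KB) and its low 16 bits are
 * copied from the PA (0x5000).  VA and PA then stay congruent for any
 * TLB1 entry size up to 64KB.
 */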
3585 #ifdef __powerpc64__
3586 } while (!atomic_cmpset_long(&tlb1_map_base, tmpva, va + size));
3588 } while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size));
3591 #ifdef __powerpc64__
3592 va = atomic_fetchadd_long(&tlb1_map_base, size);
3594 va = atomic_fetchadd_int(&tlb1_map_base, size);
3600 sz = 1 << (ilog2(size) & ~1);
3601 /* Align size to PA */
3605 } while (pa % sz != 0);
3607 /* Now align from there to VA */
3611 } while (va % sz != 0);
3614 printf("Wiring VA=%lx to PA=%jx (size=%lx)\n",
3615 va, (uintmax_t)pa, sz);
3616 if (tlb1_set_entry(va, pa, sz,
3617 _TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma)) < 0)
3628 * 'Unmap' a range mapped by mmu_booke_mapdev().
3631 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
3633 #ifdef SUPPORTS_SHRINKING_TLB1
3634 vm_offset_t base, offset;
3637 * Unmap only if this is inside kernel virtual space.
3639 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
3640 base = trunc_page(va);
3641 offset = va & PAGE_MASK;
3642 size = roundup(offset + size, PAGE_SIZE);
3643 kva_free(base, size);
3649 * mmu_booke_object_init_pt preloads the ptes for a given object into the
3650 * specified pmap. This eliminates the blast of soft faults on process startup
3651 * and immediately after an mmap.
3654 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
3655 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
3658 VM_OBJECT_ASSERT_WLOCKED(object);
3659 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3660 ("mmu_booke_object_init_pt: non-device object"));
3664 * Perform the pmap work for mincore.
3667 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
3668 vm_paddr_t *locked_pa)
3671 /* XXX: this should be implemented at some point */
3676 mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
3684 /* Check TLB1 mappings */
3685 for (i = 0; i < TLB1_ENTRIES; i++) {
3686 tlb1_read_entry(&e, i);
3687 if (!(e.mas1 & MAS1_VALID))
3689 if (addr >= e.virt && addr < e.virt + e.size)
3692 if (i < TLB1_ENTRIES) {
3693 /* Only allow full mappings to be modified for now. */
3694 /* Validate the range. */
3695 for (j = i, va = addr; va < addr + sz; va += e.size, j++) {
3696 tlb1_read_entry(&e, j);
3697 if (va != e.virt || (sz - (va - addr) < e.size))
3700 for (va = addr; va < addr + sz; va += e.size, i++) {
3701 tlb1_read_entry(&e, i);
3702 e.mas2 &= ~MAS2_WIMGE_MASK;
3703 e.mas2 |= tlb_calc_wimg(e.phys, mode);
3706 * Write it out to the TLB. Should really re-sync with other cores.
3709 tlb1_write_entry(&e, i);
3714 /* Not in TLB1, try through pmap */
3715 /* First validate the range. */
3716 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
3717 pte = pte_find(mmu, kernel_pmap, va);
3718 if (pte == NULL || !PTE_ISVALID(pte))
3722 mtx_lock_spin(&tlbivax_mutex);
3724 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
3725 pte = pte_find(mmu, kernel_pmap, va);
3726 *pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
3727 *pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
3728 tlb0_flush_entry(va);
3731 mtx_unlock_spin(&tlbivax_mutex);
3736 /**************************************************************************/
3738 /**************************************************************************/
3741 * Allocate a TID. If necessary, steal one from someone else.
3742 * The new TID is flushed from the TLB before returning.
3745 tid_alloc(pmap_t pmap)
3750 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
3752 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
3754 thiscpu = PCPU_GET(cpuid);
3756 tid = PCPU_GET(booke.tid_next);
3759 PCPU_SET(booke.tid_next, tid + 1);
3761 /* If we are stealing a TID, clear the relevant pmap's field */
3762 if (tidbusy[thiscpu][tid] != NULL) {
3764 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
3766 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
3768 /* Flush all entries from TLB0 matching this TID. */
3772 tidbusy[thiscpu][tid] = pmap;
3773 pmap->pm_tid[thiscpu] = tid;
3774 __asm __volatile("msync; isync");
3776 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
3777 PCPU_GET(booke.tid_next));
3782 /**************************************************************************/
3784 /**************************************************************************/
3787 #ifdef __powerpc64__
3788 tlb_print_entry(int i, uint32_t mas1, uint64_t mas2, uint32_t mas3,
3790 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
3801 if (mas1 & MAS1_VALID)
3806 if (mas1 & MAS1_IPROT)
3811 as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
3812 tid = MAS1_GETTID(mas1);
3814 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3817 size = tsize2size(tsize);
3819 debugf("%3d: (%s) [AS=%d] "
3820 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
3821 "mas2(va) = 0x%"PRI0ptrX" mas3(pa) = 0x%08x mas7 = 0x%08x\n",
3822 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
3825 /* Convert TLB0 va and way number to tlb0[] table index. */
3826 static inline unsigned int
3827 tlb0_tableidx(vm_offset_t va, unsigned int way)
3831 idx = (way * TLB0_ENTRIES_PER_WAY);
3832 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
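/*
 * Illustrative note (editor's note, not part of the original source):
 * the index is "way * entries-per-way + set", with the set taken from
 * the EPN bits just above the page offset.  Assuming, hypothetically,
 * 128 entries per way, va = 0xc0103000 has EPN 0xc0103; its low 7 bits
 * give set 3, so way 2 yields idx = 2 * 128 + 3 = 259.  The actual
 * shift and mask come from MAS2_TLB0_ENTRY_IDX_SHIFT/MASK for the core
 * in question.
 */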
3837 * Invalidate TLB0 entry.
3840 tlb0_flush_entry(vm_offset_t va)
3843 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
3845 mtx_assert(&tlbivax_mutex, MA_OWNED);
3847 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
3848 __asm __volatile("isync; msync");
3849 __asm __volatile("tlbsync; msync");
3851 CTR1(KTR_PMAP, "%s: e", __func__);
3854 /* Print out contents of the MAS registers for each TLB0 entry */
3856 tlb0_print_tlbentries(void)
3858 uint32_t mas0, mas1, mas3, mas7;
3859 #ifdef __powerpc64__
3864 int entryidx, way, idx;
3866 debugf("TLB0 entries:\n");
3867 for (way = 0; way < TLB0_WAYS; way ++)
3868 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
3870 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
3871 mtspr(SPR_MAS0, mas0);
3872 __asm __volatile("isync");
3874 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
3875 mtspr(SPR_MAS2, mas2);
3877 __asm __volatile("isync; tlbre");
3879 mas1 = mfspr(SPR_MAS1);
3880 mas2 = mfspr(SPR_MAS2);
3881 mas3 = mfspr(SPR_MAS3);
3882 mas7 = mfspr(SPR_MAS7);
3884 idx = tlb0_tableidx(mas2, way);
3885 tlb_print_entry(idx, mas1, mas2, mas3, mas7);
3889 /**************************************************************************/
3891 /**************************************************************************/
3894 * TLB1 mapping notes:
3896 * TLB1[0] Kernel text and data.
3897 * TLB1[1-15] Additional kernel text and data mappings (if required), PCI
3898 * windows, other device mappings.
3902 * Read an entry from given TLB1 slot.
3905 tlb1_read_entry(tlb_entry_t *entry, unsigned int slot)
3910 KASSERT((entry != NULL), ("%s(): Entry is NULL!", __func__));
3913 __asm __volatile("wrteei 0");
3915 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(slot);
3916 mtspr(SPR_MAS0, mas0);
3917 __asm __volatile("isync; tlbre");
3919 entry->mas1 = mfspr(SPR_MAS1);
3920 entry->mas2 = mfspr(SPR_MAS2);
3921 entry->mas3 = mfspr(SPR_MAS3);
3923 switch ((mfpvr() >> 16) & 0xFFFF) {
3928 entry->mas7 = mfspr(SPR_MAS7);
3936 entry->virt = entry->mas2 & MAS2_EPN_MASK;
3937 entry->phys = ((vm_paddr_t)(entry->mas7 & MAS7_RPN) << 32) |
3938 (entry->mas3 & MAS3_RPN);
3940 tsize2size((entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT);
3943 struct tlbwrite_args {
3949 tlb1_write_entry_int(void *arg)
3951 struct tlbwrite_args *args = arg;
3955 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(args->idx);
3957 mtspr(SPR_MAS0, mas0);
3958 __asm __volatile("isync");
3959 mtspr(SPR_MAS1, args->e->mas1);
3960 __asm __volatile("isync");
3961 mtspr(SPR_MAS2, args->e->mas2);
3962 __asm __volatile("isync");
3963 mtspr(SPR_MAS3, args->e->mas3);
3964 __asm __volatile("isync");
3965 switch ((mfpvr() >> 16) & 0xFFFF) {
3970 __asm __volatile("isync");
3973 mtspr(SPR_MAS7, args->e->mas7);
3974 __asm __volatile("isync");
3980 __asm __volatile("tlbwe; isync; msync");
3985 tlb1_write_entry_sync(void *arg)
3987 /* Empty synchronization point for smp_rendezvous(). */
3991 * Write given entry to TLB1 hardware.
3994 tlb1_write_entry(tlb_entry_t *e, unsigned int idx)
3996 struct tlbwrite_args args;
4002 if ((e->mas2 & _TLB_ENTRY_SHARED) && smp_started) {
4004 smp_rendezvous(tlb1_write_entry_sync,
4005 tlb1_write_entry_int,
4006 tlb1_write_entry_sync, &args);
4013 __asm __volatile("wrteei 0");
4014 tlb1_write_entry_int(&args);
4020 * Return the largest uint value log such that 2^log <= num.
4023 ilog2(unsigned long num)
4027 #ifdef __powerpc64__
4028 __asm ("cntlzd %0, %1" : "=r" (lz) : "r" (num));
4031 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
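/*
 * Editor's note (not part of the original source): cntlzw/cntlzd return
 * the number of leading zero bits, so the (elided) return presumably
 * works out to 31 - lz (63 - lz on 64-bit).  For example,
 * num = 0x00100000 (1MB) has 11 leading zeros in 32 bits, giving
 * ilog2(1MB) = 31 - 11 = 20.
 */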
4037 * Convert TLB TSIZE value to mapped region size.
4040 tsize2size(unsigned int tsize)
4045 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
4048 return ((1 << (2 * tsize)) * 1024);
4052 * Convert region size (must be power of 4) to TLB TSIZE value.
4055 size2tsize(vm_size_t size)
4058 return (ilog2(size) / 2 - 5);
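/*
 * Editor's note (not part of the original source): the two helpers are
 * inverses for power-of-4 sizes.  E.g. tsize = 7 maps to
 * (1 << 14) * 1024 = 16MB, and size2tsize(16MB) = 24 / 2 - 5 = 7;
 * tsize = 9 likewise corresponds to 256MB.
 */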
4062 * Register permanent kernel mapping in TLB1.
4064 * Entries are installed in the first free (invalid) TLB1 slot found
4065 * and are not supposed to be invalidated.
4068 tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
4075 for (index = 0; index < TLB1_ENTRIES; index++) {
4076 tlb1_read_entry(&e, index);
4077 if ((e.mas1 & MAS1_VALID) == 0)
4079 /* Check if we're just updating the flags, and update them. */
4080 if (e.phys == pa && e.virt == va && e.size == size) {
4081 e.mas2 = (va & MAS2_EPN_MASK) | flags;
4082 tlb1_write_entry(&e, index);
4086 if (index >= TLB1_ENTRIES) {
4087 printf("tlb1_set_entry: TLB1 full!\n");
4091 /* Convert size to TSIZE */
4092 tsize = size2tsize(size);
4094 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
4095 /* XXX TS is hard coded to 0 for now as we only use a single address space */
4096 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
4101 e.mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
4102 e.mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
4103 e.mas2 = (va & MAS2_EPN_MASK) | flags;
4105 /* Set supervisor RWX permission bits */
4106 e.mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
4107 e.mas7 = (pa >> 32) & MAS7_RPN;
4109 tlb1_write_entry(&e, index);
4112 * XXX in general TLB1 updates should be propagated between CPUs,
4113 * since the current design assumes the same TLB1 set-up on all CPUs.
4120 * Map a contiguous RAM region into TLB1 using at most
4121 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
4123 * If necessary, round up the last entry size and return the total size
4124 * used by all allocated entries.
4127 tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
4129 vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
4130 vm_size_t mapped, pgsz, base, mask;
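/*
 * Worked example (editor's note, not part of the original source), with
 * made-up numbers: a 72MB region would typically be covered by one 64MB
 * entry followed by two 4MB entries, since candidate sizes step down in
 * powers of 4; the under-map correction below may instead round the last
 * entry up when the KERNEL_REGION_MAX_TLB_ENTRIES limit is reached.
 */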
4133 /* Round up to the next 1M */
4134 size = roundup2(size, 1 << 20);
4139 pgsz = 64*1024*1024;
4140 while (mapped < size) {
4141 while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
4142 while (pgsz > (size - mapped))
4148 /* We under-map. Correct for this. */
4149 if (mapped < size) {
4150 while (pgs[idx - 1] == pgsz) {
4154 /* XXX We may increase beyond our starting point. */
4163 /* Align address to the boundary */
4165 va = (va + mask) & ~mask;
4166 pa = (pa + mask) & ~mask;
4169 for (idx = 0; idx < nents; idx++) {
4171 debugf("%u: %llx -> %jx, size=%jx\n", idx, pa,
4172 (uintmax_t)va, (uintmax_t)pgsz);
4173 tlb1_set_entry(va, pa, pgsz,
4174 _TLB_ENTRY_SHARED | _TLB_ENTRY_MEM);
4179 mapped = (va - base);
4180 printf("mapped size 0x%"PRI0ptrX" (wasted space 0x%"PRIxPTR")\n",
4181 mapped, mapped - size);
4186 * TLB1 initialization routine, to be called after the very first
4187 * assembler level setup done in locore.S.
4192 uint32_t mas0, mas1, mas2, mas3, mas7;
4197 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
4198 mtspr(SPR_MAS0, mas0);
4199 __asm __volatile("isync; tlbre");
4201 mas1 = mfspr(SPR_MAS1);
4202 mas2 = mfspr(SPR_MAS2);
4203 mas3 = mfspr(SPR_MAS3);
4204 mas7 = mfspr(SPR_MAS7);
4206 kernload = ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) |
4209 tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4210 kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
4212 /* Setup TLB miss defaults */
4213 set_mas4_defaults();
4217 * pmap_early_io_unmap() should be used in close conjunction with
4218 * pmap_early_io_map(), as in the following snippet:
4220 * x = pmap_early_io_map(...);
4221 * <do something with x>
4222 * pmap_early_io_unmap(x, size);
4224 * No further allocations should be made in between.
4227 pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
4233 size = roundup(size, PAGE_SIZE);
4235 for (i = 0; i < TLB1_ENTRIES && size > 0; i++) {
4236 tlb1_read_entry(&e, i);
4237 if (!(e.mas1 & MAS1_VALID))
4239 if (va <= e.virt && (va + isize) >= (e.virt + e.size)) {
4241 e.mas1 &= ~MAS1_VALID;
4242 tlb1_write_entry(&e, i);
4245 if (tlb1_map_base == va + isize)
4246 tlb1_map_base -= isize;
4250 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
4257 KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));
4259 for (i = 0; i < TLB1_ENTRIES; i++) {
4260 tlb1_read_entry(&e, i);
4261 if (!(e.mas1 & MAS1_VALID))
4263 if (pa >= e.phys && (pa + size) <=
4265 return (e.virt + (pa - e.phys));
4268 pa_base = rounddown(pa, PAGE_SIZE);
4269 size = roundup(size + (pa - pa_base), PAGE_SIZE);
4270 tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
4271 va = tlb1_map_base + (pa - pa_base);
4274 sz = 1 << (ilog2(size) & ~1);
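/*
 * Editor's note (not part of the original source): clearing the low bit
 * of ilog2(size) rounds the chunk down to a power of 4, as TLB1 entry
 * sizes require.  E.g. a 192KB request (ilog2 = 17) is wired as a 64KB
 * entry first, with the (elided) loop presumably continuing for the
 * remainder.
 */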
4275 tlb1_set_entry(tlb1_map_base, pa_base, sz,
4276 _TLB_ENTRY_SHARED | _TLB_ENTRY_IO);
4279 tlb1_map_base += sz;
4286 pmap_track_page(pmap_t pmap, vm_offset_t va)
4290 struct pv_entry *pve;
4292 va = trunc_page(va);
4293 pa = pmap_kextract(va);
4294 page = PHYS_TO_VM_PAGE(pa);
4296 rw_wlock(&pvh_global_lock);
4299 TAILQ_FOREACH(pve, &page->md.pv_list, pv_link) {
4300 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
4304 page->md.pv_tracked = true;
4305 pv_insert(pmap, va, page);
4308 rw_wunlock(&pvh_global_lock);
4313 * Setup MAS4 defaults.
4314 * These values are loaded to MAS0-2 on a TLB miss.
4317 set_mas4_defaults(void)
4321 /* Defaults: TLB0, PID0, TSIZED=4K */
4322 mas4 = MAS4_TLBSELD0;
4323 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
4327 mtspr(SPR_MAS4, mas4);
4328 __asm __volatile("isync");
4332 * Print out contents of the MAS registers for each TLB1 entry
4335 tlb1_print_tlbentries(void)
4337 uint32_t mas0, mas1, mas3, mas7;
4338 #ifdef __powerpc64__
4345 debugf("TLB1 entries:\n");
4346 for (i = 0; i < TLB1_ENTRIES; i++) {
4348 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
4349 mtspr(SPR_MAS0, mas0);
4351 __asm __volatile("isync; tlbre");
4353 mas1 = mfspr(SPR_MAS1);
4354 mas2 = mfspr(SPR_MAS2);
4355 mas3 = mfspr(SPR_MAS3);
4356 mas7 = mfspr(SPR_MAS7);
4358 tlb_print_entry(i, mas1, mas2, mas3, mas7);
4363 * Return 0 if the physical IO range is encompassed by one of the
4364 * TLB1 entries, otherwise return a related error code.
4367 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
4370 vm_paddr_t pa_start;
4372 unsigned int entry_tsize;
4373 vm_size_t entry_size;
4376 *va = (vm_offset_t)NULL;
4378 tlb1_read_entry(&e, i);
4379 /* Skip invalid entries */
4380 if (!(e.mas1 & MAS1_VALID))
4384 * The entry must be cache-inhibited, guarded, and r/w
4385 * so it can function as an i/o page
4387 prot = e.mas2 & (MAS2_I | MAS2_G);
4388 if (prot != (MAS2_I | MAS2_G))
4391 prot = e.mas3 & (MAS3_SR | MAS3_SW);
4392 if (prot != (MAS3_SR | MAS3_SW))
4395 /* The address should be within the entry range. */
4396 entry_tsize = (e.mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4397 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
4399 entry_size = tsize2size(entry_tsize);
4400 pa_start = (((vm_paddr_t)e.mas7 & MAS7_RPN) << 32) |
4401 (e.mas3 & MAS3_RPN);
4402 pa_end = pa_start + entry_size;
4404 if ((pa < pa_start) || ((pa + size) > pa_end))
4407 /* Return virtual address of this mapping. */
4408 *va = (e.mas2 & MAS2_EPN_MASK) + (pa - pa_start);
4413 * Invalidate all TLB0 entries which match the given TID. Note this is
4414 * dedicated for cases when invalidations should NOT be propagated to other CPUs.
4418 tid_flush(tlbtid_t tid)
4421 uint32_t mas0, mas1, mas2;
4425 /* Don't evict kernel translations */
4426 if (tid == TID_KERNEL)
4430 __asm __volatile("wrteei 0");
4432 for (way = 0; way < TLB0_WAYS; way++)
4433 for (entry = 0; entry < TLB0_ENTRIES_PER_WAY; entry++) {
4435 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
4436 mtspr(SPR_MAS0, mas0);
4437 __asm __volatile("isync");
4439 mas2 = entry << MAS2_TLB0_ENTRY_IDX_SHIFT;
4440 mtspr(SPR_MAS2, mas2);
4442 __asm __volatile("isync; tlbre");
4444 mas1 = mfspr(SPR_MAS1);
4446 if (!(mas1 & MAS1_VALID))
4448 if (((mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT) != tid)
4450 mas1 &= ~MAS1_VALID;
4451 mtspr(SPR_MAS1, mas1);
4452 __asm __volatile("isync; tlbwe; isync; msync");