1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  * Copyright (c) 2003 Peter Wemm
11  * All rights reserved.
12  * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
13  * All rights reserved.
14  * Copyright (c) 2014 Andrew Turner
15  * All rights reserved.
16  * Copyright (c) 2014 The FreeBSD Foundation
17  * All rights reserved.
18  * Copyright (c) 2015-2018 Ruslan Bukin <br@bsdpad.com>
19  * All rights reserved.
20  *
21  * This code is derived from software contributed to Berkeley by
22  * the Systems Programming Group of the University of Utah Computer
23  * Science Department and William Jolitz of UUNET Technologies Inc.
24  *
25  * Portions of this software were developed by Andrew Turner under
26  * sponsorship from The FreeBSD Foundation.
27  *
28  * Portions of this software were developed by SRI International and the
29  * University of Cambridge Computer Laboratory under DARPA/AFRL contract
30  * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
31  *
32  * Portions of this software were developed by the University of Cambridge
33  * Computer Laboratory as part of the CTSRD Project, with support from the
34  * UK Higher Education Innovation Fund (HEIF).
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. All advertising materials mentioning features or use of this software
45  *    must display the following acknowledgement:
46  *      This product includes software developed by the University of
47  *      California, Berkeley and its contributors.
48  * 4. Neither the name of the University nor the names of its contributors
49  *    may be used to endorse or promote products derived from this software
50  *    without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62  * SUCH DAMAGE.
63  *
64  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
65  */
66 /*-
67  * Copyright (c) 2003 Networks Associates Technology, Inc.
68  * All rights reserved.
69  *
70  * This software was developed for the FreeBSD Project by Jake Burkholder,
71  * Safeport Network Services, and Network Associates Laboratories, the
72  * Security Research Division of Network Associates, Inc. under
73  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
74  * CHATS research program.
75  *
76  * Redistribution and use in source and binary forms, with or without
77  * modification, are permitted provided that the following conditions
78  * are met:
79  * 1. Redistributions of source code must retain the above copyright
80  *    notice, this list of conditions and the following disclaimer.
81  * 2. Redistributions in binary form must reproduce the above copyright
82  *    notice, this list of conditions and the following disclaimer in the
83  *    documentation and/or other materials provided with the distribution.
84  *
85  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
86  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
87  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
88  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
89  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
90  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
91  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
92  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
93  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
94  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
95  * SUCH DAMAGE.
96  */
97
98 #include <sys/cdefs.h>
99 __FBSDID("$FreeBSD$");
100
101 /*
102  *      Manages physical address maps.
103  *
104  *      Since the information managed by this module is
105  *      also stored by the logical address mapping module,
106  *      this module may throw away valid virtual-to-physical
107  *      mappings at almost any time.  However, invalidations
108  *      of virtual-to-physical mappings must be done as
109  *      requested.
110  *
111  *      In order to cope with hardware architectures which
112  *      make virtual-to-physical map invalidates expensive,
113  *      this module may delay invalidate or reduced protection
114  *      operations until such time as they are actually
115  *      necessary.  This module is given full information as
116  *      to which processors are currently using which maps,
117  *      and to when physical maps must be made correct.
118  */
119
120 #include <sys/param.h>
121 #include <sys/systm.h>
122 #include <sys/bitstring.h>
123 #include <sys/bus.h>
124 #include <sys/cpuset.h>
125 #include <sys/kernel.h>
126 #include <sys/ktr.h>
127 #include <sys/lock.h>
128 #include <sys/malloc.h>
129 #include <sys/mman.h>
130 #include <sys/msgbuf.h>
131 #include <sys/mutex.h>
132 #include <sys/proc.h>
133 #include <sys/rwlock.h>
134 #include <sys/sx.h>
135 #include <sys/vmem.h>
136 #include <sys/vmmeter.h>
137 #include <sys/sched.h>
138 #include <sys/sysctl.h>
139 #include <sys/smp.h>
140
141 #include <vm/vm.h>
142 #include <vm/vm_param.h>
143 #include <vm/vm_kern.h>
144 #include <vm/vm_page.h>
145 #include <vm/vm_map.h>
146 #include <vm/vm_object.h>
147 #include <vm/vm_extern.h>
148 #include <vm/vm_pageout.h>
149 #include <vm/vm_pager.h>
150 #include <vm/vm_phys.h>
151 #include <vm/vm_radix.h>
152 #include <vm/vm_reserv.h>
153 #include <vm/uma.h>
154
155 #include <machine/machdep.h>
156 #include <machine/md_var.h>
157 #include <machine/pcb.h>
158 #include <machine/sbi.h>
159
160 #define NUL1E           (Ln_ENTRIES * Ln_ENTRIES)
161 #define NUL2E           (Ln_ENTRIES * NUL1E)
162
163 #if !defined(DIAGNOSTIC)
164 #ifdef __GNUC_GNU_INLINE__
165 #define PMAP_INLINE     __attribute__((__gnu_inline__)) inline
166 #else
167 #define PMAP_INLINE     extern inline
168 #endif
169 #else
170 #define PMAP_INLINE
171 #endif
172
173 #ifdef PV_STATS
174 #define PV_STAT(x)      do { x ; } while (0)
175 #else
176 #define PV_STAT(x)      do { } while (0)
177 #endif
178
179 #define pmap_l2_pindex(v)       ((v) >> L2_SHIFT)
180 #define pa_to_pvh(pa)           (&pv_table[pa_index(pa)])
181
182 #define NPV_LIST_LOCKS  MAXCPU
183
184 #define PHYS_TO_PV_LIST_LOCK(pa)        \
185                         (&pv_list_locks[pmap_l2_pindex(pa) % NPV_LIST_LOCKS])
186
187 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)  do {    \
188         struct rwlock **_lockp = (lockp);               \
189         struct rwlock *_new_lock;                       \
190                                                         \
191         _new_lock = PHYS_TO_PV_LIST_LOCK(pa);           \
192         if (_new_lock != *_lockp) {                     \
193                 if (*_lockp != NULL)                    \
194                         rw_wunlock(*_lockp);            \
195                 *_lockp = _new_lock;                    \
196                 rw_wlock(*_lockp);                      \
197         }                                               \
198 } while (0)
199
200 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)        \
201                         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
202
203 #define RELEASE_PV_LIST_LOCK(lockp)             do {    \
204         struct rwlock **_lockp = (lockp);               \
205                                                         \
206         if (*_lockp != NULL) {                          \
207                 rw_wunlock(*_lockp);                    \
208                 *_lockp = NULL;                         \
209         }                                               \
210 } while (0)
211
212 #define VM_PAGE_TO_PV_LIST_LOCK(m)      \
213                         PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
214
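/*
 * Illustrative sketch (not a new interface) of how the macros above are
 * meant to be used: callers start with no PV list lock held and let
 * CHANGE_PV_LIST_LOCK_TO_VM_PAGE() switch locks as they visit pages that
 * hash to different locks, releasing whatever is held at the end.
 *
 *	struct rwlock *lock = NULL;
 *	...
 *	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
 *	... manipulate m's PV list ...
 *	RELEASE_PV_LIST_LOCK(&lock);
 *
 * A physical address hashes to one of NPV_LIST_LOCKS locks by its 2MB
 * frame index (pmap_l2_pindex() of the PA), so all 4KB pages within the
 * same 2MB frame share a lock.
 */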
215 /* The list of all the user pmaps */
216 LIST_HEAD(pmaplist, pmap);
217 static struct pmaplist allpmaps = LIST_HEAD_INITIALIZER();
218
219 struct pmap kernel_pmap_store;
220
221 vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
222 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
223 vm_offset_t kernel_vm_end = 0;
224
225 vm_paddr_t dmap_phys_base;      /* The start of the dmap region */
226 vm_paddr_t dmap_phys_max;       /* The limit of the dmap region */
227 vm_offset_t dmap_max_addr;      /* The virtual address limit of the dmap */
228
229 /* This code assumes all L1 DMAP entries will be used */
230 CTASSERT((DMAP_MIN_ADDRESS  & ~L1_OFFSET) == DMAP_MIN_ADDRESS);
231 CTASSERT((DMAP_MAX_ADDRESS  & ~L1_OFFSET) == DMAP_MAX_ADDRESS);
232
233 static struct rwlock_padalign pvh_global_lock;
234 static struct mtx_padalign allpmaps_lock;
235
236 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0,
237     "VM/pmap parameters");
238
239 static int superpages_enabled = 1;
240 SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
241     CTLFLAG_RDTUN, &superpages_enabled, 0,
242     "Enable support for transparent superpages");
243
244 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD, 0,
245     "2MB page mapping counters");
246
247 static u_long pmap_l2_demotions;
248 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
249     &pmap_l2_demotions, 0,
250     "2MB page demotions");
251
252 static u_long pmap_l2_mappings;
253 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, mappings, CTLFLAG_RD,
254     &pmap_l2_mappings, 0,
255     "2MB page mappings");
256
257 static u_long pmap_l2_p_failures;
258 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
259     &pmap_l2_p_failures, 0,
260     "2MB page promotion failures");
261
262 static u_long pmap_l2_promotions;
263 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
264     &pmap_l2_promotions, 0,
265     "2MB page promotions");
266
267 /*
268  * Data for the pv entry allocation mechanism
269  */
270 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
271 static struct mtx pv_chunks_mutex;
272 static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
273 static struct md_page *pv_table;
274 static struct md_page pv_dummy;
275
276 extern cpuset_t all_harts;
277
278 /*
279  * Internal flags for pmap_enter()'s helper functions.
280  */
281 #define PMAP_ENTER_NORECLAIM    0x1000000       /* Don't reclaim PV entries. */
282 #define PMAP_ENTER_NOREPLACE    0x2000000       /* Don't replace mappings. */
283
284 static void     free_pv_chunk(struct pv_chunk *pc);
285 static void     free_pv_entry(pmap_t pmap, pv_entry_t pv);
286 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
287 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
288 static void     pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
289 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
290                     vm_offset_t va);
291 static bool     pmap_demote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va);
292 static bool     pmap_demote_l2_locked(pmap_t pmap, pd_entry_t *l2,
293                     vm_offset_t va, struct rwlock **lockp);
294 static int      pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2,
295                     u_int flags, vm_page_t m, struct rwlock **lockp);
296 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
297     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
298 static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
299     pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
300 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
301     vm_page_t m, struct rwlock **lockp);
302
303 static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
304                 struct rwlock **lockp);
305
306 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
307     struct spglist *free);
308 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
309
310 #define pmap_clear(pte)                 pmap_store(pte, 0)
311 #define pmap_clear_bits(pte, bits)      atomic_clear_64(pte, bits)
312 #define pmap_load_store(pte, entry)     atomic_swap_64(pte, entry)
313 #define pmap_load_clear(pte)            pmap_load_store(pte, 0)
314 #define pmap_load(pte)                  atomic_load_64(pte)
315 #define pmap_store(pte, entry)          atomic_store_64(pte, entry)
316 #define pmap_store_bits(pte, bits)      atomic_set_64(pte, bits)
317
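/*
 * All PTE updates go through the 64-bit atomics wrapped above so that a
 * concurrent hardware table walk on another hart never sees a torn
 * entry.  A minimal update sketch (illustration only): write-protect a
 * leaf PTE and then shoot down any cached translation.
 *
 *	pmap_clear_bits(l3, PTE_W);
 *	pmap_invalidate_page(pmap, va);
 */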
318 /********************/
319 /* Inline functions */
320 /********************/
321
322 static __inline void
323 pagecopy(void *s, void *d)
324 {
325
326         memcpy(d, s, PAGE_SIZE);
327 }
328
329 static __inline void
330 pagezero(void *p)
331 {
332
333         bzero(p, PAGE_SIZE);
334 }
335
336 #define pmap_l1_index(va)       (((va) >> L1_SHIFT) & Ln_ADDR_MASK)
337 #define pmap_l2_index(va)       (((va) >> L2_SHIFT) & Ln_ADDR_MASK)
338 #define pmap_l3_index(va)       (((va) >> L3_SHIFT) & Ln_ADDR_MASK)
339
340 #define PTE_TO_PHYS(pte)        ((pte >> PTE_PPN0_S) * PAGE_SIZE)
341
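/*
 * Sketch of the Sv39 split assumed by the macros above: a virtual
 * address carries a 12-bit page offset and three 9-bit table indices,
 * and a PTE stores its physical page number shifted up by PTE_PPN0_S
 * (hence PTE_TO_PHYS() above).  Worked example, assuming L1_SHIFT = 30,
 * L2_SHIFT = 21, L3_SHIFT = 12 and Ln_ADDR_MASK = 0x1ff:
 *
 *	va = 0xc0203456
 *	pmap_l1_index(va) = (va >> 30) & 0x1ff = 0x3
 *	pmap_l2_index(va) = (va >> 21) & 0x1ff = 0x1
 *	pmap_l3_index(va) = (va >> 12) & 0x1ff = 0x3
 *	page offset       =  va & 0xfff        = 0x456
 */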
342 static __inline pd_entry_t *
343 pmap_l1(pmap_t pmap, vm_offset_t va)
344 {
345
346         return (&pmap->pm_l1[pmap_l1_index(va)]);
347 }
348
349 static __inline pd_entry_t *
350 pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va)
351 {
352         vm_paddr_t phys;
353         pd_entry_t *l2;
354
355         phys = PTE_TO_PHYS(pmap_load(l1));
356         l2 = (pd_entry_t *)PHYS_TO_DMAP(phys);
357
358         return (&l2[pmap_l2_index(va)]);
359 }
360
361 static __inline pd_entry_t *
362 pmap_l2(pmap_t pmap, vm_offset_t va)
363 {
364         pd_entry_t *l1;
365
366         l1 = pmap_l1(pmap, va);
367         if ((pmap_load(l1) & PTE_V) == 0)
368                 return (NULL);
369         if ((pmap_load(l1) & PTE_RX) != 0)
370                 return (NULL);
371
372         return (pmap_l1_to_l2(l1, va));
373 }
374
375 static __inline pt_entry_t *
376 pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va)
377 {
378         vm_paddr_t phys;
379         pt_entry_t *l3;
380
381         phys = PTE_TO_PHYS(pmap_load(l2));
382         l3 = (pd_entry_t *)PHYS_TO_DMAP(phys);
383
384         return (&l3[pmap_l3_index(va)]);
385 }
386
387 static __inline pt_entry_t *
388 pmap_l3(pmap_t pmap, vm_offset_t va)
389 {
390         pd_entry_t *l2;
391
392         l2 = pmap_l2(pmap, va);
393         if (l2 == NULL)
394                 return (NULL);
395         if ((pmap_load(l2) & PTE_V) == 0)
396                 return (NULL);
397         if ((pmap_load(l2) & PTE_RX) != 0)
398                 return (NULL);
399
400         return (pmap_l2_to_l3(l2, va));
401 }
402
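/*
 * The walkers above only descend through levels that are present:
 * pmap_l2() and pmap_l3() return NULL when an upper level is missing or
 * is itself a leaf (PTE_RX set), and a non-NULL result must still be
 * checked for validity.  A typical lookup therefore looks roughly like:
 *
 *	PMAP_LOCK(pmap);
 *	l3 = pmap_l3(pmap, va);
 *	if (l3 != NULL && (pmap_load(l3) & PTE_V) != 0)
 *		pa = PTE_TO_PHYS(pmap_load(l3)) | (va & L3_OFFSET);
 *	PMAP_UNLOCK(pmap);
 */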
403 static __inline void
404 pmap_resident_count_inc(pmap_t pmap, int count)
405 {
406
407         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
408         pmap->pm_stats.resident_count += count;
409 }
410
411 static __inline void
412 pmap_resident_count_dec(pmap_t pmap, int count)
413 {
414
415         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
416         KASSERT(pmap->pm_stats.resident_count >= count,
417             ("pmap %p resident count underflow %ld %d", pmap,
418             pmap->pm_stats.resident_count, count));
419         pmap->pm_stats.resident_count -= count;
420 }
421
422 static void
423 pmap_distribute_l1(struct pmap *pmap, vm_pindex_t l1index,
424     pt_entry_t entry)
425 {
426         struct pmap *user_pmap;
427         pd_entry_t *l1;
428
429         /* Distribute new kernel L1 entry to all the user pmaps */
430         if (pmap != kernel_pmap)
431                 return;
432
433         mtx_lock(&allpmaps_lock);
434         LIST_FOREACH(user_pmap, &allpmaps, pm_list) {
435                 l1 = &user_pmap->pm_l1[l1index];
436                 pmap_store(l1, entry);
437         }
438         mtx_unlock(&allpmaps_lock);
439 }
440
441 static pt_entry_t *
442 pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot,
443     u_int *l2_slot)
444 {
445         pt_entry_t *l2;
446         pd_entry_t *l1;
447
448         l1 = (pd_entry_t *)l1pt;
449         *l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK;
450
451         /* Check locore has used a table L1 map */
452         KASSERT((l1[*l1_slot] & PTE_RX) == 0,
453                 ("Invalid bootstrap L1 table"));
454
455         /* Find the address of the L2 table */
456         l2 = (pt_entry_t *)init_pt_va;
457         *l2_slot = pmap_l2_index(va);
458
459         return (l2);
460 }
461
462 static vm_paddr_t
463 pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
464 {
465         u_int l1_slot, l2_slot;
466         pt_entry_t *l2;
467         vm_paddr_t ret;
468
469         l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot);
470
471         /* Check locore has used L2 superpages */
472         KASSERT((l2[l2_slot] & PTE_RX) != 0,
473                 ("Invalid bootstrap L2 table"));
474
475         /* L2 is superpages */
476         ret = (l2[l2_slot] >> PTE_PPN1_S) << L2_SHIFT;
477         ret += (va & L2_OFFSET);
478
479         return (ret);
480 }
481
482 static void
483 pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa, vm_paddr_t max_pa)
484 {
485         vm_offset_t va;
486         vm_paddr_t pa;
487         pd_entry_t *l1;
488         u_int l1_slot;
489         pt_entry_t entry;
490         pn_t pn;
491
492         pa = dmap_phys_base = min_pa & ~L1_OFFSET;
493         va = DMAP_MIN_ADDRESS;
494         l1 = (pd_entry_t *)kern_l1;
495         l1_slot = pmap_l1_index(DMAP_MIN_ADDRESS);
496
497         for (; va < DMAP_MAX_ADDRESS && pa < max_pa;
498             pa += L1_SIZE, va += L1_SIZE, l1_slot++) {
499                 KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
500
501                 /* superpages */
502                 pn = (pa / PAGE_SIZE);
503                 entry = PTE_KERN;
504                 entry |= (pn << PTE_PPN0_S);
505                 pmap_store(&l1[l1_slot], entry);
506         }
507
508         /* Set the upper limit of the DMAP region */
509         dmap_phys_max = pa;
510         dmap_max_addr = va;
511
512         sfence_vma();
513 }
514
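/*
 * After this runs, every physical address in [dmap_phys_base,
 * dmap_phys_max) is reachable through a 1GB L1 leaf mapping at
 * DMAP_MIN_ADDRESS + (pa - dmap_phys_base), which is the relationship
 * that PHYS_TO_DMAP() and DMAP_TO_PHYS() rely on.  Roughly:
 *
 *	va = PHYS_TO_DMAP(pa);	(direct-mapped kernel VA for pa)
 *	pa = DMAP_TO_PHYS(va);	(inverse, for VAs inside the DMAP)
 */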
515 static vm_offset_t
516 pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
517 {
518         vm_offset_t l3pt;
519         pt_entry_t entry;
520         pd_entry_t *l2;
521         vm_paddr_t pa;
522         u_int l2_slot;
523         pn_t pn;
524
525         KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));
526
527         l2 = pmap_l2(kernel_pmap, va);
528         l2 = (pd_entry_t *)((uintptr_t)l2 & ~(PAGE_SIZE - 1));
529         l2_slot = pmap_l2_index(va);
530         l3pt = l3_start;
531
532         for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
533                 KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
534
535                 pa = pmap_early_vtophys(l1pt, l3pt);
536                 pn = (pa / PAGE_SIZE);
537                 entry = (PTE_V);
538                 entry |= (pn << PTE_PPN0_S);
539                 pmap_store(&l2[l2_slot], entry);
540                 l3pt += PAGE_SIZE;
541         }
542
543
544         /* Clean the L2 page table */
545         memset((void *)l3_start, 0, l3pt - l3_start);
546
547         return (l3pt);
548 }
549
550 /*
551  *      Bootstrap the system enough to run with virtual memory.
552  */
553 void
554 pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen)
555 {
556         u_int l1_slot, l2_slot, avail_slot, map_slot;
557         vm_offset_t freemempos;
558         vm_offset_t dpcpu, msgbufpv;
559         vm_paddr_t end, max_pa, min_pa, pa, start;
560         int i;
561
562         printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
563         printf("%lx\n", l1pt);
564         printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK);
565
566         /* Set this early so we can use the pagetable walking functions */
567         kernel_pmap_store.pm_l1 = (pd_entry_t *)l1pt;
568         PMAP_LOCK_INIT(kernel_pmap);
569
570         rw_init(&pvh_global_lock, "pmap pv global");
571
572         CPU_FILL(&kernel_pmap->pm_active);
573
574         /* Assume the address we were loaded to is a valid physical address. */
575         min_pa = max_pa = kernstart;
576
577         /*
578          * Find the minimum physical address. physmap is sorted,
579          * but may contain empty ranges.
580          */
581         for (i = 0; i < physmap_idx * 2; i += 2) {
582                 if (physmap[i] == physmap[i + 1])
583                         continue;
584                 if (physmap[i] <= min_pa)
585                         min_pa = physmap[i];
586                 if (physmap[i + 1] > max_pa)
587                         max_pa = physmap[i + 1];
588         }
589         printf("physmap_idx %lx\n", physmap_idx);
590         printf("min_pa %lx\n", min_pa);
591         printf("max_pa %lx\n", max_pa);
592
593         /* Create a direct map region early so we can use it for pa -> va */
594         pmap_bootstrap_dmap(l1pt, min_pa, max_pa);
595
596         /*
597          * Read the page table to find out what is already mapped.
598          * This assumes we have mapped a block of memory from KERNBASE
599          * using a single L1 entry.
600          */
601         (void)pmap_early_page_idx(l1pt, KERNBASE, &l1_slot, &l2_slot);
602
603         /* Sanity check the index, KERNBASE should be the first VA */
604         KASSERT(l2_slot == 0, ("The L2 index is non-zero"));
605
606         freemempos = roundup2(KERNBASE + kernlen, PAGE_SIZE);
607
608         /* Create the l3 tables for the early devmap */
609         freemempos = pmap_bootstrap_l3(l1pt,
610             VM_MAX_KERNEL_ADDRESS - L2_SIZE, freemempos);
611
612         sfence_vma();
613
614 #define alloc_pages(var, np)                                            \
615         (var) = freemempos;                                             \
616         freemempos += (np * PAGE_SIZE);                                 \
617         memset((char *)(var), 0, ((np) * PAGE_SIZE));
618
619         /* Allocate dynamic per-cpu area. */
620         alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
621         dpcpu_init((void *)dpcpu, 0);
622
623         /* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
624         alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
625         msgbufp = (void *)msgbufpv;
626
627         virtual_avail = roundup2(freemempos, L2_SIZE);
628         virtual_end = VM_MAX_KERNEL_ADDRESS - L2_SIZE;
629         kernel_vm_end = virtual_avail;
630         
631         pa = pmap_early_vtophys(l1pt, freemempos);
632
633         /* Initialize phys_avail and dump_avail. */
634         for (avail_slot = map_slot = physmem = 0; map_slot < physmap_idx * 2;
635             map_slot += 2) {
636                 start = physmap[map_slot];
637                 end = physmap[map_slot + 1];
638
639                 if (start == end)
640                         continue;
641                 dump_avail[map_slot] = start;
642                 dump_avail[map_slot + 1] = end;
643
644                 if (start >= kernstart && end <= pa)
645                         continue;
646
647                 if (start < kernstart && end > kernstart)
648                         end = kernstart;
649                 else if (start < pa && end > pa)
650                         start = pa;
651                 phys_avail[avail_slot] = start;
652                 phys_avail[avail_slot + 1] = end;
653                 physmem += (end - start) >> PAGE_SHIFT;
654                 avail_slot += 2;
655
656                 if (end != physmap[map_slot + 1] && end > pa) {
657                         phys_avail[avail_slot] = pa;
658                         phys_avail[avail_slot + 1] = physmap[map_slot + 1];
659                         physmem += (physmap[map_slot + 1] - pa) >> PAGE_SHIFT;
660                         avail_slot += 2;
661                 }
662         }
663         phys_avail[avail_slot] = 0;
664         phys_avail[avail_slot + 1] = 0;
665
666         /*
667          * Maxmem isn't the "maximum memory", it's one larger than the
668          * highest page of the physical address space.  It should be
669          * called something like "Maxphyspage".
670          */
671         Maxmem = atop(phys_avail[avail_slot - 1]);
672 }
673
674 /*
675  *      Initialize a vm_page's machine-dependent fields.
676  */
677 void
678 pmap_page_init(vm_page_t m)
679 {
680
681         TAILQ_INIT(&m->md.pv_list);
682         m->md.pv_memattr = VM_MEMATTR_WRITE_BACK;
683 }
684
685 /*
686  *      Initialize the pmap module.
687  *      Called by vm_init, to initialize any structures that the pmap
688  *      system needs to map virtual memory.
689  */
690 void
691 pmap_init(void)
692 {
693         vm_size_t s;
694         int i, pv_npg;
695
696         /*
697          * Initialize the pv chunk and pmap list mutexes.
698          */
699         mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
700         mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_DEF);
701
702         /*
703          * Initialize the pool of pv list locks.
704          */
705         for (i = 0; i < NPV_LIST_LOCKS; i++)
706                 rw_init(&pv_list_locks[i], "pmap pv list");
707
708         /*
709          * Calculate the size of the pv head table for superpages.
710          */
711         pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L2_SIZE);
712
713         /*
714          * Allocate memory for the pv head table for superpages.
715          */
716         s = (vm_size_t)(pv_npg * sizeof(struct md_page));
717         s = round_page(s);
718         pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
719         for (i = 0; i < pv_npg; i++)
720                 TAILQ_INIT(&pv_table[i].pv_list);
721         TAILQ_INIT(&pv_dummy.pv_list);
722
723         if (superpages_enabled)
724                 pagesizes[1] = L2_SIZE;
725 }
726
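/*
 * Sizing note for pv_table above: one md_page is allocated per 2MB
 * physical frame up to the end of the last vm_phys segment.  As a rough
 * worked example, with physical memory ending at 0x200000000 (8GB),
 * pv_npg = howmany(0x200000000, L2_SIZE) = 4096 entries.
 */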
727 #ifdef SMP
728 /*
729  * For SMP, these functions have to use IPIs for coherence.
730  *
731  * In general, the calling thread uses a plain fence to order the
732  * writes to the page tables before invoking an SBI callback to invoke
733  * sfence_vma() on remote CPUs.
734  */
735 static void
736 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
737 {
738         cpuset_t mask;
739
740         sched_pin();
741         mask = pmap->pm_active;
742         CPU_CLR(PCPU_GET(hart), &mask);
743         fence();
744         if (!CPU_EMPTY(&mask) && smp_started)
745                 sbi_remote_sfence_vma(mask.__bits, va, 1);
746         sfence_vma_page(va);
747         sched_unpin();
748 }
749
750 static void
751 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
752 {
753         cpuset_t mask;
754
755         sched_pin();
756         mask = pmap->pm_active;
757         CPU_CLR(PCPU_GET(hart), &mask);
758         fence();
759         if (!CPU_EMPTY(&mask) && smp_started)
760                 sbi_remote_sfence_vma(mask.__bits, sva, eva - sva + 1);
761
762         /*
763          * Might consider a loop of sfence_vma_page() for a small
764          * number of pages in the future.
765          */
766         sfence_vma();
767         sched_unpin();
768 }
769
770 static void
771 pmap_invalidate_all(pmap_t pmap)
772 {
773         cpuset_t mask;
774
775         sched_pin();
776         mask = pmap->pm_active;
777         CPU_CLR(PCPU_GET(hart), &mask);
778
779         /*
780          * XXX: The SBI doc doesn't detail how to specify x0 as the
781          * address to perform a global fence.  BBL currently treats
782          * all sfence_vma requests as global however.
783          */
784         fence();
785         if (!CPU_EMPTY(&mask) && smp_started)
786                 sbi_remote_sfence_vma(mask.__bits, 0, 0);
787         sfence_vma();
788         sched_unpin();
789 }
790 #else
791 /*
792  * Normal, non-SMP, invalidation functions.
793  * We inline these within pmap.c for speed.
794  */
795 static __inline void
796 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
797 {
798
799         sfence_vma_page(va);
800 }
801
802 static __inline void
803 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
804 {
805
806         /*
807          * Might consider a loop of sfence_vma_page() for a small
808          * number of pages in the future.
809          */
810         sfence_vma();
811 }
812
813 static __inline void
814 pmap_invalidate_all(pmap_t pmap)
815 {
816
817         sfence_vma();
818 }
819 #endif
820
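/*
 * Ordering contract assumed throughout this file: first update the PTE
 * with one of the atomic accessors, then invalidate.  On SMP the
 * fence() above orders the PTE store before the SBI request, so remote
 * harts re-walk the page tables once they have run sfence.vma.  Sketch:
 *
 *	old = pmap_load_store(l3, new);	(or pmap_clear(l3))
 *	pmap_invalidate_page(pmap, va);	(local + remote sfence.vma)
 */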
821 /*
822  *      Routine:        pmap_extract
823  *      Function:
824  *              Extract the physical page address associated
825  *              with the given map/virtual_address pair.
826  */
827 vm_paddr_t 
828 pmap_extract(pmap_t pmap, vm_offset_t va)
829 {
830         pd_entry_t *l2p, l2;
831         pt_entry_t *l3p, l3;
832         vm_paddr_t pa;
833
834         pa = 0;
835         PMAP_LOCK(pmap);
836         /*
837          * Start with the l2 table. We are unable to allocate
838          * pages in the l1 table.
839          */
840         l2p = pmap_l2(pmap, va);
841         if (l2p != NULL) {
842                 l2 = pmap_load(l2p);
843                 if ((l2 & PTE_RX) == 0) {
844                         l3p = pmap_l2_to_l3(l2p, va);
845                         if (l3p != NULL) {
846                                 l3 = pmap_load(l3p);
847                                 pa = PTE_TO_PHYS(l3);
848                                 pa |= (va & L3_OFFSET);
849                         }
850                 } else {
851                         /* L2 is superpages */
852                         pa = (l2 >> PTE_PPN1_S) << L2_SHIFT;
853                         pa |= (va & L2_OFFSET);
854                 }
855         }
856         PMAP_UNLOCK(pmap);
857         return (pa);
858 }
859
860 /*
861  *      Routine:        pmap_extract_and_hold
862  *      Function:
863  *              Atomically extract and hold the physical page
864  *              with the given pmap and virtual address pair
865  *              if that mapping permits the given protection.
866  */
867 vm_page_t
868 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
869 {
870         pt_entry_t *l3p, l3;
871         vm_paddr_t phys;
872         vm_paddr_t pa;
873         vm_page_t m;
874
875         pa = 0;
876         m = NULL;
877         PMAP_LOCK(pmap);
878 retry:
879         l3p = pmap_l3(pmap, va);
880         if (l3p != NULL && (l3 = pmap_load(l3p)) != 0) {
881                 if ((l3 & PTE_W) != 0 || (prot & VM_PROT_WRITE) == 0) {
882                         phys = PTE_TO_PHYS(l3);
883                         if (vm_page_pa_tryrelock(pmap, phys, &pa))
884                                 goto retry;
885                         m = PHYS_TO_VM_PAGE(phys);
886                         vm_page_hold(m);
887                 }
888         }
889         PA_UNLOCK_COND(pa);
890         PMAP_UNLOCK(pmap);
891         return (m);
892 }
893
894 vm_paddr_t
895 pmap_kextract(vm_offset_t va)
896 {
897         pd_entry_t *l2;
898         pt_entry_t *l3;
899         vm_paddr_t pa;
900
901         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
902                 pa = DMAP_TO_PHYS(va);
903         } else {
904                 l2 = pmap_l2(kernel_pmap, va);
905                 if (l2 == NULL)
906                         panic("pmap_kextract: No l2");
907                 if ((pmap_load(l2) & PTE_RX) != 0) {
908                         /* superpages */
909                         pa = (pmap_load(l2) >> PTE_PPN1_S) << L2_SHIFT;
910                         pa |= (va & L2_OFFSET);
911                         return (pa);
912                 }
913
914                 l3 = pmap_l2_to_l3(l2, va);
915                 if (l3 == NULL)
916                         panic("pmap_kextract: No l3...");
917                 pa = PTE_TO_PHYS(pmap_load(l3));
918                 pa |= (va & PAGE_MASK);
919         }
920         return (pa);
921 }
922
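/*
 * Illustration: pmap_kextract() answers for any mapped kernel VA,
 * whether it lies in the DMAP or behind a regular page-table mapping,
 * e.g.
 *
 *	pa = pmap_kextract((vm_offset_t)msgbufp);
 *
 * For DMAP addresses this is plain arithmetic; otherwise the kernel
 * tables are walked, with 2MB leaf entries handled separately.
 */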
923 /***************************************************
924  * Low level mapping routines.....
925  ***************************************************/
926
927 void
928 pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
929 {
930         pt_entry_t entry;
931         pt_entry_t *l3;
932         vm_offset_t va;
933         pn_t pn;
934
935         KASSERT((pa & L3_OFFSET) == 0,
936            ("pmap_kenter_device: Invalid physical address"));
937         KASSERT((sva & L3_OFFSET) == 0,
938            ("pmap_kenter_device: Invalid virtual address"));
939         KASSERT((size & PAGE_MASK) == 0,
940             ("pmap_kenter_device: Mapping is not page-sized"));
941
942         va = sva;
943         while (size != 0) {
944                 l3 = pmap_l3(kernel_pmap, va);
945                 KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va));
946
947                 pn = (pa / PAGE_SIZE);
948                 entry = PTE_KERN;
949                 entry |= (pn << PTE_PPN0_S);
950                 pmap_store(l3, entry);
951
952                 va += PAGE_SIZE;
953                 pa += PAGE_SIZE;
954                 size -= PAGE_SIZE;
955         }
956         pmap_invalidate_range(kernel_pmap, sva, va);
957 }
958
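/*
 * Sketch of the expected calling pattern; the KVA allocation shown here
 * is the caller's responsibility (e.g. the devmap or bus-space code) and
 * is only an illustration:
 *
 *	va = kva_alloc(size);		(size page-aligned)
 *	pmap_kenter_device(va, size, pa);
 *	... device registers accessible at va ...
 *	pmap_kremove_device(va, size);
 *	kva_free(va, size);
 */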
959 /*
960  * Remove a page from the kernel pagetables.
961  * Note: not SMP coherent.
962  */
963 PMAP_INLINE void
964 pmap_kremove(vm_offset_t va)
965 {
966         pt_entry_t *l3;
967
968         l3 = pmap_l3(kernel_pmap, va);
969         KASSERT(l3 != NULL, ("pmap_kremove: Invalid address"));
970
971         pmap_clear(l3);
972         sfence_vma();
973 }
974
975 void
976 pmap_kremove_device(vm_offset_t sva, vm_size_t size)
977 {
978         pt_entry_t *l3;
979         vm_offset_t va;
980
981         KASSERT((sva & L3_OFFSET) == 0,
982            ("pmap_kremove_device: Invalid virtual address"));
983         KASSERT((size & PAGE_MASK) == 0,
984             ("pmap_kremove_device: Mapping is not page-sized"));
985
986         va = sva;
987         while (size != 0) {
988                 l3 = pmap_l3(kernel_pmap, va);
989                 KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va));
990                 pmap_clear(l3);
991
992                 va += PAGE_SIZE;
993                 size -= PAGE_SIZE;
994         }
995
996         pmap_invalidate_range(kernel_pmap, sva, va);
997 }
998
999 /*
1000  *      Used to map a range of physical addresses into kernel
1001  *      virtual address space.
1002  *
1003  *      The value passed in '*virt' is a suggested virtual address for
1004  *      the mapping. Architectures which can support a direct-mapped
1005  *      physical to virtual region can return the appropriate address
1006  *      within that region, leaving '*virt' unchanged. Other
1007  *      architectures should map the pages starting at '*virt' and
1008  *      update '*virt' with the first usable address after the mapped
1009  *      region.
1010  */
1011 vm_offset_t
1012 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1013 {
1014
1015         return PHYS_TO_DMAP(start);
1016 }
1017
1018
1019 /*
1020  * Add a list of wired pages to the kva
1021  * this routine is only used for temporary
1022  * kernel mappings that do not need to have
1023  * page modification or references recorded.
1024  * Note that old mappings are simply written
1025  * over.  The page *must* be wired.
1026  * Note: SMP coherent.  Uses a ranged shootdown IPI.
1027  */
1028 void
1029 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
1030 {
1031         pt_entry_t *l3, pa;
1032         vm_offset_t va;
1033         vm_page_t m;
1034         pt_entry_t entry;
1035         pn_t pn;
1036         int i;
1037
1038         va = sva;
1039         for (i = 0; i < count; i++) {
1040                 m = ma[i];
1041                 pa = VM_PAGE_TO_PHYS(m);
1042                 pn = (pa / PAGE_SIZE);
1043                 l3 = pmap_l3(kernel_pmap, va);
1044
1045                 entry = PTE_KERN;
1046                 entry |= (pn << PTE_PPN0_S);
1047                 pmap_store(l3, entry);
1048
1049                 va += L3_SIZE;
1050         }
1051         pmap_invalidate_range(kernel_pmap, sva, va);
1052 }
1053
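/*
 * Typical use (illustration only): map a wired page temporarily, access
 * it through the KVA window, then tear the window down again.
 *
 *	pmap_qenter(va, &m, 1);
 *	bcopy((void *)va, buf, PAGE_SIZE);
 *	pmap_qremove(va, 1);
 *
 * The caller owns the KVA range and must keep the pages wired for the
 * lifetime of the mapping.
 */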
1054 /*
1055  * This routine tears out page mappings from the
1056  * kernel -- it is meant only for temporary mappings.
1057  * Note: SMP coherent.  Uses a ranged shootdown IPI.
1058  */
1059 void
1060 pmap_qremove(vm_offset_t sva, int count)
1061 {
1062         pt_entry_t *l3;
1063         vm_offset_t va;
1064
1065         KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));
1066
1067         for (va = sva; count-- > 0; va += PAGE_SIZE) {
1068                 l3 = pmap_l3(kernel_pmap, va);
1069                 KASSERT(l3 != NULL, ("pmap_qremove: Invalid address"));
1070                 pmap_clear(l3);
1071         }
1072         pmap_invalidate_range(kernel_pmap, sva, va);
1073 }
1074
1075 bool
1076 pmap_ps_enabled(pmap_t pmap __unused)
1077 {
1078
1079         return (superpages_enabled);
1080 }
1081
1082 /***************************************************
1083  * Page table page management routines.....
1084  ***************************************************/
1085 /*
1086  * Schedule the specified unused page table page to be freed.  Specifically,
1087  * add the page to the specified list of pages that will be released to the
1088  * physical memory manager after the TLB has been updated.
1089  */
1090 static __inline void
1091 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
1092     boolean_t set_PG_ZERO)
1093 {
1094
1095         if (set_PG_ZERO)
1096                 m->flags |= PG_ZERO;
1097         else
1098                 m->flags &= ~PG_ZERO;
1099         SLIST_INSERT_HEAD(free, m, plinks.s.ss);
1100 }
1101
1102 /*
1103  * Inserts the specified page table page into the specified pmap's collection
1104  * of idle page table pages.  Each of a pmap's page table pages is responsible
1105  * for mapping a distinct range of virtual addresses.  The pmap's collection is
1106  * ordered by this virtual address range.
1107  *
1108  * If "promoted" is false, then the page table page "ml3" must be zero filled.
1109  */
1110 static __inline int
1111 pmap_insert_pt_page(pmap_t pmap, vm_page_t ml3, bool promoted)
1112 {
1113
1114         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1115         ml3->valid = promoted ? VM_PAGE_BITS_ALL : 0;
1116         return (vm_radix_insert(&pmap->pm_root, ml3));
1117 }
1118
1119 /*
1120  * Removes the page table page mapping the specified virtual address from the
1121  * specified pmap's collection of idle page table pages, and returns it.
1122  * Otherwise, returns NULL if there is no page table page corresponding to the
1123  * specified virtual address.
1124  */
1125 static __inline vm_page_t
1126 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
1127 {
1128
1129         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1130         return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
1131 }
1132         
1133 /*
1134  * Decrements a page table page's wire count, which is used to record the
1135  * number of valid page table entries within the page.  If the wire count
1136  * drops to zero, then the page table page is unmapped.  Returns TRUE if the
1137  * page table page was unmapped and FALSE otherwise.
1138  */
1139 static inline boolean_t
1140 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1141 {
1142
1143         --m->wire_count;
1144         if (m->wire_count == 0) {
1145                 _pmap_unwire_ptp(pmap, va, m, free);
1146                 return (TRUE);
1147         } else {
1148                 return (FALSE);
1149         }
1150 }
1151
1152 static void
1153 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1154 {
1155         vm_paddr_t phys;
1156
1157         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1158         if (m->pindex >= NUL1E) {
1159                 pd_entry_t *l1;
1160                 l1 = pmap_l1(pmap, va);
1161                 pmap_clear(l1);
1162                 pmap_distribute_l1(pmap, pmap_l1_index(va), 0);
1163         } else {
1164                 pd_entry_t *l2;
1165                 l2 = pmap_l2(pmap, va);
1166                 pmap_clear(l2);
1167         }
1168         pmap_resident_count_dec(pmap, 1);
1169         if (m->pindex < NUL1E) {
1170                 pd_entry_t *l1;
1171                 vm_page_t pdpg;
1172
1173                 l1 = pmap_l1(pmap, va);
1174                 phys = PTE_TO_PHYS(pmap_load(l1));
1175                 pdpg = PHYS_TO_VM_PAGE(phys);
1176                 pmap_unwire_ptp(pmap, va, pdpg, free);
1177         }
1178         pmap_invalidate_page(pmap, va);
1179
1180         vm_wire_sub(1);
1181
1182         /* 
1183          * Put page on a list so that it is released after
1184          * *ALL* TLB shootdown is done
1185          */
1186         pmap_add_delayed_free_list(m, free, TRUE);
1187 }
1188
1189 /*
1190  * After removing a page table entry, this routine is used to
1191  * conditionally free the page, and manage the hold/wire counts.
1192  */
1193 static int
1194 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
1195     struct spglist *free)
1196 {
1197         vm_page_t mpte;
1198
1199         if (va >= VM_MAXUSER_ADDRESS)
1200                 return (0);
1201         KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
1202         mpte = PHYS_TO_VM_PAGE(PTE_TO_PHYS(ptepde));
1203         return (pmap_unwire_ptp(pmap, va, mpte, free));
1204 }
1205
1206 void
1207 pmap_pinit0(pmap_t pmap)
1208 {
1209
1210         PMAP_LOCK_INIT(pmap);
1211         bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1212         pmap->pm_l1 = kernel_pmap->pm_l1;
1213         pmap->pm_satp = SATP_MODE_SV39 | (vtophys(pmap->pm_l1) >> PAGE_SHIFT);
1214         CPU_ZERO(&pmap->pm_active);
1215         pmap_activate_boot(pmap);
1216 }
1217
1218 int
1219 pmap_pinit(pmap_t pmap)
1220 {
1221         vm_paddr_t l1phys;
1222         vm_page_t l1pt;
1223
1224         /*
1225          * allocate the l1 page
1226          */
1227         while ((l1pt = vm_page_alloc(NULL, 0xdeadbeef, VM_ALLOC_NORMAL |
1228             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
1229                 vm_wait(NULL);
1230
1231         l1phys = VM_PAGE_TO_PHYS(l1pt);
1232         pmap->pm_l1 = (pd_entry_t *)PHYS_TO_DMAP(l1phys);
1233         pmap->pm_satp = SATP_MODE_SV39 | (l1phys >> PAGE_SHIFT);
1234
1235         if ((l1pt->flags & PG_ZERO) == 0)
1236                 pagezero(pmap->pm_l1);
1237
1238         bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1239
1240         CPU_ZERO(&pmap->pm_active);
1241
1242         /* Install kernel pagetables */
1243         memcpy(pmap->pm_l1, kernel_pmap->pm_l1, PAGE_SIZE);
1244
1245         /* Add to the list of all user pmaps */
1246         mtx_lock(&allpmaps_lock);
1247         LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
1248         mtx_unlock(&allpmaps_lock);
1249
1250         vm_radix_init(&pmap->pm_root);
1251
1252         return (1);
1253 }
1254
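/*
 * pm_satp is the value the hart's satp CSR takes while this pmap is
 * active: the Sv39 mode bits or'ed with the physical page number of the
 * root (L1) page-table page.  Activation amounts to roughly the
 * following sketch; the real update is done by the context-switch and
 * pmap_activate() code rather than here.
 *
 *	csr_write(satp, pmap->pm_satp);
 *	sfence_vma();
 */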
1255 /*
1256  * This routine is called if the desired page table page does not exist.
1257  *
1258  * If page table page allocation fails, this routine may sleep before
1259  * returning NULL.  It sleeps only if a lock pointer was given.
1260  *
1261  * Note: If a page allocation fails at page table level two or three,
1262  * one or two pages may be held during the wait, only to be released
1263  * afterwards.  This conservative approach is easily argued to avoid
1264  * race conditions.
1265  */
1266 static vm_page_t
1267 _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
1268 {
1269         vm_page_t m, /*pdppg, */pdpg;
1270         pt_entry_t entry;
1271         vm_paddr_t phys;
1272         pn_t pn;
1273
1274         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1275
1276         /*
1277          * Allocate a page table page.
1278          */
1279         if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1280             VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1281                 if (lockp != NULL) {
1282                         RELEASE_PV_LIST_LOCK(lockp);
1283                         PMAP_UNLOCK(pmap);
1284                         rw_runlock(&pvh_global_lock);
1285                         vm_wait(NULL);
1286                         rw_rlock(&pvh_global_lock);
1287                         PMAP_LOCK(pmap);
1288                 }
1289
1290                 /*
1291                  * Indicate the need to retry.  While waiting, the page table
1292                  * page may have been allocated.
1293                  */
1294                 return (NULL);
1295         }
1296
1297         if ((m->flags & PG_ZERO) == 0)
1298                 pmap_zero_page(m);
1299
1300         /*
1301          * Map the pagetable page into the process address space, if
1302          * it isn't already there.
1303          */
1304
1305         if (ptepindex >= NUL1E) {
1306                 pd_entry_t *l1;
1307                 vm_pindex_t l1index;
1308
1309                 l1index = ptepindex - NUL1E;
1310                 l1 = &pmap->pm_l1[l1index];
1311
1312                 pn = (VM_PAGE_TO_PHYS(m) / PAGE_SIZE);
1313                 entry = (PTE_V);
1314                 entry |= (pn << PTE_PPN0_S);
1315                 pmap_store(l1, entry);
1316                 pmap_distribute_l1(pmap, l1index, entry);
1317         } else {
1318                 vm_pindex_t l1index;
1319                 pd_entry_t *l1, *l2;
1320
1321                 l1index = ptepindex >> (L1_SHIFT - L2_SHIFT);
1322                 l1 = &pmap->pm_l1[l1index];
1323                 if (pmap_load(l1) == 0) {
1324                         /* recurse for allocating page dir */
1325                         if (_pmap_alloc_l3(pmap, NUL1E + l1index,
1326                             lockp) == NULL) {
1327                                 vm_page_unwire_noq(m);
1328                                 vm_page_free_zero(m);
1329                                 return (NULL);
1330                         }
1331                 } else {
1332                         phys = PTE_TO_PHYS(pmap_load(l1));
1333                         pdpg = PHYS_TO_VM_PAGE(phys);
1334                         pdpg->wire_count++;
1335                 }
1336
1337                 phys = PTE_TO_PHYS(pmap_load(l1));
1338                 l2 = (pd_entry_t *)PHYS_TO_DMAP(phys);
1339                 l2 = &l2[ptepindex & Ln_ADDR_MASK];
1340
1341                 pn = (VM_PAGE_TO_PHYS(m) / PAGE_SIZE);
1342                 entry = (PTE_V);
1343                 entry |= (pn << PTE_PPN0_S);
1344                 pmap_store(l2, entry);
1345         }
1346
1347         pmap_resident_count_inc(pmap, 1);
1348
1349         return (m);
1350 }
1351
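/*
 * Page-table page pindex naming used by the allocation and unwire code
 * in this file: an L3 page (mapping 2MB of VA) is named by
 * pmap_l2_pindex(va), i.e. va >> L2_SHIFT, while an L2 page (mapping
 * 1GB) is named by NUL1E plus its L1 slot.  The two ranges stay
 * disjoint because pmap_l2_pindex() of any user VA is well below NUL1E.
 */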
1352 static vm_page_t
1353 pmap_alloc_l2(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1354 {
1355         pd_entry_t *l1;
1356         vm_page_t l2pg;
1357         vm_pindex_t l2pindex;
1358
1359 retry:
1360         l1 = pmap_l1(pmap, va);
1361         if (l1 != NULL && (pmap_load(l1) & PTE_RWX) == 0) {
1362                 /* Add a reference to the L2 page. */
1363                 l2pg = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l1)));
1364                 l2pg->wire_count++;
1365         } else {
1366                 /* Allocate a L2 page. */
1367                 l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
1368                 l2pg = _pmap_alloc_l3(pmap, NUL1E + l2pindex, lockp);
1369                 if (l2pg == NULL && lockp != NULL)
1370                         goto retry;
1371         }
1372         return (l2pg);
1373 }
1374
1375 static vm_page_t
1376 pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1377 {
1378         vm_pindex_t ptepindex;
1379         pd_entry_t *l2;
1380         vm_paddr_t phys;
1381         vm_page_t m;
1382
1383         /*
1384          * Calculate pagetable page index
1385          */
1386         ptepindex = pmap_l2_pindex(va);
1387 retry:
1388         /*
1389          * Get the page directory entry
1390          */
1391         l2 = pmap_l2(pmap, va);
1392
1393         /*
1394          * If the page table page is mapped, we just increment the
1395          * hold count, and activate it.
1396          */
1397         if (l2 != NULL && pmap_load(l2) != 0) {
1398                 phys = PTE_TO_PHYS(pmap_load(l2));
1399                 m = PHYS_TO_VM_PAGE(phys);
1400                 m->wire_count++;
1401         } else {
1402                 /*
1403                  * Here if the pte page isn't mapped, or if it has been
1404                  * deallocated.
1405                  */
1406                 m = _pmap_alloc_l3(pmap, ptepindex, lockp);
1407                 if (m == NULL && lockp != NULL)
1408                         goto retry;
1409         }
1410         return (m);
1411 }
1412
1413
1414 /***************************************************
1415  * Pmap allocation/deallocation routines.
1416  ***************************************************/
1417
1418 /*
1419  * Release any resources held by the given physical map.
1420  * Called when a pmap initialized by pmap_pinit is being released.
1421  * Should only be called if the map contains no valid mappings.
1422  */
1423 void
1424 pmap_release(pmap_t pmap)
1425 {
1426         vm_page_t m;
1427
1428         KASSERT(pmap->pm_stats.resident_count == 0,
1429             ("pmap_release: pmap resident count %ld != 0",
1430             pmap->pm_stats.resident_count));
1431         KASSERT(CPU_EMPTY(&pmap->pm_active),
1432             ("releasing active pmap %p", pmap));
1433
1434         mtx_lock(&allpmaps_lock);
1435         LIST_REMOVE(pmap, pm_list);
1436         mtx_unlock(&allpmaps_lock);
1437
1438         m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l1));
1439         vm_page_unwire_noq(m);
1440         vm_page_free(m);
1441 }
1442
1443 #if 0
1444 static int
1445 kvm_size(SYSCTL_HANDLER_ARGS)
1446 {
1447         unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
1448
1449         return sysctl_handle_long(oidp, &ksize, 0, req);
1450 }
1451 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 
1452     0, 0, kvm_size, "LU", "Size of KVM");
1453
1454 static int
1455 kvm_free(SYSCTL_HANDLER_ARGS)
1456 {
1457         unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1458
1459         return sysctl_handle_long(oidp, &kfree, 0, req);
1460 }
1461 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 
1462     0, 0, kvm_free, "LU", "Amount of KVM free");
1463 #endif /* 0 */
1464
1465 /*
1466  * grow the number of kernel page table entries, if needed
1467  */
1468 void
1469 pmap_growkernel(vm_offset_t addr)
1470 {
1471         vm_paddr_t paddr;
1472         vm_page_t nkpg;
1473         pd_entry_t *l1, *l2;
1474         pt_entry_t entry;
1475         pn_t pn;
1476
1477         mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1478
1479         addr = roundup2(addr, L2_SIZE);
1480         if (addr - 1 >= vm_map_max(kernel_map))
1481                 addr = vm_map_max(kernel_map);
1482         while (kernel_vm_end < addr) {
1483                 l1 = pmap_l1(kernel_pmap, kernel_vm_end);
1484                 if (pmap_load(l1) == 0) {
1485                         /* We need a new PDP entry */
1486                         nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
1487                             VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
1488                             VM_ALLOC_WIRED | VM_ALLOC_ZERO);
1489                         if (nkpg == NULL)
1490                                 panic("pmap_growkernel: no memory to grow kernel");
1491                         if ((nkpg->flags & PG_ZERO) == 0)
1492                                 pmap_zero_page(nkpg);
1493                         paddr = VM_PAGE_TO_PHYS(nkpg);
1494
1495                         pn = (paddr / PAGE_SIZE);
1496                         entry = (PTE_V);
1497                         entry |= (pn << PTE_PPN0_S);
1498                         pmap_store(l1, entry);
1499                         pmap_distribute_l1(kernel_pmap,
1500                             pmap_l1_index(kernel_vm_end), entry);
1501                         continue; /* try again */
1502                 }
1503                 l2 = pmap_l1_to_l2(l1, kernel_vm_end);
1504                 if ((pmap_load(l2) & PTE_V) != 0 &&
1505                     (pmap_load(l2) & PTE_RWX) == 0) {
1506                         kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
1507                         if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
1508                                 kernel_vm_end = vm_map_max(kernel_map);
1509                                 break;
1510                         }
1511                         continue;
1512                 }
1513
1514                 nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
1515                     VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
1516                     VM_ALLOC_ZERO);
1517                 if (nkpg == NULL)
1518                         panic("pmap_growkernel: no memory to grow kernel");
1519                 if ((nkpg->flags & PG_ZERO) == 0) {
1520                         pmap_zero_page(nkpg);
1521                 }
1522                 paddr = VM_PAGE_TO_PHYS(nkpg);
1523
1524                 pn = (paddr / PAGE_SIZE);
1525                 entry = (PTE_V);
1526                 entry |= (pn << PTE_PPN0_S);
1527                 pmap_store(l2, entry);
1528
1529                 pmap_invalidate_page(kernel_pmap, kernel_vm_end);
1530
1531                 kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
1532                 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
1533                         kernel_vm_end = vm_map_max(kernel_map);
1534                         break;                       
1535                 }
1536         }
1537 }
1538
1539
1540 /***************************************************
1541  * page management routines.
1542  ***************************************************/
1543
1544 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
1545 CTASSERT(_NPCM == 3);
1546 CTASSERT(_NPCPV == 168);
1547
1548 static __inline struct pv_chunk *
1549 pv_to_chunk(pv_entry_t pv)
1550 {
1551
1552         return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
1553 }
1554
1555 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1556
1557 #define PC_FREE0        0xfffffffffffffffful
1558 #define PC_FREE1        0xfffffffffffffffful
1559 #define PC_FREE2        0x000000fffffffffful
1560
1561 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
1562
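/*
 * Each pv_chunk occupies one page and carries _NPCPV (168) pv_entry
 * slots, tracked by the three 64-bit free masks above: 64 + 64 + 40 =
 * 168, so only the low 40 bits of the last mask are meaningful, which
 * is why PC_FREE2 is 0x000000fffffffffful.  free_pv_entry() maps a
 * pv_entry back to (field, bit) with idx / 64 and idx % 64.
 */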
1563 #if 0
1564 #ifdef PV_STATS
1565 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
1566
1567 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
1568         "Current number of pv entry chunks");
1569 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
1570         "Current number of pv entry chunks allocated");
1571 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
1572         "Current number of pv entry chunks frees");
1573 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
1574         "Number of times tried to get a chunk page but failed.");
1575
1576 static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
1577 static int pv_entry_spare;
1578
1579 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
1580         "Current number of pv entry frees");
1581 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
1582         "Current number of pv entry allocs");
1583 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
1584         "Current number of pv entries");
1585 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
1586         "Current number of spare pv entries");
1587 #endif
1588 #endif /* 0 */
1589
1590 /*
1591  * We are in a serious low memory condition.  Resort to
1592  * drastic measures to free some pages so we can allocate
1593  * another pv entry chunk.
1594  *
1595  * Returns NULL if PV entries were reclaimed from the specified pmap.
1596  *
1597  * We do not, however, unmap 2mpages because subsequent accesses will
1598  * allocate per-page pv entries until repromotion occurs, thereby
1599  * exacerbating the shortage of free pv entries.
1600  */
1601 static vm_page_t
1602 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
1603 {
1604
1605         panic("RISCVTODO: reclaim_pv_chunk");
1606 }
1607
1608 /*
1609  * free the pv_entry back to the free list
1610  */
1611 static void
1612 free_pv_entry(pmap_t pmap, pv_entry_t pv)
1613 {
1614         struct pv_chunk *pc;
1615         int idx, field, bit;
1616
1617         rw_assert(&pvh_global_lock, RA_LOCKED);
1618         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1619         PV_STAT(atomic_add_long(&pv_entry_frees, 1));
1620         PV_STAT(atomic_add_int(&pv_entry_spare, 1));
1621         PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
1622         pc = pv_to_chunk(pv);
1623         idx = pv - &pc->pc_pventry[0];
1624         field = idx / 64;
1625         bit = idx % 64;
1626         pc->pc_map[field] |= 1ul << bit;
1627         if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
1628             pc->pc_map[2] != PC_FREE2) {
1629                 /* 98% of the time, pc is already at the head of the list. */
1630                 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
1631                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1632                         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1633                 }
1634                 return;
1635         }
1636         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1637         free_pv_chunk(pc);
1638 }
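
/*
 * For illustration of the index arithmetic above: a pv_entry at index 70
 * within its chunk maps to field = 70 / 64 = 1 and bit = 70 % 64 = 6, so
 * freeing it sets bit 6 of pc_map[1].  A chunk whose bitmap matches
 * pc_freemask again is entirely free and is handed back via free_pv_chunk().
 */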
1639
1640 static void
1641 free_pv_chunk(struct pv_chunk *pc)
1642 {
1643         vm_page_t m;
1644
1645         mtx_lock(&pv_chunks_mutex);
1646         TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1647         mtx_unlock(&pv_chunks_mutex);
1648         PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1649         PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1650         PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1651         /* entire chunk is free, return it */
1652         m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1653         dump_drop_page(m->phys_addr);
1654         vm_page_unwire_noq(m);
1655         vm_page_free(m);
1656 }
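
/*
 * Chunk pages are always mapped through the direct map, which is why
 * DMAP_TO_PHYS() on the chunk address above recovers the backing vm_page
 * before it is unwired and freed.
 */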
1657
1658 /*
1659  * Returns a new PV entry, allocating a new PV chunk from the system when
1660  * needed.  If this PV chunk allocation fails and a PV list lock pointer was
1661  * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
1662  * returned.
1663  *
1664  * The given PV list lock may be released.
1665  */
1666 static pv_entry_t
1667 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
1668 {
1669         int bit, field;
1670         pv_entry_t pv;
1671         struct pv_chunk *pc;
1672         vm_page_t m;
1673
1674         rw_assert(&pvh_global_lock, RA_LOCKED);
1675         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1676         PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
1677 retry:
1678         pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1679         if (pc != NULL) {
1680                 for (field = 0; field < _NPCM; field++) {
1681                         if (pc->pc_map[field]) {
1682                                 bit = ffsl(pc->pc_map[field]) - 1;
1683                                 break;
1684                         }
1685                 }
1686                 if (field < _NPCM) {
1687                         pv = &pc->pc_pventry[field * 64 + bit];
1688                         pc->pc_map[field] &= ~(1ul << bit);
1689                         /* If this was the last item, move it to tail */
1690                         if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
1691                             pc->pc_map[2] == 0) {
1692                                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1693                                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
1694                                     pc_list);
1695                         }
1696                         PV_STAT(atomic_add_long(&pv_entry_count, 1));
1697                         PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
1698                         return (pv);
1699                 }
1700         }
1701         /* No free items, allocate another chunk */
1702         m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
1703             VM_ALLOC_WIRED);
1704         if (m == NULL) {
1705                 if (lockp == NULL) {
1706                         PV_STAT(pc_chunk_tryfail++);
1707                         return (NULL);
1708                 }
1709                 m = reclaim_pv_chunk(pmap, lockp);
1710                 if (m == NULL)
1711                         goto retry;
1712         }
1713         PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1714         PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1715         dump_add_page(m->phys_addr);
1716         pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1717         pc->pc_pmap = pmap;
1718         pc->pc_map[0] = PC_FREE0 & ~1ul;        /* preallocated bit 0 */
1719         pc->pc_map[1] = PC_FREE1;
1720         pc->pc_map[2] = PC_FREE2;
1721         mtx_lock(&pv_chunks_mutex);
1722         TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1723         mtx_unlock(&pv_chunks_mutex);
1724         pv = &pc->pc_pventry[0];
1725         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1726         PV_STAT(atomic_add_long(&pv_entry_count, 1));
1727         PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
1728         return (pv);
1729 }
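
/*
 * A minimal sketch of how callers in this file use get_pv_entry(), e.g. when
 * entering a managed 4KB mapping:
 *
 *	pv = get_pv_entry(pmap, &lock);
 *	pv->pv_va = va;
 *	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 *	m->md.pv_gen++;
 *
 * Passing a NULL lock pointer instead disables reclamation, in which case the
 * caller must be prepared for a NULL return.
 */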
1730
1731 /*
1732  * Ensure that the number of spare PV entries in the specified pmap meets or
1733  * exceeds the given count, "needed".
1734  *
1735  * The given PV list lock may be released.
1736  */
1737 static void
1738 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
1739 {
1740         struct pch new_tail;
1741         struct pv_chunk *pc;
1742         vm_page_t m;
1743         int avail, free;
1744         bool reclaimed;
1745
1746         rw_assert(&pvh_global_lock, RA_LOCKED);
1747         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1748         KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
1749
1750         /*
1751          * Newly allocated PV chunks must be stored in a private list until
1752          * the required number of PV chunks have been allocated.  Otherwise,
1753          * reclaim_pv_chunk() could recycle one of these chunks.  In
1754          * contrast, these chunks must be added to the pmap upon allocation.
1755          */
1756         TAILQ_INIT(&new_tail);
1757 retry:
1758         avail = 0;
1759         TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
1760                 bit_count((bitstr_t *)pc->pc_map, 0,
1761                     sizeof(pc->pc_map) * NBBY, &free);
1762                 if (free == 0)
1763                         break;
1764                 avail += free;
1765                 if (avail >= needed)
1766                         break;
1767         }
1768         for (reclaimed = false; avail < needed; avail += _NPCPV) {
1769                 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
1770                     VM_ALLOC_WIRED);
1771                 if (m == NULL) {
1772                         m = reclaim_pv_chunk(pmap, lockp);
1773                         if (m == NULL)
1774                                 goto retry;
1775                         reclaimed = true;
1776                 }
1777                 /* XXX PV STATS */
1778 #if 0
1779                 dump_add_page(m->phys_addr);
1780 #endif
1781                 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1782                 pc->pc_pmap = pmap;
1783                 pc->pc_map[0] = PC_FREE0;
1784                 pc->pc_map[1] = PC_FREE1;
1785                 pc->pc_map[2] = PC_FREE2;
1786                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1787                 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
1788
1789                 /*
1790                  * The reclaim might have freed a chunk from the current pmap.
1791                  * If that chunk contained available entries, we need to
1792                  * re-count the number of available entries.
1793                  */
1794                 if (reclaimed)
1795                         goto retry;
1796         }
1797         if (!TAILQ_EMPTY(&new_tail)) {
1798                 mtx_lock(&pv_chunks_mutex);
1799                 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
1800                 mtx_unlock(&pv_chunks_mutex);
1801         }
1802 }
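
/*
 * bit_count() above counts the set bits of pc_map, i.e. the free slots in
 * each chunk the pmap already owns; only the shortfall is covered by
 * allocating new chunks.  For example, pmap_demote_l2_locked() below reserves
 * Ln_ENTRIES - 1 (511) spares before demoting a managed 2MB mapping.
 */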
1803
1804 /*
1805  * First find and then remove the pv entry for the specified pmap and virtual
1806  * address from the specified pv list.  Returns the pv entry if found and NULL
1807  * otherwise.  This operation can be performed on pv lists for either 4KB or
1808  * 2MB page mappings.
1809  */
1810 static __inline pv_entry_t
1811 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1812 {
1813         pv_entry_t pv;
1814
1815         rw_assert(&pvh_global_lock, RA_LOCKED);
1816         TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
1817                 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
1818                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
1819                         pvh->pv_gen++;
1820                         break;
1821                 }
1822         }
1823         return (pv);
1824 }
1825
1826 /*
1827  * First find and then destroy the pv entry for the specified pmap and virtual
1828  * address.  This operation can be performed on pv lists for either 4KB or 2MB
1829  * page mappings.
1830  */
1831 static void
1832 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1833 {
1834         pv_entry_t pv;
1835
1836         pv = pmap_pvh_remove(pvh, pmap, va);
1837
1838         KASSERT(pv != NULL, ("pmap_pvh_free: pv not found for %#lx", va));
1839         free_pv_entry(pmap, pv);
1840 }
1841
1842 /*
1843  * Conditionally create the PV entry for a 4KB page mapping if the required
1844  * memory can be allocated without resorting to reclamation.
1845  */
1846 static boolean_t
1847 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
1848     struct rwlock **lockp)
1849 {
1850         pv_entry_t pv;
1851
1852         rw_assert(&pvh_global_lock, RA_LOCKED);
1853         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1854         /* Pass NULL instead of the lock pointer to disable reclamation. */
1855         if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
1856                 pv->pv_va = va;
1857                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1858                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
1859                 m->md.pv_gen++;
1860                 return (TRUE);
1861         } else
1862                 return (FALSE);
1863 }
1864
1865 /*
1866  * After demotion from a 2MB page mapping to 512 4KB page mappings,
1867  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
1868  * entries for each of the 4KB page mappings.
1869  */
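/*
 * Roughly: the 2MB mapping's existing pv entry is reused for the first 4KB
 * page, and the remaining 511 entries are carved directly out of the spare
 * pv chunks that the caller reserved beforehand (see reserve_pv_entries()),
 * so no allocation can fail at this point.
 */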
1870 static void __unused
1871 pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1872     struct rwlock **lockp)
1873 {
1874         struct md_page *pvh;
1875         struct pv_chunk *pc;
1876         pv_entry_t pv;
1877         vm_page_t m;
1878         vm_offset_t va_last;
1879         int bit, field;
1880
1881         rw_assert(&pvh_global_lock, RA_LOCKED);
1882         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1883         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
1884
1885         /*
1886          * Transfer the 2mpage's pv entry for this mapping to the first
1887          * page's pv list.  Once this transfer begins, the pv list lock
1888          * must not be released until the last pv entry is reinstantiated.
1889          */
1890         pvh = pa_to_pvh(pa);
1891         va &= ~L2_OFFSET;
1892         pv = pmap_pvh_remove(pvh, pmap, va);
1893         KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
1894         m = PHYS_TO_VM_PAGE(pa);
1895         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
1896         m->md.pv_gen++;
1897         /* Instantiate the remaining 511 pv entries. */
1898         va_last = va + L2_SIZE - PAGE_SIZE;
1899         for (;;) {
1900                 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1901                 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
1902                     pc->pc_map[2] != 0, ("pmap_pv_demote_l2: missing spare"));
1903                 for (field = 0; field < _NPCM; field++) {
1904                         while (pc->pc_map[field] != 0) {
1905                                 bit = ffsl(pc->pc_map[field]) - 1;
1906                                 pc->pc_map[field] &= ~(1ul << bit);
1907                                 pv = &pc->pc_pventry[field * 64 + bit];
1908                                 va += PAGE_SIZE;
1909                                 pv->pv_va = va;
1910                                 m++;
1911                                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1912                             ("pmap_pv_demote_l2: page %p is not managed", m));
1913                                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
1914                                 m->md.pv_gen++;
1915                                 if (va == va_last)
1916                                         goto out;
1917                         }
1918                 }
1919                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1920                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1921         }
1922 out:
1923         if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
1924                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1925                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1926         }
1927         /* XXX PV stats */
1928 }
1929
1930 #if VM_NRESERVLEVEL > 0
1931 static void
1932 pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1933     struct rwlock **lockp)
1934 {
1935         struct md_page *pvh;
1936         pv_entry_t pv;
1937         vm_page_t m;
1938         vm_offset_t va_last;
1939
1940         rw_assert(&pvh_global_lock, RA_LOCKED);
1941         KASSERT((va & L2_OFFSET) == 0,
1942             ("pmap_pv_promote_l2: misaligned va %#lx", va));
1943
1944         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
1945
1946         m = PHYS_TO_VM_PAGE(pa);
1947         pv = pmap_pvh_remove(&m->md, pmap, va);
1948         KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv for %#lx not found", va));
1949         pvh = pa_to_pvh(pa);
1950         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
1951         pvh->pv_gen++;
1952
1953         va_last = va + L2_SIZE - PAGE_SIZE;
1954         do {
1955                 m++;
1956                 va += PAGE_SIZE;
1957                 pmap_pvh_free(&m->md, pmap, va);
1958         } while (va < va_last);
1959 }
1960 #endif /* VM_NRESERVLEVEL > 0 */
1961
1962 /*
1963  * Create the PV entry for a 2MB page mapping.  Always returns true unless the
1964  * flag PMAP_ENTER_NORECLAIM is specified.  If that flag is specified, returns
1965  * false if the PV entry cannot be allocated without resorting to reclamation.
1966  */
1967 static bool
1968 pmap_pv_insert_l2(pmap_t pmap, vm_offset_t va, pd_entry_t l2e, u_int flags,
1969     struct rwlock **lockp)
1970 {
1971         struct md_page *pvh;
1972         pv_entry_t pv;
1973         vm_paddr_t pa;
1974
1975         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1976         /* Pass NULL instead of the lock pointer to disable reclamation. */
1977         if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
1978             NULL : lockp)) == NULL)
1979                 return (false);
1980         pv->pv_va = va;
1981         pa = PTE_TO_PHYS(l2e);
1982         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
1983         pvh = pa_to_pvh(pa);
1984         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
1985         pvh->pv_gen++;
1986         return (true);
1987 }
1988
1989 static void
1990 pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
1991 {
1992         pt_entry_t newl2, oldl2;
1993         vm_page_t ml3;
1994         vm_paddr_t ml3pa;
1995
1996         KASSERT(!VIRT_IN_DMAP(va), ("removing direct mapping of %#lx", va));
1997         KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
1998         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1999
2000         ml3 = pmap_remove_pt_page(pmap, va);
2001         if (ml3 == NULL)
2002                 panic("pmap_remove_kernel_l2: Missing pt page");
2003
2004         ml3pa = VM_PAGE_TO_PHYS(ml3);
2005         newl2 = ml3pa | PTE_V;
2006
2007         /*
2008          * If this page table page was unmapped by a promotion, then it
2009          * contains valid mappings.  Zero it to invalidate those mappings.
2010          */
2011         if (ml3->valid != 0)
2012                 pagezero((void *)PHYS_TO_DMAP(ml3pa));
2013
2014         /*
2015          * Demote the mapping.
2016          */
2017         oldl2 = pmap_load_store(l2, newl2);
2018         KASSERT(oldl2 == 0, ("%s: found existing mapping at %p: %#lx",
2019             __func__, l2, oldl2));
2020 }
2021
2022 /*
2023  * pmap_remove_l2: Do the things to unmap a level 2 superpage.
2024  */
2025 static int
2026 pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
2027     pd_entry_t l1e, struct spglist *free, struct rwlock **lockp)
2028 {
2029         struct md_page *pvh;
2030         pt_entry_t oldl2;
2031         vm_offset_t eva, va;
2032         vm_page_t m, ml3;
2033
2034         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2035         KASSERT((sva & L2_OFFSET) == 0, ("pmap_remove_l2: sva is not aligned"));
2036         oldl2 = pmap_load_clear(l2);
2037         KASSERT((oldl2 & PTE_RWX) != 0,
2038             ("pmap_remove_l2: L2e %lx is not a superpage mapping", oldl2));
2039
2040         /*
2041          * The sfence.vma documentation states that it is sufficient to specify
2042          * a single address within a superpage mapping.  However, since we do
2043          * not perform any invalidation upon promotion, TLBs may still be
2044          * caching 4KB mappings within the superpage, so we must invalidate the
2045          * entire range.
2046          */
2047         pmap_invalidate_range(pmap, sva, sva + L2_SIZE);
2048         if ((oldl2 & PTE_SW_WIRED) != 0)
2049                 pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
2050         pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
2051         if ((oldl2 & PTE_SW_MANAGED) != 0) {
2052                 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, PTE_TO_PHYS(oldl2));
2053                 pvh = pa_to_pvh(PTE_TO_PHYS(oldl2));
2054                 pmap_pvh_free(pvh, pmap, sva);
2055                 eva = sva + L2_SIZE;
2056                 for (va = sva, m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(oldl2));
2057                     va < eva; va += PAGE_SIZE, m++) {
2058                         if ((oldl2 & PTE_D) != 0)
2059                                 vm_page_dirty(m);
2060                         if ((oldl2 & PTE_A) != 0)
2061                                 vm_page_aflag_set(m, PGA_REFERENCED);
2062                         if (TAILQ_EMPTY(&m->md.pv_list) &&
2063                             TAILQ_EMPTY(&pvh->pv_list))
2064                                 vm_page_aflag_clear(m, PGA_WRITEABLE);
2065                 }
2066         }
2067         if (pmap == kernel_pmap) {
2068                 pmap_remove_kernel_l2(pmap, l2, sva);
2069         } else {
2070                 ml3 = pmap_remove_pt_page(pmap, sva);
2071                 if (ml3 != NULL) {
2072                         KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
2073                             ("pmap_remove_l2: l3 page not promoted"));
2074                         pmap_resident_count_dec(pmap, 1);
2075                         KASSERT(ml3->wire_count == Ln_ENTRIES,
2076                             ("pmap_remove_l2: l3 page wire count error"));
2077                         ml3->wire_count = 1;
2078                         vm_page_unwire_noq(ml3);
2079                         pmap_add_delayed_free_list(ml3, free, FALSE);
2080                 }
2081         }
2082         return (pmap_unuse_pt(pmap, sva, l1e, free));
2083 }
2084
2085 /*
2086  * pmap_remove_l3: do the things to unmap a page in a process
2087  */
2088 static int
2089 pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
2090     pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
2091 {
2092         pt_entry_t old_l3;
2093         vm_paddr_t phys;
2094         vm_page_t m;
2095
2096         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2097         old_l3 = pmap_load_clear(l3);
2098         pmap_invalidate_page(pmap, va);
2099         if (old_l3 & PTE_SW_WIRED)
2100                 pmap->pm_stats.wired_count -= 1;
2101         pmap_resident_count_dec(pmap, 1);
2102         if (old_l3 & PTE_SW_MANAGED) {
2103                 phys = PTE_TO_PHYS(old_l3);
2104                 m = PHYS_TO_VM_PAGE(phys);
2105                 if ((old_l3 & PTE_D) != 0)
2106                         vm_page_dirty(m);
2107                 if (old_l3 & PTE_A)
2108                         vm_page_aflag_set(m, PGA_REFERENCED);
2109                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2110                 pmap_pvh_free(&m->md, pmap, va);
2111         }
2112
2113         return (pmap_unuse_pt(pmap, va, l2e, free));
2114 }
2115
2116 /*
2117  *      Remove the given range of addresses from the specified map.
2118  *
2119  *      It is assumed that the start and end are properly
2120  *      rounded to the page size.
2121  */
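/*
 * Sketch of the walk below: for each L1 slot the range is processed in
 * L2-sized pieces.  A 2MB leaf that is entirely covered by the range is
 * removed in one shot via pmap_remove_l2(); a partially covered leaf is first
 * demoted, after which the surviving L3 entries are removed individually and
 * the TLB is invalidated over the contiguous runs that were actually touched.
 */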
2122 void
2123 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2124 {
2125         struct spglist free;
2126         struct rwlock *lock;
2127         vm_offset_t va, va_next;
2128         pd_entry_t *l1, *l2, l2e;
2129         pt_entry_t *l3;
2130
2131         /*
2132          * Perform an unsynchronized read.  This is, however, safe.
2133          */
2134         if (pmap->pm_stats.resident_count == 0)
2135                 return;
2136
2137         SLIST_INIT(&free);
2138
2139         rw_rlock(&pvh_global_lock);
2140         PMAP_LOCK(pmap);
2141
2142         lock = NULL;
2143         for (; sva < eva; sva = va_next) {
2144                 if (pmap->pm_stats.resident_count == 0)
2145                         break;
2146
2147                 l1 = pmap_l1(pmap, sva);
2148                 if (pmap_load(l1) == 0) {
2149                         va_next = (sva + L1_SIZE) & ~L1_OFFSET;
2150                         if (va_next < sva)
2151                                 va_next = eva;
2152                         continue;
2153                 }
2154
2155                 /*
2156                  * Calculate index for next page table.
2157                  */
2158                 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2159                 if (va_next < sva)
2160                         va_next = eva;
2161
2162                 l2 = pmap_l1_to_l2(l1, sva);
2163                 if (l2 == NULL)
2164                         continue;
2165                 if ((l2e = pmap_load(l2)) == 0)
2166                         continue;
2167                 if ((l2e & PTE_RWX) != 0) {
2168                         if (sva + L2_SIZE == va_next && eva >= va_next) {
2169                                 (void)pmap_remove_l2(pmap, l2, sva,
2170                                     pmap_load(l1), &free, &lock);
2171                                 continue;
2172                         } else if (!pmap_demote_l2_locked(pmap, l2, sva,
2173                             &lock)) {
2174                                 /*
2175                                  * The large page mapping was destroyed.
2176                                  */
2177                                 continue;
2178                         }
2179                         l2e = pmap_load(l2);
2180                 }
2181
2182                 /*
2183                  * Limit our scan to either the end of the va represented
2184                  * by the current page table page, or to the end of the
2185                  * range being removed.
2186                  */
2187                 if (va_next > eva)
2188                         va_next = eva;
2189
2190                 va = va_next;
2191                 for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
2192                     sva += L3_SIZE) {
2193                         if (pmap_load(l3) == 0) {
2194                                 if (va != va_next) {
2195                                         pmap_invalidate_range(pmap, va, sva);
2196                                         va = va_next;
2197                                 }
2198                                 continue;
2199                         }
2200                         if (va == va_next)
2201                                 va = sva;
2202                         if (pmap_remove_l3(pmap, l3, sva, l2e, &free, &lock)) {
2203                                 sva += L3_SIZE;
2204                                 break;
2205                         }
2206                 }
2207                 if (va != va_next)
2208                         pmap_invalidate_range(pmap, va, sva);
2209         }
2210         if (lock != NULL)
2211                 rw_wunlock(lock);
2212         rw_runlock(&pvh_global_lock);
2213         PMAP_UNLOCK(pmap);
2214         vm_page_free_pages_toq(&free, false);
2215 }
2216
2217 /*
2218  *      Routine:        pmap_remove_all
2219  *      Function:
2220  *              Removes this physical page from
2221  *              all physical maps in which it resides.
2222  *              Reflects back modify bits to the pager.
2223  *
2224  *      Notes:
2225  *              Original versions of this routine were very
2226  *              inefficient because they iteratively called
2227  *              pmap_remove (slow...)
2228  */
2229
2230 void
2231 pmap_remove_all(vm_page_t m)
2232 {
2233         struct spglist free;
2234         struct md_page *pvh;
2235         pmap_t pmap;
2236         pt_entry_t *l3, l3e;
2237         pd_entry_t *l2, l2e;
2238         pv_entry_t pv;
2239         vm_offset_t va;
2240
2241         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2242             ("pmap_remove_all: page %p is not managed", m));
2243         SLIST_INIT(&free);
2244         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
2245             pa_to_pvh(VM_PAGE_TO_PHYS(m));
2246
2247         rw_wlock(&pvh_global_lock);
2248         while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
2249                 pmap = PV_PMAP(pv);
2250                 PMAP_LOCK(pmap);
2251                 va = pv->pv_va;
2252                 l2 = pmap_l2(pmap, va);
2253                 (void)pmap_demote_l2(pmap, l2, va);
2254                 PMAP_UNLOCK(pmap);
2255         }
2256         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2257                 pmap = PV_PMAP(pv);
2258                 PMAP_LOCK(pmap);
2259                 pmap_resident_count_dec(pmap, 1);
2260                 l2 = pmap_l2(pmap, pv->pv_va);
2261                 KASSERT(l2 != NULL, ("pmap_remove_all: no l2 table found"));
2262                 l2e = pmap_load(l2);
2263
2264                 KASSERT((l2e & PTE_RX) == 0,
2265                     ("pmap_remove_all: found a superpage in %p's pv list", m));
2266
2267                 l3 = pmap_l2_to_l3(l2, pv->pv_va);
2268                 l3e = pmap_load_clear(l3);
2269                 pmap_invalidate_page(pmap, pv->pv_va);
2270                 if (l3e & PTE_SW_WIRED)
2271                         pmap->pm_stats.wired_count--;
2272                 if ((l3e & PTE_A) != 0)
2273                         vm_page_aflag_set(m, PGA_REFERENCED);
2274
2275                 /*
2276                  * Update the vm_page_t clean and reference bits.
2277                  */
2278                 if ((l3e & PTE_D) != 0)
2279                         vm_page_dirty(m);
2280                 pmap_unuse_pt(pmap, pv->pv_va, pmap_load(l2), &free);
2281                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2282                 m->md.pv_gen++;
2283                 free_pv_entry(pmap, pv);
2284                 PMAP_UNLOCK(pmap);
2285         }
2286         vm_page_aflag_clear(m, PGA_WRITEABLE);
2287         rw_wunlock(&pvh_global_lock);
2288         vm_page_free_pages_toq(&free, false);
2289 }
2290
2291 /*
2292  *      Set the physical protection on the
2293  *      specified range of this map as requested.
2294  */
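/*
 * For illustration of the mask built below: removing VM_PROT_WRITE clears
 * PTE_W and PTE_D, and removing VM_PROT_EXECUTE clears PTE_X; e.g. for
 * prot == VM_PROT_READ the mask is PTE_W | PTE_D | PTE_X.  Dirty state is
 * transferred to the vm_page (vm_page_dirty()) before PTE_D is cleared on
 * managed mappings.
 */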
2295 void
2296 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2297 {
2298         pd_entry_t *l1, *l2, l2e;
2299         pt_entry_t *l3, l3e, mask;
2300         vm_page_t m;
2301         vm_paddr_t pa;
2302         vm_offset_t va, va_next;
2303         bool anychanged, pv_lists_locked;
2304
2305         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2306                 pmap_remove(pmap, sva, eva);
2307                 return;
2308         }
2309
2310         if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
2311             (VM_PROT_WRITE | VM_PROT_EXECUTE))
2312                 return;
2313
2314         anychanged = false;
2315         pv_lists_locked = false;
2316         mask = 0;
2317         if ((prot & VM_PROT_WRITE) == 0)
2318                 mask |= PTE_W | PTE_D;
2319         if ((prot & VM_PROT_EXECUTE) == 0)
2320                 mask |= PTE_X;
2321 resume:
2322         PMAP_LOCK(pmap);
2323         for (; sva < eva; sva = va_next) {
2324                 l1 = pmap_l1(pmap, sva);
2325                 if (pmap_load(l1) == 0) {
2326                         va_next = (sva + L1_SIZE) & ~L1_OFFSET;
2327                         if (va_next < sva)
2328                                 va_next = eva;
2329                         continue;
2330                 }
2331
2332                 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2333                 if (va_next < sva)
2334                         va_next = eva;
2335
2336                 l2 = pmap_l1_to_l2(l1, sva);
2337                 if (l2 == NULL || (l2e = pmap_load(l2)) == 0)
2338                         continue;
2339                 if ((l2e & PTE_RWX) != 0) {
2340                         if (sva + L2_SIZE == va_next && eva >= va_next) {
2341 retryl2:
2342                                 if ((l2e & (PTE_SW_MANAGED | PTE_D)) ==
2343                                     (PTE_SW_MANAGED | PTE_D)) {
2344                                         pa = PTE_TO_PHYS(l2e);
2345                                         for (va = sva, m = PHYS_TO_VM_PAGE(pa);
2346                                             va < va_next; m++, va += PAGE_SIZE)
2347                                                 vm_page_dirty(m);
2348                                 }
2349                                 if (!atomic_fcmpset_long(l2, &l2e, l2e & ~mask))
2350                                         goto retryl2;
2351                                 anychanged = true;
2352                         } else {
2353                                 if (!pv_lists_locked) {
2354                                         pv_lists_locked = true;
2355                                         if (!rw_try_rlock(&pvh_global_lock)) {
2356                                                 if (anychanged)
2357                                                         pmap_invalidate_all(
2358                                                             pmap);
2359                                                 PMAP_UNLOCK(pmap);
2360                                                 rw_rlock(&pvh_global_lock);
2361                                                 goto resume;
2362                                         }
2363                                 }
2364                                 if (!pmap_demote_l2(pmap, l2, sva)) {
2365                                         /*
2366                                          * The large page mapping was destroyed.
2367                                          */
2368                                         continue;
2369                                 }
2370                         }
2371                 }
2372
2373                 if (va_next > eva)
2374                         va_next = eva;
2375
2376                 for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
2377                     sva += L3_SIZE) {
2378                         l3e = pmap_load(l3);
2379 retryl3:
2380                         if ((l3e & PTE_V) == 0)
2381                                 continue;
2382                         if ((prot & VM_PROT_WRITE) == 0 &&
2383                             (l3e & (PTE_SW_MANAGED | PTE_D)) ==
2384                             (PTE_SW_MANAGED | PTE_D)) {
2385                                 m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(l3e));
2386                                 vm_page_dirty(m);
2387                         }
2388                         if (!atomic_fcmpset_long(l3, &l3e, l3e & ~mask))
2389                                 goto retryl3;
2390                         anychanged = true;
2391                 }
2392         }
2393         if (anychanged)
2394                 pmap_invalidate_all(pmap);
2395         if (pv_lists_locked)
2396                 rw_runlock(&pvh_global_lock);
2397         PMAP_UNLOCK(pmap);
2398 }
2399
2400 int
2401 pmap_fault_fixup(pmap_t pmap, vm_offset_t va, vm_prot_t ftype)
2402 {
2403         pd_entry_t *l2, l2e;
2404         pt_entry_t bits, *pte, oldpte;
2405         int rv;
2406
2407         rv = 0;
2408         PMAP_LOCK(pmap);
2409         l2 = pmap_l2(pmap, va);
2410         if (l2 == NULL || ((l2e = pmap_load(l2)) & PTE_V) == 0)
2411                 goto done;
2412         if ((l2e & PTE_RWX) == 0) {
2413                 pte = pmap_l2_to_l3(l2, va);
2414                 if (pte == NULL || ((oldpte = pmap_load(pte)) & PTE_V) == 0)
2415                         goto done;
2416         } else {
2417                 pte = l2;
2418                 oldpte = l2e;
2419         }
2420
2421         if ((pmap != kernel_pmap && (oldpte & PTE_U) == 0) ||
2422             (ftype == VM_PROT_WRITE && (oldpte & PTE_W) == 0) ||
2423             (ftype == VM_PROT_EXECUTE && (oldpte & PTE_X) == 0) ||
2424             (ftype == VM_PROT_READ && (oldpte & PTE_R) == 0))
2425                 goto done;
2426
2427         bits = PTE_A;
2428         if (ftype == VM_PROT_WRITE)
2429                 bits |= PTE_D;
2430
2431         /*
2432          * Spurious faults can occur if the implementation caches invalid
2433          * entries in the TLB, or if simultaneous accesses on multiple CPUs
2434          * race with each other.
2435          */
2436         if ((oldpte & bits) != bits)
2437                 pmap_store_bits(pte, bits);
2438         sfence_vma();
2439         rv = 1;
2440 done:
2441         PMAP_UNLOCK(pmap);
2442         return (rv);
2443 }
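
/*
 * pmap_fault_fixup() above implements software management of the accessed and
 * dirty bits: on a fault that the existing permissions actually allow, PTE_A
 * (and PTE_D for a write) is set if missing, sfence_vma() is issued, and 1 is
 * returned so the caller can treat the fault as already handled.  A return of
 * 0 means the fault must be resolved by the VM system.
 */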
2444
2445 static bool
2446 pmap_demote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va)
2447 {
2448         struct rwlock *lock;
2449         bool rv;
2450
2451         lock = NULL;
2452         rv = pmap_demote_l2_locked(pmap, l2, va, &lock);
2453         if (lock != NULL)
2454                 rw_wunlock(lock);
2455         return (rv);
2456 }
2457
2458 /*
2459  * Tries to demote a 2MB page mapping.  If demotion fails, the 2MB page
2460  * mapping is invalidated.
2461  */
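/*
 * Rough sequence of the demotion below: obtain an L3 page-table page (either
 * the page stashed at promotion time or a fresh allocation), fill its 512
 * entries with copies of the 2MB leaf whose physical page number is advanced
 * by one page per slot (newl3 + (i << PTE_PPN0_S)), reserve spare pv entries
 * for managed mappings, and finally replace the L2 leaf with a non-leaf entry
 * pointing at the L3 page.  If the mapping was never accessed or no page can
 * be obtained, the 2MB mapping is destroyed instead.
 */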
2462 static bool
2463 pmap_demote_l2_locked(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
2464     struct rwlock **lockp)
2465 {
2466         struct spglist free;
2467         vm_page_t mpte;
2468         pd_entry_t newl2, oldl2;
2469         pt_entry_t *firstl3, newl3;
2470         vm_paddr_t mptepa;
2471         int i;
2472
2473         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2474
2475         oldl2 = pmap_load(l2);
2476         KASSERT((oldl2 & PTE_RWX) != 0,
2477             ("pmap_demote_l2_locked: oldl2 is not a leaf entry"));
2478         if ((oldl2 & PTE_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
2479             NULL) {
2480                 if ((oldl2 & PTE_A) == 0 || (mpte = vm_page_alloc(NULL,
2481                     pmap_l2_pindex(va), (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT :
2482                     VM_ALLOC_NORMAL) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) ==
2483                     NULL) {
2484                         SLIST_INIT(&free);
2485                         (void)pmap_remove_l2(pmap, l2, va & ~L2_OFFSET,
2486                             pmap_load(pmap_l1(pmap, va)), &free, lockp);
2487                         vm_page_free_pages_toq(&free, true);
2488                         CTR2(KTR_PMAP, "pmap_demote_l2_locked: "
2489                             "failure for va %#lx in pmap %p", va, pmap);
2490                         return (false);
2491                 }
2492                 if (va < VM_MAXUSER_ADDRESS) {
2493                         mpte->wire_count = Ln_ENTRIES;
2494                         pmap_resident_count_inc(pmap, 1);
2495                 }
2496         }
2497         mptepa = VM_PAGE_TO_PHYS(mpte);
2498         firstl3 = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
2499         newl2 = ((mptepa / PAGE_SIZE) << PTE_PPN0_S) | PTE_V;
2500         KASSERT((oldl2 & PTE_A) != 0,
2501             ("pmap_demote_l2_locked: oldl2 is missing PTE_A"));
2502         KASSERT((oldl2 & (PTE_D | PTE_W)) != PTE_W,
2503             ("pmap_demote_l2_locked: oldl2 is missing PTE_D"));
2504         newl3 = oldl2;
2505
2506         /*
2507          * If the page table page is not leftover from an earlier promotion,
2508          * initialize it.
2509          */
2510         if (mpte->valid == 0) {
2511                 for (i = 0; i < Ln_ENTRIES; i++)
2512                         pmap_store(firstl3 + i, newl3 + (i << PTE_PPN0_S));
2513         }
2514         KASSERT(PTE_TO_PHYS(pmap_load(firstl3)) == PTE_TO_PHYS(newl3),
2515             ("pmap_demote_l2_locked: firstl3 and newl3 map different physical "
2516             "addresses"));
2517
2518         /*
2519          * If the mapping has changed attributes, update the page table
2520          * entries.
2521          */
2522         if ((pmap_load(firstl3) & PTE_PROMOTE) != (newl3 & PTE_PROMOTE))
2523                 for (i = 0; i < Ln_ENTRIES; i++)
2524                         pmap_store(firstl3 + i, newl3 + (i << PTE_PPN0_S));
2525
2526         /*
2527          * The spare PV entries must be reserved prior to demoting the
2528          * mapping, that is, prior to changing the L2 entry.  Otherwise, the
2529          * state of the L2 entry and the PV lists will be inconsistent, which
2530          * can result in reclaim_pv_chunk() attempting to remove a PV entry from
2531          * the wrong PV list and pmap_pv_demote_l2() failing to find the
2532          * expected PV entry for the 2MB page mapping that is being demoted.
2533          */
2534         if ((oldl2 & PTE_SW_MANAGED) != 0)
2535                 reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
2536
2537         /*
2538          * Demote the mapping.
2539          */
2540         pmap_store(l2, newl2);
2541
2542         /*
2543          * Demote the PV entry.
2544          */
2545         if ((oldl2 & PTE_SW_MANAGED) != 0)
2546                 pmap_pv_demote_l2(pmap, va, PTE_TO_PHYS(oldl2), lockp);
2547
2548         atomic_add_long(&pmap_l2_demotions, 1);
2549         CTR2(KTR_PMAP, "pmap_demote_l2_locked: success for va %#lx in pmap %p",
2550             va, pmap);
2551         return (true);
2552 }
2553
2554 #if VM_NRESERVLEVEL > 0
2555 static void
2556 pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
2557     struct rwlock **lockp)
2558 {
2559         pt_entry_t *firstl3, *l3;
2560         vm_paddr_t pa;
2561         vm_page_t ml3;
2562
2563         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2564
2565         va &= ~L2_OFFSET;
2566         KASSERT((pmap_load(l2) & PTE_RWX) == 0,
2567             ("pmap_promote_l2: invalid l2 entry %p", l2));
2568
2569         firstl3 = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l2)));
2570         pa = PTE_TO_PHYS(pmap_load(firstl3));
2571         if ((pa & L2_OFFSET) != 0) {
2572                 CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx pmap %p",
2573                     va, pmap);
2574                 atomic_add_long(&pmap_l2_p_failures, 1);
2575                 return;
2576         }
2577
2578         pa += PAGE_SIZE;
2579         for (l3 = firstl3 + 1; l3 < firstl3 + Ln_ENTRIES; l3++) {
2580                 if (PTE_TO_PHYS(pmap_load(l3)) != pa) {
2581                         CTR2(KTR_PMAP,
2582                             "pmap_promote_l2: failure for va %#lx pmap %p",
2583                             va, pmap);
2584                         atomic_add_long(&pmap_l2_p_failures, 1);
2585                         return;
2586                 }
2587                 if ((pmap_load(l3) & PTE_PROMOTE) !=
2588                     (pmap_load(firstl3) & PTE_PROMOTE)) {
2589                         CTR2(KTR_PMAP,
2590                             "pmap_promote_l2: failure for va %#lx pmap %p",
2591                             va, pmap);
2592                         atomic_add_long(&pmap_l2_p_failures, 1);
2593                         return;
2594                 }
2595                 pa += PAGE_SIZE;
2596         }
2597
2598         ml3 = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l2)));
2599         KASSERT(ml3->pindex == pmap_l2_pindex(va),
2600             ("pmap_promote_l2: page table page's pindex is wrong"));
2601         if (pmap_insert_pt_page(pmap, ml3, true)) {
2602                 CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx pmap %p",
2603                     va, pmap);
2604                 atomic_add_long(&pmap_l2_p_failures, 1);
2605                 return;
2606         }
2607
2608         if ((pmap_load(firstl3) & PTE_SW_MANAGED) != 0)
2609                 pmap_pv_promote_l2(pmap, va, PTE_TO_PHYS(pmap_load(firstl3)),
2610                     lockp);
2611
2612         pmap_store(l2, pmap_load(firstl3));
2613
2614         atomic_add_long(&pmap_l2_promotions, 1);
2615         CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
2616             pmap);
2617 }
2618 #endif
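
/*
 * Promotion above requires that all 512 L3 entries map physically contiguous,
 * 2MB-aligned memory with identical attributes (compared under PTE_PROMOTE).
 * The now-unused L3 page-table page is stashed with pmap_insert_pt_page() so
 * a later demotion can recover it, and the first L3 entry is installed
 * directly as the L2 leaf.
 */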
2619
2620 /*
2621  *      Insert the given physical page (p) at
2622  *      the specified virtual address (v) in the
2623  *      target physical map with the protection requested.
2624  *
2625  *      If specified, the page will be wired down, meaning
2626  *      that the related pte can not be reclaimed.
2627  *
2628  *      NB:  This is the only routine which MAY NOT lazy-evaluate
2629  *      or lose information.  That is, this routine must actually
2630  *      insert this page into the given map NOW.
2631  */
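/*
 * Sketch of how the new L3 entry is assembled below: the physical page number
 * is pa / PAGE_SIZE, and the base entry is
 *
 *	new_l3 = PTE_V | PTE_R | PTE_A | (pn << PTE_PPN0_S);
 *
 * to which PTE_X, PTE_W, PTE_D, PTE_U, PTE_SW_WIRED and PTE_SW_MANAGED are
 * added depending on the requested protection, the flags and whether the
 * page is managed.
 */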
2632 int
2633 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2634     u_int flags, int8_t psind)
2635 {
2636         struct rwlock *lock;
2637         pd_entry_t *l1, *l2, l2e;
2638         pt_entry_t new_l3, orig_l3;
2639         pt_entry_t *l3;
2640         pv_entry_t pv;
2641         vm_paddr_t opa, pa, l2_pa, l3_pa;
2642         vm_page_t mpte, om, l2_m, l3_m;
2643         pt_entry_t entry;
2644         pn_t l2_pn, l3_pn, pn;
2645         int rv;
2646         bool nosleep;
2647
2648         va = trunc_page(va);
2649         if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
2650                 VM_OBJECT_ASSERT_LOCKED(m->object);
2651         pa = VM_PAGE_TO_PHYS(m);
2652         pn = (pa / PAGE_SIZE);
2653
2654         new_l3 = PTE_V | PTE_R | PTE_A;
2655         if (prot & VM_PROT_EXECUTE)
2656                 new_l3 |= PTE_X;
2657         if (flags & VM_PROT_WRITE)
2658                 new_l3 |= PTE_D;
2659         if (prot & VM_PROT_WRITE)
2660                 new_l3 |= PTE_W;
2661         if (va < VM_MAX_USER_ADDRESS)
2662                 new_l3 |= PTE_U;
2663
2664         new_l3 |= (pn << PTE_PPN0_S);
2665         if ((flags & PMAP_ENTER_WIRED) != 0)
2666                 new_l3 |= PTE_SW_WIRED;
2667
2668         /*
2669          * Set modified bit gratuitously for writeable mappings if
2670          * the page is unmanaged. We do not want to take a fault
2671          * to do the dirty bit accounting for these mappings.
2672          */
2673         if ((m->oflags & VPO_UNMANAGED) != 0) {
2674                 if (prot & VM_PROT_WRITE)
2675                         new_l3 |= PTE_D;
2676         } else
2677                 new_l3 |= PTE_SW_MANAGED;
2678
2679         CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
2680
2681         lock = NULL;
2682         mpte = NULL;
2683         rw_rlock(&pvh_global_lock);
2684         PMAP_LOCK(pmap);
2685         if (psind == 1) {
2686                 /* Assert the required virtual and physical alignment. */
2687                 KASSERT((va & L2_OFFSET) == 0,
2688                     ("pmap_enter: va %#lx unaligned", va));
2689                 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
2690                 rv = pmap_enter_l2(pmap, va, new_l3, flags, m, &lock);
2691                 goto out;
2692         }
2693
2694         l2 = pmap_l2(pmap, va);
2695         if (l2 != NULL && ((l2e = pmap_load(l2)) & PTE_V) != 0 &&
2696             ((l2e & PTE_RWX) == 0 || pmap_demote_l2_locked(pmap, l2,
2697             va, &lock))) {
2698                 l3 = pmap_l2_to_l3(l2, va);
2699                 if (va < VM_MAXUSER_ADDRESS) {
2700                         mpte = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l2)));
2701                         mpte->wire_count++;
2702                 }
2703         } else if (va < VM_MAXUSER_ADDRESS) {
2704                 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
2705                 mpte = pmap_alloc_l3(pmap, va, nosleep ? NULL : &lock);
2706                 if (mpte == NULL && nosleep) {
2707                         CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
2708                         if (lock != NULL)
2709                                 rw_wunlock(lock);
2710                         rw_runlock(&pvh_global_lock);
2711                         PMAP_UNLOCK(pmap);
2712                         return (KERN_RESOURCE_SHORTAGE);
2713                 }
2714                 l3 = pmap_l3(pmap, va);
2715         } else {
2716                 l3 = pmap_l3(pmap, va);
2717                 /* TODO: This is not optimal, but should mostly work */
2718                 if (l3 == NULL) {
2719                         if (l2 == NULL) {
2720                                 l2_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
2721                                     VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
2722                                     VM_ALLOC_ZERO);
2723                                 if (l2_m == NULL)
2724                                         panic("pmap_enter: l2 pte_m == NULL");
2725                                 if ((l2_m->flags & PG_ZERO) == 0)
2726                                         pmap_zero_page(l2_m);
2727
2728                                 l2_pa = VM_PAGE_TO_PHYS(l2_m);
2729                                 l2_pn = (l2_pa / PAGE_SIZE);
2730
2731                                 l1 = pmap_l1(pmap, va);
2732                                 entry = (PTE_V);
2733                                 entry |= (l2_pn << PTE_PPN0_S);
2734                                 pmap_store(l1, entry);
2735                                 pmap_distribute_l1(pmap, pmap_l1_index(va), entry);
2736                                 l2 = pmap_l1_to_l2(l1, va);
2737                         }
2738
2739                         l3_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
2740                             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
2741                         if (l3_m == NULL)
2742                                 panic("pmap_enter: l3 pte_m == NULL");
2743                         if ((l3_m->flags & PG_ZERO) == 0)
2744                                 pmap_zero_page(l3_m);
2745
2746                         l3_pa = VM_PAGE_TO_PHYS(l3_m);
2747                         l3_pn = (l3_pa / PAGE_SIZE);
2748                         entry = (PTE_V);
2749                         entry |= (l3_pn << PTE_PPN0_S);
2750                         pmap_store(l2, entry);
2751                         l3 = pmap_l2_to_l3(l2, va);
2752                 }
2753                 pmap_invalidate_page(pmap, va);
2754         }
2755
2756         orig_l3 = pmap_load(l3);
2757         opa = PTE_TO_PHYS(orig_l3);
2758         pv = NULL;
2759
2760         /*
2761          * Is the specified virtual address already mapped?
2762          */
2763         if ((orig_l3 & PTE_V) != 0) {
2764                 /*
2765                  * Wiring change, just update stats. We don't worry about
2766                  * wiring PT pages as they remain resident as long as there
2767                  * are valid mappings in them. Hence, if a user page is wired,
2768                  * the PT page will be also.
2769                  */
2770                 if ((flags & PMAP_ENTER_WIRED) != 0 &&
2771                     (orig_l3 & PTE_SW_WIRED) == 0)
2772                         pmap->pm_stats.wired_count++;
2773                 else if ((flags & PMAP_ENTER_WIRED) == 0 &&
2774                     (orig_l3 & PTE_SW_WIRED) != 0)
2775                         pmap->pm_stats.wired_count--;
2776
2777                 /*
2778                  * Remove the extra PT page reference.
2779                  */
2780                 if (mpte != NULL) {
2781                         mpte->wire_count--;
2782                         KASSERT(mpte->wire_count > 0,
2783                             ("pmap_enter: missing reference to page table page,"
2784                              " va: 0x%lx", va));
2785                 }
2786
2787                 /*
2788                  * Has the physical page changed?
2789                  */
2790                 if (opa == pa) {
2791                         /*
2792                          * No, might be a protection or wiring change.
2793                          */
2794                         if ((orig_l3 & PTE_SW_MANAGED) != 0 &&
2795                             (new_l3 & PTE_W) != 0)
2796                                 vm_page_aflag_set(m, PGA_WRITEABLE);
2797                         goto validate;
2798                 }
2799
2800                 /*
2801                  * The physical page has changed.  Temporarily invalidate
2802                  * the mapping.  This ensures that all threads sharing the
2803                  * pmap keep a consistent view of the mapping, which is
2804                  * necessary for the correct handling of COW faults.  It
2805                  * also permits reuse of the old mapping's PV entry,
2806                  * avoiding an allocation.
2807                  *
2808                  * For consistency, handle unmanaged mappings the same way.
2809                  */
2810                 orig_l3 = pmap_load_clear(l3);
2811                 KASSERT(PTE_TO_PHYS(orig_l3) == opa,
2812                     ("pmap_enter: unexpected pa update for %#lx", va));
2813                 if ((orig_l3 & PTE_SW_MANAGED) != 0) {
2814                         om = PHYS_TO_VM_PAGE(opa);
2815
2816                         /*
2817                          * The pmap lock is sufficient to synchronize with
2818                          * concurrent calls to pmap_page_test_mappings() and
2819                          * pmap_ts_referenced().
2820                          */
2821                         if ((orig_l3 & PTE_D) != 0)
2822                                 vm_page_dirty(om);
2823                         if ((orig_l3 & PTE_A) != 0)
2824                                 vm_page_aflag_set(om, PGA_REFERENCED);
2825                         CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
2826                         pv = pmap_pvh_remove(&om->md, pmap, va);
2827                         KASSERT(pv != NULL,
2828                             ("pmap_enter: no PV entry for %#lx", va));
2829                         if ((new_l3 & PTE_SW_MANAGED) == 0)
2830                                 free_pv_entry(pmap, pv);
2831                         if ((om->aflags & PGA_WRITEABLE) != 0 &&
2832                             TAILQ_EMPTY(&om->md.pv_list))
2833                                 vm_page_aflag_clear(om, PGA_WRITEABLE);
2834                 }
2835                 pmap_invalidate_page(pmap, va);
2836                 orig_l3 = 0;
2837         } else {
2838                 /*
2839                  * Increment the counters.
2840                  */
2841                 if ((new_l3 & PTE_SW_WIRED) != 0)
2842                         pmap->pm_stats.wired_count++;
2843                 pmap_resident_count_inc(pmap, 1);
2844         }
2845         /*
2846          * Enter on the PV list if part of our managed memory.
2847          */
2848         if ((new_l3 & PTE_SW_MANAGED) != 0) {
2849                 if (pv == NULL) {
2850                         pv = get_pv_entry(pmap, &lock);
2851                         pv->pv_va = va;
2852                 }
2853                 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
2854                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2855                 m->md.pv_gen++;
2856                 if ((new_l3 & PTE_W) != 0)
2857                         vm_page_aflag_set(m, PGA_WRITEABLE);
2858         }
2859
2860 validate:
2861         /*
2862          * Sync the i-cache on all harts before updating the PTE
2863          * if the new PTE is executable.
2864          */
2865         if (prot & VM_PROT_EXECUTE)
2866                 pmap_sync_icache(pmap, va, PAGE_SIZE);
2867
2868         /*
2869          * Update the L3 entry.
2870          */
2871         if (orig_l3 != 0) {
2872                 orig_l3 = pmap_load_store(l3, new_l3);
2873                 pmap_invalidate_page(pmap, va);
2874                 KASSERT(PTE_TO_PHYS(orig_l3) == pa,
2875                     ("pmap_enter: invalid update"));
2876                 if ((orig_l3 & (PTE_D | PTE_SW_MANAGED)) ==
2877                     (PTE_D | PTE_SW_MANAGED))
2878                         vm_page_dirty(m);
2879         } else {
2880                 pmap_store(l3, new_l3);
2881         }
2882
2883 #if VM_NRESERVLEVEL > 0
2884         if (mpte != NULL && mpte->wire_count == Ln_ENTRIES &&
2885             pmap_ps_enabled(pmap) &&
2886             (m->flags & PG_FICTITIOUS) == 0 &&
2887             vm_reserv_level_iffullpop(m) == 0)
2888                 pmap_promote_l2(pmap, l2, va, &lock);
2889 #endif
2890
2891         rv = KERN_SUCCESS;
2892 out:
2893         if (lock != NULL)
2894                 rw_wunlock(lock);
2895         rw_runlock(&pvh_global_lock);
2896         PMAP_UNLOCK(pmap);
2897         return (rv);
2898 }
2899
2900 /*
2901  * Tries to create a read- and/or execute-only 2MB page mapping.  Returns true
2902  * if successful.  Returns false if (1) a page table page cannot be allocated
2903  * without sleeping, (2) a mapping already exists at the specified virtual
2904  * address, or (3) a PV entry cannot be allocated without reclaiming another
2905  * PV entry.
2906  */
2907 static bool
2908 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2909     struct rwlock **lockp)
2910 {
2911         pd_entry_t new_l2;
2912         pn_t pn;
2913
2914         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2915
2916         pn = VM_PAGE_TO_PHYS(m) / PAGE_SIZE;
2917         new_l2 = (pd_entry_t)((pn << PTE_PPN0_S) | PTE_R | PTE_V);
2918         if ((m->oflags & VPO_UNMANAGED) == 0)
2919                 new_l2 |= PTE_SW_MANAGED;
2920         if ((prot & VM_PROT_EXECUTE) != 0)
2921                 new_l2 |= PTE_X;
2922         if (va < VM_MAXUSER_ADDRESS)
2923                 new_l2 |= PTE_U;
2924         return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
2925             PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
2926             KERN_SUCCESS);
2927 }
2928
2929 /*
2930  * Tries to create the specified 2MB page mapping.  Returns KERN_SUCCESS if
2931  * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
2932  * otherwise.  Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
2933  * a mapping already exists at the specified virtual address.  Returns
2934  * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
2935  * page allocation failed.  Returns KERN_RESOURCE_SHORTAGE if
2936  * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
2937  *
2938  * The parameter "m" is only used when creating a managed, writeable mapping.
2939  */
2940 static int
2941 pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
2942     vm_page_t m, struct rwlock **lockp)
2943 {
2944         struct spglist free;
2945         pd_entry_t *l2, *l3, oldl2;
2946         vm_offset_t sva;
2947         vm_page_t l2pg, mt;
2948
2949         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2950
2951         if ((l2pg = pmap_alloc_l2(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
2952             NULL : lockp)) == NULL) {
2953                 CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p",
2954                     va, pmap);
2955                 return (KERN_RESOURCE_SHORTAGE);
2956         }
2957
2958         l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
2959         l2 = &l2[pmap_l2_index(va)];
2960         if ((oldl2 = pmap_load(l2)) != 0) {
2961                 KASSERT(l2pg->wire_count > 1,
2962                     ("pmap_enter_l2: l2pg's wire count is too low"));
2963                 if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
2964                         l2pg->wire_count--;
2965                         CTR2(KTR_PMAP,
2966                             "pmap_enter_l2: failure for va %#lx in pmap %p",
2967                             va, pmap);
2968                         return (KERN_FAILURE);
2969                 }
2970                 SLIST_INIT(&free);
2971                 if ((oldl2 & PTE_RWX) != 0)
2972                         (void)pmap_remove_l2(pmap, l2, va,
2973                             pmap_load(pmap_l1(pmap, va)), &free, lockp);
2974                 else
2975                         for (sva = va; sva < va + L2_SIZE; sva += PAGE_SIZE) {
2976                                 l3 = pmap_l2_to_l3(l2, sva);
2977                                 if ((pmap_load(l3) & PTE_V) != 0 &&
2978                                     pmap_remove_l3(pmap, l3, sva, oldl2, &free,
2979                                     lockp) != 0)
2980                                         break;
2981                         }
2982                 vm_page_free_pages_toq(&free, true);
2983                 if (va >= VM_MAXUSER_ADDRESS) {
2984                         /*
2985                          * Both pmap_remove_l2() and pmap_remove_l3() will
2986                          * leave the kernel page table page zero filled.
2987                          */
2988                         mt = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l2)));
2989                         if (pmap_insert_pt_page(pmap, mt, false))
2990                                 panic("pmap_enter_l2: trie insert failed");
2991                 } else
2992                         KASSERT(pmap_load(l2) == 0,
2993                             ("pmap_enter_l2: non-zero L2 entry %p", l2));
2994         }
2995
2996         if ((new_l2 & PTE_SW_MANAGED) != 0) {
2997                 /*
2998                  * Abort this mapping if its PV entry could not be created.
2999                  */
3000                 if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
3001                         SLIST_INIT(&free);
3002                         if (pmap_unwire_ptp(pmap, va, l2pg, &free)) {
3003                                 /*
3004                                  * Although "va" is not mapped, paging-structure
3005                                  * caches could nonetheless have entries that
3006                                  * refer to the freed page table pages.
3007                                  * Invalidate those entries.
3008                                  */
3009                                 pmap_invalidate_page(pmap, va);
3010                                 vm_page_free_pages_toq(&free, true);
3011                         }
3012                         CTR2(KTR_PMAP,
3013                             "pmap_enter_l2: failure for va %#lx in pmap %p",
3014                             va, pmap);
3015                         return (KERN_RESOURCE_SHORTAGE);
3016                 }
3017                 if ((new_l2 & PTE_W) != 0)
3018                         for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
3019                                 vm_page_aflag_set(mt, PGA_WRITEABLE);
3020         }
3021
3022         /*
3023          * Increment counters.
3024          */
3025         if ((new_l2 & PTE_SW_WIRED) != 0)
3026                 pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE;
3027         pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE;
3028
3029         /*
3030          * Map the superpage.
3031          */
3032         pmap_store(l2, new_l2);
3033
3034         atomic_add_long(&pmap_l2_mappings, 1);
3035         CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p",
3036             va, pmap);
3037
3038         return (KERN_SUCCESS);
3039 }
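
/*
 * Editor's note: an illustrative consistency check, not part of the original
 * file.  The counter updates above add L2_SIZE / PAGE_SIZE pages per 2MB
 * mapping; on Sv39 that ratio equals Ln_ENTRIES (512), the same value that
 * the promotion test in pmap_enter() compares a page table page's wire_count
 * against.
 */
#if 0
CTASSERT(L2_SIZE / PAGE_SIZE == Ln_ENTRIES);
#endif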
3040
3041 /*
3042  * Maps a sequence of resident pages belonging to the same object.
3043  * The sequence begins with the given page m_start.  This page is
3044  * mapped at the given virtual address start.  Each subsequent page is
3045  * mapped at a virtual address that is offset from start by the same
3046  * amount as the page is offset from m_start within the object.  The
3047  * last page in the sequence is the page with the largest offset from
3048  * m_start that can be mapped at a virtual address less than the given
3049  * virtual address end.  Not every virtual page between start and end
3050  * is mapped; only those for which a resident page exists with the
3051  * corresponding offset from m_start are mapped.
3052  */
3053 void
3054 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
3055     vm_page_t m_start, vm_prot_t prot)
3056 {
3057         struct rwlock *lock;
3058         vm_offset_t va;
3059         vm_page_t m, mpte;
3060         vm_pindex_t diff, psize;
3061
3062         VM_OBJECT_ASSERT_LOCKED(m_start->object);
3063
3064         psize = atop(end - start);
3065         mpte = NULL;
3066         m = m_start;
3067         lock = NULL;
3068         rw_rlock(&pvh_global_lock);
3069         PMAP_LOCK(pmap);
3070         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3071                 va = start + ptoa(diff);
3072                 if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end &&
3073                     m->psind == 1 && pmap_ps_enabled(pmap) &&
3074                     pmap_enter_2mpage(pmap, va, m, prot, &lock))
3075                         m = &m[L2_SIZE / PAGE_SIZE - 1];
3076                 else
3077                         mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte,
3078                             &lock);
3079                 m = TAILQ_NEXT(m, listq);
3080         }
3081         if (lock != NULL)
3082                 rw_wunlock(lock);
3083         rw_runlock(&pvh_global_lock);
3084         PMAP_UNLOCK(pmap);
3085 }
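
/*
 * Editor's note: a hypothetical caller sketch, not part of the original file;
 * the function name and parameters are illustrative.  It shows the intended
 * use documented above: mapping whatever pages of an object are already
 * resident across a fixed virtual range in a single call.
 */
#if 0
static void
pmap_enter_object_example(pmap_t pmap, vm_offset_t base, vm_page_t first)
{

        /* The caller must hold first->object's lock (asserted above). */
        /* Covers up to two 2MB superpages when "base" is 2MB-aligned. */
        pmap_enter_object(pmap, base, base + 2 * L2_SIZE, first,
            VM_PROT_READ | VM_PROT_EXECUTE);
}
#endif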
3086
3087 /*
3088  * This code makes some *MAJOR* assumptions:
3089  * 1. The current pmap and the given pmap exist.
3090  * 2. The mapping is not wired.
3091  * 3. Read access only.
3092  * 4. No page table pages.
3093  * It is, however, *MUCH* faster than pmap_enter...
3094  */
3095
3096 void
3097 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3098 {
3099         struct rwlock *lock;
3100
3101         lock = NULL;
3102         rw_rlock(&pvh_global_lock);
3103         PMAP_LOCK(pmap);
3104         (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
3105         if (lock != NULL)
3106                 rw_wunlock(lock);
3107         rw_runlock(&pvh_global_lock);
3108         PMAP_UNLOCK(pmap);
3109 }
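
/*
 * Editor's note: a hypothetical caller sketch, not part of the original file;
 * the function name is illustrative.  Per the assumptions listed above,
 * pmap_enter_quick() never sleeps, never wires, and simply gives up if it
 * cannot map the page cheaply.
 */
#if 0
static void
pmap_enter_quick_example(pmap_t pmap, vm_offset_t va, vm_page_t m)
{

        /* Opportunistic, read-only, prefault-style mapping. */
        pmap_enter_quick(pmap, va, m, VM_PROT_READ);
}
#endif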
3110
3111 static vm_page_t
3112 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3113     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
3114 {
3115         struct spglist free;
3116         vm_paddr_t phys;
3117         pd_entry_t *l2;
3118         pt_entry_t *l3, newl3;
3119
3120         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
3121             (m->oflags & VPO_UNMANAGED) != 0,
3122             ("pmap_enter_quick_locked: managed mapping within the clean submap"));
3123         rw_assert(&pvh_global_lock, RA_LOCKED);
3124         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3125
3126         CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
3127         /*
3128          * In the case that a page table page is not
3129          * resident, we are creating it here.
3130          */
3131         if (va < VM_MAXUSER_ADDRESS) {
3132                 vm_pindex_t l2pindex;
3133
3134                 /*
3135                  * Calculate pagetable page index
3136                  */
3137                 l2pindex = pmap_l2_pindex(va);
3138                 if (mpte && (mpte->pindex == l2pindex)) {
3139                         mpte->wire_count++;
3140                 } else {
3141                         /*
3142                          * Get the l2 entry
3143                          */
3144                         l2 = pmap_l2(pmap, va);
3145
3146                         /*
3147                          * If the page table page is mapped, we just increment
3148                          * the hold count, and activate it.  Otherwise, we
3149                          * attempt to allocate a page table page.  If this
3150                          * attempt fails, we don't retry.  Instead, we give up.
3151                          */
3152                         if (l2 != NULL && pmap_load(l2) != 0) {
3153                                 phys = PTE_TO_PHYS(pmap_load(l2));
3154                                 mpte = PHYS_TO_VM_PAGE(phys);
3155                                 mpte->wire_count++;
3156                         } else {
3157                                 /*
3158                                  * Pass NULL instead of the PV list lock
3159                                  * pointer, because we don't intend to sleep.
3160                                  */
3161                                 mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
3162                                 if (mpte == NULL)
3163                                         return (mpte);
3164                         }
3165                 }
3166                 l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
3167                 l3 = &l3[pmap_l3_index(va)];
3168         } else {
3169                 mpte = NULL;
3170                 l3 = pmap_l3(kernel_pmap, va);
3171         }
3172         if (l3 == NULL)
3173                 panic("pmap_enter_quick_locked: No l3");
3174         if (pmap_load(l3) != 0) {
3175                 if (mpte != NULL) {
3176                         mpte->wire_count--;
3177                         mpte = NULL;
3178                 }
3179                 return (mpte);
3180         }
3181
3182         /*
3183          * Enter on the PV list if part of our managed memory.
3184          */
3185         if ((m->oflags & VPO_UNMANAGED) == 0 &&
3186             !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
3187                 if (mpte != NULL) {
3188                         SLIST_INIT(&free);
3189                         if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
3190                                 pmap_invalidate_page(pmap, va);
3191                                 vm_page_free_pages_toq(&free, false);
3192                         }
3193                         mpte = NULL;
3194                 }
3195                 return (mpte);
3196         }
3197
3198         /*
3199          * Increment counters
3200          */
3201         pmap_resident_count_inc(pmap, 1);
3202
3203         newl3 = ((VM_PAGE_TO_PHYS(m) / PAGE_SIZE) << PTE_PPN0_S) |
3204             PTE_V | PTE_R;
3205         if ((prot & VM_PROT_EXECUTE) != 0)
3206                 newl3 |= PTE_X;
3207         if ((m->oflags & VPO_UNMANAGED) == 0)
3208                 newl3 |= PTE_SW_MANAGED;
3209         if (va < VM_MAXUSER_ADDRESS)
3210                 newl3 |= PTE_U;
3211
3212         /*
3213          * Sync the i-cache on all harts before updating the PTE
3214          * if the new PTE is executable.
3215          */
3216         if (prot & VM_PROT_EXECUTE)
3217                 pmap_sync_icache(pmap, va, PAGE_SIZE);
3218
3219         pmap_store(l3, newl3);
3220
3221         pmap_invalidate_page(pmap, va);
3222         return (mpte);
3223 }
3224
3225 /*
3226  * This code maps large physical mmap regions into the
3227  * processor address space.  Note that some shortcuts
3228  * are taken, but the code works.
3229  */
3230 void
3231 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
3232     vm_pindex_t pindex, vm_size_t size)
3233 {
3234
3235         VM_OBJECT_ASSERT_WLOCKED(object);
3236         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3237             ("pmap_object_init_pt: non-device object"));
3238 }
3239
3240 /*
3241  *      Clear the wired attribute from the mappings for the specified range of
3242  *      addresses in the given pmap.  Every valid mapping within that range
3243  *      must have the wired attribute set.  In contrast, invalid mappings
3244  *      cannot have the wired attribute set, so they are ignored.
3245  *
3246  *      The wired attribute of the page table entry is not a hardware feature,
3247  *      so there is no need to invalidate any TLB entries.
3248  */
3249 void
3250 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3251 {
3252         vm_offset_t va_next;
3253         pd_entry_t *l1, *l2, l2e;
3254         pt_entry_t *l3, l3e;
3255         bool pv_lists_locked;
3256
3257         pv_lists_locked = false;
3258 retry:
3259         PMAP_LOCK(pmap);
3260         for (; sva < eva; sva = va_next) {
3261                 l1 = pmap_l1(pmap, sva);
3262                 if (pmap_load(l1) == 0) {
3263                         va_next = (sva + L1_SIZE) & ~L1_OFFSET;
3264                         if (va_next < sva)
3265                                 va_next = eva;
3266                         continue;
3267                 }
3268
3269                 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
3270                 if (va_next < sva)
3271                         va_next = eva;
3272
3273                 l2 = pmap_l1_to_l2(l1, sva);
3274                 if ((l2e = pmap_load(l2)) == 0)
3275                         continue;
3276                 if ((l2e & PTE_RWX) != 0) {
3277                         if (sva + L2_SIZE == va_next && eva >= va_next) {
3278                                 if ((l2e & PTE_SW_WIRED) == 0)
3279                                         panic("pmap_unwire: l2 %#jx is missing "
3280                                             "PTE_SW_WIRED", (uintmax_t)l2e);
3281                                 pmap_clear_bits(l2, PTE_SW_WIRED);
3282                                 continue;
3283                         } else {
3284                                 if (!pv_lists_locked) {
3285                                         pv_lists_locked = true;
3286                                         if (!rw_try_rlock(&pvh_global_lock)) {
3287                                                 PMAP_UNLOCK(pmap);
3288                                                 rw_rlock(&pvh_global_lock);
3289                                                 /* Repeat sva. */
3290                                                 goto retry;
3291                                         }
3292                                 }
3293                                 if (!pmap_demote_l2(pmap, l2, sva))
3294                                         panic("pmap_unwire: demotion failed");
3295                         }
3296                 }
3297
3298                 if (va_next > eva)
3299                         va_next = eva;
3300                 for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
3301                     sva += L3_SIZE) {
3302                         if ((l3e = pmap_load(l3)) == 0)
3303                                 continue;
3304                         if ((l3e & PTE_SW_WIRED) == 0)
3305                                 panic("pmap_unwire: l3 %#jx is missing "
3306                                     "PTE_SW_WIRED", (uintmax_t)l3e);
3307
3308                         /*
3309                          * PTE_SW_WIRED must be cleared atomically.  Although the
3310                          * pmap lock synchronizes access to it, another hart could
3311                          * be setting PTE_D and/or PTE_A concurrently.
3312                          */
3313                         pmap_clear_bits(l3, PTE_SW_WIRED);
3314                         pmap->pm_stats.wired_count--;
3315                 }
3316         }
3317         if (pv_lists_locked)
3318                 rw_runlock(&pvh_global_lock);
3319         PMAP_UNLOCK(pmap);
3320 }
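
/*
 * Editor's note: an illustrative consistency check, not part of the original
 * file.  As the comment above notes, the wired attribute is purely a software
 * bit (it lives in the PTE's RSW field per the RISC-V spec, mirroring
 * sys/riscv/include/pte.h), so clearing it changes nothing the MMU consults
 * and no sfence.vma is required.
 */
#if 0
CTASSERT((PTE_SW_WIRED & (PTE_V | PTE_R | PTE_W | PTE_X | PTE_U | PTE_A |
    PTE_D)) == 0);
#endif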
3321
3322 /*
3323  *      Copy the range specified by src_addr/len
3324  *      from the source map to the range dst_addr/len
3325  *      in the destination map.
3326  *
3327  *      This routine is only advisory and need not do anything.
3328  */
3329
3330 void
3331 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
3332     vm_offset_t src_addr)
3333 {
3334
3335 }
3336
3337 /*
3338  *      pmap_zero_page zeros the specified hardware page by mapping
3339  *      the page into KVM and using bzero to clear its contents.
3340  */
3341 void
3342 pmap_zero_page(vm_page_t m)
3343 {
3344         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3345
3346         pagezero((void *)va);
3347 }
3348
3349 /*
3350  *      pmap_zero_page_area zeros the specified area of a hardware page
3351  *      by mapping the page into KVM and using bzero to clear its contents.
3352  *
3353  *      off and size may not cover an area beyond a single hardware page.
3354  */
3355 void
3356 pmap_zero_page_area(vm_page_t m, int off, int size)
3357 {
3358         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3359
3360         if (off == 0 && size == PAGE_SIZE)
3361                 pagezero((void *)va);
3362         else
3363                 bzero((char *)va + off, size);
3364 }
3365
3366 /*
3367  *      pmap_copy_page copies the specified (machine independent)
3368  *      page by mapping the page into virtual memory and using
3369  *      bcopy to copy the page, one machine dependent page at a
3370  *      time.
3371  */
3372 void
3373 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
3374 {
3375         vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
3376         vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
3377
3378         pagecopy((void *)src, (void *)dst);
3379 }
3380
3381 int unmapped_buf_allowed = 1;
3382
3383 void
3384 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
3385     vm_offset_t b_offset, int xfersize)
3386 {
3387         void *a_cp, *b_cp;
3388         vm_page_t m_a, m_b;
3389         vm_paddr_t p_a, p_b;
3390         vm_offset_t a_pg_offset, b_pg_offset;
3391         int cnt;
3392
3393         while (xfersize > 0) {
3394                 a_pg_offset = a_offset & PAGE_MASK;
3395                 m_a = ma[a_offset >> PAGE_SHIFT];
3396                 p_a = m_a->phys_addr;
3397                 b_pg_offset = b_offset & PAGE_MASK;
3398                 m_b = mb[b_offset >> PAGE_SHIFT];
3399                 p_b = m_b->phys_addr;
3400                 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
3401                 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
3402                 if (__predict_false(!PHYS_IN_DMAP(p_a))) {
3403                         panic("!DMAP a %lx", p_a);
3404                 } else {
3405                         a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset;
3406                 }
3407                 if (__predict_false(!PHYS_IN_DMAP(p_b))) {
3408                         panic("!DMAP b %lx", p_b);
3409                 } else {
3410                         b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset;
3411                 }
3412                 bcopy(a_cp, b_cp, cnt);
3413                 a_offset += cnt;
3414                 b_offset += cnt;
3415                 xfersize -= cnt;
3416         }
3417 }
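
/*
 * Editor's note: a worked example with hypothetical offsets, not part of the
 * original file; the function name is illustrative.  It spells out the
 * chunking arithmetic above for one iteration of the copy loop.
 */
#if 0
static void
pmap_copy_pages_chunk_example(void)
{
        int cnt;

        /*
         * With the source 0xf00 bytes into its page and the destination
         * 0x100 bytes into its page, the chunk is limited by the source:
         * min(0x1000 - 0xf00, 0x1000 - 0x100) == 0x100 bytes.
         */
        cnt = min(PAGE_SIZE - 0xf00, PAGE_SIZE - 0x100);
        KASSERT(cnt == 0x100, ("example: chunk stops at source page end"));
}
#endif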
3418
3419 vm_offset_t
3420 pmap_quick_enter_page(vm_page_t m)
3421 {
3422
3423         return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
3424 }
3425
3426 void
3427 pmap_quick_remove_page(vm_offset_t addr)
3428 {
3429 }
3430
3431 /*
3432  * Returns true if the pmap's pv is one of the first
3433  * 16 pvs linked to from this page.  This count may
3434  * be changed upwards or downwards in the future; it
3435  * is only necessary that true be returned for a small
3436  * subset of pmaps for proper page aging.
3437  */
3438 boolean_t
3439 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
3440 {
3441         struct md_page *pvh;
3442         struct rwlock *lock;
3443         pv_entry_t pv;
3444         int loops = 0;
3445         boolean_t rv;
3446
3447         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3448             ("pmap_page_exists_quick: page %p is not managed", m));
3449         rv = FALSE;
3450         rw_rlock(&pvh_global_lock);
3451         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3452         rw_rlock(lock);
3453         TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3454                 if (PV_PMAP(pv) == pmap) {
3455                         rv = TRUE;
3456                         break;
3457                 }
3458                 loops++;
3459                 if (loops >= 16)
3460                         break;
3461         }
3462         if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
3463                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3464                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3465                         if (PV_PMAP(pv) == pmap) {
3466                                 rv = TRUE;
3467                                 break;
3468                         }
3469                         loops++;
3470                         if (loops >= 16)
3471                                 break;
3472                 }
3473         }
3474         rw_runlock(lock);
3475         rw_runlock(&pvh_global_lock);
3476         return (rv);
3477 }
3478
3479 /*
3480  *      pmap_page_wired_mappings:
3481  *
3482  *      Return the number of managed mappings to the given physical page
3483  *      that are wired.
3484  */
3485 int
3486 pmap_page_wired_mappings(vm_page_t m)
3487 {
3488         struct md_page *pvh;
3489         struct rwlock *lock;
3490         pmap_t pmap;
3491         pd_entry_t *l2;
3492         pt_entry_t *l3;
3493         pv_entry_t pv;
3494         int count, md_gen, pvh_gen;
3495
3496         if ((m->oflags & VPO_UNMANAGED) != 0)
3497                 return (0);
3498         rw_rlock(&pvh_global_lock);
3499         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3500         rw_rlock(lock);
3501 restart:
3502         count = 0;
3503         TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3504                 pmap = PV_PMAP(pv);
3505                 if (!PMAP_TRYLOCK(pmap)) {
3506                         md_gen = m->md.pv_gen;
3507                         rw_runlock(lock);
3508                         PMAP_LOCK(pmap);
3509                         rw_rlock(lock);
3510                         if (md_gen != m->md.pv_gen) {
3511                                 PMAP_UNLOCK(pmap);
3512                                 goto restart;
3513                         }
3514                 }
3515                 l3 = pmap_l3(pmap, pv->pv_va);
3516                 if ((pmap_load(l3) & PTE_SW_WIRED) != 0)
3517                         count++;
3518                 PMAP_UNLOCK(pmap);
3519         }
3520         if ((m->flags & PG_FICTITIOUS) == 0) {
3521                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3522                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3523                         pmap = PV_PMAP(pv);
3524                         if (!PMAP_TRYLOCK(pmap)) {
3525                                 md_gen = m->md.pv_gen;
3526                                 pvh_gen = pvh->pv_gen;
3527                                 rw_runlock(lock);
3528                                 PMAP_LOCK(pmap);
3529                                 rw_rlock(lock);
3530                                 if (md_gen != m->md.pv_gen ||
3531                                     pvh_gen != pvh->pv_gen) {
3532                                         PMAP_UNLOCK(pmap);
3533                                         goto restart;
3534                                 }
3535                         }
3536                         l2 = pmap_l2(pmap, pv->pv_va);
3537                         if ((pmap_load(l2) & PTE_SW_WIRED) != 0)
3538                                 count++;
3539                         PMAP_UNLOCK(pmap);
3540                 }
3541         }
3542         rw_runlock(lock);
3543         rw_runlock(&pvh_global_lock);
3544         return (count);
3545 }
3546
3547 static void
3548 pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entry_t pv,
3549     struct spglist *free, bool superpage)
3550 {
3551         struct md_page *pvh;
3552         vm_page_t mpte, mt;
3553
3554         if (superpage) {
3555                 pmap_resident_count_dec(pmap, Ln_ENTRIES);
3556                 pvh = pa_to_pvh(m->phys_addr);
3557                 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
3558                 pvh->pv_gen++;
3559                 if (TAILQ_EMPTY(&pvh->pv_list)) {
3560                         for (mt = m; mt < &m[Ln_ENTRIES]; mt++)
3561                                 if (TAILQ_EMPTY(&mt->md.pv_list) &&
3562                                     (mt->aflags & PGA_WRITEABLE) != 0)
3563                                         vm_page_aflag_clear(mt, PGA_WRITEABLE);
3564                 }
3565                 mpte = pmap_remove_pt_page(pmap, pv->pv_va);
3566                 if (mpte != NULL) {
3567                         KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
3568                             ("pmap_remove_pages: pte page not promoted"));
3569                         pmap_resident_count_dec(pmap, 1);
3570                         KASSERT(mpte->wire_count == Ln_ENTRIES,
3571                             ("pmap_remove_pages: pte page wire count error"));
3572                         mpte->wire_count = 0;
3573                         pmap_add_delayed_free_list(mpte, free, FALSE);
3574                 }
3575         } else {
3576                 pmap_resident_count_dec(pmap, 1);
3577                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
3578                 m->md.pv_gen++;
3579                 if (TAILQ_EMPTY(&m->md.pv_list) &&
3580                     (m->aflags & PGA_WRITEABLE) != 0) {
3581                         pvh = pa_to_pvh(m->phys_addr);
3582                         if (TAILQ_EMPTY(&pvh->pv_list))
3583                                 vm_page_aflag_clear(m, PGA_WRITEABLE);
3584                 }
3585         }
3586 }
3587
3588 /*
3589  * Destroy all managed, non-wired mappings in the given user-space
3590  * pmap.  This pmap cannot be active on any processor besides the
3591  * caller.
3592  *
3593  * This function cannot be applied to the kernel pmap.  Moreover, it
3594  * is not intended for general use.  It is only to be used during
3595  * process termination.  Consequently, it can be implemented in ways
3596  * that make it faster than pmap_remove().  First, it can more quickly
3597  * destroy mappings by iterating over the pmap's collection of PV
3598  * entries, rather than searching the page table.  Second, it doesn't
3599  * have to test and clear the page table entries atomically, because
3600  * no processor is currently accessing the user address space.  In
3601  * particular, a page table entry's dirty bit won't change state once
3602  * this function starts.
3603  */
3604 void
3605 pmap_remove_pages(pmap_t pmap)
3606 {
3607         struct spglist free;
3608         pd_entry_t ptepde;
3609         pt_entry_t *pte, tpte;
3610         vm_page_t m, mt;
3611         pv_entry_t pv;
3612         struct pv_chunk *pc, *npc;
3613         struct rwlock *lock;
3614         int64_t bit;
3615         uint64_t inuse, bitmask;
3616         int allfree, field, freed, idx;
3617         bool superpage;
3618
3619         lock = NULL;
3620
3621         SLIST_INIT(&free);
3622         rw_rlock(&pvh_global_lock);
3623         PMAP_LOCK(pmap);
3624         TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
3625                 allfree = 1;
3626                 freed = 0;
3627                 for (field = 0; field < _NPCM; field++) {
3628                         inuse = ~pc->pc_map[field] & pc_freemask[field];
3629                         while (inuse != 0) {
3630                                 bit = ffsl(inuse) - 1;
3631                                 bitmask = 1UL << bit;
3632                                 idx = field * 64 + bit;
3633                                 pv = &pc->pc_pventry[idx];
3634                                 inuse &= ~bitmask;
3635
3636                                 pte = pmap_l1(pmap, pv->pv_va);
3637                                 ptepde = pmap_load(pte);
3638                                 pte = pmap_l1_to_l2(pte, pv->pv_va);
3639                                 tpte = pmap_load(pte);
3640                                 if ((tpte & PTE_RWX) != 0) {
3641                                         superpage = true;
3642                                 } else {
3643                                         ptepde = tpte;
3644                                         pte = pmap_l2_to_l3(pte, pv->pv_va);
3645                                         tpte = pmap_load(pte);
3646                                         superpage = false;
3647                                 }
3648
3649                                 /*
3650                                  * We cannot remove wired pages from a
3651                                  * process' mapping at this time.
3652                                  */
3653                                 if (tpte & PTE_SW_WIRED) {
3654                                         allfree = 0;
3655                                         continue;
3656                                 }
3657
3658                                 m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tpte));
3659                                 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
3660                                     m < &vm_page_array[vm_page_array_size],
3661                                     ("pmap_remove_pages: bad pte %#jx",
3662                                     (uintmax_t)tpte));
3663
3664                                 pmap_clear(pte);
3665
3666                                 /*
3667                                  * Update the vm_page_t clean/reference bits.
3668                                  */
3669                                 if ((tpte & (PTE_D | PTE_W)) ==
3670                                     (PTE_D | PTE_W)) {
3671                                         if (superpage)
3672                                                 for (mt = m;
3673                                                     mt < &m[Ln_ENTRIES]; mt++)
3674                                                         vm_page_dirty(mt);
3675                                         else
3676                                                 vm_page_dirty(m);
3677                                 }
3678
3679                                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
3680
3681                                 /* Mark free */
3682                                 pc->pc_map[field] |= bitmask;
3683
3684                                 pmap_remove_pages_pv(pmap, m, pv, &free,
3685                                     superpage);
3686                                 pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
3687                                 freed++;
3688                         }
3689                 }
3690                 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
3691                 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
3692                 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
3693                 if (allfree) {
3694                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3695                         free_pv_chunk(pc);
3696                 }
3697         }
3698         if (lock != NULL)
3699                 rw_wunlock(lock);
3700         pmap_invalidate_all(pmap);
3701         rw_runlock(&pvh_global_lock);
3702         PMAP_UNLOCK(pmap);
3703         vm_page_free_pages_toq(&free, false);
3704 }
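
/*
 * Editor's note: a hypothetical caller sketch, not part of the original file;
 * the function name is illustrative.  It reflects the only intended use
 * described above: tearing down a dying process's address space, when no
 * other hart can still be using the pmap.
 */
#if 0
static void
pmap_remove_pages_example(struct vmspace *vm)
{

        pmap_remove_pages(vmspace_pmap(vm));
}
#endif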
3705
3706 static bool
3707 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
3708 {
3709         struct md_page *pvh;
3710         struct rwlock *lock;
3711         pd_entry_t *l2;
3712         pt_entry_t *l3, mask;
3713         pv_entry_t pv;
3714         pmap_t pmap;
3715         int md_gen, pvh_gen;
3716         bool rv;
3717
3718         mask = 0;
3719         if (modified)
3720                 mask |= PTE_D;
3721         if (accessed)
3722                 mask |= PTE_A;
3723
3724         rv = FALSE;
3725         rw_rlock(&pvh_global_lock);
3726         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3727         rw_rlock(lock);
3728 restart:
3729         TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3730                 pmap = PV_PMAP(pv);
3731                 if (!PMAP_TRYLOCK(pmap)) {
3732                         md_gen = m->md.pv_gen;
3733                         rw_runlock(lock);
3734                         PMAP_LOCK(pmap);
3735                         rw_rlock(lock);
3736                         if (md_gen != m->md.pv_gen) {
3737                                 PMAP_UNLOCK(pmap);
3738                                 goto restart;
3739                         }
3740                 }
3741                 l3 = pmap_l3(pmap, pv->pv_va);
3742                 rv = (pmap_load(l3) & mask) == mask;
3743                 PMAP_UNLOCK(pmap);
3744                 if (rv)
3745                         goto out;
3746         }
3747         if ((m->flags & PG_FICTITIOUS) == 0) {
3748                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3749                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3750                         pmap = PV_PMAP(pv);
3751                         if (!PMAP_TRYLOCK(pmap)) {
3752                                 md_gen = m->md.pv_gen;
3753                                 pvh_gen = pvh->pv_gen;
3754                                 rw_runlock(lock);
3755                                 PMAP_LOCK(pmap);
3756                                 rw_rlock(lock);
3757                                 if (md_gen != m->md.pv_gen ||
3758                                     pvh_gen != pvh->pv_gen) {
3759                                         PMAP_UNLOCK(pmap);
3760                                         goto restart;
3761                                 }
3762                         }
3763                         l2 = pmap_l2(pmap, pv->pv_va);
3764                         rv = (pmap_load(l2) & mask) == mask;
3765                         PMAP_UNLOCK(pmap);
3766                         if (rv)
3767                                 goto out;
3768                 }
3769         }
3770 out:
3771         rw_runlock(lock);
3772         rw_runlock(&pvh_global_lock);
3773         return (rv);
3774 }
3775
3776 /*
3777  *      pmap_is_modified:
3778  *
3779  *      Return whether or not the specified physical page was modified
3780  *      in any physical maps.
3781  */
3782 boolean_t
3783 pmap_is_modified(vm_page_t m)
3784 {
3785
3786         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3787             ("pmap_is_modified: page %p is not managed", m));
3788
3789         /*
3790          * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3791          * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
3792          * is clear, no PTEs can have PG_M set.
3793          */
3794         VM_OBJECT_ASSERT_WLOCKED(m->object);
3795         if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3796                 return (FALSE);
3797         return (pmap_page_test_mappings(m, FALSE, TRUE));
3798 }
3799
3800 /*
3801  *      pmap_is_prefaultable:
3802  *
3803  *      Return whether or not the specified virtual address is eligible
3804  *      for prefault.
3805  */
3806 boolean_t
3807 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3808 {
3809         pt_entry_t *l3;
3810         boolean_t rv;
3811
3812         rv = FALSE;
3813         PMAP_LOCK(pmap);
3814         l3 = pmap_l3(pmap, addr);
3815         if (l3 != NULL && pmap_load(l3) != 0) {
3816                 rv = TRUE;
3817         }
3818         PMAP_UNLOCK(pmap);
3819         return (rv);
3820 }
3821
3822 /*
3823  *      pmap_is_referenced:
3824  *
3825  *      Return whether or not the specified physical page was referenced
3826  *      in any physical maps.
3827  */
3828 boolean_t
3829 pmap_is_referenced(vm_page_t m)
3830 {
3831
3832         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3833             ("pmap_is_referenced: page %p is not managed", m));
3834         return (pmap_page_test_mappings(m, TRUE, FALSE));
3835 }
3836
3837 /*
3838  * Clear the write and modified bits in each of the given page's mappings.
3839  */
3840 void
3841 pmap_remove_write(vm_page_t m)
3842 {
3843         struct md_page *pvh;
3844         struct rwlock *lock;
3845         pmap_t pmap;
3846         pd_entry_t *l2;
3847         pt_entry_t *l3, oldl3, newl3;
3848         pv_entry_t next_pv, pv;
3849         vm_offset_t va;
3850         int md_gen, pvh_gen;
3851
3852         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3853             ("pmap_remove_write: page %p is not managed", m));
3854
3855         /*
3856          * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3857          * set by another thread while the object is locked.  Thus,
3858          * if PGA_WRITEABLE is clear, no page table entries need updating.
3859          */
3860         VM_OBJECT_ASSERT_WLOCKED(m->object);
3861         if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3862                 return;
3863         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3864         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
3865             pa_to_pvh(VM_PAGE_TO_PHYS(m));
3866         rw_rlock(&pvh_global_lock);
3867 retry_pv_loop:
3868         rw_wlock(lock);
3869         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
3870                 pmap = PV_PMAP(pv);
3871                 if (!PMAP_TRYLOCK(pmap)) {
3872                         pvh_gen = pvh->pv_gen;
3873                         rw_wunlock(lock);
3874                         PMAP_LOCK(pmap);
3875                         rw_wlock(lock);
3876                         if (pvh_gen != pvh->pv_gen) {
3877                                 PMAP_UNLOCK(pmap);
3878                                 rw_wunlock(lock);
3879                                 goto retry_pv_loop;
3880                         }
3881                 }
3882                 va = pv->pv_va;
3883                 l2 = pmap_l2(pmap, va);
3884                 if ((pmap_load(l2) & PTE_W) != 0)
3885                         (void)pmap_demote_l2_locked(pmap, l2, va, &lock);
3886                 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
3887                     ("inconsistent pv lock %p %p for page %p",
3888                     lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
3889                 PMAP_UNLOCK(pmap);
3890         }
3891         TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3892                 pmap = PV_PMAP(pv);
3893                 if (!PMAP_TRYLOCK(pmap)) {
3894                         pvh_gen = pvh->pv_gen;
3895                         md_gen = m->md.pv_gen;
3896                         rw_wunlock(lock);
3897                         PMAP_LOCK(pmap);
3898                         rw_wlock(lock);
3899                         if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
3900                                 PMAP_UNLOCK(pmap);
3901                                 rw_wunlock(lock);
3902                                 goto retry_pv_loop;
3903                         }
3904                 }
3905                 l3 = pmap_l3(pmap, pv->pv_va);
3906                 oldl3 = pmap_load(l3);
3907 retry:
3908                 if ((oldl3 & PTE_W) != 0) {
3909                         newl3 = oldl3 & ~(PTE_D | PTE_W);
3910                         if (!atomic_fcmpset_long(l3, &oldl3, newl3))
3911                                 goto retry;
3912                         if ((oldl3 & PTE_D) != 0)
3913                                 vm_page_dirty(m);
3914                         pmap_invalidate_page(pmap, pv->pv_va);
3915                 }
3916                 PMAP_UNLOCK(pmap);
3917         }
3918         rw_wunlock(lock);
3919         vm_page_aflag_clear(m, PGA_WRITEABLE);
3920         rw_runlock(&pvh_global_lock);
3921 }
3922
3923 /*
3924  *      pmap_ts_referenced:
3925  *
3926  *      Return a count of reference bits for a page, clearing those bits.
3927  *      It is not necessary for every reference bit to be cleared, but it
3928  *      is necessary that 0 only be returned when there are truly no
3929  *      reference bits set.
3930  *
3931  *      As an optimization, update the page's dirty field if a modified bit is
3932  *      found while counting reference bits.  This opportunistic update can be
3933  *      performed at low cost and can eliminate the need for some future calls
3934  *      to pmap_is_modified().  However, since this function stops after
3935  *      finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
3936  *      dirty pages.  Those dirty pages will only be detected by a future call
3937  *      to pmap_is_modified().
3938  */
3939 int
3940 pmap_ts_referenced(vm_page_t m)
3941 {
3942         struct spglist free;
3943         struct md_page *pvh;
3944         struct rwlock *lock;
3945         pv_entry_t pv, pvf;
3946         pmap_t pmap;
3947         pd_entry_t *l2, l2e;
3948         pt_entry_t *l3, l3e;
3949         vm_paddr_t pa;
3950         vm_offset_t va;
3951         int md_gen, pvh_gen, ret;
3952
3953         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3954             ("pmap_ts_referenced: page %p is not managed", m));
3955         SLIST_INIT(&free);
3956         ret = 0;
3957         pa = VM_PAGE_TO_PHYS(m);
3958         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
3959
3960         lock = PHYS_TO_PV_LIST_LOCK(pa);
3961         rw_rlock(&pvh_global_lock);
3962         rw_wlock(lock);
3963 retry:
3964         if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
3965                 goto small_mappings;
3966         pv = pvf;
3967         do {
3968                 pmap = PV_PMAP(pv);
3969                 if (!PMAP_TRYLOCK(pmap)) {
3970                         pvh_gen = pvh->pv_gen;
3971                         rw_wunlock(lock);
3972                         PMAP_LOCK(pmap);
3973                         rw_wlock(lock);
3974                         if (pvh_gen != pvh->pv_gen) {
3975                                 PMAP_UNLOCK(pmap);
3976                                 goto retry;
3977                         }
3978                 }
3979                 va = pv->pv_va;
3980                 l2 = pmap_l2(pmap, va);
3981                 l2e = pmap_load(l2);
3982                 if ((l2e & (PTE_W | PTE_D)) == (PTE_W | PTE_D)) {
3983                         /*
3984                          * Although l2e is mapping a 2MB page, because
3985                          * this function is called at a 4KB page granularity,
3986                          * we only update the 4KB page under test.
3987                          */
3988                         vm_page_dirty(m);
3989                 }
3990                 if ((l2e & PTE_A) != 0) {
3991                         /*
3992                          * Since this reference bit is shared by 512 4KB
3993                          * pages, it should not be cleared every time it is
3994                          * tested.  Apply a simple "hash" function on the
3995                          * physical page number, the virtual superpage number,
3996                          * and the pmap address to select one 4KB page out of
3997                          * the 512 on which testing the reference bit will
3998                          * result in clearing that reference bit.  This
3999                          * function is designed to avoid the selection of the
4000                          * same 4KB page for every 2MB page mapping.
4001                          *
4002                          * On demotion, a mapping that hasn't been referenced
4003                          * is simply destroyed.  To avoid the possibility of a
4004                          * subsequent page fault on a demoted wired mapping,
4005                          * always leave its reference bit set.  Moreover,
4006                          * since the superpage is wired, the current state of
4007                          * its reference bit won't affect page replacement.
4008                          */
4009                         if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L2_SHIFT) ^
4010                             (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
4011                             (l2e & PTE_SW_WIRED) == 0) {
4012                                 pmap_clear_bits(l2, PTE_A);
4013                                 pmap_invalidate_page(pmap, va);
4014                         }
4015                         ret++;
4016                 }
4017                 PMAP_UNLOCK(pmap);
4018                 /* Rotate the PV list if it has more than one entry. */
4019                 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
4020                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4021                         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
4022                         pvh->pv_gen++;
4023                 }
4024                 if (ret >= PMAP_TS_REFERENCED_MAX)
4025                         goto out;
4026         } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
4027 small_mappings:
4028         if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
4029                 goto out;
4030         pv = pvf;
4031         do {
4032                 pmap = PV_PMAP(pv);
4033                 if (!PMAP_TRYLOCK(pmap)) {
4034                         pvh_gen = pvh->pv_gen;
4035                         md_gen = m->md.pv_gen;
4036                         rw_wunlock(lock);
4037                         PMAP_LOCK(pmap);
4038                         rw_wlock(lock);
4039                         if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
4040                                 PMAP_UNLOCK(pmap);
4041                                 goto retry;
4042                         }
4043                 }
4044                 l2 = pmap_l2(pmap, pv->pv_va);
4045
4046                 KASSERT((pmap_load(l2) & PTE_RX) == 0,
4047                     ("pmap_ts_referenced: found an invalid l2 table"));
4048
4049                 l3 = pmap_l2_to_l3(l2, pv->pv_va);
4050                 l3e = pmap_load(l3);
4051                 if ((l3e & PTE_D) != 0)
4052                         vm_page_dirty(m);
4053                 if ((l3e & PTE_A) != 0) {
4054                         if ((l3e & PTE_SW_WIRED) == 0) {
4055                                 /*
4056                                  * Wired pages cannot be paged out so
4057                                  * doing accessed bit emulation for
4058                                  * them is wasted effort. We do the
4059                                  * hard work for unwired pages only.
4060                                  */
4061                                 pmap_clear_bits(l3, PTE_A);
4062                                 pmap_invalidate_page(pmap, pv->pv_va);
4063                         }
4064                         ret++;
4065                 }
4066                 PMAP_UNLOCK(pmap);
4067                 /* Rotate the PV list if it has more than one entry. */
4068                 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
4069                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4070                         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
4071                         m->md.pv_gen++;
4072                 }
4073         } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && ret <
4074             PMAP_TS_REFERENCED_MAX);
4075 out:
4076         rw_wunlock(lock);
4077         rw_runlock(&pvh_global_lock);
4078         vm_page_free_pages_toq(&free, false);
4079         return (ret);
4080 }
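
/*
 * Editor's note: an illustrative restatement, not part of the original file;
 * the function name is hypothetical.  It isolates the selection "hash"
 * described in the superpage loop above: for a given 2MB mapping, the
 * expression is true for roughly one 4KB page in every Ln_ENTRIES, so the
 * shared PTE_A bit is only cleared occasionally.
 */
#if 0
static bool
pmap_ts_referenced_hash_example(vm_paddr_t pa, vm_offset_t va, pmap_t pmap)
{

        return ((((pa >> PAGE_SHIFT) ^ (va >> L2_SHIFT) ^ (uintptr_t)pmap) &
            (Ln_ENTRIES - 1)) == 0);
}
#endif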
4081
4082 /*
4083  *      Apply the given advice to the specified range of addresses within the
4084  *      given pmap.  Depending on the advice, clear the referenced and/or
4085  *      modified flags in each mapping and set the mapped page's dirty field.
4086  */
4087 void
4088 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
4089 {
4090 }
4091
4092 /*
4093  *      Clear the modify bits on the specified physical page.
4094  */
4095 void
4096 pmap_clear_modify(vm_page_t m)
4097 {
4098         struct md_page *pvh;
4099         struct rwlock *lock;
4100         pmap_t pmap;
4101         pv_entry_t next_pv, pv;
4102         pd_entry_t *l2, oldl2;
4103         pt_entry_t *l3, oldl3;
4104         vm_offset_t va;
4105         int md_gen, pvh_gen;
4106
4107         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4108             ("pmap_clear_modify: page %p is not managed", m));
4109         VM_OBJECT_ASSERT_WLOCKED(m->object);
4110         KASSERT(!vm_page_xbusied(m),
4111             ("pmap_clear_modify: page %p is exclusive busied", m));
4112
4113         /*
4114          * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
4115          * If the object containing the page is locked and the page is not
4116          * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
4117          */
4118         if ((m->aflags & PGA_WRITEABLE) == 0)
4119                 return;
4120         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
4121             pa_to_pvh(VM_PAGE_TO_PHYS(m));
4122         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4123         rw_rlock(&pvh_global_lock);
4124         rw_wlock(lock);
4125 restart:
4126         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
4127                 pmap = PV_PMAP(pv);
4128                 if (!PMAP_TRYLOCK(pmap)) {
4129                         pvh_gen = pvh->pv_gen;
4130                         rw_wunlock(lock);
4131                         PMAP_LOCK(pmap);
4132                         rw_wlock(lock);
4133                         if (pvh_gen != pvh->pv_gen) {
4134                                 PMAP_UNLOCK(pmap);
4135                                 goto restart;
4136                         }
4137                 }
4138                 va = pv->pv_va;
4139                 l2 = pmap_l2(pmap, va);
4140                 oldl2 = pmap_load(l2);
4141                 if ((oldl2 & PTE_W) != 0) {
4142                         if (pmap_demote_l2_locked(pmap, l2, va, &lock)) {
4143                                 if ((oldl2 & PTE_SW_WIRED) == 0) {
4144                                         /*
4145                                          * Write protect the mapping to a
4146                                          * single page so that a subsequent
4147                                          * write access may repromote.
4148                                          */
4149                                         va += VM_PAGE_TO_PHYS(m) -
4150                                             PTE_TO_PHYS(oldl2);
4151                                         l3 = pmap_l2_to_l3(l2, va);
4152                                         oldl3 = pmap_load(l3);
4153                                         if ((oldl3 & PTE_V) != 0) {
4154                                                 while (!atomic_fcmpset_long(l3,
4155                                                     &oldl3, oldl3 & ~(PTE_D |
4156                                                     PTE_W)))
4157                                                         cpu_spinwait();
4158                                                 vm_page_dirty(m);
4159                                                 pmap_invalidate_page(pmap, va);
4160                                         }
4161                                 }
4162                         }
4163                 }
4164                 PMAP_UNLOCK(pmap);
4165         }
4166         TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4167                 pmap = PV_PMAP(pv);
4168                 if (!PMAP_TRYLOCK(pmap)) {
4169                         md_gen = m->md.pv_gen;
4170                         pvh_gen = pvh->pv_gen;
4171                         rw_wunlock(lock);
4172                         PMAP_LOCK(pmap);
4173                         rw_wlock(lock);
4174                         if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
4175                                 PMAP_UNLOCK(pmap);
4176                                 goto restart;
4177                         }
4178                 }
4179                 l2 = pmap_l2(pmap, pv->pv_va);
4180                 KASSERT((pmap_load(l2) & PTE_RWX) == 0,
4181                     ("pmap_clear_modify: found a 2mpage in page %p's pv list",
4182                     m));
4183                 l3 = pmap_l2_to_l3(l2, pv->pv_va);
4184                 if ((pmap_load(l3) & (PTE_D | PTE_W)) == (PTE_D | PTE_W)) {
4185                         pmap_clear_bits(l3, PTE_D);
4186                         pmap_invalidate_page(pmap, pv->pv_va);
4187                 }
4188                 PMAP_UNLOCK(pmap);
4189         }
4190         rw_wunlock(lock);
4191         rw_runlock(&pvh_global_lock);
4192 }
4193
4194 void *
4195 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
4196 {
4197
4198         return ((void *)PHYS_TO_DMAP(pa));
4199 }
4200
4201 void
4202 pmap_unmapbios(vm_paddr_t pa, vm_size_t size)
4203 {
4204 }
4205
4206 /*
4207  * Sets the memory attribute for the specified page.
4208  */
4209 void
4210 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
4211 {
4212
4213         m->md.pv_memattr = ma;
4214 }
4215
4216 /*
4217  * perform the pmap work for mincore
4218  */
4219 int
4220 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
4221 {
4222         pt_entry_t *l2, *l3, tpte;
4223         vm_paddr_t pa;
4224         int val;
4225         bool managed;
4226
4227         PMAP_LOCK(pmap);
4228 retry:
4229         managed = false;
4230         val = 0;
4231
4232         l2 = pmap_l2(pmap, addr);
4233         if (l2 != NULL && ((tpte = pmap_load(l2)) & PTE_V) != 0) {
4234                 if ((tpte & PTE_RWX) != 0) {
4235                         pa = PTE_TO_PHYS(tpte) | (addr & L2_OFFSET);
4236                         val = MINCORE_INCORE | MINCORE_SUPER;
4237                 } else {
4238                         l3 = pmap_l2_to_l3(l2, addr);
4239                         tpte = pmap_load(l3);
4240                         if ((tpte & PTE_V) == 0)
4241                                 goto done;
4242                         pa = PTE_TO_PHYS(tpte) | (addr & L3_OFFSET);
4243                         val = MINCORE_INCORE;
4244                 }
4245
4246                 if ((tpte & PTE_D) != 0)
4247                         val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
4248                 if ((tpte & PTE_A) != 0)
4249                         val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
4250                 managed = (tpte & PTE_SW_MANAGED) == PTE_SW_MANAGED;
4251         }
4252
4253 done:
4254         if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
4255             (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
4256                 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
4257                 if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
4258                         goto retry;
4259         } else
4260                 PA_UNLOCK_COND(*locked_pa);
4261         PMAP_UNLOCK(pmap);
4262         return (val);
4263 }
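
/*
 * Editor's note: a hypothetical consumer sketch, not part of the original
 * file; the function name is illustrative.  It shows how the value composed
 * above distinguishes a resident 2MB mapping from a resident 4KB one.
 */
#if 0
static bool
pmap_mincore_is_superpage_example(int val)
{

        return ((val & (MINCORE_INCORE | MINCORE_SUPER)) ==
            (MINCORE_INCORE | MINCORE_SUPER));
}
#endif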
4264
4265 void
4266 pmap_activate_sw(struct thread *td)
4267 {
4268         pmap_t oldpmap, pmap;
4269         u_int hart;
4270
4271         oldpmap = PCPU_GET(curpmap);
4272         pmap = vmspace_pmap(td->td_proc->p_vmspace);
4273         if (pmap == oldpmap)
4274                 return;
4275         load_satp(pmap->pm_satp);
4276
4277         hart = PCPU_GET(hart);
4278 #ifdef SMP
4279         CPU_SET_ATOMIC(hart, &pmap->pm_active);
4280         CPU_CLR_ATOMIC(hart, &oldpmap->pm_active);
4281 #else
4282         CPU_SET(hart, &pmap->pm_active);
4283         CPU_CLR(hart, &oldpmap->pm_active);
4284 #endif
4285         PCPU_SET(curpmap, pmap);
4286
4287         sfence_vma();
4288 }
4289
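     /*
      * The critical section keeps the thread on the same hart while satp,
      * pm_active and curpmap are updated.
      */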
4290 void
4291 pmap_activate(struct thread *td)
4292 {
4293
4294         critical_enter();
4295         pmap_activate_sw(td);
4296         critical_exit();
4297 }
4298
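     /*
      * Mark the given pmap active on the current hart without reloading satp;
      * only pm_active and curpmap are updated.  This is intended for harts
      * that are already running on the pmap's page tables, e.g. at boot.
      */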
4299 void
4300 pmap_activate_boot(pmap_t pmap)
4301 {
4302         u_int hart;
4303
4304         hart = PCPU_GET(hart);
4305 #ifdef SMP
4306         CPU_SET_ATOMIC(hart, &pmap->pm_active);
4307 #else
4308         CPU_SET(hart, &pmap->pm_active);
4309 #endif
4310         PCPU_SET(curpmap, pmap);
4311 }
4312
4313 void
4314 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
4315 {
4316         cpuset_t mask;
4317
4318         /*
4319          * From the RISC-V User-Level ISA V2.2:
4320          *
4321          * "To make a store to instruction memory visible to all
4322          * RISC-V harts, the writing hart has to execute a data FENCE
4323          * before requesting that all remote RISC-V harts execute a
4324          * FENCE.I."
4325          */
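             /*
              * Pin to keep the hart set stable, publish the stores with a
              * data FENCE, and then ask the remote harts, via the SBI, to
              * execute FENCE.I.
              */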
4326         sched_pin();
4327         mask = all_harts;
4328         CPU_CLR(PCPU_GET(hart), &mask);
4329         fence();
4330         if (!CPU_EMPTY(&mask) && smp_started)
4331                 sbi_remote_fence_i(mask.__bits);
4332         sched_unpin();
4333 }
4334
4335 /*
4336  *      Increase the starting virtual address of the given mapping if a
4337  *      different alignment might result in more superpage mappings.
4338  */
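     /*
      *      For example (illustrative values only), with 2MB superpages
      *      (L2_SIZE == 0x200000) and a sufficiently large mapping, an offset
      *      of 0x123000 yields a superpage_offset of 0x123000, and a hint
      *      address of 0x10000000 would be advanced to 0x10123000 so that the
      *      virtual address and the offset are congruent modulo L2_SIZE,
      *      allowing 2MB mappings to be used.
      */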
4339 void
4340 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
4341     vm_offset_t *addr, vm_size_t size)
4342 {
4343         vm_offset_t superpage_offset;
4344
4345         if (size < L2_SIZE)
4346                 return;
4347         if (object != NULL && (object->flags & OBJ_COLORED) != 0)
4348                 offset += ptoa(object->pg_color);
4349         superpage_offset = offset & L2_OFFSET;
4350         if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE ||
4351             (*addr & L2_OFFSET) == superpage_offset)
4352                 return;
4353         if ((*addr & L2_OFFSET) < superpage_offset)
4354                 *addr = (*addr & ~L2_OFFSET) + superpage_offset;
4355         else
4356                 *addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset;
4357 }
4358
4359 /**
4360  * Get the kernel virtual address of a set of physical pages. If any of the
4361  * physical addresses are not covered by the DMAP, perform a transient
4362  * mapping that must later be removed by calling pmap_unmap_io_transient.
4363  *
4364  * \param page        The pages for which the caller wishes to obtain kernel
4365  *                    virtual addresses.
4366  * \param vaddr       On return, contains the kernel virtual addresses of the
4367  *                    pages passed in the page parameter.
4368  * \param count       Number of pages passed in.
4369  * \param can_fault   TRUE if the thread using the mapped pages can take
4370  *                    page faults, FALSE otherwise.
4371  *
4372  * \returns TRUE if the caller must call pmap_unmap_io_transient when
4373  *          finished or FALSE otherwise.
4374  *
4375  */
4376 boolean_t
4377 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
4378     boolean_t can_fault)
4379 {
4380         vm_paddr_t paddr;
4381         boolean_t needs_mapping;
4382         int error, i;
4383
4384         /*
4385          * Allocate any KVA space that we need; this is done in a separate
4386          * loop so that vmem_alloc() is not called while the thread is pinned.
4387          */
4388         needs_mapping = FALSE;
4389         for (i = 0; i < count; i++) {
4390                 paddr = VM_PAGE_TO_PHYS(page[i]);
4391                 if (__predict_false(paddr >= DMAP_MAX_PHYSADDR)) {
4392                         error = vmem_alloc(kernel_arena, PAGE_SIZE,
4393                             M_BESTFIT | M_WAITOK, &vaddr[i]);
4394                         KASSERT(error == 0, ("vmem_alloc failed: %d", error));
4395                         needs_mapping = TRUE;
4396                 } else {
4397                         vaddr[i] = PHYS_TO_DMAP(paddr);
4398                 }
4399         }
4400
4401         /* Exit early if everything is covered by the DMAP */
4402         if (!needs_mapping)
4403                 return (FALSE);
4404
4405         if (!can_fault)
4406                 sched_pin();
4407         for (i = 0; i < count; i++) {
4408                 paddr = VM_PAGE_TO_PHYS(page[i]);
4409                 if (paddr >= DMAP_MAX_PHYSADDR) {
4410                         panic(
4411                            "pmap_map_io_transient: TODO: Map out of DMAP data");
4412                 }
4413         }
4414
4415         return (needs_mapping);
4416 }
4417
4418 void
4419 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
4420     boolean_t can_fault)
4421 {
4422         vm_paddr_t paddr;
4423         int i;
4424
4425         if (!can_fault)
4426                 sched_unpin();
4427         for (i = 0; i < count; i++) {
4428                 paddr = VM_PAGE_TO_PHYS(page[i]);
4429                 if (paddr >= DMAP_MAX_PHYSADDR) {
4430                         panic("RISCVTODO: pmap_unmap_io_transient: Unmap data");
4431                 }
4432         }
4433 }
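     /*
      * Illustrative usage sketch for the two functions above (not taken from
      * an actual caller; "m" is a vm_page_t and "dst" a buffer supplied by
      * the caller):
      *
      *      vm_offset_t va;
      *      boolean_t mapped;
      *
      *      mapped = pmap_map_io_transient(&m, &va, 1, FALSE);
      *      memcpy(dst, (void *)va, PAGE_SIZE);
      *      if (mapped)
      *              pmap_unmap_io_transient(&m, &va, 1, FALSE);
      */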
4434
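     /*
      * All memory attributes between VM_MEMATTR_DEVICE and
      * VM_MEMATTR_WRITE_BACK (inclusive) are considered valid.
      */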
4435 boolean_t
4436 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
4437 {
4438
4439         return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_BACK);
4440 }
4441
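     /*
      * Walk the page tables for va and return pointers to the L1, L2 and L3
      * entries through the l1, l2 and l3 arguments.  The walk stops early,
      * with the lower-level pointers set to NULL, when a leaf (superpage)
      * entry is found at the L1 or L2 level; false is returned if an invalid
      * entry is reached first.
      */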
4442 bool
4443 pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l1, pd_entry_t **l2,
4444     pt_entry_t **l3)
4445 {
4446         pd_entry_t *l1p, *l2p;
4447
4448         /* Get l1 directory entry. */
4449         l1p = pmap_l1(pmap, va);
4450         *l1 = l1p;
4451
4452         if (l1p == NULL || (pmap_load(l1p) & PTE_V) == 0)
4453                 return (false);
4454
4455         if ((pmap_load(l1p) & PTE_RX) != 0) {
4456                 *l2 = NULL;
4457                 *l3 = NULL;
4458                 return (true);
4459         }
4460
4461         /* Get l2 directory entry. */
4462         l2p = pmap_l1_to_l2(l1p, va);
4463         *l2 = l2p;
4464
4465         if (l2p == NULL || (pmap_load(l2p) & PTE_V) == 0)
4466                 return (false);
4467
4468         if ((pmap_load(l2p) & PTE_RX) != 0) {
4469                 *l3 = NULL;
4470                 return (true);
4471         }
4472
4473         /* Get l3 page table entry. */
4474         *l3 = pmap_l2_to_l3(l2p, va);
4475
4476         return (true);
4477 }