1 /*-
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  * Copyright (c) 2003 Peter Wemm
9  * All rights reserved.
10  * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
11  * All rights reserved.
12  * Copyright (c) 2014 Andrew Turner
13  * All rights reserved.
14  * Copyright (c) 2014-2016 The FreeBSD Foundation
15  * All rights reserved.
16  *
17  * This code is derived from software contributed to Berkeley by
18  * the Systems Programming Group of the University of Utah Computer
19  * Science Department and William Jolitz of UUNET Technologies Inc.
20  *
21  * This software was developed by Andrew Turner under sponsorship from
22  * the FreeBSD Foundation.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted provided that the following conditions
26  * are met:
27  * 1. Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  * 3. All advertising materials mentioning features or use of this software
33  *    must display the following acknowledgement:
34  *      This product includes software developed by the University of
35  *      California, Berkeley and its contributors.
36  * 4. Neither the name of the University nor the names of its contributors
37  *    may be used to endorse or promote products derived from this software
38  *    without specific prior written permission.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  *
52  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
53  */
54 /*-
55  * Copyright (c) 2003 Networks Associates Technology, Inc.
56  * All rights reserved.
57  *
58  * This software was developed for the FreeBSD Project by Jake Burkholder,
59  * Safeport Network Services, and Network Associates Laboratories, the
60  * Security Research Division of Network Associates, Inc. under
61  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
62  * CHATS research program.
63  *
64  * Redistribution and use in source and binary forms, with or without
65  * modification, are permitted provided that the following conditions
66  * are met:
67  * 1. Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  * 2. Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in the
71  *    documentation and/or other materials provided with the distribution.
72  *
73  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
74  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
77  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
78  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
79  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
80  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
81  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
82  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
83  * SUCH DAMAGE.
84  */
85
86 #include <sys/cdefs.h>
87 __FBSDID("$FreeBSD$");
88
89 /*
90  *      Manages physical address maps.
91  *
92  *      Since the information managed by this module is
93  *      also stored by the logical address mapping module,
94  *      this module may throw away valid virtual-to-physical
95  *      mappings at almost any time.  However, invalidations
96  *      of virtual-to-physical mappings must be done as
97  *      requested.
98  *
99  *      In order to cope with hardware architectures which
100  *      make virtual-to-physical map invalidates expensive,
101  *      this module may delay invalidate or reduced protection
102  *      operations until such time as they are actually
103  *      necessary.  This module is given full information as
104  *      to which processors are currently using which maps,
105  *      and to when physical maps must be made correct.
106  */
107
108 #include "opt_vm.h"
109
110 #include <sys/param.h>
111 #include <sys/bitstring.h>
112 #include <sys/bus.h>
113 #include <sys/systm.h>
114 #include <sys/kernel.h>
115 #include <sys/ktr.h>
116 #include <sys/lock.h>
117 #include <sys/malloc.h>
118 #include <sys/mman.h>
119 #include <sys/msgbuf.h>
120 #include <sys/mutex.h>
121 #include <sys/proc.h>
122 #include <sys/rwlock.h>
123 #include <sys/sx.h>
124 #include <sys/vmem.h>
125 #include <sys/vmmeter.h>
126 #include <sys/sched.h>
127 #include <sys/sysctl.h>
128 #include <sys/_unrhdr.h>
129 #include <sys/smp.h>
130
131 #include <vm/vm.h>
132 #include <vm/vm_param.h>
133 #include <vm/vm_kern.h>
134 #include <vm/vm_page.h>
135 #include <vm/vm_map.h>
136 #include <vm/vm_object.h>
137 #include <vm/vm_extern.h>
138 #include <vm/vm_pageout.h>
139 #include <vm/vm_pager.h>
140 #include <vm/vm_phys.h>
141 #include <vm/vm_radix.h>
142 #include <vm/vm_reserv.h>
143 #include <vm/uma.h>
144
145 #include <machine/machdep.h>
146 #include <machine/md_var.h>
147 #include <machine/pcb.h>
148
149 #include <arm/include/physmem.h>
150
151 #define NL0PG           (PAGE_SIZE/(sizeof (pd_entry_t)))
152 #define NL1PG           (PAGE_SIZE/(sizeof (pd_entry_t)))
153 #define NL2PG           (PAGE_SIZE/(sizeof (pd_entry_t)))
154 #define NL3PG           (PAGE_SIZE/(sizeof (pt_entry_t)))
155
156 #define NUL0E           L0_ENTRIES
157 #define NUL1E           (NUL0E * NL1PG)
158 #define NUL2E           (NUL1E * NL2PG)
159
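/*
 * Illustrative sketch only (assumed, never compiled; the guard macro and
 * helper are hypothetical): page table pages live in a single flat pindex
 * space, split by the constants above.  This mirrors the classification
 * later used by _pmap_alloc_l3() and _pmap_unwire_l3().
 */
#ifdef PMAP_PINDEX_EXAMPLE
static int __unused
pmap_pindex_level_example(vm_pindex_t ptepindex)
{

        if (ptepindex >= NUL2E + NUL1E)
                return (1);     /* page holds L1 (top-level table) entries */
        else if (ptepindex >= NUL2E)
                return (2);     /* page holds L2 entries */
        else
                return (3);     /* page holds L3 (leaf) entries */
}
#endif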
160 #if !defined(DIAGNOSTIC)
161 #ifdef __GNUC_GNU_INLINE__
162 #define PMAP_INLINE     __attribute__((__gnu_inline__)) inline
163 #else
164 #define PMAP_INLINE     extern inline
165 #endif
166 #else
167 #define PMAP_INLINE
168 #endif
169
170 /*
171  * These are configured by the mair_el1 register. This is set up in locore.S
172  */
173 #define DEVICE_MEMORY   0
174 #define UNCACHED_MEMORY 1
175 #define CACHED_MEMORY   2
176
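/*
 * Illustrative sketch only (assumed, never compiled; the guard macro is
 * hypothetical): a leaf page table entry selects one of the indices above
 * through its ATTR_IDX field.  This mirrors how pmap_kenter() below builds
 * a normal write-back cacheable kernel mapping.
 */
#ifdef PMAP_ATTR_EXAMPLE
static pt_entry_t __unused
pmap_cached_l3_pte_example(vm_paddr_t pa)
{

        return ((pa & ~L3_OFFSET) | ATTR_DEFAULT | ATTR_IDX(CACHED_MEMORY) |
            L3_PAGE);
}
#endif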
177
178 #ifdef PV_STATS
179 #define PV_STAT(x)      do { x ; } while (0)
180 #else
181 #define PV_STAT(x)      do { } while (0)
182 #endif
183
184 #define pmap_l2_pindex(v)       ((v) >> L2_SHIFT)
185 #define pa_to_pvh(pa)           (&pv_table[pmap_l2_pindex(pa)])
186
187 #define NPV_LIST_LOCKS  MAXCPU
188
189 #define PHYS_TO_PV_LIST_LOCK(pa)        \
190                         (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
191
192 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)  do {    \
193         struct rwlock **_lockp = (lockp);               \
194         struct rwlock *_new_lock;                       \
195                                                         \
196         _new_lock = PHYS_TO_PV_LIST_LOCK(pa);           \
197         if (_new_lock != *_lockp) {                     \
198                 if (*_lockp != NULL)                    \
199                         rw_wunlock(*_lockp);            \
200                 *_lockp = _new_lock;                    \
201                 rw_wlock(*_lockp);                      \
202         }                                               \
203 } while (0)
204
205 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)        \
206                         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
207
208 #define RELEASE_PV_LIST_LOCK(lockp)             do {    \
209         struct rwlock **_lockp = (lockp);               \
210                                                         \
211         if (*_lockp != NULL) {                          \
212                 rw_wunlock(*_lockp);                    \
213                 *_lockp = NULL;                         \
214         }                                               \
215 } while (0)
216
217 #define VM_PAGE_TO_PV_LIST_LOCK(m)      \
218                         PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
219
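/*
 * Illustrative sketch only (assumed, never compiled; the guard macro and
 * helper are hypothetical): code that touches the pv lists of several pages
 * keeps a single lock pointer and lets the macros above hand off to the
 * lock covering each page's physical address, releasing it at the end.
 */
#ifdef PMAP_PV_LOCK_EXAMPLE
static void __unused
pmap_pv_lock_example(vm_page_t *ma, int count)
{
        struct rwlock *lock;
        int i;

        lock = NULL;
        for (i = 0; i < count; i++) {
                /* Switch to the lock protecting this page's pv list. */
                CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, ma[i]);
                /* ... examine or modify ma[i]->md.pv_list here ... */
        }
        RELEASE_PV_LIST_LOCK(&lock);
}
#endif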
220 struct pmap kernel_pmap_store;
221
222 /* Used for mapping ACPI memory before VM is initialized */
223 #define PMAP_PREINIT_MAPPING_COUNT      32
224 #define PMAP_PREINIT_MAPPING_SIZE       (PMAP_PREINIT_MAPPING_COUNT * L2_SIZE)
225 static vm_offset_t preinit_map_va;      /* Start VA of pre-init mapping space */
226 static int vm_initialized = 0;          /* No need to use pre-init maps when set */
227
228 /*
229  * Reserve a few L2 blocks starting from 'preinit_map_va' pointer.
230  * Always map entire L2 block for simplicity.
231  * VA of L2 block = preinit_map_va + i * L2_SIZE
232  */
233 static struct pmap_preinit_mapping {
234         vm_paddr_t      pa;
235         vm_offset_t     va;
236         vm_size_t       size;
237 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
238
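/*
 * Illustrative sketch only (assumed, never compiled; the guard macro is
 * hypothetical): per the comment above, slot i of the pre-init table covers
 * one full L2 block starting at preinit_map_va + i * L2_SIZE.
 */
#ifdef PMAP_PREINIT_EXAMPLE
static vm_offset_t __unused
pmap_preinit_slot_va_example(u_int i)
{

        return (preinit_map_va + (vm_offset_t)i * L2_SIZE);
}
#endif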
239 vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
240 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
241 vm_offset_t kernel_vm_end = 0;
242
243 /*
244  * Data for the pv entry allocation mechanism.
245  */
246 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
247 static struct mtx pv_chunks_mutex;
248 static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
249 static struct md_page *pv_table;
250 static struct md_page pv_dummy;
251
252 vm_paddr_t dmap_phys_base;      /* The start of the dmap region */
253 vm_paddr_t dmap_phys_max;       /* The limit of the dmap region */
254 vm_offset_t dmap_max_addr;      /* The virtual address limit of the dmap */
255
256 /* This code assumes all L1 DMAP entries will be used */
257 CTASSERT((DMAP_MIN_ADDRESS  & ~L0_OFFSET) == DMAP_MIN_ADDRESS);
258 CTASSERT((DMAP_MAX_ADDRESS  & ~L0_OFFSET) == DMAP_MAX_ADDRESS);
259
260 #define DMAP_TABLES     ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
261 extern pt_entry_t pagetable_dmap[];
262
263 #define PHYSMAP_SIZE    (2 * (VM_PHYSSEG_MAX - 1))
264 static vm_paddr_t physmap[PHYSMAP_SIZE];
265 static u_int physmap_idx;
266
267 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
268
269 static int superpages_enabled = 1;
270 SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
271     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0,
272     "Are large page mappings enabled?");
273
274 /*
275  * Internal flags for pmap_enter()'s helper functions.
276  */
277 #define PMAP_ENTER_NORECLAIM    0x1000000       /* Don't reclaim PV entries. */
278 #define PMAP_ENTER_NOREPLACE    0x2000000       /* Don't replace mappings. */
279
280 static void     free_pv_chunk(struct pv_chunk *pc);
281 static void     free_pv_entry(pmap_t pmap, pv_entry_t pv);
282 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
283 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
284 static void     pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
285 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
286                     vm_offset_t va);
287
288 static int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
289 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
290 static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
291 static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2,
292     vm_offset_t va, struct rwlock **lockp);
293 static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
294 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
295     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
296 static int pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2,
297     u_int flags, vm_page_t m, struct rwlock **lockp);
298 static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
299     pd_entry_t l1e, struct spglist *free, struct rwlock **lockp);
300 static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
301     pd_entry_t l2e, struct spglist *free, struct rwlock **lockp);
302 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
303     vm_page_t m, struct rwlock **lockp);
304
305 static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
306                 struct rwlock **lockp);
307
308 static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
309     struct spglist *free);
310 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
311 static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
312
313 /*
314  * These load the old table data and store the new value.
315  * They need to be atomic as the System MMU may write to the table at
316  * the same time as the CPU.
317  */
318 #define pmap_clear(table) atomic_store_64(table, 0)
319 #define pmap_load_store(table, entry) atomic_swap_64(table, entry)
320 #define pmap_set(table, mask) atomic_set_64(table, mask)
321 #define pmap_load_clear(table) atomic_swap_64(table, 0)
322 #define pmap_load(table) (*table)
323
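/*
 * Illustrative sketch only (assumed, never compiled; the guard macro is
 * hypothetical): table entries are always updated through the atomic
 * wrappers above so the previous value is fetched and the new one published
 * in a single 64-bit operation, even while the System MMU walks the tables.
 */
#ifdef PMAP_ATOMIC_EXAMPLE
static pt_entry_t __unused
pmap_swap_entry_example(pt_entry_t *pte, pt_entry_t newpte)
{
        pt_entry_t old;

        old = pmap_load_store(pte, newpte);     /* publish the new entry */
        return (old);                           /* previous entry */
}
#endif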
324 /********************/
325 /* Inline functions */
326 /********************/
327
328 static __inline void
329 pagecopy(void *s, void *d)
330 {
331
332         memcpy(d, s, PAGE_SIZE);
333 }
334
335 static __inline pd_entry_t *
336 pmap_l0(pmap_t pmap, vm_offset_t va)
337 {
338
339         return (&pmap->pm_l0[pmap_l0_index(va)]);
340 }
341
342 static __inline pd_entry_t *
343 pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
344 {
345         pd_entry_t *l1;
346
347         l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
348         return (&l1[pmap_l1_index(va)]);
349 }
350
351 static __inline pd_entry_t *
352 pmap_l1(pmap_t pmap, vm_offset_t va)
353 {
354         pd_entry_t *l0;
355
356         l0 = pmap_l0(pmap, va);
357         if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE)
358                 return (NULL);
359
360         return (pmap_l0_to_l1(l0, va));
361 }
362
363 static __inline pd_entry_t *
364 pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va)
365 {
366         pd_entry_t *l2;
367
368         l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
369         return (&l2[pmap_l2_index(va)]);
370 }
371
372 static __inline pd_entry_t *
373 pmap_l2(pmap_t pmap, vm_offset_t va)
374 {
375         pd_entry_t *l1;
376
377         l1 = pmap_l1(pmap, va);
378         if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE)
379                 return (NULL);
380
381         return (pmap_l1_to_l2(l1, va));
382 }
383
384 static __inline pt_entry_t *
385 pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va)
386 {
387         pt_entry_t *l3;
388
389         l3 = (pt_entry_t *)PHYS_TO_DMAP(pmap_load(l2) & ~ATTR_MASK);
390         return (&l3[pmap_l3_index(va)]);
391 }
392
393 /*
394  * Returns the lowest valid pde for a given virtual address.
395  * The next level may or may not point to a valid page or block.
396  */
397 static __inline pd_entry_t *
398 pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
399 {
400         pd_entry_t *l0, *l1, *l2, desc;
401
402         l0 = pmap_l0(pmap, va);
403         desc = pmap_load(l0) & ATTR_DESCR_MASK;
404         if (desc != L0_TABLE) {
405                 *level = -1;
406                 return (NULL);
407         }
408
409         l1 = pmap_l0_to_l1(l0, va);
410         desc = pmap_load(l1) & ATTR_DESCR_MASK;
411         if (desc != L1_TABLE) {
412                 *level = 0;
413                 return (l0);
414         }
415
416         l2 = pmap_l1_to_l2(l1, va);
417         desc = pmap_load(l2) & ATTR_DESCR_MASK;
418         if (desc != L2_TABLE) {
419                 *level = 1;
420                 return (l1);
421         }
422
423         *level = 2;
424         return (l2);
425 }
426
427 /*
428  * Returns the lowest valid pte block or table entry for a given virtual
429  * address. If there are no valid entries return NULL and set the level to
430  * the first invalid level.
431  */
432 static __inline pt_entry_t *
433 pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
434 {
435         pd_entry_t *l1, *l2, desc;
436         pt_entry_t *l3;
437
438         l1 = pmap_l1(pmap, va);
439         if (l1 == NULL) {
440                 *level = 0;
441                 return (NULL);
442         }
443         desc = pmap_load(l1) & ATTR_DESCR_MASK;
444         if (desc == L1_BLOCK) {
445                 *level = 1;
446                 return (l1);
447         }
448
449         if (desc != L1_TABLE) {
450                 *level = 1;
451                 return (NULL);
452         }
453
454         l2 = pmap_l1_to_l2(l1, va);
455         desc = pmap_load(l2) & ATTR_DESCR_MASK;
456         if (desc == L2_BLOCK) {
457                 *level = 2;
458                 return (l2);
459         }
460
461         if (desc != L2_TABLE) {
462                 *level = 2;
463                 return (NULL);
464         }
465
466         *level = 3;
467         l3 = pmap_l2_to_l3(l2, va);
468         if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
469                 return (NULL);
470
471         return (l3);
472 }
473
474 bool
475 pmap_ps_enabled(pmap_t pmap __unused)
476 {
477
478         return (superpages_enabled != 0);
479 }
480
481 bool
482 pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
483     pd_entry_t **l2, pt_entry_t **l3)
484 {
485         pd_entry_t *l0p, *l1p, *l2p;
486
487         if (pmap->pm_l0 == NULL)
488                 return (false);
489
490         l0p = pmap_l0(pmap, va);
491         *l0 = l0p;
492
493         if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE)
494                 return (false);
495
496         l1p = pmap_l0_to_l1(l0p, va);
497         *l1 = l1p;
498
499         if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) {
500                 *l2 = NULL;
501                 *l3 = NULL;
502                 return (true);
503         }
504
505         if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE)
506                 return (false);
507
508         l2p = pmap_l1_to_l2(l1p, va);
509         *l2 = l2p;
510
511         if ((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK) {
512                 *l3 = NULL;
513                 return (true);
514         }
515
516         if ((pmap_load(l2p) & ATTR_DESCR_MASK) != L2_TABLE)
517                 return (false);
518
519         *l3 = pmap_l2_to_l3(l2p, va);
520
521         return (true);
522 }
523
524 static __inline int
525 pmap_l3_valid(pt_entry_t l3)
526 {
527
528         return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
529 }
530
531
532 CTASSERT(L1_BLOCK == L2_BLOCK);
533
534 /*
535  * Checks if the page is dirty.  We currently lack proper dirty-bit tracking
536  * on arm64, so for now treat any accessed read/write mapping as dirty.
537  */
538 static inline int
539 pmap_page_dirty(pt_entry_t pte)
540 {
541
542         return ((pte & (ATTR_AF | ATTR_AP_RW_BIT)) ==
543             (ATTR_AF | ATTR_AP(ATTR_AP_RW)));
544 }
545
546 static __inline void
547 pmap_resident_count_inc(pmap_t pmap, int count)
548 {
549
550         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
551         pmap->pm_stats.resident_count += count;
552 }
553
554 static __inline void
555 pmap_resident_count_dec(pmap_t pmap, int count)
556 {
557
558         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
559         KASSERT(pmap->pm_stats.resident_count >= count,
560             ("pmap %p resident count underflow %ld %d", pmap,
561             pmap->pm_stats.resident_count, count));
562         pmap->pm_stats.resident_count -= count;
563 }
564
565 static pt_entry_t *
566 pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot,
567     u_int *l2_slot)
568 {
569         pt_entry_t *l2;
570         pd_entry_t *l1;
571
572         l1 = (pd_entry_t *)l1pt;
573         *l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK;
574
575         /* Check locore has used a table L1 map */
576         KASSERT((l1[*l1_slot] & ATTR_DESCR_MASK) == L1_TABLE,
577            ("Invalid bootstrap L1 table"));
578         /* Find the address of the L2 table */
579         l2 = (pt_entry_t *)init_pt_va;
580         *l2_slot = pmap_l2_index(va);
581
582         return (l2);
583 }
584
585 static vm_paddr_t
586 pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
587 {
588         u_int l1_slot, l2_slot;
589         pt_entry_t *l2;
590
591         l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot);
592
593         return ((l2[l2_slot] & ~ATTR_MASK) + (va & L2_OFFSET));
594 }
595
596 static vm_offset_t
597 pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
598     vm_offset_t freemempos)
599 {
600         pt_entry_t *l2;
601         vm_offset_t va;
602         vm_paddr_t l2_pa, pa;
603         u_int l1_slot, l2_slot, prev_l1_slot;
604         int i;
605
606         dmap_phys_base = min_pa & ~L1_OFFSET;
607         dmap_phys_max = 0;
608         dmap_max_addr = 0;
609         l2 = NULL;
610         prev_l1_slot = -1;
611
613         memset(pagetable_dmap, 0, PAGE_SIZE * DMAP_TABLES);
614
615         for (i = 0; i < (physmap_idx * 2); i += 2) {
616                 pa = physmap[i] & ~L2_OFFSET;
617                 va = pa - dmap_phys_base + DMAP_MIN_ADDRESS;
618
619                 /* Create L2 mappings at the start of the region */
620                 if ((pa & L1_OFFSET) != 0) {
621                         l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
622                         if (l1_slot != prev_l1_slot) {
623                                 prev_l1_slot = l1_slot;
624                                 l2 = (pt_entry_t *)freemempos;
625                                 l2_pa = pmap_early_vtophys(kern_l1,
626                                     (vm_offset_t)l2);
627                                 freemempos += PAGE_SIZE;
628
629                                 pmap_load_store(&pagetable_dmap[l1_slot],
630                                     (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
631
632                                 memset(l2, 0, PAGE_SIZE);
633                         }
634                         KASSERT(l2 != NULL,
635                             ("pmap_bootstrap_dmap: NULL l2 map"));
636                         for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
637                             pa += L2_SIZE, va += L2_SIZE) {
638                                 /*
639                                  * We are on a boundary, stop to
640                                  * create a level 1 block
641                                  */
642                                 if ((pa & L1_OFFSET) == 0)
643                                         break;
644
645                                 l2_slot = pmap_l2_index(va);
646                                 KASSERT(l2_slot != 0, ("..."));
647                                 pmap_load_store(&l2[l2_slot],
648                                     (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
649                                     ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
650                         }
651                         KASSERT(va == (pa - dmap_phys_base + DMAP_MIN_ADDRESS),
652                             ("..."));
653                 }
654
655                 for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1] &&
656                     (physmap[i + 1] - pa) >= L1_SIZE;
657                     pa += L1_SIZE, va += L1_SIZE) {
658                         l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
659                         pmap_load_store(&pagetable_dmap[l1_slot],
660                             (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_XN |
661                             ATTR_IDX(CACHED_MEMORY) | L1_BLOCK);
662                 }
663
664                 /* Create L2 mappings at the end of the region */
665                 if (pa < physmap[i + 1]) {
666                         l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
667                         if (l1_slot != prev_l1_slot) {
668                                 prev_l1_slot = l1_slot;
669                                 l2 = (pt_entry_t *)freemempos;
670                                 l2_pa = pmap_early_vtophys(kern_l1,
671                                     (vm_offset_t)l2);
672                                 freemempos += PAGE_SIZE;
673
674                                 pmap_load_store(&pagetable_dmap[l1_slot],
675                                     (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
676
677                                 memset(l2, 0, PAGE_SIZE);
678                         }
679                         KASSERT(l2 != NULL,
680                             ("pmap_bootstrap_dmap: NULL l2 map"));
681                         for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
682                             pa += L2_SIZE, va += L2_SIZE) {
683                                 l2_slot = pmap_l2_index(va);
684                                 pmap_load_store(&l2[l2_slot],
685                                     (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
686                                     ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
687                         }
688                 }
689
690                 if (pa > dmap_phys_max) {
691                         dmap_phys_max = pa;
692                         dmap_max_addr = va;
693                 }
694         }
695
696         cpu_tlb_flushID();
697
698         return (freemempos);
699 }
700
701 static vm_offset_t
702 pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start)
703 {
704         vm_offset_t l2pt;
705         vm_paddr_t pa;
706         pd_entry_t *l1;
707         u_int l1_slot;
708
709         KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address"));
710
711         l1 = (pd_entry_t *)l1pt;
712         l1_slot = pmap_l1_index(va);
713         l2pt = l2_start;
714
715         for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) {
716                 KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
717
718                 pa = pmap_early_vtophys(l1pt, l2pt);
719                 pmap_load_store(&l1[l1_slot],
720                     (pa & ~Ln_TABLE_MASK) | L1_TABLE);
721                 l2pt += PAGE_SIZE;
722         }
723
724         /* Clean the L2 page table */
725         memset((void *)l2_start, 0, l2pt - l2_start);
726
727         return l2pt;
728 }
729
730 static vm_offset_t
731 pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
732 {
733         vm_offset_t l3pt;
734         vm_paddr_t pa;
735         pd_entry_t *l2;
736         u_int l2_slot;
737
738         KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));
739
740         l2 = pmap_l2(kernel_pmap, va);
741         l2 = (pd_entry_t *)rounddown2((uintptr_t)l2, PAGE_SIZE);
742         l2_slot = pmap_l2_index(va);
743         l3pt = l3_start;
744
745         for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
746                 KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
747
748                 pa = pmap_early_vtophys(l1pt, l3pt);
749                 pmap_load_store(&l2[l2_slot],
750                     (pa & ~Ln_TABLE_MASK) | L2_TABLE);
751                 l3pt += PAGE_SIZE;
752         }
753
754         /* Clean the L3 page table */
755         memset((void *)l3_start, 0, l3pt - l3_start);
756
757         return l3pt;
758 }
759
760 /*
761  *      Bootstrap the system enough to run with virtual memory.
762  */
763 void
764 pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
765     vm_size_t kernlen)
766 {
767         u_int l1_slot, l2_slot;
768         uint64_t kern_delta;
769         pt_entry_t *l2;
770         vm_offset_t va, freemempos;
771         vm_offset_t dpcpu, msgbufpv;
772         vm_paddr_t start_pa, pa, min_pa;
773         int i;
774
775         kern_delta = KERNBASE - kernstart;
776
777         printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
778         printf("%lx\n", l1pt);
779         printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK);
780
781         /* Set this early so we can use the pagetable walking functions */
782         kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt;
783         PMAP_LOCK_INIT(kernel_pmap);
784
785         /* Assume the address we were loaded to is a valid physical address */
786         min_pa = KERNBASE - kern_delta;
787
788         physmap_idx = arm_physmem_avail(physmap, nitems(physmap));
789         physmap_idx /= 2;
790
791         /*
792          * Find the minimum physical address. physmap is sorted,
793          * but may contain empty ranges.
794          */
795         for (i = 0; i < (physmap_idx * 2); i += 2) {
796                 if (physmap[i] == physmap[i + 1])
797                         continue;
798                 if (physmap[i] <= min_pa)
799                         min_pa = physmap[i];
800         }
801
802         freemempos = KERNBASE + kernlen;
803         freemempos = roundup2(freemempos, PAGE_SIZE);
804
805         /* Create a direct map region early so we can use it for pa -> va */
806         freemempos = pmap_bootstrap_dmap(l1pt, min_pa, freemempos);
807
808         va = KERNBASE;
809         start_pa = pa = KERNBASE - kern_delta;
810
811         /*
812          * Read the page table to find out what is already mapped.
813          * This assumes we have mapped a block of memory from KERNBASE
814          * using a single L1 entry.
815          */
816         l2 = pmap_early_page_idx(l1pt, KERNBASE, &l1_slot, &l2_slot);
817
818         /* Sanity check the index, KERNBASE should be the first VA */
819         KASSERT(l2_slot == 0, ("The L2 index is non-zero"));
820
821         /* Find how many pages we have mapped */
822         for (; l2_slot < Ln_ENTRIES; l2_slot++) {
823                 if ((l2[l2_slot] & ATTR_DESCR_MASK) == 0)
824                         break;
825
826                 /* Check locore used L2 blocks */
827                 KASSERT((l2[l2_slot] & ATTR_DESCR_MASK) == L2_BLOCK,
828                     ("Invalid bootstrap L2 table"));
829                 KASSERT((l2[l2_slot] & ~ATTR_MASK) == pa,
830                     ("Incorrect PA in L2 table"));
831
832                 va += L2_SIZE;
833                 pa += L2_SIZE;
834         }
835
836         va = roundup2(va, L1_SIZE);
837
838         /* Create the l2 tables up to VM_MAX_KERNEL_ADDRESS */
839         freemempos = pmap_bootstrap_l2(l1pt, va, freemempos);
840         /* And the l3 tables for the early devmap */
841         freemempos = pmap_bootstrap_l3(l1pt,
842             VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE), freemempos);
843
844         cpu_tlb_flushID();
845
846 #define alloc_pages(var, np)                                            \
847         (var) = freemempos;                                             \
848         freemempos += (np * PAGE_SIZE);                                 \
849         memset((char *)(var), 0, ((np) * PAGE_SIZE));
850
851         /* Allocate dynamic per-cpu area. */
852         alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
853         dpcpu_init((void *)dpcpu, 0);
854
855         /* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
856         alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
857         msgbufp = (void *)msgbufpv;
858
859         /* Reserve some VA space for early BIOS/ACPI mapping */
860         preinit_map_va = roundup2(freemempos, L2_SIZE);
861
862         virtual_avail = preinit_map_va + PMAP_PREINIT_MAPPING_SIZE;
863         virtual_avail = roundup2(virtual_avail, L1_SIZE);
864         virtual_end = VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE);
865         kernel_vm_end = virtual_avail;
866
867         pa = pmap_early_vtophys(l1pt, freemempos);
868
869         arm_physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
870
871         cpu_tlb_flushID();
872 }
873
874 /*
875  *      Initialize a vm_page's machine-dependent fields.
876  */
877 void
878 pmap_page_init(vm_page_t m)
879 {
880
881         TAILQ_INIT(&m->md.pv_list);
882         m->md.pv_memattr = VM_MEMATTR_WRITE_BACK;
883 }
884
885 /*
886  *      Initialize the pmap module.
887  *      Called by vm_init, to initialize any structures that the pmap
888  *      system needs to map virtual memory.
889  */
890 void
891 pmap_init(void)
892 {
893         vm_size_t s;
894         int i, pv_npg;
895
896         /*
897          * Are large page mappings enabled?
898          */
899         TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
900         if (superpages_enabled) {
901                 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
902                     ("pmap_init: can't assign to pagesizes[1]"));
903                 pagesizes[1] = L2_SIZE;
904         }
905
906         /*
907          * Initialize the pv chunk list mutex.
908          */
909         mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
910
911         /*
912          * Initialize the pool of pv list locks.
913          */
914         for (i = 0; i < NPV_LIST_LOCKS; i++)
915                 rw_init(&pv_list_locks[i], "pmap pv list");
916
917         /*
918          * Calculate the size of the pv head table for superpages.
919          */
920         pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L2_SIZE);
921
922         /*
923          * Allocate memory for the pv head table for superpages.
924          */
925         s = (vm_size_t)(pv_npg * sizeof(struct md_page));
926         s = round_page(s);
927         pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
928         for (i = 0; i < pv_npg; i++)
929                 TAILQ_INIT(&pv_table[i].pv_list);
930         TAILQ_INIT(&pv_dummy.pv_list);
931
932         vm_initialized = 1;
933 }
934
935 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD, 0,
936     "2MB page mapping counters");
937
938 static u_long pmap_l2_demotions;
939 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
940     &pmap_l2_demotions, 0, "2MB page demotions");
941
942 static u_long pmap_l2_mappings;
943 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, mappings, CTLFLAG_RD,
944     &pmap_l2_mappings, 0, "2MB page mappings");
945
946 static u_long pmap_l2_p_failures;
947 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
948     &pmap_l2_p_failures, 0, "2MB page promotion failures");
949
950 static u_long pmap_l2_promotions;
951 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
952     &pmap_l2_promotions, 0, "2MB page promotions");
953
954 /*
955  * Invalidate a single TLB entry.
956  */
957 static __inline void
958 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
959 {
960
961         sched_pin();
962         __asm __volatile(
963             "dsb  ishst         \n"
964             "tlbi vaae1is, %0   \n"
965             "dsb  ish           \n"
966             "isb                \n"
967             : : "r"(va >> PAGE_SHIFT));
968         sched_unpin();
969 }
970
971 static __inline void
972 pmap_invalidate_range_nopin(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
973 {
974         vm_offset_t addr;
975
976         dsb(ishst);
977         for (addr = sva; addr < eva; addr += PAGE_SIZE) {
978                 __asm __volatile(
979                     "tlbi vaae1is, %0" : : "r"(addr >> PAGE_SHIFT));
980         }
981         __asm __volatile(
982             "dsb  ish   \n"
983             "isb        \n");
984 }
985
986 static __inline void
987 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
988 {
989
990         sched_pin();
991         pmap_invalidate_range_nopin(pmap, sva, eva);
992         sched_unpin();
993 }
994
995 static __inline void
996 pmap_invalidate_all(pmap_t pmap)
997 {
998
999         sched_pin();
1000         __asm __volatile(
1001             "dsb  ishst         \n"
1002             "tlbi vmalle1is     \n"
1003             "dsb  ish           \n"
1004             "isb                \n");
1005         sched_unpin();
1006 }
1007
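/*
 * Illustrative sketch only (assumed, never compiled; the guard macro is
 * hypothetical): a single-entry change pairs with pmap_invalidate_page(), a
 * multi-page change with pmap_invalidate_range(), and wholesale table
 * replacement with pmap_invalidate_all().  This mirrors pmap_kremove().
 */
#ifdef PMAP_INVALIDATE_EXAMPLE
static void __unused
pmap_unmap_one_example(pmap_t pmap, pt_entry_t *pte, vm_offset_t va)
{

        pmap_clear(pte);
        pmap_invalidate_page(pmap, va);
}
#endif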
1008 /*
1009  *      Routine:        pmap_extract
1010  *      Function:
1011  *              Extract the physical page address associated
1012  *              with the given map/virtual_address pair.
1013  */
1014 vm_paddr_t
1015 pmap_extract(pmap_t pmap, vm_offset_t va)
1016 {
1017         pt_entry_t *pte, tpte;
1018         vm_paddr_t pa;
1019         int lvl;
1020
1021         pa = 0;
1022         PMAP_LOCK(pmap);
1023         /*
1024          * Find the block or page map for this virtual address. pmap_pte
1025          * will return either a valid block/page entry, or NULL.
1026          */
1027         pte = pmap_pte(pmap, va, &lvl);
1028         if (pte != NULL) {
1029                 tpte = pmap_load(pte);
1030                 pa = tpte & ~ATTR_MASK;
1031                 switch(lvl) {
1032                 case 1:
1033                         KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
1034                             ("pmap_extract: Invalid L1 pte found: %lx",
1035                             tpte & ATTR_DESCR_MASK));
1036                         pa |= (va & L1_OFFSET);
1037                         break;
1038                 case 2:
1039                         KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
1040                             ("pmap_extract: Invalid L2 pte found: %lx",
1041                             tpte & ATTR_DESCR_MASK));
1042                         pa |= (va & L2_OFFSET);
1043                         break;
1044                 case 3:
1045                         KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
1046                             ("pmap_extract: Invalid L3 pte found: %lx",
1047                             tpte & ATTR_DESCR_MASK));
1048                         pa |= (va & L3_OFFSET);
1049                         break;
1050                 }
1051         }
1052         PMAP_UNLOCK(pmap);
1053         return (pa);
1054 }
1055
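/*
 * Illustrative sketch only (assumed, never compiled; the guard macro is
 * hypothetical): pmap_extract() returns 0 when no valid mapping exists, so
 * callers commonly treat a zero return as "not mapped".
 */
#ifdef PMAP_EXTRACT_EXAMPLE
static bool __unused
pmap_is_mapped_example(pmap_t pmap, vm_offset_t va)
{

        return (pmap_extract(pmap, va) != 0);
}
#endif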
1056 /*
1057  *      Routine:        pmap_extract_and_hold
1058  *      Function:
1059  *              Atomically extract and hold the physical page
1060  *              with the given pmap and virtual address pair
1061  *              if that mapping permits the given protection.
1062  */
1063 vm_page_t
1064 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1065 {
1066         pt_entry_t *pte, tpte;
1067         vm_offset_t off;
1068         vm_paddr_t pa;
1069         vm_page_t m;
1070         int lvl;
1071
1072         pa = 0;
1073         m = NULL;
1074         PMAP_LOCK(pmap);
1075 retry:
1076         pte = pmap_pte(pmap, va, &lvl);
1077         if (pte != NULL) {
1078                 tpte = pmap_load(pte);
1079
1080                 KASSERT(lvl > 0 && lvl <= 3,
1081                     ("pmap_extract_and_hold: Invalid level %d", lvl));
1082                 CTASSERT(L1_BLOCK == L2_BLOCK);
1083                 KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) ||
1084                     (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
1085                     ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
1086                      tpte & ATTR_DESCR_MASK));
1087                 if (((tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) ||
1088                     ((prot & VM_PROT_WRITE) == 0)) {
1089                         switch(lvl) {
1090                         case 1:
1091                                 off = va & L1_OFFSET;
1092                                 break;
1093                         case 2:
1094                                 off = va & L2_OFFSET;
1095                                 break;
1096                         case 3:
1097                         default:
1098                                 off = 0;
1099                         }
1100                         if (vm_page_pa_tryrelock(pmap,
1101                             (tpte & ~ATTR_MASK) | off, &pa))
1102                                 goto retry;
1103                         m = PHYS_TO_VM_PAGE((tpte & ~ATTR_MASK) | off);
1104                         vm_page_hold(m);
1105                 }
1106         }
1107         PA_UNLOCK_COND(pa);
1108         PMAP_UNLOCK(pmap);
1109         return (m);
1110 }
1111
1112 vm_paddr_t
1113 pmap_kextract(vm_offset_t va)
1114 {
1115         pt_entry_t *pte, tpte;
1116         vm_paddr_t pa;
1117         int lvl;
1118
1119         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
1120                 pa = DMAP_TO_PHYS(va);
1121         } else {
1122                 pa = 0;
1123                 pte = pmap_pte(kernel_pmap, va, &lvl);
1124                 if (pte != NULL) {
1125                         tpte = pmap_load(pte);
1126                         pa = tpte & ~ATTR_MASK;
1127                         switch(lvl) {
1128                         case 1:
1129                                 KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
1130                                     ("pmap_kextract: Invalid L1 pte found: %lx",
1131                                     tpte & ATTR_DESCR_MASK));
1132                                 pa |= (va & L1_OFFSET);
1133                                 break;
1134                         case 2:
1135                                 KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
1136                                     ("pmap_kextract: Invalid L2 pte found: %lx",
1137                                     tpte & ATTR_DESCR_MASK));
1138                                 pa |= (va & L2_OFFSET);
1139                                 break;
1140                         case 3:
1141                                 KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
1142                                     ("pmap_kextract: Invalid L3 pte found: %lx",
1143                                     tpte & ATTR_DESCR_MASK));
1144                                 pa |= (va & L3_OFFSET);
1145                                 break;
1146                         }
1147                 }
1148         }
1149         return (pa);
1150 }
1151
1152 /***************************************************
1153  * Low level mapping routines.....
1154  ***************************************************/
1155
1156 void
1157 pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
1158 {
1159         pd_entry_t *pde;
1160         pt_entry_t *pte, attr;
1161         vm_offset_t va;
1162         int lvl;
1163
1164         KASSERT((pa & L3_OFFSET) == 0,
1165            ("pmap_kenter: Invalid physical address"));
1166         KASSERT((sva & L3_OFFSET) == 0,
1167            ("pmap_kenter: Invalid virtual address"));
1168         KASSERT((size & PAGE_MASK) == 0,
1169             ("pmap_kenter: Mapping is not page-sized"));
1170
1171         attr = ATTR_DEFAULT | ATTR_IDX(mode) | L3_PAGE;
1172         if (mode == DEVICE_MEMORY)
1173                 attr |= ATTR_XN;
1174
1175         va = sva;
1176         while (size != 0) {
1177                 pde = pmap_pde(kernel_pmap, va, &lvl);
1178                 KASSERT(pde != NULL,
1179                     ("pmap_kenter: Invalid page entry, va: 0x%lx", va));
1180                 KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
1181
1182                 pte = pmap_l2_to_l3(pde, va);
1183                 pmap_load_store(pte, (pa & ~L3_OFFSET) | attr);
1184
1185                 va += PAGE_SIZE;
1186                 pa += PAGE_SIZE;
1187                 size -= PAGE_SIZE;
1188         }
1189         pmap_invalidate_range(kernel_pmap, sva, va);
1190 }
1191
1192 void
1193 pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
1194 {
1195
1196         pmap_kenter(sva, size, pa, DEVICE_MEMORY);
1197 }
1198
1199 /*
1200  * Remove a page from the kernel pagetables.
1201  */
1202 PMAP_INLINE void
1203 pmap_kremove(vm_offset_t va)
1204 {
1205         pt_entry_t *pte;
1206         int lvl;
1207
1208         pte = pmap_pte(kernel_pmap, va, &lvl);
1209         KASSERT(pte != NULL, ("pmap_kremove: Invalid address"));
1210         KASSERT(lvl == 3, ("pmap_kremove: Invalid pte level %d", lvl));
1211
1212         pmap_clear(pte);
1213         pmap_invalidate_page(kernel_pmap, va);
1214 }
1215
1216 void
1217 pmap_kremove_device(vm_offset_t sva, vm_size_t size)
1218 {
1219         pt_entry_t *pte;
1220         vm_offset_t va;
1221         int lvl;
1222
1223         KASSERT((sva & L3_OFFSET) == 0,
1224            ("pmap_kremove_device: Invalid virtual address"));
1225         KASSERT((size & PAGE_MASK) == 0,
1226             ("pmap_kremove_device: Mapping is not page-sized"));
1227
1228         va = sva;
1229         while (size != 0) {
1230                 pte = pmap_pte(kernel_pmap, va, &lvl);
1231                 KASSERT(pte != NULL, ("Invalid page table, va: 0x%lx", va));
1232                 KASSERT(lvl == 3,
1233                     ("Invalid device pagetable level: %d != 3", lvl));
1234                 pmap_clear(pte);
1235
1236                 va += PAGE_SIZE;
1237                 size -= PAGE_SIZE;
1238         }
1239         pmap_invalidate_range(kernel_pmap, sva, va);
1240 }
1241
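/*
 * Illustrative sketch only (assumed, never compiled; the guard macro is
 * hypothetical): a driver-style use of the routines above maps one
 * page-aligned device page into KVA the caller already owns, uses it, and
 * tears the mapping down again.  VA, PA and size must all be page aligned.
 */
#ifdef PMAP_KENTER_EXAMPLE
static void __unused
pmap_kenter_device_example(vm_offset_t va, vm_paddr_t pa)
{

        pmap_kenter_device(va, PAGE_SIZE, pa);
        /* ... access the device registers through 'va' ... */
        pmap_kremove_device(va, PAGE_SIZE);
}
#endif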
1242 /*
1243  *      Used to map a range of physical addresses into kernel
1244  *      virtual address space.
1245  *
1246  *      The value passed in '*virt' is a suggested virtual address for
1247  *      the mapping. Architectures which can support a direct-mapped
1248  *      physical to virtual region can return the appropriate address
1249  *      within that region, leaving '*virt' unchanged. Other
1250  *      architectures should map the pages starting at '*virt' and
1251  *      update '*virt' with the first usable address after the mapped
1252  *      region.
1253  */
1254 vm_offset_t
1255 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1256 {
1257         return PHYS_TO_DMAP(start);
1258 }
1259
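/*
 * Illustrative sketch only (assumed, never compiled; the guard macro is
 * hypothetical): because arm64 has a direct map, pmap_map() returns a DMAP
 * address and leaves the caller's '*virt' hint untouched.
 */
#ifdef PMAP_MAP_EXAMPLE
static vm_offset_t __unused
pmap_map_example(vm_paddr_t start, vm_paddr_t end)
{
        vm_offset_t hint;

        hint = virtual_avail;
        /* Returns PHYS_TO_DMAP(start); 'hint' is not advanced on arm64. */
        return (pmap_map(&hint, start, end, VM_PROT_READ | VM_PROT_WRITE));
}
#endif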
1260
1261 /*
1262  * Add a list of wired pages to the kva
1263  * this routine is only used for temporary
1264  * kernel mappings that do not need to have
1265  * page modification or references recorded.
1266  * Note that old mappings are simply written
1267  * over.  The page *must* be wired.
1268  * Note: SMP coherent.  Uses a ranged shootdown IPI.
1269  */
1270 void
1271 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
1272 {
1273         pd_entry_t *pde;
1274         pt_entry_t *pte, pa;
1275         vm_offset_t va;
1276         vm_page_t m;
1277         int i, lvl;
1278
1279         va = sva;
1280         for (i = 0; i < count; i++) {
1281                 pde = pmap_pde(kernel_pmap, va, &lvl);
1282                 KASSERT(pde != NULL,
1283                     ("pmap_qenter: Invalid page entry, va: 0x%lx", va));
1284                 KASSERT(lvl == 2,
1285                     ("pmap_qenter: Invalid level %d", lvl));
1286
1287                 m = ma[i];
1288                 pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) |
1289                     ATTR_IDX(m->md.pv_memattr) | L3_PAGE;
1290                 if (m->md.pv_memattr == DEVICE_MEMORY)
1291                         pa |= ATTR_XN;
1292                 pte = pmap_l2_to_l3(pde, va);
1293                 pmap_load_store(pte, pa);
1294
1295                 va += L3_SIZE;
1296         }
1297         pmap_invalidate_range(kernel_pmap, sva, va);
1298 }
1299
1300 /*
1301  * This routine tears out page mappings from the
1302  * kernel -- it is meant only for temporary mappings.
1303  */
1304 void
1305 pmap_qremove(vm_offset_t sva, int count)
1306 {
1307         pt_entry_t *pte;
1308         vm_offset_t va;
1309         int lvl;
1310
1311         KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));
1312
1313         va = sva;
1314         while (count-- > 0) {
1315                 pte = pmap_pte(kernel_pmap, va, &lvl);
1316                 KASSERT(lvl == 3,
1317                     ("Invalid device pagetable level: %d != 3", lvl));
1318                 if (pte != NULL) {
1319                         pmap_clear(pte);
1320                 }
1321
1322                 va += PAGE_SIZE;
1323         }
1324         pmap_invalidate_range(kernel_pmap, sva, va);
1325 }
1326
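/*
 * Illustrative sketch only (assumed, never compiled; the guard macro is
 * hypothetical): the usual pattern is a temporary mapping of a small array
 * of wired pages into preallocated KVA, followed by a matching
 * pmap_qremove() of the same number of pages.
 */
#ifdef PMAP_QENTER_EXAMPLE
static void __unused
pmap_qenter_example(vm_offset_t kva, vm_page_t *ma, int npages)
{

        pmap_qenter(kva, ma, npages);
        /* ... use the pages through 'kva' ... */
        pmap_qremove(kva, npages);
}
#endif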
1327 /***************************************************
1328  * Page table page management routines.....
1329  ***************************************************/
1330 /*
1331  * Schedule the specified unused page table page to be freed.  Specifically,
1332  * add the page to the specified list of pages that will be released to the
1333  * physical memory manager after the TLB has been updated.
1334  */
1335 static __inline void
1336 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
1337     boolean_t set_PG_ZERO)
1338 {
1339
1340         if (set_PG_ZERO)
1341                 m->flags |= PG_ZERO;
1342         else
1343                 m->flags &= ~PG_ZERO;
1344         SLIST_INSERT_HEAD(free, m, plinks.s.ss);
1345 }
1346
1347 /*
1348  * Decrements a page table page's wire count, which is used to record the
1349  * number of valid page table entries within the page.  If the wire count
1350  * drops to zero, then the page table page is unmapped.  Returns TRUE if the
1351  * page table page was unmapped and FALSE otherwise.
1352  */
1353 static inline boolean_t
1354 pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1355 {
1356
1357         --m->wire_count;
1358         if (m->wire_count == 0) {
1359                 _pmap_unwire_l3(pmap, va, m, free);
1360                 return (TRUE);
1361         } else
1362                 return (FALSE);
1363 }
1364
1365 static void
1366 _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1367 {
1368
1369         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1370         /*
1371          * unmap the page table page
1372          */
1373         if (m->pindex >= (NUL2E + NUL1E)) {
1374                 /* l1 page */
1375                 pd_entry_t *l0;
1376
1377                 l0 = pmap_l0(pmap, va);
1378                 pmap_clear(l0);
1379         } else if (m->pindex >= NUL2E) {
1380                 /* l2 page */
1381                 pd_entry_t *l1;
1382
1383                 l1 = pmap_l1(pmap, va);
1384                 pmap_clear(l1);
1385         } else {
1386                 /* l3 page */
1387                 pd_entry_t *l2;
1388
1389                 l2 = pmap_l2(pmap, va);
1390                 pmap_clear(l2);
1391         }
1392         pmap_resident_count_dec(pmap, 1);
1393         if (m->pindex < NUL2E) {
1394                 /* We just released an l3, unhold the matching l2 */
1395                 pd_entry_t *l1, tl1;
1396                 vm_page_t l2pg;
1397
1398                 l1 = pmap_l1(pmap, va);
1399                 tl1 = pmap_load(l1);
1400                 l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1401                 pmap_unwire_l3(pmap, va, l2pg, free);
1402         } else if (m->pindex < (NUL2E + NUL1E)) {
1403                 /* We just released an l2, unhold the matching l1 */
1404                 pd_entry_t *l0, tl0;
1405                 vm_page_t l1pg;
1406
1407                 l0 = pmap_l0(pmap, va);
1408                 tl0 = pmap_load(l0);
1409                 l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1410                 pmap_unwire_l3(pmap, va, l1pg, free);
1411         }
1412         pmap_invalidate_page(pmap, va);
1413
1414         /*
1415          * Put page on a list so that it is released after
1416          * *ALL* TLB shootdown is done
1417          */
1418         pmap_add_delayed_free_list(m, free, TRUE);
1419 }
1420
1421 /*
1422  * After removing a page table entry, this routine is used to
1423  * conditionally free the page, and manage the hold/wire counts.
1424  */
1425 static int
1426 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
1427     struct spglist *free)
1428 {
1429         vm_page_t mpte;
1430
1431         if (va >= VM_MAXUSER_ADDRESS)
1432                 return (0);
1433         KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
1434         mpte = PHYS_TO_VM_PAGE(ptepde & ~ATTR_MASK);
1435         return (pmap_unwire_l3(pmap, va, mpte, free));
1436 }
1437
1438 void
1439 pmap_pinit0(pmap_t pmap)
1440 {
1441
1442         PMAP_LOCK_INIT(pmap);
1443         bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1444         pmap->pm_l0 = kernel_pmap->pm_l0;
1445         pmap->pm_root.rt_root = 0;
1446 }
1447
1448 int
1449 pmap_pinit(pmap_t pmap)
1450 {
1451         vm_paddr_t l0phys;
1452         vm_page_t l0pt;
1453
1454         /*
1455          * allocate the l0 page
1456          */
1457         while ((l0pt = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
1458             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
1459                 vm_wait(NULL);
1460
1461         l0phys = VM_PAGE_TO_PHYS(l0pt);
1462         pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(l0phys);
1463
1464         if ((l0pt->flags & PG_ZERO) == 0)
1465                 pagezero(pmap->pm_l0);
1466
1467         pmap->pm_root.rt_root = 0;
1468         bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1469
1470         return (1);
1471 }
1472
1473 /*
1474  * This routine is called if the desired page table page does not exist.
1475  *
1476  * If page table page allocation fails, this routine may sleep before
1477  * returning NULL.  It sleeps only if a lock pointer was given.
1478  *
1479  * Note: If a page allocation fails at page table level two or three,
1480  * one or two pages may be held during the wait, only to be released
1481  * afterwards.  This conservative approach is easily argued to avoid
1482  * race conditions.
1483  */
1484 static vm_page_t
1485 _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
1486 {
1487         vm_page_t m, l1pg, l2pg;
1488
1489         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1490
1491         /*
1492          * Allocate a page table page.
1493          */
1494         if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1495             VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1496                 if (lockp != NULL) {
1497                         RELEASE_PV_LIST_LOCK(lockp);
1498                         PMAP_UNLOCK(pmap);
1499                         vm_wait(NULL);
1500                         PMAP_LOCK(pmap);
1501                 }
1502
1503                 /*
1504                  * Indicate the need to retry.  While waiting, the page table
1505                  * page may have been allocated.
1506                  */
1507                 return (NULL);
1508         }
1509         if ((m->flags & PG_ZERO) == 0)
1510                 pmap_zero_page(m);
1511
1512         /*
1513          * Map the pagetable page into the process address space, if
1514          * it isn't already there.
1515          */
1516
1517         if (ptepindex >= (NUL2E + NUL1E)) {
1518                 pd_entry_t *l0;
1519                 vm_pindex_t l0index;
1520
1521                 l0index = ptepindex - (NUL2E + NUL1E);
1522                 l0 = &pmap->pm_l0[l0index];
1523                 pmap_load_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
1524         } else if (ptepindex >= NUL2E) {
1525                 vm_pindex_t l0index, l1index;
1526                 pd_entry_t *l0, *l1;
1527                 pd_entry_t tl0;
1528
1529                 l1index = ptepindex - NUL2E;
1530                 l0index = l1index >> L0_ENTRIES_SHIFT;
1531
1532                 l0 = &pmap->pm_l0[l0index];
1533                 tl0 = pmap_load(l0);
1534                 if (tl0 == 0) {
1535                         /* recurse for allocating page dir */
1536                         if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index,
1537                             lockp) == NULL) {
1538                                 vm_page_unwire_noq(m);
1539                                 vm_page_free_zero(m);
1540                                 return (NULL);
1541                         }
1542                 } else {
1543                         l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1544                         l1pg->wire_count++;
1545                 }
1546
1547                 l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
1548                 l1 = &l1[ptepindex & Ln_ADDR_MASK];
1549                 pmap_load_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
1550         } else {
1551                 vm_pindex_t l0index, l1index;
1552                 pd_entry_t *l0, *l1, *l2;
1553                 pd_entry_t tl0, tl1;
1554
1555                 l1index = ptepindex >> Ln_ENTRIES_SHIFT;
1556                 l0index = l1index >> L0_ENTRIES_SHIFT;
1557
1558                 l0 = &pmap->pm_l0[l0index];
1559                 tl0 = pmap_load(l0);
1560                 if (tl0 == 0) {
1561                         /* recurse for allocating page dir */
1562                         if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1563                             lockp) == NULL) {
1564                                 vm_page_unwire_noq(m);
1565                                 vm_page_free_zero(m);
1566                                 return (NULL);
1567                         }
1568                         tl0 = pmap_load(l0);
1569                         l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1570                         l1 = &l1[l1index & Ln_ADDR_MASK];
1571                 } else {
1572                         l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1573                         l1 = &l1[l1index & Ln_ADDR_MASK];
1574                         tl1 = pmap_load(l1);
1575                         if (tl1 == 0) {
1576                                 /* recurse for allocating page dir */
1577                                 if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1578                                     lockp) == NULL) {
1579                                         vm_page_unwire_noq(m);
1580                                         vm_page_free_zero(m);
1581                                         return (NULL);
1582                                 }
1583                         } else {
1584                                 l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1585                                 l2pg->wire_count++;
1586                         }
1587                 }
1588
1589                 l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
1590                 l2 = &l2[ptepindex & Ln_ADDR_MASK];
1591                 pmap_load_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
1592         }
1593
1594         pmap_resident_count_inc(pmap, 1);
1595
1596         return (m);
1597 }
1598
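     /*
      * Return the L2 page table page covering "va", taking an additional
      * reference on it if it already exists and allocating it otherwise.
      * The allocation may sleep, and is retried, only when a PV list lock
      * pointer is supplied.
      */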
1599 static vm_page_t
1600 pmap_alloc_l2(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1601 {
1602         pd_entry_t *l1;
1603         vm_page_t l2pg;
1604         vm_pindex_t l2pindex;
1605
1606 retry:
1607         l1 = pmap_l1(pmap, va);
1608         if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) {
1609                 /* Add a reference to the L2 page. */
1610                 l2pg = PHYS_TO_VM_PAGE(pmap_load(l1) & ~ATTR_MASK);
1611                 l2pg->wire_count++;
1612         } else {
1613                 /* Allocate an L2 page. */
1614                 l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
1615                 l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
1616                 if (l2pg == NULL && lockp != NULL)
1617                         goto retry;
1618         }
1619         return (l2pg);
1620 }
1621
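     /*
      * Return the L3 page table page that will hold the mapping for "va",
      * allocating it if necessary.  The retry and sleeping behaviour matches
      * pmap_alloc_l2() above.
      */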
1622 static vm_page_t
1623 pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1624 {
1625         vm_pindex_t ptepindex;
1626         pd_entry_t *pde, tpde;
1627 #ifdef INVARIANTS
1628         pt_entry_t *pte;
1629 #endif
1630         vm_page_t m;
1631         int lvl;
1632
1633         /*
1634          * Calculate pagetable page index
1635          */
1636         ptepindex = pmap_l2_pindex(va);
1637 retry:
1638         /*
1639          * Get the page directory entry
1640          */
1641         pde = pmap_pde(pmap, va, &lvl);
1642
1643         /*
1644          * If the page table page is mapped, we just increment the hold count,
1645          * and activate it. If we get a level 2 pde it will point to a level 3
1646          * table.
1647          */
1648         switch (lvl) {
1649         case -1:
1650                 break;
1651         case 0:
1652 #ifdef INVARIANTS
1653                 pte = pmap_l0_to_l1(pde, va);
1654                 KASSERT(pmap_load(pte) == 0,
1655                     ("pmap_alloc_l3: TODO: l0 superpages"));
1656 #endif
1657                 break;
1658         case 1:
1659 #ifdef INVARIANTS
1660                 pte = pmap_l1_to_l2(pde, va);
1661                 KASSERT(pmap_load(pte) == 0,
1662                     ("pmap_alloc_l3: TODO: l1 superpages"));
1663 #endif
1664                 break;
1665         case 2:
1666                 tpde = pmap_load(pde);
1667                 if (tpde != 0) {
1668                         m = PHYS_TO_VM_PAGE(tpde & ~ATTR_MASK);
1669                         m->wire_count++;
1670                         return (m);
1671                 }
1672                 break;
1673         default:
1674                 panic("pmap_alloc_l3: Invalid level %d", lvl);
1675         }
1676
1677         /*
1678          * Here if the pte page isn't mapped, or if it has been deallocated.
1679          */
1680         m = _pmap_alloc_l3(pmap, ptepindex, lockp);
1681         if (m == NULL && lockp != NULL)
1682                 goto retry;
1683
1684         return (m);
1685 }
1686
1687 /***************************************************
1688  * Pmap allocation/deallocation routines.
1689  ***************************************************/
1690
1691 /*
1692  * Release any resources held by the given physical map.
1693  * Called when a pmap initialized by pmap_pinit is being released.
1694  * Should only be called if the map contains no valid mappings.
1695  */
1696 void
1697 pmap_release(pmap_t pmap)
1698 {
1699         vm_page_t m;
1700
1701         KASSERT(pmap->pm_stats.resident_count == 0,
1702             ("pmap_release: pmap resident count %ld != 0",
1703             pmap->pm_stats.resident_count));
1704         KASSERT(vm_radix_is_empty(&pmap->pm_root),
1705             ("pmap_release: pmap has reserved page table page(s)"));
1706
1707         m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l0));
1708
1709         vm_page_unwire_noq(m);
1710         vm_page_free_zero(m);
1711 }
1712
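     /*
      * Sysctl handlers reporting the total size of the kernel virtual
      * address space and the portion not yet consumed by pmap_growkernel().
      */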
1713 static int
1714 kvm_size(SYSCTL_HANDLER_ARGS)
1715 {
1716         unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
1717
1718         return sysctl_handle_long(oidp, &ksize, 0, req);
1719 }
1720 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
1721     0, 0, kvm_size, "LU", "Size of KVM");
1722
1723 static int
1724 kvm_free(SYSCTL_HANDLER_ARGS)
1725 {
1726         unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1727
1728         return sysctl_handle_long(oidp, &kfree, 0, req);
1729 }
1730 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
1731     0, 0, kvm_free, "LU", "Amount of KVM free");
1732
1733 /*
1734  * grow the number of kernel page table entries, if needed
1735  */
1736 void
1737 pmap_growkernel(vm_offset_t addr)
1738 {
1739         vm_paddr_t paddr;
1740         vm_page_t nkpg;
1741         pd_entry_t *l0, *l1, *l2;
1742
1743         mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1744
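             /*
              * The kernel map is grown in 2MB (L2_SIZE) steps; any missing
              * intermediate page table pages are allocated on demand below,
              * and an allocation failure is fatal.
              */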
1745         addr = roundup2(addr, L2_SIZE);
1746         if (addr - 1 >= vm_map_max(kernel_map))
1747                 addr = vm_map_max(kernel_map);
1748         while (kernel_vm_end < addr) {
1749                 l0 = pmap_l0(kernel_pmap, kernel_vm_end);
1750                 KASSERT(pmap_load(l0) != 0,
1751                     ("pmap_growkernel: No level 0 kernel entry"));
1752
1753                 l1 = pmap_l0_to_l1(l0, kernel_vm_end);
1754                 if (pmap_load(l1) == 0) {
1755                         /* We need a new L2 page table page. */
1756                         nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
1757                             VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
1758                             VM_ALLOC_WIRED | VM_ALLOC_ZERO);
1759                         if (nkpg == NULL)
1760                                 panic("pmap_growkernel: no memory to grow kernel");
1761                         if ((nkpg->flags & PG_ZERO) == 0)
1762                                 pmap_zero_page(nkpg);
1763                         paddr = VM_PAGE_TO_PHYS(nkpg);
1764                         pmap_load_store(l1, paddr | L1_TABLE);
1765                         continue; /* try again */
1766                 }
1767                 l2 = pmap_l1_to_l2(l1, kernel_vm_end);
1768                 if ((pmap_load(l2) & ATTR_AF) != 0) {
1769                         kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
1770                         if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
1771                                 kernel_vm_end = vm_map_max(kernel_map);
1772                                 break;
1773                         }
1774                         continue;
1775                 }
1776
1777                 nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
1778                     VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
1779                     VM_ALLOC_ZERO);
1780                 if (nkpg == NULL)
1781                         panic("pmap_growkernel: no memory to grow kernel");
1782                 if ((nkpg->flags & PG_ZERO) == 0)
1783                         pmap_zero_page(nkpg);
1784                 paddr = VM_PAGE_TO_PHYS(nkpg);
1785                 pmap_load_store(l2, paddr | L2_TABLE);
1786                 pmap_invalidate_page(kernel_pmap, kernel_vm_end);
1787
1788                 kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
1789                 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
1790                         kernel_vm_end = vm_map_max(kernel_map);
1791                         break;
1792                 }
1793         }
1794 }
1795
1796
1797 /***************************************************
1798  * page management routines.
1799  ***************************************************/
1800
1801 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
1802 CTASSERT(_NPCM == 3);
1803 CTASSERT(_NPCPV == 168);
1804
1805 static __inline struct pv_chunk *
1806 pv_to_chunk(pv_entry_t pv)
1807 {
1808
1809         return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
1810 }
1811
1812 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1813
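     /*
      * Each pv_chunk holds _NPCPV (168) pv entries, tracked by a free bitmap
      * of three 64-bit words: two fully populated words plus 40 bits in the
      * third (64 + 64 + 40 == 168).
      */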
1814 #define PC_FREE0        0xfffffffffffffffful
1815 #define PC_FREE1        0xfffffffffffffffful
1816 #define PC_FREE2        0x000000fffffffffful
1817
1818 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
1819
1820 #if 0
1821 #ifdef PV_STATS
1822 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
1823
1824 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
1825         "Current number of pv entry chunks");
1826 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
1827         "Current number of pv entry chunks allocated");
1828 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
1829         "Total number of pv entry chunks freed");
1830 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
1831         "Number of times tried to get a chunk page but failed.");
1832
1833 static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
1834 static int pv_entry_spare;
1835
1836 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
1837         "Current number of pv entry frees");
1838 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
1839         "Current number of pv entry allocs");
1840 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
1841         "Current number of pv entries");
1842 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
1843         "Current number of spare pv entries");
1844 #endif
1845 #endif /* 0 */
1846
1847 /*
1848  * We are in a serious low memory condition.  Resort to
1849  * drastic measures to free some pages so we can allocate
1850  * another pv entry chunk.
1851  *
1852  * Returns NULL if PV entries were reclaimed from the specified pmap.
1853  *
1854  * We do not, however, unmap 2mpages because subsequent accesses will
1855  * allocate per-page pv entries until repromotion occurs, thereby
1856  * exacerbating the shortage of free pv entries.
1857  */
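     /*
      * The scan below brackets its position in the global pv_chunks list
      * with a pair of marker chunks, allowing pv_chunks_mutex to be dropped
      * while each candidate pmap is locked and scanned.
      */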
1858 static vm_page_t
1859 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
1860 {
1861         struct pv_chunk *pc, *pc_marker, *pc_marker_end;
1862         struct pv_chunk_header pc_marker_b, pc_marker_end_b;
1863         struct md_page *pvh;
1864         pd_entry_t *pde;
1865         pmap_t next_pmap, pmap;
1866         pt_entry_t *pte, tpte;
1867         pv_entry_t pv;
1868         vm_offset_t va;
1869         vm_page_t m, m_pc;
1870         struct spglist free;
1871         uint64_t inuse;
1872         int bit, field, freed, lvl;
1873         static int active_reclaims = 0;
1874
1875         PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1876         KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
1877
1878         pmap = NULL;
1879         m_pc = NULL;
1880         SLIST_INIT(&free);
1881         bzero(&pc_marker_b, sizeof(pc_marker_b));
1882         bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
1883         pc_marker = (struct pv_chunk *)&pc_marker_b;
1884         pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
1885
1886         mtx_lock(&pv_chunks_mutex);
1887         active_reclaims++;
1888         TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
1889         TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
1890         while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
1891             SLIST_EMPTY(&free)) {
1892                 next_pmap = pc->pc_pmap;
1893                 if (next_pmap == NULL) {
1894                         /*
1895                          * The next chunk is a marker.  However, it is
1896                          * not our marker, so active_reclaims must be
1897                          * > 1.  Consequently, the next_chunk code
1898                          * will not rotate the pv_chunks list.
1899                          */
1900                         goto next_chunk;
1901                 }
1902                 mtx_unlock(&pv_chunks_mutex);
1903
1904                 /*
1905                  * A pv_chunk can only be removed from the pc_lru list
1906                  * when both pv_chunks_mutex is owned and the
1907                  * corresponding pmap is locked.
1908                  */
1909                 if (pmap != next_pmap) {
1910                         if (pmap != NULL && pmap != locked_pmap)
1911                                 PMAP_UNLOCK(pmap);
1912                         pmap = next_pmap;
1913                         /* Avoid deadlock and lock recursion. */
1914                         if (pmap > locked_pmap) {
1915                                 RELEASE_PV_LIST_LOCK(lockp);
1916                                 PMAP_LOCK(pmap);
1917                                 mtx_lock(&pv_chunks_mutex);
1918                                 continue;
1919                         } else if (pmap != locked_pmap) {
1920                                 if (PMAP_TRYLOCK(pmap)) {
1921                                         mtx_lock(&pv_chunks_mutex);
1922                                         continue;
1923                                 } else {
1924                                         pmap = NULL; /* pmap is not locked */
1925                                         mtx_lock(&pv_chunks_mutex);
1926                                         pc = TAILQ_NEXT(pc_marker, pc_lru);
1927                                         if (pc == NULL ||
1928                                             pc->pc_pmap != next_pmap)
1929                                                 continue;
1930                                         goto next_chunk;
1931                                 }
1932                         }
1933                 }
1934
1935                 /*
1936                  * Destroy every non-wired, 4 KB page mapping in the chunk.
1937                  */
1938                 freed = 0;
1939                 for (field = 0; field < _NPCM; field++) {
1940                         for (inuse = ~pc->pc_map[field] & pc_freemask[field];
1941                             inuse != 0; inuse &= ~(1UL << bit)) {
1942                                 bit = ffsl(inuse) - 1;
1943                                 pv = &pc->pc_pventry[field * 64 + bit];
1944                                 va = pv->pv_va;
1945                                 pde = pmap_pde(pmap, va, &lvl);
1946                                 if (lvl != 2)
1947                                         continue;
1948                                 pte = pmap_l2_to_l3(pde, va);
1949                                 tpte = pmap_load(pte);
1950                                 if ((tpte & ATTR_SW_WIRED) != 0)
1951                                         continue;
1952                                 tpte = pmap_load_clear(pte);
1953                                 pmap_invalidate_page(pmap, va);
1954                                 m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK);
1955                                 if (pmap_page_dirty(tpte))
1956                                         vm_page_dirty(m);
1957                                 if ((tpte & ATTR_AF) != 0)
1958                                         vm_page_aflag_set(m, PGA_REFERENCED);
1959                                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1960                                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
1961                                 m->md.pv_gen++;
1962                                 if (TAILQ_EMPTY(&m->md.pv_list) &&
1963                                     (m->flags & PG_FICTITIOUS) == 0) {
1964                                         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
1965                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
1966                                                 vm_page_aflag_clear(m,
1967                                                     PGA_WRITEABLE);
1968                                         }
1969                                 }
1970                                 pc->pc_map[field] |= 1UL << bit;
1971                                 pmap_unuse_pt(pmap, va, pmap_load(pde), &free);
1972                                 freed++;
1973                         }
1974                 }
1975                 if (freed == 0) {
1976                         mtx_lock(&pv_chunks_mutex);
1977                         goto next_chunk;
1978                 }
1979                 /* Every freed mapping is for a 4 KB page. */
1980                 pmap_resident_count_dec(pmap, freed);
1981                 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
1982                 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
1983                 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
1984                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1985                 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
1986                     pc->pc_map[2] == PC_FREE2) {
1987                         PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1988                         PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1989                         PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1990                         /* Entire chunk is free; return it. */
1991                         m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1992                         dump_drop_page(m_pc->phys_addr);
1993                         mtx_lock(&pv_chunks_mutex);
1994                         TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1995                         break;
1996                 }
1997                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1998                 mtx_lock(&pv_chunks_mutex);
1999                 /* One freed pv entry in locked_pmap is sufficient. */
2000                 if (pmap == locked_pmap)
2001                         break;
2002
2003 next_chunk:
2004                 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
2005                 TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
2006                 if (active_reclaims == 1 && pmap != NULL) {
2007                         /*
2008                          * Rotate the pv chunks list so that we do not
2009                          * scan the same pv chunks that could not be
2010                          * freed (because they contained a wired
2011                          * and/or superpage mapping) on every
2012                          * invocation of reclaim_pv_chunk().
2013                          */
2014                         while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
2015                                 MPASS(pc->pc_pmap != NULL);
2016                                 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2017                                 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
2018                         }
2019                 }
2020         }
2021         TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
2022         TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
2023         active_reclaims--;
2024         mtx_unlock(&pv_chunks_mutex);
2025         if (pmap != NULL && pmap != locked_pmap)
2026                 PMAP_UNLOCK(pmap);
2027         if (m_pc == NULL && !SLIST_EMPTY(&free)) {
2028                 m_pc = SLIST_FIRST(&free);
2029                 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
2030                 /* Recycle a freed page table page. */
2031                 m_pc->wire_count = 1;
2032         }
2033         vm_page_free_pages_toq(&free, true);
2034         return (m_pc);
2035 }
2036
2037 /*
2038  * free the pv_entry back to the free list
2039  */
2040 static void
2041 free_pv_entry(pmap_t pmap, pv_entry_t pv)
2042 {
2043         struct pv_chunk *pc;
2044         int idx, field, bit;
2045
2046         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2047         PV_STAT(atomic_add_long(&pv_entry_frees, 1));
2048         PV_STAT(atomic_add_int(&pv_entry_spare, 1));
2049         PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
2050         pc = pv_to_chunk(pv);
2051         idx = pv - &pc->pc_pventry[0];
2052         field = idx / 64;
2053         bit = idx % 64;
2054         pc->pc_map[field] |= 1ul << bit;
2055         if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
2056             pc->pc_map[2] != PC_FREE2) {
2057                 /* 98% of the time, pc is already at the head of the list. */
2058                 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
2059                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2060                         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2061                 }
2062                 return;
2063         }
2064         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2065         free_pv_chunk(pc);
2066 }
2067
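     /*
      * Release a completely free pv_chunk back to the physical memory
      * allocator.
      */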
2068 static void
2069 free_pv_chunk(struct pv_chunk *pc)
2070 {
2071         vm_page_t m;
2072
2073         mtx_lock(&pv_chunks_mutex);
2074         TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2075         mtx_unlock(&pv_chunks_mutex);
2076         PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
2077         PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
2078         PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
2079         /* entire chunk is free, return it */
2080         m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
2081         dump_drop_page(m->phys_addr);
2082         vm_page_unwire_noq(m);
2083         vm_page_free(m);
2084 }
2085
2086 /*
2087  * Returns a new PV entry, allocating a new PV chunk from the system when
2088  * needed.  If this PV chunk allocation fails and a PV list lock pointer was
2089  * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
2090  * returned.
2091  *
2092  * The given PV list lock may be released.
2093  */
2094 static pv_entry_t
2095 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
2096 {
2097         int bit, field;
2098         pv_entry_t pv;
2099         struct pv_chunk *pc;
2100         vm_page_t m;
2101
2102         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2103         PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
2104 retry:
2105         pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2106         if (pc != NULL) {
2107                 for (field = 0; field < _NPCM; field++) {
2108                         if (pc->pc_map[field]) {
2109                                 bit = ffsl(pc->pc_map[field]) - 1;
2110                                 break;
2111                         }
2112                 }
2113                 if (field < _NPCM) {
2114                         pv = &pc->pc_pventry[field * 64 + bit];
2115                         pc->pc_map[field] &= ~(1ul << bit);
2116                         /* If the chunk is now fully allocated, move it to the tail. */
2117                         if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
2118                             pc->pc_map[2] == 0) {
2119                                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2120                                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
2121                                     pc_list);
2122                         }
2123                         PV_STAT(atomic_add_long(&pv_entry_count, 1));
2124                         PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
2125                         return (pv);
2126                 }
2127         }
2128         /* No free items, allocate another chunk */
2129         m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2130             VM_ALLOC_WIRED);
2131         if (m == NULL) {
2132                 if (lockp == NULL) {
2133                         PV_STAT(pc_chunk_tryfail++);
2134                         return (NULL);
2135                 }
2136                 m = reclaim_pv_chunk(pmap, lockp);
2137                 if (m == NULL)
2138                         goto retry;
2139         }
2140         PV_STAT(atomic_add_int(&pc_chunk_count, 1));
2141         PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
2142         dump_add_page(m->phys_addr);
2143         pc = (void *)PHYS_TO_DMAP(m->phys_addr);
2144         pc->pc_pmap = pmap;
2145         pc->pc_map[0] = PC_FREE0 & ~1ul;        /* preallocated bit 0 */
2146         pc->pc_map[1] = PC_FREE1;
2147         pc->pc_map[2] = PC_FREE2;
2148         mtx_lock(&pv_chunks_mutex);
2149         TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
2150         mtx_unlock(&pv_chunks_mutex);
2151         pv = &pc->pc_pventry[0];
2152         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2153         PV_STAT(atomic_add_long(&pv_entry_count, 1));
2154         PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
2155         return (pv);
2156 }
2157
2158 /*
2159  * Ensure that the number of spare PV entries in the specified pmap meets or
2160  * exceeds the given count, "needed".
2161  *
2162  * The given PV list lock may be released.
2163  */
2164 static void
2165 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
2166 {
2167         struct pch new_tail;
2168         struct pv_chunk *pc;
2169         vm_page_t m;
2170         int avail, free;
2171         bool reclaimed;
2172
2173         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2174         KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
2175
2176         /*
2177          * Newly allocated PV chunks must be stored in a private list until
2178          * the required number of PV chunks have been allocated.  Otherwise,
2179          * reclaim_pv_chunk() could recycle one of these chunks.  In
2180          * contrast, these chunks must be added to the pmap upon allocation.
2181          */
2182         TAILQ_INIT(&new_tail);
2183 retry:
2184         avail = 0;
2185         TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
2186                 bit_count((bitstr_t *)pc->pc_map, 0,
2187                     sizeof(pc->pc_map) * NBBY, &free);
2188                 if (free == 0)
2189                         break;
2190                 avail += free;
2191                 if (avail >= needed)
2192                         break;
2193         }
2194         for (reclaimed = false; avail < needed; avail += _NPCPV) {
2195                 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2196                     VM_ALLOC_WIRED);
2197                 if (m == NULL) {
2198                         m = reclaim_pv_chunk(pmap, lockp);
2199                         if (m == NULL)
2200                                 goto retry;
2201                         reclaimed = true;
2202                 }
2203                 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
2204                 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
2205                 dump_add_page(m->phys_addr);
2206                 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
2207                 pc->pc_pmap = pmap;
2208                 pc->pc_map[0] = PC_FREE0;
2209                 pc->pc_map[1] = PC_FREE1;
2210                 pc->pc_map[2] = PC_FREE2;
2211                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2212                 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
2213                 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
2214
2215                 /*
2216                  * The reclaim might have freed a chunk from the current pmap.
2217                  * If that chunk contained available entries, we need to
2218                  * re-count the number of available entries.
2219                  */
2220                 if (reclaimed)
2221                         goto retry;
2222         }
2223         if (!TAILQ_EMPTY(&new_tail)) {
2224                 mtx_lock(&pv_chunks_mutex);
2225                 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
2226                 mtx_unlock(&pv_chunks_mutex);
2227         }
2228 }
2229
2230 /*
2231  * First find and then remove the pv entry for the specified pmap and virtual
2232  * address from the specified pv list.  Returns the pv entry if found and NULL
2233  * otherwise.  This operation can be performed on pv lists for either 4KB or
2234  * 2MB page mappings.
2235  */
2236 static __inline pv_entry_t
2237 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2238 {
2239         pv_entry_t pv;
2240
2241         TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
2242                 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
2243                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
2244                         pvh->pv_gen++;
2245                         break;
2246                 }
2247         }
2248         return (pv);
2249 }
2250
2251 /*
2252  * After demotion from a 2MB page mapping to 512 4KB page mappings,
2253  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
2254  * entries for each of the 4KB page mappings.
2255  */
2256 static void
2257 pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
2258     struct rwlock **lockp)
2259 {
2260         struct md_page *pvh;
2261         struct pv_chunk *pc;
2262         pv_entry_t pv;
2263         vm_offset_t va_last;
2264         vm_page_t m;
2265         int bit, field;
2266
2267         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2268         KASSERT((va & L2_OFFSET) == 0,
2269             ("pmap_pv_demote_l2: va is not 2mpage aligned"));
2270         KASSERT((pa & L2_OFFSET) == 0,
2271             ("pmap_pv_demote_l2: pa is not 2mpage aligned"));
2272         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2273
2274         /*
2275          * Transfer the 2mpage's pv entry for this mapping to the first
2276          * page's pv list.  Once this transfer begins, the pv list lock
2277          * must not be released until the last pv entry is reinstantiated.
2278          */
2279         pvh = pa_to_pvh(pa);
2280         pv = pmap_pvh_remove(pvh, pmap, va);
2281         KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
2282         m = PHYS_TO_VM_PAGE(pa);
2283         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2284         m->md.pv_gen++;
2285         /* Instantiate the remaining Ln_ENTRIES - 1 pv entries. */
2286         PV_STAT(atomic_add_long(&pv_entry_allocs, Ln_ENTRIES - 1));
2287         va_last = va + L2_SIZE - PAGE_SIZE;
2288         for (;;) {
2289                 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2290                 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
2291                     pc->pc_map[2] != 0, ("pmap_pv_demote_l2: missing spare"));
2292                 for (field = 0; field < _NPCM; field++) {
2293                         while (pc->pc_map[field]) {
2294                                 bit = ffsl(pc->pc_map[field]) - 1;
2295                                 pc->pc_map[field] &= ~(1ul << bit);
2296                                 pv = &pc->pc_pventry[field * 64 + bit];
2297                                 va += PAGE_SIZE;
2298                                 pv->pv_va = va;
2299                                 m++;
2300                                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2301                             ("pmap_pv_demote_l2: page %p is not managed", m));
2302                                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2303                                 m->md.pv_gen++;
2304                                 if (va == va_last)
2305                                         goto out;
2306                         }
2307                 }
2308                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2309                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2310         }
2311 out:
2312         if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
2313                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2314                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2315         }
2316         PV_STAT(atomic_add_long(&pv_entry_count, Ln_ENTRIES - 1));
2317         PV_STAT(atomic_subtract_int(&pv_entry_spare, Ln_ENTRIES - 1));
2318 }
2319
2320 /*
2321  * First find and then destroy the pv entry for the specified pmap and virtual
2322  * address.  This operation can be performed on pv lists for either 4KB or 2MB
2323  * page mappings.
2324  */
2325 static void
2326 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2327 {
2328         pv_entry_t pv;
2329
2330         pv = pmap_pvh_remove(pvh, pmap, va);
2331         KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
2332         free_pv_entry(pmap, pv);
2333 }
2334
2335 /*
2336  * Conditionally create the PV entry for a 4KB page mapping if the required
2337  * memory can be allocated without resorting to reclamation.
2338  */
2339 static boolean_t
2340 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
2341     struct rwlock **lockp)
2342 {
2343         pv_entry_t pv;
2344
2345         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2346         /* Pass NULL instead of the lock pointer to disable reclamation. */
2347         if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
2348                 pv->pv_va = va;
2349                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2350                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2351                 m->md.pv_gen++;
2352                 return (TRUE);
2353         } else
2354                 return (FALSE);
2355 }
2356
2357 /*
2358  * Create the PV entry for a 2MB page mapping.  Always returns true unless the
2359  * flag PMAP_ENTER_NORECLAIM is specified.  If that flag is specified, returns
2360  * false if the PV entry cannot be allocated without resorting to reclamation.
2361  */
2362 static bool
2363 pmap_pv_insert_l2(pmap_t pmap, vm_offset_t va, pd_entry_t l2e, u_int flags,
2364     struct rwlock **lockp)
2365 {
2366         struct md_page *pvh;
2367         pv_entry_t pv;
2368         vm_paddr_t pa;
2369
2370         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2371         /* Pass NULL instead of the lock pointer to disable reclamation. */
2372         if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
2373             NULL : lockp)) == NULL)
2374                 return (false);
2375         pv->pv_va = va;
2376         pa = l2e & ~ATTR_MASK;
2377         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2378         pvh = pa_to_pvh(pa);
2379         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
2380         pvh->pv_gen++;
2381         return (true);
2382 }
2383
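     /*
      * After a kernel L2 block mapping has been removed (the "break" step),
      * reinstall the reserved L3 page table page for that range so the
      * kernel page table structure beneath it is preserved.
      */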
2384 static void
2385 pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
2386 {
2387         pt_entry_t newl2, oldl2;
2388         vm_page_t ml3;
2389         vm_paddr_t ml3pa;
2390
2391         KASSERT(!VIRT_IN_DMAP(va), ("removing direct mapping of %#lx", va));
2392         KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
2393         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2394
2395         ml3 = pmap_remove_pt_page(pmap, va);
2396         if (ml3 == NULL)
2397                 panic("pmap_remove_kernel_l2: Missing pt page");
2398
2399         ml3pa = VM_PAGE_TO_PHYS(ml3);
2400         newl2 = ml3pa | L2_TABLE;
2401
2402         /*
2403          * If this page table page was unmapped by a promotion, then it
2404          * contains valid mappings.  Zero it to invalidate those mappings.
2405          */
2406         if (ml3->valid != 0)
2407                 pagezero((void *)PHYS_TO_DMAP(ml3pa));
2408
2409         /*
2410          * Demote the mapping.  The caller must have already invalidated the
2411          * mapping (i.e., the "break" in break-before-make).
2412          */
2413         oldl2 = pmap_load_store(l2, newl2);
2414         KASSERT(oldl2 == 0, ("%s: found existing mapping at %p: %#lx",
2415             __func__, l2, oldl2));
2416 }
2417
2418 /*
2419  * pmap_remove_l2: Do the things to unmap a level 2 superpage.
2420  */
2421 static int
2422 pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
2423     pd_entry_t l1e, struct spglist *free, struct rwlock **lockp)
2424 {
2425         struct md_page *pvh;
2426         pt_entry_t old_l2;
2427         vm_offset_t eva, va;
2428         vm_page_t m, ml3;
2429
2430         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2431         KASSERT((sva & L2_OFFSET) == 0, ("pmap_remove_l2: sva is not aligned"));
2432         old_l2 = pmap_load_clear(l2);
2433         KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
2434             ("pmap_remove_l2: L2e %lx is not a block mapping", old_l2));
2435
2436         /*
2437          * Since a promotion must break the 4KB page mappings before making
2438          * the 2MB page mapping, a pmap_invalidate_page() suffices.
2439          */
2440         pmap_invalidate_page(pmap, sva);
2441
2442         if (old_l2 & ATTR_SW_WIRED)
2443                 pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
2444         pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
2445         if (old_l2 & ATTR_SW_MANAGED) {
2446                 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, old_l2 & ~ATTR_MASK);
2447                 pvh = pa_to_pvh(old_l2 & ~ATTR_MASK);
2448                 pmap_pvh_free(pvh, pmap, sva);
2449                 eva = sva + L2_SIZE;
2450                 for (va = sva, m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
2451                     va < eva; va += PAGE_SIZE, m++) {
2452                         if (pmap_page_dirty(old_l2))
2453                                 vm_page_dirty(m);
2454                         if (old_l2 & ATTR_AF)
2455                                 vm_page_aflag_set(m, PGA_REFERENCED);
2456                         if (TAILQ_EMPTY(&m->md.pv_list) &&
2457                             TAILQ_EMPTY(&pvh->pv_list))
2458                                 vm_page_aflag_clear(m, PGA_WRITEABLE);
2459                 }
2460         }
2461         if (pmap == kernel_pmap) {
2462                 pmap_remove_kernel_l2(pmap, l2, sva);
2463         } else {
2464                 ml3 = pmap_remove_pt_page(pmap, sva);
2465                 if (ml3 != NULL) {
2466                         KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
2467                             ("pmap_remove_l2: l3 page not promoted"));
2468                         pmap_resident_count_dec(pmap, 1);
2469                         KASSERT(ml3->wire_count == NL3PG,
2470                             ("pmap_remove_l2: l3 page wire count error"));
2471                         ml3->wire_count = 0;
2472                         pmap_add_delayed_free_list(ml3, free, FALSE);
2473                 }
2474         }
2475         return (pmap_unuse_pt(pmap, sva, l1e, free));
2476 }
2477
2478 /*
2479  * pmap_remove_l3: do the things to unmap a page in a process
2480  */
2481 static int
2482 pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
2483     pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
2484 {
2485         struct md_page *pvh;
2486         pt_entry_t old_l3;
2487         vm_page_t m;
2488
2489         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2490         old_l3 = pmap_load_clear(l3);
2491         pmap_invalidate_page(pmap, va);
2492         if (old_l3 & ATTR_SW_WIRED)
2493                 pmap->pm_stats.wired_count -= 1;
2494         pmap_resident_count_dec(pmap, 1);
2495         if (old_l3 & ATTR_SW_MANAGED) {
2496                 m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
2497                 if (pmap_page_dirty(old_l3))
2498                         vm_page_dirty(m);
2499                 if (old_l3 & ATTR_AF)
2500                         vm_page_aflag_set(m, PGA_REFERENCED);
2501                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2502                 pmap_pvh_free(&m->md, pmap, va);
2503                 if (TAILQ_EMPTY(&m->md.pv_list) &&
2504                     (m->flags & PG_FICTITIOUS) == 0) {
2505                         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2506                         if (TAILQ_EMPTY(&pvh->pv_list))
2507                                 vm_page_aflag_clear(m, PGA_WRITEABLE);
2508                 }
2509         }
2510         return (pmap_unuse_pt(pmap, va, l2e, free));
2511 }
2512
2513 /*
2514  * Remove the specified range of addresses from the L3 page table that is
2515  * identified by the given L2 entry.
2516  */
2517 static void
2518 pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
2519     vm_offset_t eva, struct spglist *free, struct rwlock **lockp)
2520 {
2521         struct md_page *pvh;
2522         struct rwlock *new_lock;
2523         pt_entry_t *l3, old_l3;
2524         vm_offset_t va;
2525         vm_page_t m;
2526
2527         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2528         KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
2529             ("pmap_remove_l3_range: range crosses an L3 page table boundary"));
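             /*
              * "va" tracks the start of a run of removed mappings whose TLB
              * invalidation is still pending; it equals "eva" when nothing
              * is pending.
              */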
2530         va = eva;
2531         for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
2532                 if (!pmap_l3_valid(pmap_load(l3))) {
2533                         if (va != eva) {
2534                                 pmap_invalidate_range(pmap, va, sva);
2535                                 va = eva;
2536                         }
2537                         continue;
2538                 }
2539                 old_l3 = pmap_load_clear(l3);
2540                 if ((old_l3 & ATTR_SW_WIRED) != 0)
2541                         pmap->pm_stats.wired_count--;
2542                 pmap_resident_count_dec(pmap, 1);
2543                 if ((old_l3 & ATTR_SW_MANAGED) != 0) {
2544                         m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
2545                         if (pmap_page_dirty(old_l3))
2546                                 vm_page_dirty(m);
2547                         if ((old_l3 & ATTR_AF) != 0)
2548                                 vm_page_aflag_set(m, PGA_REFERENCED);
2549                         new_lock = PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m));
2550                         if (new_lock != *lockp) {
2551                                 if (*lockp != NULL) {
2552                                         /*
2553                                          * Pending TLB invalidations must be
2554                                          * performed before the PV list lock is
2555                                          * released.  Otherwise, a concurrent
2556                                          * pmap_remove_all() on a physical page
2557                                          * could return while a stale TLB entry
2558                                          * still provides access to that page. 
2559                                          */
2560                                         if (va != eva) {
2561                                                 pmap_invalidate_range(pmap, va,
2562                                                     sva);
2563                                                 va = eva;
2564                                         }
2565                                         rw_wunlock(*lockp);
2566                                 }
2567                                 *lockp = new_lock;
2568                                 rw_wlock(*lockp);
2569                         }
2570                         pmap_pvh_free(&m->md, pmap, sva);
2571                         if (TAILQ_EMPTY(&m->md.pv_list) &&
2572                             (m->flags & PG_FICTITIOUS) == 0) {
2573                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2574                                 if (TAILQ_EMPTY(&pvh->pv_list))
2575                                         vm_page_aflag_clear(m, PGA_WRITEABLE);
2576                         }
2577                 }
2578                 if (va == eva)
2579                         va = sva;
2580                 if (pmap_unuse_pt(pmap, sva, l2e, free)) {
2581                         sva += L3_SIZE;
2582                         break;
2583                 }
2584         }
2585         if (va != eva)
2586                 pmap_invalidate_range(pmap, va, sva);
2587 }
2588
2589 /*
2590  *      Remove the given range of addresses from the specified map.
2591  *
2592  *      It is assumed that the start and end are properly
2593  *      rounded to the page size.
2594  */
2595 void
2596 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2597 {
2598         struct rwlock *lock;
2599         vm_offset_t va_next;
2600         pd_entry_t *l0, *l1, *l2;
2601         pt_entry_t l3_paddr;
2602         struct spglist free;
2603
2604         /*
2605          * Perform an unsynchronized read.  This is, however, safe.
2606          */
2607         if (pmap->pm_stats.resident_count == 0)
2608                 return;
2609
2610         SLIST_INIT(&free);
2611
2612         PMAP_LOCK(pmap);
2613
2614         lock = NULL;
2615         for (; sva < eva; sva = va_next) {
2616
2617                 if (pmap->pm_stats.resident_count == 0)
2618                         break;
2619
2620                 l0 = pmap_l0(pmap, sva);
2621                 if (pmap_load(l0) == 0) {
2622                         va_next = (sva + L0_SIZE) & ~L0_OFFSET;
2623                         if (va_next < sva)
2624                                 va_next = eva;
2625                         continue;
2626                 }
2627
2628                 l1 = pmap_l0_to_l1(l0, sva);
2629                 if (pmap_load(l1) == 0) {
2630                         va_next = (sva + L1_SIZE) & ~L1_OFFSET;
2631                         if (va_next < sva)
2632                                 va_next = eva;
2633                         continue;
2634                 }
2635
2636                 /*
2637                  * Calculate index for next page table.
2638                  */
2639                 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2640                 if (va_next < sva)
2641                         va_next = eva;
2642
2643                 l2 = pmap_l1_to_l2(l1, sva);
2644                 if (l2 == NULL)
2645                         continue;
2646
2647                 l3_paddr = pmap_load(l2);
2648
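                     /*
                      * A 2MB block mapping is removed wholesale when the
                      * range covers the entire block; otherwise it is
                      * demoted to 4KB mappings first.
                      */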
2649                 if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
2650                         if (sva + L2_SIZE == va_next && eva >= va_next) {
2651                                 pmap_remove_l2(pmap, l2, sva, pmap_load(l1),
2652                                     &free, &lock);
2653                                 continue;
2654                         } else if (pmap_demote_l2_locked(pmap, l2, sva,
2655                             &lock) == NULL)
2656                                 continue;
2657                         l3_paddr = pmap_load(l2);
2658                 }
2659
2660                 /*
2661                  * Weed out invalid mappings.
2662                  */
2663                 if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
2664                         continue;
2665
2666                 /*
2667                  * Limit our scan to either the end of the va represented
2668                  * by the current page table page, or to the end of the
2669                  * range being removed.
2670                  */
2671                 if (va_next > eva)
2672                         va_next = eva;
2673
2674                 pmap_remove_l3_range(pmap, l3_paddr, sva, va_next, &free,
2675                     &lock);
2676         }
2677         if (lock != NULL)
2678                 rw_wunlock(lock);
2679         PMAP_UNLOCK(pmap);
2680         vm_page_free_pages_toq(&free, true);
2681 }
2682
2683 /*
2684  *      Routine:        pmap_remove_all
2685  *      Function:
2686  *              Removes this physical page from
2687  *              all physical maps in which it resides.
2688  *              Reflects back modify bits to the pager.
2689  *
2690  *      Notes:
2691  *              Original versions of this routine were very
2692  *              inefficient because they iteratively called
2693  *              pmap_remove (slow...)
2694  */
2695
2696 void
2697 pmap_remove_all(vm_page_t m)
2698 {
2699         struct md_page *pvh;
2700         pv_entry_t pv;
2701         pmap_t pmap;
2702         struct rwlock *lock;
2703         pd_entry_t *pde, tpde;
2704         pt_entry_t *pte, tpte;
2705         vm_offset_t va;
2706         struct spglist free;
2707         int lvl, pvh_gen, md_gen;
2708
2709         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2710             ("pmap_remove_all: page %p is not managed", m));
2711         SLIST_INIT(&free);
2712         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2713         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
2714             pa_to_pvh(VM_PAGE_TO_PHYS(m));
2715 retry:
2716         rw_wlock(lock);
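             /*
              * First demote any 2MB mappings of the page so that the second
              * loop below only encounters 4KB mappings.
              */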
2717         while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
2718                 pmap = PV_PMAP(pv);
2719                 if (!PMAP_TRYLOCK(pmap)) {
2720                         pvh_gen = pvh->pv_gen;
2721                         rw_wunlock(lock);
2722                         PMAP_LOCK(pmap);
2723                         rw_wlock(lock);
2724                         if (pvh_gen != pvh->pv_gen) {
2725                                 rw_wunlock(lock);
2726                                 PMAP_UNLOCK(pmap);
2727                                 goto retry;
2728                         }
2729                 }
2730                 va = pv->pv_va;
2731                 pte = pmap_pte(pmap, va, &lvl);
2732                 KASSERT(pte != NULL,
2733                     ("pmap_remove_all: no page table entry found"));
2734                 KASSERT(lvl == 2,
2735                     ("pmap_remove_all: invalid pte level %d", lvl));
2736
2737                 pmap_demote_l2_locked(pmap, pte, va, &lock);
2738                 PMAP_UNLOCK(pmap);
2739         }
2740         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2741                 pmap = PV_PMAP(pv);
2742                 if (!PMAP_TRYLOCK(pmap)) {
2743                         pvh_gen = pvh->pv_gen;
2744                         md_gen = m->md.pv_gen;
2745                         rw_wunlock(lock);
2746                         PMAP_LOCK(pmap);
2747                         rw_wlock(lock);
2748                         if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
2749                                 rw_wunlock(lock);
2750                                 PMAP_UNLOCK(pmap);
2751                                 goto retry;
2752                         }
2753                 }
2754                 pmap_resident_count_dec(pmap, 1);
2755
2756                 pde = pmap_pde(pmap, pv->pv_va, &lvl);
2757                 KASSERT(pde != NULL,
2758                     ("pmap_remove_all: no page directory entry found"));
2759                 KASSERT(lvl == 2,
2760                     ("pmap_remove_all: invalid pde level %d", lvl));
2761                 tpde = pmap_load(pde);
2762
2763                 pte = pmap_l2_to_l3(pde, pv->pv_va);
2764                 tpte = pmap_load_clear(pte);
2765                 pmap_invalidate_page(pmap, pv->pv_va);
2766                 if (tpte & ATTR_SW_WIRED)
2767                         pmap->pm_stats.wired_count--;
2768                 if ((tpte & ATTR_AF) != 0)
2769                         vm_page_aflag_set(m, PGA_REFERENCED);
2770
2771                 /*
2772                  * Update the vm_page_t clean and reference bits.
2773                  */
2774                 if (pmap_page_dirty(tpte))
2775                         vm_page_dirty(m);
2776                 pmap_unuse_pt(pmap, pv->pv_va, tpde, &free);
2777                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2778                 m->md.pv_gen++;
2779                 free_pv_entry(pmap, pv);
2780                 PMAP_UNLOCK(pmap);
2781         }
2782         vm_page_aflag_clear(m, PGA_WRITEABLE);
2783         rw_wunlock(lock);
2784         vm_page_free_pages_toq(&free, true);
2785 }
2786
2787 /*
2788  *      Set the physical protection on the
2789  *      specified range of this map as requested.
2790  */
2791 void
2792 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2793 {
2794         vm_offset_t va, va_next;
2795         pd_entry_t *l0, *l1, *l2;
2796         pt_entry_t *l3p, l3, nbits;
2797
2798         KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
2799         if (prot == VM_PROT_NONE) {
2800                 pmap_remove(pmap, sva, eva);
2801                 return;
2802         }
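             /*
              * Only the removal of write and/or execute permission is
              * handled here; if neither is being removed there is nothing
              * to do.
              */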
2803
2804         if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
2805             (VM_PROT_WRITE | VM_PROT_EXECUTE))
2806                 return;
2807
2808         PMAP_LOCK(pmap);
2809         for (; sva < eva; sva = va_next) {
2810
2811                 l0 = pmap_l0(pmap, sva);
2812                 if (pmap_load(l0) == 0) {
2813                         va_next = (sva + L0_SIZE) & ~L0_OFFSET;
2814                         if (va_next < sva)
2815                                 va_next = eva;
2816                         continue;
2817                 }
2818
2819                 l1 = pmap_l0_to_l1(l0, sva);
2820                 if (pmap_load(l1) == 0) {
2821                         va_next = (sva + L1_SIZE) & ~L1_OFFSET;
2822                         if (va_next < sva)
2823                                 va_next = eva;
2824                         continue;
2825                 }
2826
2827                 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2828                 if (va_next < sva)
2829                         va_next = eva;
2830
2831                 l2 = pmap_l1_to_l2(l1, sva);
2832                 if (pmap_load(l2) == 0)
2833                         continue;
2834
2835                 if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
2836                         l3p = pmap_demote_l2(pmap, l2, sva);
2837                         if (l3p == NULL)
2838                                 continue;
2839                 }
2840                 KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
2841                     ("pmap_protect: Invalid L2 entry after demotion"));
2842
2843                 if (va_next > eva)
2844                         va_next = eva;
2845
2846                 va = va_next;
2847                 for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
2848                     sva += L3_SIZE) {
2849                         l3 = pmap_load(l3p);
2850                         if (!pmap_l3_valid(l3)) {
2851                                 if (va != va_next) {
2852                                         pmap_invalidate_range(pmap, va, sva);
2853                                         va = va_next;
2854                                 }
2855                                 continue;
2856                         }
2857                         if (va == va_next)
2858                                 va = sva;
2859
2860                         nbits = 0;
2861                         if ((prot & VM_PROT_WRITE) == 0) {
2862                                 if ((l3 & ATTR_SW_MANAGED) &&
2863                                     pmap_page_dirty(l3)) {
2864                                         vm_page_dirty(PHYS_TO_VM_PAGE(l3 &
2865                                             ~ATTR_MASK));
2866                                 }
2867                                 nbits |= ATTR_AP(ATTR_AP_RO);
2868                         }
2869                         if ((prot & VM_PROT_EXECUTE) == 0)
2870                                 nbits |= ATTR_XN;
2871
2872                         pmap_set(l3p, nbits);
2873                 }
2874                 if (va != va_next)
2875                         pmap_invalidate_range(pmap, va, sva);
2876         }
2877         PMAP_UNLOCK(pmap);
2878 }
2879
2880 /*
2881  * Inserts the specified page table page into the specified pmap's collection
2882  * of idle page table pages.  Each of a pmap's page table pages is responsible
2883  * for mapping a distinct range of virtual addresses.  The pmap's collection is
2884  * ordered by this virtual address range.
2885  *
2886  * If "promoted" is false, then the page table page "mpte" must be zero filled.
2887  */
2888 static __inline int
2889 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
2890 {
2891
2892         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2893         mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
2894         return (vm_radix_insert(&pmap->pm_root, mpte));
2895 }
2896
2897 /*
2898  * Removes the page table page mapping the specified virtual address from the
2899  * specified pmap's collection of idle page table pages, and returns it.
2900  * Otherwise, returns NULL if there is no page table page corresponding to the
2901  * specified virtual address.
2902  */
2903 static __inline vm_page_t
2904 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
2905 {
2906
2907         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2908         return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
2909 }
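/*
 * Illustrative note: pm_root is a radix trie keyed by pmap_l2_pindex(va).
 * pmap_promote_l2() parks the now-idle L3 page table page here with
 * pmap_insert_pt_page(), and a later demotion (or removal of the 2MB
 * mapping) can retrieve it with pmap_remove_pt_page() instead of allocating
 * a fresh page, roughly:
 *
 *	(void)pmap_insert_pt_page(pmap, mpte, true);	promotion side
 *	...
 *	ml3 = pmap_remove_pt_page(pmap, va);		demotion side
 *
 * Both calls assume the pmap lock is held, as the assertions above require.
 */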
2910
2911 /*
2912  * Performs a break-before-make update of a pmap entry. This is needed when
2913  * either promoting or demoting pages to ensure the TLB doesn't get into an
2914  * inconsistent state.
2915  */
2916 static void
2917 pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
2918     vm_offset_t va, vm_size_t size)
2919 {
2920         register_t intr;
2921
2922         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2923
2924         /*
2925          * Ensure we don't get switched out with the page table in an
2926          * inconsistent state. We also need to ensure no interrupts fire
2927          * as they may make use of an address we are about to invalidate.
2928          */
2929         intr = intr_disable();
2930         critical_enter();
2931
2932         /* Clear the old mapping */
2933         pmap_clear(pte);
2934         pmap_invalidate_range_nopin(pmap, va, va + size);
2935
2936         /* Create the new mapping */
2937         pmap_load_store(pte, newpte);
2938         dsb(ishst);
2939
2940         critical_exit();
2941         intr_restore(intr);
2942 }
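/*
 * Illustrative note: this is the ARMv8 "break-before-make" sequence; the
 * entry is invalidated and the TLB flushed before the replacement descriptor
 * is written, so the TLB never holds two conflicting translations for the
 * same VA.  pmap_promote_l2() below uses it roughly as follows:
 *
 *	newl2 &= ~ATTR_DESCR_MASK;
 *	newl2 |= L2_BLOCK;
 *	pmap_update_entry(pmap, l2, newl2, sva, L2_SIZE);
 */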
2943
2944 #if VM_NRESERVLEVEL > 0
2945 /*
2946  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
2947  * replace the many pv entries for the 4KB page mappings by a single pv entry
2948  * for the 2MB page mapping.
2949  */
2950 static void
2951 pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
2952     struct rwlock **lockp)
2953 {
2954         struct md_page *pvh;
2955         pv_entry_t pv;
2956         vm_offset_t va_last;
2957         vm_page_t m;
2958
2959         KASSERT((pa & L2_OFFSET) == 0,
2960             ("pmap_pv_promote_l2: pa is not 2mpage aligned"));
2961         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2962
2963         /*
2964          * Transfer the first page's pv entry for this mapping to the 2mpage's
2965          * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
2966          * a transfer avoids the possibility that get_pv_entry() calls
2967          * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
2968          * mappings that is being promoted.
2969          */
2970         m = PHYS_TO_VM_PAGE(pa);
2971         va = va & ~L2_OFFSET;
2972         pv = pmap_pvh_remove(&m->md, pmap, va);
2973         KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv not found"));
2974         pvh = pa_to_pvh(pa);
2975         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
2976         pvh->pv_gen++;
2977         /* Free the remaining NPTEPG - 1 pv entries. */
2978         va_last = va + L2_SIZE - PAGE_SIZE;
2979         do {
2980                 m++;
2981                 va += PAGE_SIZE;
2982                 pmap_pvh_free(&m->md, pmap, va);
2983         } while (va < va_last);
2984 }
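/*
 * Illustrative note: with 4KB base pages a 2MB block covers
 * L2_SIZE / PAGE_SIZE == 512 constituent pages, so the loop above keeps a
 * single pv entry for the 2mpage and frees the remaining 511.  For example,
 * if va == 0x40200000, the entries for 0x40201000 through 0x403ff000
 * (inclusive) are freed.
 */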
2985
2986 /*
2987  * Tries to promote the 512, contiguous 4KB page mappings that are within a
2988  * single level 2 table entry to a single 2MB page mapping.  For promotion
2989  * to occur, two conditions must be met: (1) the 4KB page mappings must map
2990  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
2991  * identical characteristics.
2992  */
2993 static void
2994 pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
2995     struct rwlock **lockp)
2996 {
2997         pt_entry_t *firstl3, *l3, newl2, oldl3, pa;
2998         vm_page_t mpte;
2999         vm_offset_t sva;
3000
3001         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3002
3003         sva = va & ~L2_OFFSET;
3004         firstl3 = pmap_l2_to_l3(l2, sva);
3005         newl2 = pmap_load(firstl3);
3006
3007         /* Check that the alignment is valid */
3008         if (((newl2 & ~ATTR_MASK) & L2_OFFSET) != 0) {
3009                 atomic_add_long(&pmap_l2_p_failures, 1);
3010                 CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
3011                     " in pmap %p", va, pmap);
3012                 return;
3013         }
3014
3015         pa = newl2 + L2_SIZE - PAGE_SIZE;
3016         for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
3017                 oldl3 = pmap_load(l3);
3018                 if (oldl3 != pa) {
3019                         atomic_add_long(&pmap_l2_p_failures, 1);
3020                         CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
3021                             " in pmap %p", va, pmap);
3022                         return;
3023                 }
3024                 pa -= PAGE_SIZE;
3025         }
3026
3027         /*
3028          * Save the page table page in its current state until the L2
3029          * mapping the superpage is demoted by pmap_demote_l2() or
3030          * destroyed by pmap_remove_l3().
3031          */
3032         mpte = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
3033         KASSERT(mpte >= vm_page_array &&
3034             mpte < &vm_page_array[vm_page_array_size],
3035             ("pmap_promote_l2: page table page is out of range"));
3036         KASSERT(mpte->pindex == pmap_l2_pindex(va),
3037             ("pmap_promote_l2: page table page's pindex is wrong"));
3038         if (pmap_insert_pt_page(pmap, mpte, true)) {
3039                 atomic_add_long(&pmap_l2_p_failures, 1);
3040                 CTR2(KTR_PMAP,
3041                     "pmap_promote_l2: failure for va %#lx in pmap %p", va,
3042                     pmap);
3043                 return;
3044         }
3045
3046         if ((newl2 & ATTR_SW_MANAGED) != 0)
3047                 pmap_pv_promote_l2(pmap, va, newl2 & ~ATTR_MASK, lockp);
3048
3049         newl2 &= ~ATTR_DESCR_MASK;
3050         newl2 |= L2_BLOCK;
3051
3052         pmap_update_entry(pmap, l2, newl2, sva, L2_SIZE);
3053
3054         atomic_add_long(&pmap_l2_promotions, 1);
3055         CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
3056             pmap);
3057 }
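/*
 * Illustrative note: because the loop above compares each raw L3 descriptor
 * against "pa", promotion demands both physical contiguity and identical
 * attribute bits.  For example, if the first PTE maps PA 0x40200000 with a
 * given attribute set, the 512th PTE must equal exactly
 * (0x403ff000 | those same attributes); a difference in any bit, even a
 * permission or software bit, aborts the promotion and increments
 * pmap_l2_p_failures.
 */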
3058 #endif /* VM_NRESERVLEVEL > 0 */
3059
3060 /*
3061  *      Insert the given physical page (p) at
3062  *      the specified virtual address (v) in the
3063  *      target physical map with the protection requested.
3064  *
3065  *      If specified, the page will be wired down, meaning
3066  *      that the related pte can not be reclaimed.
3067  *
3068  *      NB:  This is the only routine which MAY NOT lazy-evaluate
3069  *      or lose information.  That is, this routine must actually
3070  *      insert this page into the given map NOW.
3071  */
3072 int
3073 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3074     u_int flags, int8_t psind)
3075 {
3076         struct rwlock *lock;
3077         pd_entry_t *pde;
3078         pt_entry_t new_l3, orig_l3;
3079         pt_entry_t *l2, *l3;
3080         pv_entry_t pv;
3081         vm_paddr_t opa, pa;
3082         vm_page_t mpte, om;
3083         boolean_t nosleep;
3084         int lvl, rv;
3085
3086         va = trunc_page(va);
3087         if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
3088                 VM_OBJECT_ASSERT_LOCKED(m->object);
3089         pa = VM_PAGE_TO_PHYS(m);
3090         new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
3091             L3_PAGE);
3092         if ((prot & VM_PROT_WRITE) == 0)
3093                 new_l3 |= ATTR_AP(ATTR_AP_RO);
3094         if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
3095                 new_l3 |= ATTR_XN;
3096         if ((flags & PMAP_ENTER_WIRED) != 0)
3097                 new_l3 |= ATTR_SW_WIRED;
3098         if (va < VM_MAXUSER_ADDRESS)
3099                 new_l3 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
3100         if ((m->oflags & VPO_UNMANAGED) == 0)
3101                 new_l3 |= ATTR_SW_MANAGED;
3102
3103         CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
3104
3105         lock = NULL;
3106         PMAP_LOCK(pmap);
3107         if (psind == 1) {
3108                 /* Assert the required virtual and physical alignment. */
3109                 KASSERT((va & L2_OFFSET) == 0, ("pmap_enter: va unaligned"));
3110                 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
3111                 rv = pmap_enter_l2(pmap, va, (new_l3 & ~L3_PAGE) | L2_BLOCK,
3112                     flags, m, &lock);
3113                 goto out;
3114         }
3115         mpte = NULL;
3116
3117         /*
3118          * In the case that a page table page is not
3119          * resident, we are creating it here.
3120          */
3121 retry:
3122         pde = pmap_pde(pmap, va, &lvl);
3123         if (pde != NULL && lvl == 2) {
3124                 l3 = pmap_l2_to_l3(pde, va);
3125                 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
3126                         mpte = PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
3127                         mpte->wire_count++;
3128                 }
3129                 goto havel3;
3130         } else if (pde != NULL && lvl == 1) {
3131                 l2 = pmap_l1_to_l2(pde, va);
3132                 if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
3133                     (l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
3134                         l3 = &l3[pmap_l3_index(va)];
3135                         if (va < VM_MAXUSER_ADDRESS) {
3136                                 mpte = PHYS_TO_VM_PAGE(
3137                                     pmap_load(l2) & ~ATTR_MASK);
3138                                 mpte->wire_count++;
3139                         }
3140                         goto havel3;
3141                 }
3142                 /* We need to allocate an L3 table. */
3143         }
3144         if (va < VM_MAXUSER_ADDRESS) {
3145                 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
3146
3147                 /*
3148                  * We use _pmap_alloc_l3() instead of pmap_alloc_l3() in order
3149                  * to handle the possibility that a superpage mapping for "va"
3150                  * was created while we slept.
3151                  */
3152                 mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va),
3153                     nosleep ? NULL : &lock);
3154                 if (mpte == NULL && nosleep) {
3155                         CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
3156                         rv = KERN_RESOURCE_SHORTAGE;
3157                         goto out;
3158                 }
3159                 goto retry;
3160         } else
3161                 panic("pmap_enter: missing L3 table for kernel va %#lx", va);
3162
3163 havel3:
3164         orig_l3 = pmap_load(l3);
3165         opa = orig_l3 & ~ATTR_MASK;
3166         pv = NULL;
3167
3168         /*
3169          * Is the specified virtual address already mapped?
3170          */
3171         if (pmap_l3_valid(orig_l3)) {
3172                 /*
3173                  * Wiring change, just update stats. We don't worry about
3174                  * wiring PT pages as they remain resident as long as there
3175                  * are valid mappings in them. Hence, if a user page is wired,
3176                  * the PT page will be also.
3177                  */
3178                 if ((flags & PMAP_ENTER_WIRED) != 0 &&
3179                     (orig_l3 & ATTR_SW_WIRED) == 0)
3180                         pmap->pm_stats.wired_count++;
3181                 else if ((flags & PMAP_ENTER_WIRED) == 0 &&
3182                     (orig_l3 & ATTR_SW_WIRED) != 0)
3183                         pmap->pm_stats.wired_count--;
3184
3185                 /*
3186                  * Remove the extra PT page reference.
3187                  */
3188                 if (mpte != NULL) {
3189                         mpte->wire_count--;
3190                         KASSERT(mpte->wire_count > 0,
3191                             ("pmap_enter: missing reference to page table page,"
3192                              " va: 0x%lx", va));
3193                 }
3194
3195                 /*
3196                  * Has the physical page changed?
3197                  */
3198                 if (opa == pa) {
3199                         /*
3200                          * No, might be a protection or wiring change.
3201                          */
3202                         if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
3203                                 if ((new_l3 & ATTR_AP(ATTR_AP_RW)) ==
3204                                     ATTR_AP(ATTR_AP_RW)) {
3205                                         vm_page_aflag_set(m, PGA_WRITEABLE);
3206                                 }
3207                         }
3208                         goto validate;
3209                 }
3210
3211                 /*
3212                  * The physical page has changed.  Temporarily invalidate
3213                  * the mapping.
3214                  */
3215                 orig_l3 = pmap_load_clear(l3);
3216                 KASSERT((orig_l3 & ~ATTR_MASK) == opa,
3217                     ("pmap_enter: unexpected pa update for %#lx", va));
3218                 if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
3219                         om = PHYS_TO_VM_PAGE(opa);
3220
3221                         /*
3222                          * The pmap lock is sufficient to synchronize with
3223                          * concurrent calls to pmap_page_test_mappings() and
3224                          * pmap_ts_referenced().
3225                          */
3226                         if (pmap_page_dirty(orig_l3))
3227                                 vm_page_dirty(om);
3228                         if ((orig_l3 & ATTR_AF) != 0)
3229                                 vm_page_aflag_set(om, PGA_REFERENCED);
3230                         CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
3231                         pv = pmap_pvh_remove(&om->md, pmap, va);
3232                         if ((m->oflags & VPO_UNMANAGED) != 0)
3233                                 free_pv_entry(pmap, pv);
3234                         if ((om->aflags & PGA_WRITEABLE) != 0 &&
3235                             TAILQ_EMPTY(&om->md.pv_list) &&
3236                             ((om->flags & PG_FICTITIOUS) != 0 ||
3237                             TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
3238                                 vm_page_aflag_clear(om, PGA_WRITEABLE);
3239                 }
3240                 pmap_invalidate_page(pmap, va);
3241                 orig_l3 = 0;
3242         } else {
3243                 /*
3244                  * Increment the counters.
3245                  */
3246                 if ((new_l3 & ATTR_SW_WIRED) != 0)
3247                         pmap->pm_stats.wired_count++;
3248                 pmap_resident_count_inc(pmap, 1);
3249         }
3250         /*
3251          * Enter on the PV list if part of our managed memory.
3252          */
3253         if ((m->oflags & VPO_UNMANAGED) == 0) {
3254                 if (pv == NULL) {
3255                         pv = get_pv_entry(pmap, &lock);
3256                         pv->pv_va = va;
3257                 }
3258                 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
3259                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3260                 m->md.pv_gen++;
3261                 if ((new_l3 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
3262                         vm_page_aflag_set(m, PGA_WRITEABLE);
3263         }
3264
3265 validate:
3266         /*
3267          * Sync the icache if the page has execute permission and the
3268          * VM_MEMATTR_WRITE_BACK attribute is set.  Do it now, before the
3269          * mapping is stored and made valid for hardware table walks; if done
3270          * later, other CPUs could access the page before the caches are
3271          * properly synced.  Don't do it for kernel memory, which is mapped
3272          * with exec permission even if it isn't going to hold executable
3273          * code.  The only time an icache sync is needed for kernel memory is
3274          * after a kernel module is loaded and its relocation info is
3275          * processed, which is done in elf_cpu_load_file().
3276          */
3277         if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
3278             m->md.pv_memattr == VM_MEMATTR_WRITE_BACK &&
3279             (opa != pa || (orig_l3 & ATTR_XN)))
3280                 cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
3281
3282         /*
3283          * Update the L3 entry
3284          */
3285         if (pmap_l3_valid(orig_l3)) {
3286                 KASSERT(opa == pa, ("pmap_enter: invalid update"));
3287                 if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) {
3288                         /* same PA, different attributes */
3289                         pmap_load_store(l3, new_l3);
3290                         pmap_invalidate_page(pmap, va);
3291                         if (pmap_page_dirty(orig_l3) &&
3292                             (orig_l3 & ATTR_SW_MANAGED) != 0)
3293                                 vm_page_dirty(m);
3294                 } else {
3295                         /*
3296                          * orig_l3 == new_l3
3297                          * This can happen if multiple threads simultaneously
3298                          * access a not-yet-mapped page.  It is bad for
3299                          * performance, since it can cause a full
3300                          * demotion-NOP-promotion cycle.
3301                          * Other possible reasons are:
3302                          * - the VM and pmap memory layouts have diverged, or
3303                          * - a TLB flush is missing somewhere and the CPU does
3304                          *   not see the actual mapping.
3305                          */
3306                         CTR4(KTR_PMAP, "%s: already mapped page - "
3307                             "pmap %p va 0x%#lx pte 0x%lx",
3308                             __func__, pmap, va, new_l3);
3309                 }
3310         } else {
3311                 /* New mapping */
3312                 pmap_load_store(l3, new_l3);
3313                 dsb(ishst);
3314         }
3315
3316 #if VM_NRESERVLEVEL > 0
3317         if (pmap != pmap_kernel() &&
3318             (mpte == NULL || mpte->wire_count == NL3PG) &&
3319             pmap_ps_enabled(pmap) &&
3320             (m->flags & PG_FICTITIOUS) == 0 &&
3321             vm_reserv_level_iffullpop(m) == 0) {
3322                 pmap_promote_l2(pmap, pde, va, &lock);
3323         }
3324 #endif
3325
3326         rv = KERN_SUCCESS;
3327 out:
3328         if (lock != NULL)
3329                 rw_wunlock(lock);
3330         PMAP_UNLOCK(pmap);
3331         return (rv);
3332 }
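/*
 * Illustrative note: psind == 1 requests a 2MB mapping and is handed to
 * pmap_enter_l2(); the assertions above require an L2-aligned "va" and
 * m->psind > 0 in that case.  An ordinary 4KB mapping passes psind == 0,
 * e.g.
 *
 *	rv = pmap_enter(pmap, va, m, prot, flags, 0);
 *
 * In the 4KB path, KERN_RESOURCE_SHORTAGE is returned only when
 * PMAP_ENTER_NOSLEEP is set and the L3 page table page cannot be allocated;
 * otherwise the allocation may sleep and the function returns KERN_SUCCESS.
 */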
3333
3334 /*
3335  * Tries to create a read- and/or execute-only 2MB page mapping.  Returns true
3336  * if successful.  Returns false if (1) a page table page cannot be allocated
3337  * without sleeping, (2) a mapping already exists at the specified virtual
3338  * address, or (3) a PV entry cannot be allocated without reclaiming another
3339  * PV entry.
3340  */
3341 static bool
3342 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3343     struct rwlock **lockp)
3344 {
3345         pd_entry_t new_l2;
3346
3347         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3348
3349         new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
3350             ATTR_IDX(m->md.pv_memattr) | ATTR_AP(ATTR_AP_RO) | L2_BLOCK);
3351         if ((m->oflags & VPO_UNMANAGED) == 0)
3352                 new_l2 |= ATTR_SW_MANAGED;
3353         if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
3354                 new_l2 |= ATTR_XN;
3355         if (va < VM_MAXUSER_ADDRESS)
3356                 new_l2 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
3357         return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
3358             PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
3359             KERN_SUCCESS);
3360 }
3361
3362 /*
3363  * Tries to create the specified 2MB page mapping.  Returns KERN_SUCCESS if
3364  * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
3365  * otherwise.  Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
3366  * a mapping already exists at the specified virtual address.  Returns
3367  * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
3368  * page allocation failed.  Returns KERN_RESOURCE_SHORTAGE if
3369  * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
3370  *
3371  * The parameter "m" is only used when creating a managed, writeable mapping.
3372  */
3373 static int
3374 pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
3375     vm_page_t m, struct rwlock **lockp)
3376 {
3377         struct spglist free;
3378         pd_entry_t *l2, old_l2;
3379         vm_page_t l2pg, mt;
3380
3381         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3382
3383         if ((l2pg = pmap_alloc_l2(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
3384             NULL : lockp)) == NULL) {
3385                 CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p",
3386                     va, pmap);
3387                 return (KERN_RESOURCE_SHORTAGE);
3388         }
3389
3390         l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
3391         l2 = &l2[pmap_l2_index(va)];
3392         if ((old_l2 = pmap_load(l2)) != 0) {
3393                 KASSERT(l2pg->wire_count > 1,
3394                     ("pmap_enter_l2: l2pg's wire count is too low"));
3395                 if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
3396                         l2pg->wire_count--;
3397                         CTR2(KTR_PMAP,
3398                             "pmap_enter_l2: failure for va %#lx in pmap %p",
3399                             va, pmap);
3400                         return (KERN_FAILURE);
3401                 }
3402                 SLIST_INIT(&free);
3403                 if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK)
3404                         (void)pmap_remove_l2(pmap, l2, va,
3405                             pmap_load(pmap_l1(pmap, va)), &free, lockp);
3406                 else
3407                         pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
3408                             &free, lockp);
3409                 vm_page_free_pages_toq(&free, true);
3410                 if (va >= VM_MAXUSER_ADDRESS) {
3411                         /*
3412                          * Both pmap_remove_l2() and pmap_remove_l3() will
3413                          * leave the kernel page table page zero filled.
3414                          */
3415                         mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
3416                         if (pmap_insert_pt_page(pmap, mt, false))
3417                                 panic("pmap_enter_l2: trie insert failed");
3418                 } else
3419                         KASSERT(pmap_load(l2) == 0,
3420                             ("pmap_enter_l2: non-zero L2 entry %p", l2));
3421         }
3422
3423         if ((new_l2 & ATTR_SW_MANAGED) != 0) {
3424                 /*
3425                  * Abort this mapping if its PV entry could not be created.
3426                  */
3427                 if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
3428                         SLIST_INIT(&free);
3429                         if (pmap_unwire_l3(pmap, va, l2pg, &free)) {
3430                                 /*
3431                                  * Although "va" is not mapped, paging-structure
3432                                  * caches could nonetheless have entries that
3433                                  * refer to the freed page table pages.
3434                                  * Invalidate those entries.
3435                                  */
3436                                 pmap_invalidate_page(pmap, va);
3437                                 vm_page_free_pages_toq(&free, true);
3438                         }
3439                         CTR2(KTR_PMAP,
3440                             "pmap_enter_l2: failure for va %#lx in pmap %p",
3441                             va, pmap);
3442                         return (KERN_RESOURCE_SHORTAGE);
3443                 }
3444                 if ((new_l2 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
3445                         for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
3446                                 vm_page_aflag_set(mt, PGA_WRITEABLE);
3447         }
3448
3449         /*
3450          * Increment counters.
3451          */
3452         if ((new_l2 & ATTR_SW_WIRED) != 0)
3453                 pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE;
3454         pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE;
3455
3456         /*
3457          * Map the superpage.
3458          */
3459         (void)pmap_load_store(l2, new_l2);
3460         dsb(ishst);
3461
3462         atomic_add_long(&pmap_l2_mappings, 1);
3463         CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p",
3464             va, pmap);
3465
3466         return (KERN_SUCCESS);
3467 }
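/*
 * Illustrative note: when an old mapping exists and PMAP_ENTER_NOREPLACE is
 * not given, the code above first tears it down: an existing 2MB block via
 * pmap_remove_l2(), or a 4KB-backed range via pmap_remove_l3_range().  For
 * kernel addresses the emptied L3 page table page is kept, zero filled, in
 * pm_root via pmap_insert_pt_page(..., false) so that a later demotion of
 * the new 2MB mapping can reuse it.
 */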
3468
3469 /*
3470  * Maps a sequence of resident pages belonging to the same object.
3471  * The sequence begins with the given page m_start.  This page is
3472  * mapped at the given virtual address start.  Each subsequent page is
3473  * mapped at a virtual address that is offset from start by the same
3474  * amount as the page is offset from m_start within the object.  The
3475  * last page in the sequence is the page with the largest offset from
3476  * m_start that can be mapped at a virtual address less than the given
3477  * virtual address end.  Not every virtual page between start and end
3478  * is mapped; only those for which a resident page exists with the
3479  * corresponding offset from m_start are mapped.
3480  */
3481 void
3482 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
3483     vm_page_t m_start, vm_prot_t prot)
3484 {
3485         struct rwlock *lock;
3486         vm_offset_t va;
3487         vm_page_t m, mpte;
3488         vm_pindex_t diff, psize;
3489
3490         VM_OBJECT_ASSERT_LOCKED(m_start->object);
3491
3492         psize = atop(end - start);
3493         mpte = NULL;
3494         m = m_start;
3495         lock = NULL;
3496         PMAP_LOCK(pmap);
3497         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3498                 va = start + ptoa(diff);
3499                 if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end &&
3500                     m->psind == 1 && pmap_ps_enabled(pmap) &&
3501                     pmap_enter_2mpage(pmap, va, m, prot, &lock))
3502                         m = &m[L2_SIZE / PAGE_SIZE - 1];
3503                 else
3504                         mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte,
3505                             &lock);
3506                 m = TAILQ_NEXT(m, listq);
3507         }
3508         if (lock != NULL)
3509                 rw_wunlock(lock);
3510         PMAP_UNLOCK(pmap);
3511 }
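/*
 * Illustrative note: the loop above takes the superpage path when "va" is
 * L2-aligned, at least L2_SIZE remains before "end", m->psind == 1, and
 * superpages are enabled; a single pmap_enter_2mpage() call then stands in
 * for 512 individual 4KB entries and the loop advances "m" by
 * L2_SIZE / PAGE_SIZE - 1 pages before taking TAILQ_NEXT().
 */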
3512
3513 /*
3514  * This code makes some *MAJOR* assumptions:
3515  * 1. The current pmap and the given pmap exist.
3516  * 2. The mapping is not wired.
3517  * 3. Read access only.
3518  * 4. Page table page allocation never sleeps (failure gives up).
3519  * It is, however, *MUCH* faster than pmap_enter...
3520  */
3521
3522 void
3523 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3524 {
3525         struct rwlock *lock;
3526
3527         lock = NULL;
3528         PMAP_LOCK(pmap);
3529         (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
3530         if (lock != NULL)
3531                 rw_wunlock(lock);
3532         PMAP_UNLOCK(pmap);
3533 }
3534
3535 static vm_page_t
3536 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3537     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
3538 {
3539         struct spglist free;
3540         pd_entry_t *pde;
3541         pt_entry_t *l2, *l3, l3_val;
3542         vm_paddr_t pa;
3543         int lvl;
3544
3545         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
3546             (m->oflags & VPO_UNMANAGED) != 0,
3547             ("pmap_enter_quick_locked: managed mapping within the clean submap"));
3548         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3549
3550         CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
3551         /*
3552          * In the case that a page table page is not
3553          * resident, we are creating it here.
3554          */
3555         if (va < VM_MAXUSER_ADDRESS) {
3556                 vm_pindex_t l2pindex;
3557
3558                 /*
3559                  * Calculate pagetable page index
3560                  */
3561                 l2pindex = pmap_l2_pindex(va);
3562                 if (mpte && (mpte->pindex == l2pindex)) {
3563                         mpte->wire_count++;
3564                 } else {
3565                         /*
3566                          * Get the l2 entry
3567                          */
3568                         pde = pmap_pde(pmap, va, &lvl);
3569
3570                         /*
3571                          * If the page table page is mapped, we just increment
3572                          * the hold count, and activate it.  Otherwise, we
3573                          * attempt to allocate a page table page.  If this
3574                          * attempt fails, we don't retry.  Instead, we give up.
3575                          */
3576                         if (lvl == 1) {
3577                                 l2 = pmap_l1_to_l2(pde, va);
3578                                 if ((pmap_load(l2) & ATTR_DESCR_MASK) ==
3579                                     L2_BLOCK)
3580                                         return (NULL);
3581                         }
3582                         if (lvl == 2 && pmap_load(pde) != 0) {
3583                                 mpte =
3584                                     PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
3585                                 mpte->wire_count++;
3586                         } else {
3587                                 /*
3588                                  * Pass NULL instead of the PV list lock
3589                                  * pointer, because we don't intend to sleep.
3590                                  */
3591                                 mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
3592                                 if (mpte == NULL)
3593                                         return (mpte);
3594                         }
3595                 }
3596                 l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
3597                 l3 = &l3[pmap_l3_index(va)];
3598         } else {
3599                 mpte = NULL;
3600                 pde = pmap_pde(kernel_pmap, va, &lvl);
3601                 KASSERT(pde != NULL,
3602                     ("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx",
3603                      va));
3604                 KASSERT(lvl == 2,
3605                     ("pmap_enter_quick_locked: Invalid level %d", lvl));
3606                 l3 = pmap_l2_to_l3(pde, va);
3607         }
3608
3609         /*
3610          * Abort if a mapping already exists.
3611          */
3612         if (pmap_load(l3) != 0) {
3613                 if (mpte != NULL) {
3614                         mpte->wire_count--;
3615                         mpte = NULL;
3616                 }
3617                 return (mpte);
3618         }
3619
3620         /*
3621          * Enter on the PV list if part of our managed memory.
3622          */
3623         if ((m->oflags & VPO_UNMANAGED) == 0 &&
3624             !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
3625                 if (mpte != NULL) {
3626                         SLIST_INIT(&free);
3627                         if (pmap_unwire_l3(pmap, va, mpte, &free)) {
3628                                 pmap_invalidate_page(pmap, va);
3629                                 vm_page_free_pages_toq(&free, true);
3630                         }
3631                         mpte = NULL;
3632                 }
3633                 return (mpte);
3634         }
3635
3636         /*
3637          * Increment counters
3638          */
3639         pmap_resident_count_inc(pmap, 1);
3640
3641         pa = VM_PAGE_TO_PHYS(m);
3642         l3_val = pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
3643             ATTR_AP(ATTR_AP_RO) | L3_PAGE;
3644         if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
3645                 l3_val |= ATTR_XN;
3646         else if (va < VM_MAXUSER_ADDRESS)
3647                 l3_val |= ATTR_PXN;
3648
3649         /*
3650          * Now validate mapping with RO protection
3651          */
3652         if ((m->oflags & VPO_UNMANAGED) == 0)
3653                 l3_val |= ATTR_SW_MANAGED;
3654
3655         /* Sync icache before the mapping is stored to PTE */
3656         if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
3657             m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
3658                 cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
3659
3660         pmap_load_store(l3, l3_val);
3661         dsb(ishst);
3662
3663         return (mpte);
3664 }
3665
3666 /*
3667  * This code would map large physical mmap regions into the processor
3668  * address space.  On arm64 it is currently a no-op beyond the
3669  * object-type assertions below.
3670  */
3671 void
3672 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
3673     vm_pindex_t pindex, vm_size_t size)
3674 {
3675
3676         VM_OBJECT_ASSERT_WLOCKED(object);
3677         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3678             ("pmap_object_init_pt: non-device object"));
3679 }
3680
3681 /*
3682  *      Clear the wired attribute from the mappings for the specified range of
3683  *      addresses in the given pmap.  Every valid mapping within that range
3684  *      must have the wired attribute set.  In contrast, invalid mappings
3685  *      cannot have the wired attribute set, so they are ignored.
3686  *
3687  *      The wired attribute of the page table entry is not a hardware feature,
3688  *      so there is no need to invalidate any TLB entries.
3689  */
3690 void
3691 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3692 {
3693         vm_offset_t va_next;
3694         pd_entry_t *l0, *l1, *l2;
3695         pt_entry_t *l3;
3696
3697         PMAP_LOCK(pmap);
3698         for (; sva < eva; sva = va_next) {
3699                 l0 = pmap_l0(pmap, sva);
3700                 if (pmap_load(l0) == 0) {
3701                         va_next = (sva + L0_SIZE) & ~L0_OFFSET;
3702                         if (va_next < sva)
3703                                 va_next = eva;
3704                         continue;
3705                 }
3706
3707                 l1 = pmap_l0_to_l1(l0, sva);
3708                 if (pmap_load(l1) == 0) {
3709                         va_next = (sva + L1_SIZE) & ~L1_OFFSET;
3710                         if (va_next < sva)
3711                                 va_next = eva;
3712                         continue;
3713                 }
3714
3715                 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
3716                 if (va_next < sva)
3717                         va_next = eva;
3718
3719                 l2 = pmap_l1_to_l2(l1, sva);
3720                 if (pmap_load(l2) == 0)
3721                         continue;
3722
3723                 if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
3724                         l3 = pmap_demote_l2(pmap, l2, sva);
3725                         if (l3 == NULL)
3726                                 continue;
3727                 }
3728                 KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
3729                     ("pmap_unwire: Invalid l2 entry after demotion"));
3730
3731                 if (va_next > eva)
3732                         va_next = eva;
3733                 for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
3734                     sva += L3_SIZE) {
3735                         if (pmap_load(l3) == 0)
3736                                 continue;
3737                         if ((pmap_load(l3) & ATTR_SW_WIRED) == 0)
3738                                 panic("pmap_unwire: l3 %#jx is missing "
3739                                     "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3));
3740
3741                         /*
3742                          * ATTR_SW_WIRED must be cleared atomically.  Although
3743                          * the pmap lock synchronizes access to it, another
3744                          * processor could be updating other PTE bits concurrently.
3745                          */
3746                         atomic_clear_long(l3, ATTR_SW_WIRED);
3747                         pmap->pm_stats.wired_count--;
3748                 }
3749         }
3750         PMAP_UNLOCK(pmap);
3751 }
3752
3753 /*
3754  *      Copy the range specified by src_addr/len
3755  *      from the source map to the range dst_addr/len
3756  *      in the destination map.
3757  *
3758  *      This routine is only advisory and need not do anything.
3759  *
3760  *      Because the executable mappings created by this routine are copied,
3761  *      it should not have to flush the instruction cache.
3762  */
3763 void
3764 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
3765     vm_offset_t src_addr)
3766 {
3767         struct rwlock *lock;
3768         struct spglist free;
3769         pd_entry_t *l0, *l1, *l2, srcptepaddr;
3770         pt_entry_t *dst_pte, ptetemp, *src_pte;
3771         vm_offset_t addr, end_addr, va_next;
3772         vm_page_t dst_l2pg, dstmpte, srcmpte;
3773
3774         if (dst_addr != src_addr)
3775                 return;
3776         end_addr = src_addr + len;
3777         lock = NULL;
3778         if (dst_pmap < src_pmap) {
3779                 PMAP_LOCK(dst_pmap);
3780                 PMAP_LOCK(src_pmap);
3781         } else {
3782                 PMAP_LOCK(src_pmap);
3783                 PMAP_LOCK(dst_pmap);
3784         }
3785         for (addr = src_addr; addr < end_addr; addr = va_next) {
3786                 l0 = pmap_l0(src_pmap, addr);
3787                 if (pmap_load(l0) == 0) {
3788                         va_next = (addr + L0_SIZE) & ~L0_OFFSET;
3789                         if (va_next < addr)
3790                                 va_next = end_addr;
3791                         continue;
3792                 }
3793                 l1 = pmap_l0_to_l1(l0, addr);
3794                 if (pmap_load(l1) == 0) {
3795                         va_next = (addr + L1_SIZE) & ~L1_OFFSET;
3796                         if (va_next < addr)
3797                                 va_next = end_addr;
3798                         continue;
3799                 }
3800                 va_next = (addr + L2_SIZE) & ~L2_OFFSET;
3801                 if (va_next < addr)
3802                         va_next = end_addr;
3803                 l2 = pmap_l1_to_l2(l1, addr);
3804                 srcptepaddr = pmap_load(l2);
3805                 if (srcptepaddr == 0)
3806                         continue;
3807                 if ((srcptepaddr & ATTR_DESCR_MASK) == L2_BLOCK) {
3808                         if ((addr & L2_OFFSET) != 0 ||
3809                             addr + L2_SIZE > end_addr)
3810                                 continue;
3811                         dst_l2pg = pmap_alloc_l2(dst_pmap, addr, NULL);
3812                         if (dst_l2pg == NULL)
3813                                 break;
3814                         l2 = (pd_entry_t *)
3815                             PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_l2pg));
3816                         l2 = &l2[pmap_l2_index(addr)];
3817                         if (pmap_load(l2) == 0 &&
3818                             ((srcptepaddr & ATTR_SW_MANAGED) == 0 ||
3819                             pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr,
3820                             PMAP_ENTER_NORECLAIM, &lock))) {
3821                                 (void)pmap_load_store(l2, srcptepaddr &
3822                                     ~ATTR_SW_WIRED);
3823                                 pmap_resident_count_inc(dst_pmap, L2_SIZE /
3824                                     PAGE_SIZE);
3825                                 atomic_add_long(&pmap_l2_mappings, 1);
3826                         } else
3827                                 dst_l2pg->wire_count--;
3828                         continue;
3829                 }
3830                 KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE,
3831                     ("pmap_copy: invalid L2 entry"));
3832                 srcptepaddr &= ~ATTR_MASK;
3833                 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
3834                 KASSERT(srcmpte->wire_count > 0,
3835                     ("pmap_copy: source page table page is unused"));
3836                 if (va_next > end_addr)
3837                         va_next = end_addr;
3838                 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
3839                 src_pte = &src_pte[pmap_l3_index(addr)];
3840                 dstmpte = NULL;
3841                 for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
3842                         ptetemp = pmap_load(src_pte);
3843
3844                         /*
3845                          * We only virtual copy managed pages.
3846                          */
3847                         if ((ptetemp & ATTR_SW_MANAGED) == 0)
3848                                 continue;
3849
3850                         if (dstmpte != NULL) {
3851                                 KASSERT(dstmpte->pindex == pmap_l2_pindex(addr),
3852                                     ("dstmpte pindex/addr mismatch"));
3853                                 dstmpte->wire_count++;
3854                         } else if ((dstmpte = pmap_alloc_l3(dst_pmap, addr,
3855                             NULL)) == NULL)
3856                                 goto out;
3857                         dst_pte = (pt_entry_t *)
3858                             PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
3859                         dst_pte = &dst_pte[pmap_l3_index(addr)];
3860                         if (pmap_load(dst_pte) == 0 &&
3861                             pmap_try_insert_pv_entry(dst_pmap, addr,
3862                             PHYS_TO_VM_PAGE(ptetemp & ~ATTR_MASK), &lock)) {
3863                                 /*
3864                                  * Clear the wired, modified, and accessed
3865                                  * (referenced) bits during the copy.
3866                                  *
3867                                  * XXX not yet
3868                                  */
3869                                 (void)pmap_load_store(dst_pte, ptetemp &
3870                                     ~ATTR_SW_WIRED);
3871                                 pmap_resident_count_inc(dst_pmap, 1);
3872                         } else {
3873                                 SLIST_INIT(&free);
3874                                 if (pmap_unwire_l3(dst_pmap, addr, dstmpte,
3875                                     &free)) {
3876                                         /*
3877                                          * Although "addr" is not mapped,
3878                                          * paging-structure caches could
3879                                          * nonetheless have entries that refer
3880                                          * to the freed page table pages.
3881                                          * Invalidate those entries.
3882                                          *
3883                                          * XXX redundant invalidation
3884                                          */
3885                                         pmap_invalidate_page(dst_pmap, addr);
3886                                         vm_page_free_pages_toq(&free, true);
3887                                 }
3888                                 goto out;
3889                         }
3890                         /* Have we copied all of the valid mappings? */ 
3891                         if (dstmpte->wire_count >= srcmpte->wire_count)
3892                                 break;
3893                 }
3894         }
3895 out:
3896         /*
3897          * XXX This barrier may not be needed because the destination pmap is
3898          * not active.
3899          */
3900         dsb(ishst);
3901
3902         if (lock != NULL)
3903                 rw_wunlock(lock);
3904         PMAP_UNLOCK(src_pmap);
3905         PMAP_UNLOCK(dst_pmap);
3906 }
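/*
 * Illustrative note: the early return above means this advisory copy only
 * runs when the source and destination ranges share the same virtual
 * addresses (typically the fork() path), and the two pmap locks are always
 * taken in ascending pointer order so that concurrent pmap_copy() calls
 * between the same pair of pmaps cannot deadlock against each other.
 */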
3907
3908 /*
3909  *      pmap_zero_page zeros the specified hardware page by mapping
3910  *      the page into KVM and using bzero to clear its contents.
3911  */
3912 void
3913 pmap_zero_page(vm_page_t m)
3914 {
3915         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3916
3917         pagezero((void *)va);
3918 }
3919
3920 /*
3921  *      pmap_zero_page_area zeros the specified hardware page by mapping
3922  *      the page into KVM and using bzero to clear its contents.
3923  *
3924  *      off and size may not cover an area beyond a single hardware page.
3925  */
3926 void
3927 pmap_zero_page_area(vm_page_t m, int off, int size)
3928 {
3929         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3930
3931         if (off == 0 && size == PAGE_SIZE)
3932                 pagezero((void *)va);
3933         else
3934                 bzero((char *)va + off, size);
3935 }
3936
3937 /*
3938  *      pmap_copy_page copies the specified (machine independent)
3939  *      page by mapping the page into virtual memory and using
3940  *      bcopy to copy the page, one machine dependent page at a
3941  *      time.
3942  */
3943 void
3944 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
3945 {
3946         vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
3947         vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
3948
3949         pagecopy((void *)src, (void *)dst);
3950 }
3951
3952 int unmapped_buf_allowed = 1;
3953
3954 void
3955 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
3956     vm_offset_t b_offset, int xfersize)
3957 {
3958         void *a_cp, *b_cp;
3959         vm_page_t m_a, m_b;
3960         vm_paddr_t p_a, p_b;
3961         vm_offset_t a_pg_offset, b_pg_offset;
3962         int cnt;
3963
3964         while (xfersize > 0) {
3965                 a_pg_offset = a_offset & PAGE_MASK;
3966                 m_a = ma[a_offset >> PAGE_SHIFT];
3967                 p_a = m_a->phys_addr;
3968                 b_pg_offset = b_offset & PAGE_MASK;
3969                 m_b = mb[b_offset >> PAGE_SHIFT];
3970                 p_b = m_b->phys_addr;
3971                 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
3972                 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
3973                 if (__predict_false(!PHYS_IN_DMAP(p_a))) {
3974                         panic("!DMAP a %lx", p_a);
3975                 } else {
3976                         a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset;
3977                 }
3978                 if (__predict_false(!PHYS_IN_DMAP(p_b))) {
3979                         panic("!DMAP b %lx", p_b);
3980                 } else {
3981                         b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset;
3982                 }
3983                 bcopy(a_cp, b_cp, cnt);
3984                 a_offset += cnt;
3985                 b_offset += cnt;
3986                 xfersize -= cnt;
3987         }
3988 }
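/*
 * Illustrative note: each iteration copies at most up to the nearer page
 * boundary of the two buffers.  For example, with a_pg_offset == 0xe00,
 * b_pg_offset == 0x200 and xfersize == 8192, the first chunk is
 * min(8192, 0x200, 0xe00) == 512 bytes; the next iteration then starts on a
 * fresh source page and copies the remaining 0xc00 bytes of the destination
 * page, and so on until xfersize is exhausted.
 */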
3989
3990 vm_offset_t
3991 pmap_quick_enter_page(vm_page_t m)
3992 {
3993
3994         return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
3995 }
3996
3997 void
3998 pmap_quick_remove_page(vm_offset_t addr)
3999 {
4000 }
4001
4002 /*
4003  * Returns true if the pmap's pv is one of the first
4004  * 16 pvs linked to from this page.  This count may
4005  * be changed upwards or downwards in the future; it
4006  * is only necessary that true be returned for a small
4007  * subset of pmaps for proper page aging.
4008  */
4009 boolean_t
4010 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
4011 {
4012         struct md_page *pvh;
4013         struct rwlock *lock;
4014         pv_entry_t pv;
4015         int loops = 0;
4016         boolean_t rv;
4017
4018         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4019             ("pmap_page_exists_quick: page %p is not managed", m));
4020         rv = FALSE;
4021         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4022         rw_rlock(lock);
4023         TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4024                 if (PV_PMAP(pv) == pmap) {
4025                         rv = TRUE;
4026                         break;
4027                 }
4028                 loops++;
4029                 if (loops >= 16)
4030                         break;
4031         }
4032         if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4033                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4034                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4035                         if (PV_PMAP(pv) == pmap) {
4036                                 rv = TRUE;
4037                                 break;
4038                         }
4039                         loops++;
4040                         if (loops >= 16)
4041                                 break;
4042                 }
4043         }
4044         rw_runlock(lock);
4045         return (rv);
4046 }
4047
4048 /*
4049  *      pmap_page_wired_mappings:
4050  *
4051  *      Return the number of managed mappings to the given physical page
4052  *      that are wired.
4053  */
4054 int
4055 pmap_page_wired_mappings(vm_page_t m)
4056 {
4057         struct rwlock *lock;
4058         struct md_page *pvh;
4059         pmap_t pmap;
4060         pt_entry_t *pte;
4061         pv_entry_t pv;
4062         int count, lvl, md_gen, pvh_gen;
4063
4064         if ((m->oflags & VPO_UNMANAGED) != 0)
4065                 return (0);
4066         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4067         rw_rlock(lock);
4068 restart:
4069         count = 0;
4070         TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4071                 pmap = PV_PMAP(pv);
4072                 if (!PMAP_TRYLOCK(pmap)) {
4073                         md_gen = m->md.pv_gen;
4074                         rw_runlock(lock);
4075                         PMAP_LOCK(pmap);
4076                         rw_rlock(lock);
4077                         if (md_gen != m->md.pv_gen) {
4078                                 PMAP_UNLOCK(pmap);
4079                                 goto restart;
4080                         }
4081                 }
4082                 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4083                 if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0)
4084                         count++;
4085                 PMAP_UNLOCK(pmap);
4086         }
4087         if ((m->flags & PG_FICTITIOUS) == 0) {
4088                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4089                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4090                         pmap = PV_PMAP(pv);
4091                         if (!PMAP_TRYLOCK(pmap)) {
4092                                 md_gen = m->md.pv_gen;
4093                                 pvh_gen = pvh->pv_gen;
4094                                 rw_runlock(lock);
4095                                 PMAP_LOCK(pmap);
4096                                 rw_rlock(lock);
4097                                 if (md_gen != m->md.pv_gen ||
4098                                     pvh_gen != pvh->pv_gen) {
4099                                         PMAP_UNLOCK(pmap);
4100                                         goto restart;
4101                                 }
4102                         }
4103                         pte = pmap_pte(pmap, pv->pv_va, &lvl);
4104                         if (pte != NULL &&
4105                             (pmap_load(pte) & ATTR_SW_WIRED) != 0)
4106                                 count++;
4107                         PMAP_UNLOCK(pmap);
4108                 }
4109         }
4110         rw_runlock(lock);
4111         return (count);
4112 }
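/*
 * Illustrative note: the restart protocol above handles the lock ordering
 * between the pv-list lock and the pmap lock.  If PMAP_TRYLOCK() fails, the
 * pv-list lock is dropped so the pmap lock can be acquired in the blocking
 * fashion, and the saved md_gen/pvh_gen generation counts detect whether the
 * pv lists changed while unlocked; if they did, the walk restarts from the
 * beginning rather than trusting a possibly stale "pv".
 */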
4113
4114 /*
4115  * Destroy all managed, non-wired mappings in the given user-space
4116  * pmap.  This pmap cannot be active on any processor besides the
4117  * caller.
4118  *
4119  * This function cannot be applied to the kernel pmap.  Moreover, it
4120  * is not intended for general use.  It is only to be used during
4121  * process termination.  Consequently, it can be implemented in ways
4122  * that make it faster than pmap_remove().  First, it can more quickly
4123  * destroy mappings by iterating over the pmap's collection of PV
4124  * entries, rather than searching the page table.  Second, it doesn't
4125  * have to test and clear the page table entries atomically, because
4126  * no processor is currently accessing the user address space.  In
4127  * particular, a page table entry's dirty bit won't change state once
4128  * this function starts.
4129  */
4130 void
4131 pmap_remove_pages(pmap_t pmap)
4132 {
4133         pd_entry_t *pde;
4134         pt_entry_t *pte, tpte;
4135         struct spglist free;
4136         vm_page_t m, ml3, mt;
4137         pv_entry_t pv;
4138         struct md_page *pvh;
4139         struct pv_chunk *pc, *npc;
4140         struct rwlock *lock;
4141         int64_t bit;
4142         uint64_t inuse, bitmask;
4143         int allfree, field, freed, idx, lvl;
4144         vm_paddr_t pa;
4145
4146         lock = NULL;
4147
4148         SLIST_INIT(&free);
4149         PMAP_LOCK(pmap);
4150         TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4151                 allfree = 1;
4152                 freed = 0;
4153                 for (field = 0; field < _NPCM; field++) {
4154                         inuse = ~pc->pc_map[field] & pc_freemask[field];
4155                         while (inuse != 0) {
4156                                 bit = ffsl(inuse) - 1;
4157                                 bitmask = 1UL << bit;
4158                                 idx = field * 64 + bit;
4159                                 pv = &pc->pc_pventry[idx];
4160                                 inuse &= ~bitmask;
4161
4162                                 pde = pmap_pde(pmap, pv->pv_va, &lvl);
4163                                 KASSERT(pde != NULL,
4164                                     ("Attempting to remove an unmapped page"));
4165
4166                                 switch(lvl) {
4167                                 case 1:
4168                                         pte = pmap_l1_to_l2(pde, pv->pv_va);
4169                                         tpte = pmap_load(pte);
4170                                         KASSERT((tpte & ATTR_DESCR_MASK) ==
4171                                             L2_BLOCK,
4172                                             ("Attempting to remove an invalid "
4173                                             "block: %lx", tpte));
4175                                         break;
4176                                 case 2:
4177                                         pte = pmap_l2_to_l3(pde, pv->pv_va);
4178                                         tpte = pmap_load(pte);
4179                                         KASSERT((tpte & ATTR_DESCR_MASK) ==
4180                                             L3_PAGE,
4181                                             ("Attempting to remove an invalid "
4182                                              "page: %lx", tpte));
4183                                         break;
4184                                 default:
4185                                         panic(
4186                                             "Invalid page directory level: %d",
4187                                             lvl);
4188                                 }
4189
4190 /*
4191  * We cannot remove wired pages from a process' mapping at this time
4192  */
4193                                 if (tpte & ATTR_SW_WIRED) {
4194                                         allfree = 0;
4195                                         continue;
4196                                 }
4197
4198                                 pa = tpte & ~ATTR_MASK;
4199
4200                                 m = PHYS_TO_VM_PAGE(pa);
4201                                 KASSERT(m->phys_addr == pa,
4202                                     ("vm_page_t %p phys_addr mismatch %016jx %016jx",
4203                                     m, (uintmax_t)m->phys_addr,
4204                                     (uintmax_t)tpte));
4205
4206                                 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4207                                     m < &vm_page_array[vm_page_array_size],
4208                                     ("pmap_remove_pages: bad pte %#jx",
4209                                     (uintmax_t)tpte));
4210
4211                                 /*
4212                                  * Because this pmap is not active on other
4213                                  * processors, the dirty bit cannot have
4214                                  * changed state since we last loaded pte.
4215                                  */
4216                                 pmap_clear(pte);
4217
4218                                 /*
4219                                  * Update the vm_page_t clean/reference bits.
4220                                  */
4221                                 if ((tpte & ATTR_AP_RW_BIT) ==
4222                                     ATTR_AP(ATTR_AP_RW)) {
4223                                         switch (lvl) {
4224                                         case 1:
4225                                                 for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
4226                                                         vm_page_dirty(mt);
4227                                                 break;
4228                                         case 2:
4229                                                 vm_page_dirty(m);
4230                                                 break;
4231                                         }
4232                                 }
4233
4234                                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
4235
4236                                 /* Mark free */
4237                                 pc->pc_map[field] |= bitmask;
4238                                 switch (lvl) {
4239                                 case 1:
4240                                         pmap_resident_count_dec(pmap,
4241                                             L2_SIZE / PAGE_SIZE);
4242                                         pvh = pa_to_pvh(tpte & ~ATTR_MASK);
4243                                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4244                                         pvh->pv_gen++;
4245                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
4246                                                 for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
4247                                                         if ((mt->aflags & PGA_WRITEABLE) != 0 &&
4248                                                             TAILQ_EMPTY(&mt->md.pv_list))
4249                                                                 vm_page_aflag_clear(mt, PGA_WRITEABLE);
4250                                         }
4251                                         ml3 = pmap_remove_pt_page(pmap,
4252                                             pv->pv_va);
4253                                         if (ml3 != NULL) {
4254                                                 KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
4255                                                     ("pmap_remove_pages: l3 page not promoted"));
4256                                                 pmap_resident_count_dec(pmap, 1);
4257                                                 KASSERT(ml3->wire_count == NL3PG,
4258                                                     ("pmap_remove_pages: l3 page wire count error"));
4259                                                 ml3->wire_count = 0;
4260                                                 pmap_add_delayed_free_list(ml3,
4261                                                     &free, FALSE);
4262                                         }
4263                                         break;
4264                                 case 2:
4265                                         pmap_resident_count_dec(pmap, 1);
4266                                         TAILQ_REMOVE(&m->md.pv_list, pv,
4267                                             pv_next);
4268                                         m->md.pv_gen++;
4269                                         if ((m->aflags & PGA_WRITEABLE) != 0 &&
4270                                             TAILQ_EMPTY(&m->md.pv_list) &&
4271                                             (m->flags & PG_FICTITIOUS) == 0) {
4272                                                 pvh = pa_to_pvh(
4273                                                     VM_PAGE_TO_PHYS(m));
4274                                                 if (TAILQ_EMPTY(&pvh->pv_list))
4275                                                         vm_page_aflag_clear(m,
4276                                                             PGA_WRITEABLE);
4277                                         }
4278                                         break;
4279                                 }
4280                                 pmap_unuse_pt(pmap, pv->pv_va, pmap_load(pde),
4281                                     &free);
4282                                 freed++;
4283                         }
4284                 }
4285                 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
4286                 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
4287                 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
4288                 if (allfree) {
4289                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4290                         free_pv_chunk(pc);
4291                 }
4292         }
4293         pmap_invalidate_all(pmap);
4294         if (lock != NULL)
4295                 rw_wunlock(lock);
4296         PMAP_UNLOCK(pmap);
4297         vm_page_free_pages_toq(&free, true);
4298 }
4299
4300 /*
4301  * This is used to check if a page has been accessed or modified.  As we
4302  * don't have a hardware bit that records modification, we have to assume a
4303  * page has been modified if it is mapped read/write.
4304  */
4305 static boolean_t
4306 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
4307 {
4308         struct rwlock *lock;
4309         pv_entry_t pv;
4310         struct md_page *pvh;
4311         pt_entry_t *pte, mask, value;
4312         pmap_t pmap;
4313         int lvl, md_gen, pvh_gen;
4314         boolean_t rv;
4315
4316         rv = FALSE;
4317         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4318         rw_rlock(lock);
4319 restart:
4320         TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4321                 pmap = PV_PMAP(pv);
4322                 if (!PMAP_TRYLOCK(pmap)) {
4323                         md_gen = m->md.pv_gen;
4324                         rw_runlock(lock);
4325                         PMAP_LOCK(pmap);
4326                         rw_rlock(lock);
4327                         if (md_gen != m->md.pv_gen) {
4328                                 PMAP_UNLOCK(pmap);
4329                                 goto restart;
4330                         }
4331                 }
4332                 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4333                 KASSERT(lvl == 3,
4334                     ("pmap_page_test_mappings: Invalid level %d", lvl));
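                     /*
                      * With no hardware dirty bit, "modified" is approximated by
                      * the mapping being writable; "accessed" requires the access
                      * flag to be set in a valid descriptor.
                      */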
4335                 mask = 0;
4336                 value = 0;
4337                 if (modified) {
4338                         mask |= ATTR_AP_RW_BIT;
4339                         value |= ATTR_AP(ATTR_AP_RW);
4340                 }
4341                 if (accessed) {
4342                         mask |= ATTR_AF | ATTR_DESCR_MASK;
4343                         value |= ATTR_AF | L3_PAGE;
4344                 }
4345                 rv = (pmap_load(pte) & mask) == value;
4346                 PMAP_UNLOCK(pmap);
4347                 if (rv)
4348                         goto out;
4349         }
4350         if ((m->flags & PG_FICTITIOUS) == 0) {
4351                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4352                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4353                         pmap = PV_PMAP(pv);
4354                         if (!PMAP_TRYLOCK(pmap)) {
4355                                 md_gen = m->md.pv_gen;
4356                                 pvh_gen = pvh->pv_gen;
4357                                 rw_runlock(lock);
4358                                 PMAP_LOCK(pmap);
4359                                 rw_rlock(lock);
4360                                 if (md_gen != m->md.pv_gen ||
4361                                     pvh_gen != pvh->pv_gen) {
4362                                         PMAP_UNLOCK(pmap);
4363                                         goto restart;
4364                                 }
4365                         }
4366                         pte = pmap_pte(pmap, pv->pv_va, &lvl);
4367                         KASSERT(lvl == 2,
4368                             ("pmap_page_test_mappings: Invalid level %d", lvl));
4369                         mask = 0;
4370                         value = 0;
4371                         if (modified) {
4372                                 mask |= ATTR_AP_RW_BIT;
4373                                 value |= ATTR_AP(ATTR_AP_RW);
4374                         }
4375                         if (accessed) {
4376                                 mask |= ATTR_AF | ATTR_DESCR_MASK;
4377                                 value |= ATTR_AF | L2_BLOCK;
4378                         }
4379                         rv = (pmap_load(pte) & mask) == value;
4380                         PMAP_UNLOCK(pmap);
4381                         if (rv)
4382                                 goto out;
4383                 }
4384         }
4385 out:
4386         rw_runlock(lock);
4387         return (rv);
4388 }
4389
4390 /*
4391  *      pmap_is_modified:
4392  *
4393  *      Return whether or not the specified physical page was modified
4394  *      in any physical maps.
4395  */
4396 boolean_t
4397 pmap_is_modified(vm_page_t m)
4398 {
4399
4400         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4401             ("pmap_is_modified: page %p is not managed", m));
4402
4403         /*
4404          * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
4405          * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
4406          * is clear, no PTEs can have PG_M set.
4407          */
4408         VM_OBJECT_ASSERT_WLOCKED(m->object);
4409         if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
4410                 return (FALSE);
4411         return (pmap_page_test_mappings(m, FALSE, TRUE));
4412 }
4413
4414 /*
4415  *      pmap_is_prefaultable:
4416  *
4417  *      Return whether or not the specified virtual address is eligible
4418  *      for prefault.
4419  */
4420 boolean_t
4421 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
4422 {
4423         pt_entry_t *pte;
4424         boolean_t rv;
4425         int lvl;
4426
4427         rv = FALSE;
4428         PMAP_LOCK(pmap);
4429         pte = pmap_pte(pmap, addr, &lvl);
4430         if (pte != NULL && pmap_load(pte) != 0) {
4431                 rv = TRUE;
4432         }
4433         PMAP_UNLOCK(pmap);
4434         return (rv);
4435 }
4436
4437 /*
4438  *      pmap_is_referenced:
4439  *
4440  *      Return whether or not the specified physical page was referenced
4441  *      in any physical maps.
4442  */
4443 boolean_t
4444 pmap_is_referenced(vm_page_t m)
4445 {
4446
4447         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4448             ("pmap_is_referenced: page %p is not managed", m));
4449         return (pmap_page_test_mappings(m, TRUE, FALSE));
4450 }
4451
4452 /*
4453  * Clear the write and modified bits in each of the given page's mappings.
4454  */
4455 void
4456 pmap_remove_write(vm_page_t m)
4457 {
4458         struct md_page *pvh;
4459         pmap_t pmap;
4460         struct rwlock *lock;
4461         pv_entry_t next_pv, pv;
4462         pt_entry_t oldpte, *pte;
4463         vm_offset_t va;
4464         int lvl, md_gen, pvh_gen;
4465
4466         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4467             ("pmap_remove_write: page %p is not managed", m));
4468
4469         /*
4470          * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
4471          * set by another thread while the object is locked.  Thus,
4472          * if PGA_WRITEABLE is clear, no page table entries need updating.
4473          */
4474         VM_OBJECT_ASSERT_WLOCKED(m->object);
4475         if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
4476                 return;
4477         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4478         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
4479             pa_to_pvh(VM_PAGE_TO_PHYS(m));
4480 retry_pv_loop:
4481         rw_wlock(lock);
4482         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
4483                 pmap = PV_PMAP(pv);
4484                 if (!PMAP_TRYLOCK(pmap)) {
4485                         pvh_gen = pvh->pv_gen;
4486                         rw_wunlock(lock);
4487                         PMAP_LOCK(pmap);
4488                         rw_wlock(lock);
4489                         if (pvh_gen != pvh->pv_gen) {
4490                                 PMAP_UNLOCK(pmap);
4491                                 rw_wunlock(lock);
4492                                 goto retry_pv_loop;
4493                         }
4494                 }
4495                 va = pv->pv_va;
4496                 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4497                 if ((pmap_load(pte) & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
4498                         (void)pmap_demote_l2_locked(pmap, pte, va, &lock);
4499                 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
4500                     ("inconsistent pv lock %p %p for page %p",
4501                     lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
4502                 PMAP_UNLOCK(pmap);
4503         }
4504         TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4505                 pmap = PV_PMAP(pv);
4506                 if (!PMAP_TRYLOCK(pmap)) {
4507                         pvh_gen = pvh->pv_gen;
4508                         md_gen = m->md.pv_gen;
4509                         rw_wunlock(lock);
4510                         PMAP_LOCK(pmap);
4511                         rw_wlock(lock);
4512                         if (pvh_gen != pvh->pv_gen ||
4513                             md_gen != m->md.pv_gen) {
4514                                 PMAP_UNLOCK(pmap);
4515                                 rw_wunlock(lock);
4516                                 goto retry_pv_loop;
4517                         }
4518                 }
4519                 pte = pmap_pte(pmap, pv->pv_va, &lvl);
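                     /*
                      * Atomically downgrade a writable mapping to read-only.  If
                      * the old mapping was writable and accessed, the page may
                      * have been dirtied, so record that before the TLB entry is
                      * invalidated.
                      */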
4520 retry:
4521                 oldpte = pmap_load(pte);
4522                 if ((oldpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) {
4523                         if (!atomic_cmpset_long(pte, oldpte,
4524                             oldpte | ATTR_AP(ATTR_AP_RO)))
4525                                 goto retry;
4526                         if ((oldpte & ATTR_AF) != 0)
4527                                 vm_page_dirty(m);
4528                         pmap_invalidate_page(pmap, pv->pv_va);
4529                 }
4530                 PMAP_UNLOCK(pmap);
4531         }
4532         rw_wunlock(lock);
4533         vm_page_aflag_clear(m, PGA_WRITEABLE);
4534 }
4535
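     /*
      * The access flag is not yet managed by the fault handler, so it is never
      * safe to simply clear a mapping's reference bit here.  Callers instead
      * demote or remove mappings to emulate the clearing of reference bits.
      */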
4536 static __inline boolean_t
4537 safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
4538 {
4539
4540         return (FALSE);
4541 }
4542
4543 /*
4544  *      pmap_ts_referenced:
4545  *
4546  *      Return a count of reference bits for a page, clearing those bits.
4547  *      It is not necessary for every reference bit to be cleared, but it
4548  *      is necessary that 0 only be returned when there are truly no
4549  *      reference bits set.
4550  *
4551  *      As an optimization, update the page's dirty field if a modified bit is
4552  *      found while counting reference bits.  This opportunistic update can be
4553  *      performed at low cost and can eliminate the need for some future calls
4554  *      to pmap_is_modified().  However, since this function stops after
4555  *      finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
4556  *      dirty pages.  Those dirty pages will only be detected by a future call
4557  *      to pmap_is_modified().
4558  */
4559 int
4560 pmap_ts_referenced(vm_page_t m)
4561 {
4562         struct md_page *pvh;
4563         pv_entry_t pv, pvf;
4564         pmap_t pmap;
4565         struct rwlock *lock;
4566         pd_entry_t *pde, tpde;
4567         pt_entry_t *pte, tpte;
4568         pt_entry_t *l3;
4569         vm_offset_t va;
4570         vm_paddr_t pa;
4571         int cleared, md_gen, not_cleared, lvl, pvh_gen;
4572         struct spglist free;
4573         bool demoted;
4574
4575         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4576             ("pmap_ts_referenced: page %p is not managed", m));
4577         SLIST_INIT(&free);
4578         cleared = 0;
4579         pa = VM_PAGE_TO_PHYS(m);
4580         lock = PHYS_TO_PV_LIST_LOCK(pa);
4581         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
4582         rw_wlock(lock);
4583 retry:
4584         not_cleared = 0;
4585         if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
4586                 goto small_mappings;
4587         pv = pvf;
4588         do {
4589                 if (pvf == NULL)
4590                         pvf = pv;
4591                 pmap = PV_PMAP(pv);
4592                 if (!PMAP_TRYLOCK(pmap)) {
4593                         pvh_gen = pvh->pv_gen;
4594                         rw_wunlock(lock);
4595                         PMAP_LOCK(pmap);
4596                         rw_wlock(lock);
4597                         if (pvh_gen != pvh->pv_gen) {
4598                                 PMAP_UNLOCK(pmap);
4599                                 goto retry;
4600                         }
4601                 }
4602                 va = pv->pv_va;
4603                 pde = pmap_pde(pmap, pv->pv_va, &lvl);
4604                 KASSERT(pde != NULL, ("pmap_ts_referenced: no l1 table found"));
4605                 KASSERT(lvl == 1,
4606                     ("pmap_ts_referenced: invalid pde level %d", lvl));
4607                 tpde = pmap_load(pde);
4608                 KASSERT((tpde & ATTR_DESCR_MASK) == L1_TABLE,
4609                     ("pmap_ts_referenced: found an invalid l1 table"));
4610                 pte = pmap_l1_to_l2(pde, pv->pv_va);
4611                 tpte = pmap_load(pte);
4612                 if (pmap_page_dirty(tpte)) {
4613                         /*
4614                          * Although "tpte" is mapping a 2MB page, because
4615                          * this function is called at a 4KB page granularity,
4616                          * we only update the 4KB page under test.
4617                          */
4618                         vm_page_dirty(m);
4619                 }
4620                 if ((tpte & ATTR_AF) != 0) {
4621                         /*
4622                          * Since this reference bit is shared by 512 4KB
4623                          * pages, it should not be cleared every time it is
4624                          * tested.  Apply a simple "hash" function on the
4625                          * physical page number, the virtual superpage number,
4626                          * and the pmap address to select one 4KB page out of
4627                          * the 512 on which testing the reference bit will
4628                          * result in clearing that reference bit.  This
4629                          * function is designed to avoid the selection of the
4630                          * same 4KB page for every 2MB page mapping.
4631                          *
4632                          * On demotion, a mapping that hasn't been referenced
4633                          * is simply destroyed.  To avoid the possibility of a
4634                          * subsequent page fault on a demoted wired mapping,
4635                          * always leave its reference bit set.  Moreover,
4636                          * since the superpage is wired, the current state of
4637                          * its reference bit won't affect page replacement.
4638                          */
4639                         if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L2_SHIFT) ^
4640                             (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
4641                             (tpte & ATTR_SW_WIRED) == 0) {
4642                                 if (safe_to_clear_referenced(pmap, tpte)) {
4643                                         /*
4644                                          * TODO: We don't handle the access
4645                                          * flag at all. We need to be able
4646                                          * to set it in the exception handler.
4647                                          */
4648                                         panic("ARM64TODO: "
4649                                             "safe_to_clear_referenced\n");
4650                                 } else if (pmap_demote_l2_locked(pmap, pte,
4651                                     pv->pv_va, &lock) != NULL) {
4652                                         demoted = true;
4653                                         va += VM_PAGE_TO_PHYS(m) -
4654                                             (tpte & ~ATTR_MASK);
4655                                         l3 = pmap_l2_to_l3(pte, va);
4656                                         pmap_remove_l3(pmap, l3, va,
4657                                             pmap_load(pte), NULL, &lock);
4658                                 } else
4659                                         demoted = true;
4660
4661                                 if (demoted) {
4662                                         /*
4663                                          * The superpage mapping was removed
4664                                          * entirely and therefore 'pv' is no
4665                                          * longer valid.
4666                                          */
4667                                         if (pvf == pv)
4668                                                 pvf = NULL;
4669                                         pv = NULL;
4670                                 }
4671                                 cleared++;
4672                                 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
4673                                     ("inconsistent pv lock %p %p for page %p",
4674                                     lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
4675                         } else
4676                                 not_cleared++;
4677                 }
4678                 PMAP_UNLOCK(pmap);
4679                 /* Rotate the PV list if it has more than one entry. */
4680                 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
4681                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4682                         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
4683                         pvh->pv_gen++;
4684                 }
4685                 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
4686                         goto out;
4687         } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
4688 small_mappings:
4689         if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
4690                 goto out;
4691         pv = pvf;
4692         do {
4693                 if (pvf == NULL)
4694                         pvf = pv;
4695                 pmap = PV_PMAP(pv);
4696                 if (!PMAP_TRYLOCK(pmap)) {
4697                         pvh_gen = pvh->pv_gen;
4698                         md_gen = m->md.pv_gen;
4699                         rw_wunlock(lock);
4700                         PMAP_LOCK(pmap);
4701                         rw_wlock(lock);
4702                         if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
4703                                 PMAP_UNLOCK(pmap);
4704                                 goto retry;
4705                         }
4706                 }
4707                 pde = pmap_pde(pmap, pv->pv_va, &lvl);
4708                 KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found"));
4709                 KASSERT(lvl == 2,
4710                     ("pmap_ts_referenced: invalid pde level %d", lvl));
4711                 tpde = pmap_load(pde);
4712                 KASSERT((tpde & ATTR_DESCR_MASK) == L2_TABLE,
4713                     ("pmap_ts_referenced: found an invalid l2 table"));
4714                 pte = pmap_l2_to_l3(pde, pv->pv_va);
4715                 tpte = pmap_load(pte);
4716                 if (pmap_page_dirty(tpte))
4717                         vm_page_dirty(m);
4718                 if ((tpte & ATTR_AF) != 0) {
4719                         if (safe_to_clear_referenced(pmap, tpte)) {
4720                                 /*
4721                                  * TODO: We don't handle the access flag
4722                                  * at all. We need to be able to set it in
4723                                  * the exception handler.
4724                                  */
4725                                 panic("ARM64TODO: safe_to_clear_referenced\n");
4726                         } else if ((tpte & ATTR_SW_WIRED) == 0) {
4727                                 /*
4728                                  * Wired pages cannot be paged out so
4729                                  * doing accessed bit emulation for
4730                                  * them is wasted effort. We do the
4731                                  * hard work for unwired pages only.
4732                                  */
4733                                 pmap_remove_l3(pmap, pte, pv->pv_va, tpde,
4734                                     &free, &lock);
4735                                 cleared++;
4736                                 if (pvf == pv)
4737                                         pvf = NULL;
4738                                 pv = NULL;
4739                                 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
4740                                     ("inconsistent pv lock %p %p for page %p",
4741                                     lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
4742                         } else
4743                                 not_cleared++;
4744                 }
4745                 PMAP_UNLOCK(pmap);
4746                 /* Rotate the PV list if it has more than one entry. */
4747                 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
4748                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4749                         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
4750                         m->md.pv_gen++;
4751                 }
4752         } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
4753             not_cleared < PMAP_TS_REFERENCED_MAX);
4754 out:
4755         rw_wunlock(lock);
4756         vm_page_free_pages_toq(&free, true);
4757         return (cleared + not_cleared);
4758 }
4759
4760 /*
4761  *      Apply the given advice to the specified range of addresses within the
4762  *      given pmap.  Depending on the advice, clear the referenced and/or
4763  *      modified flags in each mapping and set the mapped page's dirty field.
4764  */
4765 void
4766 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
4767 {
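             /*
              * Advice is currently ignored: this pmap does not manage the
              * hardware access flag, so there is no referenced or modified
              * state that can be cheaply cleared here.
              */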
4768 }
4769
4770 /*
4771  *      Clear the modify bits on the specified physical page.
4772  */
4773 void
4774 pmap_clear_modify(vm_page_t m)
4775 {
4776
4777         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4778             ("pmap_clear_modify: page %p is not managed", m));
4779         VM_OBJECT_ASSERT_WLOCKED(m->object);
4780         KASSERT(!vm_page_xbusied(m),
4781             ("pmap_clear_modify: page %p is exclusive busied", m));
4782
4783         /*
4784          * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
4785          * If the object containing the page is locked and the page is not
4786          * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
4787          */
4788         if ((m->aflags & PGA_WRITEABLE) == 0)
4789                 return;
4790
4791         /* ARM64TODO: We lack support for tracking if a page is modified */
4792 }
4793
4794 void *
4795 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
4796 {
4797         struct pmap_preinit_mapping *ppim;
4798         vm_offset_t va, offset;
4799         pd_entry_t *pde;
4800         pt_entry_t *l2;
4801         int i, lvl, l2_blocks, free_l2_count, start_idx;
4802
4803         if (!vm_initialized) {
4804                 /*
4805                  * No L3 ptables yet, so map entire L2 blocks.  The start VA is:
4806                  *      preinit_map_va + start_idx * L2_SIZE
4807                  * There may be duplicate mappings (multiple VAs -> the same PA), but
4808                  * the ARM64 dcache is always PIPT, so that is acceptable.
4809                  */
4810                 if (size == 0)
4811                         return (NULL);
4812 
4813                 /* Calculate how many L2 blocks are needed for the mapping */
4814                 l2_blocks = (roundup2(pa + size, L2_SIZE) -
4815                     rounddown2(pa, L2_SIZE)) >> L2_SHIFT;
4816
4817                 offset = pa & L2_OFFSET;
4818
4819                 if (preinit_map_va == 0)
4820                         return (NULL);
4821
4822                 /* Map 2MiB L2 blocks from reserved VA space */
4823
4824                 free_l2_count = 0;
4825                 start_idx = -1;
4826                 /* Find enough free contiguous VA space */
4827                 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
4828                         ppim = pmap_preinit_mapping + i;
4829                         if (free_l2_count > 0 && ppim->pa != 0) {
4830                                 /* Not enough space here */
4831                                 free_l2_count = 0;
4832                                 start_idx = -1;
4833                                 continue;
4834                         }
4835
4836                         if (ppim->pa == 0) {
4837                                 /* Free L2 block */
4838                                 if (start_idx == -1)
4839                                         start_idx = i;
4840                                 free_l2_count++;
4841                                 if (free_l2_count == l2_blocks)
4842                                         break;
4843                         }
4844                 }
4845                 if (free_l2_count != l2_blocks)
4846                         panic("%s: too many preinit mappings", __func__);
4847
4848                 va = preinit_map_va + (start_idx * L2_SIZE);
4849                 for (i = start_idx; i < start_idx + l2_blocks; i++) {
4850                         /* Mark entries as allocated */
4851                         ppim = pmap_preinit_mapping + i;
4852                         ppim->pa = pa;
4853                         ppim->va = va + offset;
4854                         ppim->size = size;
4855                 }
4856
4857                 /* Map L2 blocks */
4858                 pa = rounddown2(pa, L2_SIZE);
4859                 for (i = 0; i < l2_blocks; i++) {
4860                         pde = pmap_pde(kernel_pmap, va, &lvl);
4861                         KASSERT(pde != NULL,
4862                             ("pmap_mapbios: Invalid page entry, va: 0x%lx",
4863                             va));
4864                         KASSERT(lvl == 1,
4865                             ("pmap_mapbios: Invalid level %d", lvl));
4866
4867                         /* Insert L2_BLOCK */
4868                         l2 = pmap_l1_to_l2(pde, va);
4869                         pmap_load_store(l2,
4870                             pa | ATTR_DEFAULT | ATTR_XN |
4871                             ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
4872
4873                         va += L2_SIZE;
4874                         pa += L2_SIZE;
4875                 }
4876                 pmap_invalidate_all(kernel_pmap);
4877
4878                 va = preinit_map_va + (start_idx * L2_SIZE);
4879
4880         } else {
4881                 /* kva_alloc may be used to map the pages */
4882                 offset = pa & PAGE_MASK;
4883                 size = round_page(offset + size);
4884
4885                 va = kva_alloc(size);
4886                 if (va == 0)
4887                         panic("%s: Couldn't allocate KVA", __func__);
4888
4889                 pde = pmap_pde(kernel_pmap, va, &lvl);
4890                 KASSERT(lvl == 2, ("pmap_mapbios: Invalid level %d", lvl));
4891
4892                 /* L3 table is linked */
4893                 va = trunc_page(va);
4894                 pa = trunc_page(pa);
4895                 pmap_kenter(va, size, pa, CACHED_MEMORY);
4896         }
4897
4898         return ((void *)(va + offset));
4899 }
4900
4901 void
4902 pmap_unmapbios(vm_offset_t va, vm_size_t size)
4903 {
4904         struct pmap_preinit_mapping *ppim;
4905         vm_offset_t offset, tmpsize, va_trunc;
4906         pd_entry_t *pde;
4907         pt_entry_t *l2;
4908         int i, lvl, l2_blocks, block;
4909         bool preinit_map;
4910
4911         l2_blocks =
4912            (roundup2(va + size, L2_SIZE) - rounddown2(va, L2_SIZE)) >> L2_SHIFT;
4913         KASSERT(l2_blocks > 0, ("pmap_unmapbios: invalid size %lx", size));
4914
4915         /* Remove preinit mapping */
4916         preinit_map = false;
4917         block = 0;
4918         for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
4919                 ppim = pmap_preinit_mapping + i;
4920                 if (ppim->va == va) {
4921                         KASSERT(ppim->size == size,
4922                             ("pmap_unmapbios: size mismatch"));
4923                         ppim->va = 0;
4924                         ppim->pa = 0;
4925                         ppim->size = 0;
4926                         preinit_map = true;
4927                         offset = block * L2_SIZE;
4928                         va_trunc = rounddown2(va, L2_SIZE) + offset;
4929
4930                         /* Remove L2_BLOCK */
4931                         pde = pmap_pde(kernel_pmap, va_trunc, &lvl);
4932                         KASSERT(pde != NULL,
4933                             ("pmap_unmapbios: Invalid page entry, va: 0x%lx",
4934                             va_trunc));
4935                         l2 = pmap_l1_to_l2(pde, va_trunc);
4936                         pmap_clear(l2);
4937
4938                         if (block == (l2_blocks - 1))
4939                                 break;
4940                         block++;
4941                 }
4942         }
4943         if (preinit_map) {
4944                 pmap_invalidate_all(kernel_pmap);
4945                 return;
4946         }
4947
4948         /* Unmap the pages reserved with kva_alloc. */
4949         if (vm_initialized) {
4950                 offset = va & PAGE_MASK;
4951                 size = round_page(offset + size);
4952                 va = trunc_page(va);
4953
4954                 pde = pmap_pde(kernel_pmap, va, &lvl);
4955                 KASSERT(pde != NULL,
4956                     ("pmap_unmapbios: Invalid page entry, va: 0x%lx", va));
4957                 KASSERT(lvl == 2, ("pmap_unmapbios: Invalid level %d", lvl));
4958
4959                 /* Unmap and invalidate the pages */
4960                 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
4961                         pmap_kremove(va + tmpsize);
4962
4963                 kva_free(va, size);
4964         }
4965 }
4966
4967 /*
4968  * Sets the memory attribute for the specified page.
4969  */
4970 void
4971 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
4972 {
4973
4974         m->md.pv_memattr = ma;
4975
4976         /*
4977          * If "m" is a normal page, update its direct mapping.  This update
4978          * can be relied upon to perform any cache operations that are
4979          * required for data coherence.
4980          */
4981         if ((m->flags & PG_FICTITIOUS) == 0 &&
4982             pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
4983             m->md.pv_memattr) != 0)
4984                 panic("memory attribute change on the direct map failed");
4985 }
4986
4987 /*
4988  * Changes the specified virtual address range's memory type to that given by
4989  * the parameter "mode".  The specified virtual address range must be
4990  * completely contained within either the direct map or the kernel map.  If
4991  * the virtual address range is contained within the kernel map, then the
4992  * memory type for each of the corresponding ranges of the direct map is also
4993  * changed.  (The corresponding ranges of the direct map are those ranges that
4994  * map the same physical pages as the specified virtual address range.)  These
4995  * changes to the direct map are necessary because the architecture does not
4996  * define the behavior of accesses when two or more mappings to the same
4997  * physical page have different memory types.
4998  *
4999  * Returns zero if the change completed successfully, and either EINVAL or
5000  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
5001  * of the virtual address range was not mapped, and ENOMEM is returned if
5002  * there was insufficient memory available to complete the change.  In the
5003  * latter case, the memory type may have been changed on some part of the
5004  * virtual address range or the direct map.
5005  */
5006 static int
5007 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
5008 {
5009         int error;
5010
5011         PMAP_LOCK(kernel_pmap);
5012         error = pmap_change_attr_locked(va, size, mode);
5013         PMAP_UNLOCK(kernel_pmap);
5014         return (error);
5015 }
5016
5017 static int
5018 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
5019 {
5020         vm_offset_t base, offset, tmpva;
5021         pt_entry_t l3, *pte, *newpte;
5022         int lvl;
5023
5024         PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
5025         base = trunc_page(va);
5026         offset = va & PAGE_MASK;
5027         size = round_page(offset + size);
5028
5029         if (!VIRT_IN_DMAP(base))
5030                 return (EINVAL);
5031
5032         for (tmpva = base; tmpva < base + size; ) {
5033                 pte = pmap_pte(kernel_pmap, tmpva, &lvl);
5034                 if (pte == NULL)
5035                         return (EINVAL);
5036
5037                 if ((pmap_load(pte) & ATTR_IDX_MASK) == ATTR_IDX(mode)) {
5038                         /*
5039                          * We already have the correct attribute,
5040                          * ignore this entry.
5041                          */
5042                         switch (lvl) {
5043                         default:
5044                                 panic("Invalid DMAP table level: %d\n", lvl);
5045                         case 1:
5046                                 tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
5047                                 break;
5048                         case 2:
5049                                 tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
5050                                 break;
5051                         case 3:
5052                                 tmpva += PAGE_SIZE;
5053                                 break;
5054                         }
5055                 } else {
5056                         /*
5057                          * Split the entry to a level 3 table, then
5058                          * set the new attribute.
5059                          */
5060                         switch (lvl) {
5061                         default:
5062                                 panic("Invalid DMAP table level: %d\n", lvl);
5063                         case 1:
5064                                 newpte = pmap_demote_l1(kernel_pmap, pte,
5065                                     tmpva & ~L1_OFFSET);
5066                                 if (newpte == NULL)
5067                                         return (EINVAL);
5068                                 pte = pmap_l1_to_l2(pte, tmpva);
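                                     /* FALLTHROUGH */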
5069                         case 2:
5070                                 newpte = pmap_demote_l2(kernel_pmap, pte,
5071                                     tmpva);
5072                                 if (newpte == NULL)
5073                                         return (EINVAL);
5074                                 pte = pmap_l2_to_l3(pte, tmpva);
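                                     /* FALLTHROUGH */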
5075                         case 3:
5076                                 /* Update the entry */
5077                                 l3 = pmap_load(pte);
5078                                 l3 &= ~ATTR_IDX_MASK;
5079                                 l3 |= ATTR_IDX(mode);
5080                                 if (mode == DEVICE_MEMORY)
5081                                         l3 |= ATTR_XN;
5082
5083                                 pmap_update_entry(kernel_pmap, pte, l3, tmpva,
5084                                     PAGE_SIZE);
5085
5086                                 /*
5087                                  * If moving to a non-cacheable entry flush
5088                                  * the cache.
5089                                  */
5090                                 if (mode == VM_MEMATTR_UNCACHEABLE)
5091                                         cpu_dcache_wbinv_range(tmpva, L3_SIZE);
5092
5093                                 break;
5094                         }
5095                         tmpva += PAGE_SIZE;
5096                 }
5097         }
5098
5099         return (0);
5100 }
5101
5102 /*
5103  * Create an L2 table to map all addresses within an L1 mapping.
5104  */
5105 static pt_entry_t *
5106 pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
5107 {
5108         pt_entry_t *l2, newl2, oldl1;
5109         vm_offset_t tmpl1;
5110         vm_paddr_t l2phys, phys;
5111         vm_page_t ml2;
5112         int i;
5113
5114         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5115         oldl1 = pmap_load(l1);
5116         KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK,
5117             ("pmap_demote_l1: Demoting a non-block entry"));
5118         KASSERT((va & L1_OFFSET) == 0,
5119             ("pmap_demote_l1: Invalid virtual address %#lx", va));
5120         KASSERT((oldl1 & ATTR_SW_MANAGED) == 0,
5121             ("pmap_demote_l1: Level 1 table shouldn't be managed"));
5122
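             /*
              * If the address range being demoted contains the l1 entry itself,
              * reserve a temporary mapping for the entry so that access to it
              * is not lost while it is being updated.
              */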
5123         tmpl1 = 0;
5124         if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
5125                 tmpl1 = kva_alloc(PAGE_SIZE);
5126                 if (tmpl1 == 0)
5127                         return (NULL);
5128         }
5129
5130         if ((ml2 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
5131             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
5132                 CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
5133                     " in pmap %p", va, pmap);
5134                 return (NULL);
5135         }
5136
5137         l2phys = VM_PAGE_TO_PHYS(ml2);
5138         l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys);
5139
5140         /* The physical address that the old l1 entry points at */
5141         phys = oldl1 & ~ATTR_MASK;
5142         /* The attributes from the old l1 entry to be copied */
5143         newl2 = oldl1 & ATTR_MASK;
5144
5145         /* Create the new entries */
5146         for (i = 0; i < Ln_ENTRIES; i++) {
5147                 l2[i] = newl2 | phys;
5148                 phys += L2_SIZE;
5149         }
5150         KASSERT(l2[0] == ((oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK),
5151             ("Invalid l2 page (%lx != %lx)", l2[0],
5152             (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));
5153
5154         if (tmpl1 != 0) {
5155                 pmap_kenter(tmpl1, PAGE_SIZE,
5156                     DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET, CACHED_MEMORY);
5157                 l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
5158         }
5159
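             /*
              * Link the new l2 table into place.  pmap_update_entry() performs
              * the break-before-make sequence required when replacing a live
              * block entry.
              */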
5160         pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE);
5161
5162         if (tmpl1 != 0) {
5163                 pmap_kremove(tmpl1);
5164                 kva_free(tmpl1, PAGE_SIZE);
5165         }
5166
5167         return (l2);
5168 }
5169
5170 static void
5171 pmap_demote_l2_abort(pmap_t pmap, vm_offset_t va, pt_entry_t *l2,
5172     struct rwlock **lockp)
5173 {
5174         struct spglist free;
5175
5176         SLIST_INIT(&free);
5177         (void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), &free,
5178             lockp);
5179         vm_page_free_pages_toq(&free, true);
5180 }
5181
5182 /*
5183  * Create an L3 table to map all addresses within an L2 mapping.
5184  */
5185 static pt_entry_t *
5186 pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
5187     struct rwlock **lockp)
5188 {
5189         pt_entry_t *l3, newl3, oldl2;
5190         vm_offset_t tmpl2;
5191         vm_paddr_t l3phys, phys;
5192         vm_page_t ml3;
5193         int i;
5194
5195         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5196         l3 = NULL;
5197         oldl2 = pmap_load(l2);
5198         KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
5199             ("pmap_demote_l2: Demoting a non-block entry"));
5200         va &= ~L2_OFFSET;
5201
5202         tmpl2 = 0;
5203         if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
5204                 tmpl2 = kva_alloc(PAGE_SIZE);
5205                 if (tmpl2 == 0)
5206                         return (NULL);
5207         }
5208
5209         /*
5210          * Invalidate the 2MB page mapping and return "failure" if the
5211          * mapping was never accessed.
5212          */
5213         if ((oldl2 & ATTR_AF) == 0) {
5214                 KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
5215                     ("pmap_demote_l2: a wired mapping is missing ATTR_AF"));
5216                 pmap_demote_l2_abort(pmap, va, l2, lockp);
5217                 CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx in pmap %p",
5218                     va, pmap);
5219                 goto fail;
5220         }
5221
5222         if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
5223                 KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
5224                     ("pmap_demote_l2: page table page for a wired mapping"
5225                     " is missing"));
5226
5227                 /*
5228                  * If the page table page is missing and the mapping
5229                  * is for a kernel address, the mapping must belong to
5230                  * the direct map.  Page table pages are preallocated
5231                  * for every other part of the kernel address space,
5232                  * so the direct map region is the only part of the
5233                  * kernel address space that must be handled here.
5234                  */
5235                 KASSERT(va < VM_MAXUSER_ADDRESS || VIRT_IN_DMAP(va),
5236                     ("pmap_demote_l2: No saved mpte for va %#lx", va));
5237
5238                 /*
5239                  * If the 2MB page mapping belongs to the direct map
5240                  * region of the kernel's address space, then the page
5241                  * allocation request specifies the highest possible
5242                  * priority (VM_ALLOC_INTERRUPT).  Otherwise, the
5243                  * priority is normal.
5244                  */
5245                 ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
5246                     (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
5247                     VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
5248
5249                 /*
5250                  * If the allocation of the new page table page fails,
5251                  * invalidate the 2MB page mapping and return "failure".
5252                  */
5253                 if (ml3 == NULL) {
5254                         pmap_demote_l2_abort(pmap, va, l2, lockp);
5255                         CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
5256                             " in pmap %p", va, pmap);
5257                         goto fail;
5258                 }
5259
5260                 if (va < VM_MAXUSER_ADDRESS) {
5261                         ml3->wire_count = NL3PG;
5262                         pmap_resident_count_inc(pmap, 1);
5263                 }
5264         }
5265
5266         l3phys = VM_PAGE_TO_PHYS(ml3);
5267         l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
5268
5269         /* The physical address that the old l2 entry points at */
5270         phys = oldl2 & ~ATTR_MASK;
5271         /* The attributes from the old l2 entry to be copied */
5272         newl3 = (oldl2 & (ATTR_MASK & ~ATTR_DESCR_MASK)) | L3_PAGE;
5273
5274         /*
5275          * If the page table page is not leftover from an earlier promotion,
5276          * initialize it.
5277          */
5278         if (ml3->valid == 0) {
5279                 for (i = 0; i < Ln_ENTRIES; i++) {
5280                         l3[i] = newl3 | phys;
5281                         phys += L3_SIZE;
5282                 }
5283         }
5284         KASSERT(l3[0] == ((oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE),
5285             ("Invalid l3 page (%lx != %lx)", l3[0],
5286             (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE));
5287
5288         /*
5289          * Map the temporary page so we don't lose access to the l2 table.
5290          */
5291         if (tmpl2 != 0) {
5292                 pmap_kenter(tmpl2, PAGE_SIZE,
5293                     DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET, CACHED_MEMORY);
5294                 l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
5295         }
5296
5297         /*
5298          * The spare PV entries must be reserved prior to demoting the
5299          * mapping, that is, prior to changing the PDE.  Otherwise, the state
5300          * of the L2 and the PV lists will be inconsistent, which can result
5301          * in reclaim_pv_chunk() attempting to remove a PV entry from the
5302          * wrong PV list and pmap_pv_demote_l2() failing to find the expected
5303          * PV entry for the 2MB page mapping that is being demoted.
5304          */
5305         if ((oldl2 & ATTR_SW_MANAGED) != 0)
5306                 reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
5307
5308         /*
5309          * Pass PAGE_SIZE so that a single TLB invalidation is performed on
5310          * the 2MB page mapping.
5311          */
5312         pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);
5313
5314         /*
5315          * Demote the PV entry.
5316          */
5317         if ((oldl2 & ATTR_SW_MANAGED) != 0)
5318                 pmap_pv_demote_l2(pmap, va, oldl2 & ~ATTR_MASK, lockp);
5319
5320         atomic_add_long(&pmap_l2_demotions, 1);
5321         CTR3(KTR_PMAP, "pmap_demote_l2: success for va %#lx"
5322             " in pmap %p %lx", va, pmap, l3[0]);
5323
5324 fail:
5325         if (tmpl2 != 0) {
5326                 pmap_kremove(tmpl2);
5327                 kva_free(tmpl2, PAGE_SIZE);
5328         }
5329
5330         return (l3);
5331
5332 }
5333
5334 static pt_entry_t *
5335 pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
5336 {
5337         struct rwlock *lock;
5338         pt_entry_t *l3;
5339
5340         lock = NULL;
5341         l3 = pmap_demote_l2_locked(pmap, l2, va, &lock);
5342         if (lock != NULL)
5343                 rw_wunlock(lock);
5344         return (l3);
5345 }
5346
5347 /*
5348  * perform the pmap work for mincore
5349  */
5350 int
5351 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
5352 {
5353         pt_entry_t *pte, tpte;
5354         vm_paddr_t mask, pa;
5355         int lvl, val;
5356         bool managed;
5357
5358         PMAP_LOCK(pmap);
5359 retry:
5360         val = 0;
5361         pte = pmap_pte(pmap, addr, &lvl);
5362         if (pte != NULL) {
5363                 tpte = pmap_load(pte);
5364
5365                 switch (lvl) {
5366                 case 3:
5367                         mask = L3_OFFSET;
5368                         break;
5369                 case 2:
5370                         mask = L2_OFFSET;
5371                         break;
5372                 case 1:
5373                         mask = L1_OFFSET;
5374                         break;
5375                 default:
5376                         panic("pmap_mincore: invalid level %d", lvl);
5377                 }
5378
5379                 val = MINCORE_INCORE;
5380                 if (lvl != 3)
5381                         val |= MINCORE_SUPER;
5382                 if (pmap_page_dirty(tpte))
5383                         val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5384                 if ((tpte & ATTR_AF) == ATTR_AF)
5385                         val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
5386
5387                 managed = (tpte & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
5388                 pa = (tpte & ~ATTR_MASK) | (addr & mask);
5389         } else
5390                 managed = false;
5391
5392         if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
5393             (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
5394                 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
5395                 if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
5396                         goto retry;
5397         } else
5398                 PA_UNLOCK_COND(*locked_pa);
5399         PMAP_UNLOCK(pmap);
5400
5401         return (val);
5402 }
5403
5404 void
5405 pmap_activate(struct thread *td)
5406 {
5407         pmap_t  pmap;
5408
5409         critical_enter();
5410         pmap = vmspace_pmap(td->td_proc->p_vmspace);
5411         td->td_proc->p_md.md_l0addr = vtophys(pmap->pm_l0);
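             /* Install the new user translation table base and synchronize. */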
5412         __asm __volatile(
5413             "msr ttbr0_el1, %0  \n"
5414             "isb                \n"
5415             : : "r"(td->td_proc->p_md.md_l0addr));
5416         pmap_invalidate_all(pmap);
5417         critical_exit();
5418 }
5419
5420 struct pcb *
5421 pmap_switch(struct thread *old, struct thread *new)
5422 {
5423         pcpu_bp_harden bp_harden;
5424         struct pcb *pcb;
5425
5426         /* Store the new curthread */
5427         PCPU_SET(curthread, new);
5428
5429         /* And the new pcb */
5430         pcb = new->td_pcb;
5431         PCPU_SET(curpcb, pcb);
5432
5433         /*
5434          * TODO: We may need to flush the cache here if switching
5435          * to a user process.
5436          */
5437
5438         if (old == NULL ||
5439             old->td_proc->p_md.md_l0addr != new->td_proc->p_md.md_l0addr) {
5440                 __asm __volatile(
5441                     /* Switch to the new pmap */
5442                     "msr        ttbr0_el1, %0   \n"
5443                     "isb                        \n"
5444
5445                     /* Invalidate the TLB */
5446                     "dsb        ishst           \n"
5447                     "tlbi       vmalle1is       \n"
5448                     "dsb        ish             \n"
5449                     "isb                        \n"
5450                     : : "r"(new->td_proc->p_md.md_l0addr));
5451
5452                 /*
5453                  * Stop userspace from training the branch predictor against
5454                  * other processes. This will call into a CPU specific
5455                  * function that clears the branch predictor state.
5456                  */
5457                 bp_harden = PCPU_GET(bp_harden);
5458                 if (bp_harden != NULL)
5459                         bp_harden();
5460         }
5461
5462         return (pcb);
5463 }
5464
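/*
 * Synchronize the instruction cache after writing instructions to the given
 * range.  Kernel addresses are synced directly; user addresses are resolved
 * one page at a time and synced through their direct map (DMAP) aliases.
 */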
5465 void
5466 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
5467 {
5468
5469         if (va >= VM_MIN_KERNEL_ADDRESS) {
5470                 cpu_icache_sync_range(va, sz);
5471         } else {
5472                 u_int len, offset;
5473                 vm_paddr_t pa;
5474
5475                 /* Find the length of data in this page to flush */
5476                 offset = va & PAGE_MASK;
5477                 len = imin(PAGE_SIZE - offset, sz);
5478
5479                 while (sz != 0) {
5480                         /* Extract the physical address & find it in the DMAP */
5481                         pa = pmap_extract(pmap, va);
5482                         if (pa != 0)
5483                                 cpu_icache_sync_range(PHYS_TO_DMAP(pa), len);
5484
5485                         /* Move to the next page */
5486                         sz -= len;
5487                         va += len;
5488                         /* Set the length for the next iteration */
5489                         len = imin(PAGE_SIZE, sz);
5490                 }
5491         }
5492 }
5493
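/*
 * Try to resolve a translation fault without involving the VM system: if a
 * hardware walk of the faulting address now succeeds, the fault raced with
 * a break-before-make update and the access can simply be retried;
 * otherwise return failure so the normal fault path handles it.
 */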
5494 int
5495 pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
5496 {
5497 #ifdef SMP
5498         register_t intr;
5499         uint64_t par;
5500
5501         switch (ESR_ELx_EXCEPTION(esr)) {
5502         case EXCP_INSN_ABORT_L:
5503         case EXCP_INSN_ABORT:
5504         case EXCP_DATA_ABORT_L:
5505         case EXCP_DATA_ABORT:
5506                 break;
5507         default:
5508                 return (KERN_FAILURE);
5509         }
5510
5511         /* Data and insn aborts use the same encoding for the FSC field. */
5512         switch (esr & ISS_DATA_DFSC_MASK) {
5513         case ISS_DATA_DFSC_TF_L0:
5514         case ISS_DATA_DFSC_TF_L1:
5515         case ISS_DATA_DFSC_TF_L2:
5516         case ISS_DATA_DFSC_TF_L3:
5517                 PMAP_LOCK(pmap);
5518                 /* Ask the MMU to check the address */
5519                 intr = intr_disable();
5520                 if (pmap == kernel_pmap)
5521                         par = arm64_address_translate_s1e1r(far);
5522                 else
5523                         par = arm64_address_translate_s1e0r(far);
5524                 intr_restore(intr);
5525                 PMAP_UNLOCK(pmap);
5526
5527                 /*
5528                  * If the translation now succeeds, the fault was spurious and
5529                  * most likely raced with a break-before-make sequence; return
5530                  * success to the trap handler so the access is retried.
5531                  */
5532                 if (PAR_SUCCESS(par))
5533                         return (KERN_SUCCESS);
5534                 break;
5535         default:
5536                 break;
5537         }
5538 #endif
5539
5540         return (KERN_FAILURE);
5541 }
5542
5543 /*
5544  *      Increase the starting virtual address of the given mapping if a
5545  *      different alignment might result in more superpage mappings.
5546  */
5547 void
5548 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
5549     vm_offset_t *addr, vm_size_t size)
5550 {
5551         vm_offset_t superpage_offset;
5552
5553         if (size < L2_SIZE)
5554                 return;
5555         if (object != NULL && (object->flags & OBJ_COLORED) != 0)
5556                 offset += ptoa(object->pg_color);
5557         superpage_offset = offset & L2_OFFSET;
5558         if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE ||
5559             (*addr & L2_OFFSET) == superpage_offset)
5560                 return;
5561         if ((*addr & L2_OFFSET) < superpage_offset)
5562                 *addr = (*addr & ~L2_OFFSET) + superpage_offset;
5563         else
5564                 *addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset;
5565 }
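
/*
 * Worked example, assuming a 4 KiB granule where L2_SIZE is 2 MiB and
 * L2_OFFSET is 0x1fffff: if the colored object offset gives
 * superpage_offset == 0x1d0000 and the candidate address has
 * (*addr & L2_OFFSET) == 0x10000, the address is advanced within its
 * 2 MiB region so its low 21 bits also equal 0x1d0000.  Object offsets and
 * virtual addresses then share the same alignment within a superpage,
 * which allows later promotion to 2 MiB block mappings.
 */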
5566
5567 /**
5568  * Get the kernel virtual address of a set of physical pages. If there are
5569  * physical addresses not covered by the DMAP, perform a transient mapping
5570  * that will be removed when calling pmap_unmap_io_transient.
5571  *
5572  * \param page        The pages for which the caller wishes to obtain
5573  *                    kernel virtual addresses.
5574  * \param vaddr       On return, contains the kernel virtual addresses of
5575  *                    the pages passed in the page parameter.
5576  * \param count       Number of pages passed in.
5577  * \param can_fault   TRUE if the thread using the mapped pages can take
5578  *                    page faults, FALSE otherwise.
5579  *
5580  * \returns TRUE if the caller must call pmap_unmap_io_transient when
5581  *          finished or FALSE otherwise.
5582  *
5583  */
5584 boolean_t
5585 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
5586     boolean_t can_fault)
5587 {
5588         vm_paddr_t paddr;
5589         boolean_t needs_mapping;
5590         int error, i;
5591
5592         /*
5593          * Allocate any KVA space that we need; this is done in a separate
5594          * loop to avoid calling vmem_alloc() while pinned.
5595          */
5596         needs_mapping = FALSE;
5597         for (i = 0; i < count; i++) {
5598                 paddr = VM_PAGE_TO_PHYS(page[i]);
5599                 if (__predict_false(!PHYS_IN_DMAP(paddr))) {
5600                         error = vmem_alloc(kernel_arena, PAGE_SIZE,
5601                             M_BESTFIT | M_WAITOK, &vaddr[i]);
5602                         KASSERT(error == 0, ("vmem_alloc failed: %d", error));
5603                         needs_mapping = TRUE;
5604                 } else {
5605                         vaddr[i] = PHYS_TO_DMAP(paddr);
5606                 }
5607         }
5608
5609         /* Exit early if everything is covered by the DMAP */
5610         if (!needs_mapping)
5611                 return (FALSE);
5612
5613         if (!can_fault)
5614                 sched_pin();
5615         for (i = 0; i < count; i++) {
5616                 paddr = VM_PAGE_TO_PHYS(page[i]);
5617                 if (!PHYS_IN_DMAP(paddr)) {
5618                         panic(
5619                            "pmap_map_io_transient: TODO: Map out of DMAP data");
5620                 }
5621         }
5622
5623         return (needs_mapping);
5624 }
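
/*
 * Hypothetical usage sketch (not compiled; the helper name and arguments
 * below are illustrative, not part of the pmap API).  It assumes "dst"
 * cannot fault, since can_fault is FALSE and the thread is pinned for the
 * duration of the access.
 */
#if 0
static void
example_copy_from_page(vm_page_t m, void *dst, size_t len)
{
	vm_offset_t va;
	boolean_t transient;

	/* Map the page, transiently if it is not covered by the DMAP. */
	transient = pmap_map_io_transient(&m, &va, 1, FALSE);
	memcpy(dst, (const void *)va, len);
	/* Unmap only if a transient mapping was actually created. */
	if (transient)
		pmap_unmap_io_transient(&m, &va, 1, FALSE);
}
#endif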
5625
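/*
 * Release any transient mappings created by pmap_map_io_transient() and
 * unpin the thread if it was pinned.  Pages covered by the DMAP need no
 * work; transient out-of-DMAP mappings are not yet implemented on arm64.
 */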
5626 void
5627 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
5628     boolean_t can_fault)
5629 {
5630         vm_paddr_t paddr;
5631         int i;
5632
5633         if (!can_fault)
5634                 sched_unpin();
5635         for (i = 0; i < count; i++) {
5636                 paddr = VM_PAGE_TO_PHYS(page[i]);
5637                 if (!PHYS_IN_DMAP(paddr)) {
5638                         panic("ARM64TODO: pmap_unmap_io_transient: Unmap data");
5639                 }
5640         }
5641 }
5642
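/*
 * Report whether the requested memory attribute is one this pmap can
 * express; valid modes range from VM_MEMATTR_DEVICE to
 * VM_MEMATTR_WRITE_THROUGH.
 */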
5643 boolean_t
5644 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
5645 {
5646
5647         return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH);
5648 }