2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
4 * Copyright (c) 1991, 1993
5 * The Regents of the University of California. All rights reserved.
7 * This code is derived from software contributed to Berkeley by
8 * The Mach Operating System project at Carnegie-Mellon University.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * from: @(#)vm_page.h 8.2 (Berkeley) 12/13/93
37 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38 * All rights reserved.
40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
42 * Permission to use, copy, modify and distribute this software and
43 * its documentation is hereby granted, provided that both the copyright
44 * notice and this permission notice appear in all copies of the
45 * software, derivative works or modified versions, and any portions
46 * thereof, and that both notices appear in supporting documentation.
48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52 * Carnegie Mellon requests users of this software to return to
54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
55 * School of Computer Science
56 * Carnegie Mellon University
57 * Pittsburgh PA 15213-3890
59 * any improvements or extensions that they make and grant Carnegie the
60 * rights to redistribute these changes.
66 * Resident memory system definitions.
75 * Management of resident (logical) pages.
77 * A small structure is kept for each resident
78 * page, indexed by page number. Each structure
79 * is an element of several collections:
81 * A radix tree used to quickly
82 * perform object/offset lookups
84 * A list of all pages for a given object,
85 * so they can be quickly deactivated at
86 * time of deallocation.
88 * An ordered list of pages due for pageout.
90 * In addition, the structure contains the object
91 * and offset to which this page belongs (for pageout),
92 * and sundry status bits.
94 * In general, operations on this structure's mutable fields are
95 * synchronized using either one of or a combination of locks. If a
96 * field is annotated with two of these locks then holding either is
97 * sufficient for read access but both are required for write access.
98 * The physical address of a page is used to select its page lock from
99 * a pool. The queue lock for a page depends on the value of its queue
100 * field and is described in detail below.
102 * The following annotations are possible:
103 * (A) the field is atomic and may require additional synchronization.
104 * (B) the page busy lock.
105 * (C) the field is immutable.
106 * (F) the per-domain lock for the free queues
107 * (M) Machine dependent, defined by pmap layer.
 * (O) the object that the page belongs to.
 * (P) the page lock.
 * (Q) the page's queue lock.
112 * The busy lock is an embedded reader-writer lock that protects the
113 * page's contents and identity (i.e., its <object, pindex> tuple) as
 * well as certain valid/dirty modifications.  To avoid bloating the
 * page structure, the busy lock lacks some of the features available in
 * the kernel's general-purpose synchronization primitives.  As a result,
 * busy lock ordering rules are not verified, lock recursion is not
 * detected, and an attempt to xbusy a busy page or sbusy an xbusy page
 * will trigger a panic rather than causing the thread to block.
120 * vm_page_sleep_if_busy() can be used to sleep until the page's busy
121 * state changes, after which the caller must re-lookup the page and
122 * re-evaluate its state. vm_page_busy_acquire() will block until
123 * the lock is acquired.
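 *
 * As a minimal illustrative sketch (not a sequence taken from this file),
 * a caller that holds the object lock and needs exclusive access to a
 * page's contents might do:
 *
 *	(void)vm_page_busy_acquire(m, 0);	... blocks until xbusied ...
 *	... modify the page's contents or identity ...
 *	vm_page_xunbusy(m);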
125 * The valid field is protected by the page busy lock (B) and object
126 * lock (O). Transitions from invalid to valid are generally done
127 * via I/O or zero filling and do not require the object lock.
128 * These must be protected with the busy lock to prevent page-in or
129 * creation races. Page invalidation generally happens as a result
 * of truncate or msync.  When invalidated, pages must not be present
 * in pmap, and the object lock must be held to prevent concurrent
 * speculative read-only mappings that do not require busy.  I/O
133 * routines may check for validity without a lock if they are prepared
134 * to handle invalidation races with higher level locks (vnode) or are
135 * unconcerned with races so long as they hold a reference to prevent
 * recycling.  When a valid bit is set while holding a shared busy
 * lock (A), atomic operations are used to protect against concurrent
 * modification.
140 * In contrast, the synchronization of accesses to the page's
141 * dirty field is a mix of machine dependent (M) and busy (B). In
142 * the machine-independent layer, the page busy must be held to
143 * operate on the field. However, the pmap layer is permitted to
144 * set all bits within the field without holding that lock. If the
145 * underlying architecture does not support atomic read-modify-write
146 * operations on the field's type, then the machine-independent
147 * layer uses a 32-bit atomic on the aligned 32-bit word that
148 * contains the dirty field. In the machine-independent layer,
149 * the implementation of read-modify-write operations on the
150 * field is encapsulated in vm_page_clear_dirty_mask(). An
151 * exclusive busy lock combined with pmap_remove_{write/all}() is the
 * only way to ensure a page cannot become dirty.  I/O generally
 * removes the page from pmap to ensure exclusive access and atomic
 * writes.
156 * The ref_count field tracks references to the page. References that
157 * prevent the page from being reclaimable are called wirings and are
158 * counted in the low bits of ref_count. The containing object's
159 * reference, if one exists, is counted using the VPRC_OBJREF bit in the
160 * ref_count field. Additionally, the VPRC_BLOCKED bit is used to
161 * atomically check for wirings and prevent new wirings via
162 * pmap_extract_and_hold(). When a page belongs to an object, it may be
163 * wired only when the object is locked, or the page is busy, or by
164 * pmap_extract_and_hold(). As a result, if the object is locked and the
165 * page is not busy (or is exclusively busied by the current thread), and
166 * the page is unmapped, its wire count will not increase. The ref_count
167 * field is updated using atomic operations in most cases, except when it
168 * is known that no other references to the page exist, such as in the page
169 * allocator. A page may be present in the page queues, or even actively
 * scanned by the page daemon, without an explicitly counted reference.
 * The page daemon must therefore handle the possibility of a concurrent
 * free of the page.
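 *
 * As an illustrative sketch (an assumed pattern, not copied from this
 * file), a caller holding the object's write lock can keep a page from
 * being reclaimed across a lock drop by wiring it:
 *
 *	vm_page_wire(m);
 *	VM_OBJECT_WUNLOCK(object);
 *	... use the page without the object lock ...
 *	VM_OBJECT_WLOCK(object);
 *	vm_page_unwire(m, PQ_ACTIVE);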
174 * The queue field is the index of the page queue containing the page,
175 * or PQ_NONE if the page is not enqueued. The queue lock of a page is
176 * the page queue lock corresponding to the page queue index, or the
177 * page lock (P) for the page if it is not enqueued. To modify the
178 * queue field, the queue lock for the old value of the field must be
179 * held. There is one exception to this rule: the page daemon may
180 * transition the queue field from PQ_INACTIVE to PQ_NONE immediately
181 * prior to freeing a page during an inactive queue scan. At that
182 * point the page has already been physically dequeued and no other
183 * references to that vm_page structure exist.
185 * To avoid contention on page queue locks, page queue operations
186 * (enqueue, dequeue, requeue) are batched using per-CPU queues. A
187 * deferred operation is requested by inserting an entry into a batch
188 * queue; the entry is simply a pointer to the page, and the request
189 * type is encoded in the page's aflags field using the values in
 * PGA_QUEUE_STATE_MASK.  The type-stability of struct vm_page is
191 * crucial to this scheme since the processing of entries in a given
192 * batch queue may be deferred indefinitely. In particular, a page may
193 * be freed before its pending batch queue entries have been processed.
194 * The page lock (P) must be held to schedule a batched queue
195 * operation, and the page queue lock must be held in order to process
196 * batch queue entries for the page queue. There is one exception to
197 * this rule: the thread freeing a page may schedule a dequeue without
198 * holding the page lock. In this scenario the only other thread which
199 * may hold a reference to the page is the page daemon, which is
200 * careful to avoid modifying the page's queue state once the dequeue
201 * has been requested by setting PGA_DEQUEUE.
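 *
 * As an illustrative sketch of the batching scheme described above, a
 * thread freeing a page requests a deferred dequeue rather than taking
 * the page queue lock directly:
 *
 *	vm_page_dequeue_deferred(m);
 */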
204 #if PAGE_SIZE == 4096
205 #define VM_PAGE_BITS_ALL 0xffu
206 typedef uint8_t vm_page_bits_t;
207 #elif PAGE_SIZE == 8192
208 #define VM_PAGE_BITS_ALL 0xffffu
209 typedef uint16_t vm_page_bits_t;
210 #elif PAGE_SIZE == 16384
211 #define VM_PAGE_BITS_ALL 0xffffffffu
212 typedef uint32_t vm_page_bits_t;
213 #elif PAGE_SIZE == 32768
214 #define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif
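/*
 * Worked example: the width of vm_page_bits_t follows from one bit per
 * DEV_BSIZE chunk of a page.  Assuming DEV_BSIZE is 512, PAGE_SIZE == 4096
 * gives 4096 / 512 = 8 chunks, so an 8-bit map (VM_PAGE_BITS_ALL == 0xff)
 * covers the whole page.
 */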
220 TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
222 SLIST_ENTRY(vm_page) ss; /* private slists */
230 TAILQ_ENTRY(vm_page) listq; /* pages in same object (O) */
231 vm_object_t object; /* which object am I in (O) */
232 vm_pindex_t pindex; /* offset into object (O,P) */
233 vm_paddr_t phys_addr; /* physical address of page (C) */
234 struct md_page md; /* machine dependent stuff */
235 u_int ref_count; /* page references (A) */
236 volatile u_int busy_lock; /* busy owners lock */
237 uint16_t flags; /* page PG_* flags (P) */
238 uint8_t order; /* index of the buddy queue (F) */
239 uint8_t pool; /* vm_phys freepool index (F) */
240 uint8_t aflags; /* atomic flags (A) */
241 uint8_t oflags; /* page VPO_* flags (O) */
242 uint8_t queue; /* page queue index (Q) */
243 int8_t psind; /* pagesizes[] index (O) */
244 int8_t segind; /* vm_phys segment index (C) */
245 u_char act_count; /* page usage count (P) */
246 /* NOTE that these must support one bit per DEV_BSIZE in a page */
247 /* so, on normal X86 kernels, they must be at least 8 bits wide */
248 vm_page_bits_t valid; /* valid DEV_BSIZE chunk map (O,B) */
249 vm_page_bits_t dirty; /* dirty DEV_BSIZE chunk map (M,B) */
253 * Special bits used in the ref_count field.
255 * ref_count is normally used to count wirings that prevent the page from being
256 * reclaimed, but also supports several special types of references that do not
257 * prevent reclamation. Accesses to the ref_count field must be atomic unless
258 * the page is unallocated.
 * VPRC_OBJREF is the reference held by the containing object.  It can be
 * set or cleared only when the corresponding object's write lock is held.
263 * VPRC_BLOCKED is used to atomically block wirings via pmap lookups while
264 * attempting to tear down all mappings of a given page. The page lock and
265 * object write lock must both be held in order to set or clear this bit.
267 #define VPRC_BLOCKED 0x40000000u /* mappings are being removed */
268 #define VPRC_OBJREF 0x80000000u /* object reference, cleared with (O) */
269 #define VPRC_WIRE_COUNT(c) ((c) & ~(VPRC_BLOCKED | VPRC_OBJREF))
270 #define VPRC_WIRE_COUNT_MAX (~(VPRC_BLOCKED | VPRC_OBJREF))
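/*
 * Worked example (illustrative only): a page with two wirings that also
 * belongs to an object has ref_count == (VPRC_OBJREF | 2), so
 * VPRC_WIRE_COUNT(ref_count) evaluates to 2 and vm_page_wired() returns true.
 */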
273 * Page flags stored in oflags:
275 * Access to these page flags is synchronized by the lock on the object
276 * containing the page (O).
278 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
279 * indicates that the page is not under PV management but
280 * otherwise should be treated as a normal page. Pages not
281 * under PV management cannot be paged out via the
282 * object/vm_page_t because there is no knowledge of their pte
283 * mappings, and such pages are also not on any PQ queue.
286 #define VPO_KMEM_EXEC 0x01 /* kmem mapping allows execution */
287 #define VPO_SWAPSLEEP 0x02 /* waiting for swap to finish */
288 #define VPO_UNMANAGED 0x04 /* no PV management for page */
289 #define VPO_SWAPINPROG 0x08 /* swap I/O in progress on page */
292 * Busy page implementation details.
 * The algorithm is based largely on the rwlock(9) and sx(9) lock
 * implementations, but support for owner identity is omitted because of
 * size constraints.  Consequently, checks for lock recursion are not
 * possible, and the effectiveness of the lock assertions is somewhat
 * reduced.
298 #define VPB_BIT_SHARED 0x01
299 #define VPB_BIT_EXCLUSIVE 0x02
300 #define VPB_BIT_WAITERS 0x04
301 #define VPB_BIT_FLAGMASK \
302 (VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)
304 #define VPB_SHARERS_SHIFT 3
305 #define VPB_SHARERS(x) \
306 (((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
307 #define VPB_SHARERS_WORD(x) ((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
308 #define VPB_ONE_SHARER (1 << VPB_SHARERS_SHIFT)
310 #define VPB_SINGLE_EXCLUSIVER VPB_BIT_EXCLUSIVE
312 #define VPB_UNBUSIED VPB_SHARERS_WORD(0)
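/*
 * Worked example (illustrative only): with VPB_SHARERS_SHIFT == 3, a
 * busy_lock value of VPB_SHARERS_WORD(2) == (2 << 3) | VPB_BIT_SHARED == 0x11
 * describes a page shared-busied by two threads, and VPB_SHARERS(0x11)
 * recovers the sharer count of 2.
 */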
315 #define PQ_INACTIVE 0
318 #define PQ_UNSWAPPABLE 3
321 #ifndef VM_PAGE_HAVE_PGLIST
322 TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
325 SLIST_HEAD(spglist, vm_page);
328 extern vm_page_t bogus_page;
331 extern struct mtx_padalign pa_lock[];
334 #define PDRSHIFT PDR_SHIFT
335 #elif !defined(PDRSHIFT)
339 #define pa_index(pa) ((pa) >> PDRSHIFT)
340 #define PA_LOCKPTR(pa) ((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
341 #define PA_LOCKOBJPTR(pa) ((struct lock_object *)PA_LOCKPTR((pa)))
342 #define PA_LOCK(pa) mtx_lock(PA_LOCKPTR(pa))
343 #define PA_TRYLOCK(pa) mtx_trylock(PA_LOCKPTR(pa))
344 #define PA_UNLOCK(pa) mtx_unlock(PA_LOCKPTR(pa))
345 #define PA_UNLOCK_COND(pa) \
353 #define PA_LOCK_ASSERT(pa, a) mtx_assert(PA_LOCKPTR(pa), (a))
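/*
 * Illustrative sketch (assumed usage, not taken from this file): serializing
 * on the pool lock that covers a given physical address.
 *
 *	PA_LOCK(pa);
 *	... operate on the vm_page identified by pa ...
 *	PA_UNLOCK(pa);
 */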
355 #if defined(KLD_MODULE) && !defined(KLD_TIED)
356 #define vm_page_lock(m) vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
357 #define vm_page_unlock(m) vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
358 #define vm_page_trylock(m) vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
359 #else /* !KLD_MODULE */
360 #define vm_page_lockptr(m) (PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
361 #define vm_page_lock(m) mtx_lock(vm_page_lockptr((m)))
362 #define vm_page_unlock(m) mtx_unlock(vm_page_lockptr((m)))
#define vm_page_trylock(m) mtx_trylock(vm_page_lockptr((m)))
#endif /* KLD_MODULE */
365 #if defined(INVARIANTS)
366 #define vm_page_assert_locked(m) \
367 vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
368 #define vm_page_lock_assert(m, a) \
369 vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define vm_page_assert_locked(m)
#define vm_page_lock_assert(m, a)
#endif
376 * The vm_page's aflags are updated using atomic operations. To set or clear
377 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
378 * must be used. Neither these flags nor these functions are part of the KBI.
380 * PGA_REFERENCED may be cleared only if the page is locked. It is set by
381 * both the MI and MD VM layers. However, kernel loadable modules should not
382 * directly set this flag. They should call vm_page_reference() instead.
384 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
385 * When it does so, the object must be locked, or the page must be
386 * exclusive busied. The MI VM layer must never access this flag
387 * directly. Instead, it should call pmap_page_is_write_mapped().
389 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
390 * at least one executable mapping. It is not consumed by the MI VM layer.
392 * PGA_NOSYNC must be set and cleared with the page busy lock held.
394 * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
395 * from a page queue, respectively. It determines whether the plinks.q field
396 * of the page is valid. To set or clear this flag, the queue lock for the
397 * page must be held: the page queue lock corresponding to the page's "queue"
398 * field if its value is not PQ_NONE, and the page lock otherwise.
400 * PGA_DEQUEUE is set when the page is scheduled to be dequeued from a page
401 * queue, and cleared when the dequeue request is processed. A page may
402 * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
403 * is requested after the page is scheduled to be enqueued but before it is
404 * actually inserted into the page queue. For allocated pages, the page lock
405 * must be held to set this flag, but it may be set by vm_page_free_prep()
 * without the page lock held.  The page queue lock must be held to clear
 * the flag.
409 * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
410 * in its page queue. The page lock must be held to set this flag, and the
411 * queue lock for the page must be held to clear it.
413 * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
414 * the inactive queue, thus bypassing LRU. The page lock must be held to
415 * set this flag, and the queue lock for the page must be held to clear it.
417 #define PGA_WRITEABLE 0x01 /* page may be mapped writeable */
418 #define PGA_REFERENCED 0x02 /* page has been referenced */
419 #define PGA_EXECUTABLE 0x04 /* page may be mapped executable */
420 #define PGA_ENQUEUED 0x08 /* page is enqueued in a page queue */
421 #define PGA_DEQUEUE 0x10 /* page is due to be dequeued */
422 #define PGA_REQUEUE 0x20 /* page is due to be requeued */
423 #define PGA_REQUEUE_HEAD 0x40 /* page requeue should bypass LRU */
424 #define PGA_NOSYNC 0x80 /* do not collect for syncer */
#define PGA_QUEUE_STATE_MASK (PGA_ENQUEUED | PGA_DEQUEUE | PGA_REQUEUE | \
    PGA_REQUEUE_HEAD)
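/*
 * Illustrative sketch: per the rules above, a kernel module records a page
 * reference through the wrapper below rather than setting PGA_REFERENCED
 * directly with vm_page_aflag_set().
 *
 *	vm_page_reference(m);
 */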
430 * Page flags. If changed at any other time than page allocation or
431 * freeing, the modification must be protected by the vm_page lock.
433 * The PG_PCPU_CACHE flag is set at allocation time if the page was
434 * allocated from a per-CPU cache. It is cleared the next time that the
435 * page is allocated from the physical memory allocator.
437 #define PG_PCPU_CACHE 0x0001 /* was allocated from per-CPU caches */
438 #define PG_FICTITIOUS 0x0004 /* physical page doesn't exist */
439 #define PG_ZERO 0x0008 /* page is zeroed */
440 #define PG_MARKER 0x0010 /* special queue marker page */
441 #define PG_NODUMP 0x0080 /* don't include this page in a dump */
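/*
 * Illustrative sketch (assumed usage): a caller that requested VM_ALLOC_ZERO
 * still checks PG_ZERO, since the flag only indicates that the page was
 * already zero-filled when it was allocated.
 *
 *	if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */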
446 #define ACT_DECLINE 1
447 #define ACT_ADVANCE 3
453 #include <sys/systm.h>
455 #include <machine/atomic.h>
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This list is approximately LRU ordered.
 *
 *	laundry
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	unswappable
 *		Dirty anonymous pages that cannot be paged
 *		out because no swap device is configured.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
481 extern vm_page_t vm_page_array; /* First resident page in table */
482 extern long vm_page_array_size; /* number of vm_page_t's */
483 extern long first_page; /* first physical page number */
485 #define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
488 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
489 * page to which the given physical address belongs. The correct vm_page_t
490 * object is returned for addresses that are not page-aligned.
492 vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
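/*
 * Illustrative sketch: round-tripping an unaligned physical address.  The
 * page's own physical address is the containing page boundary, so the
 * assertion below holds (trunc_page() is assumed to come from the MD
 * headers).
 *
 *	vm_page_t m = PHYS_TO_VM_PAGE(pa);
 *	KASSERT(VM_PAGE_TO_PHYS(m) == trunc_page(pa), ("stale pa"));
 */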
 * Page allocation parameters for the functions vm_page_alloc(),
 * vm_page_grab(), vm_page_alloc_contig() and vm_page_alloc_freelist().
 * Some functions support only a subset of the flags and ignore the
 * others; see the flags legend.
500 * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
501 * and the vm_page_grab*() functions. See these functions for details.
 * Bits 0 - 1 define class.
 * Bits 2 - 15 are dedicated to flags.
506 * (a) - vm_page_alloc() supports the flag.
507 * (c) - vm_page_alloc_contig() supports the flag.
508 * (f) - vm_page_alloc_freelist() supports the flag.
509 * (g) - vm_page_grab() supports the flag.
510 * (p) - vm_page_grab_pages() supports the flag.
511 * Bits above 15 define the count of additional pages that the caller
512 * intends to allocate.
514 #define VM_ALLOC_NORMAL 0
515 #define VM_ALLOC_INTERRUPT 1
516 #define VM_ALLOC_SYSTEM 2
517 #define VM_ALLOC_CLASS_MASK 3
518 #define VM_ALLOC_WAITOK 0x0008 /* (acf) Sleep and retry */
519 #define VM_ALLOC_WAITFAIL 0x0010 /* (acf) Sleep and return error */
520 #define VM_ALLOC_WIRED 0x0020 /* (acfgp) Allocate a wired page */
521 #define VM_ALLOC_ZERO 0x0040 /* (acfgp) Allocate a prezeroed page */
522 #define VM_ALLOC_NOOBJ 0x0100 /* (acg) No associated object */
523 #define VM_ALLOC_NOBUSY 0x0200 /* (acgp) Do not excl busy the page */
524 #define VM_ALLOC_NOCREAT 0x0400 /* (gp) Don't create a page */
525 #define VM_ALLOC_IGN_SBUSY 0x1000 /* (gp) Ignore shared busy flag */
526 #define VM_ALLOC_NODUMP 0x2000 /* (ag) don't include in dump */
527 #define VM_ALLOC_SBUSY 0x4000 /* (acgp) Shared busy the page */
528 #define VM_ALLOC_NOWAIT 0x8000 /* (acfgp) Do not sleep */
529 #define VM_ALLOC_COUNT_SHIFT 16
530 #define VM_ALLOC_COUNT(count) ((count) << VM_ALLOC_COUNT_SHIFT)
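/*
 * Illustrative sketch (assumed usage): allocate a wired page for an object,
 * preferring a prezeroed page and hinting that about eight further
 * allocations will follow.  The object must be locked, and VM_ALLOC_NORMAL
 * does not sleep, so failure must be handled.
 *
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
 *	    VM_ALLOC_ZERO | VM_ALLOC_COUNT(8));
 *	if (m == NULL)
 *		... back off and retry, or fail ...
 */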
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	if ((malloc_flags & M_NOWAIT) != 0)
		pflags |= VM_ALLOC_NOWAIT;
	if ((malloc_flags & M_WAITOK) != 0)
		pflags |= VM_ALLOC_WAITOK;
	return (pflags);
}
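/*
 * Worked example: malloc2vm_flags(M_NOWAIT | M_ZERO) returns
 * VM_ALLOC_SYSTEM | VM_ALLOC_ZERO | VM_ALLOC_NOWAIT, i.e. a non-sleeping,
 * system-class request for a prezeroed page.
 */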
556 * Predicates supported by vm_page_ps_test():
558 * PS_ALL_DIRTY is true only if the entire (super)page is dirty.
559 * However, it can be spuriously false when the (super)page has become
560 * dirty in the pmap but that information has not been propagated to the
561 * machine-independent layer.
563 #define PS_ALL_DIRTY 0x1
564 #define PS_ALL_VALID 0x2
565 #define PS_NONE_BUSY 0x4
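/*
 * Illustrative sketch: checking that a candidate superpage is fully valid and
 * has no busy constituent pages before acting on it as a unit.
 *
 *	if (vm_page_ps_test(m, PS_ALL_VALID | PS_NONE_BUSY, NULL))
 *		... every constituent page is valid and unbusied ...
 */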
567 int vm_page_busy_acquire(vm_page_t m, int allocflags);
568 void vm_page_busy_downgrade(vm_page_t m);
569 int vm_page_busy_tryupgrade(vm_page_t m);
570 void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
571 void vm_page_free(vm_page_t m);
572 void vm_page_free_zero(vm_page_t m);
574 void vm_page_activate (vm_page_t);
575 void vm_page_advise(vm_page_t m, int advice);
576 vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
577 vm_page_t vm_page_alloc_domain(vm_object_t, vm_pindex_t, int, int);
578 vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
579 vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
581 vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
582 u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
583 vm_paddr_t boundary, vm_memattr_t memattr);
584 vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
585 vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
586 vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
587 vm_memattr_t memattr);
588 vm_page_t vm_page_alloc_freelist(int, int);
589 vm_page_t vm_page_alloc_freelist_domain(int, int, int);
590 bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
591 void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
592 vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
593 int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
594 vm_page_t *ma, int count);
595 int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
597 void vm_page_deactivate(vm_page_t);
598 void vm_page_deactivate_noreuse(vm_page_t);
599 void vm_page_dequeue(vm_page_t m);
600 void vm_page_dequeue_deferred(vm_page_t m);
601 vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
602 bool vm_page_free_prep(vm_page_t m);
603 vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
604 void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
605 int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
606 void vm_page_invalid(vm_page_t m);
607 void vm_page_launder(vm_page_t m);
608 vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
609 vm_page_t vm_page_next(vm_page_t m);
610 int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
611 void vm_page_pqbatch_drain(void);
612 void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
613 vm_page_t vm_page_prev(vm_page_t m);
614 bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
615 void vm_page_putfake(vm_page_t m);
616 void vm_page_readahead_finish(vm_page_t m);
617 bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
618 vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
619 bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
620 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
621 void vm_page_reference(vm_page_t m);
622 #define VPR_TRYFREE 0x01
623 #define VPR_NOREUSE 0x02
624 void vm_page_release(vm_page_t m, int flags);
625 void vm_page_release_locked(vm_page_t m, int flags);
626 bool vm_page_remove(vm_page_t);
627 int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
628 vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object,
630 void vm_page_requeue(vm_page_t m);
631 int vm_page_sbusied(vm_page_t m);
632 vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start,
633 vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options);
634 void vm_page_set_valid_range(vm_page_t m, int base, int size);
635 int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
636 int vm_page_sleep_if_xbusy(vm_page_t m, const char *msg);
637 vm_offset_t vm_page_startup(vm_offset_t vaddr);
638 void vm_page_sunbusy(vm_page_t m);
639 void vm_page_swapqueue(vm_page_t m, uint8_t oldq, uint8_t newq);
640 bool vm_page_try_remove_all(vm_page_t m);
641 bool vm_page_try_remove_write(vm_page_t m);
642 int vm_page_trysbusy(vm_page_t m);
643 int vm_page_tryxbusy(vm_page_t m);
644 void vm_page_unhold_pages(vm_page_t *ma, int count);
645 void vm_page_unswappable(vm_page_t m);
646 void vm_page_unwire(vm_page_t m, uint8_t queue);
647 bool vm_page_unwire_noq(vm_page_t m);
648 void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
649 void vm_page_wire(vm_page_t);
650 bool vm_page_wire_mapped(vm_page_t m);
651 void vm_page_xunbusy_hard(vm_page_t m);
652 void vm_page_set_validclean (vm_page_t, int, int);
653 void vm_page_clear_dirty(vm_page_t, int, int);
654 void vm_page_set_invalid(vm_page_t, int, int);
655 void vm_page_valid(vm_page_t m);
656 int vm_page_is_valid(vm_page_t, int, int);
657 void vm_page_test_dirty(vm_page_t);
658 vm_page_bits_t vm_page_bits(int base, int size);
659 void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
660 void vm_page_free_toq(vm_page_t m);
661 void vm_page_free_pages_toq(struct spglist *free, bool update_wire_count);
663 void vm_page_dirty_KBI(vm_page_t m);
664 void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
665 void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
666 int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
667 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
668 void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif
672 #define vm_page_assert_busied(m) \
673 KASSERT(vm_page_busied(m), \
674 ("vm_page_assert_busied: page %p not busy @ %s:%d", \
675 (m), __FILE__, __LINE__))
677 #define vm_page_assert_sbusied(m) \
678 KASSERT(vm_page_sbusied(m), \
679 ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
680 (m), __FILE__, __LINE__))
682 #define vm_page_assert_unbusied(m) \
683 KASSERT(!vm_page_busied(m), \
684 ("vm_page_assert_unbusied: page %p busy @ %s:%d", \
685 (m), __FILE__, __LINE__))
687 #define vm_page_assert_xbusied(m) \
688 KASSERT(vm_page_xbusied(m), \
689 ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
690 (m), __FILE__, __LINE__))
692 #define vm_page_busied(m) \
693 ((m)->busy_lock != VPB_UNBUSIED)
#define vm_page_sbusy(m) do { \
	if (!vm_page_trysbusy(m)) \
		panic("%s: page %p failed shared busying", __func__, \
		    (m)); \
} while (0)
701 #define vm_page_xbusied(m) \
702 (((m)->busy_lock & VPB_SINGLE_EXCLUSIVER) != 0)
#define vm_page_xbusy(m) do { \
	if (!vm_page_tryxbusy(m)) \
		panic("%s: page %p failed exclusive busying", __func__, \
		    (m)); \
} while (0)
710 /* Note: page m's lock must not be owned by the caller. */
#define vm_page_xunbusy(m) do { \
	if (!atomic_cmpset_rel_int(&(m)->busy_lock, \
	    VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED)) \
		vm_page_xunbusy_hard(m); \
} while (0)
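/*
 * Illustrative sketch (assumed usage): exclusively busy a page found under
 * the object lock, update it, and then release the busy lock.
 *
 *	m = vm_page_lookup(object, pindex);
 *	if (m != NULL && vm_page_tryxbusy(m) != 0) {
 *		... update the page ...
 *		vm_page_xunbusy(m);
 *	}
 */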
#if defined(INVARIANTS)
void vm_page_object_busy_assert(vm_page_t m);
#define VM_PAGE_OBJECT_BUSY_ASSERT(m) vm_page_object_busy_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits);
#define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits) \
	vm_page_assert_pga_writeable(m, bits)
#else
#define VM_PAGE_OBJECT_BUSY_ASSERT(m) (void)0
#define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits) (void)0
#endif
729 * We want to use atomic updates for the aflags field, which is 8 bits wide.
730 * However, not all architectures support atomic operations on 8-bit
731 * destinations. In order that we can easily use a 32-bit operation, we
732 * require that the aflags field be 32-bit aligned.
734 _Static_assert(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0,
735 "aflags field is not 32-bit aligned");
738 * We want to be able to update the aflags and queue fields atomically in
739 * the same operation.
741 _Static_assert(offsetof(struct vm_page, aflags) / sizeof(uint32_t) ==
742 offsetof(struct vm_page, queue) / sizeof(uint32_t),
743 "aflags and queue fields do not belong to the same 32-bit word");
744 _Static_assert(offsetof(struct vm_page, queue) % sizeof(uint32_t) == 2,
745 "queue field is at an unexpected offset");
746 _Static_assert(sizeof(((struct vm_page *)NULL)->queue) == 1,
747 "queue field has an unexpected size");
#if BYTE_ORDER == LITTLE_ENDIAN
#define VM_PAGE_AFLAG_SHIFT 0
#define VM_PAGE_QUEUE_SHIFT 16
#else
#define VM_PAGE_AFLAG_SHIFT 24
#define VM_PAGE_QUEUE_SHIFT 8
#endif
#define VM_PAGE_QUEUE_MASK (0xff << VM_PAGE_QUEUE_SHIFT)
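/*
 * Worked example (little-endian): if w is the 32-bit word read from the
 * address of the aflags field, then (w >> VM_PAGE_AFLAG_SHIFT) & 0xff is the
 * aflags byte and (w & VM_PAGE_QUEUE_MASK) >> VM_PAGE_QUEUE_SHIFT is the
 * queue index.
 */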
/*
 * Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/* The PGA_REFERENCED flag can only be cleared if the page is locked. */
	if ((bits & PGA_REFERENCED) != 0)
		vm_page_assert_locked(m);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	val = bits << VM_PAGE_AFLAG_SHIFT;
	atomic_clear_32(addr, val);
}
/*
 * Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);
	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	val = bits << VM_PAGE_AFLAG_SHIFT;
	atomic_set_32(addr, val);
}
803 * Atomically update the queue state of the page. The operation fails if
804 * any of the queue flags in "fflags" are set or if the "queue" field of
805 * the page does not match the expected value; if the operation is
 * successful, the flags in "nflags" are set and all other queue state
 * flags are cleared.
static inline bool
vm_page_pqstate_cmpset(vm_page_t m, uint32_t oldq, uint32_t newq,
    uint32_t fflags, uint32_t nflags)
{
	uint32_t *addr, nval, oval, qsmask;

	fflags <<= VM_PAGE_AFLAG_SHIFT;
	nflags <<= VM_PAGE_AFLAG_SHIFT;
	newq <<= VM_PAGE_QUEUE_SHIFT;
	oldq <<= VM_PAGE_QUEUE_SHIFT;
	qsmask = ((PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD) <<
	    VM_PAGE_AFLAG_SHIFT) | VM_PAGE_QUEUE_MASK;

	addr = (void *)&m->aflags;
	oval = atomic_load_32(addr);
	do {
		if ((oval & fflags) != 0)
			return (false);
		if ((oval & VM_PAGE_QUEUE_MASK) != oldq)
			return (false);
		nval = (oval & ~qsmask) | nflags | newq;
	} while (!atomic_fcmpset_32(addr, &oval, nval));

	return (true);
}
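/*
 * Illustrative sketch (assumed usage): atomically request a requeue of an
 * inactive page unless a dequeue is already pending, then hand the page to
 * the per-CPU batch queue.
 *
 *	if (vm_page_pqstate_cmpset(m, PQ_INACTIVE, PQ_INACTIVE,
 *	    PGA_DEQUEUE, PGA_REQUEUE))
 *		vm_page_pqbatch_submit(m, PQ_INACTIVE);
 */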
838 * Set all bits in the page's dirty field.
840 * The object containing the specified page must be locked if the
841 * call is made from the machine-independent layer.
843 * See vm_page_clear_dirty_mask().
static inline void
vm_page_dirty(vm_page_t m)
{
	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if (defined(KLD_MODULE) && !defined(KLD_TIED)) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}
860 * Set page to not be dirty. Note: does not clear pmap modify bits
863 vm_page_undirty(vm_page_t m)
866 VM_PAGE_OBJECT_BUSY_ASSERT(m);
871 vm_page_replace_checked(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
876 mret = vm_page_replace(mnew, object, pindex);
877 KASSERT(mret == mold,
878 ("invalid page replacement, mold=%p, mret=%p", mold, mret));
880 /* Unused if !INVARIANTS. */
888 * Return the index of the queue containing m. This index is guaranteed
889 * not to change while the page lock is held.
891 static inline uint8_t
vm_page_queue(vm_page_t m)
{
	vm_page_assert_locked(m);
	if ((m->aflags & PGA_DEQUEUE) != 0)
		return (PQ_NONE);
	atomic_thread_fence_acq();
	return (m->queue);
}
904 vm_page_active(vm_page_t m)
907 return (vm_page_queue(m) == PQ_ACTIVE);
911 vm_page_inactive(vm_page_t m)
914 return (vm_page_queue(m) == PQ_INACTIVE);
918 vm_page_in_laundry(vm_page_t m)
922 queue = vm_page_queue(m);
923 return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
929 * Release a reference to a page and return the old reference count.
static inline u_int
vm_page_drop(vm_page_t m, u_int val)
{
	u_int old;
	/*
	 * Synchronize with vm_page_free_prep(): ensure that all updates to
	 * the page structure are visible before it is freed.
	 */
	atomic_thread_fence_rel();
	old = atomic_fetchadd_int(&m->ref_count, -val);
	KASSERT(old != VPRC_BLOCKED,
	    ("vm_page_drop: page %p has an invalid refcount value", m));
	return (old);
}
950 * Perform a racy check to determine whether a reference prevents the page
951 * from being reclaimable. If the page's object is locked, and the page is
952 * unmapped and unbusied or exclusively busied by the current thread, no
953 * new wirings may be created.
956 vm_page_wired(vm_page_t m)
959 return (VPRC_WIRE_COUNT(m->ref_count) > 0);
963 vm_page_all_valid(vm_page_t m)
966 return (m->valid == VM_PAGE_BITS_ALL);
970 vm_page_none_valid(vm_page_t m)
973 return (m->valid == 0);
977 #endif /* !_VM_PAGE_ */