/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several collections:
 *
 *		A radix tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using either one of or a combination of the lock on the
 *	object that the page belongs to (O), the pool lock for the page (P),
 *	or the lock for either the free or paging queue (Q).  If a field is
 *	annotated below with two of these locks, then holding either lock is
 *	sufficient for read access, but both locks are required for write
 *	access.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is machine dependent (M).  In the
 *	machine-independent layer, the lock on the object that the
 *	page belongs to must be held in order to operate on the field.
 *	However, the pmap layer is permitted to set all bits within
 *	the field without holding that lock.  If the underlying
 *	architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().
 */
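
/*
 * A minimal sketch of the technique described above; the canonical code is
 * vm_page_clear_dirty_mask() in vm_page.c.  "pagebits" is hypothetical here
 * and holds the dirty bits to clear, already shifted to their position
 * within the aligned 32-bit word (byte-order adjustments omitted):
 *
 *	uint32_t *addr;
 *
 *	addr = (void *)((uintptr_t)&m->dirty & ~(sizeof(uint32_t) - 1));
 *	atomic_clear_32(addr, pagebits);
 */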

#if PAGE_SIZE == 4096
#define	VM_PAGE_BITS_ALL 0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define	VM_PAGE_BITS_ALL 0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define	VM_PAGE_BITS_ALL 0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define	VM_PAGE_BITS_ALL 0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif
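
/*
 * Each bit of a vm_page_bits_t tracks one DEV_BSIZE chunk of the page; for
 * example, with PAGE_SIZE == 4096 and DEV_BSIZE == 512 there are eight
 * chunks, so uint8_t suffices.  The bit covering byte offset "off" within
 * a page is:
 *
 *	(vm_page_bits_t)1 << (off / DEV_BSIZE)
 *
 * vm_page_bits(base, size), declared below, builds such masks for byte
 * ranges.
 */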

struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private slists */
			void *pv;
		} s;
		struct {
			u_long p;
			u_long v;
		} memguard;
	} plinks;
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	u_int wire_count;		/* wired down maps refs (P) */
	volatile u_int busy_lock;	/* busy owners lock */
	uint16_t hold_count;		/* page hold count (P) */
	uint16_t flags;			/* page PG_* flags (P) */
	uint8_t aflags;			/* access is atomic */
	uint8_t oflags;			/* page VPO_* flags (O) */
	uint8_t queue;			/* page queue index (P,Q) */
	int8_t psind;			/* pagesizes[] index (O) */
	uint8_t segind;			/* vm_phys segment index */
	uint8_t order;			/* index of the buddy queue */
	uint8_t pool;			/* vm_phys free pool index */
	u_char act_count;		/* page usage count (P) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* map of valid DEV_BSIZE chunks (O) */
	vm_page_bits_t dirty;		/* map of dirty DEV_BSIZE chunks (M) */
};

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 * indicates that the page is not under PV management but
 * otherwise should be treated as a normal page.  Pages not
 * under PV management cannot be paged out via the
 * object/vm_page_t because there is no knowledge of their pte
 * mappings, and such pages are also not on any PQ queue.
 */
#define	VPO_UNUSED01	0x01		/* --available-- */
#define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
#define	VPO_UNMANAGED	0x04		/* no PV management for page */
#define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */
#define	VPO_NOSYNC	0x10		/* do not collect for syncer */
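
/*
 * For example (illustrative only, not a rule imposed by this header), MI
 * code that manipulates page queues typically skips unmanaged pages, which
 * are never enqueued:
 *
 *	if ((m->oflags & VPO_UNMANAGED) == 0)
 *		vm_page_deactivate(m);
 */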

/*
 * Busy page implementation details.
 * The algorithm is taken mostly from the rwlock(9) and sx(9) lock
 * implementations, although support for owner identity is dropped because
 * of size constraints.  Checks on lock recursion are therefore not
 * possible, and the effectiveness of the lock assertions is somewhat
 * reduced.
 */
#define	VPB_BIT_SHARED		0x01
#define	VPB_BIT_EXCLUSIVE	0x02
#define	VPB_BIT_WAITERS		0x04
#define	VPB_BIT_FLAGMASK						\
	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define	VPB_SHARERS_SHIFT	3
#define	VPB_SHARERS(x)							\
	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)

#define	VPB_SINGLE_EXCLUSIVER	VPB_BIT_EXCLUSIVE

#define	VPB_UNBUSIED		VPB_SHARERS_WORD(0)
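
/*
 * For example, VPB_UNBUSIED is VPB_SHARERS_WORD(0) == 0x01 (VPB_BIT_SHARED
 * with a zero sharer count), a page shared busied by two threads holds
 * VPB_SHARERS_WORD(2) == (2 << 3) | 0x01 == 0x11, and an exclusively busied
 * page holds VPB_SINGLE_EXCLUSIVER.  VPB_SHARERS(0x11) recovers the count 2.
 */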

#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_LAUNDRY	2
#define	PQ_UNSWAPPABLE	3
#define	PQ_COUNT	4

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
SLIST_HEAD(spglist, vm_page);

struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		pq_cnt;
	u_int		* const pq_vcnt;
	const char	* const pq_name;
} __aligned(CACHE_LINE_SIZE);

struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	u_int vmd_page_count;
	u_int vmd_free_count;
	long vmd_segs;	/* bitmask of the segments */
	boolean_t vmd_oom;
	int vmd_oom_seq;
	int vmd_last_active_scan;
	struct vm_page vmd_laundry_marker;
	struct vm_page vmd_marker; /* marker for pagedaemon private use */
	struct vm_page vmd_inacthead; /* marker for LRU-defeating insertions */
};

extern struct vm_domain vm_dom[MAXMEMDOM];

#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)

#ifdef _KERNEL
extern vm_page_t bogus_page;

static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

#ifdef notyet
	vm_pagequeue_assert_locked(pq);
#endif
	pq->pq_cnt += addend;
	atomic_add_int(pq->pq_vcnt, addend);
}
#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)
#endif	/* _KERNEL */
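
/*
 * A typical (illustrative) use of the counter helpers when removing a page
 * from its queue:
 *
 *	vm_pagequeue_lock(pq);
 *	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 *	vm_pagequeue_cnt_dec(pq);
 *	vm_pagequeue_unlock(pq);
 */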

extern struct mtx_padalign vm_page_queue_free_mtx;
extern struct mtx_padalign pa_lock[];

#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define	PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa)			\
	do {					\
		if ((pa) != 0) {		\
			PA_UNLOCK(pa);		\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))
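
/*
 * Illustrative use of the physical-address locks: a caller that walks pages
 * by physical address holds at most one pa_lock at a time and uses
 * PA_UNLOCK_COND() to drop a lock conditionally carried over from a prior
 * iteration ("pa" is zero when no lock is held):
 *
 *	PA_UNLOCK_COND(pa);
 *	pa = VM_PAGE_TO_PHYS(m);
 *	PA_LOCK(pa);
 */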

#ifdef KLD_MODULE
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif

#if defined(INVARIANTS)
#define	vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_assert_locked(m)
#define	vm_page_lock_assert(m, a)
#endif

/*
 * The vm_page's aflags are updated using atomic operations.  To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used.  Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
 * When it does so, the object must be locked, or the page must be
 * exclusive busied.  The MI VM layer must never access this flag
 * directly.  Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 */
#define	PGA_WRITEABLE	0x01		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x02		/* page has been referenced */
#define	PGA_EXECUTABLE	0x04		/* page may be mapped executable */
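
/*
 * For example, a kernel module that wants to record a reference must not
 * set PGA_REFERENCED itself; the supported (KBI) spelling is simply:
 *
 *	vm_page_reference(m);
 */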

/*
 * Page flags.  If changed at any other time than page allocation or
 * freeing, the modification must be protected by the vm_page lock.
 */
#define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
#define	PG_ZERO		0x0008		/* page is zeroed */
#define	PG_MARKER	0x0010		/* special queue marker page */
#define	PG_NODUMP	0x0080		/* don't include this page in a dump */
#define	PG_UNHOLDFREE	0x0100		/* delayed free of a held page */

/*
 * Misc constants.
 */
#define	ACT_DECLINE		1
#define	ACT_ADVANCE		3
#define	ACT_INIT		5
#define	ACT_MAX			64

#ifdef _KERNEL

#include <sys/systm.h>

#include <machine/atomic.h>

/*
 *	Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This list is approximately LRU ordered.
 *
 *	laundry
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	unswappable
 *		Dirty anonymous pages that cannot be paged
 *		out because no swap device is configured.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define	VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
 * page to which the given physical address belongs.  The correct vm_page_t
 * object is returned for addresses that are not page-aligned.
 */
vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
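
/*
 * The two conversions are inverses up to page alignment: for any page in
 * vm_page_array and any 0 <= off < PAGE_SIZE,
 *
 *	PHYS_TO_VM_PAGE(VM_PAGE_TO_PHYS(m) + off) == m
 */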

/*
 * Page allocation parameters for vm_page for the functions
 * vm_page_alloc(), vm_page_grab(), vm_page_alloc_contig() and
 * vm_page_alloc_freelist().  Some functions support only a subset
 * of the flags, and ignore others; see the flags legend.
 *
 * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
 * and the vm_page_grab*() functions.  See these functions for details.
 *
 * Bits 0 - 1 define class.
 * Bits 2 - 15 dedicated for flags.
 * Legend:
 * (a) - vm_page_alloc() supports the flag.
 * (c) - vm_page_alloc_contig() supports the flag.
 * (f) - vm_page_alloc_freelist() supports the flag.
 * (g) - vm_page_grab() supports the flag.
 * (p) - vm_page_grab_pages() supports the flag.
 * Bits above 15 define the count of additional pages that the caller
 * intends to allocate.
 */
#define	VM_ALLOC_NORMAL		0
#define	VM_ALLOC_INTERRUPT	1
#define	VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
#define	VM_ALLOC_WAITOK		0x0008	/* (acf) Sleep and retry */
#define	VM_ALLOC_WAITFAIL	0x0010	/* (acf) Sleep and return error */
#define	VM_ALLOC_WIRED		0x0020	/* (acfgp) Allocate a wired page */
#define	VM_ALLOC_ZERO		0x0040	/* (acfgp) Allocate a prezeroed page */
#define	VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
#define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
#define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
#define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
#define	VM_ALLOC_NOWAIT		0x8000	/* (acfgp) Do not sleep */
#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)
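
/*
 * For example, an (illustrative) caller that must not sleep and wants a
 * wired, prezeroed page might request:
 *
 *	m = vm_page_alloc(obj, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT | VM_ALLOC_WIRED |
 *	    VM_ALLOC_ZERO);
 *
 * It must handle a NULL return, and because VM_ALLOC_ZERO is only a hint,
 * it must still test PG_ZERO before relying on zeroed contents.
 */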

#ifdef M_NOWAIT
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	if ((malloc_flags & M_NOWAIT) != 0)
		pflags |= VM_ALLOC_NOWAIT;
	if ((malloc_flags & M_WAITOK) != 0)
		pflags |= VM_ALLOC_WAITOK;
	return (pflags);
}
#endif
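
/*
 * For instance, malloc2vm_flags(M_WAITOK | M_ZERO) yields
 * VM_ALLOC_SYSTEM | VM_ALLOC_ZERO | VM_ALLOC_WAITOK.
 */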

/*
 * Predicates supported by vm_page_ps_test():
 *
 *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
 *	However, it can be spuriously false when the (super)page has become
 *	dirty in the pmap but that information has not been propagated to the
 *	machine-independent layer.
 */
#define	PS_ALL_DIRTY	0x1
#define	PS_ALL_VALID	0x2
#define	PS_NONE_BUSY	0x4
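
/*
 * An (illustrative) caller asking whether a whole superpage is dirty before
 * creating a writable superpage mapping, with the object containing "m"
 * locked:
 *
 *	if (vm_page_ps_test(m, PS_ALL_DIRTY, NULL))
 *		...
 */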

void vm_page_busy_downgrade(vm_page_t m);
void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
void vm_page_flash(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_domain(vm_object_t, vm_pindex_t, int, int);
vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
    vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
    vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
vm_page_t vm_page_alloc_freelist_domain(int, int, int);
void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count);
void vm_page_deactivate(vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_locked(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
void vm_page_free_phys_pglist(struct pglist *tq);
bool vm_page_free_prep(vm_page_t m, bool pagequeue_locked);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
struct vm_pagequeue *vm_page_pagequeue(vm_page_t m);
vm_page_t vm_page_prev(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
bool vm_page_reclaim_contig_domain(int req, u_long npages, int domain,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
void vm_page_reference(vm_page_t m);
void vm_page_remove (vm_page_t);
int vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex);
void vm_page_requeue(vm_page_t m);
void vm_page_requeue_locked(vm_page_t m);
int vm_page_sbusied(vm_page_t m);
vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start,
    vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
bool vm_page_try_to_free(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unswappable(vm_page_t m);
boolean_t vm_page_unwire(vm_page_t m, uint8_t queue);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire (vm_page_t);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_xunbusy_maybelocked(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#define	vm_page_assert_sbusied(m)					\
	KASSERT(vm_page_sbusied(m),					\
	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_unbusied(m)					\
	KASSERT(!vm_page_busied(m),					\
	    ("vm_page_assert_unbusied: page %p busy @ %s:%d",		\
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_xbusied(m)					\
	KASSERT(vm_page_xbusied(m),					\
	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_busied(m)						\
	((m)->busy_lock != VPB_UNBUSIED)

#define	vm_page_sbusy(m) do {						\
	if (!vm_page_trysbusy(m))					\
		panic("%s: page %p failed shared busying", __func__,	\
		    (m));						\
} while (0)

#define	vm_page_tryxbusy(m)						\
	(atomic_cmpset_acq_int(&(m)->busy_lock, VPB_UNBUSIED,		\
	    VPB_SINGLE_EXCLUSIVER))

#define	vm_page_xbusied(m)						\
	(((m)->busy_lock & VPB_SINGLE_EXCLUSIVER) != 0)

#define	vm_page_xbusy(m) do {						\
	if (!vm_page_tryxbusy(m))					\
		panic("%s: page %p failed exclusive busying", __func__, \
		    (m));						\
} while (0)

/* Note: page m's lock must not be owned by the caller. */
#define	vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)
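
/*
 * A typical (illustrative) exclusive-busy section:
 *
 *	if (!vm_page_tryxbusy(m))
 *		return (EBUSY);		... or sleep via vm_page_busy_sleep()
 *	... modify the page ...
 *	vm_page_xunbusy(m);
 */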

#ifdef INVARIANTS
void vm_page_object_lock_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	vm_page_object_lock_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits);
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
	vm_page_assert_pga_writeable(m, bits)
#else
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	(void)0
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
#endif

/*
 * We want to use atomic updates for the aflags field, which is 8 bits wide.
 * However, not all architectures support atomic operations on 8-bit
 * destinations.  In order that we can easily use a 32-bit operation, we
 * require that the aflags field be 32-bit aligned.
 */
CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0);

/*
 *	Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_REFERENCED flag can only be cleared if the page is locked.
	 */
	if ((bits & PGA_REFERENCED) != 0)
		vm_page_assert_locked(m);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_clear: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_clear_32(addr, val);
}

/*
 *	Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_set: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_set_32(addr, val);
}

/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if defined(KLD_MODULE) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}

/*
 *	vm_page_remque:
 *
 *	If the given page is in a page queue, then remove it from that page
 *	queue.
 *
 *	The page must be locked.
 */
static inline void
vm_page_remque(vm_page_t m)
{

	if (m->queue != PQ_NONE)
		vm_page_dequeue(m);
}

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_LOCK_ASSERT(m);
	m->dirty = 0;
}

static inline void
vm_page_replace_checked(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{
	vm_page_t mret;

	mret = vm_page_replace(mnew, object, pindex);
	KASSERT(mret == mold,
	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));

	/* Unused if !INVARIANTS. */
	(void)mret;
}

static inline bool
vm_page_active(vm_page_t m)
{

	return (m->queue == PQ_ACTIVE);
}

static inline bool
vm_page_inactive(vm_page_t m)
{

	return (m->queue == PQ_INACTIVE);
}

static inline bool
vm_page_in_laundry(vm_page_t m)
{

	return (m->queue == PQ_LAUNDRY || m->queue == PQ_UNSWAPPABLE);
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */