/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 */
/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups.
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized by one of, or a combination of, the lock on the
 *	object that the page belongs to (O), the pool lock for the page
 *	(P), or the lock for either the free or paging queue (Q).  If a
 *	field is annotated below with two of these locks, then holding
 *	either lock is sufficient for read access, but both locks are
 *	required for write access.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is machine dependent (M).  In the
 *	machine-independent layer, the lock on the object that the
 *	page belongs to must be held in order to operate on the field.
 *	However, the pmap layer is permitted to set all bits within
 *	the field without holding that lock.  If the underlying
 *	architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().
 */
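/*
 * Illustrative sketch (not from the original header): under the
 * annotations above, a field marked (P,Q), such as "queue", may be read
 * while holding either the page lock or the queue lock, but may only be
 * written while holding both.  The sketch assumes pq is the page's
 * queue and uses the vm_page_lock()/vm_pagequeue_lock() macros defined
 * later in this file:
 *
 *	vm_page_lock(m);			// P
 *	vm_pagequeue_lock(pq);			// Q
 *	TAILQ_REMOVE(&pq->pq_pl, m, pageq);
 *	m->queue = PQ_NONE;			// write: both P and Q held
 *	vm_pagequeue_unlock(pq);
 *	vm_page_unlock(m);
 */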

#if PAGE_SIZE == 4096
#define	VM_PAGE_BITS_ALL	0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define	VM_PAGE_BITS_ALL	0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define	VM_PAGE_BITS_ALL	0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define	VM_PAGE_BITS_ALL	0xffffffffffffffffLU
typedef uint64_t vm_page_bits_t;
#endif

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* page queue or free list (Q) */
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	uint8_t	queue;			/* page queue index (P,Q) */
	short hold_count;		/* page hold count (P) */
	uint8_t	order;			/* index of the buddy queue */
	u_short cow;			/* page cow mapping count (P) */
	u_int wire_count;		/* wired down maps refs (P) */
	uint8_t	aflags;			/* access is atomic */
	uint8_t	oflags;			/* page VPO_* flags (O) */
	uint16_t flags;			/* page PG_* flags (P) */
	u_char	act_count;		/* page usage count (P) */
	u_char	busy;			/* page busy count (O) */
	/* NOTE: these must support one bit per DEV_BSIZE in a page. */
	/* So, on typical x86 kernels, they must be at least 8 bits wide. */
	vm_page_bits_t valid;		/* map of valid DEV_BSIZE chunks (O) */
	vm_page_bits_t dirty;		/* map of dirty DEV_BSIZE chunks (M) */
};
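/*
 * Worked example (illustrative, assuming DEV_BSIZE == 512): a 4096-byte
 * page holds eight 512-byte chunks, so vm_page_bits_t is 8 bits wide.
 * A page whose first sector alone has been read in has valid == 0x01,
 * while a fully resident page has valid == VM_PAGE_BITS_ALL (0xff).
 */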
/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 * indicates that the page is not under PV management but
 * otherwise should be treated as a normal page.  Pages not
 * under PV management cannot be paged out via the
 * object/vm_page_t because there is no knowledge of their pte
 * mappings, and such pages are also not on any PQ queue.
 */
#define	VPO_BUSY	0x01		/* page is in transit */
#define	VPO_WANTED	0x02		/* someone is waiting for page */
#define	VPO_UNMANAGED	0x04		/* no PV management for page */
#define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */
#define	VPO_NOSYNC	0x10		/* do not collect for syncer */
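/*
 * Sketch of the VPO_BUSY protocol (illustrative, not from the original
 * header): a thread that needs exclusive access to a page's contents
 * busies it while holding the object lock; waiters set VPO_WANTED via
 * vm_page_sleep() and are awoken by vm_page_wakeup().
 *
 *	// object lock held; m belongs to the locked object
 *	vm_page_busy(m);	// sets VPO_BUSY
 *	... operate on the page's contents ...
 *	vm_page_wakeup(m);	// clears VPO_BUSY, wakes VPO_WANTED sleepers
 */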
#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_COUNT	2
TAILQ_HEAD(pglist, vm_page);

struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		*const pq_cnt;
	const char	*const pq_name;
} __aligned(CACHE_LINE_SIZE);

extern struct vm_pagequeue vm_pagequeues[PQ_COUNT];
#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_init_lock(pq)	mtx_init(&(pq)->pq_mutex,	\
	    (pq)->pq_name, "vm pagequeue", MTX_DEF | MTX_DUPOK)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)
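/*
 * Example (illustrative): walking a page queue under its lock, using
 * the pq_pl list head and the pageq linkage from struct vm_page.
 *
 *	struct vm_pagequeue *pq;
 *	vm_page_t m;
 *
 *	pq = &vm_pagequeues[PQ_INACTIVE];
 *	vm_pagequeue_lock(pq);
 *	TAILQ_FOREACH(m, &pq->pq_pl, pageq) {
 *		... examine m; fields marked (Q) may be read here ...
 *	}
 *	vm_pagequeue_unlock(pq);
 */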
extern struct mtx_padalign vm_page_queue_free_mtx;
extern struct mtx_padalign pa_lock[];
#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define	PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa)			\
	do {					\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))
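/*
 * Sketch (illustrative): because the page lock is keyed by physical
 * address, a lock can be taken before the corresponding vm_page is
 * looked up, e.g. in pmap code:
 *
 *	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
 *
 *	PA_LOCK(pa);
 *	... m is now page-locked; fields marked (P) may be written ...
 *	PA_UNLOCK(pa);
 */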
#ifdef KLD_MODULE
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define	vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_assert_locked(m)
#define	vm_page_lock_assert(m, a)
#endif
/*
 * The vm_page's aflags are updated using atomic operations.  To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used.  Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().  When it
 * does so, the page must be VPO_BUSY.  The MI VM layer must never access this
 * flag directly.  Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 */
#define	PGA_WRITEABLE	0x01		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x02		/* page has been referenced */
#define	PGA_EXECUTABLE	0x04		/* page may be mapped executable */
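/*
 * Example (illustrative): following the rules above, a pmap's
 * pmap_enter() might record a new writeable mapping on a managed,
 * busied page with
 *
 *	vm_page_aflag_set(m, PGA_WRITEABLE);
 *
 * while MI code asks "is this page write-mapped?" only indirectly,
 * through pmap_page_is_write_mapped().
 */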
/*
 * Page flags.  If changed at any time other than page allocation or
 * freeing, the modification must be protected by the vm_page lock.
 */
#define	PG_CACHED	0x0001		/* page is cached */
#define	PG_FREE		0x0002		/* page is free */
#define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
#define	PG_ZERO		0x0008		/* page is zeroed */
#define	PG_MARKER	0x0010		/* special queue marker page */
#define	PG_SLAB		0x0020		/* object pointer is actually a slab */
#define	PG_WINATCFLS	0x0040		/* flush dirty page on inactive q */
#define	PG_NODUMP	0x0080		/* don't include this page in a dump */
#define	PG_UNHOLDFREE	0x0100		/* delayed free of a held page */
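/*
 * Example (illustrative): PG_ZERO reports whether an allocated page was
 * already zeroed; VM_ALLOC_ZERO is only a preference, so callers that
 * require zeroed memory must check the flag themselves:
 *
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *	if (m != NULL && (m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */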
#define	ACT_DECLINE		1
#define	ACT_ADVANCE		3

#include <sys/systm.h>

#include <machine/atomic.h>
/*
 *	Each pageable resident page falls into one of four lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	cache
 *		Almost available for allocation.  Still associated with
 *		an object, but clean and immediately freeable.
 *
 *	The following lists are LRU sorted:
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 */
extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define	VM_PAGE_IS_FREE(m)	(((m)->flags & PG_FREE) != 0)

#define	VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
/* page allocation classes: */
#define	VM_ALLOC_NORMAL		0
#define	VM_ALLOC_INTERRUPT	1
#define	VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
/* page allocation flags: */
#define	VM_ALLOC_WIRED		0x0020	/* non pageable */
#define	VM_ALLOC_ZERO		0x0040	/* Try to obtain a zeroed page */
#define	VM_ALLOC_RETRY		0x0080	/* Mandatory with vm_page_grab() */
#define	VM_ALLOC_NOOBJ		0x0100	/* No associated object */
#define	VM_ALLOC_NOBUSY		0x0200	/* Do not busy the page */
#define	VM_ALLOC_IFCACHED	0x0400	/* Fail if the page is not cached */
#define	VM_ALLOC_IFNOTCACHED	0x0800	/* Fail if the page is cached */
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* vm_page_grab() only */
#define	VM_ALLOC_NODUMP		0x2000	/* don't include in dump */

#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)
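/*
 * Example (illustrative): allocation requests combine one class, any
 * number of flags, and an optional count hint encoded in the upper
 * bits, e.g. a wired page for kernel use, preferably pre-zeroed, with
 * a hint that roughly eight pages will be allocated:
 *
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_SYSTEM |
 *	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_COUNT(8));
 */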
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	return (pflags);
}
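/*
 * Usage sketch (illustrative): a kernel allocator back-end translating
 * its caller's malloc(9) flags before asking the page allocator for an
 * unmanaged page:
 *
 *	pflags = malloc2vm_flags(M_NOWAIT | M_ZERO) | VM_ALLOC_NOOBJ;
 *	m = vm_page_alloc(NULL, 0, pflags);
 */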
void vm_page_busy(vm_page_t m);
void vm_page_flash(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_io_finish(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);
void vm_page_wakeup(vm_page_t m);

void vm_page_activate(vm_page_t);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
void vm_page_cache(vm_page_t);
void vm_page_cache_free(vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_page_cache_transfer(vm_object_t, vm_pindex_t, vm_object_t);
int vm_page_try_to_cache(vm_page_t);
int vm_page_try_to_free(vm_page_t);
void vm_page_dontneed(vm_page_t);
void vm_page_deactivate(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_locked(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_insert(vm_page_t, vm_object_t, vm_pindex_t);
boolean_t vm_page_is_cached(vm_object_t object, vm_pindex_t pindex);
vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
vm_page_t vm_page_prev(vm_page_t m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
void vm_page_reference(vm_page_t m);
void vm_page_remove(vm_page_t);
void vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_requeue(vm_page_t m);
void vm_page_requeue_locked(vm_page_t m);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
void vm_page_sleep(vm_page_t m, const char *msg);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unwire(vm_page_t, int);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire(vm_page_t);
void vm_page_set_validclean(vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_zero_idle_wakeup(void);
void vm_page_cowfault(vm_page_t);
int vm_page_cowsetup(vm_page_t);
void vm_page_cowclear(vm_page_t);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#ifdef INVARIANTS
void vm_page_object_lock_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	vm_page_object_lock_assert(m)
#else
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	(void)0
#endif
/*
 * We want to use atomic updates for the aflags field, which is 8 bits wide.
 * However, not all architectures support atomic operations on 8-bit
 * destinations.  In order that we can easily use a 32-bit operation, we
 * require that the aflags field be 32-bit aligned.
 */
CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0);
/*
 *	Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_REFERENCED flag can only be cleared if the page is locked.
	 */
	if ((bits & PGA_REFERENCED) != 0)
		vm_page_assert_locked(m);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_clear: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= (sizeof(uint32_t) - sizeof(uint8_t)) * NBBY;
#endif
	atomic_clear_32(addr, val);
}
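/*
 * Usage sketch (illustrative): the pageout code clears the reference
 * bit while holding the page lock, as PGA_REFERENCED requires:
 *
 *	vm_page_lock(m);
 *	vm_page_aflag_clear(m, PGA_REFERENCED);
 *	vm_page_unlock(m);
 */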
/*
 *	Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_WRITEABLE flag can only be set if the page is managed and
	 * VPO_BUSY.  Currently, this flag is only set by pmap_enter().
	 */
	KASSERT((bits & PGA_WRITEABLE) == 0 ||
	    (m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == VPO_BUSY,
	    ("vm_page_aflag_set: PGA_WRITEABLE and !VPO_BUSY"));

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_set: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= (sizeof(uint32_t) - sizeof(uint8_t)) * NBBY;
#endif
	atomic_set_32(addr, val);
}
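/*
 * Worked note (illustrative): the 8-bit value must be positioned within
 * the aligned 32-bit word that the atomic operates on.  On a
 * little-endian machine aflags is the word's least significant byte, so
 * no shift is needed; on a big-endian machine it is the most
 * significant byte, so the value is first shifted left by
 * (sizeof(uint32_t) - sizeof(uint8_t)) * NBBY == 24 bits.  This assumes
 * aflags sits at the start of its 32-bit word, which the CTASSERT
 * above enforces.
 */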
/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if defined(KLD_MODULE) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}
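/*
 * Usage sketch (illustrative): code that has just written to a page
 * marks every DEV_BSIZE chunk dirty, with the page's object locked as
 * the MI layer requires:
 *
 *	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 *	vm_page_dirty(m);	// dirty becomes VM_PAGE_BITS_ALL
 */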
/*
 *	vm_page_remque:
 *
 *	If the given page is in a page queue, then remove it from that page
 *	queue.
 *
 *	The page must be locked.
 */
static inline void
vm_page_remque(vm_page_t m)
{

	if (m->queue != PQ_NONE)
		vm_page_dequeue(m);
}
/*
 *	vm_page_sleep_if_busy:
 *
 *	Sleep and release the page queues lock if VPO_BUSY is set or,
 *	if also_m_busy is TRUE, busy is non-zero.  Returns TRUE if the
 *	thread slept and the page queues lock was released.
 *	Otherwise, retains the page queues lock and returns FALSE.
 *
 *	The object containing the given page must be locked.
 */
static __inline int
vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
{

	if ((m->oflags & VPO_BUSY) || (also_m_busy && m->busy)) {
		vm_page_sleep(m, msg);
		return (TRUE);
	}
	return (FALSE);
}
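/*
 * Typical retry pattern (illustrative sketch; object lock handling
 * elided): a page may be unbusied and even freed while the caller
 * sleeps, so the lookup must be repeated after any sleep.
 *
 * retry:
 *	m = vm_page_lookup(object, pindex);
 *	if (m != NULL && vm_page_sleep_if_busy(m, TRUE, "pgwait"))
 *		goto retry;
 */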
/*
 *	vm_page_undirty:
 *
 *	Set the page to not be dirty.  Note: this does not clear
 *	pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_LOCK_ASSERT(m);
	m->dirty = 0;
}
#endif				/* !_VM_PAGE_ */