/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.h	8.3 (Berkeley) 1/12/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 *	Virtual memory object module definitions.
 */

#ifndef	_VM_OBJECT_
#define	_VM_OBJECT_
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/_pctrie.h>
#include <sys/_rwlock.h>
#include <sys/_domainset.h>

#include <vm/_vm_radix.h>
/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *
 *	Lock annotations used on struct members:
 *	(a)	atomic
 *	(c)	const until freed
 *	(f)	free pages queue mutex
 */

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
struct vm_object {
        struct rwlock lock;
        TAILQ_ENTRY(vm_object) object_list;     /* list of all objects */
        LIST_HEAD(, vm_object) shadow_head;     /* objects shadowing this one */
        LIST_ENTRY(vm_object) shadow_list;      /* chain of shadow objects */
        struct pglist memq;                     /* list of resident pages */
        struct vm_radix rtree;                  /* root of the resident page radix trie */
        vm_pindex_t size;                       /* object size in pages */
        struct domainset_ref domain;            /* NUMA policy */
        volatile int generation;                /* generation ID */
        int cleangeneration;                    /* generation at last clean pass */
        volatile u_int ref_count;               /* reference count */
        int shadow_count;                       /* count of objects shadowing this one */
        vm_memattr_t memattr;                   /* default memory attribute for pages */
        objtype_t type;                         /* type of pager */
        u_short flags;                          /* see OBJ_* flags below */
        u_short pg_color;                       /* (c) color of first page in obj */
        volatile u_int paging_in_progress;      /* paging (in or out); don't collapse or destroy */
        volatile u_int busy;                    /* (a) object is busy, disallow page busy */
        int resident_page_count;                /* number of resident pages */
        struct vm_object *backing_object;       /* object that this object shadows */
        vm_ooffset_t backing_object_offset;     /* offset in backing object */
        TAILQ_ENTRY(vm_object) pager_object_list; /* list of all objects of this pager type */
        LIST_HEAD(, vm_reserv) rvq;             /* list of reservations */
        union {
                /*
                 * VNode pager
                 *
                 *	vnp_size - current size of file
                 */
                struct {
                        off_t vnp_size;
                        vm_ooffset_t writemappings;
                } vnp;

                /*
                 * Device pager
                 *
                 *	devp_pglist - list of allocated pages
                 */
                struct {
                        TAILQ_HEAD(, vm_page) devp_pglist;
                        struct cdev_pager_ops *ops;
                } devp;

                /*
                 * SG pager
                 *
                 *	sgp_pglist - list of allocated pages
                 */
                struct {
                        TAILQ_HEAD(, vm_page) sgp_pglist;
                } sgp;

                /*
                 * Swap pager
                 *
                 *	swp_tmpfs - back-pointer to the tmpfs vnode,
                 *		     if any, which uses the vm object
                 *		     as backing store.  The handle
                 *		     cannot be reused for linking,
                 *		     because the vnode can be
                 *		     reclaimed and recreated, making
                 *		     the handle changed and hash-chain
                 *		     invalid for the same vnode.
                 *
                 *	swp_blks - pc-trie of the allocated swap blocks.
                 */
                struct {
                        void *swp_tmpfs;
                        struct pctrie swp_blks;
                        vm_ooffset_t writemappings;
                } swp;
        } un_pager;
};
/*
 * Object flags
 */
#define	OBJ_FICTITIOUS	0x0001	/* (c) contains fictitious pages */
#define	OBJ_UNMANAGED	0x0002	/* (c) contains unmanaged pages */
#define	OBJ_POPULATE	0x0004	/* pager implements populate() */
#define	OBJ_DEAD	0x0008	/* dead objects (during rundown) */
#define	OBJ_ANON	0x0010	/* (c) contains anonymous memory */
#define	OBJ_UMTXDEAD	0x0020	/* umtx pshared was terminated */
#define	OBJ_SIZEVNLOCK	0x0040	/* lock vnode to check obj size */
#define	OBJ_PG_DTOR	0x0080	/* don't reset object; leave that for the dtor */
#define	OBJ_TMPFS_NODE	0x0200	/* object belongs to tmpfs VREG node */
#define	OBJ_SPLIT	0x0400	/* object is being split */
#define	OBJ_COLLAPSING	0x0800	/* parent of a collapse operation */
#define	OBJ_COLORED	0x1000	/* pg_color is defined */
#define	OBJ_ONEMAPPING	0x2000	/* one USE (a single, non-forked) mapping */
#define	OBJ_SHADOWLIST	0x4000	/* object is on a shadow list */
#define	OBJ_TMPFS	0x8000	/* has tmpfs vnode allocated */
/*
 * Helpers to perform conversion between vm_object page indexes and offsets.
 * IDX_TO_OFF() converts an index into an offset.
 * OFF_TO_IDX() converts an offset into an index.
 * OBJ_MAX_SIZE specifies the maximum page index corresponding to the
 * maximum unsigned offset.
 */
#define	IDX_TO_OFF(idx)	(((vm_ooffset_t)(idx)) << PAGE_SHIFT)
#define	OFF_TO_IDX(off)	((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
#define	OBJ_MAX_SIZE	(OFF_TO_IDX(UINT64_MAX) + 1)
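/*
 * Worked example (illustrative, not part of the KPI): with 4 KB pages
 * (PAGE_SHIFT == 12), OFF_TO_IDX(0x3000) == 3 and IDX_TO_OFF(3) == 0x3000.
 * The shift truncates within-page offsets, so OFF_TO_IDX(0x3fff) == 3 as
 * well; callers that must cover a byte range therefore round the end
 * offset up to a page boundary before converting it to an index.
 */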
#define	OBJPC_SYNC	0x1	/* sync I/O */
#define	OBJPC_INVAL	0x2	/* invalidate */
#define	OBJPC_NOSYNC	0x4	/* skip if PGA_NOSYNC */
/*
 * The following options are supported by vm_object_page_remove().
 */
#define	OBJPR_CLEANONLY	0x1	/* Don't remove dirty pages. */
#define	OBJPR_NOTMAPPED	0x2	/* Don't unmap pages. */
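/*
 * Illustrative sketch (not a call site from the tree; "obj", "off", and
 * "len" are assumed locals): discard only the clean pages backing a byte
 * range, leaving dirty pages for a later clean pass.  The end offset is
 * rounded up so a partial final page is covered:
 *
 *	VM_OBJECT_WLOCK(obj);
 *	vm_object_page_remove(obj, OFF_TO_IDX(off),
 *	    OFF_TO_IDX(off + len + PAGE_SIZE - 1), OBJPR_CLEANONLY);
 *	VM_OBJECT_WUNLOCK(obj);
 */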
TAILQ_HEAD(object_q, vm_object);

extern struct object_q vm_object_list;	/* list of allocated objects */
extern struct mtx vm_object_list_mtx;	/* lock for object list and count */

extern struct vm_object kernel_object_store;

/* kernel_object and kmem_object are aliased for backwards KPI compat. */
#define	kernel_object	(&kernel_object_store)
#define	kmem_object	(&kernel_object_store)
#define	VM_OBJECT_ASSERT_LOCKED(object)					\
	rw_assert(&(object)->lock, RA_LOCKED)
#define	VM_OBJECT_ASSERT_RLOCKED(object)				\
	rw_assert(&(object)->lock, RA_RLOCKED)
#define	VM_OBJECT_ASSERT_WLOCKED(object)				\
	rw_assert(&(object)->lock, RA_WLOCKED)
#define	VM_OBJECT_ASSERT_UNLOCKED(object)				\
	rw_assert(&(object)->lock, RA_UNLOCKED)
#define	VM_OBJECT_LOCK_DOWNGRADE(object)				\
	rw_downgrade(&(object)->lock)
#define	VM_OBJECT_RLOCK(object)						\
	rw_rlock(&(object)->lock)
#define	VM_OBJECT_RUNLOCK(object)					\
	rw_runlock(&(object)->lock)
#define	VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo)		\
	rw_sleep((wchan), &(object)->lock, (pri), (wmesg), (timo))
#define	VM_OBJECT_TRYRLOCK(object)					\
	rw_try_rlock(&(object)->lock)
#define	VM_OBJECT_TRYWLOCK(object)					\
	rw_try_wlock(&(object)->lock)
#define	VM_OBJECT_TRYUPGRADE(object)					\
	rw_try_upgrade(&(object)->lock)
#define	VM_OBJECT_WLOCK(object)						\
	rw_wlock(&(object)->lock)
#define	VM_OBJECT_WOWNED(object)					\
	rw_wowned(&(object)->lock)
#define	VM_OBJECT_WUNLOCK(object)					\
	rw_wunlock(&(object)->lock)
#define	VM_OBJECT_DROP(object)						\
	lock_class_rw.lc_unlock(&(object)->lock.lock_object)
#define	VM_OBJECT_PICKUP(object, state)					\
	lock_class_rw.lc_lock(&(object)->lock.lock_object, (state))
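/*
 * Typical usage (an illustrative sketch, not a specific call site):
 * writers take the object lock exclusively around state updates, e.g.
 *
 *	VM_OBJECT_WLOCK(object);
 *	vm_object_set_flag(object, OBJ_ONEMAPPING);
 *	VM_OBJECT_WUNLOCK(object);
 *
 * Read-mostly paths may use VM_OBJECT_RLOCK()/VM_OBJECT_RUNLOCK() and
 * attempt VM_OBJECT_TRYUPGRADE() when a modification turns out to be
 * necessary, dropping and re-taking the write lock if the upgrade fails.
 */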
#define	VM_OBJECT_ASSERT_PAGING(object)					\
	KASSERT((object)->paging_in_progress != 0,			\
	    ("vm_object %p is not paging", object))
#define	VM_OBJECT_ASSERT_REFERENCE(object)				\
	KASSERT((object)->ref_count != 0,				\
	    ("vm_object %p is not referenced", object))
/*
 *	The object must be locked or thread private.
 */
static __inline void
vm_object_set_flag(vm_object_t object, u_short bits)
{

	object->flags |= bits;
}
/*
 *	Conditionally set the object's color, which (1) enables the allocation
 *	of physical memory reservations for anonymous objects and larger-than-
 *	superpage-sized named objects and (2) determines the first page offset
 *	within the object at which a reservation may be allocated.  In other
 *	words, the color determines the alignment of the object with respect
 *	to the largest superpage boundary.  When mapping named objects, like
 *	files or POSIX shared memory objects, the color should be set to zero
 *	before a virtual address is selected for the mapping.  In contrast,
 *	for anonymous objects, the color may be set after the virtual address
 *	is selected.
 *
 *	The object must be locked.
 */
static __inline void
vm_object_color(vm_object_t object, u_short color)
{

	if ((object->flags & OBJ_COLORED) == 0) {
		object->pg_color = color;
		object->flags |= OBJ_COLORED;
	}
}
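/*
 * Illustrative sketch (an assumed caller, not a call site from the tree):
 * a mapping of a named object zeroes the color before a virtual address
 * is chosen, aligning the object to the largest superpage boundary:
 *
 *	VM_OBJECT_WLOCK(object);
 *	vm_object_color(object, 0);
 *	VM_OBJECT_WUNLOCK(object);
 */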
/*
 *	Return true if the object is eligible for physical memory
 *	reservations: it has been colored and contains no fictitious pages.
 */
static inline bool
vm_object_reserv(vm_object_t object)
{

	if (object != NULL &&
	    (object->flags & (OBJ_COLORED | OBJ_FICTITIOUS)) == OBJ_COLORED) {
		return (true);
	}
	return (false);
}
/*
 *	Return true if the object might hold dirty pages, i.e., it has been
 *	written to since the last clean pass.  Only vnode-backed objects
 *	(including tmpfs swap objects standing in for VREG vnodes) can be
 *	dirty in this sense.
 */
static inline bool
vm_object_mightbedirty(vm_object_t object)
{

	if (object->type != OBJT_VNODE) {
		if ((object->flags & OBJ_TMPFS_NODE) == 0)
			return (false);
		KASSERT(object->type == OBJT_SWAP,
		    ("TMPFS_NODE obj %p is not swap", object));
	}
	return (object->generation != object->cleangeneration);
}
void vm_object_clear_flag(vm_object_t object, u_short bits);
void vm_object_pip_add(vm_object_t object, short i);
void vm_object_pip_wakeup(vm_object_t object);
void vm_object_pip_wakeupn(vm_object_t object, short i);
void vm_object_pip_wait(vm_object_t object, char *waitid);
void vm_object_pip_wait_unlocked(vm_object_t object, char *waitid);
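/*
 * Illustrative paging-in-progress pattern (a sketch, not a specific call
 * site): a pager holds a pip reference across I/O so that the object is
 * neither collapsed nor destroyed in the meantime, then releases it and
 * wakes any waiters:
 *
 *	vm_object_pip_add(object, 1);
 *	... issue and complete pager I/O on the object's pages ...
 *	vm_object_pip_wakeup(object);
 *
 * vm_object_pip_wait() blocks until all such references are released.
 */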
void vm_object_busy(vm_object_t object);
void vm_object_unbusy(vm_object_t object);
void vm_object_busy_wait(vm_object_t object, const char *wmesg);
static inline bool
vm_object_busied(vm_object_t object)
{

	return (object->busy != 0);
}
#define	VM_OBJECT_ASSERT_BUSY(object)	MPASS(vm_object_busied((object)))
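/*
 * Illustrative sketch (an assumed unlocked-lookup caller): busying the
 * object disallows page busying, so a lockless reader can inspect the
 * object's resident pages without them changing identity underneath it:
 *
 *	vm_object_busy(object);
 *	... examine resident pages without the object lock ...
 *	vm_object_unbusy(object);
 */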
void umtx_shm_object_init(vm_object_t object);
void umtx_shm_object_terminated(vm_object_t object);
extern int umtx_shm_vnobj_persistent;
vm_object_t vm_object_allocate(objtype_t, vm_pindex_t);
vm_object_t vm_object_allocate_anon(vm_pindex_t, vm_object_t, struct ucred *,
    vm_size_t);
boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t,
    boolean_t);
void vm_object_collapse(vm_object_t);
void vm_object_deallocate(vm_object_t);
void vm_object_destroy(vm_object_t);
void vm_object_terminate(vm_object_t);
void vm_object_set_writeable_dirty(vm_object_t);
void vm_object_init(void);
int vm_object_kvme_type(vm_object_t object, struct vnode **vpp);
void vm_object_madvise(vm_object_t, vm_pindex_t, vm_pindex_t, int);
boolean_t vm_object_page_clean(vm_object_t object, vm_ooffset_t start,
    vm_ooffset_t end, int flags);
void vm_object_page_noreuse(vm_object_t object, vm_pindex_t start,
    vm_pindex_t end);
void vm_object_page_remove(vm_object_t object, vm_pindex_t start,
    vm_pindex_t end, int options);
boolean_t vm_object_populate(vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_object_print(long addr, boolean_t have_addr, long count, char *modif);
void vm_object_reference(vm_object_t);
void vm_object_reference_locked(vm_object_t);
int vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr);
void vm_object_shadow(vm_object_t *, vm_ooffset_t *, vm_size_t, struct ucred *,
    bool);
void vm_object_split(vm_map_entry_t);
boolean_t vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t,
    boolean_t);
void vm_object_unwire(vm_object_t object, vm_ooffset_t offset,
    vm_size_t length, uint8_t queue);
struct vnode *vm_object_vnode(vm_object_t object);
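/*
 * Illustrative object lifecycle (a sketch; OBJT_DEFAULT and the 1 MB size
 * are assumptions for the example, not taken from this file):
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(1024 * 1024));
 *	vm_object_reference(obj);	(take a second reference)
 *	vm_object_deallocate(obj);	(drop it)
 *	vm_object_deallocate(obj);	(last reference: the object is
 *					 terminated and freed)
 */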
#endif /* _VM_OBJECT_ */