/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.9 (Berkeley) 5/17/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_map.h,v 1.47 1999/08/23 18:08:34 alc Exp $
 */

/*
 *	Virtual memory map module definitions.
 */

#ifndef	_VM_MAP_
#define	_VM_MAP_

/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 */

/*
 *	Objects which live in maps may be either VM objects, or
 *	another map (called a "sharing map") which denotes read-write
 *	sharing with other maps.
 */
union vm_map_object {
	struct vm_object *vm_object;	/* backing VM object */
	struct vm_map *sub_map;		/* belongs to another map */
};
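
/*
 * Illustrative sketch (not part of the original header): an entry's
 * eflags, defined below, select which arm of the union is valid, so a
 * caller must test MAP_ENTRY_IS_SUB_MAP before touching either field.
 * The helper name here is hypothetical.
 */
#if 0
static __inline struct vm_object *
vm_map_entry_object(struct vm_map_entry *entry)
{
	/* A submap entry carries a map, not a VM object. */
	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
		return (NULL);
	return (entry->object.vm_object);
}
#endif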

/*
 *	Address map entries consist of start and end addresses,
 *	a VM object (or sharing map) and offset into that object,
 *	and user-exported inheritance and protection information.
 *	Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	struct vm_map_entry *prev;	/* previous entry */
	struct vm_map_entry *next;	/* next entry */
	vm_offset_t start;		/* start address */
	vm_offset_t end;		/* end address */
	vm_offset_t avail_ssize;	/* amount the stack can grow, if a stack entry */
	union vm_map_object object;	/* object I point to */
	vm_ooffset_t offset;		/* offset into object */
	u_char eflags;			/* map entry flags */
	/* Only in task maps: */
	vm_prot_t protection;		/* protection code */
	vm_prot_t max_protection;	/* maximum protection */
	vm_inherit_t inheritance;	/* inheritance */
	int wired_count;		/* can be paged if == 0 */
};

#define MAP_ENTRY_UNUSED_01		0x1
#define MAP_ENTRY_IS_SUB_MAP		0x2
#define MAP_ENTRY_COW			0x4
#define MAP_ENTRY_NEEDS_COPY		0x8
#define MAP_ENTRY_NOFAULT		0x10
#define MAP_ENTRY_USER_WIRED		0x20

#define MAP_ENTRY_BEHAV_NORMAL		0x00	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL	0x40	/* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM		0x80	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED	0xC0	/* future use */

#define MAP_ENTRY_BEHAV_MASK		0xC0
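
/*
 * Illustrative sketch (not part of the original header): how fault-time
 * code might consult the copy-on-write bits.  An entry marked
 * MAP_ENTRY_NEEDS_COPY still shares its backing object and must get a
 * private copy before the first write; 'fault_type' and 'entry' are
 * hypothetical locals.
 */
#if 0
	if ((fault_type & VM_PROT_WRITE) &&
	    (entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
		/* create a private (shadow) copy before allowing the write */
	}
#endif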

static __inline u_char
vm_map_entry_behavior(struct vm_map_entry *entry)
{
	return entry->eflags & MAP_ENTRY_BEHAV_MASK;
}

static __inline void
vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
		(behavior & MAP_ENTRY_BEHAV_MASK);
}
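
/*
 * Illustrative sketch (not part of the original header): the kind of
 * loop vm_map_madvise() might use to record an access-pattern hint on
 * every entry overlapping a range.  Assumes the map lock is held;
 * 'first' and 'end' are hypothetical.
 */
#if 0
	for (entry = first; entry != &map->header && entry->start < end;
	    entry = entry->next)
		vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_SEQUENTIAL);
#endif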

/*
 *	Maps are doubly-linked lists of map entries, kept sorted
 *	by address.  A single hint is provided to start
 *	searches again from the last successful search,
 *	insertion, or removal.
 *
 *	Note: the lock structure cannot be the first element of vm_map,
 *	because that can deadlock two or more system processes in
 *	kmem_alloc_wait(): allocation and free tsleep/wakeup on the 'map'
 *	address while the underlying lockmgr lock also sleeps and wakes
 *	up on 'map'.  The lockup occurs when a map, such as the 'exec'
 *	map, fills up.
 */
struct vm_map {
	struct vm_map_entry header;	/* List of entries */
	struct lock lock;		/* Lock for map data */
	int nentries;			/* Number of entries */
	vm_size_t size;			/* virtual size */
	u_char system_map;		/* Am I a system map? */
	vm_map_entry_t hint;		/* hint for quick lookups */
	unsigned int timestamp;		/* Version number */
	vm_map_entry_t first_free;	/* First free space hint */
	struct pmap *pmap;		/* Physical map */
#define	min_offset	header.start
#define	max_offset	header.end
};
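
/*
 * Illustrative sketch (not part of the original header): 'header' is a
 * sentinel, so a full traversal runs from header.next back around to
 * &map->header.  Assumes the map lock is held; 'map' is hypothetical.
 */
#if 0
	vm_map_entry_t entry;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next)
		printf("[0x%lx, 0x%lx)\n", (u_long)entry->start,
		    (u_long)entry->end);
#endif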

/*
 * Shareable process virtual address space.
 * May eventually be merged with vm_map.
 * Several fields are temporary (text, data stuff).
 */
struct vmspace {
	struct vm_map vm_map;	/* VM address map */
	struct pmap vm_pmap;	/* private physical map */
	int vm_refcnt;		/* number of references */
	caddr_t vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_ssize;	/* stack size (pages) */
	caddr_t vm_taddr;	/* user virtual address of text XXX */
	caddr_t vm_daddr;	/* user virtual address of data XXX */
	caddr_t vm_maxsaddr;	/* user VA at max stack growth */
	caddr_t vm_minsaddr;	/* user VA at top of stack */
};
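
/*
 * Illustrative sketch (not part of the original header): the
 * vm_startcopy marker lets fork duplicate the statistics tail of the
 * structure with a single bcopy(), along the lines of what
 * vmspace_fork() does.  'vm1' and 'vm2' are hypothetical.
 */
#if 0
	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
	    (caddr_t)(vm1 + 1) - (caddr_t)&vm1->vm_startcopy);
#endif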

/*
 *	Macros:		vm_map_lock, etc.
 *	Function:
 *		Perform locking on the data portion of a map.  Note that
 *		these macros mimic procedure calls returning void.  The
 *		semicolon is supplied by the user of these macros, not
 *		by the macros themselves.  The macros can safely be used
 *		as unbraced elements in a higher level statement.
 */

#define	vm_map_lock_drain_interlock(map) \
	do { \
		lockmgr(&(map)->lock, LK_DRAIN|LK_INTERLOCK, \
			&(map)->ref_lock, curproc); \
		(map)->timestamp++; \
	} while(0)

#ifdef DIAGNOSTIC
/* #define MAP_LOCK_DIAGNOSTIC 1 */
#ifdef MAP_LOCK_DIAGNOSTIC
#define	vm_map_lock(map) \
	do { \
		printf("locking map LK_EXCLUSIVE: 0x%x\n", map); \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#else
#define	vm_map_lock(map) \
	do { \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#endif
#else
#define	vm_map_lock(map) \
	do { \
		lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc); \
		(map)->timestamp++; \
	} while(0)
#endif /* DIAGNOSTIC */

#if defined(MAP_LOCK_DIAGNOSTIC)
#define	vm_map_unlock(map) \
	do { \
		printf("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc); \
	} while(0)
#define	vm_map_lock_read(map) \
	do { \
		printf("locking map LK_SHARED: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_SHARED, (void *)0, curproc); \
	} while(0)
#define	vm_map_unlock_read(map) \
	do { \
		printf("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc); \
	} while(0)
#else
#define	vm_map_unlock(map) \
	lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)
#define	vm_map_lock_read(map) \
	lockmgr(&(map)->lock, LK_SHARED, (void *)0, curproc)
#define	vm_map_unlock_read(map) \
	lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)
#endif
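
/*
 * Usage sketch (not part of the original header): the macros mimic void
 * procedure calls, so the caller supplies the trailing semicolon and can
 * use them unbraced inside a larger statement.
 */
#if 0
	vm_map_lock(map);
	/* ... modify entries ... */
	vm_map_unlock(map);
#endif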

static __inline__ int
_vm_map_lock_upgrade(vm_map_t map, struct proc *p) {
	int error;

#if defined(MAP_LOCK_DIAGNOSTIC)
	printf("locking map LK_EXCLUPGRADE: 0x%x\n", map);
#endif
	error = lockmgr(&map->lock, LK_EXCLUPGRADE, (void *)0, p);
	if (error == 0)
		map->timestamp++;
	return error;
}

#define vm_map_lock_upgrade(map) _vm_map_lock_upgrade(map, curproc)

#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_lock_downgrade(map) \
	do { \
		printf("locking map LK_DOWNGRADE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_DOWNGRADE, (void *)0, curproc); \
	} while(0)
#else
#define vm_map_lock_downgrade(map) \
	lockmgr(&(map)->lock, LK_DOWNGRADE, (void *)0, curproc)
#endif
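
/*
 * Usage sketch (not part of the original header): the upgrade pattern a
 * lookup path might use.  If vm_map_lock_upgrade() fails, the shared
 * lock has been lost entirely, so the caller must retry from scratch;
 * the label is hypothetical.
 */
#if 0
	if (vm_map_lock_upgrade(map))
		goto RetryLookup;	/* lock lost; start over */
	/* ... exclusive access here ... */
	vm_map_lock_downgrade(map);
#endif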

#define vm_map_set_recursive(map) \
	do { \
		simple_lock(&(map)->lock.lk_interlock); \
		(map)->lock.lk_flags |= LK_CANRECURSE; \
		simple_unlock(&(map)->lock.lk_interlock); \
	} while(0)

#define vm_map_clear_recursive(map) \
	do { \
		simple_lock(&(map)->lock.lk_interlock); \
		(map)->lock.lk_flags &= ~LK_CANRECURSE; \
		simple_unlock(&(map)->lock.lk_interlock); \
	} while(0)

/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

static __inline struct pmap *
vmspace_pmap(struct vmspace *vmspace)
{
	return &vmspace->vm_pmap;
}

static __inline long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}
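
/*
 * Usage sketch (not part of the original header): accounting code can
 * combine the accessors above to sample a process's address space;
 * 'p' is a hypothetical struct proc pointer.
 */
#if 0
	struct vmspace *vm = p->p_vmspace;
	struct pmap *pm = vmspace_pmap(vm);		/* physical map */
	long rss = vmspace_resident_count(vm);		/* resident pages */
#endif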

/* XXX: number of kernel maps and entries to statically allocate */
#define MAX_KMAP	10
#define	MAX_KMAPENT	128
#define	MAX_MAPENT	128

/*
 * Copy-on-write flags for vm_map operations
 */
#define MAP_UNUSED_01		0x1
#define MAP_COPY_ON_WRITE	0x2
#define MAP_NOFAULT		0x4
#define MAP_PREFAULT		0x8
#define MAP_PREFAULT_PARTIAL	0x10
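
/*
 * Usage sketch (not part of the original header): these bits are OR'd
 * together and passed as the final 'cow' argument of vm_map_insert() or
 * vm_map_find(); the combination shown is merely one plausible choice.
 */
#if 0
	rv = vm_map_insert(map, object, offset, start, end,
	    VM_PROT_ALL, VM_PROT_ALL,
	    MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
#endif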

/*
 * vm_fault option flags
 */
#define VM_FAULT_NORMAL		0	/* Nothing special */
#define VM_FAULT_CHANGE_WIRING	1	/* Change the wiring as appropriate */
#define VM_FAULT_USER_WIRE	2	/* Likewise, but for user purposes */
#define VM_FAULT_WIRE_MASK	(VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
#define	VM_FAULT_HOLD		4	/* Hold the page */
#define VM_FAULT_DIRTY		8	/* Dirty the page */
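
/*
 * Usage sketch (not part of the original header): these flags form the
 * last argument of vm_fault() (prototyped in vm_extern.h).  Wiring a
 * user page down might look like this; 'va' is hypothetical.
 */
#if 0
	rv = vm_fault(map, trunc_page(va), VM_PROT_READ | VM_PROT_WRITE,
	    VM_FAULT_USER_WIRE);
#endif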

#ifdef KERNEL
boolean_t vm_map_check_protection __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t));
struct pmap;
vm_map_t vm_map_create __P((struct pmap *, vm_offset_t, vm_offset_t));
int vm_map_delete __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_map_find __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t, vm_prot_t, vm_prot_t, int));
int vm_map_findspace __P((vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *));
int vm_map_inherit __P((vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t));
void vm_map_init __P((struct vm_map *, vm_offset_t, vm_offset_t));
int vm_map_insert __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int));
int vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
    vm_pindex_t *, vm_prot_t *, boolean_t *));
void vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
boolean_t vm_map_lookup_entry __P((vm_map_t, vm_offset_t, vm_map_entry_t *));
int vm_map_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
int vm_map_user_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
int vm_map_clean __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t));
int vm_map_protect __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
int vm_map_remove __P((vm_map_t, vm_offset_t, vm_offset_t));
void vm_map_startup __P((void));
int vm_map_submap __P((vm_map_t, vm_offset_t, vm_offset_t, vm_map_t));
void vm_map_madvise __P((vm_map_t, vm_offset_t, vm_offset_t, int));
void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
void vm_init2 __P((void));
int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *));
void vm_freeze_copyopts __P((vm_object_t, vm_pindex_t, vm_pindex_t));
int vm_map_stack __P((vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int));
int vm_map_growstack __P((struct proc *p, vm_offset_t addr));
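
/*
 * Usage sketch (not part of the original header): a typical anonymous
 * mapping via vm_map_find().  TRUE asks it to locate free space at or
 * above *addr instead of mapping at a fixed address; 'size' is
 * hypothetical.
 */
#if 0
	vm_offset_t addr = vm_map_min(map);

	rv = vm_map_find(map, NULL, 0, &addr, size, TRUE,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	if (rv != KERN_SUCCESS)
		return (ENOMEM);
#endif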

#endif				/* KERNEL */
#endif				/* _VM_MAP_ */