/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#ifndef	_VM_PAGEQUEUE_
#define	_VM_PAGEQUEUE_

struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		pq_cnt;
	const char	* const pq_name;
} __aligned(CACHE_LINE_SIZE);
#ifndef VM_BATCHQUEUE_SIZE
#define	VM_BATCHQUEUE_SIZE	7
#endif

struct vm_batchqueue {
	vm_page_t	bq_pa[VM_BATCHQUEUE_SIZE];
	int		bq_cnt;
} __aligned(CACHE_LINE_SIZE);
#include <vm/uma.h>
#include <sys/pidctrl.h>

struct sysctl_oid;
/*
 * One vm_domain per NUMA domain.  Contains pagequeues, free page structures,
 * and cpu-level statistics.
 *
 * Lock Key:
 * f	vmd_free_mtx
 * p	vmd_pageout_mtx
 * d	vm_domainset_lock
 * a	atomic
 * c	const after boot
 * q	page queue lock
 */
struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	struct mtx_padalign vmd_free_mtx;
	struct mtx_padalign vmd_pageout_mtx;
	uma_zone_t vmd_pgcache;		/* (c) page free cache. */
	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva R/W arena. */
	struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
	u_int vmd_domain;		/* (c) Domain number. */
	u_int vmd_page_count;		/* (c) Total page count. */
	long vmd_segs;			/* (c) bitmask of the segments */
	u_int __aligned(CACHE_LINE_SIZE) vmd_free_count; /* (a,f) free page count */
	u_int vmd_pageout_deficit;	/* (a) Estimated number of pages deficit */
	uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];
	/* Paging control variables, used within single threaded page daemon. */
	struct pidctrl vmd_pid;		/* Pageout controller. */
	bool vmd_oom;
	int vmd_oom_seq;
	int vmd_last_active_scan;
	struct vm_page vmd_markers[PQ_COUNT]; /* (q) markers for queue scans */
	struct vm_page vmd_inacthead;	/* marker for LRU-defeating insertions */
	struct vm_page vmd_clock[2];	/* markers for active queue scan */
	int vmd_pageout_wanted;		/* (a, p) pageout daemon wait channel */
	int vmd_pageout_pages_needed;	/* (d) page daemon waiting for pages? */
	bool vmd_minset;		/* (d) Are we in vm_min_domains? */
	bool vmd_severeset;		/* (d) Are we in vm_severe_domains? */
	enum {
		VM_LAUNDRY_IDLE = 0,
		VM_LAUNDRY_BACKGROUND,
		VM_LAUNDRY_SHORTFALL
	} vmd_laundry_request;
	/* Paging thresholds and targets. */
	u_int vmd_clean_pages_freed;	/* (q) accumulator for laundry thread */
	u_int vmd_background_launder_target; /* (c) */
	u_int vmd_free_reserved;	/* (c) pages reserved for deadlock */
	u_int vmd_free_target;		/* (c) pages desired free */
	u_int vmd_free_min;		/* (c) min pages desired free */
	u_int vmd_inactive_target;	/* (c) pages desired inactive */
	u_int vmd_pageout_free_min;	/* (c) min pages reserved for kernel */
	u_int vmd_pageout_wakeup_thresh;/* (c) min pages to wake pagedaemon */
	u_int vmd_interrupt_free_min;	/* (c) reserved pages for int code */
	u_int vmd_free_severe;		/* (c) severe page depletion point */
	/* Name for sysctl etc. */
	struct sysctl_oid *vmd_oid;
	char vmd_name[sizeof(__XSTRING(MAXMEMDOM))];
} __aligned(CACHE_LINE_SIZE);
extern struct vm_domain vm_dom[MAXMEMDOM];

#define	VM_DOMAIN(n)		(&vm_dom[(n)])
#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define	vm_pagequeue_trylock(pq)	mtx_trylock(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)
#define	vm_domain_free_assert_locked(n)					\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_OWNED)
#define	vm_domain_free_assert_unlocked(n)				\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_free_lock(d)						\
	    mtx_lock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_lockptr(d)					\
	    (&(d)->vmd_free_mtx)
#define	vm_domain_free_trylock(d)					\
	    mtx_trylock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_unlock(d)					\
	    mtx_unlock(vm_domain_free_lockptr((d)))
#define	vm_domain_pageout_lockptr(d)					\
	    (&(d)->vmd_pageout_mtx)
#define	vm_domain_pageout_assert_locked(n)				\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_OWNED)
#define	vm_domain_pageout_assert_unlocked(n)				\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_pageout_lock(d)					\
	    mtx_lock(vm_domain_pageout_lockptr((d)))
#define	vm_domain_pageout_unlock(d)					\
	    mtx_unlock(vm_domain_pageout_lockptr((d)))
static inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

	vm_pagequeue_assert_locked(pq);
	pq->pq_cnt += addend;
}
#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)
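
/*
 * Usage sketch (hypothetical, for illustration only): queue membership and
 * pq_cnt are both protected by the queue lock, so a caller removing a page
 * is expected to update the two together under one lock acquisition:
 *
 *	vm_pagequeue_lock(pq);
 *	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 *	vm_pagequeue_cnt_dec(pq);
 *	vm_pagequeue_unlock(pq);
 */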
static inline void
vm_batchqueue_init(struct vm_batchqueue *bq)
{

	bq->bq_cnt = 0;
}
static inline bool
vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
{

	if (bq->bq_cnt < nitems(bq->bq_pa)) {
		bq->bq_pa[bq->bq_cnt++] = m;
		return (true);
	}
	return (false);
}
static inline vm_page_t
vm_batchqueue_pop(struct vm_batchqueue *bq)
{

	if (bq->bq_cnt == 0)
		return (NULL);
	return (bq->bq_pa[--bq->bq_cnt]);
}
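
/*
 * A minimal batching sketch (hypothetical; next_page() is a stand-in for
 * whatever produces pages, and handling of a full batch is elided): pages
 * are staged in the batchqueue without any lock held, then drained into a
 * pagequeue under a single lock acquisition, amortizing the locking cost:
 *
 *	struct vm_batchqueue bq;
 *	vm_page_t m;
 *
 *	vm_batchqueue_init(&bq);
 *	while ((m = next_page()) != NULL && vm_batchqueue_insert(&bq, m))
 *		;
 *	vm_pagequeue_lock(pq);
 *	while ((m = vm_batchqueue_pop(&bq)) != NULL) {
 *		TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
 *		vm_pagequeue_cnt_inc(pq);
 *	}
 *	vm_pagequeue_unlock(pq);
 */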
void vm_domain_set(struct vm_domain *vmd);
void vm_domain_clear(struct vm_domain *vmd);
int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);
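
/*
 * A hedged sketch of how these compose (not part of this header):
 * vm_domain_allocate() reserves pages against vmd_free_count, and the
 * reservation is given back with vm_domain_freecnt_inc() (defined below)
 * if the subsequent physical allocation fails:
 *
 *	if (vm_domain_allocate(vmd, req, 1)) {
 *		m = vm_phys_alloc_pages(vmd->vmd_domain,
 *		    VM_FREEPOOL_DEFAULT, 0);
 *		if (m == NULL)
 *			vm_domain_freecnt_inc(vmd, 1);
 *	}
 */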
/*
 *	vm_pagequeue_domain:
 *
 *	Return the memory domain the page belongs to.
 */
static inline struct vm_domain *
vm_pagequeue_domain(vm_page_t m)
{

	return (VM_DOMAIN(vm_phys_domain(m)));
}
/*
 * Return the number of pages we need to free up or cache.
 * A positive number indicates that we do not have enough free pages.
 */
static inline int
vm_paging_target(struct vm_domain *vmd)
{

	return (vmd->vmd_free_target - vmd->vmd_free_count);
}
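
/*
 * For example (illustrative numbers only): with vmd_free_target = 4096 and
 * vmd_free_count = 3000, vm_paging_target() reports a shortfall of 1096
 * pages; with vmd_free_count = 5000 it returns -904, i.e. no deficit.
 */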
/*
 * Returns TRUE if the pagedaemon needs to be woken up.
 */
static inline int
vm_paging_needed(struct vm_domain *vmd, u_int free_count)
{

	return (free_count < vmd->vmd_pageout_wakeup_thresh);
}
/*
 * Returns TRUE if the domain is below the min paging target.
 */
static inline int
vm_paging_min(struct vm_domain *vmd)
{

	return (vmd->vmd_free_min > vmd->vmd_free_count);
}
/*
 * Returns TRUE if the domain is below the severe paging target.
 */
static inline int
vm_paging_severe(struct vm_domain *vmd)
{

	return (vmd->vmd_free_severe > vmd->vmd_free_count);
}
/*
 * Return the number of pages we need to launder.
 * A positive number indicates that we have a shortfall of clean pages.
 */
static inline int
vm_laundry_target(struct vm_domain *vmd)
{

	return (vm_paging_target(vmd));
}
void pagedaemon_wakeup(int domain);
static inline void
vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
{
	u_int old, new;

	old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
	new = old + adj;

	/*
	 * Only update bitsets on transitions.  Notice we short-circuit the
	 * rest of the checks if we're above min already.
	 */
	if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
	    (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
	    (old < vmd->vmd_pageout_free_min &&
	    new >= vmd->vmd_pageout_free_min)))
		vm_domain_clear(vmd);
}
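
/*
 * A worked example (illustrative thresholds only): take
 * vmd_pageout_free_min = 50, vmd_free_severe = 100, and vmd_free_min = 400.
 * Freeing 32 pages when old = 390 yields new = 422; old was below
 * vmd_free_min and new crosses it, so vm_domain_clear() removes the domain
 * from the shortage bitsets.  Freeing 4 pages when old = 390 yields
 * new = 394: no threshold is crossed, and the atomic add is the only work
 * done.
 */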
#endif	/* !_VM_PAGEQUEUE_ */