/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef	_VM_PAGEQUEUE_
#define	_VM_PAGEQUEUE_

struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		pq_cnt;
	const char	* const pq_name;
} __aligned(CACHE_LINE_SIZE);

#ifndef VM_BATCHQUEUE_SIZE
#define	VM_BATCHQUEUE_SIZE	7
#endif

struct vm_batchqueue {
	vm_page_t	bq_pa[VM_BATCHQUEUE_SIZE];
	int		bq_cnt;
} __aligned(CACHE_LINE_SIZE);

#include <sys/pidctrl.h>

/*
 * One vm_domain per NUMA domain.  Contains pagequeues, free page structures,
 * and paging thresholds and statistics for the domain.
 *
 * Lock Key:
 * a	atomic
 * c	const after boot
 * d	vm_domainset_lock
 * f	vmd_free_mtx
 * p	vmd_pageout_mtx
 * q	page queue lock
 *
 * A unique page daemon thread manages each vm_domain structure and is
 * responsible for ensuring that some free memory is available by freeing
 * inactive pages and aging active pages.  To decide how many pages to process,
 * it uses thresholds derived from the number of pages in the domain:
 *
 *  vmd_page_count
 *   ---
 *    |
 *    |-> vmd_inactive_target (~3%)
 *    |   - The active queue scan target is given by
 *    |     (vmd_inactive_target + vmd_free_target - vmd_free_count).
 *    |
 *    |-> vmd_free_target (~2%)
 *    |   - Target for page reclamation.
 *    |
 *    |-> vmd_pageout_wakeup_thresh (~1.8%)
 *    |   - Threshold for waking up the page daemon.
 *    |
 *    |-> vmd_free_min (~0.5%)
 *    |   - First low memory threshold.
 *    |   - Causes per-CPU caching to be lazily disabled in UMA.
 *    |   - vm_wait() sleeps below this threshold.
 *    |
 *    |-> vmd_free_severe (~0.25%)
 *    |   - Second low memory threshold.
 *    |   - Triggers aggressive UMA reclamation, disables delayed buffer
 *    |     writes.
 *    |
 *    |-> vmd_free_reserved (~0.13%)
 *    |   - Minimum for VM_ALLOC_NORMAL page allocations.
 *    |-> vmd_pageout_free_min (32 + 2 pages)
 *    |   - Minimum for waking a page daemon thread sleeping in vm_wait().
 *    |-> vmd_interrupt_free_min (2 pages)
 *    |   - Minimum for VM_ALLOC_SYSTEM page allocations.
 *   ---
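 *
 * As a rough worked example (illustrative figures, not taken from any
 * particular system): a domain with 4 GiB of RAM and 4 KiB pages has
 * vmd_page_count = 1,048,576, so the percentages above come to roughly:
 *
 *	vmd_inactive_target		~31,000 pages (~123 MiB)
 *	vmd_free_target			~21,000 pages (~82 MiB)
 *	vmd_pageout_wakeup_thresh	~19,000 pages (~74 MiB)
 *	vmd_free_min			~5,200 pages (~20 MiB)
 *	vmd_free_severe			~2,600 pages (~10 MiB)
 *	vmd_free_reserved		~1,400 pages (~5 MiB)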
 *
 * Free page count regulation:
 *
 * The page daemon attempts to ensure that the free page count is above the
 * free target.  It wakes up periodically (every 100ms) to input the current
 * free page shortage (free_target - free_count) to a PID controller, which in
 * response outputs the number of pages to attempt to reclaim.  The shortage's
 * current magnitude, rate of change, and cumulative value are together used
 * to determine the controller's output.  The page daemon target thus adapts
 * dynamically to the system's demand for free pages, resulting in less
 * burstiness than a simple hysteresis loop.
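 *
 * Schematically, one controller update looks like the following sketch
 * (illustrative pseudocode of a discrete PID step, not the actual
 * <sys/pidctrl.h> interface; Kp, Ki and Kd stand for the gain constants):
 *
 *	error = free_target - free_count;	(current magnitude)
 *	integral += error;			(cumulative value)
 *	derivative = error - prev_error;	(rate of change)
 *	prev_error = error;
 *	scan_target = Kp * error + Ki * integral + Kd * derivative;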
 *
 * When the free page count drops below the wakeup threshold,
 * vm_domain_allocate() proactively wakes up the page daemon.  This helps
 * ensure that the system responds promptly to a large instantaneous free
 * page shortage.
 *
 * The page daemon also attempts to ensure that some fraction of the system's
 * memory is present in the inactive (I) and laundry (L) page queues, so that
 * it can respond promptly to a sudden free page shortage.  In particular, the
 * page daemon thread aggressively scans active pages so long as the following
 * condition holds:
 *
 *	len(I) + len(L) + free_target - free_count < inactive_target
 *
 * Otherwise, when the inactive target is met, the page daemon periodically
 * scans a small portion of the active queue in order to maintain up-to-date
 * per-page access history.  Unreferenced pages in the active queue thus
 * eventually migrate to the inactive queue.
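 *
 * For instance (made-up numbers): with inactive_target = 31,000,
 * free_target = 21,000, free_count = 16,000, len(I) = 20,000 and
 * len(L) = 5,000, the left-hand side is 20,000 + 5,000 + 21,000 - 16,000 =
 * 30,000 < 31,000, so the aggressive scan continues; once enough active
 * pages have been deactivated, the inequality fails and the page daemon
 * falls back to periodic scanning.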
 *
 * The per-domain laundry thread periodically launders dirty pages based on
 * the number of clean pages freed by the page daemon since the last
 * laundering.  If the page daemon fails to meet its scan target (i.e., the
 * PID controller output) because of a shortage of clean inactive pages, the
 * laundry thread attempts to launder enough pages to meet the free page
 * target.
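 *
 * In outline, the laundry thread's policy decision is (a simplified sketch,
 * not the exact logic of the laundry worker):
 *
 *	if (a shortfall was signalled by the page daemon)
 *		launder enough pages to meet the free page target;
 *	else if (enough clean pages were freed since the last pass)
 *		launder a small background batch;
 *	else
 *		sleep until re-triggered;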
 *
 * Page allocation priorities:
 *
 * The system defines three page allocation priorities: VM_ALLOC_NORMAL,
 * VM_ALLOC_SYSTEM and VM_ALLOC_INTERRUPT.  An interrupt-priority allocation
 * can claim any free page.  This priority is used in the pmap layer when
 * attempting to allocate a page for the kernel page tables; in such cases an
 * allocation failure will usually result in a kernel panic.  The system
 * priority is used for most other kernel memory allocations, for instance by
 * UMA's slab allocator or the buffer cache.  Such allocations will fail if
 * the free count is below interrupt_free_min.  All other allocations occur at
 * the normal priority, which is typically used for allocation of user pages,
 * for instance in the page fault handler or when allocating page table pages
 * or pv_entry structures for user pmaps.  Such allocations fail if the free
 * count is below the free_reserved threshold.
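 *
 * A minimal sketch of the corresponding threshold check (the real accounting
 * lives in vm_domain_allocate()):
 *
 *	if (req_class == VM_ALLOC_INTERRUPT)
 *		limit = 0;
 *	else if (req_class == VM_ALLOC_SYSTEM)
 *		limit = vmd->vmd_interrupt_free_min;
 *	else
 *		limit = vmd->vmd_free_reserved;
 *	if (vmd->vmd_free_count < npages + limit)
 *		the allocation fails;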
 *
 * Free memory shortages:
 *
 * The system uses the free_min and free_severe thresholds to apply
 * back-pressure and give the page daemon a chance to recover.  When a page
 * allocation fails due to a shortage and the allocating thread cannot handle
 * failure, it may call vm_wait() to sleep until free pages are available.
 * vm_domain_freecnt_inc() wakes sleeping threads once the free page count
 * rises above the free_min threshold; the page daemon and laundry threads
 * are given priority and will wake up once free_count reaches the (much
 * smaller) pageout_free_min threshold.
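 *
 * A typical consumer that cannot tolerate failure loops in this general
 * shape (illustrative only; obj and pindex are placeholders):
 *
 *	while ((m = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL)) == NULL)
 *		vm_wait(obj);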
 *
 * On NUMA systems, the domainset iterators always prefer NUMA domains where
 * the free page count is above the free_min threshold.  This means that
 * given the choice between two NUMA domains, one above the free_min
 * threshold and one below, the former will be used to satisfy the allocation
 * request regardless of the domain selection policy.
 *
 * In addition to reclaiming memory from the page queues, the vm_lowmem event
 * fires every ten seconds so long as the system is under memory pressure
 * (i.e., vmd_free_count < vmd_free_target).  This allows kernel subsystems
 * to register for notifications of free page shortages, upon which they may
 * shrink their caches.  Following a vm_lowmem event, UMA's caches are pruned
 * to ensure that they do not contain an excess of unused memory.  When a
 * domain is below the free_min threshold, UMA limits the population of
 * per-CPU caches.  When a domain falls below the free_severe threshold,
 * UMA's caches are completely drained.
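 *
 * Subsystems subscribe with EVENTHANDLER_REGISTER(); a hypothetical consumer
 * (foo_lowmem() and foo_cache_trim() are made-up names) would look like:
 *
 *	static void
 *	foo_lowmem(void *arg, int flags)
 *	{
 *		foo_cache_trim();
 *	}
 *	EVENTHANDLER_REGISTER(vm_lowmem, foo_lowmem, NULL,
 *	    EVENTHANDLER_PRI_ANY);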
 *
 * If the system encounters a global memory shortage, it may resort to the
 * out-of-memory (OOM) killer, which selects a process and delivers SIGKILL
 * in a last-ditch attempt to free up some pages.  Either of the two
 * following conditions will activate the OOM killer:
 *
 * 1. The page daemons collectively fail to reclaim any pages during their
 *    inactive queue scans.  After vm_pageout_oom_seq consecutive scans fail,
 *    the page daemon thread votes for an OOM kill, and an OOM kill is
 *    triggered when all page daemons have voted.  This heuristic is strict
 *    and may fail to trigger even when the system is effectively deadlocked.
 *
 * 2. Threads in the user fault handler are repeatedly unable to make
 *    progress while allocating a page to satisfy the fault.  After
 *    vm_pfault_oom_attempts page allocation failures with intervening
 *    vm_wait() calls, the faulting thread will trigger an OOM kill.
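 *
 * Both heuristics are tunable at run time; for example (arbitrary values
 * shown):
 *
 *	sysctl vm.pageout_oom_seq=120
 *	sysctl vm.pfault_oom_attempts=-1
 *
 * raises the number of failed scans required by the first heuristic and
 * disables the second entirely.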
 */
struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	struct mtx_padalign vmd_free_mtx;
	struct mtx_padalign vmd_pageout_mtx;
	struct vm_pgcache {
		int pool;
		int domain;
		uma_zone_t zone;
	} vmd_pgcache[VM_NFREEPOOL];
	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva R/W arena. */
	struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
	u_int vmd_domain;		/* (c) Domain number. */
	u_int vmd_page_count;		/* (c) Total page count. */
	long vmd_segs;			/* (c) bitmask of the segments */
	u_int __aligned(CACHE_LINE_SIZE) vmd_free_count; /* (a,f) free page count */
	u_int vmd_pageout_deficit;	/* (a) Estimated number of pages deficit */
	uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];

	/* Paging control variables, used within single threaded page daemon. */
	struct pidctrl vmd_pid;		/* Pageout controller. */
	int vmd_last_active_scan;
	struct vm_page vmd_markers[PQ_COUNT]; /* (q) markers for queue scans */
	struct vm_page vmd_inacthead;	/* marker for LRU-defeating insertions */
	struct vm_page vmd_clock[2];	/* markers for active queue scan */

	int vmd_pageout_wanted;		/* (a, p) pageout daemon wait channel */
	int vmd_pageout_pages_needed;	/* (d) page daemon waiting for pages? */
	bool vmd_minset;		/* (d) Are we in vm_min_domains? */
	bool vmd_severeset;		/* (d) Are we in vm_severe_domains? */
	enum {
		VM_LAUNDRY_IDLE = 0,
		VM_LAUNDRY_BACKGROUND,
		VM_LAUNDRY_SHORTFALL
	} vmd_laundry_request;

	/* Paging thresholds and targets. */
	u_int vmd_clean_pages_freed;	/* (q) accumulator for laundry thread */
	u_int vmd_background_launder_target; /* (c) */
	u_int vmd_free_reserved;	/* (c) pages reserved for deadlock */
	u_int vmd_free_target;		/* (c) pages desired free */
	u_int vmd_free_min;		/* (c) minimum pages desired free */
	u_int vmd_inactive_target;	/* (c) pages desired inactive */
	u_int vmd_pageout_free_min;	/* (c) min pages reserved for kernel */
	u_int vmd_pageout_wakeup_thresh;/* (c) min pages to wake pagedaemon */
	u_int vmd_interrupt_free_min;	/* (c) reserved pages for int code */
	u_int vmd_free_severe;		/* (c) severe page depletion point */

	/* Name for sysctl etc. */
	struct sysctl_oid *vmd_oid;
	char vmd_name[sizeof(__XSTRING(MAXMEMDOM))];
} __aligned(CACHE_LINE_SIZE);

extern struct vm_domain vm_dom[MAXMEMDOM];

#define	VM_DOMAIN(n)		(&vm_dom[(n)])
#define	VM_DOMAIN_EMPTY(n)	(vm_dom[(n)].vmd_page_count == 0)

#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define	vm_pagequeue_trylock(pq)	mtx_trylock(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)

#define	vm_domain_free_assert_locked(n)					\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_OWNED)
#define	vm_domain_free_assert_unlocked(n)				\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_free_lock(d)						\
	    mtx_lock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_lockptr(d)					\
	    (&(d)->vmd_free_mtx)
#define	vm_domain_free_trylock(d)					\
	    mtx_trylock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_unlock(d)					\
	    mtx_unlock(vm_domain_free_lockptr((d)))

#define	vm_domain_pageout_lockptr(d)					\
	    (&(d)->vmd_pageout_mtx)
#define	vm_domain_pageout_assert_locked(n)				\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_OWNED)
#define	vm_domain_pageout_assert_unlocked(n)				\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_pageout_lock(d)					\
	    mtx_lock(vm_domain_pageout_lockptr((d)))
#define	vm_domain_pageout_unlock(d)					\
	    mtx_unlock(vm_domain_pageout_lockptr((d)))

static inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

	vm_pagequeue_assert_locked(pq);
	pq->pq_cnt += addend;
}
#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)

static inline void
vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
{

	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);
}

static inline void
vm_batchqueue_init(struct vm_batchqueue *bq)
{

	bq->bq_cnt = 0;
}

static inline bool
vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
{

	if (bq->bq_cnt < nitems(bq->bq_pa)) {
		bq->bq_pa[bq->bq_cnt++] = m;
		return (true);
	}
	return (false);
}

static inline vm_page_t
vm_batchqueue_pop(struct vm_batchqueue *bq)
{

	if (bq->bq_cnt == 0)
		return (NULL);
	return (bq->bq_pa[--bq->bq_cnt]);
}
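
/*
 * Example usage (illustrative; process_page() stands in for whatever
 * per-page work the caller does with each batched page):
 *
 *	struct vm_batchqueue bq;
 *	vm_page_t m;
 *
 *	vm_batchqueue_init(&bq);
 *	...
 *	if (!vm_batchqueue_insert(&bq, m))
 *		the queue is full and must be drained first;
 *	...
 *	while ((m = vm_batchqueue_pop(&bq)) != NULL)
 *		process_page(m);
 */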

void vm_domain_set(struct vm_domain *vmd);
void vm_domain_clear(struct vm_domain *vmd);
int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);

/*
 * vm_pagequeue_domain:
 *
 *	Return the memory domain the page belongs to.
 */
static inline struct vm_domain *
vm_pagequeue_domain(vm_page_t m)
{

	return (VM_DOMAIN(vm_phys_domain(m)));
}

/*
 * Return the number of pages we need to free up or cache.
 * A positive number indicates that we do not have enough free pages.
 */
static inline int
vm_paging_target(struct vm_domain *vmd)
{

	return (vmd->vmd_free_target - vmd->vmd_free_count);
}

/*
 * Returns TRUE if the pagedaemon needs to be woken up.
 */
static inline int
vm_paging_needed(struct vm_domain *vmd, u_int free_count)
{

	return (free_count < vmd->vmd_pageout_wakeup_thresh);
}

/*
 * Returns TRUE if the domain is below the min paging target.
 */
static inline int
vm_paging_min(struct vm_domain *vmd)
{

	return (vmd->vmd_free_min > vmd->vmd_free_count);
}

/*
 * Returns TRUE if the domain is below the severe paging target.
 */
static inline int
vm_paging_severe(struct vm_domain *vmd)
{

	return (vmd->vmd_free_severe > vmd->vmd_free_count);
}

/*
 * Return the number of pages we need to launder.
 * A positive number indicates that we have a shortfall of clean pages.
 */
static inline int
vm_laundry_target(struct vm_domain *vmd)
{

	return (vm_paging_target(vmd));
}

void pagedaemon_wakeup(int domain);

static inline void
vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
{
	u_int old, new;

	old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
	new = old + adj;

	/*
	 * Only update bitsets on transitions.  Notice we short-circuit the
	 * rest of the checks if we're above min already.
	 */
	if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
	    (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
	    (old < vmd->vmd_pageout_free_min &&
	    new >= vmd->vmd_pageout_free_min)))
		vm_domain_clear(vmd);
}

#endif	/* !_VM_PAGEQUEUE_ */