/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/domainset.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
/*
 * Iterators are written such that the first nowait pass has as short a
 * codepath as possible to eliminate bloat from the allocator.  It is
 * assumed that most allocations are successful.
 */

/*
 * Number of consecutive pages placed on one domain before the
 * interleave policy advances to the next domain (when reservations
 * are not in use).
 */
static int vm_domainset_default_stride = 64;
62 * Determine which policy is to be used for this allocation.
65 vm_domainset_iter_init(struct vm_domainset_iter *di, struct vm_object *obj,
68 struct domainset *domain;
71 * object policy takes precedence over thread policy. The policies
72 * are immutable and unsynchronized. Updates can race but pointer
73 * loads are assumed to be atomic.
75 if (obj != NULL && (domain = obj->domain.dr_policy) != NULL) {
76 di->di_domain = domain;
77 di->di_iter = &obj->domain.dr_iterator;
79 di->di_domain = curthread->td_domain.dr_policy;
80 di->di_iter = &curthread->td_domain.dr_iterator;
82 di->di_policy = di->di_domain->ds_policy;
83 if (di->di_policy == DOMAINSET_POLICY_INTERLEAVE) {
84 #if VM_NRESERVLEVEL > 0
85 if (vm_object_reserv(obj)) {
87 * Color the pindex so we end up on the correct
88 * reservation boundary.
90 pindex += obj->pg_color;
91 pindex >>= VM_LEVEL_0_ORDER;
94 pindex /= vm_domainset_default_stride;
96 * Offset pindex so the first page of each object does
97 * not end up in domain 0.
100 pindex += (((uintptr_t)obj) / sizeof(*obj));
101 di->di_offset = pindex;
106 vm_domainset_iter_rr(struct vm_domainset_iter *di, int *domain)
109 *domain = di->di_domain->ds_order[
110 ++(*di->di_iter) % di->di_domain->ds_cnt];
114 vm_domainset_iter_prefer(struct vm_domainset_iter *di, int *domain)
119 d = di->di_domain->ds_order[
120 ++(*di->di_iter) % di->di_domain->ds_cnt];
121 } while (d == di->di_domain->ds_prefer);
126 vm_domainset_iter_interleave(struct vm_domainset_iter *di, int *domain)
130 d = di->di_offset % di->di_domain->ds_cnt;
132 *domain = di->di_domain->ds_order[d];
136 vm_domainset_iter_next(struct vm_domainset_iter *di, int *domain)
139 KASSERT(di->di_n > 0,
140 ("vm_domainset_iter_first: Invalid n %d", di->di_n));
141 switch (di->di_policy) {
142 case DOMAINSET_POLICY_FIRSTTOUCH:
144 * To prevent impossible allocations we convert an invalid
145 * first-touch to round-robin.
148 case DOMAINSET_POLICY_INTERLEAVE:
150 case DOMAINSET_POLICY_ROUNDROBIN:
151 vm_domainset_iter_rr(di, domain);
153 case DOMAINSET_POLICY_PREFER:
154 vm_domainset_iter_prefer(di, domain);
157 panic("vm_domainset_iter_first: Unknown policy %d",
160 KASSERT(*domain < vm_ndomains,
161 ("vm_domainset_iter_next: Invalid domain %d", *domain));
165 vm_domainset_iter_first(struct vm_domainset_iter *di, int *domain)
168 switch (di->di_policy) {
169 case DOMAINSET_POLICY_FIRSTTOUCH:
170 *domain = PCPU_GET(domain);
171 if (DOMAINSET_ISSET(*domain, &di->di_domain->ds_mask)) {
173 * Add an extra iteration because we will visit the
174 * current domain a second time in the rr iterator.
176 di->di_n = di->di_domain->ds_cnt + 1;
180 * To prevent impossible allocations we convert an invalid
181 * first-touch to round-robin.
184 case DOMAINSET_POLICY_ROUNDROBIN:
185 di->di_n = di->di_domain->ds_cnt;
186 vm_domainset_iter_rr(di, domain);
188 case DOMAINSET_POLICY_PREFER:
189 *domain = di->di_domain->ds_prefer;
190 di->di_n = di->di_domain->ds_cnt;
192 case DOMAINSET_POLICY_INTERLEAVE:
193 vm_domainset_iter_interleave(di, domain);
194 di->di_n = di->di_domain->ds_cnt;
197 panic("vm_domainset_iter_first: Unknown policy %d",
200 KASSERT(di->di_n > 0,
201 ("vm_domainset_iter_first: Invalid n %d", di->di_n));
202 KASSERT(*domain < vm_ndomains,
203 ("vm_domainset_iter_first: Invalid domain %d", *domain));
207 vm_domainset_iter_page_init(struct vm_domainset_iter *di, struct vm_object *obj,
208 vm_pindex_t pindex, int *domain, int *req)
211 vm_domainset_iter_init(di, obj, pindex);
213 *req = (di->di_flags & ~(VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) |
215 vm_domainset_iter_first(di, domain);
219 vm_domainset_iter_page(struct vm_domainset_iter *di, int *domain, int *req)
223 * If we exhausted all options with NOWAIT and did a WAITFAIL it
224 * is time to return an error to the caller.
226 if ((*req & VM_ALLOC_WAITFAIL) != 0)
229 /* If there are more domains to visit we run the iterator. */
230 if (--di->di_n != 0) {
231 vm_domainset_iter_next(di, domain);
235 /* If we visited all domains and this was a NOWAIT we return error. */
236 if ((di->di_flags & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) == 0)
240 * We have visited all domains with non-blocking allocations, try
241 * from the beginning with a blocking allocation.
243 vm_domainset_iter_first(di, domain);
251 vm_domainset_iter_malloc_init(struct vm_domainset_iter *di,
252 struct vm_object *obj, int *domain, int *flags)
255 vm_domainset_iter_init(di, obj, 0);
256 if (di->di_policy == DOMAINSET_POLICY_INTERLEAVE)
257 di->di_policy = DOMAINSET_POLICY_ROUNDROBIN;
258 di->di_flags = *flags;
259 *flags = (di->di_flags & ~M_WAITOK) | M_NOWAIT;
260 vm_domainset_iter_first(di, domain);
264 vm_domainset_iter_malloc(struct vm_domainset_iter *di, int *domain, int *flags)
267 /* If there are more domains to visit we run the iterator. */
268 if (--di->di_n != 0) {
269 vm_domainset_iter_next(di, domain);
273 /* If we visited all domains and this was a NOWAIT we return error. */
274 if ((di->di_flags & M_WAITOK) == 0)
278 * We have visited all domains with non-blocking allocations, try
279 * from the beginning with a blocking allocation.
281 vm_domainset_iter_first(di, domain);
282 *flags = di->di_flags;
289 vm_domainset_iter_page(struct vm_domainset_iter *di, int *domain, int *flags)
292 return (EJUSTRETURN);
296 vm_domainset_iter_page_init(struct vm_domainset_iter *di,
297 struct vm_object *obj, vm_pindex_t pindex, int *domain, int *flags)
304 vm_domainset_iter_malloc(struct vm_domainset_iter *di, int *domain, int *flags)
307 return (EJUSTRETURN);
311 vm_domainset_iter_malloc_init(struct vm_domainset_iter *di,
312 struct vm_object *obj, int *domain, int *flags)