/*-
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set, by default this is set 1.  Each
 * thread may further restrict the cpus it may run on to a subset of this
 * named set.  This creates an anonymous set which other threads and processes
 * may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall API these are referred to
 * as the ROOT, CPUSET, and MASK levels, where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall APIs a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid, but the mask must
 * still fall within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */

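/*
 * For illustration only (a userland sketch, not part of this file): the
 * "simple application" case described above, using the
 * cpuset_getaffinity(2) and cpuset_setaffinity(2) system calls.  Pinning
 * to CPU 0 is an arbitrary example choice.
 *
 *	#include <sys/param.h>
 *	#include <sys/cpuset.h>
 *
 *	cpuset_t mask;
 *
 *	Query the cpus available to the current thread's base set:
 *	cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask);
 *
 *	Restrict the current thread to CPU 0 via an anonymous mask:
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask);
 */
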
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero;

cpuset_t *cpuset_root;

/*
 * Acquire a reference to a cpuset, all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

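/*
 * A minimal in-kernel usage sketch (illustrative only; 'parent' is a
 * hypothetical set pointer): every stored cpuset pointer must hold a
 * reference, taken with cpuset_ref() and dropped with cpuset_rel().
 *
 *	struct cpuset *set;
 *
 *	set = cpuset_ref(parent);	take a ref before storing
 *	...
 *	cpuset_rel(set);		drop it when the pointer dies
 */
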
/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}

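/*
 * Usage sketch of the two-phase release (illustrative; it mirrors the
 * pattern in cpuset_setproc() below, with 'oldset' a hypothetical stale
 * set): defer releases while thread locks are held, then complete them
 * once it is safe to free.
 *
 *	struct setlist droplist;
 *
 *	LIST_INIT(&droplist);
 *	... with thread locks held ...
 *	cpuset_rel_defer(&droplist, oldset);
 *	... locks dropped ...
 *	while ((oldset = LIST_FIRST(&droplist)) != NULL)
 *		cpuset_rel_complete(oldset);
 */
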
/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *jset, *tset;

		jset = td->td_ucred->cr_prison->pr_cpuset;
		for (tset = set; tset != NULL; tset = tset->cs_parent)
			if (tset == jset)
				break;
		if (tset == NULL) {
			cpuset_rel(set);
			set = NULL;
		}
	}

	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(&set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int check_mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (check_mask) {
		if (!CPU_OVERLAP(&set->cs_mask, mask))
			return (EDEADLK);
		CPU_COPY(&set->cs_mask, &newmask);
		CPU_AND(&newmask, mask);
	} else
		CPU_COPY(mask, &newmask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
			break;

	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail
	 * we do not allow modifying the dedicated root
	 * cpuset of the jail but may still allow to
	 * change child sets.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask, 0);
	if (error)
		goto out;
	CPU_COPY(mask, &set->cs_mask);
	cpuset_update(set, mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Resolve the 'which' parameter of several cpuset APIs.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, td)
				if (td->td_tid == id)
					break;
			if (td != NULL)
				break;
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (td == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		cpuset_ref(pr->pr_cpuset);
		*setp = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);
		return (0);
	}
	case CPU_WHICH_IRQ:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;

	return (0);
}

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, const cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    base set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				goto unlock_out;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Creates the cpuset for thread0.  We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */

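/*
 * For example (userland, illustrative only), an administrator could pull
 * the default set 1 off of all but the first two CPUs with the cpuset(1)
 * utility, leaving the remaining CPUs for sets created later:
 *
 *	cpuset -l 0-1 -s 1
 */
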
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	CPU_FILL(&set->cs_mask);
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;
	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	/*
	 * Initialize the unit allocator.  0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}

/*
 * Create a cpuset, as with cpuset_create(), but mark the new 'set' as
 * root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * On success, returns the set in *setp with a single reference.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
	struct cpuset *set;
	int error;

	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	error = cpuset_setproc(p->p_pid, set, NULL);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}

/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
#ifdef SMP
	mask.__bits[0] = all_cpus;
#else
	mask.__bits[0] = 1;
#endif
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);
	return (error);
}

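/*
 * For example (userland, illustrative only), cpuset(1) reaches this
 * syscall when moving an existing process into a numbered set, here
 * pid 100 into set 2:
 *
 *	cpuset -s 2 -p 100
 */
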
#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
		return (EINVAL);
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
			error = intr_getaffinity(uap->id, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(uap->which, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
			error = intr_setaffinity(uap->id, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB
DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;
	int cpu, once;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  mask=");
		for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
			if (CPU_ISSET(cpu, &set->cs_mask)) {
				if (once == 0) {
					db_printf("%d", cpu);
					once = 1;
				} else
					db_printf(",%d", cpu);
			}
		}
		db_printf("\n");
	}
}
#endif /* DDB */