/*-
 * Copyright (c) 2008,  Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/capsicum.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */
/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set; by default this is set 1.
 * Each thread may further restrict the cpus it may run on to a subset of
 * this named set.  This creates an anonymous set which other threads and
 * processes may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall API these are referred
 * to as the ROOT, CPUSET, and MASK levels, where CPUSET is called 'base'
 * here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall APIs a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new
 * mask.  Modifying a pid or tid's mask applies only to that tid, but the
 * new mask must still fall within the assigned parent set.
 *
 * A thread may not be assigned to a set separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...), as
 * sketched below.
 */
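/*
 * Illustrative userland sketch of that pattern (not part of the original
 * file), using the cpuset_getaffinity(2)/cpuset_setaffinity(2) wrappers
 * declared in <sys/cpuset.h>; error handling is elided and at least one
 * available cpu is assumed:
 *
 *	cpuset_t avail, one;
 *
 *	cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(avail), &avail);
 *	CPU_ZERO(&one);
 *	CPU_SET(CPU_FFS(&avail) - 1, &one);	   pin to first available cpu
 *	cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(one), &one);
 */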
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero, *cpuset_default;

/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD | CTLFLAG_CAPRD,
    SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)");

cpuset_t *cpuset_root;
cpuset_t cpuset_domain[MAXMEMDOM];
/*
 * Acquire a reference to a cpuset; all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}
/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}
/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}
/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}
/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 * A usage sketch follows cpuset_rel_complete() below.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}
/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{

	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}
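/*
 * Usage sketch for the deferred-release pair above (this mirrors what
 * cpuset_setproc() does below): queue releases while the thread lock is
 * held, then complete them once it is safe to free:
 *
 *	struct setlist droplist;
 *
 *	LIST_INIT(&droplist);
 *	thread_lock(td);
 *	cpuset_rel_defer(&droplist, td->td_cpuset);
 *	td->td_cpuset = nset;
 *	thread_unlock(td);
 *	while ((set = LIST_FIRST(&droplist)) != NULL)
 *		cpuset_rel_complete(set);
 */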
/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *jset, *tset;

		jset = td->td_ucred->cr_prison->pr_cpuset;
		for (tset = set; tset != NULL; tset = tset->cs_parent)
			if (tset == jset)
				break;
		if (tset == NULL) {
			cpuset_rel(set);
			set = NULL;
		}
	}

	return (set);
}
/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(&set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}
/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}
/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int check_mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (check_mask) {
		if (!CPU_OVERLAP(&set->cs_mask, mask))
			return (EDEADLK);
		CPU_COPY(&set->cs_mask, &newmask);
		CPU_AND(&newmask, mask);
	} else
		CPU_COPY(mask, &newmask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
			break;
	return (error);
}
/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);
}
/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * When called from within a jail, we do not allow modifying the
	 * jail's dedicated root cpuset, but child sets may still be
	 * changed.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask, 0);
	if (error)
		goto out;
	CPU_COPY(mask, &set->cs_mask);
	cpuset_update(set, mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}
/*
 * Resolve the 'which' parameter of several cpuset APIs.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		td = tdfind(id, -1);
		if (td == NULL)
			return (ESRCH);
		p = td->td_proc;
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		cpuset_ref(pr->pr_cpuset);
		*setp = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);
		return (0);
	}
	case CPU_WHICH_IRQ:
	case CPU_WHICH_INTRHANDLER:
	case CPU_WHICH_ITHREAD:
	case CPU_WHICH_DOMAIN:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}
/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent, otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, const cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}
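/*
 * For example (illustrative): a thread in base set 2 with mask 0-3 that
 * requests mask 0-1 ends up with an anonymous shadow set parented to
 * set 2; shadowing that thread again reuses set 2 as the parent rather
 * than stacking anonymous sets:
 *
 *	set 2 (base, mask: 0-3)
 *	 `- anonymous shadow (id CPUSET_INVALID, mask: 0-1)  <- td_cpuset
 */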
/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.  A sketch of both call patterns follows the function body.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}
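/*
 * Both cases are exercised by the syscall handlers below (illustrative):
 *
 *	cpuset_setproc(pid, set, NULL);		case 1: rebase the process
 *	cpuset_setproc(pid, NULL, mask);	case 2: mask every thread
 */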
/*
 * Return a string representing a valid layout for a cpuset_t object.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
char *
cpusetobj_strprint(char *buf, const cpuset_t *set)
{
	char *tbuf;
	size_t i, bytesp, bufsiz;

	tbuf = buf;
	bytesp = 0;
	bufsiz = CPUSETBUFSIZ;

	for (i = 0; i < (_NCPUWORDS - 1); i++) {
		bytesp = snprintf(tbuf, bufsiz, "%lx,", set->__bits[i]);
		bufsiz -= bytesp;
		tbuf += bytesp;
	}
	snprintf(tbuf, bufsiz, "%lx", set->__bits[_NCPUWORDS - 1]);
	return (buf);
}
/*
 * Build a valid cpuset_t object from a string representation.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
int
cpusetobj_strscan(cpuset_t *set, const char *buf)
{
	u_int nwords;
	int i, ret;

	if (strlen(buf) > CPUSETBUFSIZ - 1)
		return (-1);

	/* Allow a shorter version of the mask to be passed when necessary. */
	nwords = 1;
	for (i = 0; buf[i] != '\0'; i++)
		if (buf[i] == ',')
			nwords++;
	if (nwords > _NCPUWORDS)
		return (-1);

	CPU_ZERO(set);
	for (i = 0; i < (nwords - 1); i++) {
		ret = sscanf(buf, "%lx,", &set->__bits[i]);
		if (ret == 0 || ret == -1)
			return (-1);
		buf = strstr(buf, ",");
		if (buf == NULL)
			return (-1);
		buf++;
	}
	ret = sscanf(buf, "%lx", &set->__bits[nwords - 1]);
	if (ret == 0 || ret == -1)
		return (-1);
	return (0);
}
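/*
 * Layout example (illustrative): with a 128-cpu cpuset_t made of two
 * 64-bit words (_NCPUWORDS == 2), a mask holding cpus 0-3 prints as
 * "f,0" and one holding only cpu 64 prints as "0,1".  cpusetobj_strscan()
 * additionally accepts the shortened form "f" for the first of these.
 */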
/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}
/*
 * Apply a new cpumask to the ithread.
 */
int
cpuset_setithread(lwpid_t id, int cpu)
{
	struct cpuset *nset, *rset;
	struct cpuset *parent, *old_set;
	struct thread *td;
	struct proc *p;
	cpusetid_t cs_id;
	cpuset_t mask;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	rset = uma_zalloc(cpuset_zone, M_WAITOK);
	cs_id = CPUSET_INVALID;

	CPU_ZERO(&mask);
	if (cpu == NOCPU)
		CPU_COPY(cpuset_root, &mask);
	else
		CPU_SET(cpu, &mask);

	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &old_set);
	if (error != 0 || ((cs_id = alloc_unr(cpuset_unr)) == CPUSET_INVALID))
		goto out;

	/* cpuset_which() returns with PROC_LOCK held. */
	old_set = td->td_cpuset;

	if (cpu == NOCPU) {
		/*
		 * Roll back to the default set.  We are not using
		 * cpuset_shadow() here because the CPU_SUBSET() check could
		 * fail if the default set does not contain all CPUs.
		 */
		error = _cpuset_create(nset, cpuset_default, &mask,
		    CPUSET_INVALID);
		goto applyset;
	}

	if (old_set->cs_id == 1 || (old_set->cs_id == CPUSET_INVALID &&
	    old_set->cs_parent->cs_id == 1)) {
		/*
		 * Current set is either default (1) or
		 * shadowed version of default set.
		 *
		 * Allocate new root set to be able to shadow it
		 * with any mask.
		 */
		error = _cpuset_create(rset, cpuset_zero,
		    &cpuset_zero->cs_mask, cs_id);
		if (error != 0) {
			PROC_UNLOCK(p);
			goto out;
		}
		rset->cs_flags |= CPU_SET_ROOT;
		parent = rset;
		rset = NULL;
		cs_id = CPUSET_INVALID;
	} else {
		/* Assume existing set was already allocated by previous call */
		parent = old_set;
		old_set = NULL;
	}

	error = cpuset_shadow(parent, nset, &mask);
applyset:
	if (error == 0) {
		thread_lock(td);
		old_set = td->td_cpuset;
		td->td_cpuset = nset;	/* new set is active, now */
		nset = NULL;		/* it'll be freed when unused */
		sched_affinity(td);
		thread_unlock(td);
	} else
		old_set = NULL;
	PROC_UNLOCK(p);
	if (old_set != NULL)
		cpuset_rel(old_set);
out:
	if (nset != NULL)
		uma_zfree(cpuset_zone, nset);
	if (rset != NULL)
		uma_zfree(cpuset_zone, rset);
	if (cs_id != CPUSET_INVALID)
		free_unr(cpuset_unr, cs_id);
	return (error);
}
/*
 * Creates system-wide cpusets and the cpuset for thread0, including two
 * sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error, i;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);

	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	CPU_FILL(&set->cs_mask);
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;

	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	cpuset_default = set;

	/*
	 * Initialize the unit allocator. 0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	/*
	 * If MD code has not initialized per-domain cpusets, place all
	 * CPUs in domain 0.
	 */
	for (i = 0; i < MAXMEMDOM; i++)
		if (!CPU_EMPTY(&cpuset_domain[i]))
			goto domains_set;
	CPU_COPY(&all_cpus, &cpuset_domain[0]);
domains_set:

	return (set);
}
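/*
 * Resulting hierarchy at boot (illustrative):
 *
 *	set 0 - cpuset_zero, CPU_SET_ROOT, immutable after cpuset_init()
 *	 `- set 1 - cpuset_default, the initial set of every process
 */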
/*
 * Create a cpuset; this would be cpuset_create() except that it marks
 * the new 'set' as root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp locked with a reference.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
	struct cpuset *set;
	int error;

	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}
int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	error = cpuset_setproc(p->p_pid, set, NULL);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}
/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	mask = all_cpus;
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);
#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{

	return (kern_cpuset_setid(td, uap->which, uap->id, uap->setid));
}

int
kern_cpuset_setid(struct thread *td, cpuwhich_t which,
    id_t id, cpusetid_t setid)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(id, set, NULL);
	cpuset_rel(set);
	return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{

	return (kern_cpuset_getid(td, uap->level, uap->which, uap->id,
	    uap->setid));
}

int
kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, cpusetid_t *setid)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t tmpid;
	int error;

	if (level == CPU_LEVEL_WHICH && which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
	case CPU_WHICH_INTRHANDLER:
	case CPU_WHICH_ITHREAD:
	case CPU_WHICH_DOMAIN:
		return (EINVAL);
	}
	switch (level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	tmpid = set->cs_id;
	cpuset_rel(set);
	error = copyout(&tmpid, setid, sizeof(tmpid));

	return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{

	return (kern_cpuset_getaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask));
}

int
kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, cpuset_t *maskp)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	/* In Capability mode, you can only get your own CPU set. */
	if (IN_CAPABILITY_MODE(td)) {
		if (level != CPU_LEVEL_WHICH)
			return (ECAPMODE);
		if (which != CPU_WHICH_TID && which != CPU_WHICH_PID)
			return (ECAPMODE);
		if (id != -1)
			return (ECAPMODE);
	}
	size = cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_getaffinity(id, which, mask);
			break;
		case CPU_WHICH_DOMAIN:
			if (id < 0 || id >= MAXMEMDOM)
				error = ESRCH;
			else
				CPU_COPY(&cpuset_domain[id], mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, maskp, size);
out:
	free(mask, M_TEMP);
	return (error);
}
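/*
 * Illustrative userland call matching the CPU_LEVEL_WHICH/CPU_WHICH_PID
 * branch above; the returned mask is the union of every thread's mask in
 * the target process:
 *
 *	cpuset_t m;
 *
 *	cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, pid,
 *	    sizeof(m), &m);
 */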
#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{

	return (kern_cpuset_setaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask));
}

int
kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, const cpuset_t *maskp)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	/* In Capability mode, you can only set your own CPU set. */
	if (IN_CAPABILITY_MODE(td)) {
		if (level != CPU_LEVEL_WHICH)
			return (ECAPMODE);
		if (which != CPU_WHICH_TID && which != CPU_WHICH_PID)
			return (ECAPMODE);
		if (id != -1)
			return (ECAPMODE);
	}
	mask = malloc(cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(maskp, mask, cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}
	}
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(which, id, &p, &ttd, &set);
		if (error)
			break;
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(which, id, &p, &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_setaffinity(id, which, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}
#ifdef DDB
void
ddb_display_cpuset(const cpuset_t *set)
{
	int cpu, once;

	for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (CPU_ISSET(cpu, set)) {
			if (once == 0) {
				db_printf("%d", cpu);
				once = 1;
			} else
				db_printf(",%d", cpu);
		}
	}
	if (once == 0)
		db_printf("<none>");
}

DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  mask=");
		ddb_display_cpuset(&set->cs_mask);
		db_printf("\n");
	}
}
#endif /* DDB */