2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
6 * Mike Karels at Berkeley Software Design, Inc.
8 * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
9 * project, to make these variables more userfriendly.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
41 #include "opt_capsicum.h"
42 #include "opt_compat.h"
43 #include "opt_ktrace.h"
45 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/capsicum.h>
49 #include <sys/kernel.h>
50 #include <sys/sysctl.h>
51 #include <sys/malloc.h>
56 #include <sys/mutex.h>
57 #include <sys/rmlock.h>
60 #include <sys/sysproto.h>
63 #include <sys/ktrace.h>
68 #include <security/mac/mac_framework.h>
71 #include <vm/vm_extern.h>
/*
 * Malloc types tagging the three kinds of allocations the sysctl
 * machinery makes: internal bookkeeping, dynamically created OIDs,
 * and temporary output snapshot buffers.
 */
73 static MALLOC_DEFINE(M_SYSCTL, "sysctl", "sysctl internal magic");
74 static MALLOC_DEFINE(M_SYSCTLOID, "sysctloid", "sysctl dynamic oids");
75 static MALLOC_DEFINE(M_SYSCTLTMP, "sysctltmp", "sysctl temp output buffer");
78 * The sysctllock protects the MIB tree. It also protects sysctl
79 * contexts used with dynamic sysctls. The sysctl_register_oid() and
80 * sysctl_unregister_oid() routines require the sysctllock to already
81 * be held, so the sysctl_wlock() and sysctl_wunlock() routines are
82 * provided for the few places in the kernel which need to use that
83 * API rather than using the dynamic API. Use of the dynamic API is
84 * strongly encouraged for most code.
86 * The sysctlmemlock is used to limit the amount of user memory wired for
87 * sysctl requests. This is implemented by serializing any userland
88 * sysctl requests larger than a single page via an exclusive lock.
/*
 * sysctllock is a read-mostly lock protecting the MIB tree (see the
 * block comment above); sysctlmemlock serializes large wired requests.
 * The macros below are thin wrappers so the rest of the file never
 * touches the rm(9) primitives directly.
 */
90 static struct rmlock sysctllock;
91 static struct sx sysctlmemlock;
93 #define SYSCTL_WLOCK() rm_wlock(&sysctllock)
94 #define SYSCTL_WUNLOCK() rm_wunlock(&sysctllock)
95 #define SYSCTL_RLOCK(tracker) rm_rlock(&sysctllock, (tracker))
96 #define SYSCTL_RUNLOCK(tracker) rm_runlock(&sysctllock, (tracker))
97 #define SYSCTL_WLOCKED() rm_wowned(&sysctllock)
98 #define SYSCTL_ASSERT_LOCKED() rm_assert(&sysctllock, RA_LOCKED)
99 #define SYSCTL_ASSERT_WLOCKED() rm_assert(&sysctllock, RA_WLOCKED)
100 #define SYSCTL_ASSERT_RLOCKED() rm_assert(&sysctllock, RA_RLOCKED)
101 #define SYSCTL_INIT() rm_init_flags(&sysctllock, "sysctl lock", \
103 #define SYSCTL_SLEEP(ch, wmesg, timo) \
104 rm_sleep(ch, &sysctllock, 0, wmesg, timo)
/* Forward declarations for the root dispatcher, the global OID tree
 * head, and the locked helpers / kernel-space transfer functions
 * defined later in this file. */
106 static int sysctl_root(SYSCTL_HANDLER_ARGS);
109 struct sysctl_oid_list sysctl__children = SLIST_HEAD_INITIALIZER(&sysctl__children);
111 static int sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del,
113 static int sysctl_old_kernel(struct sysctl_req *, const void *, size_t);
114 static int sysctl_new_kernel(struct sysctl_req *, void *, size_t);
/*
 * Linear search of one OID list for an entry whose name matches
 * exactly (strcmp).  Caller must hold the sysctl lock (asserted).
 * Returns the matching oid, or NULL when absent (return path elided
 * in this view).
 */
116 static struct sysctl_oid *
117 sysctl_find_oidname(const char *name, struct sysctl_oid_list *list)
119 struct sysctl_oid *oidp;
121 SYSCTL_ASSERT_LOCKED();
122 SLIST_FOREACH(oidp, list, oid_link) {
123 if (strcmp(oidp->oid_name, name) == 0) {
131 * Initialization of the MIB tree.
133 * Order by number in each list.
/*
 * Invoke an oid's handler with the correct locking protocol:
 * bump oid_running for dynamic oids so removal can drain us, drop
 * the read lock (tracker) across the handler call, take Giant around
 * non-MPSAFE handlers, then reacquire the lock and wake any thread
 * waiting in sysctl_remove_oid_locked() once the last runner leaves
 * a DYING oid.
 */
150 sysctl_root_handler_locked(struct sysctl_oid *oid, void *arg1, intmax_t arg2,
151 struct sysctl_req *req, struct rm_priotracker *tracker)
155 if (oid->oid_kind & CTLFLAG_DYN)
156 atomic_add_int(&oid->oid_running, 1);
159 SYSCTL_RUNLOCK(tracker);
163 if (!(oid->oid_kind & CTLFLAG_MPSAFE))
/* Actual handler invocation; lock is not held across this call. */
165 error = oid->oid_handler(oid, arg1, arg2, req);
166 if (!(oid->oid_kind & CTLFLAG_MPSAFE))
169 KFAIL_POINT_ERROR(_debug_fail_point, sysctl_running, error);
172 SYSCTL_RLOCK(tracker);
176 if (oid->oid_kind & CTLFLAG_DYN) {
/* Last runner out of a dying oid wakes the remover. */
177 if (atomic_fetchadd_int(&oid->oid_running, -1) == 1 &&
178 (oid->oid_kind & CTLFLAG_DYING) != 0)
179 wakeup(&oid->oid_running);
/*
 * Fetch a tunable value for oidp from the kernel environment and push
 * it into the oid via a fake in-kernel sysctl request.
 *
 * The dotted path is built backwards: walking from the oid up through
 * its parents, each name is copied at offset `rem` counting down from
 * the end of `path`, so `path + rem` ends up pointing at the full
 * "a.b.c" name without a separate reversal pass.
 */
186 sysctl_load_tunable_by_oid_locked(struct sysctl_oid *oidp)
188 struct sysctl_req req;
189 struct sysctl_oid *curr;
192 ssize_t rem = sizeof(path);
205 for (curr = oidp; curr != NULL; curr = SYSCTL_PARENT(curr)) {
206 len = strlen(curr->oid_name);
/* Bail out rather than overflow the fixed-size path buffer. */
211 printf("OID path exceeds %d bytes\n", (int)sizeof(path));
214 memcpy(path + rem, curr->oid_name, len);
216 path[rem + len] = '.';
/* Build a request that sources "new" data from the kernel env value. */
219 memset(&req, 0, sizeof(req));
222 req.oldfunc = sysctl_old_kernel;
223 req.newfunc = sysctl_new_kernel;
224 req.lock = REQ_UNWIRED;
/*
 * Parse the environment string with the getenv_*() variant matching
 * the oid's CTLTYPE, and point req.newptr/newlen at the temporary.
 * (Several case labels and val_* assignments are elided in this view.)
 */
226 switch (oidp->oid_kind & CTLTYPE) {
228 if (getenv_int(path + rem, &val_int) == 0)
230 req.newlen = sizeof(val_int);
231 req.newptr = &val_int;
234 if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
236 req.newlen = sizeof(val_int);
237 req.newptr = &val_int;
240 if (getenv_long(path + rem, &val_long) == 0)
242 req.newlen = sizeof(val_long);
243 req.newptr = &val_long;
246 if (getenv_ulong(path + rem, (unsigned long *)&val_long) == 0)
248 req.newlen = sizeof(val_long);
249 req.newptr = &val_long;
/* Narrow integer types are parsed at full width then truncated. */
252 if (getenv_int(path + rem, &val_int) == 0)
255 req.newlen = sizeof(val_8);
259 if (getenv_int(path + rem, &val_int) == 0)
262 req.newlen = sizeof(val_16);
263 req.newptr = &val_16;
266 if (getenv_long(path + rem, &val_long) == 0)
269 req.newlen = sizeof(val_32);
270 req.newptr = &val_32;
273 if (getenv_quad(path + rem, &val_quad) == 0)
276 req.newlen = sizeof(val_64);
277 req.newptr = &val_64;
280 if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
283 req.newlen = sizeof(val_8);
287 if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
290 req.newlen = sizeof(val_16);
291 req.newptr = &val_16;
294 if (getenv_ulong(path + rem, (unsigned long *)&val_long) == 0)
297 req.newlen = sizeof(val_32);
298 req.newptr = &val_32;
301 /* XXX there is no getenv_uquad() */
302 if (getenv_quad(path + rem, &val_quad) == 0)
305 req.newlen = sizeof(val_64);
306 req.newptr = &val_64;
/* String tunables use the env string itself as the new value. */
309 penv = kern_getenv(path + rem);
312 req.newlen = strlen(penv);
/* Feed the value through the oid's own handler (tracker == NULL:
 * we already hold the lock). */
318 error = sysctl_root_handler_locked(oidp, oidp->oid_arg1,
319 oidp->oid_arg2, &req, NULL);
321 printf("Setting sysctl %s failed: %d\n", path + rem, error);
/*
 * Register an oid under its parent list, keeping the list sorted by
 * OID number.  Requires the sysctl write lock (asserted).  Handles
 * name collisions (nodes may be shared, leaves may not), OID_AUTO
 * number assignment, and — for tunable leaves — a one-time fetch of
 * the value from the kernel environment.
 */
327 sysctl_register_oid(struct sysctl_oid *oidp)
329 struct sysctl_oid_list *parent = oidp->oid_parent;
330 struct sysctl_oid *p;
331 struct sysctl_oid *q;
336 * First check if another oid with the same name already
337 * exists in the parent's list.
339 SYSCTL_ASSERT_WLOCKED();
340 p = sysctl_find_oidname(oidp->oid_name, parent);
342 if ((p->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
/* Only NODE oids may share a name; re-using a leaf is an error. */
346 printf("can't re-use a leaf (%s)!\n", p->oid_name);
350 /* get current OID number */
351 oid_number = oidp->oid_number;
/* Compile-time sanity: OID_AUTO must sort below all valid numbers. */
354 #error "OID_AUTO is expected to be a negative value"
357 * Any negative OID number qualifies as OID_AUTO. Valid OID
358 * numbers should always be positive.
360 * NOTE: DO NOT change the starting value here, change it in
361 * <sys/sysctl.h>, and make sure it is at least 256 to
362 * accommodate e.g. net.inet.raw as a static sysctl node.
364 if (oid_number < 0) {
368 * By decrementing the next OID number we spend less
369 * time inserting the OIDs into a sorted list.
371 if (--newoid < CTL_AUTO_START)
378 * Insert the OID into the parent's list sorted by OID number.
382 SLIST_FOREACH(p, parent, oid_link) {
383 /* check if the current OID number is in use */
384 if (oid_number == p->oid_number) {
385 /* get the next valid OID number */
386 if (oid_number < CTL_AUTO_START ||
387 oid_number == 0x7fffffff) {
388 /* wraparound - restart */
389 oid_number = CTL_AUTO_START;
390 /* don't loop forever */
392 panic("sysctl: Out of OID numbers\n");
397 } else if (oid_number < p->oid_number)
401 /* check for non-auto OID number collision */
402 if (oidp->oid_number >= 0 && oidp->oid_number < CTL_AUTO_START &&
403 oid_number >= CTL_AUTO_START) {
404 printf("sysctl: OID number(%d) is already in use for '%s'\n",
405 oidp->oid_number, oidp->oid_name);
407 /* update the OID number, if any */
408 oidp->oid_number = oid_number;
/* q tracks the insertion point found above; NULL means list head. */
410 SLIST_INSERT_AFTER(q, oidp, oid_link);
412 SLIST_INSERT_HEAD(parent, oidp, oid_link);
/*
 * Tunable leaf (non-node, non-VNET, CTLFLAG_TUN, fetch not
 * suppressed): load its initial value from the kernel environment,
 * once only.
 */
414 if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE &&
416 (oidp->oid_kind & CTLFLAG_VNET) == 0 &&
418 (oidp->oid_kind & CTLFLAG_TUN) != 0 &&
419 (oidp->oid_kind & CTLFLAG_NOFETCH) == 0) {
420 /* only fetch value once */
421 oidp->oid_kind |= CTLFLAG_NOFETCH;
422 /* try to fetch value from kernel environment */
423 sysctl_load_tunable_by_oid_locked(oidp);
/*
 * Unlink an oid from its parent's list.  Requires the sysctl write
 * lock (asserted).  An oid that is not found is reported but not
 * fatal — this legitimately happens when a module's registration
 * failed and it is being unloaded.
 */
428 sysctl_unregister_oid(struct sysctl_oid *oidp)
430 struct sysctl_oid *p;
433 SYSCTL_ASSERT_WLOCKED();
435 if (oidp->oid_number == OID_AUTO) {
438 SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
440 SLIST_REMOVE(oidp->oid_parent, oidp,
441 sysctl_oid, oid_link);
449 * This can happen when a module fails to register and is
450 * being unloaded afterwards. It should not be a panic()
454 printf("%s: failed to unregister sysctl\n", __func__);
457 /* Initialize a new context to keep track of dynamically added sysctls. */
459 sysctl_ctx_init(struct sysctl_ctx_list *c)
467 * No locking here, the caller is responsible for not adding
468 * new nodes to a context until after this function has
475 /* Free the context, and destroy all dynamic oids registered in this context */
477 sysctl_ctx_free(struct sysctl_ctx_list *clist)
479 struct sysctl_ctx_entry *e, *e1;
484 * First perform a "dry run" to check if it's ok to remove oids.
486 * XXX This algorithm is a hack. But I don't know any
487 * XXX better solution for now...
/* Pass 1: deregister (del=0) every entry, stopping on first error. */
490 TAILQ_FOREACH(e, clist, link) {
491 error = sysctl_remove_oid_locked(e->entry, 0, 0);
496 * Restore deregistered entries, either from the end,
497 * or from the place where error occurred.
498 * e contains the entry that was not unregistered
/* On error, walk backwards re-registering everything already
 * removed, leaving the tree exactly as we found it. */
501 e1 = TAILQ_PREV(e, sysctl_ctx_list, link);
503 e1 = TAILQ_LAST(clist, sysctl_ctx_list);
505 sysctl_register_oid(e1->entry);
506 e1 = TAILQ_PREV(e1, sysctl_ctx_list, link);
512 /* Now really delete the entries */
/* Pass 2: dry run succeeded, so deletion (del=1) must not fail. */
513 e = TAILQ_FIRST(clist);
515 e1 = TAILQ_NEXT(e, link);
516 error = sysctl_remove_oid_locked(e->entry, 1, 0);
518 panic("sysctl_remove_oid: corrupt tree, entry: %s",
520 free(e, M_SYSCTLOID);
527 /* Add an entry to the context */
528 struct sysctl_ctx_entry *
529 sysctl_ctx_entry_add(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
531 struct sysctl_ctx_entry *e;
/* Write lock required; NULL args are rejected (return elided here). */
533 SYSCTL_ASSERT_WLOCKED();
534 if (clist == NULL || oidp == NULL)
536 e = malloc(sizeof(struct sysctl_ctx_entry), M_SYSCTLOID, M_WAITOK);
538 TAILQ_INSERT_HEAD(clist, e, link);
542 /* Find an entry in the context */
543 struct sysctl_ctx_entry *
544 sysctl_ctx_entry_find(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
546 struct sysctl_ctx_entry *e;
/* Linear scan of the context list for the entry wrapping oidp. */
548 SYSCTL_ASSERT_WLOCKED();
549 if (clist == NULL || oidp == NULL)
551 TAILQ_FOREACH(e, clist, link) {
559 * Delete an entry from the context.
560 * NOTE: this function doesn't free oidp! You have to remove it
561 * with sysctl_remove_oid().
564 sysctl_ctx_entry_del(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
566 struct sysctl_ctx_entry *e;
568 if (clist == NULL || oidp == NULL)
/* Unlink and free only the context entry; the oid itself survives. */
571 e = sysctl_ctx_entry_find(clist, oidp);
573 TAILQ_REMOVE(clist, e, link);
575 free(e, M_SYSCTLOID);
584 * Remove dynamically created sysctl trees.
585 * oidp - top of the tree to be removed
586 * del - if 0 - just deregister, otherwise free up entries as well
587 * recurse - if != 0 traverse the subtree to be deleted
590 sysctl_remove_oid(struct sysctl_oid *oidp, int del, int recurse)
/* Public wrapper: takes the write lock around the locked variant
 * (lock acquisition/release lines elided in this view). */
595 error = sysctl_remove_oid_locked(oidp, del, recurse);
/*
 * Remove a child of `parent` matched by name.  Uses the _SAFE
 * iterator because the matched entry may be freed during traversal.
 */
601 sysctl_remove_name(struct sysctl_oid *parent, const char *name,
602 int del, int recurse)
604 struct sysctl_oid *p, *tmp;
609 SLIST_FOREACH_SAFE(p, SYSCTL_CHILDREN(parent), oid_link, tmp) {
610 if (strcmp(p->oid_name, name) == 0) {
611 error = sysctl_remove_oid_locked(p, del, recurse);
/*
 * Locked worker for oid removal.  Only CTLFLAG_DYN oids may be
 * removed.  Node oids are (optionally) recursed into; refcounts are
 * honoured; before freeing, any threads still running the handler are
 * drained via CTLFLAG_DYING + sleep on oid_running.
 */
622 sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del, int recurse)
624 struct sysctl_oid *p, *tmp;
627 SYSCTL_ASSERT_WLOCKED();
630 if ((oidp->oid_kind & CTLFLAG_DYN) == 0) {
631 printf("can't remove non-dynamic nodes!\n");
635 * WARNING: normal method to do this should be through
636 * sysctl_ctx_free(). Use recursing as the last resort
637 * method to purge your sysctl tree of leftovers...
638 * However, if some other code still references these nodes,
641 if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
642 if (oidp->oid_refcnt == 1) {
643 SLIST_FOREACH_SAFE(p,
644 SYSCTL_CHILDREN(oidp), oid_link, tmp) {
/* Children present but recursion not requested: refuse. */
646 printf("Warning: failed attempt to "
647 "remove oid %s with child %s\n",
648 oidp->oid_name, p->oid_name);
651 error = sysctl_remove_oid_locked(p, del,
/* Shared oid: just drop a reference instead of destroying it. */
658 if (oidp->oid_refcnt > 1 ) {
661 if (oidp->oid_refcnt == 0) {
662 printf("Warning: bad oid_refcnt=%u (%s)!\n",
663 oidp->oid_refcnt, oidp->oid_name);
666 sysctl_unregister_oid(oidp);
669 * Wait for all threads running the handler to drain.
670 * This preserves the previous behavior when the
671 * sysctl lock was held across a handler invocation,
672 * and is necessary for module unload correctness.
674 while (oidp->oid_running > 0) {
675 oidp->oid_kind |= CTLFLAG_DYING;
676 SYSCTL_SLEEP(&oidp->oid_running, "oidrm", 0);
/* del != 0: release the strings and the oid structure itself. */
679 free(__DECONST(char *, oidp->oid_descr),
681 free(__DECONST(char *, oidp->oid_name), M_SYSCTLOID);
682 free(oidp, M_SYSCTLOID);
688 * Create new sysctls at run time.
689 * clist may point to a valid context initialized with sysctl_ctx_init().
692 sysctl_add_oid(struct sysctl_ctx_list *clist, struct sysctl_oid_list *parent,
693 int number, const char *name, int kind, void *arg1, intmax_t arg2,
694 int (*handler)(SYSCTL_HANDLER_ARGS), const char *fmt, const char *descr)
696 struct sysctl_oid *oidp;
698 /* You have to hook up somewhere.. */
701 /* Check if the node already exists, otherwise create it */
703 oidp = sysctl_find_oidname(name, parent);
/* Existing NODE with the same name: share it (bump usage) rather
 * than creating a duplicate; existing leaf is an error. */
705 if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
707 /* Update the context */
709 sysctl_ctx_entry_add(clist, oidp);
714 printf("can't re-use a leaf (%s)!\n", name);
/* Fresh oid: allocate zeroed, copy the caller's strings, and mark
 * it CTLFLAG_DYN so it can later be removed. */
718 oidp = malloc(sizeof(struct sysctl_oid), M_SYSCTLOID, M_WAITOK|M_ZERO);
719 oidp->oid_parent = parent;
720 SLIST_INIT(&oidp->oid_children);
721 oidp->oid_number = number;
722 oidp->oid_refcnt = 1;
723 oidp->oid_name = strdup(name, M_SYSCTLOID);
724 oidp->oid_handler = handler;
725 oidp->oid_kind = CTLFLAG_DYN | kind;
726 oidp->oid_arg1 = arg1;
727 oidp->oid_arg2 = arg2;
730 oidp->oid_descr = strdup(descr, M_SYSCTLOID);
731 /* Update the context, if used */
733 sysctl_ctx_entry_add(clist, oidp);
734 /* Register this oid */
735 sysctl_register_oid(oidp);
741 * Rename an existing oid.
744 sysctl_rename_oid(struct sysctl_oid *oidp, const char *name)
/* Allocate the new name before swapping so the oid always has a
 * valid name; free the old one afterwards. */
749 newname = strdup(name, M_SYSCTLOID);
751 oldname = __DECONST(char *, oidp->oid_name);
752 oidp->oid_name = newname;
754 free(oldname, M_SYSCTLOID);
758 * Reparent an existing oid.
761 sysctl_move_oid(struct sysctl_oid *oid, struct sysctl_oid_list *parent)
763 struct sysctl_oid *oidp;
/* Already under the requested parent: nothing to do. */
766 if (oid->oid_parent == parent) {
/* A name clash under the new parent aborts the move (error return
 * elided in this view). */
770 oidp = sysctl_find_oidname(oid->oid_name, parent);
/* Unregister, rehome, and re-register with a fresh auto number. */
775 sysctl_unregister_oid(oid);
776 oid->oid_parent = parent;
777 oid->oid_number = OID_AUTO;
778 sysctl_register_oid(oid);
784 * Register the kernel's oids on startup.
786 SET_DECLARE(sysctl_set, struct sysctl_oid);
789 sysctl_register_all(void *arg)
791 struct sysctl_oid **oidp;
/* Set up the memory-wiring lock, then register every statically
 * compiled-in oid collected in the sysctl_set linker set. */
793 sx_init(&sysctlmemlock, "sysctl mem");
796 SET_FOREACH(oidp, sysctl_set)
797 sysctl_register_oid(*oidp);
800 SYSINIT(sysctl, SI_SUB_KMEM, SI_ORDER_FIRST, sysctl_register_all, 0);
805 * These functions implement a presently undocumented interface
806 * used by the sysctl program to walk the tree, and get the type
807 * so it can print the value.
808 * This interface is under work and consideration, and should probably
809 * be killed with a big axe by the first person who can find the time.
810 * (be aware though, that the proper interface isn't as obvious as it
811 * may seem, there are various conflicting requirements.
813 * {0,0} printf the entire MIB-tree.
814 * {0,1,...} return the name of the "..." OID.
815 * {0,2,...} return the next OID.
816 * {0,3} return the OID of the name in "new"
817 * {0,4,...} return the kind & format info for the "..." OID.
818 * {0,5,...} return the description the "..." OID.
/*
 * Recursively printf the tree rooted at list `l` to the console,
 * indenting `i` spaces per level: number, name, R/W flags, and a
 * human-readable type; NODE oids without a handler recurse.
 */
823 sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i)
826 struct sysctl_oid *oidp;
828 SYSCTL_ASSERT_LOCKED();
829 SLIST_FOREACH(oidp, l, oid_link) {
834 printf("%d %s ", oidp->oid_number, oidp->oid_name);
837 oidp->oid_kind & CTLFLAG_RD ? 'R':' ',
838 oidp->oid_kind & CTLFLAG_WR ? 'W':' ');
840 if (oidp->oid_handler)
843 switch (oidp->oid_kind & CTLTYPE) {
/* A NODE with no handler is an interior node: descend. */
846 if (!oidp->oid_handler) {
847 sysctl_sysctl_debug_dump_node(
848 SYSCTL_CHILDREN(oidp), i + 2);
851 case CTLTYPE_INT: printf(" Int\n"); break;
852 case CTLTYPE_UINT: printf(" u_int\n"); break;
853 case CTLTYPE_LONG: printf(" Long\n"); break;
854 case CTLTYPE_ULONG: printf(" u_long\n"); break;
855 case CTLTYPE_STRING: printf(" String\n"); break;
856 case CTLTYPE_S8: printf(" int8_t\n"); break;
857 case CTLTYPE_S16: printf(" int16_t\n"); break;
858 case CTLTYPE_S32: printf(" int32_t\n"); break;
859 case CTLTYPE_S64: printf(" int64_t\n"); break;
860 case CTLTYPE_U8: printf(" uint8_t\n"); break;
861 case CTLTYPE_U16: printf(" uint16_t\n"); break;
862 case CTLTYPE_U32: printf(" uint32_t\n"); break;
863 case CTLTYPE_U64: printf(" uint64_t\n"); break;
864 case CTLTYPE_OPAQUE: printf(" Opaque/struct\n"); break;
865 default: printf("\n");
/*
 * Handler for sysctl.debug ({0,0}): dump the whole MIB tree to the
 * console.  Restricted by the PRIV_SYSCTL_DEBUG privilege check.
 */
872 sysctl_sysctl_debug(SYSCTL_HANDLER_ARGS)
874 struct rm_priotracker tracker;
877 error = priv_check(req->td, PRIV_SYSCTL_DEBUG);
880 SYSCTL_RLOCK(&tracker);
881 sysctl_sysctl_debug_dump_node(&sysctl__children, 0);
882 SYSCTL_RUNLOCK(&tracker);
886 SYSCTL_PROC(_sysctl, 0, debug, CTLTYPE_STRING|CTLFLAG_RD|CTLFLAG_MPSAFE,
887 0, 0, sysctl_sysctl_debug, "-", "");
/*
 * Handler for sysctl.name ({0,1,...}): translate the numeric OID in
 * arg1/arg2 into its dotted name, emitted component by component via
 * SYSCTL_OUT.  Unknown numbers are emitted numerically.
 */
891 sysctl_sysctl_name(SYSCTL_HANDLER_ARGS)
893 int *name = (int *) arg1;
894 u_int namelen = arg2;
896 struct sysctl_oid *oid;
897 struct sysctl_oid_list *lsp = &sysctl__children, *lsp2;
898 struct rm_priotracker tracker;
901 SYSCTL_RLOCK(&tracker);
/* No matching oid at this level: fall back to the number itself. */
904 snprintf(buf,sizeof(buf),"%d",*name);
906 error = SYSCTL_OUT(req, ".", 1);
908 error = SYSCTL_OUT(req, buf, strlen(buf));
916 SLIST_FOREACH(oid, lsp, oid_link) {
917 if (oid->oid_number != *name)
/* Matched: emit "." separator (not before the first component)
 * then the component name. */
921 error = SYSCTL_OUT(req, ".", 1);
923 error = SYSCTL_OUT(req, oid->oid_name,
924 strlen(oid->oid_name));
/* Descend only through handler-less NODE oids. */
931 if ((oid->oid_kind & CTLTYPE) != CTLTYPE_NODE)
934 if (oid->oid_handler)
937 lsp2 = SYSCTL_CHILDREN(oid);
/* Terminating NUL for the assembled name. */
942 error = SYSCTL_OUT(req, "", 1);
944 SYSCTL_RUNLOCK(&tracker);
949 * XXXRW/JA: Shouldn't return name data for nodes that we don't permit in
952 static SYSCTL_NODE(_sysctl, 1, name, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD,
953 sysctl_sysctl_name, "");
/*
 * Recursive worker for sysctl.next: find the OID that follows `name`
 * (of length `namelen`) in tree order under `lsp`, writing the result
 * numbers into `next` and its depth into `len`.  Returns non-zero
 * while still searching, zero once found (tail of the function is
 * elided in this view).  CTLFLAG_SKIP entries are passed over.
 */
956 sysctl_sysctl_next_ls(struct sysctl_oid_list *lsp, int *name, u_int namelen,
957 int *next, int *len, int level, struct sysctl_oid **oidpp)
959 struct sysctl_oid *oidp;
961 SYSCTL_ASSERT_LOCKED();
963 SLIST_FOREACH(oidp, lsp, oid_link) {
964 *next = oidp->oid_number;
967 if (oidp->oid_kind & CTLFLAG_SKIP)
/* Case 1: name exhausted — first eligible entry wins; for bare
 * nodes, recurse to find the leftmost descendant. */
971 if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
973 if (oidp->oid_handler)
974 /* We really should call the handler here...*/
976 lsp = SYSCTL_CHILDREN(oidp);
977 if (!sysctl_sysctl_next_ls(lsp, 0, 0, next+1,
978 len, level+1, oidpp))
/* Case 2: current entry sorts before the cursor — keep scanning. */
983 if (oidp->oid_number < *name)
/* Case 3: strictly after the cursor — take it (or its leftmost
 * descendant for handler-less nodes). */
986 if (oidp->oid_number > *name) {
987 if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
989 if (oidp->oid_handler)
991 lsp = SYSCTL_CHILDREN(oidp);
992 if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1,
993 next+1, len, level+1, oidpp))
/* Case 4: exact match — recurse deeper along the cursor path. */
997 if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
1000 if (oidp->oid_handler)
1003 lsp = SYSCTL_CHILDREN(oidp);
1004 if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1, next+1,
1005 len, level+1, oidpp))
/*
 * Handler for sysctl.next ({0,2,...}): return the OID following the
 * given one in tree order, as j ints in newoid.
 */
1016 sysctl_sysctl_next(SYSCTL_HANDLER_ARGS)
1018 int *name = (int *) arg1;
1019 u_int namelen = arg2;
1021 struct sysctl_oid *oid;
1022 struct sysctl_oid_list *lsp = &sysctl__children;
1023 struct rm_priotracker tracker;
1024 int newoid[CTL_MAXNAME];
1026 SYSCTL_RLOCK(&tracker);
1027 i = sysctl_sysctl_next_ls(lsp, name, namelen, newoid, &j, 1, &oid);
1028 SYSCTL_RUNLOCK(&tracker);
1031 error = SYSCTL_OUT(req, newoid, j * sizeof (int));
1036 * XXXRW/JA: Shouldn't return next data for nodes that we don't permit in
1039 static SYSCTL_NODE(_sysctl, 2, next, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD,
1040 sysctl_sysctl_next, "");
/*
 * Translate a dotted name ("a.b.c", destructively tokenized with
 * strsep) into its numeric OID array.  Writes up to CTL_MAXNAME
 * numbers into `oid`, the count into `len`, and the final oid into
 * *oidpp.  Requires the sysctl lock (asserted).
 */
1043 name2oid(char *name, int *oid, int *len, struct sysctl_oid **oidpp)
1045 struct sysctl_oid *oidp;
1046 struct sysctl_oid_list *lsp = &sysctl__children;
1049 SYSCTL_ASSERT_LOCKED();
1051 for (*len = 0; *len < CTL_MAXNAME;) {
1052 p = strsep(&name, ".");
/* Scan the current level for a component with this name. */
1054 oidp = SLIST_FIRST(lsp);
1055 for (;; oidp = SLIST_NEXT(oidp, oid_link)) {
1058 if (strcmp(p, oidp->oid_name) == 0)
1061 *oid++ = oidp->oid_number;
/* Whole name consumed: done. */
1064 if (name == NULL || *name == '\0') {
/* More components remain: only a handler-less NODE can go deeper. */
1070 if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
1073 if (oidp->oid_handler)
1076 lsp = SYSCTL_CHILDREN(oidp);
/*
 * Handler for sysctl.name2oid ({0,3}): read a dotted name from the
 * "new" buffer and return the corresponding numeric OID array.
 */
1082 sysctl_sysctl_name2oid(SYSCTL_HANDLER_ARGS)
1085 int error, oid[CTL_MAXNAME], len = 0;
1086 struct sysctl_oid *op = NULL;
1087 struct rm_priotracker tracker;
/* Cap the name length; MAXPATHLEN is an arbitrary, undocumented
 * limit (see comment on the check itself). */
1091 if (req->newlen >= MAXPATHLEN) /* XXX arbitrary, undocumented */
1092 return (ENAMETOOLONG);
1094 p = malloc(req->newlen+1, M_SYSCTL, M_WAITOK);
1096 error = SYSCTL_IN(req, p, req->newlen);
1102 p [req->newlen] = '\0';
1104 SYSCTL_RLOCK(&tracker);
1105 error = name2oid(p, oid, &len, &op);
1106 SYSCTL_RUNLOCK(&tracker);
1113 error = SYSCTL_OUT(req, oid, len * sizeof *oid);
1118 * XXXRW/JA: Shouldn't return name2oid data for nodes that we don't permit in
1121 SYSCTL_PROC(_sysctl, 3, name2oid,
1122 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE
1123 | CTLFLAG_CAPRW, 0, 0, sysctl_sysctl_name2oid, "I", "");
/*
 * Handler for sysctl.oidfmt ({0,4,...}): return the oid_kind word
 * followed by the NUL-terminated format string for the given OID.
 */
1126 sysctl_sysctl_oidfmt(SYSCTL_HANDLER_ARGS)
1128 struct sysctl_oid *oid;
1129 struct rm_priotracker tracker;
1132 SYSCTL_RLOCK(&tracker);
1133 error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
1137 if (oid->oid_fmt == NULL) {
1141 error = SYSCTL_OUT(req, &oid->oid_kind, sizeof(oid->oid_kind));
1144 error = SYSCTL_OUT(req, oid->oid_fmt, strlen(oid->oid_fmt) + 1);
1146 SYSCTL_RUNLOCK(&tracker);
1151 static SYSCTL_NODE(_sysctl, 4, oidfmt, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
1152 sysctl_sysctl_oidfmt, "");
/*
 * Handler for sysctl.oiddescr ({0,5,...}): return the NUL-terminated
 * description string for the given OID, if one was provided.
 */
1155 sysctl_sysctl_oiddescr(SYSCTL_HANDLER_ARGS)
1157 struct sysctl_oid *oid;
1158 struct rm_priotracker tracker;
1161 SYSCTL_RLOCK(&tracker);
1162 error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
1166 if (oid->oid_descr == NULL) {
1170 error = SYSCTL_OUT(req, oid->oid_descr, strlen(oid->oid_descr) + 1);
1172 SYSCTL_RUNLOCK(&tracker);
1176 static SYSCTL_NODE(_sysctl, 5, oiddescr, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
1177 sysctl_sysctl_oiddescr, "");
1180 * Default "handler" functions.
1186 * a variable: point arg1 at it.
1187 * a constant: pass it in arg2.
1191 sysctl_handle_bool(SYSCTL_HANDLER_ARGS)
1197 * Attempt to get a coherent snapshot by making a copy of the data.
/* Normalize to 0/1: variable via arg1, constant via arg2. */
1200 temp = *(bool *)arg1 ? 1 : 0;
1202 temp = arg2 ? 1 : 0;
1204 error = SYSCTL_OUT(req, &temp, sizeof(temp));
1205 if (error || !req->newptr)
/* Write path: any non-zero new value stores true. */
1211 error = SYSCTL_IN(req, &temp, sizeof(temp));
1213 *(bool *)arg1 = temp ? 1 : 0;
1219 * Handle an int8_t, signed or unsigned.
1221 * a variable: point arg1 at it.
1222 * a constant: pass it in arg2.
1226 sysctl_handle_8(SYSCTL_HANDLER_ARGS)
1232 * Attempt to get a coherent snapshot by making a copy of the data.
1235 tmpout = *(int8_t *)arg1;
1238 error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));
/* Read-only request or error: stop before the write path. */
1240 if (error || !req->newptr)
1246 error = SYSCTL_IN(req, arg1, sizeof(tmpout));
1251 * Handle an int16_t, signed or unsigned.
1253 * a variable: point arg1 at it.
1254 * a constant: pass it in arg2.
1258 sysctl_handle_16(SYSCTL_HANDLER_ARGS)
1264 * Attempt to get a coherent snapshot by making a copy of the data.
1267 tmpout = *(int16_t *)arg1;
1270 error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));
/* Read-only request or error: stop before the write path. */
1272 if (error || !req->newptr)
1278 error = SYSCTL_IN(req, arg1, sizeof(tmpout));
1283 * Handle an int32_t, signed or unsigned.
1285 * a variable: point arg1 at it.
1286 * a constant: pass it in arg2.
1290 sysctl_handle_32(SYSCTL_HANDLER_ARGS)
1296 * Attempt to get a coherent snapshot by making a copy of the data.
1299 tmpout = *(int32_t *)arg1;
1302 error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));
/* Read-only request or error: stop before the write path. */
1304 if (error || !req->newptr)
1310 error = SYSCTL_IN(req, arg1, sizeof(tmpout));
1315 * Handle an int, signed or unsigned.
1317 * a variable: point arg1 at it.
1318 * a constant: pass it in arg2.
1322 sysctl_handle_int(SYSCTL_HANDLER_ARGS)
1324 int tmpout, error = 0;
1327 * Attempt to get a coherent snapshot by making a copy of the data.
1330 tmpout = *(int *)arg1;
1333 error = SYSCTL_OUT(req, &tmpout, sizeof(int));
/* Read-only request or error: stop before the write path. */
1335 if (error || !req->newptr)
1341 error = SYSCTL_IN(req, arg1, sizeof(int));
1346 * Based on on sysctl_handle_int() convert milliseconds into ticks.
1347 * Note: this is used by TCP.
1351 sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS)
/* Export the tick count as milliseconds (64-bit intermediate
 * avoids overflow in tt * 1000). */
1356 s = (int)((int64_t)tt * 1000 / hz);
1358 error = sysctl_handle_int(oidp, &s, 0, req);
1359 if (error || !req->newptr)
/* Convert the written milliseconds back to ticks. */
1362 tt = (int)((int64_t)s * hz / 1000);
1372 * Handle a long, signed or unsigned.
1374 * a variable: point arg1 at it.
1375 * a constant: pass it in arg2.
1379 sysctl_handle_long(SYSCTL_HANDLER_ARGS)
1388 * Attempt to get a coherent snapshot by making a copy of the data.
1391 tmplong = *(long *)arg1;
/* SCTL_MASK32: 32-bit userland (e.g. compat ABI) expects an int
 * instead of a native long. */
1395 if (req->flags & SCTL_MASK32) {
1397 error = SYSCTL_OUT(req, &tmpint, sizeof(int));
1400 error = SYSCTL_OUT(req, &tmplong, sizeof(long));
1402 if (error || !req->newptr)
/* Write path mirrors the same 32-bit compat widening. */
1408 else if (req->flags & SCTL_MASK32) {
1409 error = SYSCTL_IN(req, &tmpint, sizeof(int));
1410 *(long *)arg1 = (long)tmpint;
1414 error = SYSCTL_IN(req, arg1, sizeof(long));
1419 * Handle a 64 bit int, signed or unsigned.
1421 * a variable: point arg1 at it.
1422 * a constant: pass it in arg2.
1425 sysctl_handle_64(SYSCTL_HANDLER_ARGS)
1431 * Attempt to get a coherent snapshot by making a copy of the data.
1434 tmpout = *(uint64_t *)arg1;
1437 error = SYSCTL_OUT(req, &tmpout, sizeof(uint64_t));
/* Read-only request or error: stop before the write path. */
1439 if (error || !req->newptr)
1445 error = SYSCTL_IN(req, arg1, sizeof(uint64_t));
1450 * Handle our generic '\0' terminated 'C' string.
1452 * a variable string: point arg1 at it, arg2 is max length.
1453 * a constant string: point arg1 at it, arg2 is zero.
1457 sysctl_handle_string(SYSCTL_HANDLER_ARGS)
1460 int error = 0, ro_string = 0;
1463 * A zero-length buffer indicates a fixed size read-only
/* arg2 == 0: constant string; compute its real length once. */
1467 arg2 = strlen((char *)arg1) + 1;
1471 if (req->oldptr != NULL) {
1477 /* try to make a coherent snapshot of the string */
1478 tmparg = malloc(arg2, M_SYSCTLTMP, M_WAITOK);
1479 memcpy(tmparg, arg1, arg2);
/* Emit only up to the first NUL (plus terminator). */
1482 outlen = strnlen(tmparg, arg2 - 1) + 1;
1483 error = SYSCTL_OUT(req, tmparg, outlen);
1486 free(tmparg, M_SYSCTLTMP);
/* Size-probe path (oldptr == NULL): report length without copying. */
1488 outlen = strnlen((char *)arg1, arg2 - 1) + 1;
1489 error = SYSCTL_OUT(req, NULL, outlen);
1491 if (error || !req->newptr)
/* Write path: reject strings that don't fit, then copy in and
 * NUL-terminate. */
1494 if ((req->newlen - req->newidx) >= arg2) {
1497 arg2 = (req->newlen - req->newidx);
1498 error = SYSCTL_IN(req, arg1, arg2);
1499 ((char *)arg1)[arg2] = '\0';
1505 * Handle any kind of opaque data.
1506 * arg1 points to it, arg2 is the size.
1510 sysctl_handle_opaque(SYSCTL_HANDLER_ARGS)
1514 struct sysctl_req req2;
1517 * Attempt to get a coherent snapshot, by using the thread
1518 * pre-emption counter updated from within mi_switch() to
1519 * determine if we were pre-empted during a bcopy() or
1520 * copyout(). Make 3 attempts at doing this before giving up.
1521 * If we encounter an error, stop immediately.
/* Retry loop: a td_generation change means we were preempted
 * mid-copy and the snapshot may be torn. */
1526 generation = curthread->td_generation;
1527 error = SYSCTL_OUT(req, arg1, arg2);
1531 if (generation != curthread->td_generation && tries < 3) {
1536 error = SYSCTL_IN(req, arg1, arg2);
1542 * Transfer functions to/from kernel space.
1543 * XXX: rather untested at this point
1546 sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l)
/* Copy as much of the l bytes at p as fits in the remaining old
 * buffer; a short copy with oldptr set reports overflow (ENOMEM
 * return elided in this view). */
1552 if (req->oldlen <= req->oldidx)
1555 if (i > req->oldlen - req->oldidx)
1556 i = req->oldlen - req->oldidx;
1558 bcopy(p, (char *)req->oldptr + req->oldidx, i);
1561 if (req->oldptr && i != l)
/* Kernel-space "new" transfer: plain bcopy from the request's new
 * buffer, rejecting short buffers. */
1567 sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l)
1571 if (req->newlen - req->newidx < l)
1573 bcopy((char *)req->newptr + req->newidx, p, l);
/*
 * In-kernel sysctl(2) equivalent: build a sysctl_req with the
 * kernel-space transfer functions and dispatch through sysctl_root().
 * On return *retval holds the number of bytes produced (clamped to
 * validlen on overflow); any buffer wired during the call is
 * unwired here.
 */
1579 kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
1580 size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags)
1583 struct sysctl_req req;
1585 bzero(&req, sizeof req);
1591 req.oldlen = *oldlenp;
1593 req.validlen = req.oldlen;
1600 req.newlen = newlen;
1604 req.oldfunc = sysctl_old_kernel;
1605 req.newfunc = sysctl_new_kernel;
1606 req.lock = REQ_UNWIRED;
1608 error = sysctl_root(0, name, namelen, &req);
/* Undo any wiring sysctl_wire_old_buffer() performed. */
1610 if (req.lock == REQ_WIRED && req.validlen > 0)
1611 vsunlock(req.oldptr, req.validlen);
1613 if (error && error != ENOMEM)
/* oldidx past validlen means truncation: report validlen instead. */
1617 if (req.oldptr && req.oldidx > req.validlen)
1618 *retval = req.validlen;
1620 *retval = req.oldidx;
/*
 * Name-based in-kernel sysctl: first resolve the dotted name via the
 * internal {0,3} name2oid sysctl, then issue the real request against
 * the resulting numeric OID.
 */
1626 kernel_sysctlbyname(struct thread *td, char *name, void *old, size_t *oldlenp,
1627 void *new, size_t newlen, size_t *retval, int flags)
1629 int oid[CTL_MAXNAME];
1630 size_t oidlen, plen;
1633 oid[0] = 0; /* sysctl internal magic */
1634 oid[1] = 3; /* name2oid */
1635 oidlen = sizeof(oid);
1637 error = kernel_sysctl(td, oid, 2, oid, &oidlen,
1638 (void *)name, strlen(name), &plen, flags);
/* plen is the byte length of the resolved OID array. */
1642 error = kernel_sysctl(td, oid, plen / sizeof(int), old, oldlenp,
1643 new, newlen, retval, flags);
1648 * Transfer function to/from user space.
1651 sysctl_old_user(struct sysctl_req *req, const void *p, size_t l)
1653 size_t i, len, origidx;
1656 origidx = req->oldidx;
/* Size-probe: no destination, just account the length. */
1658 if (req->oldptr == NULL)
1661 * If we have not wired the user supplied buffer and we are currently
1662 * holding locks, drop a witness warning, as it's possible that
1663 * write operations to the user page can sleep.
1665 if (req->lock != REQ_WIRED)
1666 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1667 "sysctl_old_user()");
1669 len = req->validlen;
1673 if (i > len - origidx)
/* Wired buffers use the non-faulting copyout variant. */
1675 if (req->lock == REQ_WIRED) {
1676 error = copyout_nofault(p, (char *)req->oldptr +
1679 error = copyout(p, (char *)req->oldptr + origidx, i);
/* User-space "new" transfer: copyin from the request's new buffer;
 * copyin may fault and sleep, hence the witness warning. */
1689 sysctl_new_user(struct sysctl_req *req, void *p, size_t l)
1695 if (req->newlen - req->newidx < l)
1697 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1698 "sysctl_new_user()");
1699 error = copyin((char *)req->newptr + req->newidx, p, l);
1705 * Wire the user space destination buffer. If set to a value greater than
1706 * zero, the len parameter limits the maximum amount of wired memory.
1709 sysctl_wire_old_buffer(struct sysctl_req *req, size_t len)
/* Wire min(len, oldlen) bytes (len == 0 means the whole buffer). */
1714 wiredlen = (len > 0 && len < req->oldlen) ? len : req->oldlen;
/* Only user-space, not-yet-wired requests with a destination
 * buffer need wiring. */
1716 if (req->lock != REQ_WIRED && req->oldptr &&
1717 req->oldfunc == sysctl_old_user) {
1718 if (wiredlen != 0) {
1719 ret = vslock(req->oldptr, wiredlen);
1726 req->lock = REQ_WIRED;
1727 req->validlen = wiredlen;
/*
 * sysctl_find_oid(): walk the sysctl tree matching name[0..namelen-1];
 * on success the located oid is returned via *noid and the depth reached
 * via *nindx.  The sysctl lock must be held (asserted below).
 * NOTE(review): loop-control and return lines are elided in this excerpt.
 */
1733 sysctl_find_oid(int *name, u_int namelen, struct sysctl_oid **noid,
1734 int *nindx, struct sysctl_req *req)
1736 struct sysctl_oid_list *lsp;
1737 struct sysctl_oid *oid;
1740 SYSCTL_ASSERT_LOCKED();
/* Start at the root of the tree. */
1741 lsp = &sysctl__children;
1743 while (indx < CTL_MAXNAME) {
/* Linear scan of this level's children for a matching OID number. */
1744 SLIST_FOREACH(oid, lsp, oid_link) {
1745 if (oid->oid_number == name[indx])
1752 if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
/* A node with its own handler, or an exact-length match, ends the walk. */
1753 if (oid->oid_handler != NULL || indx == namelen) {
1757 KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
1758 ("%s found DYING node %p", __func__, oid));
/* Otherwise descend into this node's children. */
1761 lsp = SYSCTL_CHILDREN(oid);
1762 } else if (indx == namelen) {
/* Leaf matched at exactly the requested depth. */
1766 KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
1767 ("%s found DYING node %p", __func__, oid));
1777  * Traverse our tree, and find the right node, execute whatever it points
1778  * to, and return the resulting error code.
1782 sysctl_root(SYSCTL_HANDLER_ARGS)
1784 struct sysctl_oid *oid;
1785 struct rm_priotracker tracker;
1786 int error, indx, lvl;
/* Hold the sysctl read lock across the lookup and the handler call. */
1788 SYSCTL_RLOCK(&tracker);
1790 error = sysctl_find_oid(arg1, arg2, &oid, &indx, req);
1794 if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
1796 		 * You can't call a sysctl when it's a node, but has
1797 		 * no handler. Inform the user that it's a node.
1798 		 * The indx may or may not be the same as namelen.
1800 if (oid->oid_handler == NULL) {
1806 	/* Is this sysctl writable? */
1807 if (req->newptr && !(oid->oid_kind & CTLFLAG_WR)) {
/* Permission checks below need a valid originating thread. */
1812 KASSERT(req->td != NULL, ("sysctl_root(): req->td == NULL"));
1814 #ifdef CAPABILITY_MODE
1816 	 * If the process is in capability mode, then don't permit reading or
1817 	 * writing unless specifically granted for the node.
1819 if (IN_CAPABILITY_MODE(req->td)) {
1820 if ((req->oldptr && !(oid->oid_kind & CTLFLAG_CAPRD)) ||
1821 (req->newptr && !(oid->oid_kind & CTLFLAG_CAPWR))) {
1828 	/* Is this sysctl sensitive to securelevels? */
1829 if (req->newptr && (oid->oid_kind & CTLFLAG_SECURE)) {
/* The required securelevel is encoded in the oid_kind flag bits. */
1830 lvl = (oid->oid_kind & CTLMASK_SECURE) >> CTLSHIFT_SECURE;
1831 error = securelevel_gt(req->td->td_ucred, lvl);
1836 	/* Is this sysctl writable by only privileged users? */
1837 if (req->newptr && !(oid->oid_kind & CTLFLAG_ANYBODY)) {
/* Jailed writers may qualify under the weaker WRITEJAIL privilege. */
1840 if (oid->oid_kind & CTLFLAG_PRISON)
1841 priv = PRIV_SYSCTL_WRITEJAIL;
1843 else if ((oid->oid_kind & CTLFLAG_VNET) &&
1844 prison_owns_vnet(req->td->td_ucred))
1845 priv = PRIV_SYSCTL_WRITEJAIL;
1848 priv = PRIV_SYSCTL_WRITE;
1849 error = priv_check(req->td, priv);
1854 if (!oid->oid_handler) {
/* Node handlers receive the remaining name components as arg1/arg2;
 * leaf handlers receive the oid's registered arg1/arg2. */
1859 if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
1860 arg1 = (int *)arg1 + indx;
1863 arg1 = oid->oid_arg1;
1864 arg2 = oid->oid_arg2;
/* MAC framework gets a veto before the handler runs. */
1867 error = mac_system_check_sysctl(req->td->td_ucred, oid, arg1, arg2,
/* VNET-virtualized variables: relocate arg1 into the current vnet's data. */
1873 if ((oid->oid_kind & CTLFLAG_VNET) && arg1 != NULL)
1874 arg1 = (void *)(curvnet->vnet_data_base + (uintptr_t)arg1);
1876 error = sysctl_root_handler_locked(oid, arg1, arg2, req, &tracker);
1879 SYSCTL_RUNLOCK(&tracker);
1883 #ifndef _SYS_SYSPROTO_H_
1884 struct sysctl_args {
/*
 * sys___sysctl(): the __sysctl(2) system call entry point.  Validates the
 * name length, copies the OID array in from user space, hands the request
 * to userland_sysctl(), and copies the resulting length back to *oldlenp.
 */
1894 sys___sysctl(struct thread *td, struct sysctl_args *uap)
1896 int error, i, name[CTL_MAXNAME];
/* A valid OID has at least 2 and at most CTL_MAXNAME components. */
1899 if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
1902 error = copyin(uap->name, &name, uap->namelen * sizeof(int));
1906 error = userland_sysctl(td, name, uap->namelen,
1907 uap->old, uap->oldlenp, 0,
1908 uap->new, uap->newlen, &j, 0);
/* ENOMEM is reported via the returned length, not as an outright failure. */
1909 if (error && error != ENOMEM)
/* Return the produced (or required) byte count through oldlenp. */
1912 i = copyout(&j, uap->oldlenp, sizeof(j));
1920  * This is used from various compatibility syscalls too. That's why name
1921  * must be in kernel space.
/*
 * userland_sysctl(): perform a sysctl request on behalf of user space.
 * The OID name array is in kernel memory; old/new buffers are user
 * pointers (oldlenp may be a kernel pointer when inkernel is set).
 * Retries the handler while it returns EAGAIN, and unwires the old
 * buffer afterwards if a handler wired it.
 */
1924 userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
1925 size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval,
1928 int error = 0, memlocked;
1929 struct sysctl_req req;
1931 bzero(&req, sizeof req);
/* Fetch the caller's buffer size from kernel or user space as requested. */
1938 req.oldlen = *oldlenp;
1940 error = copyin(oldlenp, &req.oldlen, sizeof(*oldlenp));
1945 req.validlen = req.oldlen;
/* Cheap access probes of the user buffers before running the handler. */
1948 if (!useracc(old, req.oldlen, VM_PROT_WRITE))
1954 if (!useracc(new, newlen, VM_PROT_READ))
1956 req.newlen = newlen;
/* Install the user-space transfer functions; buffer starts unwired. */
1960 req.oldfunc = sysctl_old_user;
1961 req.newfunc = sysctl_new_user;
1962 req.lock = REQ_UNWIRED;
1965 if (KTRPOINT(curthread, KTR_SYSCTL))
1966 ktrsysctl(name, namelen);
/* Serialize large requests so one caller cannot wire excessive memory. */
1969 if (req.oldptr && req.oldlen > PAGE_SIZE) {
1971 sx_xlock(&sysctlmemlock);
1974 CURVNET_SET(TD_TO_VNET(td));
/* Retry while the handler asks for it, yielding between attempts. */
1979 error = sysctl_root(0, name, namelen, &req);
1980 if (error != EAGAIN)
1982 kern_yield(PRI_USER);
/* Unwire the old buffer if sysctl_wire_old_buffer() wired it. */
1987 if (req.lock == REQ_WIRED && req.validlen > 0)
1988 vsunlock(req.oldptr, req.validlen);
1990 sx_xunlock(&sysctlmemlock);
1992 if (error && error != ENOMEM)
/* If output was truncated, report the clamped valid length instead. */
1996 if (req.oldptr && req.oldidx > req.validlen)
1997 *retval = req.validlen;
1999 *retval = req.oldidx;
2005  * Drain into a sysctl struct. The user buffer should be wired if a page
2006  * fault would cause issue.
/*
 * sbuf drain callback: forwards len bytes to SYSCTL_OUT() and returns the
 * count consumed on success, or the negated sysctl error on failure (the
 * sbuf drain protocol expects negative values for errors).
 */
2009 sbuf_sysctl_drain(void *arg, const char *data, int len)
2011 struct sysctl_req *req = arg;
2014 error = SYSCTL_OUT(req, data, len);
/* SYSCTL_OUT returns an errno (>= 0); a negative value would be a bug. */
2015 KASSERT(error >= 0, ("Got unexpected negative value %d", error));
2016 return (error == 0 ? len : -error);
/*
 * sbuf_new_for_sysctl(): set up an sbuf whose output drains directly into
 * a sysctl request via sbuf_sysctl_drain().
 * NOTE(review): the default-length value and return are elided here.
 */
2020 sbuf_new_for_sysctl(struct sbuf *s, char *buf, int length,
2021 struct sysctl_req *req)
2024 	/* Supply a default buffer size if none given. */
2025 if (buf == NULL && length == 0)
/* SBUF_INCLUDENUL: the terminating NUL is part of the sysctl output. */
2027 s = sbuf_new(s, buf, length, SBUF_FIXEDLEN | SBUF_INCLUDENUL);
2028 sbuf_set_drain(s, sbuf_sysctl_drain, req);