/* $NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $ */

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2009, Sun Microsystems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)svc.c 2.4 88/08/11 4.0 RPCSRC";
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * svc.c, Server-side remote procedure call interface.
 *
 * There are two sets of procedures here.  The xprt routines are
 * for handling transport handles.  The svc routines handle the
 * list of service routines.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/ucred.h>

#include <rpc/rpcb_clnt.h>
#include <rpc/replay.h>

#include <rpc/rpc_com.h>
#define SVC_VERSQUIET 0x0001		/* keep quiet about vers mismatch */
#define version_keepquiet(xp)	(SVC_EXT(xp)->xp_flags & SVC_VERSQUIET)

static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
    char *);
static void svc_new_thread(SVCGROUP *grp);
static void xprt_unregister_locked(SVCXPRT *xprt);
static void svc_change_space_used(SVCPOOL *pool, long delta);
static bool_t svc_request_space_available(SVCPOOL *pool);
static void svcpool_cleanup(SVCPOOL *pool);

/* *************** SVCXPRT related stuff **************** */

static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS);
static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS);
static int svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS);
SVCPOOL*
svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
{
	SVCPOOL *pool;
	SVCGROUP *grp;
	int g;

	pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);

	mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
	pool->sp_name = name;
	pool->sp_state = SVCPOOL_INIT;
	pool->sp_proc = NULL;
	TAILQ_INIT(&pool->sp_callouts);
	TAILQ_INIT(&pool->sp_lcallouts);
	pool->sp_minthreads = 1;
	pool->sp_maxthreads = 1;
	pool->sp_groupcount = 1;
	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_init(&grp->sg_lock, "sg_lock", NULL, MTX_DEF);
		grp->sg_pool = pool;
		grp->sg_state = SVCPOOL_ACTIVE;
		TAILQ_INIT(&grp->sg_xlist);
		TAILQ_INIT(&grp->sg_active);
		LIST_INIT(&grp->sg_idlethreads);
		grp->sg_minthreads = 1;
		grp->sg_maxthreads = 1;
	}

	/*
	 * Don't use more than a quarter of mbuf clusters.  Nota bene:
	 * nmbclusters is an int, but nmbclusters*MCLBYTES may overflow
	 * on LP64 architectures, so cast to u_long to avoid undefined
	 * behavior.  (ILP32 architectures cannot have nmbclusters
	 * large enough to overflow for other reasons.)
	 */
	pool->sp_space_high = (u_long)nmbclusters * MCLBYTES / 4;
	pool->sp_space_low = (pool->sp_space_high / 3) * 2;
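
	/*
	 * Worked example (illustrative numbers, not from the original
	 * source): with nmbclusters = 250000 and MCLBYTES = 2048,
	 * sp_space_high is 250000 * 2048 / 4 = 128000000 bytes (~122 MiB)
	 * and sp_space_low is two thirds of that, ~85333332 bytes (~81 MiB).
	 */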
	sysctl_ctx_init(&pool->sp_sysctl);
	if (IS_DEFAULT_VNET(curvnet) && sysctl_base) {
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "minthreads", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
		    pool, 0, svcpool_minthread_sysctl, "I",
		    "Minimum number of threads");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "maxthreads", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
		    pool, 0, svcpool_maxthread_sysctl, "I",
		    "Maximum number of threads");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "threads", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
		    pool, 0, svcpool_threads_sysctl, "I",
		    "Current number of threads");
		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "groups", CTLFLAG_RD, &pool->sp_groupcount, 0,
		    "Number of thread groups");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used", CTLFLAG_RD,
		    &pool->sp_space_used,
		    "Space in parsed but not handled requests.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used_highest", CTLFLAG_RD,
		    &pool->sp_space_used_highest,
		    "Highest space used since reboot.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_high", CTLFLAG_RW,
		    &pool->sp_space_high,
		    "Maximum space in parsed but not handled requests.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_low", CTLFLAG_RW,
		    &pool->sp_space_low,
		    "Low water mark for request space.");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttled", CTLFLAG_RD,
		    &pool->sp_space_throttled, 0,
		    "Whether nfs requests are currently throttled");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttle_count", CTLFLAG_RD,
		    &pool->sp_space_throttle_count, 0,
		    "Count of times throttling based on request space has occurred");
	}

	return (pool);
}
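
/*
 * Example (hypothetical usage sketch, not part of the original source):
 * a kernel RPC service typically creates one pool at module load time and
 * tunes its thread limits before calling svc_run().  All names below are
 * illustrative.
 *
 *	static SVCPOOL *myrpc_pool;
 *
 *	static void
 *	myrpc_init(void)
 *	{
 *
 *		myrpc_pool = svcpool_create("myrpc", NULL);
 *		myrpc_pool->sp_minthreads = 4;
 *		myrpc_pool->sp_maxthreads = 64;
 *	}
 */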
/*
 * Code common to svcpool_destroy() and svcpool_close(), which cleans up
 * the pool data structures.
 */
static void
svcpool_cleanup(SVCPOOL *pool)
{
	SVCGROUP *grp;
	SVCXPRT *xprt, *nxprt;
	struct svc_callout *s;
	struct svc_loss_callout *sl;
	struct svcxprt_list cleanup;
	int g;

	TAILQ_INIT(&cleanup);

	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while ((xprt = TAILQ_FIRST(&grp->sg_xlist)) != NULL) {
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
		mtx_unlock(&grp->sg_lock);
	}
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		if (xprt->xp_socket != NULL)
			soshutdown(xprt->xp_socket, SHUT_WR);
		SVC_RELEASE(xprt);
	}

	mtx_lock(&pool->sp_lock);
	while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_unreg(pool, s->sc_prog, s->sc_vers);
		mtx_lock(&pool->sp_lock);
	}
	while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_loss_unreg(pool, sl->slc_dispatch);
		mtx_lock(&pool->sp_lock);
	}
	mtx_unlock(&pool->sp_lock);
}
void
svcpool_destroy(SVCPOOL *pool)
{
	SVCGROUP *grp;
	int g;

	svcpool_cleanup(pool);

	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_destroy(&grp->sg_lock);
	}
	mtx_destroy(&pool->sp_lock);

	if (pool->sp_rcache)
		replay_freecache(pool->sp_rcache);

	sysctl_ctx_free(&pool->sp_sysctl);
	free(pool, M_RPC);
}
/*
 * Similar to svcpool_destroy(), except that it does not destroy the actual
 * data structures.  As such, "pool" may be used again.
 */
void
svcpool_close(SVCPOOL *pool)
{
	SVCGROUP *grp;
	int g;

	svcpool_cleanup(pool);

	/* Now, initialize the pool's state for a fresh svc_run() call. */
	mtx_lock(&pool->sp_lock);
	pool->sp_state = SVCPOOL_INIT;
	mtx_unlock(&pool->sp_lock);
	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		grp->sg_state = SVCPOOL_ACTIVE;
		mtx_unlock(&grp->sg_lock);
	}
}
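
/*
 * Illustrative note (assumption, not from the original source):
 * svcpool_close() suits a server that wants to drop all transports and
 * registrations but reuse the pool later, e.g.:
 *
 *	svcpool_close(pool);	// pool is back in SVCPOOL_INIT state
 *	...re-create transports and re-register services...
 *	svc_run(pool);		// run again on the same pool
 */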
/*
 * Sysctl handler to get the present thread count on a pool
 */
static int
svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int threads, error, g;

	pool = oidp->oid_arg1;
	threads = 0;
	mtx_lock(&pool->sp_lock);
	for (g = 0; g < pool->sp_groupcount; g++)
		threads += pool->sp_groups[g].sg_threadcount;
	mtx_unlock(&pool->sp_lock);
	error = sysctl_handle_int(oidp, &threads, 0, req);
	return (error);
}
/*
 * Sysctl handler to set the minimum thread count on a pool
 */
static int
svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int newminthreads, error, g;

	pool = oidp->oid_arg1;
	newminthreads = pool->sp_minthreads;
	error = sysctl_handle_int(oidp, &newminthreads, 0, req);
	if (error == 0 && newminthreads != pool->sp_minthreads) {
		if (newminthreads > pool->sp_maxthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		pool->sp_minthreads = newminthreads;
		for (g = 0; g < pool->sp_groupcount; g++) {
			pool->sp_groups[g].sg_minthreads = max(1,
			    pool->sp_minthreads / pool->sp_groupcount);
		}
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}
/*
 * Sysctl handler to set the maximum thread count on a pool
 */
static int
svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int newmaxthreads, error, g;

	pool = oidp->oid_arg1;
	newmaxthreads = pool->sp_maxthreads;
	error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
	if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
		if (newmaxthreads < pool->sp_minthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		pool->sp_maxthreads = newmaxthreads;
		for (g = 0; g < pool->sp_groupcount; g++) {
			pool->sp_groups[g].sg_maxthreads = max(1,
			    pool->sp_maxthreads / pool->sp_groupcount);
		}
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}
/*
 * Activate a transport handle.
 */
void
xprt_register(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;
	SVCGROUP *grp;
	int g;

	SVC_ACQUIRE(xprt);
	g = atomic_fetchadd_int(&pool->sp_nextgroup, 1) % pool->sp_groupcount;
	xprt->xp_group = grp = &pool->sp_groups[g];
	mtx_lock(&grp->sg_lock);
	xprt->xp_registered = TRUE;
	xprt->xp_active = FALSE;
	TAILQ_INSERT_TAIL(&grp->sg_xlist, xprt, xp_link);
	mtx_unlock(&grp->sg_lock);
}
/*
 * De-activate a transport handle.  Note: the locked version doesn't
 * release the transport - caller must do that after dropping the pool
 * lock.
 */
static void
xprt_unregister_locked(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	KASSERT(xprt->xp_registered == TRUE,
	    ("xprt_unregister_locked: not registered"));
	xprt_inactive_locked(xprt);
	TAILQ_REMOVE(&grp->sg_xlist, xprt, xp_link);
	xprt->xp_registered = FALSE;
}
void
xprt_unregister(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);
	if (xprt->xp_registered == FALSE) {
		/* Already unregistered by another thread */
		mtx_unlock(&grp->sg_lock);
		return;
	}
	xprt_unregister_locked(xprt);
	mtx_unlock(&grp->sg_lock);

	if (xprt->xp_socket != NULL)
		soshutdown(xprt->xp_socket, SHUT_WR);
	SVC_RELEASE(xprt);
}
/*
 * Attempt to assign a service thread to this transport.
 */
static bool_t
xprt_assignthread(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;
	SVCTHREAD *st;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	st = LIST_FIRST(&grp->sg_idlethreads);
	if (st) {
		LIST_REMOVE(st, st_ilink);
		SVC_ACQUIRE(xprt);
		xprt->xp_thread = st;
		st->st_xprt = xprt;
		cv_signal(&st->st_cond);
		return (TRUE);
	} else {
		/*
		 * See if we can create a new thread. The
		 * actual thread creation happens in
		 * svc_run_internal because our locking state
		 * is poorly defined (we are typically called
		 * from a socket upcall). Don't create more
		 * than one thread per second.
		 */
		if (grp->sg_state == SVCPOOL_ACTIVE
		    && grp->sg_lastcreatetime < time_uptime
		    && grp->sg_threadcount < grp->sg_maxthreads) {
			grp->sg_state = SVCPOOL_THREADWANTED;
		}
	}
	return (FALSE);
}
void
xprt_active(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);

	if (!xprt->xp_registered) {
		/*
		 * Race with xprt_unregister - we lose.
		 */
		mtx_unlock(&grp->sg_lock);
		return;
	}

	if (!xprt->xp_active) {
		xprt->xp_active = TRUE;
		if (xprt->xp_thread == NULL) {
			if (!svc_request_space_available(xprt->xp_pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&grp->sg_active, xprt,
				    xp_alink);
		}
	}

	mtx_unlock(&grp->sg_lock);
}
void
xprt_inactive_locked(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	if (xprt->xp_active) {
		if (xprt->xp_thread == NULL)
			TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
		xprt->xp_active = FALSE;
	}
}

void
xprt_inactive(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);
	xprt_inactive_locked(xprt);
	mtx_unlock(&grp->sg_lock);
}
/*
 * Variant of xprt_inactive() for use only when the transport is known to
 * be assigned to a thread.  For example, within receive handlers.
 */
void
xprt_inactive_self(SVCXPRT *xprt)
{

	KASSERT(xprt->xp_thread != NULL,
	    ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
	xprt->xp_active = FALSE;
}
/*
 * Add a service program to the callout list.
 * The dispatch routine will be called when an rpc request for this
 * program number comes in.
 */
bool_t
svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
    void (*dispatch)(struct svc_req *, SVCXPRT *),
    const struct netconfig *nconf)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_callout *s;
	char *netid = NULL;
	int flag = 0;

/* VARIABLES PROTECTED BY svc_lock: s, svc_head */

	if (xprt->xp_netid) {
		netid = strdup(xprt->xp_netid, M_RPC);
		flag = 1;
	} else if (nconf && nconf->nc_netid) {
		netid = strdup(nconf->nc_netid, M_RPC);
		flag = 1;
	} /* must have been created with svc_raw_create */
	if ((netid == NULL) && (flag == 1)) {
		return (FALSE);
	}

	mtx_lock(&pool->sp_lock);
	if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
		if (netid)
			free(netid, M_RPC);
		if (s->sc_dispatch == dispatch)
			goto rpcb_it; /* he is registering another xprt */
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}
	s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
	if (s == NULL) {
		if (netid)
			free(netid, M_RPC);
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}

	s->sc_prog = prog;
	s->sc_vers = vers;
	s->sc_dispatch = dispatch;
	s->sc_netid = netid;
	TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);

	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
		((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);

rpcb_it:
	mtx_unlock(&pool->sp_lock);
	/* now register the information with the local binder service */
	if (nconf) {
		bool_t dummy;
		struct netconfig tnc;
		struct netbuf nb;
		tnc = *nconf;
		nb.buf = &xprt->xp_ltaddr;
		nb.len = xprt->xp_ltaddr.ss_len;
		dummy = rpcb_set(prog, vers, &tnc, &nb);
		return (dummy);
	}
	return (TRUE);
}
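
/*
 * Example (hypothetical usage sketch): a service registers its dispatcher
 * for a program/version pair after creating a transport.  The program
 * number and names below are illustrative, not from the original source.
 *
 *	#define MYPROG	((rpcprog_t)0x20000001)
 *	#define MYVERS	((rpcvers_t)1)
 *
 *	static void myprog_dispatch(struct svc_req *rqstp, SVCXPRT *xprt);
 *
 *	if (!svc_reg(xprt, MYPROG, MYVERS, myprog_dispatch, nconf))
 *		printf("myrpc: svc_reg failed\n");
 */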
/*
 * Remove a service program from the callout list.
 */
void
svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
{
	struct svc_callout *s;

	/* unregister the information anyway */
	(void) rpcb_unset(prog, vers, NULL);
	mtx_lock(&pool->sp_lock);
	while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
		TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
		if (s->sc_netid)
			mem_free(s->sc_netid, sizeof (s->sc_netid) + 1);
		mem_free(s, sizeof (struct svc_callout));
	}
	mtx_unlock(&pool->sp_lock);
}
/*
 * Add a service connection loss program to the callout list.
 * The dispatch routine will be called when some port in this pool dies.
 */
bool_t
svc_loss_reg(SVCXPRT *xprt, void (*dispatch)(SVCXPRT *))
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_loss_callout *s;

	mtx_lock(&pool->sp_lock);
	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
		if (s->slc_dispatch == dispatch)
			break;
	}
	if (s != NULL) {
		mtx_unlock(&pool->sp_lock);
		return (TRUE);
	}
	s = malloc(sizeof(struct svc_loss_callout), M_RPC, M_NOWAIT);
	if (s == NULL) {
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}
	s->slc_dispatch = dispatch;
	TAILQ_INSERT_TAIL(&pool->sp_lcallouts, s, slc_link);
	mtx_unlock(&pool->sp_lock);
	return (TRUE);
}
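
/*
 * Example (hypothetical): a service holding per-transport state can use a
 * loss callout to learn when a transport dies.  myprog_loss is illustrative.
 *
 *	static void
 *	myprog_loss(SVCXPRT *xprt)
 *	{
 *		// drop any cached state tied to this transport
 *	}
 *
 *	(void) svc_loss_reg(xprt, myprog_loss);
 */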
/*
 * Remove a service connection loss program from the callout list.
 */
void
svc_loss_unreg(SVCPOOL *pool, void (*dispatch)(SVCXPRT *))
{
	struct svc_loss_callout *s;

	mtx_lock(&pool->sp_lock);
	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
		if (s->slc_dispatch == dispatch) {
			TAILQ_REMOVE(&pool->sp_lcallouts, s, slc_link);
			free(s, M_RPC);
			break;
		}
	}
	mtx_unlock(&pool->sp_lock);
}
/* ********************** CALLOUT list related stuff ************* */

/*
 * Search the callout list for a program number, return the callout
 * struct.
 */
static struct svc_callout *
svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
{
	struct svc_callout *s;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == prog && s->sc_vers == vers
		    && (netid == NULL || s->sc_netid == NULL ||
		    strcmp(netid, s->sc_netid) == 0))
			break;
	}

	return (s);
}
/* ******************* REPLY GENERATION ROUTINES ************ */

static bool_t
svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
    struct mbuf *body)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	bool_t ok;

	if (rqstp->rq_args) {
		m_freem(rqstp->rq_args);
		rqstp->rq_args = NULL;
	}

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    rply, svc_getrpccaller(rqstp), body);

	if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
		return (FALSE);

	ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body, &rqstp->rq_reply_seq);
	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	return (ok);
}
/*
 * Send a reply to an rpc request
 */
bool_t
svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void * xdr_location)
{
	struct rpc_msg rply;
	struct mbuf *m;
	XDR xdrs;
	bool_t ok;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	m = m_getcl(M_WAITOK, MT_DATA, 0);
	xdrmbuf_create(&xdrs, m, XDR_ENCODE);
	ok = xdr_results(&xdrs, xdr_location);
	XDR_DESTROY(&xdrs);

	if (ok) {
		return (svc_sendreply_common(rqstp, &rply, m));
	} else {
		m_freem(m);
		return (FALSE);
	}
}
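
/*
 * Example (hypothetical dispatch routine, not from the original source):
 * a dispatcher decodes arguments with svc_getargs(), runs the procedure,
 * and answers with svc_sendreply() or one of the svcerr_*() routines;
 * ownership of the request passes to the dispatcher, which must call
 * svc_freereq().  xdr_int stands in for the service's real XDR routines.
 *
 *	static void
 *	myprog_dispatch(struct svc_req *rqstp, SVCXPRT *xprt)
 *	{
 *		int arg, res;
 *
 *		switch (rqstp->rq_proc) {
 *		case 1:
 *			if (!svc_getargs(rqstp, (xdrproc_t) xdr_int,
 *			    (void *) &arg)) {
 *				svcerr_decode(rqstp);
 *				break;
 *			}
 *			res = arg + 1;
 *			svc_sendreply(rqstp, (xdrproc_t) xdr_int,
 *			    (void *) &res);
 *			break;
 *		default:
 *			svcerr_noproc(rqstp);
 *			break;
 *		}
 *		svc_freereq(rqstp);
 *	}
 */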
bool_t
svc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
{
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	return (svc_sendreply_common(rqstp, &rply, m));
}
/*
 * No procedure error reply
 */
void
svcerr_noproc(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROC_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
/*
 * Can't decode args error reply
 */
void
svcerr_decode(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = GARBAGE_ARGS;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
/*
 * Some system error
 */
void
svcerr_systemerr(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SYSTEM_ERR;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
/*
 * Authentication error reply
 */
void
svcerr_auth(struct svc_req *rqstp, enum auth_stat why)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_DENIED;
	rply.rjcted_rply.rj_stat = AUTH_ERROR;
	rply.rjcted_rply.rj_why = why;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
/*
 * Auth too weak error reply
 */
void
svcerr_weakauth(struct svc_req *rqstp)
{

	svcerr_auth(rqstp, AUTH_TOOWEAK);
}
/*
 * Program unavailable error reply
 */
void
svcerr_noprog(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
/*
 * Program version mismatch error reply
 */
void
svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_MISMATCH;
	rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
	rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
/*
 * Allocate a new server transport structure. All fields are
 * initialized to zero and xp_p3 is initialized to point at an
 * extension structure to hold various flags and authentication
 * parameters.
 */
SVCXPRT *
svc_xprt_alloc(void)
{
	SVCXPRT *xprt;
	SVCXPRT_EXT *ext;

	xprt = mem_alloc(sizeof(SVCXPRT));
	ext = mem_alloc(sizeof(SVCXPRT_EXT));
	xprt->xp_p3 = ext;
	refcount_init(&xprt->xp_refs, 1);

	return (xprt);
}

/*
 * Free a server transport structure.
 */
void
svc_xprt_free(SVCXPRT *xprt)
{

	mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT));
	/* The size argument is ignored, so 0 is ok. */
	mem_free(xprt->xp_gidp, 0);
	mem_free(xprt, sizeof(SVCXPRT));
}
/* ******************* SERVER INPUT STUFF ******************* */

/*
 * Read RPC requests from a transport and queue them to be
 * executed.  We handle authentication and replay cache replies here.
 * Actually dispatching the RPC is deferred till svc_executereq.
 */
static enum xprt_stat
svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_req *r;
	struct rpc_msg msg;
	struct mbuf *args;
	struct svc_loss_callout *s;
	enum xprt_stat stat;

	/* now receive msgs from xprt (support batch calls) */
	r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO);

	msg.rm_call.cb_cred.oa_base = r->rq_credarea;
	msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES];
	r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES];
	if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) {
		enum auth_stat why;

		/*
		 * Handle replays and authenticate before queuing the
		 * request to be executed.
		 */
		r->rq_xprt = xprt;
		if (pool->sp_rcache) {
			struct rpc_msg repmsg;
			struct mbuf *repbody;
			enum replay_state rs;
			rs = replay_find(pool->sp_rcache, &msg,
			    svc_getrpccaller(r), &repmsg, &repbody);
			switch (rs) {
			case RS_NEW:
				break;
			case RS_DONE:
				SVC_REPLY(xprt, &repmsg, r->rq_addr,
				    repbody, &r->rq_reply_seq);
				if (r->rq_addr) {
					free(r->rq_addr, M_SONAME);
					r->rq_addr = NULL;
				}
				m_freem(args);
				goto call_done;
			default:
				m_freem(args);
				goto call_done;
			}
		}

		r->rq_xid = msg.rm_xid;
		r->rq_prog = msg.rm_call.cb_prog;
		r->rq_vers = msg.rm_call.cb_vers;
		r->rq_proc = msg.rm_call.cb_proc;
		r->rq_size = sizeof(*r) + m_length(args, NULL);
		r->rq_args = args;
		if ((why = _authenticate(r, &msg)) != AUTH_OK) {
			/*
			 * RPCSEC_GSS uses this return code
			 * for requests that form part of its
			 * context establishment protocol and
			 * should not be dispatched to the
			 * application.
			 */
			if (why != RPCSEC_GSS_NODISPATCH)
				svcerr_auth(r, why);
			goto call_done;
		}

		if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) {
			svcerr_decode(r);
			goto call_done;
		}

		/*
		 * Everything checks out, return request to caller.
		 */
		*rqstp_ret = r;
		r = NULL;
	}
call_done:
	if (r) {
		svc_freereq(r);
		r = NULL;
	}
	if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
		TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link)
			(*s->slc_dispatch)(xprt);
		xprt_unregister(xprt);
	}

	return (stat);
}
static void
svc_executereq(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	SVCPOOL *pool = xprt->xp_pool;
	int prog_found;
	rpcvers_t low_vers;
	rpcvers_t high_vers;
	struct svc_callout *s;

	/* now match message with a registered service */
	prog_found = FALSE;
	low_vers = (rpcvers_t) -1L;
	high_vers = (rpcvers_t) 0L;
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == rqstp->rq_prog) {
			if (s->sc_vers == rqstp->rq_vers) {
				/*
				 * We hand ownership of r to the
				 * dispatch method - they must call
				 * svc_freereq.
				 */
				(*s->sc_dispatch)(rqstp, xprt);
				return;
			} /* found correct version */
			prog_found = TRUE;
			if (s->sc_vers < low_vers)
				low_vers = s->sc_vers;
			if (s->sc_vers > high_vers)
				high_vers = s->sc_vers;
		} /* found correct program */
	}

	/*
	 * if we got here, the program or version
	 * is not served ...
	 */
	if (prog_found)
		svcerr_progvers(rqstp, low_vers, high_vers);
	else
		svcerr_noprog(rqstp);

	svc_freereq(rqstp);
}
static void
svc_checkidle(SVCGROUP *grp)
{
	SVCXPRT *xprt, *nxprt;
	time_t timo;
	struct svcxprt_list cleanup;

	TAILQ_INIT(&cleanup);
	TAILQ_FOREACH_SAFE(xprt, &grp->sg_xlist, xp_link, nxprt) {
		/*
		 * Only some transports have idle timers.  Don't time
		 * something out which is just waking up.
		 */
		if (!xprt->xp_idletimeout || xprt->xp_thread)
			continue;

		timo = xprt->xp_lastactive + xprt->xp_idletimeout;
		if (time_uptime > timo) {
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
	}

	mtx_unlock(&grp->sg_lock);
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		soshutdown(xprt->xp_socket, SHUT_WR);
		SVC_RELEASE(xprt);
	}
	mtx_lock(&grp->sg_lock);
}
static void
svc_assign_waiting_sockets(SVCPOOL *pool)
{
	SVCGROUP *grp;
	SVCXPRT *xprt;
	int g;

	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while ((xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
			if (xprt_assignthread(xprt))
				TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
			else
				break;
		}
		mtx_unlock(&grp->sg_lock);
	}
}
static void
svc_change_space_used(SVCPOOL *pool, long delta)
{
	unsigned long value;

	value = atomic_fetchadd_long(&pool->sp_space_used, delta) + delta;
	if (delta > 0) {
		if (value >= pool->sp_space_high && !pool->sp_space_throttled) {
			pool->sp_space_throttled = TRUE;
			pool->sp_space_throttle_count++;
		}
		if (value > pool->sp_space_used_highest)
			pool->sp_space_used_highest = value;
	} else {
		if (value < pool->sp_space_low && pool->sp_space_throttled) {
			pool->sp_space_throttled = FALSE;
			svc_assign_waiting_sockets(pool);
		}
	}
}
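
/*
 * Illustrative note: the high/low pair implements hysteresis.  With the
 * example numbers given for svcpool_create() above (sp_space_high =
 * 128000000, sp_space_low ~= 85333332), the pool starts throttling when
 * parsed-but-unexecuted request bytes reach the high mark and resumes
 * assigning work only once usage falls below the low mark, which avoids
 * rapid on/off flapping around a single threshold.
 */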
static bool_t
svc_request_space_available(SVCPOOL *pool)
{

	if (pool->sp_space_throttled)
		return (FALSE);
	return (TRUE);
}
static void
svc_run_internal(SVCGROUP *grp, bool_t ismaster)
{
	SVCPOOL *pool = grp->sg_pool;
	SVCTHREAD *st, *stpref;
	SVCXPRT *xprt;
	enum xprt_stat stat;
	struct svc_req *rqstp;
	struct proc *p;
	long sz;
	int error;

	st = mem_alloc(sizeof(*st));
	mtx_init(&st->st_lock, "st_lock", NULL, MTX_DEF);
	st->st_pool = pool;
	st->st_xprt = NULL;
	STAILQ_INIT(&st->st_reqs);
	cv_init(&st->st_cond, "rpcsvc");

	mtx_lock(&grp->sg_lock);

	/*
	 * If we are a new thread which was spawned to cope with
	 * increased load, set the state back to SVCPOOL_ACTIVE.
	 */
	if (grp->sg_state == SVCPOOL_THREADSTARTING)
		grp->sg_state = SVCPOOL_ACTIVE;

	while (grp->sg_state != SVCPOOL_CLOSING) {
		/*
		 * Create new thread if requested.
		 */
		if (grp->sg_state == SVCPOOL_THREADWANTED) {
			grp->sg_state = SVCPOOL_THREADSTARTING;
			grp->sg_lastcreatetime = time_uptime;
			mtx_unlock(&grp->sg_lock);
			svc_new_thread(grp);
			mtx_lock(&grp->sg_lock);
			continue;
		}

		/*
		 * Check for idle transports once per second.
		 */
		if (time_uptime > grp->sg_lastidlecheck) {
			grp->sg_lastidlecheck = time_uptime;
			svc_checkidle(grp);
		}

		xprt = st->st_xprt;
		if (!xprt) {
			/*
			 * Enforce maxthreads count.
			 */
			if (!ismaster && grp->sg_threadcount >
			    grp->sg_maxthreads)
				break;

			/*
			 * Before sleeping, see if we can find an
			 * active transport which isn't being serviced
			 * by a thread.
			 */
			if (svc_request_space_available(pool) &&
			    (xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
				TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
				SVC_ACQUIRE(xprt);
				xprt->xp_thread = st;
				st->st_xprt = xprt;
				continue;
			}

			LIST_INSERT_HEAD(&grp->sg_idlethreads, st, st_ilink);
			if (ismaster || (!ismaster &&
			    grp->sg_threadcount > grp->sg_minthreads))
				error = cv_timedwait_sig(&st->st_cond,
				    &grp->sg_lock, 5 * hz);
			else
				error = cv_wait_sig(&st->st_cond,
				    &grp->sg_lock);
			if (st->st_xprt == NULL)
				LIST_REMOVE(st, st_ilink);

			/*
			 * Reduce worker thread count when idle.
			 */
			if (error == EWOULDBLOCK) {
				if (!ismaster
				    && (grp->sg_threadcount
					> grp->sg_minthreads)
				    && !st->st_xprt)
					break;
			} else if (error != 0) {
				KASSERT(error == EINTR || error == ERESTART,
				    ("non-signal error %d", error));
				mtx_unlock(&grp->sg_lock);
				p = curproc;
				PROC_LOCK(p);
				if (P_SHOULDSTOP(p) ||
				    (p->p_flag & P_TOTAL_STOP) != 0) {
					thread_suspend_check(0);
					PROC_UNLOCK(p);
					mtx_lock(&grp->sg_lock);
				} else {
					PROC_UNLOCK(p);
					svc_exit(pool);
					mtx_lock(&grp->sg_lock);
					break;
				}
			}
			continue;
		}
		mtx_unlock(&grp->sg_lock);

		/*
		 * Drain the transport socket and queue up any RPCs.
		 */
		xprt->xp_lastactive = time_uptime;
		do {
			if (!svc_request_space_available(pool))
				break;
			rqstp = NULL;
			stat = svc_getreq(xprt, &rqstp);
			if (rqstp) {
				svc_change_space_used(pool, rqstp->rq_size);
				/*
				 * See if the application has a preference
				 * for some other thread.
				 */
				if (pool->sp_assign) {
					stpref = pool->sp_assign(st, rqstp);
					rqstp->rq_thread = stpref;
					STAILQ_INSERT_TAIL(&stpref->st_reqs,
					    rqstp, rq_link);
					mtx_unlock(&stpref->st_lock);
					if (stpref != st)
						rqstp = NULL;
				} else {
					rqstp->rq_thread = st;
					STAILQ_INSERT_TAIL(&st->st_reqs,
					    rqstp, rq_link);
				}
			}
		} while (rqstp == NULL && stat == XPRT_MOREREQS
		    && grp->sg_state != SVCPOOL_CLOSING);

		/*
		 * Move this transport to the end of the active list to
		 * ensure fairness when multiple transports are active.
		 * If this was the last queued request, svc_getreq will end
		 * up calling xprt_inactive to remove from the active list.
		 */
		mtx_lock(&grp->sg_lock);
		xprt->xp_thread = NULL;
		st->st_xprt = NULL;
		if (xprt->xp_active) {
			if (!svc_request_space_available(pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&grp->sg_active,
				    xprt, xp_alink);
		}
		mtx_unlock(&grp->sg_lock);
		SVC_RELEASE(xprt);

		/*
		 * Execute what we have queued.
		 */
		mtx_lock(&st->st_lock);
		while ((rqstp = STAILQ_FIRST(&st->st_reqs)) != NULL) {
			STAILQ_REMOVE_HEAD(&st->st_reqs, rq_link);
			mtx_unlock(&st->st_lock);
			sz = (long)rqstp->rq_size;
			svc_executereq(rqstp);
			svc_change_space_used(pool, -sz);
			mtx_lock(&st->st_lock);
		}
		mtx_unlock(&st->st_lock);
		mtx_lock(&grp->sg_lock);
	}

	if (st->st_xprt) {
		xprt = st->st_xprt;
		st->st_xprt = NULL;
		SVC_RELEASE(xprt);
	}
	KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
	mtx_destroy(&st->st_lock);
	cv_destroy(&st->st_cond);
	mem_free(st, sizeof(*st));

	grp->sg_threadcount--;
	if (!ismaster)
		wakeup(grp);
	mtx_unlock(&grp->sg_lock);
}
static void
svc_thread_start(void *arg)
{

	svc_run_internal((SVCGROUP *) arg, FALSE);
	kthread_exit();
}

static void
svc_new_thread(SVCGROUP *grp)
{
	SVCPOOL *pool = grp->sg_pool;
	struct thread *td;

	mtx_lock(&grp->sg_lock);
	grp->sg_threadcount++;
	mtx_unlock(&grp->sg_lock);
	kthread_add(svc_thread_start, grp, pool->sp_proc, &td, 0, 0,
	    "%s: service", pool->sp_name);
}
void
svc_run(SVCPOOL *pool)
{
	int g, i;
	struct proc *p;
	struct thread *td;
	SVCGROUP *grp;

	p = curproc;
	td = curthread;
	snprintf(td->td_name, sizeof(td->td_name),
	    "%s: master", pool->sp_name);
	pool->sp_state = SVCPOOL_ACTIVE;
	pool->sp_proc = p;

	/* Choose group count based on number of threads and CPUs. */
	pool->sp_groupcount = max(1, min(SVC_MAXGROUPS,
	    min(pool->sp_maxthreads / 2, mp_ncpus) / 6));
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		grp->sg_minthreads = max(1,
		    pool->sp_minthreads / pool->sp_groupcount);
		grp->sg_maxthreads = max(1,
		    pool->sp_maxthreads / pool->sp_groupcount);
		grp->sg_lastcreatetime = time_uptime;
	}

	/* Starting threads */
	pool->sp_groups[0].sg_threadcount++;
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		for (i = ((g == 0) ? 1 : 0); i < grp->sg_minthreads; i++)
			svc_new_thread(grp);
	}
	svc_run_internal(&pool->sp_groups[0], TRUE);

	/* Waiting for threads to stop. */
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while (grp->sg_threadcount > 0)
			msleep(grp, &grp->sg_lock, 0, "svcexit", 0);
		mtx_unlock(&grp->sg_lock);
	}
}
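
/*
 * Example (hypothetical): the complete server lifecycle, tying the pieces
 * above together.  Note how the group count is derived: with
 * sp_maxthreads = 128 on a 32-CPU machine, min(128 / 2, 32) / 6 = 5
 * thread groups are used (clamped to [1, SVC_MAXGROUPS]).
 *
 *	pool = svcpool_create("myrpc", NULL);
 *	pool->sp_maxthreads = 128;
 *	...create transports and svc_reg() each program...
 *	svc_run(pool);		// blocks until svc_exit(pool)
 *	svcpool_destroy(pool);
 */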
void
svc_exit(SVCPOOL *pool)
{
	SVCGROUP *grp;
	SVCTHREAD *st;
	int g;

	pool->sp_state = SVCPOOL_CLOSING;
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		if (grp->sg_state != SVCPOOL_CLOSING) {
			grp->sg_state = SVCPOOL_CLOSING;
			LIST_FOREACH(st, &grp->sg_idlethreads, st_ilink)
				cv_signal(&st->st_cond);
		}
		mtx_unlock(&grp->sg_lock);
	}
}
bool_t
svc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	struct mbuf *m;
	XDR xdrs;
	bool_t stat;

	m = rqstp->rq_args;
	rqstp->rq_args = NULL;

	xdrmbuf_create(&xdrs, m, XDR_DECODE);
	stat = xargs(&xdrs, args);
	XDR_DESTROY(&xdrs);

	return (stat);
}

bool_t
svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	XDR xdrs;

	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	xdrs.x_op = XDR_FREE;
	return (xargs(&xdrs, args));
}
void
svc_freereq(struct svc_req *rqstp)
{
	SVCTHREAD *st;
	SVCPOOL *pool;

	st = rqstp->rq_thread;
	if (st) {
		pool = st->st_pool;
		if (pool->sp_done)
			pool->sp_done(st, rqstp);
	}

	if (rqstp->rq_auth.svc_ah_ops)
		SVCAUTH_RELEASE(&rqstp->rq_auth);

	if (rqstp->rq_xprt) {
		SVC_RELEASE(rqstp->rq_xprt);
	}

	if (rqstp->rq_addr)
		free(rqstp->rq_addr, M_SONAME);

	if (rqstp->rq_args)
		m_freem(rqstp->rq_args);

	free(rqstp, M_RPC);
}