/*	$NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $	*/

/*-
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)svc.c	2.4 88/08/11 4.0 RPCSRC";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * svc.c, Server-side remote procedure call interface.
 *
 * There are two sets of procedures here.  The xprt routines are
 * for handling transport handles.  The svc routines handle the
 * list of service routines.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 */
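
/*
 * Illustrative usage sketch (not part of the original interfaces
 * below): a kernel RPC service is typically built by creating a
 * pool, registering a dispatch routine for a program/version pair
 * on a transport, and running the pool.  MYPROG, MYVERS and
 * myprog_dispatch are hypothetical names; svc_vc_create() is the
 * stream transport constructor from svc_vc.c.
 *
 *      pool = svcpool_create("myrpc", NULL);
 *      xprt = svc_vc_create(pool, so, 0, 0);
 *      (void) svc_reg(xprt, MYPROG, MYVERS, myprog_dispatch, nconf);
 *      svc_run(pool);          (blocks until svc_exit() is called)
 */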

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/ucred.h>

#include <rpc/rpc.h>
#include <rpc/rpcb_clnt.h>
#include <rpc/replay.h>

#include <rpc/rpc_com.h>

#define SVC_VERSQUIET 0x0001		/* keep quiet about vers mismatch */
#define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET)

static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
    char *);
static void svc_new_thread(SVCPOOL *pool);
static void xprt_unregister_locked(SVCXPRT *xprt);
static void svc_change_space_used(SVCPOOL *pool, int delta);
static bool_t svc_request_space_available(SVCPOOL *pool);

/* ***************  SVCXPRT related stuff **************** */

static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS);
static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS);

SVCPOOL*
svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
{
        SVCPOOL *pool;

        pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);

        mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
        pool->sp_name = name;
        pool->sp_state = SVCPOOL_INIT;
        pool->sp_proc = NULL;
        TAILQ_INIT(&pool->sp_xlist);
        TAILQ_INIT(&pool->sp_active);
        TAILQ_INIT(&pool->sp_callouts);
        TAILQ_INIT(&pool->sp_lcallouts);
        LIST_INIT(&pool->sp_threads);
        LIST_INIT(&pool->sp_idlethreads);
        pool->sp_minthreads = 1;
        pool->sp_maxthreads = 1;
        pool->sp_threadcount = 0;

        /*
         * Don't use more than a quarter of mbuf clusters, or more
         * than 45MB, for buffering requests.
         */
        pool->sp_space_high = nmbclusters * MCLBYTES / 4;
        if (pool->sp_space_high > 45 << 20)
                pool->sp_space_high = 45 << 20;
        pool->sp_space_low = 2 * pool->sp_space_high / 3;

        sysctl_ctx_init(&pool->sp_sysctl);
        if (sysctl_base) {
                SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
                    "minthreads", CTLTYPE_INT | CTLFLAG_RW,
                    pool, 0, svcpool_minthread_sysctl, "I", "");
                SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
                    "maxthreads", CTLTYPE_INT | CTLFLAG_RW,
                    pool, 0, svcpool_maxthread_sysctl, "I", "");
                SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
                    "threads", CTLFLAG_RD, &pool->sp_threadcount, 0, "");

                SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
                    "request_space_used", CTLFLAG_RD,
                    &pool->sp_space_used, 0,
                    "Space in parsed but not handled requests.");

                SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
                    "request_space_used_highest", CTLFLAG_RD,
                    &pool->sp_space_used_highest, 0,
                    "Highest space used since reboot.");

                SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
                    "request_space_high", CTLFLAG_RW,
                    &pool->sp_space_high, 0,
                    "Maximum space in parsed but not handled requests.");

                SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
                    "request_space_low", CTLFLAG_RW,
                    &pool->sp_space_low, 0,
                    "Low water mark for request space.");

                SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
                    "request_space_throttled", CTLFLAG_RD,
                    &pool->sp_space_throttled, 0,
                    "Whether nfs requests are currently throttled");

                SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
                    "request_space_throttle_count", CTLFLAG_RD,
                    &pool->sp_space_throttle_count, 0,
                    "Count of times throttling based on request space has occurred");
        }

        return (pool);
}
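
/*
 * Example (hypothetical caller): the new NFS server creates its pool
 * roughly as
 *
 *      pool = svcpool_create("nfsd", SYSCTL_STATIC_CHILDREN(_vfs_nfsd));
 *
 * which hangs the knobs above off vfs.nfsd.  Passing a NULL
 * sysctl_base simply skips creation of the sysctl nodes; the pool is
 * otherwise fully functional.
 */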

void
svcpool_destroy(SVCPOOL *pool)
{
        SVCXPRT *xprt, *nxprt;
        struct svc_callout *s;
        struct svc_loss_callout *sl;
        struct svcxprt_list cleanup;

        TAILQ_INIT(&cleanup);
        mtx_lock(&pool->sp_lock);

        while (TAILQ_FIRST(&pool->sp_xlist)) {
                xprt = TAILQ_FIRST(&pool->sp_xlist);
                xprt_unregister_locked(xprt);
                TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
        }

        while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) {
                mtx_unlock(&pool->sp_lock);
                svc_unreg(pool, s->sc_prog, s->sc_vers);
                mtx_lock(&pool->sp_lock);
        }
        while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) {
                mtx_unlock(&pool->sp_lock);
                svc_loss_unreg(pool, sl->slc_dispatch);
                mtx_lock(&pool->sp_lock);
        }
        mtx_unlock(&pool->sp_lock);

        TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
                SVC_RELEASE(xprt);
        }

        mtx_destroy(&pool->sp_lock);

        if (pool->sp_rcache)
                replay_freecache(pool->sp_rcache);

        sysctl_ctx_free(&pool->sp_sysctl);
        free(pool, M_RPC);
}

static bool_t
svcpool_active(SVCPOOL *pool)
{
        enum svcpool_state state = pool->sp_state;

        if (state == SVCPOOL_INIT || state == SVCPOOL_CLOSING)
                return (FALSE);
        return (TRUE);
}

/*
 * Sysctl handler to set the minimum thread count on a pool
 */
static int
svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
{
        SVCPOOL *pool;
        int newminthreads, error, n;

        pool = oidp->oid_arg1;
        newminthreads = pool->sp_minthreads;
        error = sysctl_handle_int(oidp, &newminthreads, 0, req);
        if (error == 0 && newminthreads != pool->sp_minthreads) {
                if (newminthreads > pool->sp_maxthreads)
                        return (EINVAL);
                mtx_lock(&pool->sp_lock);
                if (newminthreads > pool->sp_minthreads
                    && svcpool_active(pool)) {
                        /*
                         * If the pool is running and we are
                         * increasing, create some more threads now.
                         */
                        n = newminthreads - pool->sp_threadcount;
                        if (n > 0) {
                                mtx_unlock(&pool->sp_lock);
                                while (n--)
                                        svc_new_thread(pool);
                                mtx_lock(&pool->sp_lock);
                        }
                }
                pool->sp_minthreads = newminthreads;
                mtx_unlock(&pool->sp_lock);
        }
        return (error);
}

/*
 * Sysctl handler to set the maximum thread count on a pool
 */
static int
svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
{
        SVCPOOL *pool;
        SVCTHREAD *st;
        int newmaxthreads, error;

        pool = oidp->oid_arg1;
        newmaxthreads = pool->sp_maxthreads;
        error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
        if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
                if (newmaxthreads < pool->sp_minthreads)
                        return (EINVAL);
                mtx_lock(&pool->sp_lock);
                if (newmaxthreads < pool->sp_maxthreads
                    && svcpool_active(pool)) {
                        /*
                         * If the pool is running and we are
                         * decreasing, wake up some idle threads to
                         * encourage them to exit.
                         */
                        LIST_FOREACH(st, &pool->sp_idlethreads, st_ilink)
                                cv_signal(&st->st_cond);
                }
                pool->sp_maxthreads = newmaxthreads;
                mtx_unlock(&pool->sp_lock);
        }
        return (error);
}
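
/*
 * With a pool attached under vfs.nfsd as in the example above, these
 * handlers can be exercised from userland (illustrative):
 *
 *      sysctl vfs.nfsd.minthreads=4
 *      sysctl vfs.nfsd.maxthreads=64
 *
 * Setting minthreads above maxthreads, or maxthreads below
 * minthreads, fails with EINVAL as enforced above.
 */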

/*
 * Activate a transport handle.
 */
void
xprt_register(SVCXPRT *xprt)
{
        SVCPOOL *pool = xprt->xp_pool;

        SVC_ACQUIRE(xprt);
        mtx_lock(&pool->sp_lock);
        xprt->xp_registered = TRUE;
        xprt->xp_active = FALSE;
        TAILQ_INSERT_TAIL(&pool->sp_xlist, xprt, xp_link);
        mtx_unlock(&pool->sp_lock);
}

/*
 * De-activate a transport handle. Note: the locked version doesn't
 * release the transport - caller must do that after dropping the pool
 * lock.
 */
static void
xprt_unregister_locked(SVCXPRT *xprt)
{
        SVCPOOL *pool = xprt->xp_pool;

        mtx_assert(&pool->sp_lock, MA_OWNED);
        KASSERT(xprt->xp_registered == TRUE,
            ("xprt_unregister_locked: not registered"));
        xprt_inactive_locked(xprt);
        TAILQ_REMOVE(&pool->sp_xlist, xprt, xp_link);
        xprt->xp_registered = FALSE;
}

void
xprt_unregister(SVCXPRT *xprt)
{
        SVCPOOL *pool = xprt->xp_pool;

        mtx_lock(&pool->sp_lock);
        if (xprt->xp_registered == FALSE) {
                /* Already unregistered by another thread */
                mtx_unlock(&pool->sp_lock);
                return;
        }
        xprt_unregister_locked(xprt);
        mtx_unlock(&pool->sp_lock);

        SVC_RELEASE(xprt);
}

/*
 * Attempt to assign a service thread to this transport.
 */
static int
xprt_assignthread(SVCXPRT *xprt)
{
        SVCPOOL *pool = xprt->xp_pool;
        SVCTHREAD *st;

        mtx_assert(&pool->sp_lock, MA_OWNED);
        st = LIST_FIRST(&pool->sp_idlethreads);
        if (st) {
                LIST_REMOVE(st, st_ilink);
                st->st_idle = FALSE;
                SVC_ACQUIRE(xprt);
                xprt->xp_thread = st;
                st->st_xprt = xprt;
                cv_signal(&st->st_cond);
                return (TRUE);
        } else {
                /*
                 * See if we can create a new thread. The
                 * actual thread creation happens in
                 * svc_run_internal because our locking state
                 * is poorly defined (we are typically called
                 * from a socket upcall). Don't create more
                 * than one thread per second.
                 */
                if (pool->sp_state == SVCPOOL_ACTIVE
                    && pool->sp_lastcreatetime < time_uptime
                    && pool->sp_threadcount < pool->sp_maxthreads) {
                        pool->sp_state = SVCPOOL_THREADWANTED;
                }
        }
        return (FALSE);
}

void
xprt_active(SVCXPRT *xprt)
{
        SVCPOOL *pool = xprt->xp_pool;

        mtx_lock(&pool->sp_lock);

        if (!xprt->xp_registered) {
                /*
                 * Race with xprt_unregister - we lose.
                 */
                mtx_unlock(&pool->sp_lock);
                return;
        }

        if (!xprt->xp_active) {
                xprt->xp_active = TRUE;
                if (xprt->xp_thread == NULL) {
                        if (!svc_request_space_available(pool) ||
                            !xprt_assignthread(xprt))
                                TAILQ_INSERT_TAIL(&pool->sp_active, xprt,
                                    xp_alink);
                }
        }

        mtx_unlock(&pool->sp_lock);
}

void
xprt_inactive_locked(SVCXPRT *xprt)
{
        SVCPOOL *pool = xprt->xp_pool;

        mtx_assert(&pool->sp_lock, MA_OWNED);
        if (xprt->xp_active) {
                if (xprt->xp_thread == NULL)
                        TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
                xprt->xp_active = FALSE;
        }
}

void
xprt_inactive(SVCXPRT *xprt)
{
        SVCPOOL *pool = xprt->xp_pool;

        mtx_lock(&pool->sp_lock);
        xprt_inactive_locked(xprt);
        mtx_unlock(&pool->sp_lock);
}

/*
 * Variant of xprt_inactive() for use only when it is certain that
 * the transport is assigned to a thread, for example within receive
 * handlers.
 */
void
xprt_inactive_self(SVCXPRT *xprt)
{

        KASSERT(xprt->xp_thread != NULL,
            ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
        xprt->xp_active = FALSE;
}

/*
 * Add a service program to the callout list.
 * The dispatch routine will be called when an RPC request for this
 * program number comes in.
 */
bool_t
svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
    void (*dispatch)(struct svc_req *, SVCXPRT *),
    const struct netconfig *nconf)
{
        SVCPOOL *pool = xprt->xp_pool;
        struct svc_callout *s;
        char *netid = NULL;
        int flag = 0;

/* VARIABLES PROTECTED BY svc_lock: s, svc_head */

        if (xprt->xp_netid) {
                netid = strdup(xprt->xp_netid, M_RPC);
                flag = 1;
        } else if (nconf && nconf->nc_netid) {
                netid = strdup(nconf->nc_netid, M_RPC);
                flag = 1;
        } /* must have been created with svc_raw_create */
        if ((netid == NULL) && (flag == 1)) {
                return (FALSE);
        }

        mtx_lock(&pool->sp_lock);
        if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
                if (netid)
                        free(netid, M_RPC);
                if (s->sc_dispatch == dispatch)
                        goto rpcb_it; /* the caller is registering another xprt */
                mtx_unlock(&pool->sp_lock);
                return (FALSE);
        }
        s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
        if (s == NULL) {
                if (netid)
                        free(netid, M_RPC);
                mtx_unlock(&pool->sp_lock);
                return (FALSE);
        }

        s->sc_prog = prog;
        s->sc_vers = vers;
        s->sc_dispatch = dispatch;
        s->sc_netid = netid;
        TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);

        if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
                ((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);

rpcb_it:
        mtx_unlock(&pool->sp_lock);
        /* now register the information with the local binder service */
        if (nconf) {
                bool_t dummy;
                struct netconfig tnc;
                struct netbuf nb;
                tnc = *nconf;
                nb.buf = &xprt->xp_ltaddr;
                nb.len = xprt->xp_ltaddr.ss_len;
                dummy = rpcb_set(prog, vers, &tnc, &nb);
                return (dummy);
        }
        return (TRUE);
}
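
/*
 * Registration sketch (names hypothetical):
 *
 *      static void myprog_dispatch(struct svc_req *, SVCXPRT *);
 *
 *      if (!svc_reg(xprt, MYPROG, MYVERS, myprog_dispatch, nconf))
 *              printf("svc_reg failed for prog %u vers %u\n",
 *                  (unsigned)MYPROG, (unsigned)MYVERS);
 *
 * When nconf is non-NULL the mapping is also advertised to the local
 * rpcbind via rpcb_set(); passing a NULL nconf skips that step.
 */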

/*
 * Remove a service program from the callout list.
 */
void
svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
{
        struct svc_callout *s;

        /* unregister the information anyway */
        (void) rpcb_unset(prog, vers, NULL);
        mtx_lock(&pool->sp_lock);
        while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
                TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
                if (s->sc_netid)
                        mem_free(s->sc_netid, strlen(s->sc_netid) + 1);
                mem_free(s, sizeof (struct svc_callout));
        }
        mtx_unlock(&pool->sp_lock);
}

/*
 * Add a service connection loss program to the callout list.
 * The dispatch routine will be called when some transport in this
 * pool dies.
 */
bool_t
svc_loss_reg(SVCXPRT *xprt, void (*dispatch)(SVCXPRT *))
{
        SVCPOOL *pool = xprt->xp_pool;
        struct svc_loss_callout *s;

        mtx_lock(&pool->sp_lock);
        TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
                if (s->slc_dispatch == dispatch)
                        break;
        }
        if (s != NULL) {
                mtx_unlock(&pool->sp_lock);
                return (TRUE);
        }
        s = malloc(sizeof (struct svc_loss_callout), M_RPC, M_NOWAIT);
        if (s == NULL) {
                mtx_unlock(&pool->sp_lock);
                return (FALSE);
        }
        s->slc_dispatch = dispatch;
        TAILQ_INSERT_TAIL(&pool->sp_lcallouts, s, slc_link);
        mtx_unlock(&pool->sp_lock);
        return (TRUE);
}

/*
 * Remove a service connection loss program from the callout list.
 */
void
svc_loss_unreg(SVCPOOL *pool, void (*dispatch)(SVCXPRT *))
{
        struct svc_loss_callout *s;

        mtx_lock(&pool->sp_lock);
        TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
                if (s->slc_dispatch == dispatch) {
                        TAILQ_REMOVE(&pool->sp_lcallouts, s, slc_link);
                        free(s, M_RPC);
                        break;
                }
        }
        mtx_unlock(&pool->sp_lock);
}

/* ********************** CALLOUT list related stuff ************* */

/*
 * Search the callout list for a program number, return the callout
 * struct.
 */
static struct svc_callout *
svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
{
        struct svc_callout *s;

        mtx_assert(&pool->sp_lock, MA_OWNED);
        TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
                if (s->sc_prog == prog && s->sc_vers == vers
                    && (netid == NULL || s->sc_netid == NULL ||
                        strcmp(netid, s->sc_netid) == 0))
                        break;
        }

        return (s);
}

/* ******************* REPLY GENERATION ROUTINES  ************ */

static bool_t
svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
    struct mbuf *body)
{
        SVCXPRT *xprt = rqstp->rq_xprt;
        bool_t ok;

        if (rqstp->rq_args) {
                m_freem(rqstp->rq_args);
                rqstp->rq_args = NULL;
        }

        if (xprt->xp_pool->sp_rcache)
                replay_setreply(xprt->xp_pool->sp_rcache,
                    rply, svc_getrpccaller(rqstp), body);

        if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
                return (FALSE);

        ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body, &rqstp->rq_reply_seq);
        if (rqstp->rq_addr) {
                free(rqstp->rq_addr, M_SONAME);
                rqstp->rq_addr = NULL;
        }

        return (ok);
}

/*
 * Send a reply to an rpc request
 */
bool_t
svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void *xdr_location)
{
        struct rpc_msg rply;
        struct mbuf *m;
        XDR xdrs;
        bool_t ok;

        rply.rm_xid = rqstp->rq_xid;
        rply.rm_direction = REPLY;
        rply.rm_reply.rp_stat = MSG_ACCEPTED;
        rply.acpted_rply.ar_verf = rqstp->rq_verf;
        rply.acpted_rply.ar_stat = SUCCESS;
        rply.acpted_rply.ar_results.where = NULL;
        rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

        MGET(m, M_WAIT, MT_DATA);
        MCLGET(m, M_WAIT);
        m->m_len = 0;
        xdrmbuf_create(&xdrs, m, XDR_ENCODE);
        ok = xdr_results(&xdrs, xdr_location);
        XDR_DESTROY(&xdrs);

        if (ok) {
                return (svc_sendreply_common(rqstp, &rply, m));
        } else {
                m_freem(m);
                return (FALSE);
        }
}
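
/*
 * Skeletal dispatch routine for a hypothetical program, showing the
 * ownership rules: the dispatcher owns the request and must finish by
 * calling svc_freereq() once a reply (or error) has been sent.
 *
 *      static void
 *      myprog_dispatch(struct svc_req *rqstp, SVCXPRT *xprt)
 *      {
 *              int arg, res;
 *
 *              if (!svc_getargs(rqstp, (xdrproc_t) xdr_int, &arg)) {
 *                      svcerr_decode(rqstp);
 *                      svc_freereq(rqstp);
 *                      return;
 *              }
 *              res = arg + 1;
 *              svc_sendreply(rqstp, (xdrproc_t) xdr_int, &res);
 *              svc_freereq(rqstp);
 *      }
 */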

bool_t
svc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
{
        struct rpc_msg rply;

        rply.rm_xid = rqstp->rq_xid;
        rply.rm_direction = REPLY;
        rply.rm_reply.rp_stat = MSG_ACCEPTED;
        rply.acpted_rply.ar_verf = rqstp->rq_verf;
        rply.acpted_rply.ar_stat = SUCCESS;
        rply.acpted_rply.ar_results.where = NULL;
        rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

        return (svc_sendreply_common(rqstp, &rply, m));
}

/*
 * No procedure error reply
 */
void
svcerr_noproc(struct svc_req *rqstp)
{
        SVCXPRT *xprt = rqstp->rq_xprt;
        struct rpc_msg rply;

        rply.rm_xid = rqstp->rq_xid;
        rply.rm_direction = REPLY;
        rply.rm_reply.rp_stat = MSG_ACCEPTED;
        rply.acpted_rply.ar_verf = rqstp->rq_verf;
        rply.acpted_rply.ar_stat = PROC_UNAVAIL;

        if (xprt->xp_pool->sp_rcache)
                replay_setreply(xprt->xp_pool->sp_rcache,
                    &rply, svc_getrpccaller(rqstp), NULL);

        svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Can't decode args error reply
 */
void
svcerr_decode(struct svc_req *rqstp)
{
        SVCXPRT *xprt = rqstp->rq_xprt;
        struct rpc_msg rply;

        rply.rm_xid = rqstp->rq_xid;
        rply.rm_direction = REPLY;
        rply.rm_reply.rp_stat = MSG_ACCEPTED;
        rply.acpted_rply.ar_verf = rqstp->rq_verf;
        rply.acpted_rply.ar_stat = GARBAGE_ARGS;

        if (xprt->xp_pool->sp_rcache)
                replay_setreply(xprt->xp_pool->sp_rcache,
                    &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL);

        svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Some system error
 */
void
svcerr_systemerr(struct svc_req *rqstp)
{
        SVCXPRT *xprt = rqstp->rq_xprt;
        struct rpc_msg rply;

        rply.rm_xid = rqstp->rq_xid;
        rply.rm_direction = REPLY;
        rply.rm_reply.rp_stat = MSG_ACCEPTED;
        rply.acpted_rply.ar_verf = rqstp->rq_verf;
        rply.acpted_rply.ar_stat = SYSTEM_ERR;

        if (xprt->xp_pool->sp_rcache)
                replay_setreply(xprt->xp_pool->sp_rcache,
                    &rply, svc_getrpccaller(rqstp), NULL);

        svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Authentication error reply
 */
void
svcerr_auth(struct svc_req *rqstp, enum auth_stat why)
{
        SVCXPRT *xprt = rqstp->rq_xprt;
        struct rpc_msg rply;

        rply.rm_xid = rqstp->rq_xid;
        rply.rm_direction = REPLY;
        rply.rm_reply.rp_stat = MSG_DENIED;
        rply.rjcted_rply.rj_stat = AUTH_ERROR;
        rply.rjcted_rply.rj_why = why;

        if (xprt->xp_pool->sp_rcache)
                replay_setreply(xprt->xp_pool->sp_rcache,
                    &rply, svc_getrpccaller(rqstp), NULL);

        svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Auth too weak error reply
 */
void
svcerr_weakauth(struct svc_req *rqstp)
{

        svcerr_auth(rqstp, AUTH_TOOWEAK);
}

/*
 * Program unavailable error reply
 */
void
svcerr_noprog(struct svc_req *rqstp)
{
        SVCXPRT *xprt = rqstp->rq_xprt;
        struct rpc_msg rply;

        rply.rm_xid = rqstp->rq_xid;
        rply.rm_direction = REPLY;
        rply.rm_reply.rp_stat = MSG_ACCEPTED;
        rply.acpted_rply.ar_verf = rqstp->rq_verf;
        rply.acpted_rply.ar_stat = PROG_UNAVAIL;

        if (xprt->xp_pool->sp_rcache)
                replay_setreply(xprt->xp_pool->sp_rcache,
                    &rply, svc_getrpccaller(rqstp), NULL);

        svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Program version mismatch error reply
 */
void
svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers)
{
        SVCXPRT *xprt = rqstp->rq_xprt;
        struct rpc_msg rply;

        rply.rm_xid = rqstp->rq_xid;
        rply.rm_direction = REPLY;
        rply.rm_reply.rp_stat = MSG_ACCEPTED;
        rply.acpted_rply.ar_verf = rqstp->rq_verf;
        rply.acpted_rply.ar_stat = PROG_MISMATCH;
        rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
        rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;

        if (xprt->xp_pool->sp_rcache)
                replay_setreply(xprt->xp_pool->sp_rcache,
                    &rply, svc_getrpccaller(rqstp), NULL);

        svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Allocate a new server transport structure. All fields are
 * initialized to zero and xp_p3 is initialized to point at an
 * extension structure to hold various flags and authentication
 * parameters.
 */
SVCXPRT *
svc_xprt_alloc(void)
{
        SVCXPRT *xprt;
        SVCXPRT_EXT *ext;

        xprt = mem_alloc(sizeof(SVCXPRT));
        memset(xprt, 0, sizeof(SVCXPRT));
        ext = mem_alloc(sizeof(SVCXPRT_EXT));
        memset(ext, 0, sizeof(SVCXPRT_EXT));
        xprt->xp_p3 = ext;
        refcount_init(&xprt->xp_refs, 1);

        return (xprt);
}

/*
 * Free a server transport structure.
 */
void
svc_xprt_free(SVCXPRT *xprt)
{

        mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT));
        mem_free(xprt, sizeof(SVCXPRT));
}
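
/*
 * Transport implementations (e.g. svc_vc.c, svc_dg.c) use these
 * helpers roughly as follows (abridged sketch; details vary per
 * transport):
 *
 *      xprt = svc_xprt_alloc();
 *      sx_init(&xprt->xp_lock, "xprt->xp_lock");
 *      xprt->xp_pool = pool;
 *      xprt->xp_socket = so;
 *      xprt_register(xprt);
 *
 * and call svc_xprt_free() from their release method once xp_refs
 * drops to zero.
 */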

/* ******************* SERVER INPUT STUFF ******************* */

/*
 * Read RPC requests from a transport and queue them to be
 * executed. We handle authentication and replay cache replies here.
 * Actually dispatching the RPC is deferred till svc_executereq.
 */
static enum xprt_stat
svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret)
{
        SVCPOOL *pool = xprt->xp_pool;
        struct svc_req *r;
        struct rpc_msg msg;
        struct mbuf *args;
        struct svc_loss_callout *s;
        enum xprt_stat stat;

        /* now receive msgs from xprt (support batch calls) */
        r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO);

        msg.rm_call.cb_cred.oa_base = r->rq_credarea;
        msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES];
        r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES];
        if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) {
                enum auth_stat why;

                /*
                 * Handle replays and authenticate before queuing the
                 * request to be executed.
                 */
                SVC_ACQUIRE(xprt);
                r->rq_xprt = xprt;
                if (pool->sp_rcache) {
                        struct rpc_msg repmsg;
                        struct mbuf *repbody;
                        enum replay_state rs;
                        rs = replay_find(pool->sp_rcache, &msg,
                            svc_getrpccaller(r), &repmsg, &repbody);
                        switch (rs) {
                        case RS_NEW:
                                break;
                        case RS_DONE:
                                SVC_REPLY(xprt, &repmsg, r->rq_addr,
                                    repbody, &r->rq_reply_seq);
                                if (r->rq_addr) {
                                        free(r->rq_addr, M_SONAME);
                                        r->rq_addr = NULL;
                                }
                                m_freem(args);
                                goto call_done;

                        default:
                                m_freem(args);
                                goto call_done;
                        }
                }

                r->rq_xid = msg.rm_xid;
                r->rq_prog = msg.rm_call.cb_prog;
                r->rq_vers = msg.rm_call.cb_vers;
                r->rq_proc = msg.rm_call.cb_proc;
                r->rq_size = sizeof(*r) + m_length(args, NULL);
                r->rq_args = args;
                if ((why = _authenticate(r, &msg)) != AUTH_OK) {
                        /*
                         * RPCSEC_GSS uses this return code
                         * for requests that form part of its
                         * context establishment protocol and
                         * should not be dispatched to the
                         * application.
                         */
                        if (why != RPCSEC_GSS_NODISPATCH)
                                svcerr_auth(r, why);
                        goto call_done;
                }

                if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) {
                        svcerr_decode(r);
                        goto call_done;
                }

                /*
                 * Everything checks out, return request to caller.
                 */
                *rqstp_ret = r;
                r = NULL;
        }
call_done:
        if (r) {
                svc_freereq(r);
                r = NULL;
        }
        if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
                TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link)
                        (*s->slc_dispatch)(xprt);
                xprt_unregister(xprt);
        }

        return (stat);
}

static void
svc_executereq(struct svc_req *rqstp)
{
        SVCXPRT *xprt = rqstp->rq_xprt;
        SVCPOOL *pool = xprt->xp_pool;
        int prog_found;
        rpcvers_t low_vers;
        rpcvers_t high_vers;
        struct svc_callout *s;

        /* now match message with a registered service */
        prog_found = FALSE;
        low_vers = (rpcvers_t) -1L;
        high_vers = (rpcvers_t) 0L;
        TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
                if (s->sc_prog == rqstp->rq_prog) {
                        if (s->sc_vers == rqstp->rq_vers) {
                                /*
                                 * We hand ownership of the request to
                                 * the dispatch method - it must call
                                 * svc_freereq.
                                 */
                                (*s->sc_dispatch)(rqstp, xprt);
                                return;
                        }  /* found correct version */
                        prog_found = TRUE;
                        if (s->sc_vers < low_vers)
                                low_vers = s->sc_vers;
                        if (s->sc_vers > high_vers)
                                high_vers = s->sc_vers;
                }   /* found correct program */
        }

        /*
         * if we got here, the program or version
         * is not served ...
         */
        if (prog_found)
                svcerr_progvers(rqstp, low_vers, high_vers);
        else
                svcerr_noprog(rqstp);

        svc_freereq(rqstp);
}

static void
svc_checkidle(SVCPOOL *pool)
{
        SVCXPRT *xprt, *nxprt;
        time_t timo;
        struct svcxprt_list cleanup;

        TAILQ_INIT(&cleanup);
        TAILQ_FOREACH_SAFE(xprt, &pool->sp_xlist, xp_link, nxprt) {
                /*
                 * Only some transports have idle timers. Don't time
                 * something out which is just waking up.
                 */
                if (!xprt->xp_idletimeout || xprt->xp_thread)
                        continue;

                timo = xprt->xp_lastactive + xprt->xp_idletimeout;
                if (time_uptime > timo) {
                        xprt_unregister_locked(xprt);
                        TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
                }
        }

        mtx_unlock(&pool->sp_lock);
        TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
                SVC_RELEASE(xprt);
        }
        mtx_lock(&pool->sp_lock);
}

static void
svc_assign_waiting_sockets(SVCPOOL *pool)
{
        SVCXPRT *xprt;

        mtx_lock(&pool->sp_lock);
        while ((xprt = TAILQ_FIRST(&pool->sp_active)) != NULL) {
                if (xprt_assignthread(xprt))
                        TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
                else
                        break;
        }
        mtx_unlock(&pool->sp_lock);
}

static void
svc_change_space_used(SVCPOOL *pool, int delta)
{
        unsigned int value;

        value = atomic_fetchadd_int(&pool->sp_space_used, delta) + delta;
        if (delta > 0) {
                if (value >= pool->sp_space_high && !pool->sp_space_throttled) {
                        pool->sp_space_throttled = TRUE;
                        pool->sp_space_throttle_count++;
                }
                if (value > pool->sp_space_used_highest)
                        pool->sp_space_used_highest = value;
        } else {
                if (value < pool->sp_space_low && pool->sp_space_throttled) {
                        pool->sp_space_throttled = FALSE;
                        svc_assign_waiting_sockets(pool);
                }
        }
}
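
/*
 * Worked example of the hysteresis above, using the defaults set in
 * svcpool_create(): with sp_space_high capped at 45MB,
 * sp_space_low = 2 * 45MB / 3 = 30MB.  Once parsed-but-unexecuted
 * request space reaches 45MB the pool throttles and stops draining
 * the sockets; it unthrottles, and re-assigns waiting sockets, only
 * after executed requests bring usage back below 30MB.
 */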

static bool_t
svc_request_space_available(SVCPOOL *pool)
{

        if (pool->sp_space_throttled)
                return (FALSE);
        return (TRUE);
}

static void
svc_run_internal(SVCPOOL *pool, bool_t ismaster)
{
        struct svc_reqlist reqs;
        SVCTHREAD *st, *stpref;
        SVCXPRT *xprt;
        enum xprt_stat stat;
        struct svc_req *rqstp;
        size_t sz;
        int error;

        st = mem_alloc(sizeof(*st));
        st->st_pool = pool;
        st->st_xprt = NULL;
        STAILQ_INIT(&st->st_reqs);
        cv_init(&st->st_cond, "rpcsvc");
        STAILQ_INIT(&reqs);

        mtx_lock(&pool->sp_lock);
        LIST_INSERT_HEAD(&pool->sp_threads, st, st_link);

        /*
         * If we are a new thread which was spawned to cope with
         * increased load, set the state back to SVCPOOL_ACTIVE.
         */
        if (pool->sp_state == SVCPOOL_THREADSTARTING)
                pool->sp_state = SVCPOOL_ACTIVE;

        while (pool->sp_state != SVCPOOL_CLOSING) {
                /*
                 * Create new thread if requested.
                 */
                if (pool->sp_state == SVCPOOL_THREADWANTED) {
                        pool->sp_state = SVCPOOL_THREADSTARTING;
                        pool->sp_lastcreatetime = time_uptime;
                        mtx_unlock(&pool->sp_lock);
                        svc_new_thread(pool);
                        mtx_lock(&pool->sp_lock);
                        continue;
                }

                /*
                 * Check for idle transports once per second.
                 */
                if (time_uptime > pool->sp_lastidlecheck) {
                        pool->sp_lastidlecheck = time_uptime;
                        svc_checkidle(pool);
                }

                xprt = st->st_xprt;
                if (!xprt && STAILQ_EMPTY(&st->st_reqs)) {
                        /*
                         * Enforce maxthreads count.
                         */
                        if (pool->sp_threadcount > pool->sp_maxthreads)
                                break;

                        /*
                         * Before sleeping, see if we can find an
                         * active transport which isn't being serviced
                         * by a thread.
                         */
                        if (svc_request_space_available(pool) &&
                            (xprt = TAILQ_FIRST(&pool->sp_active)) != NULL) {
                                TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
                                SVC_ACQUIRE(xprt);
                                xprt->xp_thread = st;
                                st->st_xprt = xprt;
                                continue;
                        }

                        LIST_INSERT_HEAD(&pool->sp_idlethreads, st, st_ilink);
                        st->st_idle = TRUE;
                        if (ismaster || (!ismaster &&
                            pool->sp_threadcount > pool->sp_minthreads))
                                error = cv_timedwait_sig(&st->st_cond,
                                    &pool->sp_lock, 5 * hz);
                        else
                                error = cv_wait_sig(&st->st_cond,
                                    &pool->sp_lock);
                        if (st->st_idle) {
                                LIST_REMOVE(st, st_ilink);
                                st->st_idle = FALSE;
                        }

                        /*
                         * Reduce worker thread count when idle.
                         */
                        if (error == EWOULDBLOCK) {
                                if (!ismaster
                                    && (pool->sp_threadcount
                                        > pool->sp_minthreads)
                                        && !st->st_xprt
                                        && STAILQ_EMPTY(&st->st_reqs))
                                        break;
                        } else if (error) {
                                mtx_unlock(&pool->sp_lock);
                                svc_exit(pool);
                                mtx_lock(&pool->sp_lock);
                                break;
                        }
                        continue;
                }

                if (xprt) {
                        /*
                         * Drain the transport socket and queue up any
                         * RPCs.
                         */
                        xprt->xp_lastactive = time_uptime;
                        do {
                                if (!svc_request_space_available(pool))
                                        break;
                                mtx_unlock(&pool->sp_lock);
                                rqstp = NULL;
                                stat = svc_getreq(xprt, &rqstp);
                                if (rqstp) {
                                        svc_change_space_used(pool, rqstp->rq_size);
                                        /*
                                         * See if the application has
                                         * a preference for some other
                                         * thread.
                                         */
                                        stpref = st;
                                        if (pool->sp_assign)
                                                stpref = pool->sp_assign(st,
                                                    rqstp);
                                        else
                                                mtx_lock(&pool->sp_lock);

                                        rqstp->rq_thread = stpref;
                                        STAILQ_INSERT_TAIL(&stpref->st_reqs,
                                            rqstp, rq_link);

                                        /*
                                         * If we assigned the request
                                         * to another thread, make
                                         * sure it's awake and continue
                                         * reading from the
                                         * socket. Otherwise, try to
                                         * find some other thread to
                                         * read from the socket and
                                         * execute the request
                                         * immediately.
                                         */
                                        if (stpref == st)
                                                break;
                                        if (stpref->st_idle) {
                                                LIST_REMOVE(stpref, st_ilink);
                                                stpref->st_idle = FALSE;
                                                cv_signal(&stpref->st_cond);
                                        }
                                } else
                                        mtx_lock(&pool->sp_lock);
                        } while (stat == XPRT_MOREREQS
                            && pool->sp_state != SVCPOOL_CLOSING);

                        /*
                         * Move this transport to the end of the
                         * active list to ensure fairness when
                         * multiple transports are active. If this was
                         * the last queued request, svc_getreq will
                         * end up calling xprt_inactive to remove from
                         * the active list.
                         */
                        xprt->xp_thread = NULL;
                        st->st_xprt = NULL;
                        if (xprt->xp_active) {
                                if (!svc_request_space_available(pool) ||
                                    !xprt_assignthread(xprt))
                                        TAILQ_INSERT_TAIL(&pool->sp_active,
                                            xprt, xp_alink);
                        }
                        STAILQ_CONCAT(&reqs, &st->st_reqs);
                        mtx_unlock(&pool->sp_lock);
                        SVC_RELEASE(xprt);
                } else {
                        STAILQ_CONCAT(&reqs, &st->st_reqs);
                        mtx_unlock(&pool->sp_lock);
                }

                /*
                 * Execute what we have queued.
                 */
                sz = 0;
                while ((rqstp = STAILQ_FIRST(&reqs)) != NULL) {
                        STAILQ_REMOVE_HEAD(&reqs, rq_link);
                        sz += rqstp->rq_size;
                        svc_executereq(rqstp);
                }
                svc_change_space_used(pool, -sz);
                mtx_lock(&pool->sp_lock);
        }

        if (st->st_xprt) {
                xprt = st->st_xprt;
                st->st_xprt = NULL;
                SVC_RELEASE(xprt);
        }

        KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
        LIST_REMOVE(st, st_link);
        pool->sp_threadcount--;

        mtx_unlock(&pool->sp_lock);

        cv_destroy(&st->st_cond);
        mem_free(st, sizeof(*st));

        if (!ismaster)
                wakeup(pool);
}

static void
svc_thread_start(void *arg)
{

        svc_run_internal((SVCPOOL *) arg, FALSE);
        kthread_exit();
}

static void
svc_new_thread(SVCPOOL *pool)
{
        struct thread *td;

        pool->sp_threadcount++;
        kthread_add(svc_thread_start, pool,
            pool->sp_proc, &td, 0, 0,
            "%s: service", pool->sp_name);
}

void
svc_run(SVCPOOL *pool)
{
        int i;
        struct proc *p;
        struct thread *td;

        p = curproc;
        td = curthread;
        snprintf(td->td_name, sizeof(td->td_name),
            "%s: master", pool->sp_name);
        pool->sp_state = SVCPOOL_ACTIVE;
        pool->sp_proc = p;
        pool->sp_lastcreatetime = time_uptime;
        pool->sp_threadcount = 1;

        for (i = 1; i < pool->sp_minthreads; i++) {
                svc_new_thread(pool);
        }

        svc_run_internal(pool, TRUE);

        mtx_lock(&pool->sp_lock);
        while (pool->sp_threadcount > 0)
                msleep(pool, &pool->sp_lock, 0, "svcexit", 0);
        mtx_unlock(&pool->sp_lock);
}

void
svc_exit(SVCPOOL *pool)
{
        SVCTHREAD *st;

        mtx_lock(&pool->sp_lock);

        if (pool->sp_state != SVCPOOL_CLOSING) {
                pool->sp_state = SVCPOOL_CLOSING;
                LIST_FOREACH(st, &pool->sp_idlethreads, st_ilink)
                        cv_signal(&st->st_cond);
        }

        mtx_unlock(&pool->sp_lock);
}

bool_t
svc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
        struct mbuf *m;
        XDR xdrs;
        bool_t stat;

        m = rqstp->rq_args;
        rqstp->rq_args = NULL;

        xdrmbuf_create(&xdrs, m, XDR_DECODE);
        stat = xargs(&xdrs, args);
        XDR_DESTROY(&xdrs);

        return (stat);
}

bool_t
svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
        XDR xdrs;

        if (rqstp->rq_addr) {
                free(rqstp->rq_addr, M_SONAME);
                rqstp->rq_addr = NULL;
        }

        xdrs.x_op = XDR_FREE;
        return (xargs(&xdrs, args));
}

void
svc_freereq(struct svc_req *rqstp)
{
        SVCTHREAD *st;
        SVCPOOL *pool;

        st = rqstp->rq_thread;
        if (st) {
                pool = st->st_pool;
                if (pool->sp_done)
                        pool->sp_done(st, rqstp);
        }

        if (rqstp->rq_auth.svc_ah_ops)
                SVCAUTH_RELEASE(&rqstp->rq_auth);

        if (rqstp->rq_xprt) {
                SVC_RELEASE(rqstp->rq_xprt);
        }

        if (rqstp->rq_addr)
                free(rqstp->rq_addr, M_SONAME);

        if (rqstp->rq_args)
                m_freem(rqstp->rq_args);

        free(rqstp, M_RPC);
}