1 /* $NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $ */
4 * SPDX-License-Identifier: BSD-3-Clause
6 * Copyright (c) 2009, Sun Microsystems, Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions are met:
11 * - Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
16 * - Neither the name of Sun Microsystems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
33 #if defined(LIBC_SCCS) && !defined(lint)
34 static char *sccsid2 = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro";
35 static char *sccsid = "@(#)clnt_tcp.c 2.2 88/08/01 4.0 RPCSRC";
36 static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
42 * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
44 * Copyright (C) 1984, Sun Microsystems, Inc.
46 * TCP based RPC supports 'batched calls'.
47 * A sequence of calls may be batched-up in a send buffer. The rpc call
48 * return immediately to the client even though the call was not necessarily
49 * sent. The batching occurs if the results' xdr routine is NULL (0) AND
50 * the rpc timeout value is zero (see clnt.h, rpc).
52 * Clients should NOT casually batch calls that in fact return results; that is,
53 * the server side should be aware that a call is batched and not produce any
54 * return message. Batched calls that produce many result messages can
55 * deadlock (netlock) the client and the server....
57 * Now go hang yourself.
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
64 #include <sys/malloc.h>
66 #include <sys/mutex.h>
69 #include <sys/protosw.h>
70 #include <sys/socket.h>
71 #include <sys/socketvar.h>
73 #include <sys/syslog.h>
79 #include <netinet/tcp.h>
82 #include <rpc/rpc_com.h>
87 struct cmsgcred cmcred;
90 static enum clnt_stat clnt_vc_call(CLIENT *, struct rpc_callextra *,
91 rpcproc_t, struct mbuf *, struct mbuf **, struct timeval);
92 static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
93 static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
94 static void clnt_vc_abort(CLIENT *);
95 static bool_t clnt_vc_control(CLIENT *, u_int, void *);
96 static void clnt_vc_close(CLIENT *);
97 static void clnt_vc_destroy(CLIENT *);
98 static bool_t time_not_ok(struct timeval *);
99 static int clnt_vc_soupcall(struct socket *so, void *arg, int waitflag);
/*
 * Method table hooking this connection-oriented (TCP/stream) transport
 * into the generic CLIENT interface.
 */
static struct clnt_ops clnt_vc_ops = {
	.cl_call =	clnt_vc_call,
	.cl_abort =	clnt_vc_abort,
	.cl_geterr =	clnt_vc_geterr,
	.cl_freeres =	clnt_vc_freeres,
	.cl_close =	clnt_vc_close,
	.cl_destroy =	clnt_vc_destroy,
	.cl_control =	clnt_vc_control
111 static void clnt_vc_upcallsdone(struct ct_data *);
113 static int fake_wchan;
/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control()'s.
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes, 0 => use the default.
 * NB: fd is copied into a private area.
 * NB: The rpch->cl_auth is set null authentication. Caller may wish to
 * set this something more useful.
 *
 * fd should be an open socket
 *
 * NOTE(review): interior lines of this function are not visible in this
 * view of the file (error checks, braces, gotos); the comments below
 * annotate only the code that is shown.
 */
	struct socket *so,		/* open file descriptor */
	struct sockaddr *raddr,		/* server's address */
	const rpcprog_t prog,		/* program number */
	const rpcvers_t vers,		/* version number */
	size_t sendsz,			/* buffer send size */
	size_t recvsz,			/* buffer recv size */
	int intrflag)			/* interruptible */

	CLIENT *cl;			/* client handle */
	struct ct_data *ct = NULL;	/* per-connection private state */
	struct rpc_msg call_msg;
	static uint32_t disrupt;	/* perturbs XIDs across handles */
	struct __rpc_sockinfo si;
	int error, interrupted, one = 1, sleep_flag;

	/* Seed the XID perturbation from the (unique) address pointer. */
	disrupt = (uint32_t)(long)raddr;

	cl = (CLIENT *)mem_alloc(sizeof (*cl));
	ct = (struct ct_data *)mem_alloc(sizeof (*ct));

	mtx_init(&ct->ct_lock, "ct->ct_lock", NULL, MTX_DEF);
	ct->ct_closing = FALSE;
	ct->ct_closed = FALSE;
	ct->ct_upcallrefs = 0;

	/* Connect the socket if the caller has not already done so. */
	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
		error = soconnect(so, raddr, curthread);
		/* presumably guarded by intrflag != 0 -- TODO confirm */
		sleep_flag |= PCATCH;
		/* Wait for the asynchronous connect to complete or fail. */
		while ((so->so_state & SS_ISCONNECTING)
		    && so->so_error == 0) {
			error = msleep(&so->so_timeo, SOCK_MTX(so),
			    sleep_flag, "connec", 0);
			if (error == EINTR || error == ERESTART)
		/* Connect finished; pick up any deferred socket error. */
		error = so->so_error;
		so->so_state &= ~SS_ISCONNECTING;
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_errno = error;

	if (!__rpc_socket2sockinfo(so, &si)) {

	/* Keep-alive so a dead peer is eventually detected. */
	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = SOL_SOCKET;
		sopt.sopt_name = SO_KEEPALIVE;
		sopt.sopt_val = &one;
		sopt.sopt_valsize = sizeof(one);

	/* Disable Nagle so small RPC requests are sent immediately. */
	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = IPPROTO_TCP;
		sopt.sopt_name = TCP_NODELAY;
		sopt.sopt_val = &one;
		sopt.sopt_valsize = sizeof(one);

	ct->ct_closeit = FALSE;	/* caller retains ownership of the socket */

	/*
	 * Set up private data struct
	 */
	ct->ct_wait.tv_sec = -1;	/* -1 => use each call's own timeout */
	ct->ct_wait.tv_usec = -1;
	memcpy(&ct->ct_addr, raddr, raddr->sa_len);

	/*
	 * Initialize call message
	 */
	ct->ct_xid = ((uint32_t)++disrupt) ^ __RPC_GETXID(&now);
	call_msg.rm_xid = ct->ct_xid;
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = (uint32_t)prog;
	call_msg.rm_call.cb_vers = (uint32_t)vers;

	/*
	 * pre-serialize the static part of the call msg and stash it away
	 */
	xdrmem_create(&xdrs, ct->ct_mcallc, MCALL_MSG_SIZE,
	if (! xdr_callhdr(&xdrs, &call_msg)) {
		if (ct->ct_closeit) {
			soclose(ct->ct_socket);
	ct->ct_mpos = XDR_GETPOS(&xdrs);

	ct->ct_waitchan = "rpcrecv";

	/*
	 * Create a client handle which uses xdrrec for serialization
	 * and authnone for authentication.
	 */
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	error = soreserve(ct->ct_socket, sendsz, recvsz);
	if (ct->ct_closeit) {
		soclose(ct->ct_socket);
	cl->cl_ops = &clnt_vc_ops;
	cl->cl_auth = authnone_create();

	/* Register the receive upcall that matches replies to requests. */
	SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
	soupcall_set(ct->ct_socket, SO_RCV, clnt_vc_soupcall, ct);
	SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);

	ct->ct_record = NULL;
	ct->ct_record_resid = 0;
	TAILQ_INIT(&ct->ct_pending);

	/* Failure path: tear down everything allocated above. */
	mtx_destroy(&ct->ct_lock);
	mem_free(ct, sizeof (struct ct_data));
	mem_free(cl, sizeof (CLIENT));

	return ((CLIENT *)NULL);
/*
 * Perform one RPC over the connected stream: marshal the call header
 * and arguments, prepend a TCP record mark, queue a ct_request so the
 * socket upcall can match the reply by XID, send the request, and sleep
 * until the reply (or an error/timeout) arrives.
 *
 * NOTE(review): interior lines of this function are not visible in this
 * view of the file; the comments below annotate only the code shown.
 */
static enum clnt_stat
	CLIENT *cl,			/* client handle */
	struct rpc_callextra *ext,	/* call metadata */
	rpcproc_t proc,			/* procedure number */
	struct mbuf *args,		/* pointer to args */
	struct mbuf **resultsp,		/* pointer to results */
	struct timeval utimeout)

	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	struct rpc_err *errp;
	struct rpc_msg reply_msg;
	int nrefreshes = 2;		/* number of times to refresh cred */
	struct timeval timeout;
	struct mbuf *mreq = NULL, *results;
	struct ct_request *cr;

	cr = malloc(sizeof(struct ct_request), M_RPC, M_WAITOK);

	mtx_lock(&ct->ct_lock);

	/* Refuse new calls once the connection is shutting down. */
	if (ct->ct_closing || ct->ct_closed) {
		mtx_unlock(&ct->ct_lock);
		return (RPC_CANTSEND);

	errp = &ct->ct_error;

	/* ct_wait of -1 means "no default set via clnt_control". */
	if (ct->ct_wait.tv_usec == -1) {
		timeout = utimeout;	/* use supplied timeout */
		timeout = ct->ct_wait;	/* use default timeout */

	/*
	 * After 15sec of looping, allow it to return RPC_CANTSEND, which will
	 * cause the clnt_reconnect layer to create a new TCP connection.
	 */
	mtx_assert(&ct->ct_lock, MA_OWNED);
	if (ct->ct_closing || ct->ct_closed) {
		mtx_unlock(&ct->ct_lock);
		return (RPC_CANTSEND);

	mtx_unlock(&ct->ct_lock);

	/*
	 * Leave space to pre-pend the record mark.
	 */
	mreq = m_gethdr(M_WAITOK, MT_DATA);
	mreq->m_data += sizeof(uint32_t);
	KASSERT(ct->ct_mpos + sizeof(uint32_t) <= MHLEN,
	    ("RPC header too big"));
	/* Copy in the pre-serialized static call header. */
	bcopy(ct->ct_mcallc, mreq->m_data, ct->ct_mpos);
	mreq->m_len = ct->ct_mpos;

	/*
	 * The XID is the first thing in the request.
	 */
	*mtod(mreq, uint32_t *) = htonl(xid);

	xdrmbuf_create(&xdrs, mreq, XDR_ENCODE);

	errp->re_status = stat = RPC_SUCCESS;

	/* Marshal procedure number, credentials and the argument mbufs. */
	if ((! XDR_PUTINT32(&xdrs, &proc)) ||
	    (! AUTH_MARSHALL(auth, xid, &xdrs,
		m_copym(args, 0, M_COPYALL, M_WAITOK)))) {
		errp->re_status = stat = RPC_CANTENCODEARGS;
		mtx_lock(&ct->ct_lock);
	mreq->m_pkthdr.len = m_length(mreq, NULL);

	/*
	 * Prepend a record marker containing the packet length.
	 */
	M_PREPEND(mreq, sizeof(uint32_t), M_WAITOK);
	*mtod(mreq, uint32_t *) =
	    htonl(0x80000000 | (mreq->m_pkthdr.len - sizeof(uint32_t)));

	mtx_lock(&ct->ct_lock);
	/*
	 * Check to see if the other end has already started to close down
	 * the connection. The upcall will have set ct_error.re_status
	 * to RPC_CANTRECV if this is the case.
	 * If the other end starts to close down the connection after this
	 * point, it will be detected later when cr_error is checked,
	 * since the request is in the ct_pending queue.
	 */
	if (ct->ct_error.re_status == RPC_CANTRECV) {
		if (errp != &ct->ct_error) {
			errp->re_errno = ct->ct_error.re_errno;
			errp->re_status = RPC_CANTRECV;

	/* Queue the request so the upcall can match the reply by XID. */
	TAILQ_INSERT_TAIL(&ct->ct_pending, cr, cr_link);
	mtx_unlock(&ct->ct_lock);

	/*
	 * sosend consumes mreq.
	 */
	error = sosend(ct->ct_socket, NULL, NULL, mreq, NULL, 0, curthread);

	/* Flow-controlled or restartable send: wait for space and retry. */
	if (error == EMSGSIZE || (error == ERESTART &&
	    (ct->ct_waitflag & PCATCH) == 0 && trycnt-- > 0)) {
		SOCKBUF_LOCK(&ct->ct_socket->so_snd);
		sbwait(&ct->ct_socket->so_snd);
		SOCKBUF_UNLOCK(&ct->ct_socket->so_snd);
		AUTH_VALIDATE(auth, xid, NULL, NULL);
		mtx_lock(&ct->ct_lock);
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		/* Sleep for 1 clock tick before trying the sosend() again. */
		msleep(&fake_wchan, &ct->ct_lock, 0, "rpclpsnd", 1);

	/* Prepare a reply template; the verifier lands in cr_verf. */
	reply_msg.acpted_rply.ar_verf.oa_flavor = AUTH_NULL;
	reply_msg.acpted_rply.ar_verf.oa_base = cr->cr_verf;
	reply_msg.acpted_rply.ar_verf.oa_length = 0;
	reply_msg.acpted_rply.ar_results.where = NULL;
	reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;

	mtx_lock(&ct->ct_lock);
	/* Send failed: drop the queued request and report the error. */
	TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
	errp->re_errno = error;
	errp->re_status = stat = RPC_CANTSEND;

	/*
	 * Check to see if we got an upcall while waiting for the
	 * lock. In both these cases, the request has been removed
	 * from ct->ct_pending.
	 */
	TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
	errp->re_errno = cr->cr_error;
	errp->re_status = stat = RPC_CANTRECV;
	TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);

	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		/* Batched call: do not wait for any reply. */
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_status = stat = RPC_TIMEDOUT;

	/* Sleep until the upcall wakes us with a matching reply. */
	error = msleep(cr, &ct->ct_lock, ct->ct_waitflag, ct->ct_waitchan,

	TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);

	/*
	 * The sleep returned an error so our request is still
	 * on the list. Turn the error code into an
	 * appropriate client status.
	 */
	errp->re_errno = error;
	errp->re_status = stat;

	/*
	 * We were woken up by the upcall. If the
	 * upcall had a receive error, report that,
	 * otherwise we have a reply.
	 */
	errp->re_errno = cr->cr_error;
	errp->re_status = stat = RPC_CANTRECV;

	/*
	 * Now decode and validate the response. We need to drop the
	 * lock since xdr_replymsg may end up sleeping in malloc.
	 */
	mtx_unlock(&ct->ct_lock);

	if (ext && ext->rc_feedback)
		ext->rc_feedback(FEEDBACK_OK, proc, ext->rc_feedback_arg);

	xdrmbuf_create(&xdrs, cr->cr_mrep, XDR_DECODE);
	ok = xdr_replymsg(&xdrs, &reply_msg);

	/* A reply that was both accepted and successful is RPC_SUCCESS. */
	if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
	    (reply_msg.acpted_rply.ar_stat == SUCCESS))
		errp->re_status = stat = RPC_SUCCESS;
		stat = _seterr_reply(&reply_msg, errp);

	if (stat == RPC_SUCCESS) {
		results = xdrmbuf_getall(&xdrs);
		/* Validate the server's verifier before trusting results. */
		if (!AUTH_VALIDATE(auth, xid,
		    &reply_msg.acpted_rply.ar_verf,
			errp->re_status = stat = RPC_AUTHERROR;
			errp->re_why = AUTH_INVALIDRESP;
		    ("auth validated but no result"));
	}			/* end successful completion */
	/*
	 * If unsuccessful AND error is an authentication error
	 * then refresh credentials and try again, else break
	 */
	else if (stat == RPC_AUTHERROR)
		/* maybe our credentials need to be refreshed ... */
		if (nrefreshes > 0 &&
		    AUTH_REFRESH(auth, &reply_msg)) {
			mtx_lock(&ct->ct_lock);
		/* end of unsuccessful completion */
	}	/* end of valid reply message */
	errp->re_status = stat = RPC_CANTDECODERES;
	mtx_lock(&ct->ct_lock);

	mtx_assert(&ct->ct_lock, MA_OWNED);

	KASSERT(stat != RPC_SUCCESS || *resultsp,
	    ("RPC_SUCCESS without reply"));

	m_freem(cr->cr_mrep);

	mtx_unlock(&ct->ct_lock);

	/* On failure, release any auth state associated with this xid. */
	if (auth && stat != RPC_SUCCESS)
		AUTH_VALIDATE(auth, xid, NULL, NULL);
587 clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
589 struct ct_data *ct = (struct ct_data *) cl->cl_private;
591 *errp = ct->ct_error;
595 clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
600 xdrs.x_op = XDR_FREE;
601 dummy = (*xdr_res)(&xdrs, res_ptr);
608 clnt_vc_abort(CLIENT *cl)
613 clnt_vc_control(CLIENT *cl, u_int request, void *info)
615 struct ct_data *ct = (struct ct_data *)cl->cl_private;
619 mtx_lock(&ct->ct_lock);
623 ct->ct_closeit = TRUE;
624 mtx_unlock(&ct->ct_lock);
626 case CLSET_FD_NCLOSE:
627 ct->ct_closeit = FALSE;
628 mtx_unlock(&ct->ct_lock);
634 /* for other requests which use info */
636 mtx_unlock(&ct->ct_lock);
641 if (time_not_ok((struct timeval *)info)) {
642 mtx_unlock(&ct->ct_lock);
645 ct->ct_wait = *(struct timeval *)infop;
648 *(struct timeval *)infop = ct->ct_wait;
650 case CLGET_SERVER_ADDR:
651 (void) memcpy(info, &ct->ct_addr, (size_t)ct->ct_addr.ss_len);
655 * Slightly different semantics to userland - we use
656 * sockaddr instead of netbuf.
658 memcpy(info, &ct->ct_addr, ct->ct_addr.ss_len);
660 case CLSET_SVC_ADDR: /* set to new address */
661 mtx_unlock(&ct->ct_lock);
664 *(uint32_t *)info = ct->ct_xid;
667 /* This will set the xid of the NEXT call */
668 /* decrement by 1 as clnt_vc_call() increments once */
669 ct->ct_xid = *(uint32_t *)info - 1;
673 * This RELIES on the information that, in the call body,
674 * the version number field is the fifth field from the
675 * beginning of the RPC header. MUST be changed if the
676 * call_struct is changed
679 ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
680 4 * BYTES_PER_XDR_UNIT));
684 *(uint32_t *)(void *)(ct->ct_mcallc +
685 4 * BYTES_PER_XDR_UNIT) =
686 htonl(*(uint32_t *)info);
691 * This RELIES on the information that, in the call body,
692 * the program number field is the fourth field from the
693 * beginning of the RPC header. MUST be changed if the
694 * call_struct is changed
697 ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
698 3 * BYTES_PER_XDR_UNIT));
702 *(uint32_t *)(void *)(ct->ct_mcallc +
703 3 * BYTES_PER_XDR_UNIT) =
704 htonl(*(uint32_t *)info);
708 ct->ct_waitchan = (const char *)info;
712 *(const char **) info = ct->ct_waitchan;
715 case CLSET_INTERRUPTIBLE:
717 ct->ct_waitflag = PCATCH;
722 case CLGET_INTERRUPTIBLE:
724 *(int *) info = TRUE;
726 *(int *) info = FALSE;
729 case CLSET_BACKCHANNEL:
730 xprt = (SVCXPRT *)info;
731 if (ct->ct_backchannelxprt == NULL) {
733 ct->ct_backchannelxprt = xprt;
738 mtx_unlock(&ct->ct_lock);
742 mtx_unlock(&ct->ct_lock);
/*
 * Shut down the receive side of the connection: remove the socket
 * upcall, fail any pending requests with ESHUTDOWN, and wait for all
 * threads in clnt_vc_call() to drain before marking the handle closed.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * annotate only the visible code.
 */
clnt_vc_close(CLIENT *cl)

	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	struct ct_request *cr;

	mtx_lock(&ct->ct_lock);
	mtx_unlock(&ct->ct_lock);

	/* If another thread is already closing, just wait for it. */
	if (ct->ct_closing) {
		while (ct->ct_closing)
			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
		KASSERT(ct->ct_closed, ("client should be closed"));
		mtx_unlock(&ct->ct_lock);

	/* We perform the close; let other threads see it in progress. */
	ct->ct_closing = TRUE;
	mtx_unlock(&ct->ct_lock);

	/* Remove the upcall and wait for in-flight upcalls to finish. */
	SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
	soupcall_clear(ct->ct_socket, SO_RCV);
	clnt_vc_upcallsdone(ct);
	SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);

	/*
	 * Abort any pending requests and wait until everyone
	 * has finished with clnt_vc_call.
	 */
	mtx_lock(&ct->ct_lock);
	TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
		cr->cr_error = ESHUTDOWN;

	while (ct->ct_threads)
		msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);

	/* Done: mark closed and wake threads waiting in the branch above. */
	ct->ct_closing = FALSE;
	ct->ct_closed = TRUE;
	mtx_unlock(&ct->ct_lock);
/*
 * Release all resources for the handle: detach any backchannel
 * transport (taking xp_lock before re-taking ct_lock to avoid a
 * lock-order reversal), close the socket if we own it, and free the
 * private data and the CLIENT itself.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * annotate only the visible code.
 */
clnt_vc_destroy(CLIENT *cl)

	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	struct socket *so = NULL;

	mtx_lock(&ct->ct_lock);
	/* Detach the backchannel transport, if one was registered. */
	xprt = ct->ct_backchannelxprt;
	ct->ct_backchannelxprt = NULL;
	mtx_unlock(&ct->ct_lock);	/* To avoid a LOR. */
	sx_xlock(&xprt->xp_lock);
	mtx_lock(&ct->ct_lock);
	sx_xunlock(&xprt->xp_lock);

	/* Only take the socket if we are responsible for closing it. */
	if (ct->ct_closeit) {
	mtx_unlock(&ct->ct_lock);

	mtx_destroy(&ct->ct_lock);
	/* Half-close the stream; presumably full close happens elsewhere
	 * when ct_closeit was set -- the relevant lines are not visible. */
	soshutdown(so, SHUT_WR);
	m_freem(ct->ct_record);
	mem_free(ct, sizeof(struct ct_data));
	if (cl->cl_netid && cl->cl_netid[0])
		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
	if (cl->cl_tp && cl->cl_tp[0])
		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof(CLIENT));
841 * Make sure that the time is not garbage. -1 value is disallowed.
842 * Note this is different from time_not_ok in clnt_dg.c
845 time_not_ok(struct timeval *t)
847 return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
848 t->tv_usec <= -1 || t->tv_usec > 1000000);
/*
 * Socket receive upcall: runs whenever data arrives on the connection.
 * Drains the socket, reassembles record-marked RPC messages, and then
 * either hands backchannel CALLs to the registered backchannel
 * transport or matches REPLYs to waiting requests by XID.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * annotate only the visible code.
 */
clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)

	struct ct_data *ct = (struct ct_data *) arg;
	struct ct_request *cr;
	int error, rcvflag, foundreq;
	uint32_t xid_plus_direction[2], header;

	/*
	 * If another thread is already here, it must be in
	 * soreceive(), so just return to avoid races with it.
	 * ct_upcallrefs is protected by the SOCKBUF_LOCK(),
	 * which is held in this function, except when
	 * soreceive() is called.
	 */
	if (ct->ct_upcallrefs > 0)

	/*
	 * Read as much as possible off the socket and link it
	 */
	uio.uio_resid = 1000000000;	/* effectively "all available data" */
	uio.uio_td = curthread;
	rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK;
	/* soreceive() takes the sockbuf lock itself; drop it here. */
	SOCKBUF_UNLOCK(&so->so_rcv);
	error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag);
	SOCKBUF_LOCK(&so->so_rcv);

	if (error == EWOULDBLOCK) {
		/*
		 * We must re-test for readability after
		 * taking the lock to protect us in the case
		 * where a new packet arrives on the socket
		 * after our call to soreceive fails with
		 */
	if (error == 0 && m == NULL) {
		/*
		 * We must have got EOF trying
		 * to read from the stream.
		 */
	/* Append the new data to the raw, unparsed mbuf chain. */
	if (ct->ct_raw != NULL)
		m_last(ct->ct_raw)->m_next = m;
	rawlen = m_length(ct->ct_raw, NULL);

	/* Now, process as much of ct_raw as possible. */
	/*
	 * If ct_record_resid is zero, we are waiting for a
	 */
	if (ct->ct_record_resid == 0) {
		/* Need the full 4-byte record mark before proceeding. */
		if (rawlen < sizeof(uint32_t))
		m_copydata(ct->ct_raw, 0, sizeof(uint32_t),
		header = ntohl(header);
		/* Low 31 bits: fragment length; high bit: end-of-record. */
		ct->ct_record_resid = header & 0x7fffffff;
		ct->ct_record_eor = ((header & 0x80000000) != 0);
		m_adj(ct->ct_raw, sizeof(uint32_t));
		rawlen -= sizeof(uint32_t);

	/*
	 * Move as much of the record as possible to
	 */
	if (rawlen <= ct->ct_record_resid) {
		/* Everything buffered belongs to the current record. */
		if (ct->ct_record != NULL)
			m_last(ct->ct_record)->m_next =
		ct->ct_record = ct->ct_raw;
		ct->ct_record_resid -= rawlen;
		/* Otherwise split off the tail belonging to the next record. */
		m = m_split(ct->ct_raw, ct->ct_record_resid,
		if (ct->ct_record != NULL)
			m_last(ct->ct_record)->m_next =
		ct->ct_record = ct->ct_raw;
		rawlen -= ct->ct_record_resid;
		ct->ct_record_resid = 0;

	/* Record still incomplete: wait for more data to arrive. */
	if (ct->ct_record_resid > 0)

	/*
	 * If we have the entire record, see if we can
	 * match it to a request.
	 */
	if (ct->ct_record_eor) {
		/*
		 * The XID is in the first uint32_t of
		 * the reply and the message direction
		 */
		if (ct->ct_record->m_len <
		    sizeof(xid_plus_direction) &&
		    m_length(ct->ct_record, NULL) <
		    sizeof(xid_plus_direction)) {
			/*
			 * The data in the TCP stream is
			 * corrupted such that there is no
			 * valid RPC message to parse.
			 * I think it best to close this
			 * connection and allow
			 * clnt_reconnect_call() to try
			 * and establish a new one.
			 */
			printf("clnt_vc_soupcall: "
			    "connection data corrupted\n");
		m_copydata(ct->ct_record, 0,
		    sizeof(xid_plus_direction),
		    (char *)xid_plus_direction);
		xid_plus_direction[0] =
		    ntohl(xid_plus_direction[0]);
		xid_plus_direction[1] =
		    ntohl(xid_plus_direction[1]);
		/* Check message direction. */
		if (xid_plus_direction[1] == CALL) {
			/* This is a backchannel request. */
			mtx_lock(&ct->ct_lock);
			xprt = ct->ct_backchannelxprt;
			mtx_unlock(&ct->ct_lock);
			/* Just throw it away. */
			m_freem(ct->ct_record);
			ct->ct_record = NULL;
			cd = (struct cf_conn *)
			/*
			 * The requests are chained
			 * in the m_nextpkt list.
			 */
			while (m2 != NULL &&
			    m2->m_nextpkt != NULL)
				/* Find end of list. */
			ct->ct_record->m_nextpkt =
			ct->ct_record = NULL;
			mtx_unlock(&ct->ct_lock);
			/* Reply path: search ct_pending for a matching XID. */
			mtx_lock(&ct->ct_lock);
			TAILQ_FOREACH(cr, &ct->ct_pending,
			    xid_plus_direction[0]) {
				/*
				 * the XID to zero so
				 * that we will ignore
				 */
			mtx_unlock(&ct->ct_lock);
			/* Presumably no waiter matched: discard the reply. */
			m_freem(ct->ct_record);
			ct->ct_record = NULL;

	/*
	 * This socket is broken, so mark that it cannot
	 * receive and fail all RPCs waiting for a reply
	 * on it, so that they will be retried on a new
	 * TCP connection created by clnt_reconnect_X().
	 */
	mtx_lock(&ct->ct_lock);
	ct->ct_error.re_status = RPC_CANTRECV;
	ct->ct_error.re_errno = error;
	TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
		cr->cr_error = error;
	mtx_unlock(&ct->ct_lock);

	/* Last upcall out wakes threads in clnt_vc_upcallsdone(). */
	ct->ct_upcallrefs--;
	if (ct->ct_upcallrefs < 0)
		panic("rpcvc upcall refcnt");
	if (ct->ct_upcallrefs == 0)
		wakeup(&ct->ct_upcallrefs);
1100 * Wait for all upcalls in progress to complete.
1103 clnt_vc_upcallsdone(struct ct_data *ct)
1106 SOCKBUF_LOCK_ASSERT(&ct->ct_socket->so_rcv);
1108 while (ct->ct_upcallrefs > 0)
1109 (void) msleep(&ct->ct_upcallrefs,
1110 SOCKBUF_MTX(&ct->ct_socket->so_rcv), 0, "rpcvcup", 0);