/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Socket operations for use by nfs
 */
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <rpc/rpcclnt.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfs/xdr_subs.h>
#include <nfsclient/nfsm_subs.h>
#include <nfsclient/nfsmount.h>
#include <nfsclient/nfsnode.h>

#include <nfs4client/nfs4.h>
/*
 * Estimate RTO for an NFS RPC sent via an unreliable datagram.
 * Use the mean and mean deviation of RTT for the appropriate type of RPC
 * for the frequent RPCs and a default for the others.
 * The justification for doing "other" this way is that these RPCs
 * happen so infrequently that timer estimates would probably be stale.
 * Also, since many of these RPCs are
 * non-idempotent, a conservative timeout is desired.
 * getattr, lookup - A+2D
 * read, write     - A+4D
 * other           - nm_timeo
 */
#define	NFS_RTO(n, t) \
	((t) == 0 ? (n)->nm_timeo : \
	 ((t) < 3 ? \
	  (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
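/*
 * Worked example (illustrative figures, not from the original source):
 * for a type-1 timer with nm_srtt[0] == 64 and nm_sdrtt[0] == 16, the
 * first arm of NFS_RTO gives (((64 + 3) >> 2) + 16 + 1) >> 1 == 16,
 * i.e. roughly a quarter of the smoothed rtt plus the smoothed
 * deviation, halved.
 */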
/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0,
};
static int	nfs_realign_test;
static int	nfs_realign_count;
static int	nfs_bufpackets = 4;
static int	nfs_reconnects;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
    "number of times the nfs client has had to reconnect");
/*
 * There is a congestion window for outstanding RPCs maintained per mount
 * point. The cwnd size is adjusted in roughly the way that
 * Van Jacobson, "Congestion Avoidance and Control", in Proceedings of
 * SIGCOMM '88, ACM, August 1988,
 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
 * and incremented by 1/cwnd when each RPC reply is received and a full cwnd
 * of RPCs is in progress.
 * (The sent count and cwnd are scaled for integer arithmetic.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (average RTT three times larger), I suspect due to the
 * large RTT that NFS RPCs have.
 */
#define	NFS_CWNDSCALE	256
#define	NFS_MAXCWND	(NFS_CWNDSCALE * 32)
#define	NFS_NBACKOFF	8
static int nfs_backoff[NFS_NBACKOFF] = { 2, 4, 8, 16, 32, 64, 128, 256, };
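/*
 * Worked example of the scaled arithmetic (illustrative only): the
 * initial window of NFS_MAXCWND / 2 == 4096 represents 16 outstanding
 * RPCs at scale 256. While a full window is in flight, each reply adds
 * (NFS_CWNDSCALE * NFS_CWNDSCALE + cwnd/2) / cwnd == 16 to cwnd (integer
 * division), so about one window's worth of replies grows the window by
 * one RPC — the 1/cwnd additive increase described above.
 */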
struct callout	nfs_callout;

static int	nfs_msg(struct thread *, const char *, const char *, int);
static int	nfs_realign(struct mbuf **pm, int hsiz);
static int	nfs_reply(struct nfsreq *);
static void	nfs_softterm(struct nfsreq *rep);
static int	nfs_reconnect(struct nfsreq *rep);
static void	nfs_clnt_tcp_soupcall(struct socket *so, void *arg, int waitflag);
static void	nfs_clnt_udp_soupcall(struct socket *so, void *arg, int waitflag);
static void	wakeup_nfsreq(struct nfsreq *req);

extern struct mtx nfs_reqq_mtx;
extern struct mtx nfs_reply_mtx;
/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if an error occurs.
 */
int
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct socket *so;
	int error, rcvreserve, sndreserve;
	int pktscale;
	struct sockaddr *saddr;
	struct thread *td = &thread0;	/* only used for socreate and sobind */

	if (nmp->nm_sotype == SOCK_STREAM) {
		mtx_lock(&nmp->nm_nfstcpstate.mtx);
		nmp->nm_nfstcpstate.flags |= NFS_TCP_EXPECT_RPCMARKER;
		nmp->nm_nfstcpstate.rpcresid = 0;
		mtx_unlock(&nmp->nm_nfstcpstate.mtx);
	}
	saddr = nmp->nm_nam;
	error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype,
	    nmp->nm_soproto, nmp->nm_mountp->mnt_cred, td);
	if (error)
		goto bad;
	so = nmp->nm_so;
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port number.
	 */
	if (nmp->nm_flag & NFSMNT_RESVPORT) {
		struct sockopt sopt;
		int ip, ip2, len;
		struct sockaddr_in6 ssin;
		struct sockaddr *sa;

		bzero(&sopt, sizeof sopt);
		switch(saddr->sa_family) {
		case AF_INET:
			sopt.sopt_level = IPPROTO_IP;
			sopt.sopt_name = IP_PORTRANGE;
			ip = IP_PORTRANGE_LOW;
			ip2 = IP_PORTRANGE_DEFAULT;
			len = sizeof (struct sockaddr_in);
			break;
#ifdef INET6
		case AF_INET6:
			sopt.sopt_level = IPPROTO_IPV6;
			sopt.sopt_name = IPV6_PORTRANGE;
			ip = IPV6_PORTRANGE_LOW;
			ip2 = IPV6_PORTRANGE_DEFAULT;
			len = sizeof (struct sockaddr_in6);
			break;
#endif
		}
		sa = (struct sockaddr *)&ssin;
		bzero(sa, len);
		sa->sa_len = len;
		sa->sa_family = saddr->sa_family;
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
		error = sobind(so, sa, td);
		if (error)
			goto bad;
		sopt.sopt_val = (void *)&ip2;
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, nmp->nm_nam, td);
		if (error)
			goto bad;

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		SOCK_LOCK(so);
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) msleep(&so->so_timeo, SOCK_MTX(so),
			    PSOCK, "nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
				so->so_state &= ~SS_ISCONNECTING;
				SOCK_UNLOCK(so);
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCK_UNLOCK(so);
			goto bad;
		}
		SOCK_UNLOCK(so);
	}
	so->so_rcv.sb_timeo = 12 * hz;
	so->so_snd.sb_timeo = 5 * hz;

	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * bounds.
	 */
	pktscale = nfs_bufpackets;
	if (pktscale < 2)
		pktscale = 2;
	if (pktscale > 64)
		pktscale = 64;

	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else {
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_dir = SOPT_SET;
			sopt.sopt_level = SOL_SOCKET;
			sopt.sopt_name = SO_KEEPALIVE;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_dir = SOPT_SET;
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_NODELAY;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
	}
	error = soreserve(so, sndreserve, rcvreserve);
	if (error)
		goto bad;
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_upcallarg = (caddr_t)nmp;
	if (so->so_type == SOCK_STREAM)
		so->so_upcall = nfs_clnt_tcp_soupcall;
	else
		so->so_upcall = nfs_clnt_udp_soupcall;
	so->so_rcv.sb_flags |= SB_UPCALL;
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_flags |= SB_NOINTR;
	SOCKBUF_UNLOCK(&so->so_snd);

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
	    nmp->nm_srtt[3] = (NFS_TIMEO << 3);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
	    nmp->nm_sdrtt[3] = 0;
	nmp->nm_cwnd = NFS_MAXCWND / 2;	/* Initial send window */
	nmp->nm_sent = 0;
	nmp->nm_timeouts = 0;
	return (0);

bad:
	nfs_disconnect(nmp);
	return (error);
}
/*
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
static int
nfs_reconnect(struct nfsreq *rep)
{
	struct nfsreq *rp;
	struct nfsmount *nmp = rep->r_nmp;
	int error;

	nfs_reconnects++;
	nfs_disconnect(nmp);
	while ((error = nfs_connect(nmp, rep)) != 0) {
		if (error == ERESTART)
			error = EINTR;
		if (error == EIO || error == EINTR)
			return (error);
		(void) tsleep(&lbolt, PSOCK, "nfscon", 0);
	}

	/*
	 * Clear the FORCE_RECONNECT flag only after the connect
	 * succeeds, to prevent races between multiple processes
	 * waiting on the mountpoint where the connection is being
	 * torn down. The first one to acquire the sndlock will
	 * retry the connection. The others block on the sndlock
	 * until the connection is established successfully, and
	 * then re-transmit the request.
	 */
	mtx_lock(&nmp->nm_nfstcpstate.mtx);
	nmp->nm_nfstcpstate.flags &= ~NFS_TCP_FORCE_RECONNECT;
	mtx_unlock(&nmp->nm_nfstcpstate.mtx);

	/*
	 * Loop through the outstanding request list and fix up all
	 * requests on the old socket.
	 */
	mtx_lock(&nfs_reqq_mtx);
	TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
		if (rp->r_nmp == nmp)
			rp->r_flags |= R_MUSTRESEND;
	}
	mtx_unlock(&nfs_reqq_mtx);
	return (0);
}
/*
 * NFS disconnect. Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct socket *so;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = NULL;
		SOCKBUF_LOCK(&so->so_rcv);
		so->so_upcallarg = NULL;
		so->so_upcall = NULL;
		so->so_rcv.sb_flags &= ~SB_UPCALL;
		SOCKBUF_UNLOCK(&so->so_rcv);
		soshutdown(so, SHUT_WR);
		soclose(so);
	}
}

void
nfs_safedisconnect(struct nfsmount *nmp)
{
	struct nfsreq dummyreq;

	bzero(&dummyreq, sizeof(dummyreq));
	dummyreq.r_nmp = nmp;
	nfs_disconnect(nmp);
}
/*
 * This is the nfs send routine. For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (?)
 */
int
nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
    struct nfsreq *rep)
{
	struct sockaddr *sendnam;
	int error, error2, soflags, flags;

	KASSERT(rep, ("nfs_send: called with rep == NULL"));

	error = nfs_sigintr(rep->r_nmp, rep, rep->r_td);
	if (error) {
		m_freem(top);
		return (error);
	}
	if ((so = rep->r_nmp->nm_so) == NULL) {
		rep->r_flags |= R_MUSTRESEND;
		m_freem(top);
		return (0);
	}
	rep->r_flags &= ~R_MUSTRESEND;
	soflags = rep->r_nmp->nm_soflags;

	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = NULL;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = so->so_proto->pr_usrreqs->pru_sosend(so, sendnam, 0, top, 0,
	    flags, curthread /*XXX*/);
	if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
		error = 0;
		rep->r_flags |= R_MUSTRESEND;
	}

	if (error) {
		/*
		 * Don't report EPIPE errors on nfs sockets.
		 * These can be due to idle TCP mounts which will be closed
		 * by netapp, solaris, etc. if left idle too long.
		 */
		if (error != EPIPE) {
			log(LOG_INFO, "nfs send error %d for server %s\n",
			    error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
		}
		/*
		 * Deal with errors for the client side.
		 */
		error2 = NFS_SIGREP(rep);
		if (error2)
			error = error2;
		else
			rep->r_flags |= R_MUSTRESEND;

		/*
		 * Handle any recoverable (soft) socket errors here. (?)
		 */
		if (error != EINTR && error != ERESTART && error != EIO &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}
static int
nfs_reply(struct nfsreq *rep)
{
	register struct socket *so;
	register struct mbuf *m;
	int error = 0, sotype, slpflag = 0;

	sotype = rep->r_nmp->nm_sotype;
	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 */
	if (sotype != SOCK_DGRAM) {
		error = nfs_sndlock(rep);
		if (error)
			return (error);
tryagain:
		if (rep->r_flags & R_SOFTTERM) {
			nfs_sndunlock(rep);
			return (EINTR);
		}
		so = rep->r_nmp->nm_so;
		mtx_lock(&rep->r_nmp->nm_nfstcpstate.mtx);
		if (!so ||
		    (rep->r_nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT)) {
			mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
			error = nfs_reconnect(rep);
			if (error) {
				nfs_sndunlock(rep);
				return (error);
			}
			goto tryagain;
		} else
			mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
		while (rep->r_flags & R_MUSTRESEND) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, M_WAIT);
			nfsstats.rpcretries++;
			error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
			if (error) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(rep)) != 0) {
					nfs_sndunlock(rep);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(rep);
	}
	if (rep->r_nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	mtx_lock(&nfs_reply_mtx);
	while ((rep->r_mrep == NULL) && (error == 0) &&
	    ((rep->r_flags & R_SOFTTERM) == 0) &&
	    ((sotype == SOCK_DGRAM) || ((rep->r_flags & R_MUSTRESEND) == 0)))
		error = msleep((caddr_t)rep, &nfs_reply_mtx,
		    slpflag | (PZERO - 1), "nfsreq", 0);
	mtx_unlock(&nfs_reply_mtx);
	if (error == EINTR || error == ERESTART)
		/* NFS operations aren't restartable. Map ERESTART to EINTR */
		return (EINTR);
	if (rep->r_flags & R_SOFTTERM)
		/* Request was terminated because we exceeded the retries (soft mount) */
		return (ETIMEDOUT);
	if (sotype == SOCK_STREAM) {
		mtx_lock(&rep->r_nmp->nm_nfstcpstate.mtx);
		if (((rep->r_nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT) ||
		    (rep->r_flags & R_MUSTRESEND))) {
			mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
			error = nfs_sndlock(rep);
			if (error)
				return (error);
			goto tryagain;
		} else
			mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
	}
	return (error);
}
/*
 * XXX TO DO:
 * Make nfs_realign() non-blocking. Also make nfsm_dissect() nonblocking.
 */
static void
nfs_clnt_match_xid(struct socket *so,
    struct nfsmount *nmp,
    struct mbuf *mrep)
{
	struct mbuf *md;
	caddr_t dpos;
	u_int32_t rxid, *tl;
	struct nfsreq *rep;
	int t1;

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	if (nfs_realign(&mrep, 5 * NFSX_UNSIGNED) == ENOMEM) {
		m_freem(mrep);
		nfsstats.rpcinvalid++;
		return;
	}

	/*
	 * Get the xid and check that it is an rpc reply
	 */
	md = mrep;
	dpos = mtod(md, caddr_t);
	tl = nfsm_dissect_nonblock(u_int32_t *, 2*NFSX_UNSIGNED);
	rxid = *tl++;
	if (*tl != rpc_reply) {
		nfsstats.rpcinvalid++;
		m_freem(mrep);
nfsmout:
		return;
	}

	mtx_lock(&nfs_reqq_mtx);
	/*
	 * Loop through the request list to match up the reply.
	 * If no match, just drop the datagram.
	 */
	TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
		if (rep->r_mrep == NULL && rxid == rep->r_xid) {
			/* Found it.. */
			rep->r_mrep = mrep;
			rep->r_md = md;
			rep->r_dpos = dpos;
			/*
			 * Update congestion window.
			 * Do the additive increase of
			 * one rpc/rtt.
			 */
			if (nmp->nm_cwnd <= nmp->nm_sent) {
				nmp->nm_cwnd +=
				    (NFS_CWNDSCALE * NFS_CWNDSCALE +
				    (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
				if (nmp->nm_cwnd > NFS_MAXCWND)
					nmp->nm_cwnd = NFS_MAXCWND;
			}
			if (rep->r_flags & R_SENT) {
				rep->r_flags &= ~R_SENT;
				nmp->nm_sent -= NFS_CWNDSCALE;
			}
			/*
			 * Update rtt using a gain of 0.125 on the mean
			 * and a gain of 0.25 on the deviation.
			 */
			if (rep->r_flags & R_TIMING) {
				/*
				 * Since the timer resolution of
				 * NFS_HZ is so coarse, it can often
				 * result in r_rtt == 0. Since
				 * r_rtt == N means that the actual
				 * rtt is between N+dt and N+2-dt ticks,
				 * add 1.
				 */
				t1 = rep->r_rtt + 1;
				t1 -= (NFS_SRTT(rep) >> 3);
				NFS_SRTT(rep) += t1;
				if (t1 < 0)
					t1 = -t1;
				t1 -= (NFS_SDRTT(rep) >> 2);
				NFS_SDRTT(rep) += t1;
			}
			nmp->nm_timeouts = 0;
			break;
		}
	}
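	/*
	 * Worked example of the fixed-point estimator above (illustrative
	 * figures only): with NFS_SRTT(rep) == 40, NFS_SDRTT(rep) == 8 and
	 * a measured r_rtt of 2 ticks, t1 = 3 - (40 >> 3) = -2, so the
	 * smoothed rtt drops to 38; then |t1| - (8 >> 2) = 0, leaving the
	 * deviation at 8. This is srtt += (m - srtt)/8 and
	 * sdrtt += (|m - srtt| - sdrtt)/4 kept in scaled integers.
	 */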
	/*
	 * If not matched to a request, drop it.
	 * If it's mine, wake up requestor.
	 */
	if (rep == 0) {
		nfsstats.rpcunexpected++;
		m_freem(mrep);
	} else
		wakeup_nfsreq(rep);
	mtx_unlock(&nfs_reqq_mtx);
}

/*
 * The wakeup of the requestor should be done under the mutex
 * to avoid potential missed wakeups.
 */
static void
wakeup_nfsreq(struct nfsreq *req)
{

	mtx_lock(&nfs_reply_mtx);
	wakeup((caddr_t)req);
	mtx_unlock(&nfs_reply_mtx);
}
static void
nfs_mark_for_reconnect(struct nfsmount *nmp)
{
	struct nfsreq *rp;

	mtx_lock(&nmp->nm_nfstcpstate.mtx);
	nmp->nm_nfstcpstate.flags |= NFS_TCP_FORCE_RECONNECT;
	mtx_unlock(&nmp->nm_nfstcpstate.mtx);
	/*
	 * Wakeup all processes that are waiting for replies
	 * on this mount point. One of them does the reconnect.
	 */
	mtx_lock(&nfs_reqq_mtx);
	TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
		if (rp->r_nmp == nmp) {
			rp->r_flags |= R_MUSTRESEND;
			wakeup_nfsreq(rp);
		}
	}
	mtx_unlock(&nfs_reqq_mtx);
}

static int
nfstcp_readable(struct socket *so, int bytes)
{
	int retval;

	SOCKBUF_LOCK(&so->so_rcv);
	retval = (so->so_rcv.sb_cc >= (bytes) ||
	    (so->so_rcv.sb_state & SBS_CANTRCVMORE) ||
	    so->so_error);
	SOCKBUF_UNLOCK(&so->so_rcv);
	return (retval);
}

#define nfstcp_marker_readable(so)	nfstcp_readable(so, sizeof(u_int32_t))
static void
nfs_clnt_tcp_soupcall(struct socket *so, void *arg, int waitflag)
{
	struct nfsmount *nmp = (struct nfsmount *)arg;
	struct mbuf *mp = NULL;
	struct uio auio;
	int error;
	u_int32_t len;
	int rcvflg;

	/*
	 * Don't pick any more data from the socket if we've marked the
	 * mountpoint for reconnect.
	 */
	mtx_lock(&nmp->nm_nfstcpstate.mtx);
	if (nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT) {
		mtx_unlock(&nmp->nm_nfstcpstate.mtx);
		return;
	} else
		mtx_unlock(&nmp->nm_nfstcpstate.mtx);
	auio.uio_td = curthread;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	for ( ; ; ) {
		if (nmp->nm_nfstcpstate.flags & NFS_TCP_EXPECT_RPCMARKER) {
			if (!nfstcp_marker_readable(so)) {
				/* Marker is not readable */
				return;
			}
			auio.uio_resid = sizeof(u_int32_t);
			auio.uio_iov = NULL;
			auio.uio_iovcnt = 0;
			mp = NULL;
			rcvflg = (MSG_DONTWAIT | MSG_SOCALLBCK);
			error = so->so_proto->pr_usrreqs->pru_soreceive
			    (so, (struct sockaddr **)0,
			    &auio, &mp, (struct mbuf **)0, &rcvflg);
			/*
			 * We've already tested that the socket is readable. 2 cases
			 * here, we either read 0 bytes (client closed connection),
			 * or got some other error. In both cases, we tear down the
			 * connection.
			 */
			if (error || auio.uio_resid > 0) {
				if (error != ECONNRESET) {
					log(LOG_ERR,
					    "nfs/tcp clnt: Error %d reading socket, tearing down TCP connection\n",
					    error);
				}
				goto mark_reconnect;
			}
			if (mp == NULL)
				panic("nfs_clnt_tcp_soupcall: Got empty mbuf chain from sorecv\n");
			bcopy(mtod(mp, u_int32_t *), &len, sizeof(len));
			len = ntohl(len) & ~0x80000000;
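			/*
			 * Example (illustrative only): a record mark of
			 * 0x800000a0 on the wire yields len == 0xa0 (160
			 * bytes) once the "last fragment" high bit is
			 * masked off here.
			 */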
			m_freem(mp);
			/*
			 * This is SERIOUS! We are out of sync with the sender
			 * and forcing a disconnect/reconnect is all I can do.
			 */
			if (len > NFS_MAXPACKET || len == 0) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
				goto mark_reconnect;
			}
			nmp->nm_nfstcpstate.rpcresid = len;
			nmp->nm_nfstcpstate.flags &= ~(NFS_TCP_EXPECT_RPCMARKER);
		}
		/*
		 * Processed RPC marker or no RPC marker to process.
		 * Pull in and process data.
		 */
		if (nmp->nm_nfstcpstate.rpcresid > 0) {
			if (!nfstcp_readable(so, nmp->nm_nfstcpstate.rpcresid)) {
				/* All data not readable */
				return;
			}
			auio.uio_resid = nmp->nm_nfstcpstate.rpcresid;
			auio.uio_iov = NULL;
			auio.uio_iovcnt = 0;
			mp = NULL;
			rcvflg = (MSG_DONTWAIT | MSG_SOCALLBCK);
			error = so->so_proto->pr_usrreqs->pru_soreceive
			    (so, (struct sockaddr **)0,
			    &auio, &mp, (struct mbuf **)0, &rcvflg);
			if (error || auio.uio_resid > 0) {
				if (error != ECONNRESET) {
					log(LOG_ERR,
					    "nfs/tcp clnt: Error %d reading socket, tearing down TCP connection\n",
					    error);
				}
				goto mark_reconnect;
			}
			if (mp == NULL)
				panic("nfs_clnt_tcp_soupcall: Got empty mbuf chain from sorecv\n");
			nmp->nm_nfstcpstate.rpcresid = 0;
			nmp->nm_nfstcpstate.flags |= NFS_TCP_EXPECT_RPCMARKER;
			/* We got the entire RPC reply. Match XIDs and wake up requestor */
			nfs_clnt_match_xid(so, nmp, mp);
		}
	}

mark_reconnect:
	nfs_mark_for_reconnect(nmp);
}
static void
nfs_clnt_udp_soupcall(struct socket *so, void *arg, int waitflag)
{
	struct nfsmount *nmp = (struct nfsmount *)arg;
	struct uio auio;
	struct mbuf *mp = NULL;
	struct mbuf *control = NULL;
	int error, rcvflag;

	auio.uio_td = curthread;
	do {
		mp = control = NULL;
		rcvflag = MSG_DONTWAIT;
		auio.uio_resid = 1000000000;
		error = so->so_proto->pr_usrreqs->pru_soreceive(so,
		    NULL, &auio, &mp, &control, &rcvflag);
		if (control)
			m_freem(control);
		if (mp)
			nfs_clnt_match_xid(so, nmp, mp);
	} while (mp && !error);
}
/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
/* XXX overloaded before */
#define	NQ_TRYLATERDEL	15	/* Initial try later delay (sec) */

int
nfs_request(struct vnode *vp, struct mbuf *mrest, int procnum,
    struct thread *td, struct ucred *cred, struct mbuf **mrp,
    struct mbuf **mdp, caddr_t *dposp)
{
	struct mbuf *mrep, *m2;
	struct nfsreq *rep;
	u_int32_t *tl;
	int i;
	struct nfsmount *nmp;
	struct mbuf *m, *md, *mheadend;
	time_t waituntil;
	caddr_t dpos;
	int s, error = 0, mrest_len, auth_len, auth_type;
	int trylater_delay = NQ_TRYLATERDEL, trylater_cnt = 0;
	struct timeval now;
	u_int32_t xid;

	/* Reject requests while attempting a forced unmount. */
	if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(mrest);
		return (ESTALE);
	}
	nmp = VFSTONFS(vp->v_mount);
	if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
		return nfs4_request(vp, mrest, procnum, td, cred, mrp, mdp, dposp);
	MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	rep->r_mrep = rep->r_md = NULL;
	rep->r_nmp = nmp;
	rep->r_vp = vp;
	rep->r_td = td;
	rep->r_procnum = procnum;

	getmicrouptime(&now);
	rep->r_lastmsg = now.tv_sec -
	    ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
	mrest_len = m_length(mrest, NULL);

	/*
	 * Get the RPC header with authorization.
	 */
	auth_type = RPCAUTH_UNIX;
	if (cred->cr_ngroups < 1)
		panic("nfsreq nogrps");
	auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
	    nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
	    5 * NFSX_UNSIGNED;
	m = nfsm_rpchead(cred, nmp->nm_flag, procnum, auth_type, auth_len,
	    mrest, mrest_len, &mheadend, &xid);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, M_TRYWAIT);
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
		    (m->m_pkthdr.len - NFSX_UNSIGNED));
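		/*
		 * Example (illustrative only): a 120-byte RPC gets the
		 * mark htonl(0x80000000 | 120) prepended here; the
		 * receive side strips the same "last fragment" bit in
		 * nfs_clnt_tcp_soupcall() above.
		 */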
	}
	rep->r_mreq = m;
	rep->r_xid = xid;
tryagain:
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[procnum] > 0)
		rep->r_flags = R_TIMING;
	else
		rep->r_flags = 0;

	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;
	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first.
	 */
	mtx_lock(&nfs_reqq_mtx);
	if (TAILQ_EMPTY(&nfs_reqq))
		callout_reset(&nfs_callout, nfs_ticks, nfs_timer, NULL);
	TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain);
	mtx_unlock(&nfs_reqq_mtx);

	/*
	 * If backing off another request or avoiding congestion, don't
	 * send this one now but let timer do it. If not timing a request,
	 * do it now.
	 */
	if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
	    (nmp->nm_flag & NFSMNT_DUMBTIMR) ||
	    nmp->nm_sent < nmp->nm_cwnd)) {
		error = nfs_sndlock(rep);
		if (!error) {
			m2 = m_copym(m, 0, M_COPYALL, M_TRYWAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
			if (!error)
				nfs_sndunlock(rep);
		}
		mtx_lock(&nfs_reqq_mtx);
		if (!error && (rep->r_flags & R_MUSTRESEND) == 0) {
			nmp->nm_sent += NFS_CWNDSCALE;
			rep->r_flags |= R_SENT;
		}
		mtx_unlock(&nfs_reqq_mtx);
	} else {
		rep->r_rtt = -1;
	}

	/*
	 * Wait for the reply from our send or the timer's.
	 */
	if (!error || error == EPIPE)
		error = nfs_reply(rep);

	/*
	 * RPC done, unlink the request.
	 */
	mtx_lock(&nfs_reqq_mtx);
	/*
	 * nfs_timer() may be in the process of re-transmitting this request.
	 * nfs_timer() drops the nfs_reqq_mtx before the pru_send() (to avoid
	 * LORs). Wait till nfs_timer() completes the re-transmission. When
	 * the reply comes back, it will be discarded (since the req struct
	 * for it no longer exists).
	 */
	while (rep->r_flags & R_REXMIT_INPROG) {
		msleep((caddr_t)&rep->r_flags, &nfs_reqq_mtx,
		    (PZERO - 1), "nfsrxmt", 0);
	}
	TAILQ_REMOVE(&nfs_reqq, rep, r_chain);
	if (TAILQ_EMPTY(&nfs_reqq))
		callout_stop(&nfs_callout);
	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;	/* paranoia */
		nmp->nm_sent -= NFS_CWNDSCALE;
	}
	mtx_unlock(&nfs_reqq_mtx);

	/*
	 * If there was a successful reply and a tprintf msg.
	 * tprintf a response.
	 */
	if (!error && (rep->r_flags & R_TPRINTFMSG))
		nfs_up(rep, nmp, rep->r_td, "is alive again", NFSSTA_TIMEO);
	mrep = rep->r_mrep;
	md = rep->r_md;
	dpos = rep->r_dpos;
	if (error) {
		/*
		 * If we got interrupted by a signal in nfs_reply(), there's
		 * a very small window where the reply could've come in before
		 * this process got scheduled in. To handle that case, we need
		 * to free the reply if it was delivered.
		 */
		if (rep->r_mrep != NULL)
			m_freem(rep->r_mrep);
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	if (rep->r_mrep == NULL)
		panic("nfs_request: rep->r_mrep shouldn't be NULL if no error\n");

	/*
	 * break down the rpc header and check if ok
	 */
	tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch)
			error = EOPNOTSUPP;
		else
			error = EACCES;
		m_freem(mrep);
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * Just throw away any verifier (i.e., Kerberos etc.).
	 */
	i = fxdr_unsigned(int, *tl++);		/* verf type */
	i = fxdr_unsigned(int32_t, *tl);	/* len */
	if (i > 0)
		nfsm_adv(nfsm_rndup(i));
	tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
	/* 0 == ok */
	if (*tl == 0) {
		tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);
			if ((nmp->nm_flag & NFSMNT_NFSV3) &&
			    error == NFSERR_TRYLATER) {
				m_freem(mrep);
				error = 0;
				waituntil = time_second + trylater_delay;
				while (time_second < waituntil)
					(void) tsleep(&lbolt,
					    PSOCK, "nqnfstry", 0);
				trylater_delay *= nfs_backoff[trylater_cnt];
				if (trylater_cnt < NFS_NBACKOFF - 1)
					trylater_cnt++;
				goto tryagain;
			}
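			/*
			 * Illustrative backoff arithmetic (not from the
			 * original source): with NQ_TRYLATERDEL == 15 the
			 * client waits 15s, then 15*2 == 30s, then
			 * 30*4 == 120s, then 120*8 == 960s between retries,
			 * the multipliers coming from nfs_backoff[].
			 */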
			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 */
			if (error == ESTALE)
				cache_purge(vp);
			if (nmp->nm_flag & NFSMNT_NFSV3) {
				*mrp = mrep;
				*mdp = md;
				*dposp = dpos;
				error |= NFSERR_RETERR;
			} else
				m_freem(mrep);
			m_freem(rep->r_mreq);
			free((caddr_t)rep, M_NFSREQ);
			return (error);
		}

		*mrp = mrep;
		*mdp = md;
		*dposp = dpos;
		m_freem(rep->r_mreq);
		FREE((caddr_t)rep, M_NFSREQ);
		return (0);
	}
	m_freem(mrep);
	error = EPROTONOSUPPORT;
nfsmout:
	m_freem(rep->r_mreq);
	free((caddr_t)rep, M_NFSREQ);
	return (error);
}
/*
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 *
 * The nfs reqq lock cannot be held while we do the pru_send() because of a
 * lock ordering violation. The NFS client socket callback acquires
 * inp_lock->nfsreq mutex and pru_send acquires inp_lock. So we drop the
 * reqq mutex (and reacquire it after the pru_send()). The req structure
 * (for the rexmit) is prevented from being removed by the R_REXMIT_INPROG flag.
 */
void
nfs_timer(void *arg)
{
	struct nfsreq *rep;
	struct mbuf *m;
	struct socket *so;
	struct nfsmount *nmp;
	int timeo;
	int error;
	struct timeval now;

	getmicrouptime(&now);
	mtx_lock(&Giant);	/* nfs_down -> tprintf */
	mtx_lock(&nfs_reqq_mtx);
	TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
		nmp = rep->r_nmp;
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		if (nfs_sigintr(nmp, rep, rep->r_td))
			continue;
		if (nmp->nm_tprintf_initial_delay != 0 &&
		    (rep->r_rexmit > 2 || (rep->r_flags & R_RESENDERR)) &&
		    rep->r_lastmsg + nmp->nm_tprintf_delay < now.tv_sec) {
			rep->r_lastmsg = now.tv_sec;
			nfs_down(rep, nmp, rep->r_td, "not responding",
			    0, NFSSTA_TIMEO);
			if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
				/* we're not yet completely mounted and */
				/* we can't complete an RPC, so we fail */
				nfsstats.rpctimeouts++;
				nfs_softterm(rep);
				continue;
			}
		}
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
			else
				timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			if (rep->r_rtt <= timeo)
				continue;
			if (nmp->nm_timeouts < NFS_NBACKOFF)
				nmp->nm_timeouts++;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
			nfs_softterm(rep);
			continue;
		}
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			/*
			 * For NFS/TCP, setting R_MUSTRESEND and waking up
			 * the requester will cause the request to be
			 * retransmitted (in nfs_reply()), re-connecting
			 * if necessary.
			 */
			rep->r_flags |= R_MUSTRESEND;
			wakeup_nfsreq(rep);
			continue;
		}
		if ((so = nmp->nm_so) == NULL)
			continue;
		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		    ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		    (rep->r_flags & R_SENT) ||
		    nmp->nm_sent < nmp->nm_cwnd) &&
		    (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))) {
			/*
			 * Mark the request to indicate that a XMIT is in
			 * progress to prevent the req structure being
			 * removed in nfs_request().
			 */
			rep->r_flags |= R_REXMIT_INPROG;
			mtx_unlock(&nfs_reqq_mtx);
			if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
				error = (*so->so_proto->pr_usrreqs->pru_send)
				    (so, 0, m, NULL, NULL, curthread);
			else
				error = (*so->so_proto->pr_usrreqs->pru_send)
				    (so, 0, m, nmp->nm_nam, NULL, curthread);
			mtx_lock(&nfs_reqq_mtx);
			rep->r_flags &= ~R_REXMIT_INPROG;
			wakeup((caddr_t)&rep->r_flags);
			if (error) {
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
					so->so_error = 0;
				rep->r_flags |= R_RESENDERR;
			} else {
				/*
				 * If first send, start timing;
				 * else turn timing off, back off timer,
				 * and divide congestion window by 2.
				 */
				rep->r_flags &= ~R_RESENDERR;
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
	}
	mtx_unlock(&nfs_reqq_mtx);
	mtx_unlock(&Giant);	/* nfs_down -> tprintf */
	callout_reset(&nfs_callout, nfs_ticks, nfs_timer, NULL);
}
/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete. This is used by forced unmounts
 * to terminate any outstanding RPCs.
 */
int
nfs_nmcancelreqs(nmp)
	struct nfsmount *nmp;
{
	struct nfsreq *req;
	int i;

	mtx_lock(&nfs_reqq_mtx);
	TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
		if (nmp != req->r_nmp || req->r_mrep != NULL ||
		    (req->r_flags & R_SOFTTERM))
			continue;
		nfs_softterm(req);
	}
	mtx_unlock(&nfs_reqq_mtx);

	for (i = 0; i < 30; i++) {
		mtx_lock(&nfs_reqq_mtx);
		TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
			if (nmp == req->r_nmp)
				break;
		}
		mtx_unlock(&nfs_reqq_mtx);
		if (req == NULL)
			return (0);
		tsleep(&lbolt, PSOCK, "nfscancel", 0);
	}
	return (EBUSY);
}
/*
 * Flag a request as being about to terminate (due to NFSMNT_INT/NFSMNT_SOFT).
 * The nm_sent count is decremented now to avoid deadlocks when the process in
 * soreceive() hasn't yet managed to send its own request.
 */
static void
nfs_softterm(struct nfsreq *rep)
{

	rep->r_flags |= R_SOFTTERM;
	if (rep->r_flags & R_SENT) {
		rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
		rep->r_flags &= ~R_SENT;
	}
	/*
	 * Request terminated, wakeup the blocked process, so that we
	 * can return EINTR back.
	 */
	wakeup_nfsreq(rep);
}

/*
 * Any signal that can interrupt an NFS operation in an intr mount
 * should be added to this set. SIGSTOP and SIGKILL cannot be masked.
 */
int nfs_sig_set[] = {
	SIGINT,
	SIGTERM,
	SIGHUP,
	SIGKILL,
	SIGSTOP,
	SIGQUIT
};

/*
 * Check to see if one of the signals in our subset is pending on
 * the process (in an intr mount).
 */
static int
nfs_sig_pending(sigset_t set)
{
	int i;

	for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++)
		if (SIGISMEMBER(set, nfs_sig_set[i]))
			return (1);
	return (0);
}
/*
 * The set/restore sigmask functions are used to (temporarily) overwrite
 * the process p_sigmask during an RPC call (for example). These are also
 * used in other places in the NFS client that might tsleep().
 */
void
nfs_set_sigmask(struct thread *td, sigset_t *oldset)
{
	sigset_t newset;
	int i;
	struct proc *p;

	SIGFILLSET(newset);
	if (td == NULL)
		td = curthread;	/* XXX */
	p = td->td_proc;
	/* Remove the NFS set of signals from newset */
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++) {
		/*
		 * But make sure we leave the ones already masked
		 * by the process, ie. remove the signal from the
		 * temporary signalmask only if it wasn't already
		 * in p_sigmask.
		 */
		if (!SIGISMEMBER(td->td_sigmask, nfs_sig_set[i]) &&
		    !SIGISMEMBER(p->p_sigacts->ps_sigignore, nfs_sig_set[i]))
			SIGDELSET(newset, nfs_sig_set[i]);
	}
	mtx_unlock(&p->p_sigacts->ps_mtx);
	kern_sigprocmask(td, SIG_SETMASK, &newset, oldset, 0);
	PROC_UNLOCK(p);
}

void
nfs_restore_sigmask(struct thread *td, sigset_t *set)
{
	if (td == NULL)
		td = curthread;	/* XXX */
	kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
}

/*
 * NFS wrapper to msleep(), that shoves a new p_sigmask and restores the
 * old one after msleep() returns.
 */
int
nfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority,
    char *wmesg, int timo)
{
	sigset_t oldset;
	int error;

	if ((priority & PCATCH) == 0)
		return msleep(ident, mtx, priority, wmesg, timo);
	if (td == NULL)
		td = curthread;	/* XXX */
	nfs_set_sigmask(td, &oldset);
	error = msleep(ident, mtx, priority, wmesg, timo);
	nfs_restore_sigmask(td, &oldset);
	return (error);
}

/*
 * NFS wrapper to tsleep(), that shoves a new p_sigmask and restores the
 * old one after tsleep() returns.
 */
int
nfs_tsleep(struct thread *td, void *ident, int priority, char *wmesg, int timo)
{
	sigset_t oldset;
	int error;

	if ((priority & PCATCH) == 0)
		return tsleep(ident, priority, wmesg, timo);
	if (td == NULL)
		td = curthread;	/* XXX */
	nfs_set_sigmask(td, &oldset);
	error = tsleep(ident, priority, wmesg, timo);
	nfs_restore_sigmask(td, &oldset);
	return (error);
}
/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
{
	struct proc *p;
	sigset_t tmpset;

	if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
		return nfs4_sigintr(nmp, rep, td);
	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EIO);
	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EIO);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	if (td == NULL)
		return (0);

	p = td->td_proc;
	PROC_LOCK(p);
	tmpset = p->p_siglist;
	SIGSETNAND(tmpset, td->td_sigmask);
	mtx_lock(&p->p_sigacts->ps_mtx);
	SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (SIGNOTEMPTY(p->p_siglist) && nfs_sig_pending(tmpset)) {
		PROC_UNLOCK(p);
		return (EINTR);
	}
	PROC_UNLOCK(p);
	return (0);
}
/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
int
nfs_sndlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;
	struct thread *td;
	int error, slpflag = 0, slptimeo = 0;

	td = rep->r_td;
	if (rep->r_nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	while (*statep & NFSSTA_SNDLOCK) {
		error = nfs_sigintr(rep->r_nmp, rep, td);
		if (error)
			return (error);
		*statep |= NFSSTA_WANTSND;
		(void) tsleep(statep, slpflag | (PZERO - 1),
		    "nfsndlck", slptimeo);
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	*statep |= NFSSTA_SNDLOCK;
	return (0);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;

	if ((*statep & NFSSTA_SNDLOCK) == 0)
		panic("nfs sndunlock");
	*statep &= ~NFSSTA_SNDLOCK;
	if (*statep & NFSSTA_WANTSND) {
		*statep &= ~NFSSTA_WANTSND;
		wakeup(statep);
	}
}
/*
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely. The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP. Use vfs.nfs.realign_count and realign_test to check this.
 */
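/*
 * Example of the problem (illustrative only): after TCP reassembly an
 * RPC reply can begin at an odd offset within an mbuf, e.g. m_len == 7
 * or m_data ending in ...2; dereferencing such data as u_int32_t in the
 * dissect macros would fault on strict-alignment machines, so the chain
 * is first copied into fresh, aligned mbufs.
 */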
static int
nfs_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	int off = 0;

	++nfs_realign_test;
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return (ENOMEM);
			if (m->m_len >= MINCLSIZE) {
				MCLGET(n, M_DONTWAIT);
				if (n->m_ext.ext_buf == NULL) {
					m_freem(n);
					return (ENOMEM);
				}
			}
			n->m_len = 0;
			break;
		}
		pm = &m->m_next;
	}
	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 */
	if (n != NULL) {
		++nfs_realign_count;
		while (m) {
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));
			off += m->m_len;
			m = m->m_next;
		}
		m_freem(*pm);
		*pm = n;
	}
	return (0);
}
static int
nfs_msg(struct thread *td, const char *server, const char *msg, int error)
{
	struct proc *p;

	GIANT_REQUIRED;	/* tprintf */

	p = td ? td->td_proc : NULL;
	if (error) {
		tprintf(p, LOG_INFO, "nfs server %s: %s, error %d\n", server,
		    msg, error);
	} else {
		tprintf(p, LOG_INFO, "nfs server %s: %s\n", server, msg);
	}
	return (0);
}
void
nfs_down(rep, nmp, td, msg, error, flags)
	struct nfsreq *rep;
	struct nfsmount *nmp;
	struct thread *td;
	const char *msg;
	int error, flags;
{

	GIANT_REQUIRED;	/* nfs_msg */

	if (nmp == NULL)
		return;
	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESP, 0);
		nmp->nm_state |= NFSSTA_TIMEO;
	}
#ifdef NFSSTA_LOCKTIMEO
	if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESPLOCK, 0);
		nmp->nm_state |= NFSSTA_LOCKTIMEO;
	}
#endif
	if (rep)
		rep->r_flags |= R_TPRINTFMSG;
	nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
}

void
nfs_up(rep, nmp, td, msg, flags)
	struct nfsreq *rep;
	struct nfsmount *nmp;
	struct thread *td;
	const char *msg;
	int flags;
{

	GIANT_REQUIRED;	/* nfs_msg */

	if (nmp == NULL)
		return;
	if ((rep == NULL) || (rep->r_flags & R_TPRINTFMSG) != 0)
		nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
	if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state &= ~NFSSTA_TIMEO;
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESP, 1);
	}
#ifdef NFSSTA_LOCKTIMEO
	if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESPLOCK, 1);
	}
#endif
}