2 * Copyright (c) 2000-2001 Boris Popov
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Boris Popov.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/param.h>
36 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/kthread.h>
40 #include <sys/malloc.h>
42 #include <sys/unistd.h>
44 #include <netsmb/smb.h>
45 #include <netsmb/smb_conn.h>
46 #include <netsmb/smb_rq.h>
47 #include <netsmb/smb_tran.h>
48 #include <netsmb/smb_trantcp.h>
/*
 * Tunables, lock accessors, malloc tag and forward declarations for the
 * SMB network I/O daemon (iod).
 * NOTE(review): this excerpt elides some original lines (numbering gaps).
 */
51 #define SMBIOD_SLEEP_TIMO 2
52 #define SMBIOD_PING_TIMO 60 /* seconds */
/* Wrappers over the iod event-list lock (iod_evlock). */
54 #define SMB_IOD_EVLOCKPTR(iod) (&((iod)->iod_evlock))
55 #define SMB_IOD_EVLOCK(iod) smb_sl_lock(&((iod)->iod_evlock))
56 #define SMB_IOD_EVUNLOCK(iod) smb_sl_unlock(&((iod)->iod_evlock))
/* Wrappers over the iod request-list lock (iod_rqlock). */
58 #define SMB_IOD_RQLOCKPTR(iod) (&((iod)->iod_rqlock))
59 #define SMB_IOD_RQLOCK(iod) smb_sl_lock(&((iod)->iod_rqlock))
60 #define SMB_IOD_RQUNLOCK(iod) smb_sl_unlock(&((iod)->iod_rqlock))
/* Wake the iod thread; it sleeps on &iod->iod_flags (see smb_iod_thread). */
62 #define smb_iod_wakeup(iod) wakeup(&(iod)->iod_flags)
65 static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");
/* Monotonically increasing id handed out in smb_iod_create(). */
67 static int smb_iod_next;
69 static int smb_iod_sendall(struct smbiod *iod);
70 static int smb_iod_disconnect(struct smbiod *iod);
71 static void smb_iod_thread(void *);
/*
 * Mark request rqp as completed with the given error code and wake any
 * thread sleeping on &rqp->sr_state (see smb_iod_waitrq).
 * NOTE(review): lines are elided in this excerpt (e.g. the locking that
 * presumably brackets these assignments) — confirm against the full file.
 */
74 smb_iod_rqprocessed(struct smb_rq *rqp, int error)
77 rqp->sr_lerror = error;
79 rqp->sr_state = SMBRQ_NOTIFIED;
80 wakeup(&rqp->sr_state);
/*
 * Invalidate every outstanding request on this iod's request list:
 * flag each for restart and complete it with ENOTCONN.  Requests marked
 * SMBR_INTERNAL are treated specially (the elided branch body is not
 * visible in this excerpt).
 */
85 smb_iod_invrq(struct smbiod *iod)
90 * Invalidate all outstanding requests for this connection
93 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
94 if (rqp->sr_flags & SMBR_INTERNAL)
96 rqp->sr_flags |= SMBR_RESTART;
97 smb_iod_rqprocessed(rqp, ENOTCONN);
99 SMB_IOD_RQUNLOCK(iod);
/*
 * Tear down the transport under the VC: disconnect, release the
 * transport instance, and clear vc_tdata.  No-op when no transport
 * is attached (vc_tdata == NULL).
 */
103 smb_iod_closetran(struct smbiod *iod)
105 struct smb_vc *vcp = iod->iod_vc;
106 struct proc *p = iod->iod_p;
108 if (vcp->vc_tdata == NULL)
110 SMB_TRAN_DISCONNECT(vcp, p);
111 SMB_TRAN_DONE(vcp, p);
112 vcp->vc_tdata = NULL;
/*
 * Transition the iod to the DEAD state and close its transport.
 * NOTE(review): the excerpt elides following lines; presumably queued
 * requests are also invalidated here — confirm against the full file.
 */
116 smb_iod_dead(struct smbiod *iod)
118 iod->iod_state = SMBIOD_ST_DEAD;
119 smb_iod_closetran(iod);
/*
 * Bring the VC fully up: create/bind/connect the transport, then run the
 * SMB negotiate and session-setup exchanges.  On success the iod ends in
 * SMBIOD_ST_VCACTIVE.  The ithrow() macro (defined outside this excerpt)
 * appears to bail out on a non-zero error — TODO confirm its semantics.
 */
124 smb_iod_connect(struct smbiod *iod)
126 struct smb_vc *vcp = iod->iod_vc;
127 struct proc *p = iod->iod_p;
130 SMBIODEBUG("%d\n", iod->iod_state);
131 switch(iod->iod_state) {
/* Guard: connecting an already-active VC is a caller error. */
132 case SMBIOD_ST_VCACTIVE:
133 SMBERROR("called for already opened connection\n");
136 return ENOTCONN; /* XXX: last error code ? */
/* Transport bring-up: create, bind local address, connect to peer. */
143 ithrow(SMB_TRAN_CREATE(vcp, p));
144 SMBIODEBUG("tcreate\n");
146 ithrow(SMB_TRAN_BIND(vcp, vcp->vc_laddr, p));
148 SMBIODEBUG("tbind\n");
149 ithrow(SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, p));
150 SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
151 iod->iod_state = SMBIOD_ST_TRANACTIVE;
152 SMBIODEBUG("tconnect\n");
153 /* vcp->vc_mid = 0;*/
/* SMB-level handshake: protocol negotiation, then session setup. */
154 ithrow(smb_smb_negotiate(vcp, &iod->iod_scred));
155 SMBIODEBUG("snegotiate\n");
156 ithrow(smb_smb_ssnsetup(vcp, &iod->iod_scred));
157 iod->iod_state = SMBIOD_ST_VCACTIVE;
158 SMBIODEBUG("completed\n");
/*
 * Orderly shutdown of the VC: close the SMB session if one is active,
 * forget the server-assigned UID, drop the transport, and return the
 * iod to the NOTCONN state.
 */
168 smb_iod_disconnect(struct smbiod *iod)
170 struct smb_vc *vcp = iod->iod_vc;
173 if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
174 smb_smb_ssnclose(vcp, &iod->iod_scred);
175 iod->iod_state = SMBIOD_ST_TRANACTIVE;
177 vcp->vc_smbuid = SMB_UID_UNKNOWN;
178 smb_iod_closetran(iod);
179 iod->iod_state = SMBIOD_ST_NOTCONN;
/*
 * (Re)connect share ssp on this iod's VC.  If the VC is not active, a
 * full reconnect is attempted first.  SMBS_RECONNECTING brackets the
 * tree-connect exchange, and waiters on ss_vcgenid are woken when done.
 */
184 smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
188 if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
189 if (iod->iod_state != SMBIOD_ST_DEAD)
191 iod->iod_state = SMBIOD_ST_RECONNECT;
192 error = smb_iod_connect(iod);
196 SMBIODEBUG("tree reconnect\n");
198 ssp->ss_flags |= SMBS_RECONNECTING;
200 error = smb_smb_treeconnect(ssp, &iod->iod_scred);
202 ssp->ss_flags &= ~SMBS_RECONNECTING;
/* Let threads blocked on the share's VC generation proceed. */
204 wakeup(&ssp->ss_vcgenid);
/*
 * Transmit one request over the VC's transport.  On first send the SMB
 * header TID/UID fields are patched in; a request that has failed to
 * send more than 5 times is completed with its last error and flagged
 * for restart.  A copy of the request mbuf chain is sent so the original
 * can be retransmitted.
 * NOTE(review): several lines (returns, braces, some cases) are elided
 * from this excerpt — numbering gaps mark them.
 */
209 smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
211 struct proc *p = iod->iod_p;
212 struct smb_vc *vcp = iod->iod_vc;
213 struct smb_share *ssp = rqp->sr_share;
217 SMBIODEBUG("iod_state = %d\n", iod->iod_state);
218 switch (iod->iod_state) {
219 case SMBIOD_ST_NOTCONN:
/* Not connected: fail the request immediately. */
220 smb_iod_rqprocessed(rqp, ENOTCONN);
223 iod->iod_state = SMBIOD_ST_RECONNECT;
225 case SMBIOD_ST_RECONNECT:
/* First attempt: finish the SMB header before transmitting. */
230 if (rqp->sr_sendcnt == 0) {
231 #ifdef movedtoanotherplace
232 if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
235 *rqp->sr_rqtid = htoles(ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
236 *rqp->sr_rquid = htoles(vcp ? vcp->vc_smbuid : 0);
237 mb_fixhdr(&rqp->sr_rq);
/* Give up after more than 5 send attempts. */
239 if (rqp->sr_sendcnt++ > 5) {
240 rqp->sr_flags |= SMBR_RESTART;
241 smb_iod_rqprocessed(rqp, rqp->sr_lerror);
243 * If all attempts to send a request failed, then
244 * something is seriously hosed.
248 SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
249 m_dumpm(rqp->sr_rq.mb_top);
/* Send a copy; keep mb_top intact for possible retransmission. */
250 m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAIT);
251 error = rqp->sr_lerror = m ? SMB_TRAN_SEND(vcp, m, p) : ENOBUFS;
/* Successful send: record timestamps and mark the request SENT. */
253 getnanotime(&rqp->sr_timesent);
254 iod->iod_lastrqsent = rqp->sr_timesent;
255 rqp->sr_flags |= SMBR_SENT;
256 rqp->sr_state = SMBRQ_SENT;
260 * Check for fatal errors
262 if (SMB_TRAN_FATAL(vcp, error)) {
264 * No further attempts should be made
268 if (smb_rq_intr(rqp))
269 smb_iod_rqprocessed(rqp, EINTR);
274 * Process incoming packets
/*
 * Drain the transport receive queue: pull each packet, validate the SMB
 * signature, match the MID against the pending request list, attach the
 * reply mbufs to the request and complete it.  Unmatched replies are
 * logged and dropped.  Finally, requests whose owning process has a
 * pending signal are completed with EINTR.
 * NOTE(review): loop framing, braces and some statements are elided in
 * this excerpt (numbering gaps).
 */
277 smb_iod_recvall(struct smbiod *iod)
279 struct smb_vc *vcp = iod->iod_vc;
280 struct proc *p = iod->iod_p;
/* Nothing to receive unless the transport is up. */
287 switch (iod->iod_state) {
288 case SMBIOD_ST_NOTCONN:
290 case SMBIOD_ST_RECONNECT:
297 error = SMB_TRAN_RECV(vcp, &m, p);
298 if (error == EWOULDBLOCK)
300 if (SMB_TRAN_FATAL(vcp, error)) {
307 SMBERROR("tran return NULL without error\n");
/* Make the SMB header contiguous before touching it. */
311 m = m_pullup(m, SMB_HDRLEN);
313 continue; /* wait for a good packet */
315 * Now we got an entire and possibly invalid SMB packet.
316 * Be careful while parsing it.
319 hp = mtod(m, u_char*);
320 if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
324 mid = SMB_HDRMID(hp);
325 SMBSDEBUG("mid %04x\n", (u_int)mid);
/* Find the pending request whose MID matches this reply. */
327 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
328 if (rqp->sr_mid != mid)
331 if (rqp->sr_rp.md_top == NULL) {
332 md_initm(&rqp->sr_rp, m)
334 if (rqp->sr_flags & SMBR_MULTIPACKET) {
335 md_append_record(&rqp->sr_rp, m);
338 SMBERROR("duplicate response %d (ignored)\n", mid);
343 smb_iod_rqprocessed(rqp, 0);
346 SMB_IOD_RQUNLOCK(iod);
348 SMBERROR("drop resp with mid %d\n", (u_int)mid);
349 /* smb_printrqlist(vcp);*/
354 * check for interrupts
357 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
358 if (smb_proc_intr(rqp->sr_cred->scr_p)) {
359 smb_iod_rqprocessed(rqp, EINTR);
362 SMB_IOD_RQUNLOCK(iod);
/*
 * Queue an event for the iod thread.  Asynchronous events return after
 * enqueueing; SMBIOD_EV_SYNC events sleep (dropping the event lock via
 * PDROP) until the iod has processed the event, then return its error.
 */
367 smb_iod_request(struct smbiod *iod, int event, void *ident)
369 struct smbiod_event *evp;
373 evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
374 evp->ev_type = event;
375 evp->ev_ident = ident;
377 STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
378 if ((event & SMBIOD_EV_SYNC) == 0) {
/* Async: hand off to the iod and return immediately. */
379 SMB_IOD_EVUNLOCK(iod);
/* Sync: wait for smb_iod_main to process the event and wake us. */
384 msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "90evw", 0);
385 error = evp->ev_error;
391 * Place request in the queue.
392 * Request from smbiod have a high priority.
/*
 * Enqueue rqp for transmission.  Requests issued by the iod thread
 * itself are marked SMBR_INTERNAL, put at the head of the list, and
 * sent synchronously here (the iod cannot wait on itself).  Ordinary
 * requests trigger a reconnect if needed and block while the VC's
 * multiplex limit (vc_maxmux) is reached.
 */
395 smb_iod_addrq(struct smb_rq *rqp)
397 struct smb_vc *vcp = rqp->sr_vc;
398 struct smbiod *iod = vcp->vc_iod;
/* Request originates from the iod thread itself: internal path. */
402 if (rqp->sr_cred->scr_p == iod->iod_p) {
403 rqp->sr_flags |= SMBR_INTERNAL;
405 TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
406 SMB_IOD_RQUNLOCK(iod);
408 if (smb_iod_sendrq(iod, rqp) != 0) {
413 * we don't need to lock state field here
415 if (rqp->sr_state != SMBRQ_NOTSENT)
417 tsleep(&iod->iod_flags, PWAIT, "90sndw", hz);
420 smb_iod_removerq(rqp);
421 return rqp->sr_lerror;
424 switch (iod->iod_state) {
425 case SMBIOD_ST_NOTCONN:
/* VC is down: ask the iod to reconnect synchronously. */
428 error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
438 if (vcp->vc_maxmux == 0) {
439 SMBERROR("maxmux == 0\n");
/* Block until a multiplex slot frees up (see smb_iod_removerq). */
442 if (iod->iod_muxcnt < vcp->vc_maxmux)
445 msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod),
449 TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
450 SMB_IOD_RQUNLOCK(iod);
/*
 * Unlink rqp from the iod's request list.  Waits for any transient
 * exclusive lock (SMBR_XLOCK, held by smb_iod_sendall while sending)
 * to clear first, and wakes threads waiting for a multiplex slot.
 */
456 smb_iod_removerq(struct smb_rq *rqp)
458 struct smb_vc *vcp = rqp->sr_vc;
459 struct smbiod *iod = vcp->vc_iod;
462 if (rqp->sr_flags & SMBR_INTERNAL) {
464 TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
465 SMB_IOD_RQUNLOCK(iod);
/* Don't yank the request out from under a concurrent send. */
469 while (rqp->sr_flags & SMBR_XLOCK) {
470 rqp->sr_flags |= SMBR_XLOCKWANT;
471 msleep(rqp, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90xrm", 0);
473 TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
/* A mux slot opened up; wake smb_iod_addrq waiters. */
475 if (iod->iod_muxwant) {
477 wakeup(&iod->iod_muxwant);
479 SMB_IOD_RQUNLOCK(iod);
/*
 * Wait for the reply to rqp.  Internal requests are driven inline by
 * pumping send/recv from this thread; ordinary requests sleep on
 * &rqp->sr_state until smb_iod_rqprocessed notifies them.  Multipacket
 * requests are re-queued at the tail instead of removed so further
 * response records can arrive.  Returns the request's final error.
 */
484 smb_iod_waitrq(struct smb_rq *rqp)
486 struct smbiod *iod = rqp->sr_vc->vc_iod;
/* Internal request: this IS the iod thread, so pump I/O ourselves. */
490 if (rqp->sr_flags & SMBR_INTERNAL) {
492 smb_iod_sendall(iod);
493 smb_iod_recvall(iod);
494 if (rqp->sr_rpgen != rqp->sr_rplast)
496 tsleep(&iod->iod_flags, PWAIT, "90irq", hz);
498 smb_iod_removerq(rqp);
499 return rqp->sr_lerror;
/* Ordinary request: sleep until a new response generation arrives. */
503 if (rqp->sr_rpgen == rqp->sr_rplast)
504 msleep(&rqp->sr_state, SMBRQ_SLOCKPTR(rqp), PWAIT, "90wrq", 0);
507 error = rqp->sr_lerror;
508 if (rqp->sr_flags & SMBR_MULTIPACKET) {
510 * If request should stay in the list, then reinsert it
511 * at the end of queue so other waiters have chance to concur
514 TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
515 TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
516 SMB_IOD_RQUNLOCK(iod);
518 smb_iod_removerq(rqp);
/*
 * Walk the request list: transmit any unsent requests (holding
 * SMBR_XLOCK around the unlocked send), and time out sent requests
 * that have waited longer than twice the transport timeout.
 * NOTE(review): case labels and some statements are elided in this
 * excerpt (numbering gaps).
 */
524 smb_iod_sendall(struct smbiod *iod)
526 struct smb_vc *vcp = iod->iod_vc;
528 struct timespec ts, tstimeout;
533 * Loop through the list of requests and send them if possible
536 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
537 switch (rqp->sr_state) {
/* XLOCK pins the request while we drop the list lock to send. */
539 rqp->sr_flags |= SMBR_XLOCK;
540 SMB_IOD_RQUNLOCK(iod);
541 herror = smb_iod_sendrq(iod, rqp);
543 rqp->sr_flags &= ~SMBR_XLOCK;
544 if (rqp->sr_flags & SMBR_XLOCKWANT) {
545 rqp->sr_flags &= ~SMBR_XLOCKWANT;
/* Timeout check: allow twice the transport's timeout value. */
550 SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
551 timespecadd(&tstimeout, &tstimeout);
553 timespecsub(&ts, &tstimeout);
554 if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
555 smb_iod_rqprocessed(rqp, ETIMEDOUT);
563 SMB_IOD_RQUNLOCK(iod);
564 if (herror == ENOTCONN)
570 * "main" function for smbiod daemon
/*
 * One iteration of the iod's work loop, called repeatedly from
 * smb_iod_thread: drain the event list (connect/disconnect/tree-connect/
 * shutdown/new-request), then ping the server if the VC has been idle
 * past iod_pingtimo, and finally pump outgoing and incoming traffic.
 * Sync event submitters are woken after their event is processed.
 */
573 smb_iod_main(struct smbiod *iod)
575 /* struct smb_vc *vcp = iod->iod_vc;*/
576 struct smbiod_event *evp;
577 /* struct timespec tsnow;*/
584 * Check all interesting events
588 evp = STAILQ_FIRST(&iod->iod_evlist);
590 SMB_IOD_EVUNLOCK(iod);
593 STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
594 evp->ev_type |= SMBIOD_EV_PROCESSING;
595 SMB_IOD_EVUNLOCK(iod);
596 switch (evp->ev_type & SMBIOD_EV_MASK) {
597 case SMBIOD_EV_CONNECT:
598 iod->iod_state = SMBIOD_ST_RECONNECT;
599 evp->ev_error = smb_iod_connect(iod);
601 case SMBIOD_EV_DISCONNECT:
602 evp->ev_error = smb_iod_disconnect(iod);
604 case SMBIOD_EV_TREECONNECT:
605 evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
607 case SMBIOD_EV_SHUTDOWN:
608 iod->iod_flags |= SMBIOD_SHUTDOWN;
610 case SMBIOD_EV_NEWRQ:
/* Sync submitter is sleeping on evp in smb_iod_request: wake it. */
613 if (evp->ev_type & SMBIOD_EV_SYNC) {
616 SMB_IOD_EVUNLOCK(iod);
/* Keep-alive: echo the server if idle longer than the ping timeout. */
621 if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
623 timespecsub(&tsnow, &iod->iod_pingtimo);
624 if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
625 smb_smb_echo(vcp, &iod->iod_scred);
629 smb_iod_sendall(iod);
630 smb_iod_recvall(iod);
/*
 * Compatibility shim: map kthread_create_compat onto this kernel's
 * thread-creation primitive.
 * NOTE(review): the #if/#else/#endif conditionals that select between
 * these two alternative definitions are elided from this excerpt.
 */
635 #define kthread_create_compat kthread_create2
637 #define kthread_create_compat kthread_create
/*
 * Kernel-thread entry point for an iod.  Builds the iod's credential,
 * then loops calling the work function until SMBIOD_SHUTDOWN is set,
 * sleeping up to iod_sleeptimo ticks between iterations (woken early
 * by smb_iod_wakeup).
 */
642 smb_iod_thread(void *arg)
644 struct smbiod *iod = arg;
647 smb_makescred(&iod->iod_scred, iod->iod_p, NULL);
648 while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
650 SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
651 /* mtx_unlock(&Giant, MTX_DEF);*/
/* Re-check shutdown before sleeping to avoid a needless timeout wait. */
652 if (iod->iod_flags & SMBIOD_SHUTDOWN)
654 tsleep(&iod->iod_flags, PWAIT, "90idle", iod->iod_sleeptimo);
656 /* mtx_lock(&Giant, MTX_DEF);*/
/*
 * Allocate and start an iod for VC vcp: zero-allocate the control
 * structure, initialize its timeouts, locks and queues, then spawn the
 * smbiod kernel thread.  Logs an error if thread creation fails.
 */
661 smb_iod_create(struct smb_vc *vcp)
666 iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
667 iod->iod_id = smb_iod_next++;
668 iod->iod_state = SMBIOD_ST_NOTCONN;
670 iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
671 iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
672 getnanotime(&iod->iod_lastrqsent);
674 smb_sl_init(&iod->iod_rqlock, "90rql");
675 TAILQ_INIT(&iod->iod_rqlist);
676 smb_sl_init(&iod->iod_evlock, "90evl");
677 STAILQ_INIT(&iod->iod_evlist);
678 error = kthread_create_compat(smb_iod_thread, iod, &iod->iod_p,
679 RFNOWAIT, "smbiod%d", iod->iod_id);
681 SMBERROR("can't start smbiod: %d", error);
/*
 * Stop and dismantle an iod: synchronously request the thread to shut
 * down, then destroy its locks.
 * NOTE(review): freeing the iod structure itself is not visible in this
 * excerpt — presumably elided lines handle it.
 */
689 smb_iod_destroy(struct smbiod *iod)
691 smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
692 mtx_destroy(&iod->iod_rqlock);
693 mtx_destroy(&iod->iod_evlock);