2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2007-2009 Google Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following disclaimer
15 * in the documentation and/or other materials provided with the
17 * * Neither the name of Google Inc. nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 * Copyright (C) 2005 Csaba Henk.
34 * All rights reserved.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
45 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 #include <sys/cdefs.h>
59 __FBSDID("$FreeBSD$");
61 #include <sys/types.h>
62 #include <sys/module.h>
63 #include <sys/systm.h>
64 #include <sys/errno.h>
65 #include <sys/param.h>
66 #include <sys/kernel.h>
69 #include <sys/malloc.h>
70 #include <sys/queue.h>
73 #include <sys/mutex.h>
75 #include <sys/mount.h>
78 #include <sys/fcntl.h>
79 #include <sys/sysctl.h>
81 #include <sys/selinfo.h>
86 SDT_PROVIDER_DECLARE(fusefs);
/*
 * dtrace probe fusefs:::device-trace, used throughout this file for
 * diagnostic messages.
89 * arg0: verbosity. Higher numbers give more verbose messages
90 * arg1: Textual message
 */
92 SDT_PROBE_DEFINE2(fusefs, , device, trace, "int", "char*");
/* The single /dev/fuse node, created in fuse_device_init(). */
94 static struct cdev *fuse_dev;
/* Forward declarations of the character-device methods below. */
96 static d_kqfilter_t fuse_device_filter;
97 static d_open_t fuse_device_open;
98 static d_poll_t fuse_device_poll;
99 static d_read_t fuse_device_read;
100 static d_write_t fuse_device_write;
/*
 * Device switch for /dev/fuse.  Per-open state is torn down via the
 * cdevpriv destructor (fdata_dtor), not a d_close method.
 * NOTE(review): this excerpt elides some initializers; verify the full
 * struct in the original file.
 */
102 static struct cdevsw fuse_device_cdevsw = {
103 .d_kqfilter = fuse_device_filter,
104 .d_open = fuse_device_open,
106 .d_poll = fuse_device_poll,
107 .d_read = fuse_device_read,
108 .d_write = fuse_device_write,
109 .d_version = D_VERSION,
/*
 * kqueue filterops for EVFILT_READ on /dev/fuse.  EVFILT_WRITE is not
 * supported (see fuse_device_filter): the device is always writable.
 */
112 static int fuse_device_filt_read(struct knote *kn, long hint);
113 static void fuse_device_filt_detach(struct knote *kn);
115 struct filterops fuse_device_rfiltops = {
117 .f_detach = fuse_device_filt_detach,
118 .f_event = fuse_device_filt_read,
121 /****************************
123 * >>> Fuse device op defs
125 ****************************/
/*
 * cdevpriv destructor: runs when the last reference to an open of
 * /dev/fuse goes away (daemon exit or close).  Marks the session dead,
 * wakes up anybody sleeping in poll()/kevent(), fails every ticket still
 * awaiting an answer with ENOTCONN, and finally drops the fuse_data.
 */
128 fdata_dtor(void *arg)
130 struct fuse_data *fdata;
131 struct fuse_ticket *tick;
/* From here on no new messages will be accepted for this session. */
137 fdata_set_dead(fdata);
140 fuse_lck_mtx_lock(fdata->aw_mtx);
141 /* wake up poll()ers */
142 selwakeuppri(&fdata->ks_rsel, PZERO + 1);
143 /* Don't let syscall handlers wait in vain */
144 while ((tick = fuse_aw_pop(fdata))) {
145 fuse_lck_mtx_lock(tick->tk_aw_mtx);
/* Answer each pending ticket with ENOTCONN so its waiter returns. */
146 fticket_set_answered(tick);
147 tick->tk_aw_errno = ENOTCONN;
149 fuse_lck_mtx_unlock(tick->tk_aw_mtx);
150 FUSE_ASSERT_AW_DONE(tick);
151 fuse_ticket_drop(tick);
153 fuse_lck_mtx_unlock(fdata->aw_mtx);
/* Release our reference; frees fdata if it was the last one. */
156 fdata_trydestroy(fdata);
/*
 * d_kqfilter method: attach a knote to this open of /dev/fuse.
 * Only EVFILT_READ gets a real filter, hooked onto the per-open knlist
 * in ks_rsel; other filters are rejected (elided branch below).
 */
160 fuse_device_filter(struct cdev *dev, struct knote *kn)
162 struct fuse_data *data;
/* Per-open fuse_data is stored as cdevpriv by fuse_device_open(). */
165 error = devfs_get_cdevpriv((void **)&data);
167 /* EVFILT_WRITE is not supported; the device is always ready to write */
168 if (error == 0 && kn->kn_filter == EVFILT_READ) {
169 kn->kn_fop = &fuse_device_rfiltops;
171 knlist_add(&data->ks_rsel.si_note, kn, 0);
173 } else if (error == 0) {
/*
 * kqueue f_detach: unhook the knote from the per-open knlist.
 * kn_hook was set to the fuse_data when the filter was attached.
 */
182 fuse_device_filt_detach(struct knote *kn)
184 struct fuse_data *data;
186 data = (struct fuse_data*)kn->kn_hook;
188 knlist_remove(&data->ks_rsel.si_note, kn, 0);
/*
 * kqueue f_event: report whether /dev/fuse is readable.
 * Readable when the session is dead (reported as EOF with ENODEV in
 * kn_fflags) or when at least one unsent message sits on ms_head.
 * Caller must hold ms_mtx, as asserted below.
 */
193 fuse_device_filt_read(struct knote *kn, long hint)
195 struct fuse_data *data;
198 data = (struct fuse_data*)kn->kn_hook;
201 mtx_assert(&data->ms_mtx, MA_OWNED);
202 if (fdata_get_dead(data)) {
/* Dead session: signal EOF so the daemon's event loop terminates. */
203 kn->kn_flags |= EV_EOF;
204 kn->kn_fflags = ENODEV;
207 } else if (STAILQ_FIRST(&data->ms_head)) {
209 * There is at least one event to read.
210 * TODO: keep a counter of the number of events to read
222 * Resources are set up on a per-open basis
/*
 * d_open method: allocate a fresh fuse_data session for this open and
 * register it as cdevpriv with fdata_dtor as destructor.  On
 * registration failure the allocation is released again (elided branch).
 */
225 fuse_device_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
227 struct fuse_data *fdata;
230 SDT_PROBE2(fusefs, , device, trace, 1, "device open");
/* Session owns a reference to the opener's credentials. */
232 fdata = fdata_alloc(dev, td->td_ucred);
233 error = devfs_set_cdevpriv(fdata, fdata_dtor);
/* cdevpriv registration failed: undo the allocation. */
235 fdata_trydestroy(fdata);
237 SDT_PROBE2(fusefs, , device, trace, 1, "device open success");
/*
 * d_poll method.  Readable when the session is dead or a message is
 * queued on ms_head; otherwise the caller is recorded via selrecord()
 * to be woken by selwakeuppri().  Writing is always possible, so
 * POLLOUT/POLLWRNORM are granted unconditionally.
 */
242 fuse_device_poll(struct cdev *dev, int events, struct thread *td)
244 struct fuse_data *data;
245 int error, revents = 0;
247 error = devfs_get_cdevpriv((void **)&data);
/* On error, report hangup/ready for everything the caller asked for. */
250 (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
252 if (events & (POLLIN | POLLRDNORM)) {
/* ms_mtx protects both the dead flag check and the queue peek. */
253 fuse_lck_mtx_lock(data->ms_mtx);
254 if (fdata_get_dead(data) || STAILQ_FIRST(&data->ms_head))
255 revents |= events & (POLLIN | POLLRDNORM);
/* Nothing to read yet: remember this thread for later wakeup. */
257 selrecord(td, &data->ks_rsel);
258 fuse_lck_mtx_unlock(data->ms_mtx);
260 if (events & (POLLOUT | POLLWRNORM)) {
261 revents |= events & (POLLOUT | POLLWRNORM);
267 * fuse_device_read hangs on the queue of VFS messages.
268 * When it's notified that there is a new one, it picks that and
269 * passes up to the daemon
/*
 * d_read method.  Pops one ticket off the message queue (sleeping in
 * msleep unless O_NONBLOCK) and copies its iovec segments out to the
 * daemon with uiomove().  A dead session yields ENODEV; a daemon whose
 * read buffer is too small for a whole message gets the session killed.
 */
272 fuse_device_read(struct cdev *dev, struct uio *uio, int ioflag)
275 struct fuse_data *data;
276 struct fuse_ticket *tick;
/* Up to three segments per message; NULL-terminated scan below. */
277 void *buf[] = {NULL, NULL, NULL};
281 SDT_PROBE2(fusefs, , device, trace, 1, "fuse device read");
283 err = devfs_get_cdevpriv((void **)&data);
287 fuse_lck_mtx_lock(data->ms_mtx);
289 if (fdata_get_dead(data)) {
290 SDT_PROBE2(fusefs, , device, trace, 2,
291 "we know early on that reader should be kicked so we "
292 "don't wait for news");
293 fuse_lck_mtx_unlock(data->ms_mtx);
/* Queue empty: either return at once or sleep for a message. */
296 if (!(tick = fuse_ms_pop(data))) {
297 /* check if we may block */
298 if (ioflag & O_NONBLOCK) {
299 /* get out of here soon */
300 fuse_lck_mtx_unlock(data->ms_mtx);
/* Interruptible sleep; woken by message arrival or session death. */
303 err = msleep(data, &data->ms_mtx, PCATCH, "fu_msg", 0);
305 fuse_lck_mtx_unlock(data->ms_mtx);
/* Prefer ENODEV over the msleep error if the session died. */
306 return (fdata_get_dead(data) ? ENODEV : err);
/* Retry the pop after being woken. */
308 tick = fuse_ms_pop(data);
313 * We can get here if fuse daemon suddenly terminates,
314 * eg, by being hit by a SIGKILL
315 * -- and some other cases, too, tho not totally clear, when
316 * (cv_signal/wakeup_one signals the whole process ?)
318 SDT_PROBE2(fusefs, , device, trace, 1, "no message on thread");
321 fuse_lck_mtx_unlock(data->ms_mtx);
323 if (fdata_get_dead(data)) {
325 * somebody somewhere -- eg., umount routine --
326 * wants this liaison finished off
328 SDT_PROBE2(fusefs, , device, trace, 2,
329 "reader is to be sacked");
/* We popped a ticket but the session died meanwhile: drop it. */
331 SDT_PROBE2(fusefs, , device, trace, 2, "weird -- "
332 "\"kick\" is set tho there is message");
333 FUSE_ASSERT_MS_DONE(tick);
334 fuse_ticket_drop(tick);
336 return (ENODEV); /* This should make the daemon get off
339 SDT_PROBE2(fusefs, , device, trace, 1,
340 "fuse device read message successfully");
342 KASSERT(tick->tk_ms_bufdata || tick->tk_ms_bufsize == 0,
343 ("non-null buf pointer with positive size"));
/* Gather the message segments according to its type (elided labels). */
345 switch (tick->tk_ms_type) {
/* Header-only message: a single fiov segment. */
347 buf[0] = tick->tk_ms_fiov.base;
348 buflen[0] = tick->tk_ms_fiov.len;
/* Header plus a separate data buffer (e.g. write payload). */
351 buf[0] = tick->tk_ms_fiov.base;
352 buflen[0] = tick->tk_ms_fiov.len;
353 buf[1] = tick->tk_ms_bufdata;
354 buflen[1] = tick->tk_ms_bufsize;
357 panic("unknown message type for fuse_ticket %p", tick);
/* Copy each non-NULL segment out to the daemon. */
360 for (i = 0; buf[i]; i++) {
362 * Why not ban mercilessly stupid daemons who can't keep up
363 * with us? (There is not much use of a partial read here...)
366 * XXX note that in such cases Linux FUSE throws EIO at the
367 * syscall invoker and stands back to the message queue. The
368 * rationale should be made clear (and possibly adopt that
369 * behaviour). Keeping the current scheme at least makes
370 * fallacy as loud as possible...
372 if (uio->uio_resid < buflen[i]) {
/* Daemon's buffer can't hold the message: kill the session. */
373 fdata_set_dead(data);
374 SDT_PROBE2(fusefs, , device, trace, 2,
375 "daemon is stupid, kick it off...");
379 err = uiomove(buf[i], buflen[i], uio);
/* Message fully handed over; release the ticket. */
384 FUSE_ASSERT_MS_DONE(tick);
385 fuse_ticket_drop(tick);
/*
 * Validate a fuse_out_header the daemon just wrote against the size of
 * the accompanying body (uio_resid is what remains after the header was
 * consumed).  Rejects length mismatches and error replies that carry a
 * body; on success converts the Linux-style negative errno to positive.
 */
391 fuse_ohead_audit(struct fuse_out_header *ohead, struct uio *uio)
/* ohead->len must cover header plus the entire remaining body. */
393 if (uio->uio_resid + sizeof(struct fuse_out_header) != ohead->len) {
394 SDT_PROBE2(fusefs, , device, trace, 1,
395 "Format error: body size "
396 "differs from size claimed by header");
/* An error reply must not carry payload. */
399 if (uio->uio_resid && ohead->error) {
400 SDT_PROBE2(fusefs, , device, trace, 1,
401 "Format error: non zero error but message had a body");
404 /* Sanitize the linuxism of negative errnos */
405 ohead->error = -(ohead->error);
/* dtrace probes fired when a reply's ticket lookup misses or hits. */
410 SDT_PROBE_DEFINE1(fusefs, , device, fuse_device_write_missing_ticket,
412 SDT_PROBE_DEFINE1(fusefs, , device, fuse_device_write_found,
413 "struct fuse_ticket*");
415 * fuse_device_write first reads the header sent by the daemon.
416 * If that's OK, looks up ticket/callback node by the unique id seen in header.
417 * If the callback node contains a handler function, the uio is passed over
/*
 * d_write method: the daemon answers a previously read request.  A
 * malformed header (short write or failed audit) kills the session.
 */
421 fuse_device_write(struct cdev *dev, struct uio *uio, int ioflag)
423 struct fuse_out_header ohead;
425 struct fuse_data *data;
426 struct fuse_ticket *tick, *itick, *x_tick;
429 err = devfs_get_cdevpriv((void **)&data);
/* The reply must at least contain a complete fuse_out_header. */
433 if (uio->uio_resid < sizeof(struct fuse_out_header)) {
434 SDT_PROBE2(fusefs, , device, trace, 1,
435 "fuse_device_write got less than a header!");
436 fdata_set_dead(data);
439 if ((err = uiomove(&ohead, sizeof(struct fuse_out_header), uio)) != 0)
443 * We check header information (which is redundant) and compare it
444 * with what we see. If we see some inconsistency we discard the
445 * whole answer and proceed on as if it had never existed. In
446 * particular, no pretender will be woken up, regardless the
447 * "unique" value in the header.
449 if ((err = fuse_ohead_audit(&ohead, uio))) {
450 fdata_set_dead(data);
453 /* Pass stuff over to callback if there is one installed */
455 /* Looking for ticket with the unique id of header */
456 fuse_lck_mtx_lock(data->aw_mtx);
457 TAILQ_FOREACH_SAFE(tick, &data->aw_head, tk_aw_link,
459 if (tick->tk_unique == ohead.unique) {
460 SDT_PROBE1(fusefs, , device, fuse_device_write_found,
/* Take the matched ticket off the answer-wait queue. */
463 fuse_aw_remove(tick);
467 if (found && tick->irq_unique > 0) {
469 * Discard the FUSE_INTERRUPT ticket that tried to interrupt
/* Find and remove the interrupt ticket paired with this request. */
472 TAILQ_FOREACH_SAFE(itick, &data->aw_head, tk_aw_link,
474 if (itick->tk_unique == tick->irq_unique) {
475 fuse_aw_remove(itick);
479 tick->irq_unique = 0;
481 fuse_lck_mtx_unlock(data->aw_mtx);
484 if (tick->tk_aw_handler) {
486 * We found a callback with proper handler. In this
487 * case the out header will be 0wnd by the callback,
488 * so the fun of freeing that is left for her.
489 * (Then, by all chance, she'll just get that's done
490 * via ticket_drop(), so no manual mucking
493 SDT_PROBE2(fusefs, , device, trace, 1,
494 "pass ticket to a callback");
/* Hand the header to the ticket, then let the handler consume uio. */
495 memcpy(&tick->tk_aw_ohead, &ohead, sizeof(ohead));
496 err = tick->tk_aw_handler(tick, uio);
498 /* pretender doesn't wanna do anything with answer */
499 SDT_PROBE2(fusefs, , device, trace, 1,
500 "stuff devalidated, so we drop it");
504 * As aw_mtx was not held during the callback execution the
505 * ticket may have been inserted again. However, this is safe
506 * because fuse_ticket_drop() will deal with refcount anyway.
508 fuse_ticket_drop(tick);
510 /* no callback at all! */
511 SDT_PROBE1(fusefs, , device, fuse_device_write_missing_ticket,
513 if (ohead.error == EAGAIN) {
515 * This was probably a response to a FUSE_INTERRUPT
516 * operation whose original operation is already
517 * complete. We can't store FUSE_INTERRUPT tickets
518 * indefinitely because their responses are optional.
519 * So we delete them when the original operation
520 * completes. And sadly the fuse_header_out doesn't
521 * identify the opcode, so we have to guess.
/*
 * Module init: create the world-read/writable /dev/fuse node.
 * Mode 0666 is intentional so unprivileged daemons can use FUSE;
 * access control happens at mount time, not here.
 */
533 fuse_device_init(void)
536 fuse_dev = make_dev(&fuse_device_cdevsw, 0, UID_ROOT, GID_OPERATOR,
537 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH, "fuse");
/* make_dev failure is reported to the caller (elided return). */
538 if (fuse_dev == NULL)
/*
 * Module teardown: remove /dev/fuse.  Init must have succeeded first,
 * hence the MPASS.
 */
544 fuse_device_destroy(void)
547 MPASS(fuse_dev != NULL);
548 destroy_dev(fuse_dev);