2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2007-2009 Google Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following disclaimer
15 * in the documentation and/or other materials provided with the
17 * * Neither the name of Google Inc. nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 * Copyright (C) 2005 Csaba Henk.
34 * All rights reserved.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
45 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
58 #include <sys/cdefs.h>
59 __FBSDID("$FreeBSD$");
61 #include <sys/types.h>
62 #include <sys/module.h>
63 #include <sys/systm.h>
64 #include <sys/errno.h>
65 #include <sys/param.h>
66 #include <sys/kernel.h>
69 #include <sys/malloc.h>
70 #include <sys/queue.h>
73 #include <sys/mutex.h>
75 #include <sys/mount.h>
77 #include <sys/fcntl.h>
78 #include <sys/sysctl.h>
80 #include <sys/selinfo.h>
85 #define FUSE_DEBUG_MODULE DEVICE
86 #include "fuse_debug.h"
/* The single /dev/fuse device node, created in fuse_device_init(). */
static struct cdev *fuse_dev;

/* Forward declarations for the cdevsw entry points defined below. */
static d_open_t fuse_device_open;
static d_close_t fuse_device_close;
static d_poll_t fuse_device_poll;
static d_read_t fuse_device_read;
static d_write_t fuse_device_write;

/*
 * Character-device switch wiring the fuse entry points into devfs.
 * NOTE(review): the closing "};" of this initializer (and any other
 * d_* fields, e.g. d_name) is not visible in this chunk of the file.
 */
static struct cdevsw fuse_device_cdevsw = {
.d_open = fuse_device_open,
.d_close = fuse_device_close,
.d_poll = fuse_device_poll,
.d_read = fuse_device_read,
.d_write = fuse_device_write,
.d_version = D_VERSION,
106 /****************************
108 * >>> Fuse device op defs
110 ****************************/
/*
 * cdevpriv destructor: releases the reference on the per-open
 * fuse_data that fuse_device_open() attached via devfs_set_cdevpriv().
 * NOTE(review): the function header, braces, and the statement
 * assigning 'arg' to 'fdata' are not visible in this chunk.
 */
fdata_dtor(void *arg)
struct fuse_data *fdata;
fdata_trydestroy(fdata);
/*
 * Resources are set up on a per-open basis.
 *
 * d_open handler: allocate a fresh fuse_data for this open of
 * /dev/fuse (tagged with the opening thread's credential) and attach
 * it as cdevpriv so fdata_dtor() tears it down when the last file
 * descriptor reference goes away.
 */
fuse_device_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
struct fuse_data *fdata;

FS_DEBUG("device %p\n", dev);

/* Per-open session state, keyed to the descriptor via cdevpriv. */
fdata = fdata_alloc(dev, td->td_ucred);
error = devfs_set_cdevpriv(fdata, fdata_dtor);
/*
 * NOTE(review): the guard (presumably "if (error)") around this
 * destroy-on-failure path is not visible in this chunk — confirm
 * against the full file.
 */
fdata_trydestroy(fdata);
FS_DEBUG("%s: device opened by thread %d.\n", dev->si_name,
/*
 * d_close handler: mark the session dead, wake any poll()ers, and
 * fail every ticket still parked on the answer-wait queue with
 * ENOTCONN so blocked syscall handlers return instead of waiting on
 * a daemon that is gone.
 */
fuse_device_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
struct fuse_data *data;
struct fuse_ticket *tick;

error = devfs_get_cdevpriv((void **)&data);
panic("no fuse data upon fuse device close");
/* From here on, readers/writers see the connection as torn down. */
fdata_set_dead(data);

fuse_lck_mtx_lock(data->aw_mtx);
/* wake up poll()ers */
selwakeuppri(&data->ks_rsel, PZERO + 1);
/* Don't let syscall handlers wait in vain */
while ((tick = fuse_aw_pop(data))) {
fuse_lck_mtx_lock(tick->tk_aw_mtx);
/* Mark answered with ENOTCONN so the waiter wakes and bails out. */
fticket_set_answered(tick);
tick->tk_aw_errno = ENOTCONN;
fuse_lck_mtx_unlock(tick->tk_aw_mtx);
FUSE_ASSERT_AW_DONE(tick);
/* Drop the queue's reference on the ticket. */
fuse_ticket_drop(tick);
fuse_lck_mtx_unlock(data->aw_mtx);
FS_DEBUG("%s: device closed by thread %d.\n", dev->si_name, td->td_tid);
/*
 * d_poll handler: the device is readable when the session is dead or
 * at least one message is queued for the daemon (ms_head non-empty);
 * it is always considered writable.
 */
fuse_device_poll(struct cdev *dev, int events, struct thread *td)
struct fuse_data *data;
int error, revents = 0;

error = devfs_get_cdevpriv((void **)&data);
(POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));

if (events & (POLLIN | POLLRDNORM)) {
fuse_lck_mtx_lock(data->ms_mtx);
/* Readable: connection torn down, or a message is waiting. */
if (fdata_get_dead(data) || STAILQ_FIRST(&data->ms_head))
revents |= events & (POLLIN | POLLRDNORM);
/* Nothing yet: register for wakeup via selwakeuppri() on close/send. */
selrecord(td, &data->ks_rsel);
fuse_lck_mtx_unlock(data->ms_mtx);

/* Writes are never blocked, so report writable unconditionally. */
if (events & (POLLOUT | POLLWRNORM)) {
revents |= events & (POLLOUT | POLLWRNORM);
/*
 * fuse_device_read hangs on the queue of VFS messages.
 * When it's notified that there is a new one, it picks that and
 * passes it up to the daemon.
 *
 * d_read handler: pop the next ticket from the message queue
 * (blocking in msleep() unless O_NONBLOCK), copy its header iovec —
 * and, for the buffered message type, its data buffer — out to the
 * daemon with uiomove(), then drop the ticket.  Returns ENODEV once
 * the session has been marked dead.
 */
fuse_device_read(struct cdev *dev, struct uio *uio, int ioflag)
struct fuse_data *data;
struct fuse_ticket *tick;
void *buf[] = {NULL, NULL, NULL};

FS_DEBUG("fuse device being read on thread %d\n", uio->uio_td->td_tid);

err = devfs_get_cdevpriv((void **)&data);

fuse_lck_mtx_lock(data->ms_mtx);

if (fdata_get_dead(data)) {
FS_DEBUG2G("we know early on that reader should be kicked so we don't wait for news\n");
fuse_lck_mtx_unlock(data->ms_mtx);
if (!(tick = fuse_ms_pop(data))) {
/* check if we may block */
if (ioflag & O_NONBLOCK) {
/* get out of here soon (non-blocking read, no message queued) */
fuse_lck_mtx_unlock(data->ms_mtx);
/* Sleep interruptibly (PCATCH) until a message is queued. */
err = msleep(data, &data->ms_mtx, PCATCH, "fu_msg", 0);
fuse_lck_mtx_unlock(data->ms_mtx);
return (fdata_get_dead(data) ? ENODEV : err);
tick = fuse_ms_pop(data);
/*
 * We can get here if fuse daemon suddenly terminates,
 * e.g. by being hit by a SIGKILL
 * -- and some other cases, too, though not totally clear, when
 * (cv_signal/wakeup_one signals the whole process?)
 */
FS_DEBUG("no message on thread #%d\n", uio->uio_td->td_tid);
fuse_lck_mtx_unlock(data->ms_mtx);

if (fdata_get_dead(data)) {
/*
 * somebody somewhere -- e.g., the umount routine --
 * wants this liaison finished off
 */
FS_DEBUG2G("reader is to be sacked\n");
FS_DEBUG2G("weird -- \"kick\" is set though there is a message\n");
FUSE_ASSERT_MS_DONE(tick);
fuse_ticket_drop(tick);
return (ENODEV); /* This should make the daemon get off of us */
FS_DEBUG("message got on thread #%d\n", uio->uio_td->td_tid);

/* A non-empty buffer must come with a non-null pointer. */
KASSERT(tick->tk_ms_bufdata || tick->tk_ms_bufsize == 0,
("non-null buf pointer with positive size"));

/*
 * Gather the segments to copy out: the fiov header always, plus the
 * separate data buffer for the buffered message type.
 * NOTE(review): the case labels of this switch are not visible in
 * this chunk.
 */
switch (tick->tk_ms_type) {
buf[0] = tick->tk_ms_fiov.base;
buflen[0] = tick->tk_ms_fiov.len;
buf[0] = tick->tk_ms_fiov.base;
buflen[0] = tick->tk_ms_fiov.len;
buf[1] = tick->tk_ms_bufdata;
buflen[1] = tick->tk_ms_bufsize;
panic("unknown message type for fuse_ticket %p", tick);
for (i = 0; buf[i]; i++) {
/*
 * Why not ban mercilessly stupid daemons who can't keep up
 * with us? (There is not much use of a partial read here...)
 *
 * XXX note that in such cases Linux FUSE throws EIO at the
 * syscall invoker and stands back to the message queue. The
 * rationale should be made clear (and possibly adopt that
 * behaviour). Keeping the current scheme at least makes the
 * fallacy as loud as possible...
 */
if (uio->uio_resid < buflen[i]) {
/* Daemon read with too small a buffer: kill the session. */
fdata_set_dead(data);
FS_DEBUG2G("daemon is stupid, kick it off...\n");
err = uiomove(buf[i], buflen[i], uio);

FUSE_ASSERT_MS_DONE(tick);
fuse_ticket_drop(tick);
/*
 * Validate the out header the daemon sent us: the header's 'len' must
 * equal header size plus remaining body bytes, and a non-zero error
 * code must not be accompanied by a body.  On success the (Linux-style
 * negative) errno in the header is flipped to a positive FreeBSD errno.
 */
fuse_ohead_audit(struct fuse_out_header *ohead, struct uio *uio)
FS_DEBUG("Out header -- len: %i, error: %i, unique: %llu; iovecs: %d\n",
ohead->len, ohead->error, (unsigned long long)ohead->unique,

/* Claimed total length must match what is actually being written. */
if (uio->uio_resid + sizeof(struct fuse_out_header) != ohead->len) {
FS_DEBUG("Format error: body size differs from size claimed by header\n");
/* An error reply must not carry a payload. */
if (uio->uio_resid && ohead->error) {
FS_DEBUG("Format error: non zero error but message had a body\n");
/* Sanitize the linuxism of negative errnos */
ohead->error = -(ohead->error);
/*
 * fuse_device_write first reads the header sent by the daemon.
 * If that's OK, looks up ticket/callback node by the unique id seen in
 * the header.  If the callback node contains a handler function, the
 * uio is passed over to it for consuming the reply body.
 */
fuse_device_write(struct cdev *dev, struct uio *uio, int ioflag)
struct fuse_out_header ohead;
struct fuse_data *data;
struct fuse_ticket *tick, *x_tick;

FS_DEBUG("resid: %zd, iovcnt: %d, thread: %d\n",
uio->uio_resid, uio->uio_iovcnt, uio->uio_td->td_tid);

err = devfs_get_cdevpriv((void **)&data);

/* A write smaller than the header cannot be a valid reply. */
if (uio->uio_resid < sizeof(struct fuse_out_header)) {
FS_DEBUG("got less than a header!\n");
fdata_set_dead(data);
if ((err = uiomove(&ohead, sizeof(struct fuse_out_header), uio)) != 0)
/*
 * We check header information (which is redundant) and compare it
 * with what we see. If we see some inconsistency we discard the
 * whole answer and proceed on as if it had never existed. In
 * particular, no pretender will be woken up, regardless of the
 * "unique" value in the header.
 */
if ((err = fuse_ohead_audit(&ohead, uio))) {
fdata_set_dead(data);
/* Pass stuff over to callback if there is one installed */

/* Looking for ticket with the unique id of header */
fuse_lck_mtx_lock(data->aw_mtx);
TAILQ_FOREACH_SAFE(tick, &data->aw_head, tk_aw_link,
FS_DEBUG("bumped into callback #%llu\n",
(unsigned long long)tick->tk_unique);
if (tick->tk_unique == ohead.unique) {
/* Matched: unlink from the answer-wait queue before use. */
fuse_aw_remove(tick);
fuse_lck_mtx_unlock(data->aw_mtx);

if (tick->tk_aw_handler) {
/*
 * We found a callback with proper handler. In this
 * case the out header will be owned by the callback,
 * so the fun of freeing that is left for her.
 * (Then, by all chance, she'll just get that done
 * via ticket_drop(), so no manual mucking.)
 */
FS_DEBUG("pass ticket to a callback\n");
memcpy(&tick->tk_aw_ohead, &ohead, sizeof(ohead));
err = tick->tk_aw_handler(tick, uio);
/* pretender doesn't wanna do anything with answer */
FS_DEBUG("stuff devalidated, so we drop it\n");
/*
 * As aw_mtx was not held during the callback execution the
 * ticket may have been inserted again. However, this is safe
 * because fuse_ticket_drop() will deal with refcount anyway.
 */
fuse_ticket_drop(tick);
/* no callback at all! */
FS_DEBUG("erhm, no handler for this response\n");
/*
 * Module initialization: create the /dev/fuse node with owner root,
 * group operator, and mode 0660 (rw for owner and group).
 */
fuse_device_init(void)
fuse_dev = make_dev(&fuse_device_cdevsw, 0, UID_ROOT, GID_OPERATOR,
S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP, "fuse");
/* NOTE(review): the error-return taken on NULL is not visible here. */
if (fuse_dev == NULL)
/*
 * Module teardown: destroy the /dev/fuse node created by
 * fuse_device_init().  The node must exist at this point.
 */
fuse_device_destroy(void)
MPASS(fuse_dev != NULL);
destroy_dev(fuse_dev);