2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * @(#)vnode.h 8.7 (Berkeley) 2/4/94
38 #include <sys/bufobj.h>
39 #include <sys/queue.h>
41 #include <sys/lockmgr.h>
42 #include <sys/mutex.h>
43 #include <sys/rangelock.h>
44 #include <sys/selinfo.h>
48 #include <sys/_seqc.h>
51 * The vnode is the focus of all file activity in UNIX. There is a
52 * unique vnode allocated for each active file, each current directory,
53 * each mounted-on file, text file, and the root.
57 * Vnode types. VNON means no type.
59 enum vtype { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO, VBAD,
/* Reference state handed from vget_prep*() to vget_finish*()/vget_abort(). */
62 enum vgetstate { VGET_NONE, VGET_HOLDCNT, VGET_USECOUNT };
64 * Each underlying filesystem allocates its own private area and hangs
65 * it from v_data. If non-null, this area is freed in getnewvnode().
71 struct mtx vpi_lock; /* lock to protect below */
72 struct selinfo vpi_selinfo; /* identity of poller(s) */
73 short vpi_events; /* what they are looking for */
74 short vpi_revents; /* what has happened */
78 * Reading or writing any of these items requires holding the appropriate lock.
83 * l - mp mnt_listmtx or freelist mutex
84 * I - updated with atomics, 0->1 and 1->0 transitions with interlock held
85 * m - mount point interlock
87 * u - Only a reference to the vnode is needed to read.
90 * Vnodes may be found on many lists. The general way to deal with operating
91 * on a vnode that is on a list is:
92 * 1) Lock the list and find the vnode.
93 * 2) Lock interlock so that the vnode does not go away.
94 * 3) Unlock the list to avoid lock order reversals.
95 * 4) vget with LK_INTERLOCK and check for ENOENT, or
96 * 5) Check for DOOMED if the vnode lock is not required.
97 * 6) Perform your operation, then vput().
100 #if defined(_KERNEL) || defined(_KVM_VNODE)
104 * Fields which define the identity of the vnode. These fields are
105 * owned by the filesystem (XXX: and vgone() ?)
107 enum vtype v_type:8; /* u vnode type */
108 short v_irflag; /* i frequently read flags */
109 seqc_t v_seqc; /* i modification count */
110 struct vop_vector *v_op; /* u vnode operations vector */
111 void *v_data; /* u private data for fs */
114 * Filesystem instance stuff
116 struct mount *v_mount; /* u ptr to vfs we are in */
117 TAILQ_ENTRY(vnode) v_nmntvnodes; /* m vnodes for mount point */
120 * Type specific fields, only one applies to any given vnode.
123 struct mount *v_mountedhere; /* v ptr to mountpoint (VDIR) */
124 struct unpcb *v_unpcb; /* v unix domain net (VSOCK) */
125 struct cdev *v_rdev; /* v device (VCHR, VBLK) */
126 struct fifoinfo *v_fifoinfo; /* v fifo (VFIFO) */
130 * vfs_hash: (mount + inode) -> vnode hash. The hash value
131 * itself is grouped with other int fields, to avoid padding.
133 LIST_ENTRY(vnode) v_hashlist;
136 * VFS_namecache stuff
138 LIST_HEAD(, namecache) v_cache_src; /* c Cache entries from us */
139 TAILQ_HEAD(, namecache) v_cache_dst; /* c Cache entries to us */
140 struct namecache *v_cache_dd; /* c Cache entry for .. vnode */
145 struct lock v_lock; /* u (if fs don't have one) */
146 struct mtx v_interlock; /* lock for "i" things */
147 struct lock *v_vnlock; /* u pointer to vnode lock */
150 * The machinery of being a vnode
152 TAILQ_ENTRY(vnode) v_vnodelist; /* l vnode lists */
153 TAILQ_ENTRY(vnode) v_lazylist; /* l vnode lazy list */
154 struct bufobj v_bufobj; /* * Buffer cache object */
157 * Hooks for various subsystems and features.
159 struct vpollinfo *v_pollinfo; /* i Poll events, p for *v_pi */
160 struct label *v_label; /* MAC label for vnode */
161 struct lockf *v_lockf; /* Byte-level advisory lock list */
162 struct rangelock v_rl; /* Byte-range lock */
167 daddr_t v_cstart; /* v start block of cluster */
168 daddr_t v_lasta; /* v last allocation */
169 daddr_t v_lastw; /* v last write */
170 int v_clen; /* v length of cur. cluster */
172 u_int v_holdcnt; /* I prevents recycling. */
173 u_int v_usecount; /* I ref count of users */
174 u_int v_iflag; /* i vnode flags (see below) */
175 u_int v_vflag; /* v vnode flags */
176 u_short v_mflag; /* l mnt-specific vnode flags */
177 short v_dbatchcpu; /* i LRU requeue deferral batch */
178 int v_writecount; /* I ref count of writers or
179 (negative) text users */
180 int v_seqc_users; /* i modifications pending */
184 #endif /* defined(_KERNEL) || defined(_KVM_VNODE) */
186 #define bo2vnode(bo) __containerof((bo), struct vnode, v_bufobj)
188 /* XXX: These are temporary to avoid a source sweep at this time */
189 #define v_object v_bufobj.bo_object
192 * Userland version of struct vnode, for sysctl.
195 size_t xv_size; /* sizeof(struct xvnode) */
196 void *xv_vnode; /* address of real vnode */
197 u_long xv_flag; /* vnode vflags */
198 int xv_usecount; /* reference count of users */
199 int xv_writecount; /* reference count of writers */
200 int xv_holdcnt; /* page & buffer references */
201 u_long xv_id; /* capability identifier */
202 void *xv_mount; /* address of parent mount */
203 long xv_numoutput; /* num of writes in progress */
204 enum vtype xv_type; /* vnode type */
206 void *xvu_socket; /* unpcb, if VSOCK */
207 void *xvu_fifo; /* fifo, if VFIFO */
208 dev_t xvu_rdev; /* maj/min, if VBLK/VCHR */
210 dev_t xvu_dev; /* device, if VDIR/VREG/VLNK */
211 ino_t xvu_ino; /* id, if VDIR/VREG/VLNK */
215 #define xv_socket xv_un.xvu_socket
216 #define xv_fifo xv_un.xvu_fifo
217 #define xv_rdev xv_un.xvu_rdev
218 #define xv_dev xv_un.xv_uns.xvu_dev
219 #define xv_ino xv_un.xv_uns.xvu_ino
221 /* We don't need to lock the knlist */
222 #define VN_KNLIST_EMPTY(vp) ((vp)->v_pollinfo == NULL || \
223 KNLIST_EMPTY(&(vp)->v_pollinfo->vpi_selinfo.si_note))
225 #define VN_KNOTE(vp, b, a) \
227 if (!VN_KNLIST_EMPTY(vp)) \
228 KNOTE(&vp->v_pollinfo->vpi_selinfo.si_note, (b), \
229 (a) | KNF_NOKQLOCK); \
231 #define VN_KNOTE_LOCKED(vp, b) VN_KNOTE(vp, b, KNF_LISTLOCKED)
232 #define VN_KNOTE_UNLOCKED(vp, b) VN_KNOTE(vp, b, 0)
236 * VI flags are protected by interlock and live in v_iflag
237 * VV flags are protected by the vnode lock and live in v_vflag
239 * VIRF_DOOMED is doubly protected by the interlock and vnode lock. Both
240 * are required for writing but the status may be checked with either.
242 #define VHOLD_NO_SMR (1<<29) /* Disable vhold_smr */
243 #define VHOLD_ALL_FLAGS (VHOLD_NO_SMR)
245 #define VIRF_DOOMED 0x0001 /* This vnode is being recycled */
247 #define VI_TEXT_REF 0x0001 /* Text ref grabbed use ref */
248 #define VI_MOUNT 0x0020 /* Mount in progress */
249 #define VI_DOINGINACT 0x0800 /* VOP_INACTIVE is in progress */
250 #define VI_OWEINACT 0x1000 /* Need to call inactive */
251 #define VI_DEFINACT 0x2000 /* deferred inactive */
253 #define VV_ROOT 0x0001 /* root of its filesystem */
254 #define VV_ISTTY 0x0002 /* vnode represents a tty */
255 #define VV_NOSYNC 0x0004 /* unlinked, stop syncing */
256 #define VV_ETERNALDEV 0x0008 /* device that is never destroyed */
257 #define VV_CACHEDLABEL 0x0010 /* Vnode has valid cached MAC label */
258 #define VV_VMSIZEVNLOCK 0x0020 /* object size check requires vnode lock */
259 #define VV_COPYONWRITE 0x0040 /* vnode is doing copy-on-write */
260 #define VV_SYSTEM 0x0080 /* vnode being used by kernel */
261 #define VV_PROCDEP 0x0100 /* vnode is process dependent */
262 #define VV_NOKNOTE 0x0200 /* don't activate knotes on this vnode */
263 #define VV_DELETED 0x0400 /* should be removed */
264 #define VV_MD 0x0800 /* vnode backs the md device */
265 #define VV_FORCEINSMQ 0x1000 /* force the insmntque to succeed */
266 #define VV_READLINK 0x2000 /* fdescfs linux vnode */
268 #define VMP_LAZYLIST 0x0001 /* Vnode is on mnt's lazy list */
271 * Vnode attributes. A field value of VNOVAL represents a field whose value
272 * is unavailable (getattr) or which is not to be changed (setattr).
275 enum vtype va_type; /* vnode type (for create) */
276 u_short va_mode; /* files access mode and type */
278 uid_t va_uid; /* owner user id */
279 gid_t va_gid; /* owner group id */
280 nlink_t va_nlink; /* number of references to file */
281 dev_t va_fsid; /* filesystem id */
282 ino_t va_fileid; /* file id */
283 u_quad_t va_size; /* file size in bytes */
284 long va_blocksize; /* blocksize preferred for i/o */
285 struct timespec va_atime; /* time of last access */
286 struct timespec va_mtime; /* time of last modification */
287 struct timespec va_ctime; /* time file changed */
288 struct timespec va_birthtime; /* time file created */
289 u_long va_gen; /* generation number of file */
290 u_long va_flags; /* flags defined for file */
291 dev_t va_rdev; /* device the special file represents */
292 u_quad_t va_bytes; /* bytes of disk space held by file */
293 u_quad_t va_filerev; /* file modification number */
294 u_int va_vaflags; /* operations flags, see below */
295 long va_spare; /* remain quad aligned */
299 * Flags for va_vaflags.
301 #define VA_UTIMES_NULL 0x01 /* utimes argument was NULL */
302 #define VA_EXCLUSIVE 0x02 /* exclusive create request */
303 #define VA_SYNC 0x04 /* O_SYNC truncation */
306 * Flags for ioflag. (high 16 bits used to ask for read-ahead and
307 * help with write clustering)
308 * NB: IO_NDELAY and IO_DIRECT are linked to fcntl.h
310 #define IO_UNIT 0x0001 /* do I/O as atomic unit */
311 #define IO_APPEND 0x0002 /* append write to end */
312 #define IO_NDELAY 0x0004 /* FNDELAY flag set in file table */
313 #define IO_NODELOCKED 0x0008 /* underlying node already locked */
314 #define IO_ASYNC 0x0010 /* bawrite rather then bdwrite */
315 #define IO_VMIO 0x0020 /* data already in VMIO space */
316 #define IO_INVAL 0x0040 /* invalidate after I/O */
317 #define IO_SYNC 0x0080 /* do I/O synchronously */
318 #define IO_DIRECT 0x0100 /* attempt to bypass buffer cache */
319 #define IO_NOREUSE 0x0200 /* VMIO data won't be reused */
320 #define IO_EXT 0x0400 /* operate on external attributes */
321 #define IO_NORMAL 0x0800 /* operate on regular data */
322 #define IO_NOMACCHECK 0x1000 /* MAC checks unnecessary */
323 #define IO_BUFLOCKED 0x2000 /* ffs flag; indir buf is locked */
324 #define IO_RANGELOCKED 0x4000 /* range locked */
326 #define IO_SEQMAX 0x7F /* seq heuristic max value */
327 #define IO_SEQSHIFT 16 /* seq heuristic in upper 16 bits */
330 * Flags for accmode_t.
332 #define VEXEC 000000000100 /* execute/search permission */
333 #define VWRITE 000000000200 /* write permission */
334 #define VREAD 000000000400 /* read permission */
335 #define VADMIN 000000010000 /* being the file owner */
336 #define VAPPEND 000000040000 /* permission to write/append */
338 * VEXPLICIT_DENY makes VOP_ACCESSX(9) return EPERM or EACCES only
339 * if permission was denied explicitly, by a "deny" rule in NFSv4 ACL,
340 * and 0 otherwise. This never happens with ordinary unix access rights
341 * or POSIX.1e ACLs. Obviously, VEXPLICIT_DENY must be OR-ed with
342 * some other V* constant.
344 #define VEXPLICIT_DENY 000000100000
345 #define VREAD_NAMED_ATTRS 000000200000 /* not used */
346 #define VWRITE_NAMED_ATTRS 000000400000 /* not used */
347 #define VDELETE_CHILD 000001000000
348 #define VREAD_ATTRIBUTES 000002000000 /* permission to stat(2) */
349 #define VWRITE_ATTRIBUTES 000004000000 /* change {m,c,a}time */
350 #define VDELETE 000010000000
351 #define VREAD_ACL 000020000000 /* read ACL and file mode */
352 #define VWRITE_ACL 000040000000 /* change ACL and/or file mode */
353 #define VWRITE_OWNER 000100000000 /* change file owner */
354 #define VSYNCHRONIZE 000200000000 /* not used */
355 #define VCREAT 000400000000 /* creating new file */
356 #define VVERIFY 001000000000 /* verification required */
359 * Permissions that were traditionally granted only to the file owner.
361 #define VADMIN_PERMS (VADMIN | VWRITE_ATTRIBUTES | VWRITE_ACL | \
365 * Permissions that were traditionally granted to everyone.
367 #define VSTAT_PERMS (VREAD_ATTRIBUTES | VREAD_ACL)
370 * Permissions that allow to change the state of the file in any way.
372 #define VMODIFY_PERMS (VWRITE | VAPPEND | VADMIN_PERMS | VDELETE_CHILD | \
376 * Token indicating no attribute value yet assigned.
381 * LK_TIMELOCK timeout for vnode locks (used mainly by the pageout daemon)
383 #define VLKTIMEOUT (hz / 20 + 1)
387 #ifdef MALLOC_DECLARE
388 MALLOC_DECLARE(M_VNODE);
391 extern u_int ncsizefactor;
394 * Convert between vnode types and inode formats (since POSIX.1
395 * defines mode word of stat structure in terms of inode formats).
397 extern enum vtype iftovt_tab[];
398 extern int vttoif_tab[];
399 #define IFTOVT(mode) (iftovt_tab[((mode) & S_IFMT) >> 12])
400 #define VTTOIF(indx) (vttoif_tab[(int)(indx)])
401 #define MAKEIMODE(indx, mode) (int)(VTTOIF(indx) | (mode))
404 * Flags to various vnode functions.
406 #define SKIPSYSTEM 0x0001 /* vflush: skip vnodes marked VSYSTEM */
407 #define FORCECLOSE 0x0002 /* vflush: force file closure */
408 #define WRITECLOSE 0x0004 /* vflush: only close writable files */
409 #define EARLYFLUSH 0x0008 /* vflush: early call for ffs_flushfiles */
410 #define V_SAVE 0x0001 /* vinvalbuf: sync file first */
411 #define V_ALT 0x0002 /* vinvalbuf: invalidate only alternate bufs */
412 #define V_NORMAL 0x0004 /* vinvalbuf: invalidate only regular bufs */
413 #define V_CLEANONLY 0x0008 /* vinvalbuf: invalidate only clean bufs */
414 #define V_VMIO 0x0010 /* vinvalbuf: called during pageout */
415 #define V_ALLOWCLEAN 0x0020 /* vinvalbuf: allow clean buffers after flush */
416 #define REVOKEALL 0x0001 /* vop_revoke: revoke all aliases */
417 #define V_WAIT 0x0001 /* vn_start_write: sleep for suspend */
418 #define V_NOWAIT 0x0002 /* vn_start_write: don't sleep for suspend */
419 #define V_XSLEEP 0x0004 /* vn_start_write: just return after sleep */
420 #define V_MNTREF 0x0010 /* vn_start_write: mp is already ref-ed */
422 #define VR_START_WRITE 0x0001 /* vfs_write_resume: start write atomically */
423 #define VR_NO_SUSPCLR 0x0002 /* vfs_write_resume: do not clear suspension */
425 #define VS_SKIP_UNMOUNT 0x0001 /* vfs_write_suspend: fail if the
426 filesystem is being unmounted */
428 #define VREF(vp) vref(vp)
431 #define VATTR_NULL(vap) vattr_null(vap)
433 #define VATTR_NULL(vap) (*(vap) = va_null) /* initialize a vattr */
434 #endif /* DIAGNOSTIC */
436 #define NULLVP ((struct vnode *)NULL)
441 extern struct vnode *rootvnode; /* root (i.e. "/") vnode */
442 extern struct mount *rootdevmp; /* "/dev" mount */
443 extern u_long desiredvnodes; /* number of vnodes desired */
444 extern struct uma_zone *namei_zone;
445 extern struct vattr va_null; /* predefined null vattr structure */
/*
 * Vnode interlock helpers: thin wrappers around the mtx(9) primitives
 * operating on v_interlock (the lock protecting the "i" fields).
 */
447 #define VI_LOCK(vp) mtx_lock(&(vp)->v_interlock)
448 #define VI_LOCK_FLAGS(vp, flags) mtx_lock_flags(&(vp)->v_interlock, (flags))
449 #define VI_TRYLOCK(vp) mtx_trylock(&(vp)->v_interlock)
450 #define VI_UNLOCK(vp) mtx_unlock(&(vp)->v_interlock)
451 #define VI_MTX(vp) (&(vp)->v_interlock)
/* Adjust recursion / shared-acquisition policy of the vnode lock (v_vnlock). */
453 #define VN_LOCK_AREC(vp) lockallowrecurse((vp)->v_vnlock)
454 #define VN_LOCK_ASHARE(vp) lockallowshare((vp)->v_vnlock)
455 #define VN_LOCK_DSHARE(vp) lockdisableshare((vp)->v_vnlock)
460 * Mods for extensibility.
464 * Flags for vdesc_flags:
466 #define VDESC_MAX_VPS 16
467 /* Low order 16 flag bits are reserved for willrele flags for vp arguments. */
468 #define VDESC_VP0_WILLRELE 0x0001
469 #define VDESC_VP1_WILLRELE 0x0002
470 #define VDESC_VP2_WILLRELE 0x0004
471 #define VDESC_VP3_WILLRELE 0x0008
474 * A generic structure.
475 * This can be used by bypass routines to identify generic arguments.
477 struct vop_generic_args {
478 struct vnodeop_desc *a_desc;
479 /* other random data follows, presumably */
482 typedef int vop_bypass_t(struct vop_generic_args *);
485 * VDESC_NO_OFFSET is used to identify the end of the offset list
486 * and in places where no such field exists.
488 #define VDESC_NO_OFFSET -1
491 * This structure describes the vnode operation taking place.
493 struct vnodeop_desc {
494 char *vdesc_name; /* a readable name for debugging */
495 int vdesc_flags; /* VDESC_* flags */
496 int vdesc_vop_offset;
497 vop_bypass_t *vdesc_call; /* Function to call */
500 * These ops are used by bypass routines to map and locate arguments.
501 * Creds and procs are not needed in bypass routines, but sometimes
502 * they are useful to (for example) transport layers.
503 * Nameidata is useful because it has a cred in it.
505 int *vdesc_vp_offsets; /* list ended by VDESC_NO_OFFSET */
506 int vdesc_vpp_offset; /* return vpp location */
507 int vdesc_cred_offset; /* cred location, if any */
508 int vdesc_thread_offset; /* thread location, if any */
509 int vdesc_componentname_offset; /* if any */
514 * A list of all the operation descs.
516 extern struct vnodeop_desc *vnodeop_descs[];
518 #define VOPARG_OFFSETOF(s_type, field) __offsetof(s_type, field)
519 #define VOPARG_OFFSETTO(s_type, s_offset, struct_p) \
520 ((s_type)(((char*)(struct_p)) + (s_offset)))
523 #ifdef DEBUG_VFS_LOCKS
525 * Support code to aid in debugging VFS locking problems. Not totally
526 * reliable since if the thread sleeps between changing the lock
527 * state and checking it with the assert, some other thread could
528 * change the state. They are good enough for debugging a single
529 * filesystem using a single-threaded test. Note that the unreliability is
530 * limited to false negatives; efforts were made to ensure that false
531 * positives cannot occur.
533 void assert_vi_locked(struct vnode *vp, const char *str);
534 void assert_vi_unlocked(struct vnode *vp, const char *str);
535 void assert_vop_elocked(struct vnode *vp, const char *str);
536 void assert_vop_locked(struct vnode *vp, const char *str);
537 void assert_vop_unlocked(struct vnode *vp, const char *str);
/*
 * DEBUG_VFS_LOCKS variants: map the assertion macros onto the checker
 * functions declared above.  Compiled out to ((void)0) otherwise.
 */
539 #define ASSERT_VI_LOCKED(vp, str) assert_vi_locked((vp), (str))
540 #define ASSERT_VI_UNLOCKED(vp, str) assert_vi_unlocked((vp), (str))
541 #define ASSERT_VOP_ELOCKED(vp, str) assert_vop_elocked((vp), (str))
542 #define ASSERT_VOP_LOCKED(vp, str) assert_vop_locked((vp), (str))
543 #define ASSERT_VOP_UNLOCKED(vp, str) assert_vop_unlocked((vp), (str))
545 #define ASSERT_VOP_IN_SEQC(vp) do { \
546 struct vnode *_vp = (vp); \
548 VNPASS(seqc_in_modify(_vp->v_seqc), _vp); \
551 #define ASSERT_VOP_NOT_IN_SEQC(vp) do { \
552 struct vnode *_vp = (vp); \
554 VNPASS(!seqc_in_modify(_vp->v_seqc), _vp); \
557 #else /* !DEBUG_VFS_LOCKS */
559 #define ASSERT_VI_LOCKED(vp, str) ((void)0)
560 #define ASSERT_VI_UNLOCKED(vp, str) ((void)0)
561 #define ASSERT_VOP_ELOCKED(vp, str) ((void)0)
562 #define ASSERT_VOP_LOCKED(vp, str) ((void)0)
563 #define ASSERT_VOP_UNLOCKED(vp, str) ((void)0)
565 #define ASSERT_VOP_IN_SEQC(vp) ((void)0)
566 #define ASSERT_VOP_NOT_IN_SEQC(vp) ((void)0)
568 #endif /* DEBUG_VFS_LOCKS */
572 * This call works for vnodes in the kernel.
574 #define VCALL(c) ((c)->a_desc->vdesc_call(c))
/*
 * True when the vnode's mount is running in async mode (MNTK_ASYNC)
 * and the current thread has not requested synchronous I/O (TDP_SYNCIO).
 */
576 #define DOINGASYNC(vp) \
577 (((vp)->v_mount->mnt_kern_flag & MNTK_ASYNC) != 0 && \
578 ((curthread->td_pflags & TDP_SYNCIO) == 0))
581 * VMIO support inline
584 extern int vmiodirenable;
587 vn_canvmio(struct vnode *vp)
589 if (vp && (vp->v_type == VREG || (vmiodirenable && vp->v_type == VDIR)))
595 * Finally, include the default set of vnode operations.
597 typedef void vop_getpages_iodone_t(void *, vm_page_t *, int, int);
598 #include "vnode_if.h"
601 #define VN_OPEN_NOAUDIT 0x00000001
602 #define VN_OPEN_NOCAPCHECK 0x00000002
603 #define VN_OPEN_NAMECACHE 0x00000004
604 #define VN_OPEN_INVFS 0x00000008
607 * Public vnode manipulation functions.
609 struct componentname;
614 struct freebsd11_stat;
625 typedef int (*vn_get_ino_t)(struct mount *, void *, int, struct vnode **);
627 int bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn,
629 /* cache_* may belong in namei.h. */
630 void cache_changesize(u_long newhashsize);
631 #define cache_enter(dvp, vp, cnp) \
632 cache_enter_time(dvp, vp, cnp, NULL, NULL)
633 void cache_enter_time(struct vnode *dvp, struct vnode *vp,
634 struct componentname *cnp, struct timespec *tsp,
635 struct timespec *dtsp);
636 int cache_lookup(struct vnode *dvp, struct vnode **vpp,
637 struct componentname *cnp, struct timespec *tsp, int *ticksp);
638 void cache_purge(struct vnode *vp);
639 void cache_purge_negative(struct vnode *vp);
640 void cache_purgevfs(struct mount *mp, bool force);
641 int change_dir(struct vnode *vp, struct thread *td);
642 void cvtstat(struct stat *st, struct ostat *ost);
643 void freebsd11_cvtnstat(struct stat *sb, struct nstat *nsb);
644 int freebsd11_cvtstat(struct stat *st, struct freebsd11_stat *ost);
645 int getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
647 void getnewvnode_reserve(void);
648 void getnewvnode_drop_reserve(void);
649 int insmntque1(struct vnode *vp, struct mount *mp,
650 void (*dtr)(struct vnode *, void *), void *dtr_arg);
651 int insmntque(struct vnode *vp, struct mount *mp);
652 u_quad_t init_va_filerev(void);
653 int speedup_syncer(void);
654 int vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf,
656 int vn_getcwd(struct thread *td, char *buf, char **retbuf, size_t *buflen);
657 int vn_fullpath(struct thread *td, struct vnode *vn,
658 char **retbuf, char **freebuf);
659 int vn_fullpath_global(struct thread *td, struct vnode *vn,
660 char **retbuf, char **freebuf);
662 vn_dir_dd_ino(struct vnode *vp);
663 int vn_commname(struct vnode *vn, char *buf, u_int buflen);
664 int vn_path_to_global_path(struct thread *td, struct vnode *vp,
665 char *path, u_int pathlen);
666 int vaccess(enum vtype type, mode_t file_mode, uid_t file_uid,
667 gid_t file_gid, accmode_t accmode, struct ucred *cred,
669 int vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid,
671 int vaccess_acl_nfs4(enum vtype type, uid_t file_uid, gid_t file_gid,
672 struct acl *aclp, accmode_t accmode, struct ucred *cred,
674 int vaccess_acl_posix1e(enum vtype type, uid_t file_uid,
675 gid_t file_gid, struct acl *acl, accmode_t accmode,
676 struct ucred *cred, int *privused);
677 void vattr_null(struct vattr *vap);
678 int vcount(struct vnode *vp);
679 void vlazy(struct vnode *);
680 void vdrop(struct vnode *);
681 void vdropl(struct vnode *);
682 int vflush(struct mount *mp, int rootrefs, int flags, struct thread *td);
683 int vget(struct vnode *vp, int flags, struct thread *td);
684 enum vgetstate vget_prep_smr(struct vnode *vp);
685 enum vgetstate vget_prep(struct vnode *vp);
686 int vget_finish(struct vnode *vp, int flags, enum vgetstate vs);
687 void vget_finish_ref(struct vnode *vp, enum vgetstate vs);
688 void vget_abort(struct vnode *vp, enum vgetstate vs);
689 void vgone(struct vnode *vp);
690 void vhold(struct vnode *);
691 void vholdl(struct vnode *);
692 void vholdnz(struct vnode *);
693 bool vhold_smr(struct vnode *);
694 void vinactive(struct vnode *vp);
695 int vinvalbuf(struct vnode *vp, int save, int slpflag, int slptimeo);
696 int vtruncbuf(struct vnode *vp, off_t length, int blksize);
697 void v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn,
699 void vunref(struct vnode *);
700 void vn_printf(struct vnode *vp, const char *fmt, ...) __printflike(2,3);
701 int vrecycle(struct vnode *vp);
702 int vrecyclel(struct vnode *vp);
703 int vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off,
705 int vn_close(struct vnode *vp,
706 int flags, struct ucred *file_cred, struct thread *td);
707 int vn_copy_file_range(struct vnode *invp, off_t *inoffp,
708 struct vnode *outvp, off_t *outoffp, size_t *lenp,
709 unsigned int flags, struct ucred *incred, struct ucred *outcred,
710 struct thread *fsize_td);
711 void vn_finished_write(struct mount *mp);
712 void vn_finished_secondary_write(struct mount *mp);
713 int vn_fsync_buf(struct vnode *vp, int waitfor);
714 int vn_generic_copy_file_range(struct vnode *invp, off_t *inoffp,
715 struct vnode *outvp, off_t *outoffp, size_t *lenp,
716 unsigned int flags, struct ucred *incred, struct ucred *outcred,
717 struct thread *fsize_td);
718 int vn_need_pageq_flush(struct vnode *vp);
719 int vn_isdisk(struct vnode *vp, int *errp);
720 int _vn_lock(struct vnode *vp, int flags, const char *file, int line);
721 #define vn_lock(vp, flags) _vn_lock(vp, flags, __FILE__, __LINE__)
722 int vn_open(struct nameidata *ndp, int *flagp, int cmode, struct file *fp);
723 int vn_open_cred(struct nameidata *ndp, int *flagp, int cmode,
724 u_int vn_open_flags, struct ucred *cred, struct file *fp);
725 int vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
726 struct thread *td, struct file *fp);
727 void vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end);
728 int vn_pollrecord(struct vnode *vp, struct thread *p, int events);
729 int vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base,
730 int len, off_t offset, enum uio_seg segflg, int ioflg,
731 struct ucred *active_cred, struct ucred *file_cred, ssize_t *aresid,
733 int vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, void *base,
734 size_t len, off_t offset, enum uio_seg segflg, int ioflg,
735 struct ucred *active_cred, struct ucred *file_cred, size_t *aresid,
737 int vn_rlimit_fsize(const struct vnode *vn, const struct uio *uio,
739 int vn_stat(struct vnode *vp, struct stat *sb, struct ucred *active_cred,
740 struct ucred *file_cred, struct thread *td);
741 int vn_start_write(struct vnode *vp, struct mount **mpp, int flags);
742 int vn_start_secondary_write(struct vnode *vp, struct mount **mpp,
744 int vn_truncate_locked(struct vnode *vp, off_t length, bool sync,
746 int vn_writechk(struct vnode *vp);
747 int vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
748 const char *attrname, int *buflen, char *buf, struct thread *td);
749 int vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
750 const char *attrname, int buflen, char *buf, struct thread *td);
751 int vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
752 const char *attrname, struct thread *td);
753 int vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags,
755 int vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc,
756 void *alloc_arg, int lkflags, struct vnode **rvp);
757 int vn_utimes_perm(struct vnode *vp, struct vattr *vap,
758 struct ucred *cred, struct thread *td);
760 int vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio);
761 int vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
764 void vn_seqc_write_begin_locked(struct vnode *vp);
765 void vn_seqc_write_begin(struct vnode *vp);
766 void vn_seqc_write_end_locked(struct vnode *vp);
767 void vn_seqc_write_end(struct vnode *vp);
/* Sequence-counter wrappers over v_seqc (see sys/_seqc.h). */
768 #define vn_seqc_read_any(vp) seqc_read_any(&(vp)->v_seqc)
769 #define vn_seqc_consistent(vp, seq) seqc_consistent(&(vp)->v_seqc, seq)
771 #define vn_rangelock_unlock(vp, cookie) \
772 rangelock_unlock(&(vp)->v_rl, (cookie), VI_MTX(vp))
773 #define vn_rangelock_unlock_range(vp, cookie, start, end) \
774 rangelock_unlock_range(&(vp)->v_rl, (cookie), (start), (end), \
/*
 * Byte-range lock acquisition helpers over v_rl; the vnode interlock
 * (VI_MTX) is supplied to the rangelock code as the protecting mutex.
 */
776 #define vn_rangelock_rlock(vp, start, end) \
777 rangelock_rlock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
778 #define vn_rangelock_tryrlock(vp, start, end) \
779 rangelock_tryrlock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
780 #define vn_rangelock_wlock(vp, start, end) \
781 rangelock_wlock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
782 #define vn_rangelock_trywlock(vp, start, end) \
783 rangelock_trywlock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
785 int vfs_cache_lookup(struct vop_lookup_args *ap);
786 int vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp);
787 void vfs_timestamp(struct timespec *);
788 void vfs_write_resume(struct mount *mp, int flags);
789 int vfs_write_suspend(struct mount *mp, int flags);
790 int vfs_write_suspend_umnt(struct mount *mp);
791 void vnlru_free(int, struct vfsops *);
792 int vop_stdbmap(struct vop_bmap_args *);
793 int vop_stdfdatasync_buf(struct vop_fdatasync_args *);
794 int vop_stdfsync(struct vop_fsync_args *);
795 int vop_stdgetwritemount(struct vop_getwritemount_args *);
796 int vop_stdgetpages(struct vop_getpages_args *);
797 int vop_stdinactive(struct vop_inactive_args *);
798 int vop_stdioctl(struct vop_ioctl_args *);
799 int vop_stdneed_inactive(struct vop_need_inactive_args *);
800 int vop_stdkqfilter(struct vop_kqfilter_args *);
801 int vop_stdlock(struct vop_lock1_args *);
802 int vop_stdunlock(struct vop_unlock_args *);
803 int vop_stdislocked(struct vop_islocked_args *);
804 int vop_lock(struct vop_lock1_args *);
805 int vop_unlock(struct vop_unlock_args *);
806 int vop_islocked(struct vop_islocked_args *);
807 int vop_stdputpages(struct vop_putpages_args *);
808 int vop_nopoll(struct vop_poll_args *);
809 int vop_stdaccess(struct vop_access_args *ap);
810 int vop_stdaccessx(struct vop_accessx_args *ap);
811 int vop_stdadvise(struct vop_advise_args *ap);
812 int vop_stdadvlock(struct vop_advlock_args *ap);
813 int vop_stdadvlockasync(struct vop_advlockasync_args *ap);
814 int vop_stdadvlockpurge(struct vop_advlockpurge_args *ap);
815 int vop_stdallocate(struct vop_allocate_args *ap);
816 int vop_stdset_text(struct vop_set_text_args *ap);
817 int vop_stdpathconf(struct vop_pathconf_args *);
818 int vop_stdpoll(struct vop_poll_args *);
819 int vop_stdvptocnp(struct vop_vptocnp_args *ap);
820 int vop_stdvptofh(struct vop_vptofh_args *ap);
821 int vop_stdunp_bind(struct vop_unp_bind_args *ap);
822 int vop_stdunp_connect(struct vop_unp_connect_args *ap);
823 int vop_stdunp_detach(struct vop_unp_detach_args *ap);
824 int vop_eopnotsupp(struct vop_generic_args *ap);
825 int vop_ebadf(struct vop_generic_args *ap);
826 int vop_einval(struct vop_generic_args *ap);
827 int vop_enoent(struct vop_generic_args *ap);
828 int vop_enotty(struct vop_generic_args *ap);
829 int vop_null(struct vop_generic_args *ap);
830 int vop_panic(struct vop_generic_args *ap);
831 int dead_poll(struct vop_poll_args *ap);
832 int dead_read(struct vop_read_args *ap);
833 int dead_write(struct vop_write_args *ap);
/* These are called from within the actual VOPS. */
/*
 * Pre/post hooks invoked around individual vnode operations.  Each takes
 * the VOP's argument structure as an opaque 'a'; the *_post variants also
 * receive 'rc', the VOP's return code — presumably used to decide whether
 * to fire knotes/events only on success (verify in vfs_subr.c).
 */
void vop_close_post(void *a, int rc);
void vop_create_pre(void *a);
void vop_create_post(void *a, int rc);
void vop_whiteout_pre(void *a);
void vop_whiteout_post(void *a, int rc);
void vop_deleteextattr_pre(void *a);
void vop_deleteextattr_post(void *a, int rc);
void vop_link_pre(void *a);
void vop_link_post(void *a, int rc);
void vop_lookup_post(void *a, int rc);
void vop_lookup_pre(void *a);
void vop_mkdir_pre(void *a);
void vop_mkdir_post(void *a, int rc);
void vop_mknod_pre(void *a);
void vop_mknod_post(void *a, int rc);
void vop_open_post(void *a, int rc);
void vop_read_post(void *a, int rc);
void vop_readdir_post(void *a, int rc);
void vop_reclaim_post(void *a, int rc);
void vop_remove_pre(void *a);
void vop_remove_post(void *a, int rc);
void vop_rename_post(void *a, int rc);
void vop_rename_pre(void *a);
void vop_rmdir_pre(void *a);
void vop_rmdir_post(void *a, int rc);
void vop_setattr_pre(void *a);
void vop_setattr_post(void *a, int rc);
void vop_setacl_pre(void *a);
void vop_setacl_post(void *a, int rc);
void vop_setextattr_pre(void *a);
void vop_setextattr_post(void *a, int rc);
void vop_symlink_pre(void *a);
void vop_symlink_post(void *a, int rc);
/* Dispatch a generic VOP through 'vop' with signals deferred. */
int vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a);
#ifdef DEBUG_VFS_LOCKS
/*
 * Lock-debugging hooks compiled in only under DEBUG_VFS_LOCKS; they run
 * before/after selected VOPs ('a' is the VOP argument structure, 'rc' the
 * VOP's return code).
 */
void vop_fplookup_vexec_debugpre(void *a);
void vop_fplookup_vexec_debugpost(void *a, int rc);
void vop_strategy_debugpre(void *a);
void vop_lock_debugpre(void *a);
void vop_lock_debugpost(void *a, int rc);
void vop_unlock_debugpre(void *a);
void vop_need_inactive_debugpre(void *a);
void vop_need_inactive_debugpost(void *a, int rc);
#else
/* Non-debug kernels: the hooks compile away to nothing. */
#define vop_fplookup_vexec_debugpre(x) do { } while (0)
#define vop_fplookup_vexec_debugpost(x, y) do { } while (0)
#define vop_strategy_debugpre(x) do { } while (0)
#define vop_lock_debugpre(x) do { } while (0)
#define vop_lock_debugpost(x, y) do { } while (0)
#define vop_unlock_debugpre(x) do { } while (0)
#define vop_need_inactive_debugpre(x) do { } while (0)
#define vop_need_inactive_debugpost(x, y) do { } while (0)
#endif
891 void vop_rename_fail(struct vop_rename_args *ap);
/*
 * Bracket a VOP_WRITE() implementation: VOP_WRITE_PRE() snapshots the file
 * size and starting offset, VOP_WRITE_POST() fires NOTE_WRITE (plus
 * NOTE_EXTEND if the file grew) when the write advanced the offset and
 * someone is watching the knlist.  Deliberately NOT wrapped in
 * do { } while (0): the locals declared by PRE must remain in scope for
 * POST within the caller's function body.  PRE returns from the enclosing
 * function on VOP_GETATTR() failure.
 */
#define VOP_WRITE_PRE(ap)						\
	struct vattr va;						\
	int error;							\
	off_t osize, ooffset, noffset;					\
									\
	osize = ooffset = noffset = 0;					\
	if (!VN_KNLIST_EMPTY((ap)->a_vp)) {				\
		error = VOP_GETATTR((ap)->a_vp, &va, (ap)->a_cred);	\
		if (error)						\
			return (error);					\
		ooffset = (ap)->a_uio->uio_offset;			\
		osize = (off_t)va.va_size;				\
	}

#define VOP_WRITE_POST(ap, ret)					\
	noffset = (ap)->a_uio->uio_offset;				\
	if (noffset > ooffset && !VN_KNLIST_EMPTY((ap)->a_vp)) {	\
		VFS_KNOTE_LOCKED((ap)->a_vp, NOTE_WRITE			\
		    | (noffset > osize ? NOTE_EXTEND : 0));		\
	}
914 #define VOP_LOCK(vp, flags) VOP_LOCK1(vp, flags, __FILE__, __LINE__)
/*
 * Checked variants of write-count/text-ref VOPs: under INVARIANTS they
 * assert the operation succeeded; otherwise they are plain pass-throughs.
 */
#ifdef INVARIANTS
#define VOP_ADD_WRITECOUNT_CHECKED(vp, cnt)				\
do {									\
	int error_;							\
									\
	error_ = VOP_ADD_WRITECOUNT((vp), (cnt));			\
	VNASSERT(error_ == 0, (vp), ("VOP_ADD_WRITECOUNT returned %d",	\
	    error_));							\
} while (0)
#define VOP_SET_TEXT_CHECKED(vp)					\
do {									\
	int error_;							\
									\
	error_ = VOP_SET_TEXT((vp));					\
	VNASSERT(error_ == 0, (vp), ("VOP_SET_TEXT returned %d",	\
	    error_));							\
} while (0)
#define VOP_UNSET_TEXT_CHECKED(vp)					\
do {									\
	int error_;							\
									\
	error_ = VOP_UNSET_TEXT((vp));					\
	VNASSERT(error_ == 0, (vp), ("VOP_UNSET_TEXT returned %d",	\
	    error_));							\
} while (0)
#else
#define VOP_ADD_WRITECOUNT_CHECKED(vp, cnt) VOP_ADD_WRITECOUNT((vp), (cnt))
#define VOP_SET_TEXT_CHECKED(vp) VOP_SET_TEXT((vp))
#define VOP_UNSET_TEXT_CHECKED(vp) VOP_UNSET_TEXT((vp))
#endif
947 #define VN_IS_DOOMED(vp) __predict_false((vp)->v_irflag & VIRF_DOOMED)
/*
 * Vnode hold/usecount management.  vput() drops a reference on a locked
 * vnode; vrele() on an unlocked one.  The vref*() family acquires
 * references (vrefl presumably with the interlock held, vrefact[n] on a
 * vnode already known referenced — confirm in vfs_subr.c).
 */
void vput(struct vnode *vp);
void vrele(struct vnode *vp);
void vref(struct vnode *vp);
void vrefl(struct vnode *vp);
void vrefact(struct vnode *vp);
void vrefactn(struct vnode *vp, u_int n);
int vrefcnt(struct vnode *vp);
/* Attach poll bookkeeping (v_pollinfo) to the vnode. */
void v_addpollinfo(struct vnode *vp);
/* Create/destroy the VM object backing the vnode's pages. */
int vnode_create_vobject(struct vnode *vp, off_t size, struct thread *td);
void vnode_destroy_vobject(struct vnode *vp);
/* Stock vnode-operation vectors provided by the VFS layer. */
extern struct vop_vector fifo_specops;
extern struct vop_vector dead_vnodeops;
extern struct vop_vector default_vnodeops;
/*
 * Generic-stub casts for initializing vop_vector slots whose exact
 * function-pointer types differ from vop_generic_args.
 */
#define VOP_PANIC ((void*)(uintptr_t)vop_panic)
#define VOP_NULL ((void*)(uintptr_t)vop_null)
#define VOP_EBADF ((void*)(uintptr_t)vop_ebadf)
#define VOP_ENOTTY ((void*)(uintptr_t)vop_enotty)
#define VOP_EINVAL ((void*)(uintptr_t)vop_einval)
#define VOP_ENOENT ((void*)(uintptr_t)vop_enoent)
#define VOP_EOPNOTSUPP ((void*)(uintptr_t)vop_eopnotsupp)
/* Diagnostic dump for a fifo vnode — presumably ddb; confirm in fifofs. */
int fifo_printinfo(struct vnode *);
/*
 * Vnode hash table: vnodes keyed by (mount, hash), with an optional
 * caller-supplied comparison callback 'fn' receiving 'arg'.
 */
typedef int vfs_hash_cmp_t(struct vnode *vp, void *arg);
void vfs_hash_changesize(u_long newhashsize);
int vfs_hash_get(const struct mount *mp, u_int hash, int flags,
    struct thread *td, struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
u_int vfs_hash_index(struct vnode *vp);
int vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
void vfs_hash_ref(const struct mount *mp, u_int hash, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
void vfs_hash_rehash(struct vnode *vp, u_int hash);
void vfs_hash_remove(struct vnode *vp);
/* Generic kqueue filter attach for vnodes. */
int vfs_kqfilter(struct vop_kqfilter_args *);
/* Copy one dirent 'dp' at offset 'off' out through ap's uio. */
int vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off);
int vfs_emptydir(struct vnode *vp);
/* Normalize an accmode_t to the UNIX permission subset. */
int vfs_unixify_accmode(accmode_t *accmode);
/* Reclaim hook for vnodes bound to unix-domain sockets. */
void vfs_unp_reclaim(struct vnode *vp);
/* Permission/ownership helpers shared by chmod(2)/chown(2) paths. */
int setfmode(struct thread *td, struct ucred *cred, struct vnode *vp, int mode);
int setfown(struct thread *td, struct ucred *cred, struct vnode *vp, uid_t uid,
    gid_t gid);
int vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td);
int vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td);

/* Fill va_fsid in '*va' from the vnode's mount. */
void vn_fsid(struct vnode *vp, struct vattr *va);

/* Check execute permission during directory traversal. */
int vn_dir_check_exec(struct vnode *vp, struct componentname *cnp);
/*
 * VOP_UNLOCK() variant accepting LK_INTERLOCK and/or LK_RELEASE; any other
 * flag panics.  Drops the vnode interlock after unlocking when
 * LK_INTERLOCK was passed.  Evaluates to VOP_UNLOCK()'s error.
 */
#define VOP_UNLOCK_FLAGS(vp, flags) ({					\
	struct vnode *_vp = (vp);					\
	int _flags = (flags);						\
	int _error;							\
									\
	if ((_flags & ~(LK_INTERLOCK | LK_RELEASE)) != 0)		\
		panic("%s: unsupported flags %x\n", __func__, flags);	\
	_error = VOP_UNLOCK(_vp);					\
	if (_flags & LK_INTERLOCK)					\
		VI_UNLOCK(_vp);						\
	_error;								\
})
#include <sys/kernel.h>
/* Register a vop_vector with the VFS layer at boot via SYSINIT. */
#define VFS_VOP_VECTOR_REGISTER(vnodeops) \
	SYSINIT(vfs_vector_##vnodeops##_f, SI_SUB_VFS, SI_ORDER_ANY, \
	    vfs_vector_op_register, &vnodeops)
/*
 * VFS safe-memory-reclamation (SMR) section helpers, all operating on the
 * global 'vfs_smr' domain.
 */
#define VFS_SMR_DECLARE \
	extern smr_t vfs_smr

#define VFS_SMR() vfs_smr
#define vfs_smr_enter() smr_enter(VFS_SMR())
#define vfs_smr_exit() smr_exit(VFS_SMR())
#define vfs_smr_entered_load(ptr) smr_entered_load((ptr), VFS_SMR())
#define VFS_SMR_ASSERT_ENTERED() SMR_ASSERT_ENTERED(VFS_SMR())
#define VFS_SMR_ASSERT_NOT_ENTERED() SMR_ASSERT_NOT_ENTERED(VFS_SMR())
/* Tie a UMA zone's reclamation to the vfs_smr domain. */
#define VFS_SMR_ZONE_SET(zone) uma_zone_set_smr((zone), VFS_SMR())
/*
 * Atomically load v_data; caller must be inside a vfs SMR section
 * (asserted).  Evaluates to the loaded pointer.
 */
#define vn_load_v_data_smr(vp) ({					\
	struct vnode *_vp = (vp);					\
									\
	VFS_SMR_ASSERT_ENTERED();					\
	atomic_load_ptr(&(_vp)->v_data);				\
})
1048 #endif /* _KERNEL */
1050 #endif /* !_SYS_VNODE_H_ */