2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * @(#)vnode.h 8.7 (Berkeley) 2/4/94
 * XXX - compatibility until lockmgr() goes away or all the #includes are
44 #include <sys/lockmgr.h>
46 #include <sys/queue.h>
47 #include <sys/_lock.h>
48 #include <sys/_mutex.h>
49 #include <sys/selinfo.h>
54 * The vnode is the focus of all file activity in UNIX. There is a
55 * unique vnode allocated for each active file, each current directory,
56 * each mounted-on file, text file, and the root.
60 * Vnode types. VNON means no type.
enum vtype {
	VNON,		/* no type assigned */
	VREG,		/* regular file */
	VDIR,		/* directory */
	VBLK,		/* block special device */
	VCHR,		/* character special device */
	VLNK,		/* symbolic link */
	VSOCK,		/* socket */
	VFIFO,		/* named pipe (fifo) */
	VBAD		/* bad or dead vnode */
};
66 * These are for the benefit of external programs only (e.g., pstat)
67 * and should NEVER be inspected by the kernel.
70 VT_NON, VT_UFS, VT_NFS, VT_UNUSED, VT_PC, VT_LFS, VT_LOFS, VT_FDESC,
71 VT_PORTAL, VT_NULL, VT_UMAP, VT_KERNFS, VT_PROCFS, VT_AFS, VT_ISOFS,
72 VT_UNION, VT_MSDOSFS, VT_DEVFS, VT_TFS, VT_VFS, VT_CODA, VT_NTFS,
73 VT_HPFS, VT_NWFS, VT_PSEUDOFS, VT_SMBFS
77 * Each underlying filesystem allocates its own private area and hangs
78 * it from v_data. If non-null, this area is freed in getnewvnode().
80 TAILQ_HEAD(buflists, buf);
/*
 * Type of a vnode-operation implementation: takes a pointer to an
 * operation-specific argument structure, returns an int status.
 */
typedef int vop_t __P((void *));
86 struct mtx vpi_lock; /* lock to protect below */
87 struct selinfo vpi_selinfo; /* identity of poller(s) */
88 short vpi_events; /* what they are looking for */
89 short vpi_revents; /* what has happened */
93 * Reading or writing any of these items requires holding the appropriate lock.
94 * v_freelist is locked by the global vnode_free_list mutex.
95 * v_mntvnodes is locked by the global mntvnodes mutex.
96 * v_flag, v_usecount, v_holdcount and v_writecount are
97 * locked by the v_interlock mutex.
98 * v_pollinfo is locked by the lock contained inside it.
101 u_long v_flag; /* vnode flags (see below) */
102 int v_usecount; /* reference count of users */
103 int v_writecount; /* reference count of writers */
104 int v_holdcnt; /* page & buffer references */
105 u_long v_id; /* capability identifier */
106 struct mount *v_mount; /* ptr to vfs we are in */
107 vop_t **v_op; /* vnode operations vector */
108 TAILQ_ENTRY(vnode) v_freelist; /* vnode freelist */
109 TAILQ_ENTRY(vnode) v_nmntvnodes; /* vnodes for mount point */
110 struct buflists v_cleanblkhd; /* clean blocklist head */
111 struct buflists v_dirtyblkhd; /* dirty blocklist head */
112 LIST_ENTRY(vnode) v_synclist; /* vnodes with dirty buffers */
113 long v_numoutput; /* num of writes in progress */
114 enum vtype v_type; /* vnode type */
116 struct mount *vu_mountedhere;/* ptr to mounted vfs (VDIR) */
117 struct socket *vu_socket; /* unix ipc (VSOCK) */
119 struct specinfo *vu_specinfo; /* device (VCHR, VBLK) */
120 SLIST_ENTRY(vnode) vu_specnext;
122 struct fifoinfo *vu_fifoinfo; /* fifo (VFIFO) */
124 daddr_t v_lastw; /* last write (write cluster) */
125 daddr_t v_cstart; /* start block of cluster */
126 daddr_t v_lasta; /* last allocation (cluster) */
127 int v_clen; /* length of current cluster */
128 struct vm_object *v_object; /* Place to store VM object */
129 struct mtx v_interlock; /* lock on usecount and flag */
130 struct lock v_lock; /* used if fs don't have one */
131 struct lock *v_vnlock; /* pointer to vnode lock */
132 enum vtagtype v_tag; /* type of underlying data */
133 void *v_data; /* private data for fs */
134 LIST_HEAD(, namecache) v_cache_src; /* Cache entries from us */
135 TAILQ_HEAD(, namecache) v_cache_dst; /* Cache entries to us */
136 struct vnode *v_dd; /* .. vnode */
137 u_long v_ddid; /* .. capability identifier */
138 struct vpollinfo *v_pollinfo;
139 struct thread *v_vxproc; /* thread owning VXLOCK */
141 const char *filename; /* Source file doing locking */
142 int line; /* Line number doing locking */
145 #define v_mountedhere v_un.vu_mountedhere
146 #define v_socket v_un.vu_socket
147 #define v_rdev v_un.vu_spec.vu_specinfo
148 #define v_specnext v_un.vu_spec.vu_specnext
149 #define v_fifoinfo v_un.vu_fifoinfo
151 #define VN_POLLEVENT(vp, events) \
153 if ((vp)->v_pollinfo != NULL && \
154 (vp)->v_pollinfo->vpi_events & (events)) \
155 vn_pollevent((vp), (events)); \
158 #define VN_KNOTE(vp, b) \
160 if ((vp)->v_pollinfo != NULL) \
161 KNOTE(&vp->v_pollinfo->vpi_selinfo.si_note, (b)); \
167 #define VROOT 0x00001 /* root of its file system */
168 #define VTEXT 0x00002 /* vnode is a pure text prototype */
169 #define VSYSTEM 0x00004 /* vnode being used by kernel */
170 #define VISTTY 0x00008 /* vnode represents a tty */
171 #define VXLOCK 0x00100 /* vnode is locked to change underlying type */
172 #define VXWANT 0x00200 /* thread is waiting for vnode */
173 #define VBWAIT 0x00400 /* waiting for output to complete */
174 #define VNOSYNC 0x01000 /* unlinked, stop syncing */
175 /* open for business 0x01000 */
176 #define VOBJBUF 0x02000 /* Allocate buffers in VM object */
177 #define VCOPYONWRITE 0x04000 /* vnode is doing copy-on-write */
178 #define VAGE 0x08000 /* Insert vnode at head of free list */
179 #define VOLOCK 0x10000 /* vnode is locked waiting for an object */
180 #define VOWANT 0x20000 /* a thread is waiting for VOLOCK */
181 #define VDOOMED 0x40000 /* This vnode is being recycled */
182 #define VFREE 0x80000 /* This vnode is on the freelist */
183 /* open for business 0x100000 */
184 #define VONWORKLST 0x200000 /* On syncer work-list */
185 #define VMOUNT 0x400000 /* Mount in progress */
186 #define VOBJDIRTY 0x800000 /* object might be dirty */
189 * Vnode attributes. A field value of VNOVAL represents a field whose value
190 * is unavailable (getattr) or which is not to be changed (setattr).
193 enum vtype va_type; /* vnode type (for create) */
194 u_short va_mode; /* files access mode and type */
195 short va_nlink; /* number of references to file */
196 uid_t va_uid; /* owner user id */
197 gid_t va_gid; /* owner group id */
198 udev_t va_fsid; /* file system id */
199 long va_fileid; /* file id */
200 u_quad_t va_size; /* file size in bytes */
201 long va_blocksize; /* blocksize preferred for i/o */
202 struct timespec va_atime; /* time of last access */
203 struct timespec va_mtime; /* time of last modification */
204 struct timespec va_ctime; /* time file changed */
205 u_long va_gen; /* generation number of file */
206 u_long va_flags; /* flags defined for file */
207 udev_t va_rdev; /* device the special file represents */
208 u_quad_t va_bytes; /* bytes of disk space held by file */
209 u_quad_t va_filerev; /* file modification number */
210 u_int va_vaflags; /* operations flags, see below */
211 long va_spare; /* remain quad aligned */
215 * Flags for va_vaflags.
217 #define VA_UTIMES_NULL 0x01 /* utimes argument was NULL */
218 #define VA_EXCLUSIVE 0x02 /* exclusive create request */
/*
 * Flags for ioflag.  (high 16 bits used to ask for read-ahead and
 * help with write clustering)
 */
#define	IO_UNIT		0x01		/* do I/O as atomic unit */
#define	IO_APPEND	0x02		/* append write to end */
#define	IO_SYNC		0x04		/* do I/O synchronously */
#define	IO_NODELOCKED	0x08		/* underlying node already locked */
#define	IO_NDELAY	0x10		/* FNDELAY flag set in file table */
#define	IO_VMIO		0x20		/* data already in VMIO space */
#define	IO_INVAL	0x40		/* invalidate after I/O */
#define	IO_ASYNC	0x80		/* bawrite rather than bdwrite */
#define	IO_DIRECT	0x100		/* attempt to bypass buffer cache */
#define	IO_NOWDRAIN	0x200		/* do not block on wdrain */
236 * Modes. Some values same as Ixxx entries from inode.h for now.
238 #define VADMIN 010000 /* permission to administer vnode */
239 #define VSUID 004000 /* set user id on execution */
240 #define VSGID 002000 /* set group id on execution */
241 #define VSVTX 001000 /* save swapped text even after use */
242 #define VREAD 000400 /* read, write, execute permissions */
243 #define VWRITE 000200
247 * Token indicating no attribute value yet assigned.
252 * LK_TIMELOCK timeout for vnode locks (used mainly by the pageout daemon)
254 #define VLKTIMEOUT (hz / 20 + 1)
258 #ifdef MALLOC_DECLARE
259 MALLOC_DECLARE(M_VNODE);
263 * Convert between vnode types and inode formats (since POSIX.1
264 * defines mode word of stat structure in terms of inode formats).
266 extern enum vtype iftovt_tab[];
267 extern int vttoif_tab[];
/* Map the S_IFMT bits of an inode-format mode to an enum vtype. */
#define	IFTOVT(mode)	(iftovt_tab[((mode) & S_IFMT) >> 12])
/* Map an enum vtype back to the corresponding S_IFMT bits. */
#define	VTTOIF(indx)	(vttoif_tab[(int)(indx)])
/* Combine a vnode type and permission bits into an inode-format mode. */
#define	MAKEIMODE(indx, mode)	(int)(VTTOIF(indx) | (mode))
273 * Flags to various vnode functions.
275 #define SKIPSYSTEM 0x0001 /* vflush: skip vnodes marked VSYSTEM */
276 #define FORCECLOSE 0x0002 /* vflush: force file closure */
277 #define WRITECLOSE 0x0004 /* vflush: only close writable files */
278 #define DOCLOSE 0x0008 /* vclean: close active files */
279 #define V_SAVE 0x0001 /* vinvalbuf: sync file first */
280 #define REVOKEALL 0x0001 /* vop_revoke: revoke all aliases */
281 #define V_WAIT 0x0001 /* vn_start_write: sleep for suspend */
282 #define V_NOWAIT 0x0002 /* vn_start_write: don't sleep for suspend */
283 #define V_XSLEEP 0x0004 /* vn_start_write: just return after sleep */
285 #define VREF(vp) vref(vp)
289 #define VATTR_NULL(vap) vattr_null(vap)
291 #define VATTR_NULL(vap) (*(vap) = va_null) /* initialize a vattr */
292 #endif /* DIAGNOSTIC */
294 #define NULLVP ((struct vnode *)NULL)
/*
 * Register (and unregister at shutdown) a vnodeopv_desc so its vnode
 * operations vector is built at SI_SUB_VFS SYSINIT time.
 */
#define VNODEOP_SET(f) \
	C_SYSINIT(f##init, SI_SUB_VFS, SI_ORDER_SECOND, vfs_add_vnodeops, &f); \
	C_SYSUNINIT(f##uninit, SI_SUB_VFS, SI_ORDER_SECOND, vfs_rm_vnodeops, &f);
303 extern struct vnode *rootvnode; /* root (i.e. "/") vnode */
304 extern int desiredvnodes; /* number of vnodes desired */
305 extern struct vm_zone *namei_zone;
306 extern int prtactive; /* nonzero to call vprint() */
307 extern struct vattr va_null; /* predefined null vattr structure */
308 extern int vfs_ioopt;
311 * Macro/function to check for client cache inconsistency w.r.t. leasing.
313 #define LEASE_READ 0x1 /* Check lease for readers */
314 #define LEASE_WRITE 0x2 /* Check lease for modifiers */
317 extern void (*lease_updatetime) __P((int deltat));
/*
 * True if the vnode can be placed on the free list: not already free or
 * being recycled, no hold or use references, and either no VM object or
 * one with neither references nor resident pages.
 */
#define VSHOULDFREE(vp)	\
	(!((vp)->v_flag & (VFREE|VDOOMED)) && \
	 !(vp)->v_holdcnt && !(vp)->v_usecount && \
	 (!(vp)->v_object || \
	  !((vp)->v_object->ref_count || (vp)->v_object->resident_page_count)))
/*
 * True if the vnode is a candidate for reclamation: not free, doomed, or
 * interlocked for a type change, no namecache entries originating from it,
 * and no active users.
 */
#define VMIGHTFREE(vp) \
	(!((vp)->v_flag & (VFREE|VDOOMED|VXLOCK)) && \
	 LIST_EMPTY(&(vp)->v_cache_src) && !(vp)->v_usecount)
/*
 * True if the vnode sits on the free list but has regained references
 * and therefore must be taken off (busied) again.
 */
#define VSHOULDBUSY(vp) \
	(((vp)->v_flag & VFREE) && \
	 ((vp)->v_holdcnt || (vp)->v_usecount))
/* Acquire / try / release the per-vnode interlock (v_interlock), which
 * protects v_flag, v_usecount, v_holdcnt and v_writecount. */
#define VI_LOCK(vp)	mtx_lock(&(vp)->v_interlock)
#define VI_TRYLOCK(vp)	mtx_trylock(&(vp)->v_interlock)
#define VI_UNLOCK(vp)	mtx_unlock(&(vp)->v_interlock)
341 * Mods for extensibility.
345 * Flags for vdesc_flags:
347 #define VDESC_MAX_VPS 16
348 /* Low order 16 flag bits are reserved for willrele flags for vp arguments. */
349 #define VDESC_VP0_WILLRELE 0x0001
350 #define VDESC_VP1_WILLRELE 0x0002
351 #define VDESC_VP2_WILLRELE 0x0004
352 #define VDESC_VP3_WILLRELE 0x0008
353 #define VDESC_NOMAP_VPP 0x0100
354 #define VDESC_VPP_WILLRELE 0x0200
357 * VDESC_NO_OFFSET is used to identify the end of the offset list
358 * and in places where no such field exists.
360 #define VDESC_NO_OFFSET -1
363 * This structure describes the vnode operation taking place.
365 struct vnodeop_desc {
366 int vdesc_offset; /* offset in vector,first for speed */
367 char *vdesc_name; /* a readable name for debugging */
368 int vdesc_flags; /* VDESC_* flags */
371 * These ops are used by bypass routines to map and locate arguments.
372 * Creds and procs are not needed in bypass routines, but sometimes
373 * they are useful to (for example) transport layers.
374 * Nameidata is useful because it has a cred in it.
376 int *vdesc_vp_offsets; /* list ended by VDESC_NO_OFFSET */
377 int vdesc_vpp_offset; /* return vpp location */
378 int vdesc_cred_offset; /* cred location, if any */
379 int vdesc_thread_offset; /* thread location, if any */
380 int vdesc_componentname_offset; /* if any */
382 * Finally, we've got a list of private data (about each operation)
383 * for each transport layer. (Support to manage this list is not
386 caddr_t *vdesc_transports;
391 * A list of all the operation descs.
393 extern struct vnodeop_desc *vnodeop_descs[];
396 * Interlock for scanning list of vnodes attached to a mountpoint
398 extern struct mtx mntvnode_mtx;
401 * This macro is very helpful in defining those offsets in the vdesc struct.
403 * This is stolen from X11R4. I ignored all the fancy stuff for
404 * Crays, so if you decide to port this to such a serious machine,
405 * you might want to consult Intrinsic.h's XtOffset{,Of,To}.
/*
 * Byte offset of `field' within the structure pointed to by `p_type'.
 * Classic NULL-pointer offsetof idiom (predates use of <stddef.h>
 * offsetof here); relies on implementation-defined pointer arithmetic.
 */
#define VOPARG_OFFSET(p_type,field) \
	((int) (((char *) (&(((p_type)NULL)->field))) - ((char *) NULL)))
/* Same, taking the structure type rather than a pointer type. */
#define VOPARG_OFFSETOF(s_type,field) \
	VOPARG_OFFSET(s_type*,field)
/* Recover a typed pointer to the member at S_OFFSET inside STRUCT_P. */
#define VOPARG_OFFSETTO(S_TYPE,S_OFFSET,STRUCT_P) \
	((S_TYPE)(((char*)(STRUCT_P))+(S_OFFSET)))
416 * This structure is used to configure the new vnodeops vector.
418 struct vnodeopv_entry_desc {
419 struct vnodeop_desc *opve_op; /* which operation this is */
420 vop_t *opve_impl; /* code implementing this operation */
422 struct vnodeopv_desc {
423 /* ptr to the ptr to the vector where op should go */
424 vop_t ***opv_desc_vector_p;
425 struct vnodeopv_entry_desc *opv_desc_ops; /* null terminated list */
429 * A generic structure.
430 * This can be used by bypass routines to identify generic arguments.
432 struct vop_generic_args {
433 struct vnodeop_desc *a_desc;
434 /* other random data follows, presumably */
438 #ifdef DEBUG_VFS_LOCKS
440 * Macros to aid in tracing VFS locking problems. Not totally
441 * reliable since if the thread sleeps between changing the lock
442 * state and checking it with the assert, some other thread could
443 * change the state. They are good enough for debugging a single
444 * filesystem using a single-threaded test. I find that 'cvs co src'
445 * is a pretty good test.
449 * [dfr] Kludge until I get around to fixing all the vfs locking.
/*
 * Filesystems known to follow the vnode locking protocol; the lock
 * assertions below are only meaningful for vnodes tagged with one of
 * these types.
 */
#define IS_LOCKING_VFS(vp)	((vp)->v_tag == VT_UFS \
	    || (vp)->v_tag == VT_NFS \
	    || (vp)->v_tag == VT_LFS \
	    || (vp)->v_tag == VT_ISOFS \
	    || (vp)->v_tag == VT_MSDOSFS \
	    || (vp)->v_tag == VT_DEVFS)
458 #define ASSERT_VOP_LOCKED(vp, str) \
460 struct vnode *_vp = (vp); \
462 if (_vp && IS_LOCKING_VFS(_vp) && !VOP_ISLOCKED(_vp, NULL)) \
463 panic("%s: %p is not locked but should be", str, _vp); \
466 #define ASSERT_VOP_UNLOCKED(vp, str) \
468 struct vnode *_vp = (vp); \
471 if (_vp && IS_LOCKING_VFS(_vp)) { \
472 lockstate = VOP_ISLOCKED(_vp, curthread); \
473 if (lockstate == LK_EXCLUSIVE) \
474 panic("%s: %p is locked but should not be", \
479 #define ASSERT_VOP_ELOCKED(vp, str) \
481 struct vnode *_vp = (vp); \
483 if (_vp && IS_LOCKING_VFS(_vp) && \
484 VOP_ISLOCKED(_vp, curthread) != LK_EXCLUSIVE) \
485 panic("%s: %p is not exclusive locked but should be", \
489 #define ASSERT_VOP_ELOCKED_OTHER(vp, str) \
491 struct vnode *_vp = (vp); \
493 if (_vp && IS_LOCKING_VFS(_vp) && \
494 VOP_ISLOCKED(_vp, curthread) != LK_EXCLOTHER) \
495 panic("%s: %p is not exclusive locked by another thread", \
499 #define ASSERT_VOP_SLOCKED(vp, str) \
501 struct vnode *_vp = (vp); \
503 if (_vp && IS_LOCKING_VFS(_vp) && \
504 VOP_ISLOCKED(_vp, NULL) != LK_SHARED) \
505 panic("%s: %p is not locked shared but should be", \
511 #define ASSERT_VOP_LOCKED(vp, str)
512 #define ASSERT_VOP_UNLOCKED(vp, str)
/*
 * VOCALL calls an op given an ops vector.  We break it out because BSD's
 * vclean changes the ops vector and then wants to call ops with the old
 * vector.
 */
#define VOCALL(OPSV,OFF,AP) (( *((OPSV)[(OFF)])) (AP))
/*
 * This call works for vnodes in the kernel.
 */
#define VCALL(VP,OFF,AP) VOCALL((VP)->v_op,(OFF),(AP))
/* Descriptor structure for operation OP (e.g. vop_read -> vop_read_desc). */
#define VDESC(OP) (& __CONCAT(OP,_desc))
/* Slot index of operation OP within an operations vector. */
#define VOFFSET(OP) (VDESC(OP)->vdesc_offset)
531 * VMIO support inline
534 extern int vmiodirenable;
537 vn_canvmio(struct vnode *vp)
539 if (vp && (vp->v_type == VREG || (vmiodirenable && vp->v_type == VDIR)))
545 * Finally, include the default set of vnode operations.
547 #include "vnode_if.h"
550 * Public vnode manipulation functions.
552 struct componentname;
566 extern int (*lease_check_hook) __P((struct vop_lease_args *));
568 struct vnode *addaliasu __P((struct vnode *vp, udev_t nvp_rdev));
569 int bdevvp __P((dev_t dev, struct vnode **vpp));
570 /* cache_* may belong in namei.h. */
571 void cache_enter __P((struct vnode *dvp, struct vnode *vp,
572 struct componentname *cnp));
573 int cache_lookup __P((struct vnode *dvp, struct vnode **vpp,
574 struct componentname *cnp));
575 void cache_purge __P((struct vnode *vp));
576 void cache_purgevfs __P((struct mount *mp));
577 int cache_leaf_test __P((struct vnode *vp));
578 void cvtstat __P((struct stat *st, struct ostat *ost));
579 void cvtnstat __P((struct stat *sb, struct nstat *nsb));
580 int getnewvnode __P((enum vtagtype tag,
581 struct mount *mp, vop_t **vops, struct vnode **vpp));
582 int lease_check __P((struct vop_lease_args *ap));
583 int spec_vnoperate __P((struct vop_generic_args *));
584 int speedup_syncer __P((void));
/* Resolve the full path of a process's text (executable) vnode. */
#define textvp_fullpath(p, rb, rfb) \
	vn_fullpath(FIRST_THREAD_IN_PROC(p), (p)->p_textvp, rb, rfb)
587 int vn_fullpath __P((struct thread *td, struct vnode *vn,
588 char **retbuf, char **freebuf));
589 int vaccess __P((enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
590 mode_t acc_mode, struct ucred *cred, int *privused));
591 int vaccess_acl_posix1e __P((enum vtype type, uid_t file_uid,
592 gid_t file_gid, struct acl *acl, mode_t acc_mode,
593 struct ucred *cred, int *privused));
594 void vattr_null __P((struct vattr *vap));
595 int vcount __P((struct vnode *vp));
596 void vdrop __P((struct vnode *));
597 int vfinddev __P((dev_t dev, enum vtype type, struct vnode **vpp));
598 void vfs_add_vnodeops __P((const void *));
599 void vfs_rm_vnodeops __P((const void *));
600 int vflush __P((struct mount *mp, int rootrefs, int flags));
601 int vget __P((struct vnode *vp, int lockflag, struct thread *td));
602 void vgone __P((struct vnode *vp));
603 void vgonel __P((struct vnode *vp, struct thread *td));
604 void vhold __P((struct vnode *));
605 int vinvalbuf __P((struct vnode *vp, int save, struct ucred *cred,
606 struct thread *td, int slpflag, int slptimeo));
607 int vtruncbuf __P((struct vnode *vp, struct ucred *cred, struct thread *td,
608 off_t length, int blksize));
609 void vprint __P((char *label, struct vnode *vp));
610 int vrecycle __P((struct vnode *vp, struct mtx *inter_lkp,
612 int vn_close __P((struct vnode *vp,
613 int flags, struct ucred *cred, struct thread *td));
614 void vn_finished_write __P((struct mount *mp));
615 int vn_isdisk __P((struct vnode *vp, int *errp));
616 int vn_lock __P((struct vnode *vp, int flags, struct thread *td));
618 int debug_vn_lock __P((struct vnode *vp, int flags, struct thread *p,
619 const char *filename, int line));
/* Debug wrapper: record the source file/line of each vn_lock() call site. */
#define vn_lock(vp,flags,p) debug_vn_lock(vp,flags,p,__FILE__,__LINE__)
622 int vn_mkdir __P((char *path, int mode, enum uio_seg segflg, struct thread *td));
623 int vn_open __P((struct nameidata *ndp, int *flagp, int cmode));
624 int vn_open_cred __P((struct nameidata *ndp, int *flagp, int cmode,
625 struct ucred *cred));
626 void vn_pollevent __P((struct vnode *vp, int events));
627 void vn_pollgone __P((struct vnode *vp));
628 int vn_pollrecord __P((struct vnode *vp, struct thread *p, int events));
629 int vn_rdwr __P((enum uio_rw rw, struct vnode *vp, caddr_t base,
630 int len, off_t offset, enum uio_seg segflg, int ioflg,
631 struct ucred *cred, int *aresid, struct thread *td));
632 int vn_rdwr_inchunks __P((enum uio_rw rw, struct vnode *vp, caddr_t base,
633 int len, off_t offset, enum uio_seg segflg, int ioflg,
634 struct ucred *cred, int *aresid, struct thread *td));
635 int vn_stat __P((struct vnode *vp, struct stat *sb, struct thread *td));
636 int vn_start_write __P((struct vnode *vp, struct mount **mpp, int flags));
637 dev_t vn_todev __P((struct vnode *vp));
638 int vn_write_suspend_wait __P((struct vnode *vp, struct mount *mp,
640 int vn_writechk __P((struct vnode *vp));
641 int vn_extattr_get __P((struct vnode *vp, int ioflg, int attrnamespace,
642 const char *attrname, int *buflen, char *buf, struct thread *td));
643 int vn_extattr_set __P((struct vnode *vp, int ioflg, int attrnamespace,
644 const char *attrname, int buflen, char *buf, struct thread *td));
645 int vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
646 const char *attrname, struct thread *td);
647 int vfs_cache_lookup __P((struct vop_lookup_args *ap));
648 int vfs_object_create __P((struct vnode *vp, struct thread *td,
649 struct ucred *cred));
650 void vfs_timestamp __P((struct timespec *));
651 void vfs_write_resume __P((struct mount *mp));
652 void vfs_write_suspend __P((struct mount *mp));
653 int vop_stdbmap __P((struct vop_bmap_args *));
654 int vop_stdgetwritemount __P((struct vop_getwritemount_args *));
655 int vop_stdgetpages __P((struct vop_getpages_args *));
656 int vop_stdinactive __P((struct vop_inactive_args *));
657 int vop_stdislocked __P((struct vop_islocked_args *));
658 int vop_stdlock __P((struct vop_lock_args *));
659 int vop_stdputpages __P((struct vop_putpages_args *));
660 int vop_stdunlock __P((struct vop_unlock_args *));
661 int vop_noislocked __P((struct vop_islocked_args *));
662 int vop_nolock __P((struct vop_lock_args *));
663 int vop_nopoll __P((struct vop_poll_args *));
664 int vop_nounlock __P((struct vop_unlock_args *));
665 int vop_stdpathconf __P((struct vop_pathconf_args *));
666 int vop_stdpoll __P((struct vop_poll_args *));
667 int vop_revoke __P((struct vop_revoke_args *));
668 int vop_sharedlock __P((struct vop_lock_args *));
669 int vop_eopnotsupp __P((struct vop_generic_args *ap));
670 int vop_ebadf __P((struct vop_generic_args *ap));
671 int vop_einval __P((struct vop_generic_args *ap));
672 int vop_enotty __P((struct vop_generic_args *ap));
673 int vop_defaultop __P((struct vop_generic_args *ap));
674 int vop_null __P((struct vop_generic_args *ap));
675 int vop_panic __P((struct vop_generic_args *ap));
676 int vop_stdcreatevobject __P((struct vop_createvobject_args *ap));
677 int vop_stddestroyvobject __P((struct vop_destroyvobject_args *ap));
678 int vop_stdgetvobject __P((struct vop_getvobject_args *ap));
680 void vfree __P((struct vnode *));
681 void vput __P((struct vnode *vp));
682 void vrele __P((struct vnode *vp));
683 void vref __P((struct vnode *vp));
684 void vbusy __P((struct vnode *vp));
685 void v_addpollinfo(struct vnode *vp);
687 extern vop_t **default_vnodeop_p;
688 extern vop_t **spec_vnodeop_p;
689 extern vop_t **dead_vnodeop_p;
693 #endif /* !_SYS_VNODE_H_ */