4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 /* Portions Copyright 2007 Jeremy Teo */
28 #pragma ident "%Z%%M% %I% %E% SMI"
30 #include <sys/types.h>
31 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/sysmacros.h>
35 #include <sys/resource.h>
37 #include <sys/vnode.h>
41 #include <sys/taskq.h>
43 #include <sys/atomic.h>
44 #include <sys/namei.h>
46 #include <sys/cmn_err.h>
47 #include <sys/errno.h>
48 #include <sys/unistd.h>
49 #include <sys/zfs_vfsops.h>
50 #include <sys/zfs_dir.h>
51 #include <sys/zfs_acl.h>
52 #include <sys/zfs_ioctl.h>
53 #include <sys/fs/zfs.h>
59 #include <sys/dirent.h>
60 #include <sys/policy.h>
61 #include <sys/sunddi.h>
62 #include <sys/filio.h>
63 #include <sys/zfs_ctldir.h>
65 #include <sys/zfs_rlock.h>
68 #include <sys/sf_buf.h>
69 #include <sys/sched.h>
74 * Each vnode op performs some logical unit of work. To do this, the ZPL must
75 * properly lock its in-core state, create a DMU transaction, do the work,
76 * record this work in the intent log (ZIL), commit the DMU transaction,
77 * and wait for the intent log to commit if it is a synchronous operation.
78 * Moreover, the vnode ops must work in both normal and log replay context.
79 * The ordering of events is important to avoid deadlocks and references
80 * to freed memory. The example below illustrates the following Big Rules:
82 * (1) A check must be made in each zfs thread for a mounted file system.
83 * This is done avoiding races using ZFS_ENTER(zfsvfs).
84 * A ZFS_EXIT(zfsvfs) is needed before all returns.
86 * (2) VN_RELE() should always be the last thing except for zil_commit()
87 * (if necessary) and ZFS_EXIT(). This is for 3 reasons:
88 * First, if it's the last reference, the vnode/znode
89 * can be freed, so the zp may point to freed memory. Second, the last
90 * reference will call zfs_zinactive(), which may induce a lot of work --
91 * pushing cached pages (which acquires range locks) and syncing out
92 * cached atime changes. Third, zfs_zinactive() may require a new tx,
93 * which could deadlock the system if you were already holding one.
95 * (3) All range locks must be grabbed before calling dmu_tx_assign(),
96 * as they can span dmu_tx_assign() calls.
98 * (4) Always pass zfsvfs->z_assign as the second argument to dmu_tx_assign().
99 * In normal operation, this will be TXG_NOWAIT. During ZIL replay,
100 * it will be a specific txg. Either way, dmu_tx_assign() never blocks.
101 * This is critical because we don't want to block while holding locks.
102 * Note, in particular, that if a lock is sometimes acquired before
103 * the tx assigns, and sometimes after (e.g. z_lock), then failing to
104 * use a non-blocking assign can deadlock the system. The scenario:
106 * Thread A has grabbed a lock before calling dmu_tx_assign().
107 * Thread B is in an already-assigned tx, and blocks for this lock.
108 * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
109 * forever, because the previous txg can't quiesce until B's tx commits.
111 * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
112 * then drop all locks, call dmu_tx_wait(), and try again.
114 * (5) If the operation succeeded, generate the intent log entry for it
115 * before dropping locks. This ensures that the ordering of events
116 * in the intent log matches the order in which they actually occurred.
118 * (6) At the end of each vnode op, the DMU tx must always commit,
119 * regardless of whether there were any errors.
121 * (7) After dropping all locks, invoke zil_commit(zilog, seq, foid)
122 * to ensure that synchronous semantics are provided when necessary.
124 * In general, this is how things should be ordered in each vnode op:
126 * ZFS_ENTER(zfsvfs); // exit if unmounted
128 * zfs_dirent_lock(&dl, ...) // lock directory entry (may VN_HOLD())
129 * rw_enter(...); // grab any other locks you need
130 * tx = dmu_tx_create(...); // get DMU tx
131 * dmu_tx_hold_*(); // hold each object you might modify
132 * error = dmu_tx_assign(tx, zfsvfs->z_assign); // try to assign
134 * rw_exit(...); // drop locks
135 * zfs_dirent_unlock(dl); // unlock directory entry
136 * VN_RELE(...); // release held vnodes
137 * if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
142 * dmu_tx_abort(tx); // abort DMU tx
143 * ZFS_EXIT(zfsvfs); // finished in zfs
144 * return (error); // really out of space
146 * error = do_real_work(); // do whatever this VOP does
148 * zfs_log_*(...); // on success, make ZIL entry
149 * dmu_tx_commit(tx); // commit DMU tx -- error or not
150 * rw_exit(...); // drop locks
151 * zfs_dirent_unlock(dl); // unlock directory entry
152 * VN_RELE(...); // release held vnodes
153 * zil_commit(zilog, seq, foid); // synchronous when necessary
154 * ZFS_EXIT(zfsvfs); // finished in zfs
155 * return (error); // done, report error
/*
 * VOP open for ZFS. The visible fragment only bumps z_sync_cnt for
 * FSYNC/FDSYNC opens; z_sync_cnt tracks synchronous opens on the znode.
 * NOTE(review): this extract has elided lines (gaps in the embedded
 * numbering), so the function prologue/epilogue are not shown here.
 */
159 zfs_open(vnode_t **vpp, int flag, cred_t *cr)
161 znode_t *zp = VTOZ(*vpp);
163 /* Keep a count of the synchronous opens in the znode */
164 if (flag & (FSYNC | FDSYNC))
165 atomic_inc_32(&zp->z_sync_cnt);
/*
 * VOP close for ZFS: undoes the synchronous-open count taken in zfs_open()
 * and releases any POSIX record locks / share reservations this process
 * holds on the vnode.
 * NOTE(review): fragment with elided lines; body shown is partial.
 */
171 zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr)
173 znode_t *zp = VTOZ(vp);
175 /* Decrement the synchronous opens in the znode */
176 if (flag & (FSYNC | FDSYNC))
177 atomic_dec_32(&zp->z_sync_cnt);
180 * Clean up any locks held by this process on the vp.
182 cleanlocks(vp, ddi_get_pid(), 0);
183 cleanshares(vp, ddi_get_pid());
189 * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
190 * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter.
/*
 * NOTE(review): fragment with elided lines. Visible logic: reject offsets
 * at/past EOF, then ask the DMU for the next hole/data boundary via
 * dmu_offset_next(); ESRCH or an out-of-range answer falls through to the
 * virtual-hole-at-EOF handling.
 */
193 zfs_holey(vnode_t *vp, u_long cmd, offset_t *off)
195 znode_t *zp = VTOZ(vp);
196 uint64_t noff = (uint64_t)*off; /* new offset */
201 file_sz = zp->z_phys->zp_size;
202 if (noff >= file_sz) {
206 if (cmd == _FIO_SEEK_HOLE)
/* "hole" argument below presumably derives from cmd — elided here; confirm. */
211 error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);
214 if ((error == ESRCH) || (noff > file_sz)) {
216 * Handle the virtual hole at the end of file.
/*
 * VOP ioctl for ZFS. The visible path copies the caller's offset in,
 * delegates _FIO_SEEK_HOLE/_FIO_SEEK_DATA to zfs_holey(), and copies the
 * (in/out) offset back. NOTE(review): fragment with elided lines.
 */
233 zfs_ioctl(vnode_t *vp, u_long com, intptr_t data, int flag, cred_t *cred,
245 * The following two ioctls are used by bfu. Faking out,
246 * necessary to avoid bfu errors.
254 if (ddi_copyin((void *)data, &off, sizeof (off), flag))
257 zfsvfs = VTOZ(vp)->z_zfsvfs;
260 /* offset parameter is in/out */
261 error = zfs_holey(vp, com, &off);
265 if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
273 * When a file is memory mapped, we must keep the IO data synchronized
274 * between the DMU cache and the memory mapped pages. What this means:
276 * On Write: If we find a memory mapped page, we write to *both*
277 * the page and the dmu buffer.
279 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
280 * the file is memory mapped.
/*
 * Write helper for memory-mapped files: for each page-sized chunk, if a
 * valid resident VM page covers it, copy the user data into the page via
 * an sf_buf mapping AND into the DMU (dmu_write) so the page cache and
 * the DMU stay coherent; otherwise fall back to dmu_write_uio().
 * NOTE(review): fragment with elided lines (locals, error paths, sf_buf
 * release, loop bookkeeping such as `len`/`dirbytes` updates not shown).
 */
283 mappedwrite(vnode_t *vp, int nbytes, uio_t *uio, dmu_tx_t *tx)
285 znode_t *zp = VTOZ(vp);
286 objset_t *os = zp->z_zfsvfs->z_os;
295 ASSERT(vp->v_mount != NULL);
299 start = uio->uio_loffset;
300 off = start & PAGEOFFSET;
/* Walk the range page by page; `off` is nonzero only for the first page. */
303 for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
304 uint64_t bytes = MIN(PAGESIZE - off, len);
308 if ((m = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
309 vm_page_is_valid(m, (vm_offset_t)off, bytes)) {
313 if (vm_page_sleep_if_busy(m, FALSE, "zfsmwb"))
315 fsize = obj->un_pager.vnp.vnp_size;
317 vm_page_lock_queues();
319 vm_page_unlock_queues();
320 VM_OBJECT_UNLOCK(obj);
322 error = dmu_write_uio(os, zp->z_id, uio,
328 sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
329 va = (caddr_t)sf_buf_kva(sf);
330 woff = uio->uio_loffset - off;
331 error = uiomove(va + off, bytes, UIO_WRITE, uio);
333 * The uiomove() above could have been partially
334 * successful, that's why we call dmu_write()
335 * below unconditionally. The page was marked
336 * non-dirty above and we would lose the changes
337 * without doing so. If the uiomove() failed
338 * entirely, well, we just write what we got
339 * before one more time.
341 dmu_write(os, zp->z_id, woff,
342 MIN(PAGESIZE, fsize - woff), va, tx);
/* Evict any stale cached (inactive) page shadowing this index. */
349 if (__predict_false(obj->cache != NULL)) {
350 vm_page_cache_free(obj, OFF_TO_IDX(start),
351 OFF_TO_IDX(start) + 1);
360 VM_OBJECT_UNLOCK(obj);
/* Flush any trailing unmapped bytes accumulated in `dirbytes`. */
361 if (error == 0 && dirbytes > 0)
362 error = dmu_write_uio(os, zp->z_id, uio, dirbytes, tx);
367 * When a file is memory mapped, we must keep the IO data synchronized
368 * between the DMU cache and the memory mapped pages. What this means:
370 * On Read: We "read" preferentially from memory mapped pages,
371 * else we default from the dmu buffer.
373 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
374 * the file is memory mapped.
/*
 * Read helper for memory-mapped files: prefer reading from a valid
 * resident VM page (via sf_buf) so mmap writers are visible; otherwise
 * read through the DMU. The UIO_NOCOPY branch services sendfile(2) by
 * filling the page itself with dmu_read().
 * NOTE(review): fragment with elided lines (locals, sf_buf release,
 * `len`/`dirbytes` maintenance not shown).
 */
377 mappedread(vnode_t *vp, int nbytes, uio_t *uio)
379 znode_t *zp = VTOZ(vp);
380 objset_t *os = zp->z_zfsvfs->z_os;
390 ASSERT(vp->v_mount != NULL);
394 start = uio->uio_loffset;
395 off = start & PAGEOFFSET;
398 for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
399 uint64_t bytes = MIN(PAGESIZE - off, len);
402 if ((m = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
403 vm_page_is_valid(m, (vm_offset_t)off, bytes)) {
404 if (vm_page_sleep_if_busy(m, FALSE, "zfsmrb"))
407 VM_OBJECT_UNLOCK(obj);
409 error = dmu_read_uio(os, zp->z_id, uio,
415 sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
416 va = (caddr_t)sf_buf_kva(sf);
417 error = uiomove(va + off, bytes, UIO_READ, uio);
423 } else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
425 * The code below is here to make sendfile(2) work
426 * correctly with ZFS. As pointed out by ups@
427 * sendfile(2) should be changed to use VOP_GETPAGES(),
428 * but it would pessimize performance of sendfile/UFS, that's
429 * why I handle this special case in ZFS code.
431 if (vm_page_sleep_if_busy(m, FALSE, "zfsmrb"))
434 VM_OBJECT_UNLOCK(obj);
436 error = dmu_read_uio(os, zp->z_id, uio,
442 sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
443 va = (caddr_t)sf_buf_kva(sf);
/* NOCOPY: read straight into the page; uio_resid advanced by hand below. */
444 error = dmu_read(os, zp->z_id, start + off,
445 bytes, (void *)(va + off));
452 uio->uio_resid -= bytes;
461 VM_OBJECT_UNLOCK(obj);
462 if (error == 0 && dirbytes > 0)
463 error = dmu_read_uio(os, zp->z_id, uio, dirbytes);
467 offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
470 * Read bytes from specified file into supplied buffer.
472 * IN: vp - vnode of file to be read from.
473 * uio - structure supplying read location, range info,
475 * ioflag - SYNC flags; used to provide FRSYNC semantics.
476 * cr - credentials of caller.
478 * OUT: uio - updated offset and range, buffer filled.
480 * RETURN: 0 if success
481 * error code if failure
484 * vp - atime updated if byte count > 0
/*
 * VOP read for ZFS. Visible flow: validate offset, fast-path empty reads,
 * honor mandatory locks, zil_commit for FRSYNC, take a RL_READER range
 * lock, clamp the request to EOF, then read in zfs_read_chunk_size pieces
 * via mappedread()/dmu_read_uio(), finally unlock and stamp atime.
 * NOTE(review): fragment with elided lines (ZFS_ENTER/ZFS_EXIT, returns,
 * and the chunk loop's control statements are not shown).
 */
488 zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
490 znode_t *zp = VTOZ(vp);
491 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
492 objset_t *os = zfsvfs->z_os;
500 * Validate file offset
502 if (uio->uio_loffset < (offset_t)0) {
508 * Fasttrack empty reads
510 if (uio->uio_resid == 0) {
516 * Check for mandatory locks
518 if (MANDMODE((mode_t)zp->z_phys->zp_mode)) {
519 if (error = chklock(vp, FREAD,
520 uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
527 * If we're in FRSYNC mode, sync out this znode before reading it.
530 zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id);
533 * Lock the range against changes.
535 rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);
538 * If we are reading past end-of-file we can skip
539 * to the end; but we might still need to set atime.
541 if (uio->uio_loffset >= zp->z_phys->zp_size) {
546 ASSERT(uio->uio_loffset < zp->z_phys->zp_size);
547 n = MIN(uio->uio_resid, zp->z_phys->zp_size - uio->uio_loffset);
/* Chunked so each dmu_read_uio stays within zfs_read_chunk_size. */
550 nbytes = MIN(n, zfs_read_chunk_size -
551 P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
553 if (vn_has_cached_data(vp))
554 error = mappedread(vp, nbytes, uio);
556 error = dmu_read_uio(os, zp->z_id, uio, nbytes);
564 zfs_range_unlock(rl);
566 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
572 * Fault in the pages of the first n bytes specified by the uio structure.
573 * 1 byte in each page is touched and the uio struct is unmodified.
574 * Any error will exit this routine as this is only a best
575 * attempt to get the pages resident. This is a copy of ufs_trans_touch().
/*
 * Touch one byte per page of the first n bytes described by the uio so
 * user pages are resident before the write path holds a transaction.
 * Best-effort only (per the comment above): any fault simply stops the
 * pre-touch. Only applies to UIO_USERSPACE uios; the uio is not modified.
 * NOTE(review): fragment with elided lines (loop structure, the actual
 * fuword/touch calls, and iov advancement are not shown).
 */
578 zfs_prefault_write(ssize_t n, struct uio *uio)
584 if (uio->uio_segflg != UIO_USERSPACE)
590 cnt = MIN(iov->iov_len, n);
592 /* empty iov entry */
598 * touch each page in this segment.
604 incr = MIN(cnt, PAGESIZE);
609 * touch the last byte in case it straddles a page.
619 * Write the bytes to a file.
621 * IN: vp - vnode of file to be written to.
622 * uio - structure supplying write location, range info,
624 * ioflag - IO_APPEND flag set if in append mode.
625 * cr - credentials of caller.
627 * OUT: uio - updated offset and range.
629 * RETURN: 0 if success
630 * error code if failure
633 * vp - ctime|mtime updated if byte count > 0
/*
 * VOP write for ZFS. Visible flow follows the Big Rules at the top of the
 * file: pre-fault user pages, take an append/writer range lock, enforce
 * the file-size limit and mandatory locks, then loop writing max_blksz
 * chunks — each in its own DMU tx assigned with zfsvfs->z_assign — growing
 * the blocksize and reducing the range lock on the first pass if the lock
 * was over-wide, clearing SUID/SGID, stamping mtime/ctime, racing zp_size
 * forward with atomic_cas_64, and logging each chunk to the ZIL; a final
 * zil_commit provides FSYNC/FDSYNC semantics.
 * NOTE(review): fragment with elided lines (declarations of n/rl/tx/etc.,
 * loop keywords, dmu_tx_commit/abort, and returns are not shown).
 */
637 zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
639 znode_t *zp = VTOZ(vp);
640 rlim64_t limit = MAXOFFSET_T;
641 ssize_t start_resid = uio->uio_resid;
645 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
646 zilog_t *zilog = zfsvfs->z_log;
650 int max_blksz = zfsvfs->z_max_blksz;
654 * Fasttrack empty write
660 if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
666 * Pre-fault the pages to ensure slow (eg NFS) pages
669 zfs_prefault_write(n, uio);
672 * If in append mode, set the io offset pointer to eof.
674 if (ioflag & IO_APPEND) {
676 * Range lock for a file append:
677 * The value for the start of range will be determined by
678 * zfs_range_lock() (to guarantee append semantics).
679 * If this write will cause the block size to increase,
680 * zfs_range_lock() will lock the entire file, so we must
681 * later reduce the range after we grow the block size.
683 rl = zfs_range_lock(zp, 0, n, RL_APPEND);
684 if (rl->r_len == UINT64_MAX) {
685 /* overlocked, zp_size can't change */
686 woff = uio->uio_loffset = zp->z_phys->zp_size;
688 woff = uio->uio_loffset = rl->r_off;
691 woff = uio->uio_loffset;
693 * Validate file offset
701 * If we need to grow the block size then zfs_range_lock()
702 * will lock a wider range than we request here.
703 * Later after growing the block size we reduce the range.
705 rl = zfs_range_lock(zp, woff, n, RL_WRITER);
709 zfs_range_unlock(rl);
/* Overflow-safe limit check: both forms guard woff + n > limit. */
714 if ((woff + n) > limit || woff > (limit - n))
718 * Check for mandatory locks
720 if (MANDMODE((mode_t)zp->z_phys->zp_mode) &&
721 (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
722 zfs_range_unlock(rl);
726 end_size = MAX(zp->z_phys->zp_size, woff + n);
729 * Write the file in reasonable size chunks. Each chunk is written
730 * in a separate transaction; this keeps the intent log records small
731 * and allows us to do more fine-grained space accounting.
735 * Start a transaction.
737 woff = uio->uio_loffset;
738 tx = dmu_tx_create(zfsvfs->z_os);
739 dmu_tx_hold_bonus(tx, zp->z_id);
740 dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
741 error = dmu_tx_assign(tx, zfsvfs->z_assign);
743 if (error == ERESTART &&
744 zfsvfs->z_assign == TXG_NOWAIT) {
754 * If zfs_range_lock() over-locked we grow the blocksize
755 * and then reduce the lock range. This will only happen
756 * on the first iteration since zfs_range_reduce() will
757 * shrink down r_len to the appropriate size.
759 if (rl->r_len == UINT64_MAX) {
762 if (zp->z_blksz > max_blksz) {
763 ASSERT(!ISP2(zp->z_blksz));
764 new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE);
766 new_blksz = MIN(end_size, max_blksz);
768 zfs_grow_blocksize(zp, new_blksz, tx);
769 zfs_range_reduce(rl, woff, n);
773 * XXX - should we really limit each write to z_max_blksz?
774 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
776 nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
778 if (woff + nbytes > zp->z_phys->zp_size)
779 vnode_pager_setsize(vp, woff + nbytes);
781 rw_enter(&zp->z_map_lock, RW_READER);
783 tx_bytes = uio->uio_resid;
784 if (vn_has_cached_data(vp)) {
785 rw_exit(&zp->z_map_lock);
786 error = mappedwrite(vp, nbytes, uio, tx);
788 error = dmu_write_uio(zfsvfs->z_os, zp->z_id,
790 rw_exit(&zp->z_map_lock);
/* Bytes actually consumed by this chunk = resid before - resid after. */
792 tx_bytes -= uio->uio_resid;
795 * If we made no progress, we're done. If we made even
796 * partial progress, update the znode and ZIL accordingly.
805 * Clear Set-UID/Set-GID bits on successful write if not
806 * privileged and at least one of the execute bits is set.
808 * It would be nice to do this after all writes have
809 * been done, but that would still expose the ISUID/ISGID
810 * to another app after the partial write is committed.
812 mutex_enter(&zp->z_acl_lock);
813 if ((zp->z_phys->zp_mode & (S_IXUSR | (S_IXUSR >> 3) |
814 (S_IXUSR >> 6))) != 0 &&
815 (zp->z_phys->zp_mode & (S_ISUID | S_ISGID)) != 0 &&
816 secpolicy_vnode_setid_retain(cr,
817 (zp->z_phys->zp_mode & S_ISUID) != 0 &&
818 zp->z_phys->zp_uid == 0) != 0) {
819 zp->z_phys->zp_mode &= ~(S_ISUID | S_ISGID);
821 mutex_exit(&zp->z_acl_lock);
824 * Update time stamp. NOTE: This marks the bonus buffer as
825 * dirty, so we don't have to do it again for zp_size.
827 zfs_time_stamper(zp, CONTENT_MODIFIED, tx);
830 * Update the file size (zp_size) if it has changed;
831 * account for possible concurrent updates.
833 while ((end_size = zp->z_phys->zp_size) < uio->uio_loffset)
834 (void) atomic_cas_64(&zp->z_phys->zp_size, end_size,
836 zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
841 ASSERT(tx_bytes == nbytes);
845 zfs_range_unlock(rl);
848 * If we're in replay mode, or we made no progress, return error.
849 * Otherwise, it's at least a partial write, so it's successful.
851 if (zfsvfs->z_assign >= TXG_INITIAL || uio->uio_resid == start_resid) {
856 if (ioflag & (FSYNC | FDSYNC))
857 zil_commit(zilog, zp->z_last_itx, zp->z_id);
/*
 * dmu_sync() completion callback for indirect ZIL writes (see
 * zfs_get_data). Releases the dbuf and range lock taken there, records
 * the vdev the block landed on with the ZIL, and frees the zgd_t.
 * The VFS_LOCK_GIANT/VFS_UNLOCK_GIANT pair is FreeBSD Giant handling.
 * NOTE(review): fragment with elided lines (locals/braces not shown).
 */
864 zfs_get_done(dmu_buf_t *db, void *vzgd)
866 zgd_t *zgd = (zgd_t *)vzgd;
867 rl_t *rl = zgd->zgd_rl;
868 vnode_t *vp = ZTOV(rl->r_zp);
871 vfslocked = VFS_LOCK_GIANT(vp->v_vfsp);
872 dmu_buf_rele(db, vzgd);
873 zfs_range_unlock(rl);
875 zil_add_vdev(zgd->zgd_zilog, DVA_GET_VDEV(BP_IDENTITY(zgd->zgd_bp)));
876 kmem_free(zgd, sizeof (zgd_t));
877 VFS_UNLOCK_GIANT(vfslocked);
881 * Get data to generate a TX_WRITE intent log record.
/*
 * ZIL get-data callback: supply the data for a TX_WRITE log record.
 * Immediate writes (buf != NULL) copy the bytes into the record under a
 * range lock; indirect writes lock the whole block, dmu_sync() it, and
 * stash the resulting block pointer in the record. Cleanup of the dbuf,
 * range lock, and zgd happens either here or in zfs_get_done() when
 * dmu_sync returns EINPROGRESS.
 * NOTE(review): fragment with elided lines (returns, the blocksize
 * re-check loop, and some error paths are not shown).
 */
884 zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
886 zfsvfs_t *zfsvfs = arg;
887 objset_t *os = zfsvfs->z_os;
889 uint64_t off = lr->lr_offset;
893 int dlen = lr->lr_length; /* length of user data */
900 * Nothing to do if the file has been removed
902 if (zfs_zget(zfsvfs, lr->lr_foid, &zp) != 0)
904 if (zp->z_unlinked) {
910 * Write records come in two flavors: immediate and indirect.
911 * For small writes it's cheaper to store the data with the
912 * log record (immediate); for large writes it's cheaper to
913 * sync the data and get a pointer to it (indirect) so that
914 * we don't have to write the data twice.
916 if (buf != NULL) { /* immediate write */
917 rl = zfs_range_lock(zp, off, dlen, RL_READER);
918 /* test for truncation needs to be done while range locked */
919 if (off >= zp->z_phys->zp_size) {
923 VERIFY(0 == dmu_read(os, lr->lr_foid, off, dlen, buf));
924 } else { /* indirect write */
925 uint64_t boff; /* block starting offset */
928 * Have to lock the whole block to ensure when it's
929 * written out and its checksum is being calculated
930 * that no one can change the data. We need to re-check
931 * blocksize after we get the lock in case it's changed!
934 if (ISP2(zp->z_blksz)) {
935 boff = P2ALIGN_TYPED(off, zp->z_blksz,
941 rl = zfs_range_lock(zp, boff, dlen, RL_READER);
942 if (zp->z_blksz == dlen)
944 zfs_range_unlock(rl);
946 /* test for truncation needs to be done while range locked */
947 if (off >= zp->z_phys->zp_size) {
951 zgd = (zgd_t *)kmem_alloc(sizeof (zgd_t), KM_SLEEP);
953 zgd->zgd_zilog = zfsvfs->z_log;
954 zgd->zgd_bp = &lr->lr_blkptr;
955 VERIFY(0 == dmu_buf_hold(os, lr->lr_foid, boff, zgd, &db));
956 ASSERT(boff == db->db_offset);
957 lr->lr_blkoff = off - boff;
958 error = dmu_sync(zio, db, &lr->lr_blkptr,
959 lr->lr_common.lrc_txg, zfs_get_done, zgd);
960 ASSERT(error == EEXIST || lr->lr_length <= zp->z_blksz);
962 zil_add_vdev(zfsvfs->z_log,
963 DVA_GET_VDEV(BP_IDENTITY(&lr->lr_blkptr)));
966 * If we get EINPROGRESS, then we need to wait for a
967 * write IO initiated by dmu_sync() to complete before
968 * we can release this dbuf. We will finish everything
969 * up in the zfs_get_done() callback.
971 if (error == EINPROGRESS)
973 dmu_buf_rele(db, zgd);
974 kmem_free(zgd, sizeof (zgd_t));
977 zfs_range_unlock(rl);
/*
 * VOP access for ZFS: delegate the mode check to zfs_zaccess_rwx().
 * NOTE(review): fragment with elided lines (ZFS_ENTER/ZFS_EXIT and the
 * return are not shown).
 */
984 zfs_access(vnode_t *vp, int mode, int flags, cred_t *cr)
986 znode_t *zp = VTOZ(vp);
987 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
991 error = zfs_zaccess_rwx(zp, mode, cr);
997 * Lookup an entry in a directory, or an extended attribute directory.
998 * If it exists, return a held vnode reference for it.
1000 * IN: dvp - vnode of directory to search.
1001 * nm - name of entry to lookup.
1002 * pnp - full pathname to lookup [UNUSED].
1003 * flags - LOOKUP_XATTR set if looking for an attribute.
1004 * rdir - root directory vnode [UNUSED].
1005 * cr - credentials of caller.
1007 * OUT: vpp - vnode of located entry, NULL if not found.
1009 * RETURN: 0 if success
1010 * error code if failure
/*
 * VOP lookup for ZFS (FreeBSD componentname flavor). Visible flow:
 * handle LOOKUP_XATTR (refuse if xattrs are off or already inside an
 * xattr dir, then open/permission-check the xattr directory); otherwise
 * check execute access on the directory, resolve the name with
 * zfs_dirlook(), wrap device nodes in specvp(), translate CREATE/RENAME
 * ENOENT to EJUSTRETURN with SAVENAME, apply FreeBSD locking for the
 * result vnode (special-casing ".."), and feed the namecache.
 * NOTE(review): fragment with elided lines; `flags` and several returns
 * are declared/assigned in elided code.
 */
1017 zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
1018 int nameiop, cred_t *cr, kthread_t *td)
1021 znode_t *zdp = VTOZ(dvp);
1022 zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
1030 if (flags & LOOKUP_XATTR) {
1032 * If the xattr property is off, refuse the lookup request.
1034 if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) {
1040 * We don't allow recursive attributes..
1041 * Maybe someday we will.
1043 if (zdp->z_phys->zp_flags & ZFS_XATTR) {
1048 if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) {
1054 * Do we have permission to get into attribute directory?
1057 if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, cr)) {
1066 if (dvp->v_type != VDIR) {
1072 * Check accessibility of directory.
1075 if (error = zfs_zaccess(zdp, ACE_EXECUTE, cr)) {
1080 if ((error = zfs_dirlook(zdp, nm, vpp)) == 0) {
1083 * Convert device special files
1085 if (IS_DEVVP(*vpp)) {
1088 svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
1099 /* Translate errors and add SAVENAME when needed. */
1100 if (cnp->cn_flags & ISLASTCN) {
1104 if (error == ENOENT) {
1105 error = EJUSTRETURN;
1106 cnp->cn_flags |= SAVENAME;
1112 cnp->cn_flags |= SAVENAME;
/* Lock the result vnode; for ".." the parent's lock state is restored. */
1116 if (error == 0 && (nm[0] != '.' || nm[1] != '\0')) {
1119 if (cnp->cn_flags & ISDOTDOT) {
1120 ltype = VOP_ISLOCKED(dvp);
1123 error = vn_lock(*vpp, cnp->cn_lkflags);
1124 if (cnp->cn_flags & ISDOTDOT)
1125 vn_lock(dvp, ltype | LK_RETRY);
1133 #ifdef FREEBSD_NAMECACHE
1135 * Insert name into cache (as non-existent) if appropriate.
1137 if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) && nameiop != CREATE)
1138 cache_enter(dvp, *vpp, cnp);
1140 * Insert name into cache if appropriate.
1142 if (error == 0 && (cnp->cn_flags & MAKEENTRY)) {
1143 if (!(cnp->cn_flags & ISLASTCN) ||
1144 (nameiop != DELETE && nameiop != RENAME)) {
1145 cache_enter(dvp, *vpp, cnp);
1154 * Attempt to create a new entry in a directory. If the entry
1155 * already exists, truncate the file if permissible, else return
1156 * an error. Return the vp of the created or trunc'd file.
1158 * IN: dvp - vnode of directory to put new file entry in.
1159 * name - name of new file entry.
1160 * vap - attributes of new file.
1161 * excl - flag indicating exclusive or non-exclusive mode.
1162 * mode - mode to open file with.
1163 * cr - credentials of caller.
1164 * flag - large file flag [UNUSED].
1166 * OUT: vpp - vnode of created or trunc'd entry.
1168 * RETURN: 0 if success
1169 * error code if failure
1172 * dvp - ctime|mtime updated if new entry created
1173 * vp - ctime|mtime always, atime if new
/*
 * VOP create for ZFS. Visible flow: strip the sticky bit if the caller
 * may not set it, lock the directory entry, and either (a) no entry —
 * check ACE_ADD_FILE, restrict xattr dirs to regular files, build/assign
 * a tx (retrying ERESTART under TXG_NOWAIT), zfs_mknode + link + log
 * TX_CREATE — or (b) entry exists — enforce exclusive mode, refuse
 * writing a directory, verify access, and truncate via zfs_freesp()
 * when AT_SIZE 0 is requested. Device nodes are wrapped in specvp().
 * NOTE(review): fragment with elided lines (error returns, loop/goto
 * structure, dmu_tx_commit/abort are not shown).
 */
1177 zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
1178 vnode_t **vpp, cred_t *cr)
1180 znode_t *zp, *dzp = VTOZ(dvp);
1181 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1182 zilog_t *zilog = zfsvfs->z_log;
1183 objset_t *os = zfsvfs->z_os;
1194 if ((vap->va_mode & S_ISVTX) && secpolicy_vnode_stky_modify(cr))
1195 vap->va_mode &= ~S_ISVTX;
1197 if (*name == '\0') {
1199 * Null component name refers to the directory itself.
1206 /* possible VN_HOLD(zp) */
1207 if (error = zfs_dirent_lock(&dl, dzp, name, &zp, 0)) {
1208 if (strcmp(name, "..") == 0)
1215 zoid = zp ? zp->z_id : -1ULL;
1219 * Create a new file object and update the directory
1222 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, cr)) {
1227 * We only support the creation of regular files in
1228 * extended attribute directories.
1230 if ((dzp->z_phys->zp_flags & ZFS_XATTR) &&
1231 (vap->va_type != VREG)) {
1236 tx = dmu_tx_create(os);
1237 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1238 dmu_tx_hold_bonus(tx, dzp->z_id);
1239 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
1240 if (dzp->z_phys->zp_flags & ZFS_INHERIT_ACE)
1241 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1242 0, SPA_MAXBLOCKSIZE);
1243 error = dmu_tx_assign(tx, zfsvfs->z_assign);
1245 zfs_dirent_unlock(dl);
1246 if (error == ERESTART &&
1247 zfsvfs->z_assign == TXG_NOWAIT) {
1256 zfs_mknode(dzp, vap, &zoid, tx, cr, 0, &zp, 0);
1257 ASSERT(zp->z_id == zoid);
1258 (void) zfs_link_create(dl, zp, tx, ZNEW);
1259 zfs_log_create(zilog, tx, TX_CREATE, dzp, zp, name);
1263 * A directory entry already exists for this name.
1266 * Can't truncate an existing file if in exclusive mode.
1273 * Can't open a directory for writing.
1275 if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) {
1280 * Verify requested access to file.
1282 if (mode && (error = zfs_zaccess_rwx(zp, mode, cr))) {
1286 mutex_enter(&dzp->z_lock);
1288 mutex_exit(&dzp->z_lock);
1291 * Truncate regular files if requested.
1293 if ((ZTOV(zp)->v_type == VREG) &&
1294 (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) {
1295 error = zfs_freesp(zp, 0, 0, mode, TRUE);
1296 if (error == ERESTART &&
1297 zfsvfs->z_assign == TXG_NOWAIT) {
1298 /* NB: we already did dmu_tx_wait() */
1299 zfs_dirent_unlock(dl);
1307 zfs_dirent_unlock(dl);
1315 * If vnode is for a device return a specfs vnode instead.
1317 if (IS_DEVVP(*vpp)) {
1320 svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
1334 * Remove an entry from a directory.
1336 * IN: dvp - vnode of directory to remove entry from.
1337 * name - name of entry to remove.
1338 * cr - credentials of caller.
1340 * RETURN: 0 if success
1341 * error code if failure
1345 * vp - ctime (if nlink > 0)
/*
 * VOP remove for ZFS. Visible flow: lock the entry, check delete
 * permission, reject directories (rmdir handles those), purge the DNLC,
 * hold everything a tx might touch (entry ZAP, znode bonus, possible
 * whole-object free, xattr bonus, external ACL, unlinked-set ZAP), assign
 * with retry on ERESTART/TXG_NOWAIT, destroy the link, then either delete
 * the znode immediately or move it to the unlinked set, and log TX_REMOVE.
 * NOTE(review): fragment with elided lines (returns, goto labels, and
 * dmu_tx_commit/abort are not shown).
 */
1348 zfs_remove(vnode_t *dvp, char *name, cred_t *cr)
1350 znode_t *zp, *dzp = VTOZ(dvp);
1351 znode_t *xzp = NULL;
1353 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1354 zilog_t *zilog = zfsvfs->z_log;
1355 uint64_t acl_obj, xattr_obj;
1358 boolean_t may_delete_now, delete_now = FALSE;
1366 * Attempt to lock directory; fail if entry doesn't exist.
1368 if (error = zfs_dirent_lock(&dl, dzp, name, &zp, ZEXISTS)) {
1375 if (error = zfs_zaccess_delete(dzp, zp, cr)) {
1380 * Need to use rmdir for removing directories.
1382 if (vp->v_type == VDIR) {
1389 dnlc_remove(dvp, name);
1391 may_delete_now = FALSE;
1394 * We may delete the znode now, or we may put it in the unlinked set;
1395 * it depends on whether we're the last link, and on whether there are
1396 * other holds on the vnode. So we dmu_tx_hold() the right things to
1397 * allow for either case.
1399 tx = dmu_tx_create(zfsvfs->z_os);
1400 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1401 dmu_tx_hold_bonus(tx, zp->z_id);
1403 dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END);
1405 /* are there any extended attributes? */
1406 if ((xattr_obj = zp->z_phys->zp_xattr) != 0) {
1407 /* XXX - do we need this if we are deleting? */
1408 dmu_tx_hold_bonus(tx, xattr_obj);
1411 /* are there any additional acls */
1412 if ((acl_obj = zp->z_phys->zp_acl.z_acl_extern_obj) != 0 &&
1414 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
1416 /* charge as an update -- would be nice not to charge at all */
1417 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1419 error = dmu_tx_assign(tx, zfsvfs->z_assign);
1421 zfs_dirent_unlock(dl);
1423 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
1434 * Remove the directory entry.
1436 error = zfs_link_destroy(dl, zp, tx, 0, &unlinked);
/*
 * NOTE(review): the `0 &&` below makes this delete-now branch
 * unconditionally dead — removal always goes through the unlinked
 * set (the `else if (unlinked)` path). Looks like a deliberate
 * port-time disable; confirm before re-enabling.
 */
1443 if (0 && unlinked) {
1445 delete_now = may_delete_now &&
1446 vp->v_count == 1 && !vn_has_cached_data(vp) &&
1447 zp->z_phys->zp_xattr == xattr_obj &&
1448 zp->z_phys->zp_acl.z_acl_extern_obj == acl_obj;
1453 if (zp->z_phys->zp_xattr) {
1454 error = zfs_zget(zfsvfs, zp->z_phys->zp_xattr, &xzp);
1455 ASSERT3U(error, ==, 0);
1456 ASSERT3U(xzp->z_phys->zp_links, ==, 2);
1457 dmu_buf_will_dirty(xzp->z_dbuf, tx);
1458 mutex_enter(&xzp->z_lock);
1459 xzp->z_unlinked = 1;
1460 xzp->z_phys->zp_links = 0;
1461 mutex_exit(&xzp->z_lock);
1462 zfs_unlinked_add(xzp, tx);
1463 zp->z_phys->zp_xattr = 0; /* probably unnecessary */
1465 mutex_enter(&zp->z_lock);
1468 ASSERT3U(vp->v_count, ==, 0);
1470 mutex_exit(&zp->z_lock);
1471 zfs_znode_delete(zp, tx);
1472 VFS_RELE(zfsvfs->z_vfs);
1473 } else if (unlinked) {
1474 zfs_unlinked_add(zp, tx);
1477 zfs_log_remove(zilog, tx, TX_REMOVE, dzp, name);
1481 zfs_dirent_unlock(dl);
1486 /* this rele delayed to prevent nesting transactions */
1495 * Create a new directory and insert it into dvp using the name
1496 * provided. Return a pointer to the inserted directory.
1498 * IN: dvp - vnode of directory to add subdir to.
1499 * dirname - name of new directory.
1500 * vap - attributes of new directory.
1501 * cr - credentials of caller.
1503 * OUT: vpp - vnode of created directory.
1505 * RETURN: 0 if success
1506 * error code if failure
1509 * dvp - ctime|mtime updated
1510 * vp - ctime|mtime|atime updated
/*
 * VOP mkdir for ZFS. Visible flow: refuse directories inside xattr
 * directories, take a ZNEW dirent lock (fails if the name exists), check
 * ACE_ADD_SUBDIRECTORY, hold the parent ZAP + a new object in a tx
 * (retrying ERESTART under TXG_NOWAIT), create the node, link it into
 * the parent, and log TX_MKDIR.
 * NOTE(review): fragment with elided lines (locals zoid/dl/tx/error,
 * returns, dmu_tx_commit/abort are not shown).
 */
1513 zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr)
1515 znode_t *zp, *dzp = VTOZ(dvp);
1516 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1517 zilog_t *zilog = zfsvfs->z_log;
1523 ASSERT(vap->va_type == VDIR);
1527 if (dzp->z_phys->zp_flags & ZFS_XATTR) {
1535 * First make sure the new directory doesn't exist.
1537 if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, ZNEW)) {
1542 if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, cr)) {
1543 zfs_dirent_unlock(dl);
1549 * Add a new entry to the directory.
1551 tx = dmu_tx_create(zfsvfs->z_os);
1552 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
1553 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
1554 if (dzp->z_phys->zp_flags & ZFS_INHERIT_ACE)
1555 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1556 0, SPA_MAXBLOCKSIZE);
1557 error = dmu_tx_assign(tx, zfsvfs->z_assign);
1559 zfs_dirent_unlock(dl);
1560 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
1573 zfs_mknode(dzp, vap, &zoid, tx, cr, 0, &zp, 0);
1576 * Now put new name in parent dir.
1578 (void) zfs_link_create(dl, zp, tx, ZNEW);
1582 zfs_log_create(zilog, tx, TX_MKDIR, dzp, zp, dirname);
1585 zfs_dirent_unlock(dl);
1592 * Remove a directory subdir entry. If the current working
1593 * directory is the same as the subdir to be removed, the
1596 * IN: dvp - vnode of directory to remove from.
1597 * name - name of directory to be removed.
1598 * cwd - vnode of current working directory.
1599 * cr - credentials of caller.
1601 * RETURN: 0 if success
1602 * error code if failure
1605 * dvp - ctime|mtime updated
/*
 * VOP rmdir for ZFS. Visible flow: lock the entry, check delete access,
 * require a directory, take z_name_lock and z_parent_lock to fence out
 * concurrent lookups/renames, hold the parent ZAP + znode bonus +
 * unlinked-set ZAP in a tx (retrying ERESTART under TXG_NOWAIT), destroy
 * the link, and log TX_RMDIR; locks are dropped in reverse order.
 * NOTE(review): fragment with elided lines (returns, cache purge inside
 * the FREEBSD_NAMECACHE sections, dmu_tx_commit/abort are not shown).
 */
1608 zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr)
1610 znode_t *dzp = VTOZ(dvp);
1613 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1614 zilog_t *zilog = zfsvfs->z_log;
1625 * Attempt to lock directory; fail if entry doesn't exist.
1627 if (error = zfs_dirent_lock(&dl, dzp, name, &zp, ZEXISTS)) {
1634 if (error = zfs_zaccess_delete(dzp, zp, cr)) {
1638 if (vp->v_type != VDIR) {
1651 * Grab a lock on the directory to make sure that no one is
1652 * trying to add (or lookup) entries while we are removing it.
1654 rw_enter(&zp->z_name_lock, RW_WRITER);
1657 * Grab a lock on the parent pointer to make sure we play well
1658 * with the treewalk and directory rename code.
1660 rw_enter(&zp->z_parent_lock, RW_WRITER);
1662 tx = dmu_tx_create(zfsvfs->z_os);
1663 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1664 dmu_tx_hold_bonus(tx, zp->z_id);
1665 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1666 error = dmu_tx_assign(tx, zfsvfs->z_assign);
1668 rw_exit(&zp->z_parent_lock);
1669 rw_exit(&zp->z_name_lock);
1670 zfs_dirent_unlock(dl);
1672 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
1682 #ifdef FREEBSD_NAMECACHE
1686 error = zfs_link_destroy(dl, zp, tx, 0, NULL);
1689 zfs_log_remove(zilog, tx, TX_RMDIR, dzp, name);
1693 rw_exit(&zp->z_parent_lock);
1694 rw_exit(&zp->z_name_lock);
1695 #ifdef FREEBSD_NAMECACHE
1699 zfs_dirent_unlock(dl);
/*
 * zfs_readdir: fill the caller's uio with dirent64 records from a ZAP
 * cursor, synthesizing ".", "..", and (at the root) ".zfs". FreeBSD
 * variant: also returns an NFS cookie array via ncookies/cookies.
 * NOTE(review): elided listing — many body lines are not visible here.
 */
1708 * Read as many directory entries as will fit into the provided
1709 * buffer from the given directory cursor position (specified in
1710 * the uio structure.
1712 * IN: vp - vnode of directory to read.
1713 * uio - structure supplying read location, range info,
1714 * and return buffer.
1715 * cr - credentials of caller.
1717 * OUT: uio - updated offset and range, buffer filled.
1718 * eofp - set to true if end-of-file detected.
1720 * RETURN: 0 if success
1721 * error code if failure
1724 * vp - atime updated
1726 * Note that the low 4 bits of the cookie returned by zap is always zero.
1727 * This allows us to use the low range for "special" directory entries:
1728 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
1729 * we use the offset 2 for the '.zfs' directory.
1733 zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, int *ncookies, u_long **cookies)
1735 znode_t *zp = VTOZ(vp);
1738 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1743 zap_attribute_t zap;
1744 uint_t bytes_wanted;
1745 uint64_t offset; /* must be unsigned; checks for < 1 */
1752 u_long *cooks = NULL;
1757 * If we are not given an eof variable,
1764 * Check for valid iov_len.
1766 if (uio->uio_iov->iov_len <= 0) {
1772 * Quit if directory has been removed (posix)
1774 if ((*eofp = zp->z_unlinked) != 0) {
1781 offset = uio->uio_loffset;
1782 prefetch = zp->z_zn_prefetch;
1785 * Initialize the iterator cursor.
1789 * Start iteration from the beginning of the directory.
1791 zap_cursor_init(&zc, os, zp->z_id);
1794 * The offset is a serialized cursor.
1796 zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
1800 * Get space to change directory entries into fs independent format.
1802 iovp = uio->uio_iov;
1803 bytes_wanted = iovp->iov_len;
/* Single SYSSPACE iov: write dirents directly; otherwise bounce buffer. */
1804 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) {
1805 bufsize = bytes_wanted;
1806 outbuf = kmem_alloc(bufsize, KM_SLEEP);
1807 odp = (struct dirent64 *)outbuf;
1809 bufsize = bytes_wanted;
1810 odp = (struct dirent64 *)iovp->iov_base;
1813 if (ncookies != NULL) {
1815 * Minimum entry size is dirent size and 1 byte for a file name.
1817 ncooks = uio->uio_resid / (sizeof(struct dirent) - sizeof(((struct dirent *)NULL)->d_name) + 1);
1818 cooks = malloc(ncooks * sizeof(u_long), M_TEMP, M_WAITOK);
1824 * Transform to file-system independent format
1827 while (outcount < bytes_wanted) {
1832 * Special case `.', `..', and `.zfs'.
1835 (void) strcpy(zap.za_name, ".");
1838 } else if (offset == 1) {
1839 (void) strcpy(zap.za_name, "..");
1840 objnum = zp->z_phys->zp_parent;
1842 } else if (offset == 2 && zfs_show_ctldir(zp)) {
1843 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
1844 objnum = ZFSCTL_INO_ROOT;
1850 if (error = zap_cursor_retrieve(&zc, &zap)) {
1851 if ((*eofp = (error == ENOENT)) != 0)
1857 if (zap.za_integer_length != 8 ||
1858 zap.za_num_integers != 1) {
1859 cmn_err(CE_WARN, "zap_readdir: bad directory "
1860 "entry, obj = %lld, offset = %lld\n",
1861 (u_longlong_t)zp->z_id,
1862 (u_longlong_t)offset);
1867 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
1869 * MacOS X can extract the object type here such as:
1870 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
1872 type = ZFS_DIRENT_TYPE(zap.za_first_integer);
1874 reclen = DIRENT64_RECLEN(strlen(zap.za_name));
1877 * Will this entry fit in the buffer?
1879 if (outcount + reclen > bufsize) {
1881 * Did we manage to fit anything in the buffer?
1892 odp->d_ino = objnum;
1893 odp->d_reclen = reclen;
1894 odp->d_namlen = strlen(zap.za_name);
1895 (void) strlcpy(odp->d_name, zap.za_name, odp->d_namlen + 1);
1898 odp = (dirent64_t *)((intptr_t)odp + reclen);
1900 ASSERT(outcount <= bufsize);
1902 /* Prefetch znode */
1904 dmu_prefetch(os, objnum, 0, 0);
1907 * Move to the next entry, fill in the previous offset.
1909 if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
1910 zap_cursor_advance(&zc);
1911 offset = zap_cursor_serialize(&zc);
1916 if (cooks != NULL) {
1919 KASSERT(ncooks >= 0, ("ncookies=%d", ncooks));
1922 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
1924 /* Subtract unused cookies */
1925 if (ncookies != NULL)
1926 *ncookies -= ncooks;
1928 if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) {
1929 iovp->iov_base += outcount;
1930 iovp->iov_len -= outcount;
1931 uio->uio_resid -= outcount;
1932 } else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) {
1934 * Reset the pointer.
1936 offset = uio->uio_loffset;
1940 zap_cursor_fini(&zc);
1941 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
1942 kmem_free(outbuf, bufsize);
/* ENOENT from the ZAP iterator means clean EOF, not an error. */
1944 if (error == ENOENT)
1947 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
1949 uio->uio_loffset = offset;
/* On error the caller must not see a partially-built cookie array. */
1951 if (error != 0 && cookies != NULL) {
1952 free(*cookies, M_TEMP);
/*
 * zfs_fsync: push this file's intent-log records to stable storage by
 * committing the ZIL up to the znode's last itx.
 */
1960 zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr)
1962 znode_t *zp = VTOZ(vp);
1963 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1966 zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id);
/*
 * zfs_getattr: translate the znode's on-disk attributes into the caller's
 * vattr. NOTE(review): elided listing — some body lines are not visible.
 */
1972 * Get the requested file attributes and place them in the provided
1975 * IN: vp - vnode of file.
1976 * vap - va_mask identifies requested attributes.
1978 * cr - credentials of caller.
1980 * OUT: vap - attribute values.
1982 * RETURN: 0 (always succeeds)
1986 zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr)
1988 znode_t *zp = VTOZ(vp);
1989 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1990 znode_phys_t *pzp = zp->z_phys;
1992 u_longlong_t nblocks;
1998 * Return all attributes. It's cheaper to provide the answer
1999 * than to determine whether we were asked the question.
2001 mutex_enter(&zp->z_lock);
2003 vap->va_type = IFTOVT(pzp->zp_mode);
2004 vap->va_mode = pzp->zp_mode & ~S_IFMT;
2005 vap->va_uid = zp->z_phys->zp_uid;
2006 vap->va_gid = zp->z_phys->zp_gid;
2007 vap->va_nodeid = zp->z_id;
2008 vap->va_nlink = MIN(pzp->zp_links, UINT32_MAX); /* nlink_t limit! */
2009 vap->va_size = pzp->zp_size;
2010 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
2011 vap->va_rdev = zfs_cmpldev(pzp->zp_rdev);
2012 vap->va_seq = zp->z_seq;
2013 vap->va_flags = 0; /* FreeBSD: Reset chflags(2) flags. */
2015 ZFS_TIME_DECODE(&vap->va_atime, pzp->zp_atime);
2016 ZFS_TIME_DECODE(&vap->va_mtime, pzp->zp_mtime);
2017 ZFS_TIME_DECODE(&vap->va_ctime, pzp->zp_ctime);
2018 ZFS_TIME_DECODE(&vap->va_birthtime, pzp->zp_crtime);
2021 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
2022 * Also, if we are the owner don't bother, since owner should
2023 * always be allowed to read basic attributes of file.
2025 if (!(zp->z_phys->zp_flags & ZFS_ACL_TRIVIAL) &&
2026 (zp->z_phys->zp_uid != crgetuid(cr))) {
2027 if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, cr)) {
2028 mutex_exit(&zp->z_lock);
2034 mutex_exit(&zp->z_lock);
2036 dmu_object_size_from_db(zp->z_dbuf, &blksize, &nblocks);
2037 vap->va_blksize = blksize;
2038 vap->va_bytes = nblocks << 9; /* nblocks * 512 */
2040 if (zp->z_blksz == 0) {
2042 * Block size hasn't been set; suggest maximal I/O transfers.
2044 vap->va_blksize = zfsvfs->z_max_blksz;
/*
 * zfs_setattr: validate and apply the attribute changes in vap (size,
 * mode, uid/gid, times) under a DMU transaction, logging via the ZIL.
 * NOTE(review): elided listing — significant portions of the permission
 * and error-handling logic are not visible in this view.
 */
2052 * Set the file attributes to the values contained in the
2055 * IN: vp - vnode of file to be modified.
2056 * vap - new attribute values.
2057 * flags - ATTR_UTIME set if non-default time values provided.
2058 * cr - credentials of caller.
2060 * RETURN: 0 if success
2061 * error code if failure
2064 * vp - ctime updated, mtime updated if size changed.
2068 zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2069 caller_context_t *ct)
2071 struct znode *zp = VTOZ(vp);
2072 znode_phys_t *pzp = zp->z_phys;
2073 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2074 zilog_t *zilog = zfsvfs->z_log;
2077 uint_t mask = vap->va_mask;
2082 int need_policy = FALSE;
2088 if (mask & AT_NOSET)
/* Size changes only make sense for regular files and fifos. */
2091 if (mask & AT_SIZE && vp->v_type == VDIR)
2094 if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO)
2102 if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
2108 * First validate permissions
2111 if (mask & AT_SIZE) {
2112 err = zfs_zaccess(zp, ACE_WRITE_DATA, cr);
2118 * XXX - Note, we are not providing any open
2119 * mode flags here (like FNDELAY), so we may
2120 * block if there are locks present... this
2121 * should be addressed in openat().
2124 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2125 /* NB: we already did dmu_tx_wait() if necessary */
2126 } while (err == ERESTART && zfsvfs->z_assign == TXG_NOWAIT);
2133 if (mask & (AT_ATIME|AT_MTIME))
2134 need_policy = zfs_zaccess_v4_perm(zp, ACE_WRITE_ATTRIBUTES, cr);
2136 if (mask & (AT_UID|AT_GID)) {
2137 int idmask = (mask & (AT_UID|AT_GID));
2142 * NOTE: even if a new mode is being set,
2143 * we may clear S_ISUID/S_ISGID bits.
2146 if (!(mask & AT_MODE))
2147 vap->va_mode = pzp->zp_mode;
2150 * Take ownership or chgrp to group we are a member of
2153 take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
2154 take_group = (mask & AT_GID) && groupmember(vap->va_gid, cr);
2157 * If both AT_UID and AT_GID are set then take_owner and
2158 * take_group must both be set in order to allow taking
2161 * Otherwise, send the check through secpolicy_vnode_setattr()
2165 if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
2166 ((idmask == AT_UID) && take_owner) ||
2167 ((idmask == AT_GID) && take_group)) {
2168 if (zfs_zaccess_v4_perm(zp, ACE_WRITE_OWNER, cr) == 0) {
2170 * Remove setuid/setgid for non-privileged users
2172 secpolicy_setid_clear(vap, cr);
2173 trim_mask = (mask & (AT_UID|AT_GID));
2182 mutex_enter(&zp->z_lock);
2183 oldva.va_mode = pzp->zp_mode;
2184 oldva.va_uid = zp->z_phys->zp_uid;
2185 oldva.va_gid = zp->z_phys->zp_gid;
2186 mutex_exit(&zp->z_lock);
2188 if (mask & AT_MODE) {
2189 if (zfs_zaccess_v4_perm(zp, ACE_WRITE_ACL, cr) == 0) {
2190 err = secpolicy_setid_setsticky_clear(vp, vap,
2196 trim_mask |= AT_MODE;
2204 * If trim_mask is set then take ownership
2205 * has been granted or write_acl is present and user
2206 * has the ability to modify mode. In that case remove
2207 * UID|GID and or MODE from mask so that
2208 * secpolicy_vnode_setattr() doesn't revoke it.
2212 saved_mask = vap->va_mask;
2213 vap->va_mask &= ~trim_mask;
2216 err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
2217 (int (*)(void *, int, cred_t *))zfs_zaccess_rwx, zp);
2224 vap->va_mask |= saved_mask;
2228 * secpolicy_vnode_setattr, or take ownership may have
2231 mask = vap->va_mask;
2233 tx = dmu_tx_create(zfsvfs->z_os);
2234 dmu_tx_hold_bonus(tx, zp->z_id);
2236 if (mask & AT_MODE) {
2237 uint64_t pmode = pzp->zp_mode;
/* Preserve the file-type bits; only permission bits come from vap. */
2239 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
2241 if (zp->z_phys->zp_acl.z_acl_extern_obj)
2242 dmu_tx_hold_write(tx,
2243 pzp->zp_acl.z_acl_extern_obj, 0, SPA_MAXBLOCKSIZE);
2245 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2246 0, ZFS_ACL_SIZE(MAX_ACL_SIZE));
/* Chown/chgrp also updates the extended-attribute directory's znode. */
2249 if ((mask & (AT_UID | AT_GID)) && zp->z_phys->zp_xattr != 0) {
2250 err = zfs_zget(zp->z_zfsvfs, zp->z_phys->zp_xattr, &attrzp);
2256 dmu_tx_hold_bonus(tx, attrzp->z_id);
2259 err = dmu_tx_assign(tx, zfsvfs->z_assign);
2262 VN_RELE(ZTOV(attrzp));
2263 if (err == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
2273 dmu_buf_will_dirty(zp->z_dbuf, tx);
2276 * Set each attribute requested.
2277 * We group settings according to the locks they need to acquire.
2279 * Note: you cannot set ctime directly, although it will be
2280 * updated as a side-effect of calling this function.
2283 mutex_enter(&zp->z_lock);
2285 if (mask & AT_MODE) {
2286 err = zfs_acl_chmod_setattr(zp, new_mode, tx);
2287 ASSERT3U(err, ==, 0);
2291 mutex_enter(&attrzp->z_lock);
2293 if (mask & AT_UID) {
2294 zp->z_phys->zp_uid = (uint64_t)vap->va_uid;
2296 attrzp->z_phys->zp_uid = (uint64_t)vap->va_uid;
2300 if (mask & AT_GID) {
2301 zp->z_phys->zp_gid = (uint64_t)vap->va_gid;
2303 attrzp->z_phys->zp_gid = (uint64_t)vap->va_gid;
2307 mutex_exit(&attrzp->z_lock);
2309 if (mask & AT_ATIME)
2310 ZFS_TIME_ENCODE(&vap->va_atime, pzp->zp_atime);
2312 if (mask & AT_MTIME)
2313 ZFS_TIME_ENCODE(&vap->va_mtime, pzp->zp_mtime);
2316 zfs_time_stamper_locked(zp, CONTENT_MODIFIED, tx);
2318 zfs_time_stamper_locked(zp, STATE_CHANGED, tx);
2321 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask);
2323 mutex_exit(&zp->z_lock);
2326 VN_RELE(ZTOV(attrzp));
/*
 * Per-directory lock record built by zfs_rename_lock() and torn down by
 * zfs_rename_unlock(); forms a singly-linked list up the directory tree.
 */
2334 typedef struct zfs_zlock {
2335 krwlock_t *zl_rwlock; /* lock we acquired */
2336 znode_t *zl_znode; /* znode we held */
2337 struct zfs_zlock *zl_next; /* next in list */
2341 * Drop locks and release vnodes that were held by zfs_rename_lock().
2344 zfs_rename_unlock(zfs_zlock_t **zlpp)
/* Walk the zlock list: release the held vnode (if any), drop the rwlock,
 * then free the node. *zlpp ends NULL. */
2348 while ((zl = *zlpp) != NULL) {
2349 if (zl->zl_znode != NULL)
2350 VN_RELE(ZTOV(zl->zl_znode));
2351 rw_exit(zl->zl_rwlock);
2352 *zlpp = zl->zl_next;
2353 kmem_free(zl, sizeof (*zl));
/*
 * zfs_rename_lock: lock the chain of parents from the target directory up
 * to the source, detecting a rename of a directory into its own subtree.
 * NOTE(review): elided listing — the retry/return paths are not visible.
 */
2358 * Search back through the directory tree, using the ".." entries.
2359 * Lock each directory in the chain to prevent concurrent renames.
2360 * Fail any attempt to move a directory into one of its own descendants.
2361 * XXX - z_parent_lock can overlap with map or grow locks
2364 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
2368 uint64_t rootid = zp->z_zfsvfs->z_root;
2369 uint64_t *oidp = &zp->z_id;
2370 krwlock_t *rwlp = &szp->z_parent_lock;
2371 krw_t rw = RW_WRITER;
2374 * First pass write-locks szp and compares to zp->z_id.
2375 * Later passes read-lock zp and compare to zp->z_parent.
2378 if (!rw_tryenter(rwlp, rw)) {
2380 * Another thread is renaming in this path.
2381 * Note that if we are a WRITER, we don't have any
2382 * parent_locks held yet.
2384 if (rw == RW_READER && zp->z_id > szp->z_id) {
2386 * Drop our locks and restart
2388 zfs_rename_unlock(&zl);
2392 rwlp = &szp->z_parent_lock;
2397 * Wait for other thread to drop its locks
2403 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
2404 zl->zl_rwlock = rwlp;
2405 zl->zl_znode = NULL;
2406 zl->zl_next = *zlpp;
2409 if (*oidp == szp->z_id) /* We're a descendant of szp */
2412 if (*oidp == rootid) /* We've hit the top */
2415 if (rw == RW_READER) { /* i.e. not the first pass */
2416 int error = zfs_zget(zp->z_zfsvfs, *oidp, &zp);
2421 oidp = &zp->z_phys->zp_parent;
2422 rwlp = &zp->z_parent_lock;
2425 } while (zp->z_id != sdzp->z_id);
/*
 * zfs_rename: move/rename the entry snm in sdvp to tnm in tdvp, replacing
 * an existing compatible target if present.
 * NOTE(review): elided listing — deadlock-avoidance and error paths are
 * only partially visible in this view.
 */
2431 * Move an entry from the provided source directory to the target
2432 * directory. Change the entry name as indicated.
2434 * IN: sdvp - Source directory containing the "old entry".
2435 * snm - Old entry name.
2436 * tdvp - Target directory to contain the "new entry".
2437 * tnm - New entry name.
2438 * cr - credentials of caller.
2440 * RETURN: 0 if success
2441 * error code if failure
2444 * sdvp,tdvp - ctime|mtime updated
2447 zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr)
2449 znode_t *tdzp, *szp, *tzp;
2450 znode_t *sdzp = VTOZ(sdvp);
2451 zfsvfs_t *zfsvfs = sdzp->z_zfsvfs;
2452 zilog_t *zilog = zfsvfs->z_log;
2454 zfs_dirlock_t *sdl, *tdl;
2457 int cmp, serr, terr, error;
2462 * Make sure we have the real vp for the target directory.
2464 if (VOP_REALVP(tdvp, &realvp) == 0)
/* Cross-filesystem rename is rejected. */
2467 if (tdvp->v_vfsp != sdvp->v_vfsp) {
2479 * This is to prevent the creation of links into attribute space
2480 * by renaming a linked file into/outof an attribute directory.
2481 * See the comment in zfs_link() for why this is considered bad.
2483 if ((tdzp->z_phys->zp_flags & ZFS_XATTR) !=
2484 (sdzp->z_phys->zp_flags & ZFS_XATTR)) {
2490 * Lock source and target directory entries. To prevent deadlock,
2491 * a lock ordering must be defined. We lock the directory with
2492 * the smallest object id first, or if it's a tie, the one with
2493 * the lexically first name.
2495 if (sdzp->z_id < tdzp->z_id) {
2497 } else if (sdzp->z_id > tdzp->z_id) {
2500 cmp = strcmp(snm, tnm);
2503 * POSIX: "If the old argument and the new argument
2504 * both refer to links to the same existing file,
2505 * the rename() function shall return successfully
2506 * and perform no other action."
2513 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp, ZEXISTS);
2514 terr = zfs_dirent_lock(&tdl, tdzp, tnm, &tzp, 0);
2516 terr = zfs_dirent_lock(&tdl, tdzp, tnm, &tzp, 0);
2517 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp, ZEXISTS);
2522 * Source entry invalid or not there.
2525 zfs_dirent_unlock(tdl);
2529 if (strcmp(snm, ".") == 0 || strcmp(snm, "..") == 0)
2535 zfs_dirent_unlock(sdl);
2537 if (strcmp(tnm, "..") == 0)
2544 * Must have write access at the source to remove the old entry
2545 * and write access at the target to create the new entry.
2546 * Note that if target and source are the same, this can be
2547 * done in a single check.
2550 if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr))
2553 if (ZTOV(szp)->v_type == VDIR) {
2555 * Check to make sure rename is valid.
2556 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
2558 if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl))
2563 * Does target exist?
2567 * Source and target must be the same type.
2569 if (ZTOV(szp)->v_type == VDIR) {
2570 if (ZTOV(tzp)->v_type != VDIR) {
2575 if (ZTOV(tzp)->v_type == VDIR) {
2581 * POSIX dictates that when the source and target
2582 * entries refer to the same file object, rename
2583 * must do nothing and exit without error.
2585 if (szp->z_id == tzp->z_id) {
2591 vnevent_rename_src(ZTOV(szp));
2593 vnevent_rename_dest(ZTOV(tzp));
2595 tx = dmu_tx_create(zfsvfs->z_os);
2596 dmu_tx_hold_bonus(tx, szp->z_id); /* nlink changes */
2597 dmu_tx_hold_bonus(tx, sdzp->z_id); /* nlink changes */
2598 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
2599 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
2601 dmu_tx_hold_bonus(tx, tdzp->z_id); /* nlink changes */
2603 dmu_tx_hold_bonus(tx, tzp->z_id); /* parent changes */
2604 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2605 error = dmu_tx_assign(tx, zfsvfs->z_assign);
2608 zfs_rename_unlock(&zl);
2609 zfs_dirent_unlock(sdl);
2610 zfs_dirent_unlock(tdl);
2614 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
2624 if (tzp) /* Attempt to remove the existing target */
2625 error = zfs_link_destroy(tdl, tzp, tx, 0, NULL);
2628 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
2630 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
2632 zfs_log_rename(zilog, tx, TX_RENAME, sdzp,
2633 sdl->dl_name, tdzp, tdl->dl_name, szp);
2635 #ifdef FREEBSD_NAMECACHE
2646 zfs_rename_unlock(&zl);
2648 zfs_dirent_unlock(sdl);
2649 zfs_dirent_unlock(tdl);
/*
 * zfs_symlink: create a symlink "name" in dvp whose content is "link".
 * Short targets live in the znode's bonus buffer; long ones in file data.
 * NOTE(review): elided listing — some lines are missing from this view.
 */
2661 * Insert the indicated symbolic reference entry into the directory.
2663 * IN: dvp - Directory to contain new symbolic link.
2664 * link - Name for new symlink entry.
2665 * vap - Attributes of new entry.
2666 * target - Target path of new symlink.
2667 * cr - credentials of caller.
2669 * RETURN: 0 if success
2670 * error code if failure
2673 * dvp - ctime|mtime updated
2676 zfs_symlink(vnode_t *dvp, vnode_t **vpp, char *name, vattr_t *vap, char *link, cred_t *cr, kthread_t *td)
2678 znode_t *zp, *dzp = VTOZ(dvp);
2681 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
2682 zilog_t *zilog = zfsvfs->z_log;
2684 int len = strlen(link);
2687 ASSERT(vap->va_type == VLNK);
2691 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, cr)) {
2696 if (len > MAXPATHLEN) {
2698 return (ENAMETOOLONG);
2702 * Attempt to lock directory; fail if entry already exists.
2704 if (error = zfs_dirent_lock(&dl, dzp, name, &zp, ZNEW)) {
2709 tx = dmu_tx_create(zfsvfs->z_os);
2710 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
2711 dmu_tx_hold_bonus(tx, dzp->z_id);
2712 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
2713 if (dzp->z_phys->zp_flags & ZFS_INHERIT_ACE)
2714 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, SPA_MAXBLOCKSIZE);
2715 error = dmu_tx_assign(tx, zfsvfs->z_assign);
2717 zfs_dirent_unlock(dl);
2718 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
2728 dmu_buf_will_dirty(dzp->z_dbuf, tx);
2731 * Create a new object for the symlink.
2732 * Put the link content into bonus buffer if it will fit;
2733 * otherwise, store it just like any other file data.
2736 if (sizeof (znode_phys_t) + len <= dmu_bonus_max()) {
2737 zfs_mknode(dzp, vap, &zoid, tx, cr, 0, &zp, len);
/* Target fits in the bonus buffer: copy it just past the znode_phys_t. */
2739 bcopy(link, zp->z_phys + 1, len);
2743 zfs_mknode(dzp, vap, &zoid, tx, cr, 0, &zp, 0);
2746 * Nothing can access the znode yet so no locking needed
2747 * for growing the znode's blocksize.
2749 zfs_grow_blocksize(zp, len, tx);
2751 VERIFY(0 == dmu_buf_hold(zfsvfs->z_os, zoid, 0, FTAG, &dbp));
2752 dmu_buf_will_dirty(dbp, tx);
2754 ASSERT3U(len, <=, dbp->db_size);
2755 bcopy(link, dbp->db_data, len);
2756 dmu_buf_rele(dbp, FTAG);
2758 zp->z_phys->zp_size = len;
2761 * Insert the new object into the directory.
2763 (void) zfs_link_create(dl, zp, tx, ZNEW);
2766 zfs_log_symlink(zilog, tx, TX_SYMLINK, dzp, zp, name, link);
2772 zfs_dirent_unlock(dl);
/*
 * zfs_readlink: copy the symlink target into the caller's uio. Mirrors
 * zfs_symlink's storage choice: bonus buffer if it fit, else file data.
 */
2779 * Return, in the buffer contained in the provided uio structure,
2780 * the symbolic path referred to by vp.
2782 * IN: vp - vnode of symbolic link.
2783 * uoip - structure to contain the link path.
2784 * cr - credentials of caller.
2786 * OUT: uio - structure to contain the link path.
2788 * RETURN: 0 if success
2789 * error code if failure
2792 * vp - atime updated
2796 zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr)
2798 znode_t *zp = VTOZ(vp);
2799 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2805 bufsz = (size_t)zp->z_phys->zp_size;
/* Target stored inline after znode_phys_t when it fit in the bonus buffer. */
2806 if (bufsz + sizeof (znode_phys_t) <= zp->z_dbuf->db_size) {
2807 error = uiomove(zp->z_phys + 1,
2808 MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio);
2811 error = dmu_buf_hold(zfsvfs->z_os, zp->z_id, 0, FTAG, &dbp);
2816 error = uiomove(dbp->db_data,
2817 MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio);
2818 dmu_buf_rele(dbp, FTAG);
2821 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
/*
 * zfs_link: create a hard link to svp named "name" in directory tdvp.
 * NOTE(review): elided listing — some lines are missing from this view.
 */
2827 * Insert a new entry into directory tdvp referencing svp.
2829 * IN: tdvp - Directory to contain new entry.
2830 * svp - vnode of new entry.
2831 * name - name of new entry.
2832 * cr - credentials of caller.
2834 * RETURN: 0 if success
2835 * error code if failure
2838 * tdvp - ctime|mtime updated
2839 * svp - ctime updated
2843 zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr)
2845 znode_t *dzp = VTOZ(tdvp);
2847 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
2848 zilog_t *zilog = zfsvfs->z_log;
2854 ASSERT(tdvp->v_type == VDIR);
2858 if (VOP_REALVP(svp, &realvp) == 0)
/* Cross-filesystem link is rejected. */
2861 if (svp->v_vfsp != tdvp->v_vfsp) {
2869 * We do not support links between attributes and non-attributes
2870 * because of the potential security risk of creating links
2871 * into "normal" file space in order to circumvent restrictions
2872 * imposed in attribute space.
2874 if ((szp->z_phys->zp_flags & ZFS_XATTR) !=
2875 (dzp->z_phys->zp_flags & ZFS_XATTR)) {
2881 * POSIX dictates that we return EPERM here.
2882 * Better choices include ENOTSUP or EISDIR.
2884 if (svp->v_type == VDIR) {
2889 if ((uid_t)szp->z_phys->zp_uid != crgetuid(cr) &&
2890 secpolicy_basic_link(cr) != 0) {
2895 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, cr)) {
2901 * Attempt to lock directory; fail if entry already exists.
2903 if (error = zfs_dirent_lock(&dl, dzp, name, &tzp, ZNEW)) {
2908 tx = dmu_tx_create(zfsvfs->z_os);
2909 dmu_tx_hold_bonus(tx, szp->z_id);
2910 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
2911 error = dmu_tx_assign(tx, zfsvfs->z_assign);
2913 zfs_dirent_unlock(dl);
2914 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
2924 error = zfs_link_create(dl, szp, tx, 0);
2927 zfs_log_link(zilog, tx, TX_LINK, dzp, szp, name);
2931 zfs_dirent_unlock(dl);
/*
 * zfs_inactive: last-reference processing for a vnode; handles the
 * already-unmounted case and flushes a dirty atime via its own tx.
 * NOTE(review): elided listing — some lines are missing from this view.
 */
2938 zfs_inactive(vnode_t *vp, cred_t *cr)
2940 znode_t *zp = VTOZ(vp);
2941 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2944 rw_enter(&zfsvfs->z_um_lock, RW_READER);
2945 if (zfsvfs->z_unmounted2) {
2946 ASSERT(zp->z_dbuf_held == 0);
2948 mutex_enter(&zp->z_lock);
2950 vp->v_count = 0; /* count arrives as 1 */
2952 if (zp->z_dbuf == NULL) {
2953 mutex_exit(&zp->z_lock);
2956 mutex_exit(&zp->z_lock);
2958 rw_exit(&zfsvfs->z_um_lock);
2959 VFS_RELE(zfsvfs->z_vfs);
/* Persist a deferred atime update unless the file is already unlinked. */
2963 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
2964 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
2966 dmu_tx_hold_bonus(tx, zp->z_id);
2967 error = dmu_tx_assign(tx, TXG_WAIT);
2971 dmu_buf_will_dirty(zp->z_dbuf, tx);
2972 mutex_enter(&zp->z_lock);
2973 zp->z_atime_dirty = 0;
2974 mutex_exit(&zp->z_lock);
2980 rw_exit(&zfsvfs->z_um_lock);
/* Compile-time checks: both ZFS fid layouts must fit in a generic fid. */
2983 CTASSERT(sizeof(struct zfid_short) <= sizeof(struct fid));
2984 CTASSERT(sizeof(struct zfid_long) <= sizeof(struct fid));
/*
 * zfs_fid: build an NFS file handle for vp. Snapshot (child) filesystems
 * use the long form, which appends the objset id; byte-at-a-time stores
 * keep the handle layout endian-independent.
 */
2987 zfs_fid(vnode_t *vp, fid_t *fidp)
2989 znode_t *zp = VTOZ(vp);
2990 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2991 uint32_t gen = (uint32_t)zp->z_phys->zp_gen;
2992 uint64_t object = zp->z_id;
2998 size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
2999 fidp->fid_len = size;
3001 zfid = (zfid_short_t *)fidp;
3003 zfid->zf_len = size;
3005 for (i = 0; i < sizeof (zfid->zf_object); i++)
3006 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
3008 /* Must have a non-zero generation number to distinguish from .zfs */
3011 for (i = 0; i < sizeof (zfid->zf_gen); i++)
3012 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
3014 if (size == LONG_FID_LEN) {
3015 uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
3018 zlfid = (zfid_long_t *)fidp;
3020 for (i = 0; i < sizeof (zlfid->zf_setid); i++)
3021 zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
3023 /* XXX - this should be the generation number for the objset */
3024 for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
3025 zlfid->zf_setgen[i] = 0;
/*
 * zfs_pathconf: answer pathconf(2) queries; unknown names return
 * EOPNOTSUPP so the FreeBSD wrapper can fall back to vop_stdpathconf.
 * NOTE(review): elided listing — some case bodies are not visible.
 */
3033 zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr)
3045 case _PC_FILESIZEBITS:
3050 case _PC_XATTR_EXISTS:
3052 zfsvfs = zp->z_zfsvfs;
3055 error = zfs_dirent_lock(&dl, zp, "", &xzp,
3056 ZXATTR | ZEXISTS | ZSHARED);
3058 zfs_dirent_unlock(dl);
3059 if (!zfs_dirempty(xzp))
3062 } else if (error == ENOENT) {
3064 * If there aren't extended attributes, it's the
3065 * same as having zero of them.
3073 case _PC_ACL_EXTENDED:
3074 *valp = 0; /* TODO */
3077 case _PC_MIN_HOLE_SIZE:
3078 *valp = (int)SPA_MINBLOCKSIZE;
3082 return (EOPNOTSUPP);
/* zfs_getsecattr: fetch the file's ACL into vsecp via zfs_getacl(). */
3089 zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr)
3091 znode_t *zp = VTOZ(vp);
3092 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3096 error = zfs_getacl(zp, vsecp, cr);
/* zfs_setsecattr: replace the file's ACL from vsecp via zfs_setacl(). */
3106 zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr)
3108 znode_t *zp = VTOZ(vp);
3109 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3113 error = zfs_setacl(zp, vsecp, cr);
/*
 * VOP_OPEN wrapper: delegate to zfs_open(), then ensure the vnode has a
 * VM object sized to the file for mmap/pageout support.
 */
3120 zfs_freebsd_open(ap)
3121 struct vop_open_args /* {
3124 struct ucred *a_cred;
3125 struct thread *a_td;
3128 vnode_t *vp = ap->a_vp;
3129 znode_t *zp = VTOZ(vp);
3132 error = zfs_open(&vp, ap->a_mode, ap->a_cred);
3134 vnode_create_vobject(vp, zp->z_phys->zp_size, ap->a_td);
/* VOP_CLOSE wrapper: forward to zfs_close() with zeroed count/offset. */
3139 zfs_freebsd_close(ap)
3140 struct vop_close_args /* {
3143 struct ucred *a_cred;
3144 struct thread *a_td;
3148 return (zfs_close(ap->a_vp, ap->a_fflag, 0, 0, ap->a_cred));
/* VOP_IOCTL wrapper: forward to zfs_ioctl(). */
3152 zfs_freebsd_ioctl(ap)
3153 struct vop_ioctl_args /* {
3163 return (zfs_ioctl(ap->a_vp, ap->a_command, (intptr_t)ap->a_data,
3164 ap->a_fflag, ap->a_cred, NULL));
/* VOP_READ wrapper: forward to zfs_read(). */
3168 zfs_freebsd_read(ap)
3169 struct vop_read_args /* {
3173 struct ucred *a_cred;
3177 return (zfs_read(ap->a_vp, ap->a_uio, ap->a_ioflag, ap->a_cred, NULL));
/* VOP_WRITE wrapper: forward to zfs_write(). */
3181 zfs_freebsd_write(ap)
3182 struct vop_write_args /* {
3186 struct ucred *a_cred;
3190 return (zfs_write(ap->a_vp, ap->a_uio, ap->a_ioflag, ap->a_cred, NULL));
/* VOP_ACCESS wrapper: forward to zfs_access() with flags = 0. */
3194 zfs_freebsd_access(ap)
3195 struct vop_access_args /* {
3198 struct ucred *a_cred;
3199 struct thread *a_td;
3203 return (zfs_access(ap->a_vp, ap->a_mode, 0, ap->a_cred));
/*
 * VOP_LOOKUP wrapper: copy the (not NUL-terminated) componentname into a
 * bounded local buffer, then forward to zfs_lookup().
 */
3207 zfs_freebsd_lookup(ap)
3208 struct vop_lookup_args /* {
3209 struct vnode *a_dvp;
3210 struct vnode **a_vpp;
3211 struct componentname *a_cnp;
3214 struct componentname *cnp = ap->a_cnp;
3215 char nm[NAME_MAX + 1];
3217 ASSERT(cnp->cn_namelen < sizeof(nm));
3218 strlcpy(nm, cnp->cn_nameptr, MIN(cnp->cn_namelen + 1, sizeof(nm)));
3220 return (zfs_lookup(ap->a_dvp, nm, ap->a_vpp, cnp, cnp->cn_nameiop,
3221 cnp->cn_cred, cnp->cn_thread));
/*
 * VOP_CREATE wrapper: normalize the vattr mask and forward to
 * zfs_create() in non-exclusive mode with the permission bits of va_mode.
 */
3225 zfs_freebsd_create(ap)
3226 struct vop_create_args /* {
3227 struct vnode *a_dvp;
3228 struct vnode **a_vpp;
3229 struct componentname *a_cnp;
3230 struct vattr *a_vap;
3233 struct componentname *cnp = ap->a_cnp;
3234 vattr_t *vap = ap->a_vap;
3237 ASSERT(cnp->cn_flags & SAVENAME);
3239 vattr_init_mask(vap);
3240 mode = vap->va_mode & ALLPERMS;
3242 return (zfs_create(ap->a_dvp, cnp->cn_nameptr, vap, !EXCL, mode,
3243 ap->a_vpp, cnp->cn_cred));
/* VOP_REMOVE wrapper: forward to zfs_remove(). */
3247 zfs_freebsd_remove(ap)
3248 struct vop_remove_args /* {
3249 struct vnode *a_dvp;
3251 struct componentname *a_cnp;
3255 ASSERT(ap->a_cnp->cn_flags & SAVENAME);
3257 return (zfs_remove(ap->a_dvp, ap->a_cnp->cn_nameptr,
3258 ap->a_cnp->cn_cred));
/* VOP_MKDIR wrapper: normalize the vattr mask and forward to zfs_mkdir(). */
3262 zfs_freebsd_mkdir(ap)
3263 struct vop_mkdir_args /* {
3264 struct vnode *a_dvp;
3265 struct vnode **a_vpp;
3266 struct componentname *a_cnp;
3267 struct vattr *a_vap;
3270 vattr_t *vap = ap->a_vap;
3272 ASSERT(ap->a_cnp->cn_flags & SAVENAME);
3274 vattr_init_mask(vap);
3276 return (zfs_mkdir(ap->a_dvp, ap->a_cnp->cn_nameptr, vap, ap->a_vpp,
3277 ap->a_cnp->cn_cred));
/* VOP_RMDIR wrapper: forward to zfs_rmdir() with no cwd vnode (NULL). */
3281 zfs_freebsd_rmdir(ap)
3282 struct vop_rmdir_args /* {
3283 struct vnode *a_dvp;
3285 struct componentname *a_cnp;
3288 struct componentname *cnp = ap->a_cnp;
3290 ASSERT(cnp->cn_flags & SAVENAME);
3292 return (zfs_rmdir(ap->a_dvp, cnp->cn_nameptr, NULL, cnp->cn_cred));
/* VOP_READDIR wrapper: forward to zfs_readdir(), including NFS cookies. */
3296 zfs_freebsd_readdir(ap)
3297 struct vop_readdir_args /* {
3300 struct ucred *a_cred;
3307 return (zfs_readdir(ap->a_vp, ap->a_uio, ap->a_cred, ap->a_eofflag,
3308 ap->a_ncookies, ap->a_cookies));
/* VOP_FSYNC wrapper: forward to zfs_fsync() using the thread's cred. */
3312 zfs_freebsd_fsync(ap)
3313 struct vop_fsync_args /* {
3316 struct thread *a_td;
3321 return (zfs_fsync(ap->a_vp, 0, ap->a_td->td_ucred));
/* VOP_GETATTR wrapper: forward to zfs_getattr() with flags = 0. */
3325 zfs_freebsd_getattr(ap)
3326 struct vop_getattr_args /* {
3328 struct vattr *a_vap;
3329 struct ucred *a_cred;
3333 return (zfs_getattr(ap->a_vp, ap->a_vap, 0, ap->a_cred));
/*
 * VOP_SETATTR wrapper: reject chflags(2) requests (unsupported), build
 * the attribute mask, and forward to zfs_setattr().
 */
3337 zfs_freebsd_setattr(ap)
3338 struct vop_setattr_args /* {
3340 struct vattr *a_vap;
3341 struct ucred *a_cred;
3344 vattr_t *vap = ap->a_vap;
3346 /* No support for FreeBSD's chflags(2). */
3347 if (vap->va_flags != VNOVAL)
3348 return (EOPNOTSUPP);
3350 vattr_init_mask(vap);
3351 vap->va_mask &= ~AT_NOSET;
3353 return (zfs_setattr(ap->a_vp, vap, 0, ap->a_cred, NULL));
/*
 * VOP_RENAME wrapper: forward to zfs_rename().
 * NOTE(review): the vnode release/cleanup tail of this function is elided
 * from this view.
 */
3357 zfs_freebsd_rename(ap)
3358 struct vop_rename_args /* {
3359 struct vnode *a_fdvp;
3360 struct vnode *a_fvp;
3361 struct componentname *a_fcnp;
3362 struct vnode *a_tdvp;
3363 struct vnode *a_tvp;
3364 struct componentname *a_tcnp;
3367 vnode_t *fdvp = ap->a_fdvp;
3368 vnode_t *fvp = ap->a_fvp;
3369 vnode_t *tdvp = ap->a_tdvp;
3370 vnode_t *tvp = ap->a_tvp;
3373 ASSERT(ap->a_fcnp->cn_flags & SAVENAME);
3374 ASSERT(ap->a_tcnp->cn_flags & SAVENAME);
3376 error = zfs_rename(fdvp, ap->a_fcnp->cn_nameptr, tdvp,
3377 ap->a_tcnp->cn_nameptr, ap->a_fcnp->cn_cred);
/*
 * VOP_SYMLINK wrapper: set va_type to VLNK (the syscall only fills
 * va_mode), build the mask, and forward to zfs_symlink().
 */
3392 zfs_freebsd_symlink(ap)
3393 struct vop_symlink_args /* {
3394 struct vnode *a_dvp;
3395 struct vnode **a_vpp;
3396 struct componentname *a_cnp;
3397 struct vattr *a_vap;
3401 struct componentname *cnp = ap->a_cnp;
3402 vattr_t *vap = ap->a_vap;
3404 ASSERT(cnp->cn_flags & SAVENAME);
3406 vap->va_type = VLNK; /* FreeBSD: Syscall only sets va_mode. */
3407 vattr_init_mask(vap);
3409 return (zfs_symlink(ap->a_dvp, ap->a_vpp, cnp->cn_nameptr, vap,
3410 ap->a_target, cnp->cn_cred, cnp->cn_thread));
// VOP_READLINK wrapper: pass-through to zfs_readlink().
3414 zfs_freebsd_readlink(ap)
3415 struct vop_readlink_args /* {
3418 struct ucred *a_cred;
3422 return (zfs_readlink(ap->a_vp, ap->a_uio, ap->a_cred));
// VOP_LINK wrapper: requires SAVENAME on the componentname, then hands
// target directory, vnode and name to zfs_link().
3426 zfs_freebsd_link(ap)
3427 struct vop_link_args /* {
3428 struct vnode *a_tdvp;
3430 struct componentname *a_cnp;
3433 struct componentname *cnp = ap->a_cnp;
3435 ASSERT(cnp->cn_flags & SAVENAME);
3437 return (zfs_link(ap->a_tdvp, ap->a_vp, cnp->cn_nameptr, cnp->cn_cred));
// VOP_INACTIVE wrapper: last reference on the vnode is gone; let
// zfs_inactive() handle the teardown using the thread's credential.
3441 zfs_freebsd_inactive(ap)
3442 struct vop_inactive_args /* {
3444 struct thread *a_td;
3447 vnode_t *vp = ap->a_vp;
3449 zfs_inactive(vp, ap->a_td->td_ucred);
// VOP_RECLAIM wrapper: tear down in-core znode state when the vnode is
// being recycled.
3454 zfs_freebsd_reclaim(ap)
3455 struct vop_reclaim_args /* {
3457 struct thread *a_td;
3460 vnode_t *vp = ap->a_vp;
3461 znode_t *zp = VTOZ(vp);
// Drop the VM object and its cached pages before touching znode state.
3468 * Destroy the vm object and flush associated pages.
3470 vnode_destroy_vobject(vp);
3472 mutex_enter(&zp->z_lock);
3474 ASSERT(zp->z_dbuf_held);
3475 zfsvfs = zp->z_zfsvfs;
// A znode not on the unlinked list releases its dmu buffer hold here;
// unlinked znodes keep the hold -- NOTE(review): final cleanup for the
// unlinked case presumably happens elsewhere, confirm against the
// zinactive path.
3476 if (!zp->z_unlinked) {
3477 zp->z_dbuf_held = 0;
3479 mutex_exit(&zp->z_lock);
3480 dmu_buf_rele(zp->z_dbuf, NULL);
3482 mutex_exit(&zp->z_lock);
3485 if (vp->v_count > 0)
3488 ASSERT(vp->v_holdcnt >= 1);
// 'rele' is declared on a line not visible in this excerpt; when it is
// set for a non-unlinked znode, a reference on the owning vfs is
// dropped.
3490 if (!zp->z_unlinked && rele)
3491 VFS_RELE(zfsvfs->z_vfs);
// VOP_FID wrapper: zfs_fid() fills in the caller-supplied fid buffer.
3497 struct vop_fid_args /* {
3503 return (zfs_fid(ap->a_vp, (void *)ap->a_fid));
// VOP_PATHCONF wrapper: ask zfs_pathconf() first; on success publish
// the value through a_retval, and fall back to the generic
// vop_stdpathconf() for names ZFS reports as EOPNOTSUPP.
3507 zfs_freebsd_pathconf(ap)
3508 struct vop_pathconf_args /* {
3511 register_t *a_retval;
3517 error = zfs_pathconf(ap->a_vp, ap->a_name, &val, curthread->td_ucred);
3519 *ap->a_retval = val;
3520 else if (error == EOPNOTSUPP)
3521 error = vop_stdpathconf(ap);
// Forward declarations of the vnode-operation vectors defined below.
3525 struct vop_vector zfs_vnodeops;
3526 struct vop_vector zfs_fifoops;
// Vnode operations vector for regular ZFS vnodes: each operation is
// routed through the zfs_freebsd_* wrappers above, with
// default_vnodeops supplying anything not listed here.
3528 struct vop_vector zfs_vnodeops = {
3529 .vop_default = &default_vnodeops,
3530 .vop_inactive = zfs_freebsd_inactive,
3531 .vop_reclaim = zfs_freebsd_reclaim,
3532 .vop_access = zfs_freebsd_access,
// With the name cache enabled, lookups go through vfs_cache_lookup()
// and only cache misses reach zfs_freebsd_lookup() via cachedlookup.
// NOTE(review): the #else/#endif of this conditional are not visible
// in this excerpt.
3533 #ifdef FREEBSD_NAMECACHE
3534 .vop_lookup = vfs_cache_lookup,
3535 .vop_cachedlookup = zfs_freebsd_lookup,
3537 .vop_lookup = zfs_freebsd_lookup,
3539 .vop_getattr = zfs_freebsd_getattr,
3540 .vop_setattr = zfs_freebsd_setattr,
3541 .vop_create = zfs_freebsd_create,
// mknod shares the create wrapper.
3542 .vop_mknod = zfs_freebsd_create,
3543 .vop_mkdir = zfs_freebsd_mkdir,
3544 .vop_readdir = zfs_freebsd_readdir,
3545 .vop_fsync = zfs_freebsd_fsync,
3546 .vop_open = zfs_freebsd_open,
3547 .vop_close = zfs_freebsd_close,
3548 .vop_rmdir = zfs_freebsd_rmdir,
3549 .vop_ioctl = zfs_freebsd_ioctl,
3550 .vop_link = zfs_freebsd_link,
3551 .vop_symlink = zfs_freebsd_symlink,
3552 .vop_readlink = zfs_freebsd_readlink,
3553 .vop_read = zfs_freebsd_read,
3554 .vop_write = zfs_freebsd_write,
3555 .vop_remove = zfs_freebsd_remove,
3556 .vop_rename = zfs_freebsd_rename,
3557 .vop_pathconf = zfs_freebsd_pathconf,
// bmap is explicitly unsupported.
3558 .vop_bmap = VOP_EOPNOTSUPP,
3559 .vop_fid = zfs_freebsd_fid,
3562 struct vop_vector zfs_fifoops = {
3563 .vop_default = &fifo_specops,
3564 .vop_fsync = VOP_PANIC,
3565 .vop_access = zfs_freebsd_access,
3566 .vop_getattr = zfs_freebsd_getattr,
3567 .vop_inactive = zfs_freebsd_inactive,
3568 .vop_read = VOP_PANIC,
3569 .vop_reclaim = zfs_freebsd_reclaim,
3570 .vop_setattr = zfs_freebsd_setattr,
3571 .vop_write = VOP_PANIC,
3572 .vop_fid = zfs_freebsd_fid,