4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
24 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2014 Integros [integros.com]
28 /* Portions Copyright 2007 Jeremy Teo */
29 /* Portions Copyright 2010 Robert Milkowski */
31 #include <sys/types.h>
32 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sysmacros.h>
36 #include <sys/resource.h>
39 #include <sys/vnode.h>
43 #include <sys/taskq.h>
45 #include <sys/atomic.h>
46 #include <sys/namei.h>
48 #include <sys/cmn_err.h>
49 #include <sys/errno.h>
50 #include <sys/unistd.h>
51 #include <sys/zfs_dir.h>
52 #include <sys/zfs_ioctl.h>
53 #include <sys/fs/zfs.h>
55 #include <sys/dmu_objset.h>
61 #include <sys/dirent.h>
62 #include <sys/policy.h>
63 #include <sys/sunddi.h>
64 #include <sys/filio.h>
66 #include <sys/zfs_ctldir.h>
67 #include <sys/zfs_fuid.h>
68 #include <sys/zfs_sa.h>
70 #include <sys/zfs_rlock.h>
71 #include <sys/extdirent.h>
72 #include <sys/kidmap.h>
75 #include <sys/sched.h>
77 #include <vm/vm_param.h>
78 #include <vm/vm_pageout.h>
83 * Each vnode op performs some logical unit of work. To do this, the ZPL must
84 * properly lock its in-core state, create a DMU transaction, do the work,
85 * record this work in the intent log (ZIL), commit the DMU transaction,
86 * and wait for the intent log to commit if it is a synchronous operation.
87 * Moreover, the vnode ops must work in both normal and log replay context.
88 * The ordering of events is important to avoid deadlocks and references
89 * to freed memory. The example below illustrates the following Big Rules:
91 * (1) A check must be made in each zfs thread for a mounted file system.
 * This is done, while avoiding races, by using ZFS_ENTER(zfsvfs).
93 * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
94 * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
95 * can return EIO from the calling function.
97 * (2) VN_RELE() should always be the last thing except for zil_commit()
98 * (if necessary) and ZFS_EXIT(). This is for 3 reasons:
99 * First, if it's the last reference, the vnode/znode
100 * can be freed, so the zp may point to freed memory. Second, the last
101 * reference will call zfs_zinactive(), which may induce a lot of work --
102 * pushing cached pages (which acquires range locks) and syncing out
103 * cached atime changes. Third, zfs_zinactive() may require a new tx,
104 * which could deadlock the system if you were already holding one.
105 * If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
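 *	For example, zfs_get_done() below releases its vnode reference with
 *	VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
 *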
107 * (3) All range locks must be grabbed before calling dmu_tx_assign(),
108 * as they can span dmu_tx_assign() calls.
110 * (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
111 * dmu_tx_assign(). This is critical because we don't want to block
112 * while holding locks.
114 * If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
115 * reduces lock contention and CPU usage when we must wait (note that if
 * throughput is constrained by the storage, nearly every transaction
 * must wait).
119 * Note, in particular, that if a lock is sometimes acquired before
120 * the tx assigns, and sometimes after (e.g. z_lock), then failing
121 * to use a non-blocking assign can deadlock the system. The scenario:
123 * Thread A has grabbed a lock before calling dmu_tx_assign().
124 * Thread B is in an already-assigned tx, and blocks for this lock.
125 * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
126 * forever, because the previous txg can't quiesce until B's tx commits.
128 * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
129 * then drop all locks, call dmu_tx_wait(), and try again. On subsequent
130 * calls to dmu_tx_assign(), pass TXG_WAITED rather than TXG_NOWAIT,
131 * to indicate that this operation has already called dmu_tx_wait().
 * This will ensure that we don't retry forever, waiting a short bit
 * each time.
135 * (5) If the operation succeeded, generate the intent log entry for it
136 * before dropping locks. This ensures that the ordering of events
137 * in the intent log matches the order in which they actually occurred.
138 * During ZIL replay the zfs_log_* functions will update the sequence
139 * number to indicate the zil transaction has replayed.
141 * (6) At the end of each vnode op, the DMU tx must always commit,
142 * regardless of whether there were any errors.
144 * (7) After dropping all locks, invoke zil_commit(zilog, foid)
145 * to ensure that synchronous semantics are provided when necessary.
147 * In general, this is how things should be ordered in each vnode op:
149 * ZFS_ENTER(zfsvfs); // exit if unmounted
151 * zfs_dirent_lock(&dl, ...) // lock directory entry (may VN_HOLD())
152 * rw_enter(...); // grab any other locks you need
153 * tx = dmu_tx_create(...); // get DMU tx
154 * dmu_tx_hold_*(); // hold each object you might modify
155 * error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
157 * rw_exit(...); // drop locks
158 * zfs_dirent_unlock(dl); // unlock directory entry
159 * VN_RELE(...); // release held vnodes
 *	if (error == ERESTART) {
 *		waited = B_TRUE;
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		goto top;
 *	}
 *	dmu_tx_abort(tx);	// abort DMU tx
 *	ZFS_EXIT(zfsvfs);	// finished in zfs
 *	return (error);	// really out of space
170 * error = do_real_work(); // do whatever this VOP does
172 * zfs_log_*(...); // on success, make ZIL entry
173 * dmu_tx_commit(tx); // commit DMU tx -- error or not
174 * rw_exit(...); // drop locks
175 * zfs_dirent_unlock(dl); // unlock directory entry
176 * VN_RELE(...); // release held vnodes
177 * zil_commit(zilog, foid); // synchronous when necessary
178 * ZFS_EXIT(zfsvfs); // finished in zfs
179 * return (error); // done, report error
184 zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
186 znode_t *zp = VTOZ(*vpp);
187 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
192 if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
193 ((flag & FAPPEND) == 0)) {
195 return (SET_ERROR(EPERM));
198 if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
199 ZTOV(zp)->v_type == VREG &&
200 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
201 if (fs_vscan(*vpp, cr, 0) != 0) {
203 return (SET_ERROR(EACCES));
207 /* Keep a count of the synchronous opens in the znode */
208 if (flag & (FSYNC | FDSYNC))
209 atomic_inc_32(&zp->z_sync_cnt);
217 zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
218 caller_context_t *ct)
220 znode_t *zp = VTOZ(vp);
221 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
224 * Clean up any locks held by this process on the vp.
226 cleanlocks(vp, ddi_get_pid(), 0);
227 cleanshares(vp, ddi_get_pid());
232 /* Decrement the synchronous opens in the znode */
233 if ((flag & (FSYNC | FDSYNC)) && (count == 1))
234 atomic_dec_32(&zp->z_sync_cnt);
236 if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
237 ZTOV(zp)->v_type == VREG &&
238 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
239 VERIFY(fs_vscan(vp, cr, 1) == 0);
246 * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
247 * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter.
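 *
 * From userland this is effectively what lseek(2) with SEEK_HOLE/SEEK_DATA
 * relies on; a minimal consumer sketch (hypothetical fd, error handling
 * omitted):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	  // first data at or after 0
 *	off_t hole = lseek(fd, data, SEEK_HOLE);  // next hole at or after it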
250 zfs_holey(vnode_t *vp, u_long cmd, offset_t *off)
252 znode_t *zp = VTOZ(vp);
253 uint64_t noff = (uint64_t)*off; /* new offset */
258 file_sz = zp->z_size;
259 if (noff >= file_sz) {
260 return (SET_ERROR(ENXIO));
263 if (cmd == _FIO_SEEK_HOLE)
268 error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);
271 return (SET_ERROR(ENXIO));
274 * We could find a hole that begins after the logical end-of-file,
275 * because dmu_offset_next() only works on whole blocks. If the
276 * EOF falls mid-block, then indicate that the "virtual hole"
277 * at the end of the file begins at the logical EOF, rather than
278 * at the end of the last block.
280 if (noff > file_sz) {
293 zfs_ioctl(vnode_t *vp, u_long com, intptr_t data, int flag, cred_t *cred,
294 int *rvalp, caller_context_t *ct)
298 dmu_object_info_t doi;
 * The following two ioctls are used by bfu.  Faking them out is
 * necessary to avoid bfu errors.
323 if (ddi_copyin((void *)data, &off, sizeof (off), flag))
324 return (SET_ERROR(EFAULT));
326 off = *(offset_t *)data;
329 zfsvfs = zp->z_zfsvfs;
333 /* offset parameter is in/out */
334 error = zfs_holey(vp, com, &off);
339 if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
340 return (SET_ERROR(EFAULT));
342 *(offset_t *)data = off;
347 case _FIO_COUNT_FILLED:
350 * _FIO_COUNT_FILLED adds a new ioctl command which
 * exposes the number of filled blocks in a
 * ZFS object.
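 *
 * A userland caller might use it roughly as follows (sketch only;
 * assumes fd refers to a file on ZFS and <sys/filio.h> defines the cmd):
 *
 *	uint64_t filled;
 *	if (ioctl(fd, _FIO_COUNT_FILLED, &filled) == 0)
 *		printf("%ju filled blocks\n", (uintmax_t)filled);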
355 zfsvfs = zp->z_zfsvfs;
360 * Wait for all dirty blocks for this object
 * to get synced out to disk, and the DMU info
 * updated.
364 error = dmu_object_wait_synced(zfsvfs->z_os, zp->z_id);
371 * Retrieve fill count from DMU object.
373 error = dmu_object_info(zfsvfs->z_os, zp->z_id, &doi);
379 ndata = doi.doi_fill_count;
382 if (ddi_copyout(&ndata, (void *)data, sizeof (ndata), flag))
383 return (SET_ERROR(EFAULT));
388 return (SET_ERROR(ENOTTY));
392 page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
399 * At present vm_page_clear_dirty extends the cleared range to DEV_BSIZE
400 * aligned boundaries, if the range is not aligned. As a result a
401 * DEV_BSIZE subrange with partially dirty data may get marked as clean.
402 * It may happen that all DEV_BSIZE subranges are marked clean and thus
 * the whole page would be considered clean despite having some dirty data.
404 * For this reason we should shrink the range to DEV_BSIZE aligned
405 * boundaries before calling vm_page_clear_dirty.
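 *
 * For example (illustrative values only): with DEV_BSIZE of 512, off = 100
 * and nbytes = 1000, the range [100, 1100) shrinks to [512, 1024), i.e.
 * off becomes roundup2(100, 512) = 512 and end becomes
 * rounddown2(1100, 512) = 1024.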
407 end = rounddown2(off + nbytes, DEV_BSIZE);
408 off = roundup2(off, DEV_BSIZE);
412 zfs_vmobject_assert_wlocked(obj);
415 if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
417 if (vm_page_xbusied(pp)) {
419 * Reference the page before unlocking and
420 * sleeping so that the page daemon is less
421 * likely to reclaim it.
423 vm_page_reference(pp);
425 zfs_vmobject_wunlock(obj);
426 vm_page_busy_sleep(pp, "zfsmwb");
427 zfs_vmobject_wlock(obj);
431 } else if (pp == NULL) {
432 pp = vm_page_alloc(obj, OFF_TO_IDX(start),
433 VM_ALLOC_SYSTEM | VM_ALLOC_IFCACHED |
436 ASSERT(pp != NULL && !pp->valid);
441 ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
442 vm_object_pip_add(obj, 1);
443 pmap_remove_write(pp);
445 vm_page_clear_dirty(pp, off, nbytes);
453 page_unbusy(vm_page_t pp)
457 vm_object_pip_subtract(pp->object, 1);
461 page_hold(vnode_t *vp, int64_t start)
467 zfs_vmobject_assert_wlocked(obj);
470 if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
472 if (vm_page_xbusied(pp)) {
474 * Reference the page before unlocking and
475 * sleeping so that the page daemon is less
476 * likely to reclaim it.
478 vm_page_reference(pp);
480 zfs_vmobject_wunlock(obj);
481 vm_page_busy_sleep(pp, "zfsmwb");
482 zfs_vmobject_wlock(obj);
486 ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
499 page_unhold(vm_page_t pp)
508 * When a file is memory mapped, we must keep the IO data synchronized
509 * between the DMU cache and the memory mapped pages. What this means:
511 * On Write: If we find a memory mapped page, we write to *both*
512 * the page and the dmu buffer.
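 *
 * A minimal userland illustration of the coherence this preserves
 * (MAP_SHARED mapping of a ZFS file; setup and error handling omitted):
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	p[0] = 'x';			// store through the mapping
 *	(void) pread(fd, buf, 1, 0);	// read(2) path must observe 'x'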
515 update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
516 int segflg, dmu_tx_t *tx)
523 ASSERT(segflg != UIO_NOCOPY);
524 ASSERT(vp->v_mount != NULL);
528 off = start & PAGEOFFSET;
529 zfs_vmobject_wlock(obj);
530 for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
532 int nbytes = imin(PAGESIZE - off, len);
534 if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
535 zfs_vmobject_wunlock(obj);
537 va = zfs_map_page(pp, &sf);
			(void) dmu_read(os, oid, start + off, nbytes,
			    va + off, DMU_READ_PREFETCH);
542 zfs_vmobject_wlock(obj);
548 vm_object_pip_wakeupn(obj, 0);
549 zfs_vmobject_wunlock(obj);
553 * Read with UIO_NOCOPY flag means that sendfile(2) requests
554 * ZFS to populate a range of page cache pages with data.
556 * NOTE: this function could be optimized to pre-allocate
557 * all pages in advance, drain exclusive busy on all of them,
558 * map them into contiguous KVA region and populate them
559 * in one single dmu_read() call.
562 mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
564 znode_t *zp = VTOZ(vp);
565 objset_t *os = zp->z_zfsvfs->z_os;
575 ASSERT(uio->uio_segflg == UIO_NOCOPY);
576 ASSERT(vp->v_mount != NULL);
579 ASSERT((uio->uio_loffset & PAGEOFFSET) == 0);
581 zfs_vmobject_wlock(obj);
582 for (start = uio->uio_loffset; len > 0; start += PAGESIZE) {
583 int bytes = MIN(PAGESIZE, len);
585 pp = vm_page_grab(obj, OFF_TO_IDX(start), VM_ALLOC_SBUSY |
586 VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
587 if (pp->valid == 0) {
588 zfs_vmobject_wunlock(obj);
589 va = zfs_map_page(pp, &sf);
590 error = dmu_read(os, zp->z_id, start, bytes, va,
592 if (bytes != PAGESIZE && error == 0)
593 bzero(va + bytes, PAGESIZE - bytes);
595 zfs_vmobject_wlock(obj);
599 if (pp->wire_count == 0 && pp->valid == 0 &&
603 pp->valid = VM_PAGE_BITS_ALL;
604 vm_page_activate(pp);
608 ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
613 uio->uio_resid -= bytes;
614 uio->uio_offset += bytes;
617 zfs_vmobject_wunlock(obj);
622 * When a file is memory mapped, we must keep the IO data synchronized
623 * between the DMU cache and the memory mapped pages. What this means:
625 * On Read: We "read" preferentially from memory mapped pages,
 * else we fall back to the dmu buffer.
628 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
629 * the file is memory mapped.
632 mappedread(vnode_t *vp, int nbytes, uio_t *uio)
634 znode_t *zp = VTOZ(vp);
642 ASSERT(vp->v_mount != NULL);
646 start = uio->uio_loffset;
647 off = start & PAGEOFFSET;
648 zfs_vmobject_wlock(obj);
649 for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
651 uint64_t bytes = MIN(PAGESIZE - off, len);
653 if (pp = page_hold(vp, start)) {
657 zfs_vmobject_wunlock(obj);
658 va = zfs_map_page(pp, &sf);
659 error = uiomove(va + off, bytes, UIO_READ, uio);
661 zfs_vmobject_wlock(obj);
664 zfs_vmobject_wunlock(obj);
665 error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
667 zfs_vmobject_wlock(obj);
674 zfs_vmobject_wunlock(obj);
678 offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
681 * Read bytes from specified file into supplied buffer.
683 * IN: vp - vnode of file to be read from.
684 * uio - structure supplying read location, range info,
686 * ioflag - SYNC flags; used to provide FRSYNC semantics.
687 * cr - credentials of caller.
688 * ct - caller context
690 * OUT: uio - updated offset and range, buffer filled.
692 * RETURN: 0 on success, error code on failure.
695 * vp - atime updated if byte count > 0
699 zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
701 znode_t *zp = VTOZ(vp);
702 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
711 if (zp->z_pflags & ZFS_AV_QUARANTINED) {
713 return (SET_ERROR(EACCES));
717 * Validate file offset
719 if (uio->uio_loffset < (offset_t)0) {
721 return (SET_ERROR(EINVAL));
725 * Fasttrack empty reads
727 if (uio->uio_resid == 0) {
733 * Check for mandatory locks
735 if (MANDMODE(zp->z_mode)) {
736 if (error = chklock(vp, FREAD,
737 uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
744 * If we're in FRSYNC mode, sync out this znode before reading it.
747 (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
748 zil_commit(zfsvfs->z_log, zp->z_id);
751 * Lock the range against changes.
753 rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);
756 * If we are reading past end-of-file we can skip
757 * to the end; but we might still need to set atime.
759 if (uio->uio_loffset >= zp->z_size) {
764 ASSERT(uio->uio_loffset < zp->z_size);
765 n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
768 if ((uio->uio_extflg == UIO_XUIO) &&
769 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
771 int blksz = zp->z_blksz;
772 uint64_t offset = uio->uio_loffset;
774 xuio = (xuio_t *)uio;
776 nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
779 ASSERT(offset + n <= blksz);
782 (void) dmu_xuio_init(xuio, nblk);
784 if (vn_has_cached_data(vp)) {
786 * For simplicity, we always allocate a full buffer
787 * even if we only expect to read a portion of a block.
789 while (--nblk >= 0) {
790 (void) dmu_xuio_add(xuio,
791 dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
799 nbytes = MIN(n, zfs_read_chunk_size -
800 P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
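		/*
		 * E.g. (illustrative): with the default 1MB chunk and
		 * uio_loffset at 0x180000, P2PHASE() is 0x80000, so at most
		 * 0x80000 bytes are read before the next chunk boundary.
		 */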
803 if (uio->uio_segflg == UIO_NOCOPY)
804 error = mappedread_sf(vp, nbytes, uio);
806 #endif /* __FreeBSD__ */
807 if (vn_has_cached_data(vp)) {
808 error = mappedread(vp, nbytes, uio);
810 error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
814 /* convert checksum errors into IO errors */
816 error = SET_ERROR(EIO);
823 zfs_range_unlock(rl);
825 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
831 * Write the bytes to a file.
833 * IN: vp - vnode of file to be written to.
834 * uio - structure supplying write location, range info,
836 * ioflag - FAPPEND, FSYNC, and/or FDSYNC. FAPPEND is
837 * set if in append mode.
838 * cr - credentials of caller.
839 * ct - caller context (NFS/CIFS fem monitor only)
841 * OUT: uio - updated offset and range.
843 * RETURN: 0 on success, error code on failure.
846 * vp - ctime|mtime updated if byte count > 0
851 zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
853 znode_t *zp = VTOZ(vp);
854 rlim64_t limit = MAXOFFSET_T;
855 ssize_t start_resid = uio->uio_resid;
859 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
864 int max_blksz = zfsvfs->z_max_blksz;
867 iovec_t *aiov = NULL;
870 int iovcnt = uio->uio_iovcnt;
871 iovec_t *iovp = uio->uio_iov;
874 sa_bulk_attr_t bulk[4];
875 uint64_t mtime[2], ctime[2];
878 * Fasttrack empty write
884 if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
890 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
891 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
892 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
894 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
 * In the case where vp->v_vfsp != zp->z_zfsvfs->z_vfs (e.g. snapshots), our
899 * callers might not be able to detect properly that we are read-only,
900 * so check it explicitly here.
902 if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
904 return (SET_ERROR(EROFS));
908 * If immutable or not appending then return EPERM
910 if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
911 ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
912 (uio->uio_loffset < zp->z_size))) {
914 return (SET_ERROR(EPERM));
917 zilog = zfsvfs->z_log;
920 * Validate file offset
922 woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
925 return (SET_ERROR(EINVAL));
929 * Check for mandatory locks before calling zfs_range_lock()
930 * in order to prevent a deadlock with locks set via fcntl().
932 if (MANDMODE((mode_t)zp->z_mode) &&
933 (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
 * Pre-fault the pages to ensure slow (e.g. NFS) pages
 * don't hold up txg.
942 * Skip this if uio contains loaned arc_buf.
944 if ((uio->uio_extflg == UIO_XUIO) &&
945 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
946 xuio = (xuio_t *)uio;
948 uio_prefaultpages(MIN(n, max_blksz), uio);
952 * If in append mode, set the io offset pointer to eof.
954 if (ioflag & FAPPEND) {
956 * Obtain an appending range lock to guarantee file append
957 * semantics. We reset the write offset once we have the lock.
959 rl = zfs_range_lock(zp, 0, n, RL_APPEND);
961 if (rl->r_len == UINT64_MAX) {
963 * We overlocked the file because this write will cause
964 * the file block size to increase.
965 * Note that zp_size cannot change with this lock held.
969 uio->uio_loffset = woff;
972 * Note that if the file block size will change as a result of
973 * this write, then this range lock will lock the entire file
974 * so that we can re-write the block safely.
976 rl = zfs_range_lock(zp, woff, n, RL_WRITER);
979 if (vn_rlimit_fsize(vp, uio, uio->uio_td)) {
980 zfs_range_unlock(rl);
986 zfs_range_unlock(rl);
988 return (SET_ERROR(EFBIG));
991 if ((woff + n) > limit || woff > (limit - n))
994 /* Will this write extend the file length? */
995 write_eof = (woff + n > zp->z_size);
997 end_size = MAX(zp->z_size, woff + n);
1000 * Write the file in reasonable size chunks. Each chunk is written
1001 * in a separate transaction; this keeps the intent log records small
1002 * and allows us to do more fine-grained space accounting.
1006 woff = uio->uio_loffset;
1007 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
1008 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
1010 dmu_return_arcbuf(abuf);
1011 error = SET_ERROR(EDQUOT);
1015 if (xuio && abuf == NULL) {
1016 ASSERT(i_iov < iovcnt);
1017 aiov = &iovp[i_iov];
1018 abuf = dmu_xuio_arcbuf(xuio, i_iov);
1019 dmu_xuio_clear(xuio, i_iov);
1020 DTRACE_PROBE3(zfs_cp_write, int, i_iov,
1021 iovec_t *, aiov, arc_buf_t *, abuf);
1022 ASSERT((aiov->iov_base == abuf->b_data) ||
1023 ((char *)aiov->iov_base - (char *)abuf->b_data +
1024 aiov->iov_len == arc_buf_size(abuf)));
1026 } else if (abuf == NULL && n >= max_blksz &&
1027 woff >= zp->z_size &&
1028 P2PHASE(woff, max_blksz) == 0 &&
1029 zp->z_blksz == max_blksz) {
1031 * This write covers a full block. "Borrow" a buffer
1032 * from the dmu so that we can fill it before we enter
1033 * a transaction. This avoids the possibility of
1034 * holding up the transaction if the data copy hangs
1035 * up on a pagefault (e.g., from an NFS server mapping).
1039 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
1041 ASSERT(abuf != NULL);
1042 ASSERT(arc_buf_size(abuf) == max_blksz);
1043 if (error = uiocopy(abuf->b_data, max_blksz,
1044 UIO_WRITE, uio, &cbytes)) {
1045 dmu_return_arcbuf(abuf);
1048 ASSERT(cbytes == max_blksz);
1052 * Start a transaction.
1054 tx = dmu_tx_create(zfsvfs->z_os);
1055 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1056 dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
1057 zfs_sa_upgrade_txholds(tx, zp);
1058 error = dmu_tx_assign(tx, TXG_WAIT);
1062 dmu_return_arcbuf(abuf);
1067 * If zfs_range_lock() over-locked we grow the blocksize
1068 * and then reduce the lock range. This will only happen
1069 * on the first iteration since zfs_range_reduce() will
1070 * shrink down r_len to the appropriate size.
1072 if (rl->r_len == UINT64_MAX) {
1075 if (zp->z_blksz > max_blksz) {
1077 * File's blocksize is already larger than the
1078 * "recordsize" property. Only let it grow to
1079 * the next power of 2.
1081 ASSERT(!ISP2(zp->z_blksz));
1082 new_blksz = MIN(end_size,
1083 1 << highbit64(zp->z_blksz));
1085 new_blksz = MIN(end_size, max_blksz);
1087 zfs_grow_blocksize(zp, new_blksz, tx);
1088 zfs_range_reduce(rl, woff, n);
1092 * XXX - should we really limit each write to z_max_blksz?
1093 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
1095 nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
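		/*
		 * E.g. (illustrative): with a 128K max_blksz and woff at
		 * 0x21000, P2PHASE() is 0x1000, so at most 0x1f000 bytes are
		 * written in this pass, ending at the next block boundary.
		 */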
1097 if (woff + nbytes > zp->z_size)
1098 vnode_pager_setsize(vp, woff + nbytes);
1101 tx_bytes = uio->uio_resid;
1102 error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
1104 tx_bytes -= uio->uio_resid;
1107 ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
1109 * If this is not a full block write, but we are
1110 * extending the file past EOF and this data starts
1111 * block-aligned, use assign_arcbuf(). Otherwise,
1112 * write via dmu_write().
1114 if (tx_bytes < max_blksz && (!write_eof ||
1115 aiov->iov_base != abuf->b_data)) {
1117 dmu_write(zfsvfs->z_os, zp->z_id, woff,
1118 aiov->iov_len, aiov->iov_base, tx);
1119 dmu_return_arcbuf(abuf);
1120 xuio_stat_wbuf_copied();
1122 ASSERT(xuio || tx_bytes == max_blksz);
1123 dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
1126 ASSERT(tx_bytes <= uio->uio_resid);
1127 uioskip(uio, tx_bytes);
1129 if (tx_bytes && vn_has_cached_data(vp)) {
1130 update_pages(vp, woff, tx_bytes, zfsvfs->z_os,
1131 zp->z_id, uio->uio_segflg, tx);
1135 * If we made no progress, we're done. If we made even
1136 * partial progress, update the znode and ZIL accordingly.
1138 if (tx_bytes == 0) {
1139 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
1140 (void *)&zp->z_size, sizeof (uint64_t), tx);
1147 * Clear Set-UID/Set-GID bits on successful write if not
 * privileged and at least one of the execute bits is set.
 *
 * It would be nice to do this after all writes have
1151 * been done, but that would still expose the ISUID/ISGID
1152 * to another app after the partial write is committed.
1154 * Note: we don't call zfs_fuid_map_id() here because
1155 * user 0 is not an ephemeral uid.
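 *
 * Note: S_IXUSR is 0100, so the mask below, S_IXUSR | (S_IXUSR >> 3) |
 * (S_IXUSR >> 6), works out to 0111, i.e. "any execute bit set".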
1157 mutex_enter(&zp->z_acl_lock);
1158 if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
1159 (S_IXUSR >> 6))) != 0 &&
1160 (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
1161 secpolicy_vnode_setid_retain(vp, cr,
1162 (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
1164 zp->z_mode &= ~(S_ISUID | S_ISGID);
1165 newmode = zp->z_mode;
1166 (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
1167 (void *)&newmode, sizeof (uint64_t), tx);
1169 mutex_exit(&zp->z_acl_lock);
1171 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
1175 * Update the file size (zp_size) if it has changed;
1176 * account for possible concurrent updates.
1178 while ((end_size = zp->z_size) < uio->uio_loffset) {
1179 (void) atomic_cas_64(&zp->z_size, end_size,
 * If we are replaying and eof is non-zero then force
1185 * the file size to the specified eof. Note, there's no
1186 * concurrency during replay.
1188 if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
1189 zp->z_size = zfsvfs->z_replay_eof;
1191 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
1193 zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
1198 ASSERT(tx_bytes == nbytes);
1203 uio_prefaultpages(MIN(n, max_blksz), uio);
1207 zfs_range_unlock(rl);
1210 * If we're in replay mode, or we made no progress, return error.
1211 * Otherwise, it's at least a partial write, so it's successful.
1213 if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
1218 if (ioflag & (FSYNC | FDSYNC) ||
1219 zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1220 zil_commit(zilog, zp->z_id);
1227 zfs_get_done(zgd_t *zgd, int error)
1229 znode_t *zp = zgd->zgd_private;
1230 objset_t *os = zp->z_zfsvfs->z_os;
1233 dmu_buf_rele(zgd->zgd_db, zgd);
1235 zfs_range_unlock(zgd->zgd_rl);
1238 * Release the vnode asynchronously as we currently have the
1239 * txg stopped from syncing.
1241 VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
1243 if (error == 0 && zgd->zgd_bp)
1244 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1246 kmem_free(zgd, sizeof (zgd_t));
1250 static int zil_fault_io = 0;
1254 * Get data to generate a TX_WRITE intent log record.
1257 zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1259 zfsvfs_t *zfsvfs = arg;
1260 objset_t *os = zfsvfs->z_os;
1262 uint64_t object = lr->lr_foid;
1263 uint64_t offset = lr->lr_offset;
1264 uint64_t size = lr->lr_length;
1265 blkptr_t *bp = &lr->lr_blkptr;
1270 ASSERT(zio != NULL);
1274 * Nothing to do if the file has been removed
1276 if (zfs_zget(zfsvfs, object, &zp) != 0)
1277 return (SET_ERROR(ENOENT));
1278 if (zp->z_unlinked) {
1280 * Release the vnode asynchronously as we currently have the
1281 * txg stopped from syncing.
1283 VN_RELE_ASYNC(ZTOV(zp),
1284 dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
1285 return (SET_ERROR(ENOENT));
1288 zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1289 zgd->zgd_zilog = zfsvfs->z_log;
1290 zgd->zgd_private = zp;
1293 * Write records come in two flavors: immediate and indirect.
1294 * For small writes it's cheaper to store the data with the
1295 * log record (immediate); for large writes it's cheaper to
1296 * sync the data and get a pointer to it (indirect) so that
1297 * we don't have to write the data twice.
1299 if (buf != NULL) { /* immediate write */
1300 zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
1301 /* test for truncation needs to be done while range locked */
1302 if (offset >= zp->z_size) {
1303 error = SET_ERROR(ENOENT);
1305 error = dmu_read(os, object, offset, size, buf,
1306 DMU_READ_NO_PREFETCH);
1308 ASSERT(error == 0 || error == ENOENT);
1309 } else { /* indirect write */
 * Have to lock the whole block to ensure that no one can change
 * the data while it's being written out and its checksum is being
 * calculated. We need to re-check the
1314 * blocksize after we get the lock in case it's changed!
1319 blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
1321 zgd->zgd_rl = zfs_range_lock(zp, offset, size,
1323 if (zp->z_blksz == size)
1326 zfs_range_unlock(zgd->zgd_rl);
1328 /* test for truncation needs to be done while range locked */
1329 if (lr->lr_offset >= zp->z_size)
1330 error = SET_ERROR(ENOENT);
1333 error = SET_ERROR(EIO);
1338 error = dmu_buf_hold(os, object, offset, zgd, &db,
1339 DMU_READ_NO_PREFETCH);
1342 blkptr_t *obp = dmu_buf_get_blkptr(db);
1344 ASSERT(BP_IS_HOLE(bp));
1351 ASSERT(db->db_offset == offset);
1352 ASSERT(db->db_size == size);
1354 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1356 ASSERT(error || lr->lr_length <= zp->z_blksz);
1359 * On success, we need to wait for the write I/O
1360 * initiated by dmu_sync() to complete before we can
1361 * release this dbuf. We will finish everything up
1362 * in the zfs_get_done() callback.
1367 if (error == EALREADY) {
1368 lr->lr_common.lrc_txtype = TX_WRITE2;
1374 zfs_get_done(zgd, error);
1381 zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr,
1382 caller_context_t *ct)
1384 znode_t *zp = VTOZ(vp);
1385 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1391 if (flag & V_ACE_MASK)
1392 error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
1394 error = zfs_zaccess_rwx(zp, mode, flag, cr);
 * If the vnode is for a device, return a specfs vnode instead.
1404 specvp_check(vnode_t **vpp, cred_t *cr)
1408 if (IS_DEVVP(*vpp)) {
1411 svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
1414 error = SET_ERROR(ENOSYS);
1422 * Lookup an entry in a directory, or an extended attribute directory.
1423 * If it exists, return a held vnode reference for it.
1425 * IN: dvp - vnode of directory to search.
1426 * nm - name of entry to lookup.
1427 * pnp - full pathname to lookup [UNUSED].
1428 * flags - LOOKUP_XATTR set if looking for an attribute.
1429 * rdir - root directory vnode [UNUSED].
1430 * cr - credentials of caller.
1431 * ct - caller context
1432 * direntflags - directory lookup flags
1433 * realpnp - returned pathname.
1435 * OUT: vpp - vnode of located entry, NULL if not found.
1437 * RETURN: 0 on success, error code on failure.
1444 zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
1445 int nameiop, cred_t *cr, kthread_t *td, int flags)
1447 znode_t *zdp = VTOZ(dvp);
1448 zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
1450 int *direntflags = NULL;
1451 void *realpnp = NULL;
1454 if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
1456 if (dvp->v_type != VDIR) {
1457 return (SET_ERROR(ENOTDIR));
1458 } else if (zdp->z_sa_hdl == NULL) {
1459 return (SET_ERROR(EIO));
1462 if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
1463 error = zfs_fastaccesschk_execute(zdp, cr);
1471 vnode_t *tvp = dnlc_lookup(dvp, nm);
1474 error = zfs_fastaccesschk_execute(zdp, cr);
1479 if (tvp == DNLC_NO_VNODE) {
1481 return (SET_ERROR(ENOENT));
1484 return (specvp_check(vpp, cr));
1490 DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp, char *, nm);
1497 if (flags & LOOKUP_XATTR) {
1500 * If the xattr property is off, refuse the lookup request.
1502 if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) {
1504 return (SET_ERROR(EINVAL));
 * We don't allow recursive attributes.
1510 * Maybe someday we will.
1512 if (zdp->z_pflags & ZFS_XATTR) {
1514 return (SET_ERROR(EINVAL));
1517 if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) {
1523 * Do we have permission to get into attribute directory?
1526 if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0,
1536 if (dvp->v_type != VDIR) {
1538 return (SET_ERROR(ENOTDIR));
1542 * Check accessibility of directory.
1545 if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) {
1550 if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
1551 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1553 return (SET_ERROR(EILSEQ));
1556 error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp);
1558 error = specvp_check(vpp, cr);
1560 /* Translate errors and add SAVENAME when needed. */
1561 if (cnp->cn_flags & ISLASTCN) {
1565 if (error == ENOENT) {
1566 error = EJUSTRETURN;
1567 cnp->cn_flags |= SAVENAME;
1573 cnp->cn_flags |= SAVENAME;
1577 if (error == 0 && (nm[0] != '.' || nm[1] != '\0')) {
1580 if (cnp->cn_flags & ISDOTDOT) {
1581 ltype = VOP_ISLOCKED(dvp);
1585 error = vn_lock(*vpp, cnp->cn_lkflags);
1586 if (cnp->cn_flags & ISDOTDOT)
1587 vn_lock(dvp, ltype | LK_RETRY);
1597 #ifdef FREEBSD_NAMECACHE
1599 * Insert name into cache (as non-existent) if appropriate.
1601 if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) != 0)
1602 cache_enter(dvp, *vpp, cnp);
1604 * Insert name into cache if appropriate.
1606 if (error == 0 && (cnp->cn_flags & MAKEENTRY)) {
1607 if (!(cnp->cn_flags & ISLASTCN) ||
1608 (nameiop != DELETE && nameiop != RENAME)) {
1609 cache_enter(dvp, *vpp, cnp);
1618 * Attempt to create a new entry in a directory. If the entry
1619 * already exists, truncate the file if permissible, else return
1620 * an error. Return the vp of the created or trunc'd file.
1622 * IN: dvp - vnode of directory to put new file entry in.
1623 * name - name of new file entry.
1624 * vap - attributes of new file.
1625 * excl - flag indicating exclusive or non-exclusive mode.
1626 * mode - mode to open file with.
1627 * cr - credentials of caller.
1628 * flag - large file flag [UNUSED].
1629 * ct - caller context
1630 * vsecp - ACL to be set
1632 * OUT: vpp - vnode of created or trunc'd entry.
1634 * RETURN: 0 on success, error code on failure.
1637 * dvp - ctime|mtime updated if new entry created
1638 * vp - ctime|mtime always, atime if new
1643 zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
1644 vnode_t **vpp, cred_t *cr, kthread_t *td)
1646 znode_t *zp, *dzp = VTOZ(dvp);
1647 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1655 gid_t gid = crgetgid(cr);
1656 zfs_acl_ids_t acl_ids;
1657 boolean_t fuid_dirtied;
1658 boolean_t have_acl = B_FALSE;
1659 boolean_t waited = B_FALSE;
1664 * If we have an ephemeral id, ACL, or XVATTR then
1665 * make sure file system is at proper version
1668 ksid = crgetsid(cr, KSID_OWNER);
1670 uid = ksid_getid(ksid);
1674 if (zfsvfs->z_use_fuids == B_FALSE &&
1675 (vsecp || (vap->va_mask & AT_XVATTR) ||
1676 IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1677 return (SET_ERROR(EINVAL));
1682 zilog = zfsvfs->z_log;
1684 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
1685 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1687 return (SET_ERROR(EILSEQ));
1690 if (vap->va_mask & AT_XVATTR) {
1691 if ((error = secpolicy_xvattr(dvp, (xvattr_t *)vap,
1692 crgetuid(cr), cr, vap->va_type)) != 0) {
1698 getnewvnode_reserve(1);
1703 if ((vap->va_mode & S_ISVTX) && secpolicy_vnode_stky_modify(cr))
1704 vap->va_mode &= ~S_ISVTX;
1706 if (*name == '\0') {
1708 * Null component name refers to the directory itself.
1715 /* possible VN_HOLD(zp) */
1718 if (flag & FIGNORECASE)
1721 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1725 zfs_acl_ids_free(&acl_ids);
1726 if (strcmp(name, "..") == 0)
1727 error = SET_ERROR(EISDIR);
1728 getnewvnode_drop_reserve();
1738 * Create a new file object and update the directory
1741 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
1743 zfs_acl_ids_free(&acl_ids);
1748 * We only support the creation of regular files in
1749 * extended attribute directories.
1752 if ((dzp->z_pflags & ZFS_XATTR) &&
1753 (vap->va_type != VREG)) {
1755 zfs_acl_ids_free(&acl_ids);
1756 error = SET_ERROR(EINVAL);
1760 if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
1761 cr, vsecp, &acl_ids)) != 0)
1765 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1766 zfs_acl_ids_free(&acl_ids);
1767 error = SET_ERROR(EDQUOT);
1771 tx = dmu_tx_create(os);
1773 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1774 ZFS_SA_BASE_ATTR_SIZE);
1776 fuid_dirtied = zfsvfs->z_fuid_dirty;
1778 zfs_fuid_txhold(zfsvfs, tx);
1779 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
1780 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
1781 if (!zfsvfs->z_use_sa &&
1782 acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1783 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1784 0, acl_ids.z_aclp->z_acl_bytes);
1786 error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
1788 zfs_dirent_unlock(dl);
1789 if (error == ERESTART) {
1795 zfs_acl_ids_free(&acl_ids);
1797 getnewvnode_drop_reserve();
1801 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1804 zfs_fuid_sync(zfsvfs, tx);
1806 (void) zfs_link_create(dl, zp, tx, ZNEW);
1807 txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
1808 if (flag & FIGNORECASE)
1810 zfs_log_create(zilog, tx, txtype, dzp, zp, name,
1811 vsecp, acl_ids.z_fuidp, vap);
1812 zfs_acl_ids_free(&acl_ids);
1815 int aflags = (flag & FAPPEND) ? V_APPEND : 0;
1818 zfs_acl_ids_free(&acl_ids);
1822 * A directory entry already exists for this name.
1825 * Can't truncate an existing file if in exclusive mode.
1828 error = SET_ERROR(EEXIST);
1832 * Can't open a directory for writing.
1834 if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) {
1835 error = SET_ERROR(EISDIR);
1839 * Verify requested access to file.
1841 if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
1845 mutex_enter(&dzp->z_lock);
1847 mutex_exit(&dzp->z_lock);
1850 * Truncate regular files if requested.
1852 if ((ZTOV(zp)->v_type == VREG) &&
1853 (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) {
1854 /* we can't hold any locks when calling zfs_freesp() */
1855 zfs_dirent_unlock(dl);
1857 error = zfs_freesp(zp, 0, 0, mode, TRUE);
1859 vnevent_create(ZTOV(zp), ct);
1864 getnewvnode_drop_reserve();
1866 zfs_dirent_unlock(dl);
1873 error = specvp_check(vpp, cr);
1876 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1877 zil_commit(zilog, 0);
1884 * Remove an entry from a directory.
1886 * IN: dvp - vnode of directory to remove entry from.
1887 * name - name of entry to remove.
1888 * cr - credentials of caller.
1889 * ct - caller context
1890 * flags - case flags
1892 * RETURN: 0 on success, error code on failure.
1896 * vp - ctime (if nlink > 0)
1899 uint64_t null_xattr = 0;
1903 zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct,
1906 znode_t *zp, *dzp = VTOZ(dvp);
1909 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1911 uint64_t acl_obj, xattr_obj;
1912 uint64_t xattr_obj_unlinked = 0;
1916 boolean_t may_delete_now, delete_now = FALSE;
1917 boolean_t unlinked, toobig = FALSE;
1919 pathname_t *realnmp = NULL;
1923 boolean_t waited = B_FALSE;
1927 zilog = zfsvfs->z_log;
1929 if (flags & FIGNORECASE) {
1939 * Attempt to lock directory; fail if entry doesn't exist.
1941 if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1951 if (error = zfs_zaccess_delete(dzp, zp, cr)) {
1956 * Need to use rmdir for removing directories.
1958 if (vp->v_type == VDIR) {
1959 error = SET_ERROR(EPERM);
1963 vnevent_remove(vp, dvp, name, ct);
1966 dnlc_remove(dvp, realnmp->pn_buf);
1968 dnlc_remove(dvp, name);
1971 may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp);
1975 * We may delete the znode now, or we may put it in the unlinked set;
1976 * it depends on whether we're the last link, and on whether there are
1977 * other holds on the vnode. So we dmu_tx_hold() the right things to
1978 * allow for either case.
1981 tx = dmu_tx_create(zfsvfs->z_os);
1982 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1983 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1984 zfs_sa_upgrade_txholds(tx, zp);
1985 zfs_sa_upgrade_txholds(tx, dzp);
1986 if (may_delete_now) {
1988 zp->z_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT;
1989 /* if the file is too big, only hold_free a token amount */
1990 dmu_tx_hold_free(tx, zp->z_id, 0,
1991 (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
1994 /* are there any extended attributes? */
1995 error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1996 &xattr_obj, sizeof (xattr_obj));
1997 if (error == 0 && xattr_obj) {
1998 error = zfs_zget(zfsvfs, xattr_obj, &xzp);
2000 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2001 dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
2004 mutex_enter(&zp->z_lock);
2005 if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
2006 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
2007 mutex_exit(&zp->z_lock);
2009 /* charge as an update -- would be nice not to charge at all */
2010 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2013 * Mark this transaction as typically resulting in a net free of space
2015 dmu_tx_mark_netfree(tx);
2017 error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
2019 zfs_dirent_unlock(dl);
2023 if (error == ERESTART) {
2037 * Remove the directory entry.
2039 error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
2048 * Hold z_lock so that we can make sure that the ACL obj
2049 * hasn't changed. Could have been deleted due to
2052 mutex_enter(&zp->z_lock);
2054 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
2055 &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
2056 delete_now = may_delete_now && !toobig &&
2057 vp->v_count == 1 && !vn_has_cached_data(vp) &&
2058 xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
2065 panic("zfs_remove: delete_now branch taken");
2067 if (xattr_obj_unlinked) {
2068 ASSERT3U(xzp->z_links, ==, 2);
2069 mutex_enter(&xzp->z_lock);
2070 xzp->z_unlinked = 1;
2072 error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
2073 &xzp->z_links, sizeof (xzp->z_links), tx);
2074 ASSERT3U(error, ==, 0);
2075 mutex_exit(&xzp->z_lock);
2076 zfs_unlinked_add(xzp, tx);
2079 error = sa_remove(zp->z_sa_hdl,
2080 SA_ZPL_XATTR(zfsvfs), tx);
2082 error = sa_update(zp->z_sa_hdl,
2083 SA_ZPL_XATTR(zfsvfs), &null_xattr,
2084 sizeof (uint64_t), tx);
2089 ASSERT0(vp->v_count);
2091 mutex_exit(&zp->z_lock);
2092 zfs_znode_delete(zp, tx);
2093 } else if (unlinked) {
2094 mutex_exit(&zp->z_lock);
2095 zfs_unlinked_add(zp, tx);
2097 vp->v_vflag |= VV_NOSYNC;
2102 if (flags & FIGNORECASE)
2104 zfs_log_remove(zilog, tx, txtype, dzp, name, obj);
2111 zfs_dirent_unlock(dl);
2118 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2119 zil_commit(zilog, 0);
2126 * Create a new directory and insert it into dvp using the name
2127 * provided. Return a pointer to the inserted directory.
2129 * IN: dvp - vnode of directory to add subdir to.
2130 * dirname - name of new directory.
2131 * vap - attributes of new directory.
2132 * cr - credentials of caller.
2133 * ct - caller context
2134 * flags - case flags
2135 * vsecp - ACL to be set
2137 * OUT: vpp - vnode of created directory.
2139 * RETURN: 0 on success, error code on failure.
2142 * dvp - ctime|mtime updated
2143 * vp - ctime|mtime|atime updated
2147 zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr,
2148 caller_context_t *ct, int flags, vsecattr_t *vsecp)
2150 znode_t *zp, *dzp = VTOZ(dvp);
2151 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
2160 gid_t gid = crgetgid(cr);
2161 zfs_acl_ids_t acl_ids;
2162 boolean_t fuid_dirtied;
2163 boolean_t waited = B_FALSE;
2165 ASSERT(vap->va_type == VDIR);
2168 * If we have an ephemeral id, ACL, or XVATTR then
2169 * make sure file system is at proper version
2172 ksid = crgetsid(cr, KSID_OWNER);
2174 uid = ksid_getid(ksid);
2177 if (zfsvfs->z_use_fuids == B_FALSE &&
2178 (vsecp || (vap->va_mask & AT_XVATTR) ||
2179 IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
2180 return (SET_ERROR(EINVAL));
2184 zilog = zfsvfs->z_log;
2186 if (dzp->z_pflags & ZFS_XATTR) {
2188 return (SET_ERROR(EINVAL));
2191 if (zfsvfs->z_utf8 && u8_validate(dirname,
2192 strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
2194 return (SET_ERROR(EILSEQ));
2196 if (flags & FIGNORECASE)
2199 if (vap->va_mask & AT_XVATTR) {
2200 if ((error = secpolicy_xvattr(dvp, (xvattr_t *)vap,
2201 crgetuid(cr), cr, vap->va_type)) != 0) {
2207 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
2208 vsecp, &acl_ids)) != 0) {
2213 getnewvnode_reserve(1);
2216 * First make sure the new directory doesn't exist.
2218 * Existence is checked first to make sure we don't return
 * EACCES instead of EEXIST which can cause some applications
 * to fail.
2225 if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
2227 zfs_acl_ids_free(&acl_ids);
2228 getnewvnode_drop_reserve();
2233 if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) {
2234 zfs_acl_ids_free(&acl_ids);
2235 zfs_dirent_unlock(dl);
2236 getnewvnode_drop_reserve();
2241 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
2242 zfs_acl_ids_free(&acl_ids);
2243 zfs_dirent_unlock(dl);
2244 getnewvnode_drop_reserve();
2246 return (SET_ERROR(EDQUOT));
2250 * Add a new entry to the directory.
2252 tx = dmu_tx_create(zfsvfs->z_os);
2253 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
2254 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
2255 fuid_dirtied = zfsvfs->z_fuid_dirty;
2257 zfs_fuid_txhold(zfsvfs, tx);
2258 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
2259 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
2260 acl_ids.z_aclp->z_acl_bytes);
2263 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
2264 ZFS_SA_BASE_ATTR_SIZE);
2266 error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
2268 zfs_dirent_unlock(dl);
2269 if (error == ERESTART) {
2275 zfs_acl_ids_free(&acl_ids);
2277 getnewvnode_drop_reserve();
2285 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
2288 zfs_fuid_sync(zfsvfs, tx);
2291 * Now put new name in parent dir.
2293 (void) zfs_link_create(dl, zp, tx, ZNEW);
2297 txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
2298 if (flags & FIGNORECASE)
2300 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
2301 acl_ids.z_fuidp, vap);
2303 zfs_acl_ids_free(&acl_ids);
2307 getnewvnode_drop_reserve();
2309 zfs_dirent_unlock(dl);
2311 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2312 zil_commit(zilog, 0);
2319 * Remove a directory subdir entry. If the current working
 * directory is the same as the subdir to be removed, the
 * remove will fail.
2323 * IN: dvp - vnode of directory to remove from.
2324 * name - name of directory to be removed.
2325 * cwd - vnode of current working directory.
2326 * cr - credentials of caller.
2327 * ct - caller context
2328 * flags - case flags
2330 * RETURN: 0 on success, error code on failure.
2333 * dvp - ctime|mtime updated
2337 zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
2338 caller_context_t *ct, int flags)
2340 znode_t *dzp = VTOZ(dvp);
2343 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
2349 boolean_t waited = B_FALSE;
2353 zilog = zfsvfs->z_log;
2355 if (flags & FIGNORECASE)
2361 * Attempt to lock directory; fail if entry doesn't exist.
2363 if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
2371 if (error = zfs_zaccess_delete(dzp, zp, cr)) {
2375 if (vp->v_type != VDIR) {
2376 error = SET_ERROR(ENOTDIR);
2381 error = SET_ERROR(EINVAL);
2385 vnevent_rmdir(vp, dvp, name, ct);
 * Grab a lock on the directory to make sure that no one is
2389 * trying to add (or lookup) entries while we are removing it.
2391 rw_enter(&zp->z_name_lock, RW_WRITER);
2394 * Grab a lock on the parent pointer to make sure we play well
2395 * with the treewalk and directory rename code.
2397 rw_enter(&zp->z_parent_lock, RW_WRITER);
2399 tx = dmu_tx_create(zfsvfs->z_os);
2400 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
2401 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2402 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2403 zfs_sa_upgrade_txholds(tx, zp);
2404 zfs_sa_upgrade_txholds(tx, dzp);
2405 error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
2407 rw_exit(&zp->z_parent_lock);
2408 rw_exit(&zp->z_name_lock);
2409 zfs_dirent_unlock(dl);
2411 if (error == ERESTART) {
2422 #ifdef FREEBSD_NAMECACHE
2426 error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
2429 uint64_t txtype = TX_RMDIR;
2430 if (flags & FIGNORECASE)
2432 zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
2437 rw_exit(&zp->z_parent_lock);
2438 rw_exit(&zp->z_name_lock);
2439 #ifdef FREEBSD_NAMECACHE
2443 zfs_dirent_unlock(dl);
2447 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2448 zil_commit(zilog, 0);
2455 * Read as many directory entries as will fit into the provided
2456 * buffer from the given directory cursor position (specified in
2457 * the uio structure).
2459 * IN: vp - vnode of directory to read.
2460 * uio - structure supplying read location, range info,
2461 * and return buffer.
2462 * cr - credentials of caller.
2463 * ct - caller context
2464 * flags - case flags
2466 * OUT: uio - updated offset and range, buffer filled.
2467 * eofp - set to true if end-of-file detected.
2469 * RETURN: 0 on success, error code on failure.
2472 * vp - atime updated
 * Note that the low 4 bits of the cookie returned by zap are always zero.
2475 * This allows us to use the low range for "special" directory entries:
2476 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
2477 * we use the offset 2 for the '.zfs' directory.
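 * Since any real ZAP cookie has its low 4 bits clear, the smallest
 * non-zero cookie the ZAP can return is 0x10, so offsets 1 and 2 can
 * never collide with an actual directory entry.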
2481 zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, int *ncookies, u_long **cookies)
2483 znode_t *zp = VTOZ(vp);
2487 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2492 zap_attribute_t zap;
2493 uint_t bytes_wanted;
2494 uint64_t offset; /* must be unsigned; checks for < 1 */
2500 boolean_t check_sysattrs;
2503 u_long *cooks = NULL;
2509 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
2510 &parent, sizeof (parent))) != 0) {
 * If we are not given an eof variable,
 * use a local one.
2523 * Check for valid iov_len.
2525 if (uio->uio_iov->iov_len <= 0) {
2527 return (SET_ERROR(EINVAL));
2531 * Quit if directory has been removed (posix)
2533 if ((*eofp = zp->z_unlinked) != 0) {
2540 offset = uio->uio_loffset;
2541 prefetch = zp->z_zn_prefetch;
2544 * Initialize the iterator cursor.
2548 * Start iteration from the beginning of the directory.
2550 zap_cursor_init(&zc, os, zp->z_id);
2553 * The offset is a serialized cursor.
2555 zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
2559 * Get space to change directory entries into fs independent format.
2561 iovp = uio->uio_iov;
2562 bytes_wanted = iovp->iov_len;
2563 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) {
2564 bufsize = bytes_wanted;
2565 outbuf = kmem_alloc(bufsize, KM_SLEEP);
2566 odp = (struct dirent64 *)outbuf;
2568 bufsize = bytes_wanted;
2570 odp = (struct dirent64 *)iovp->iov_base;
2572 eodp = (struct edirent *)odp;
2574 if (ncookies != NULL) {
 * The minimum entry size is that of a dirent with a one-byte file name.
		ncooks = uio->uio_resid / (sizeof(struct dirent) -
		    sizeof(((struct dirent *)NULL)->d_name) + 1);
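		/*
		 * Dividing uio_resid by that minimum size therefore bounds
		 * the number of entries, and hence cookies, we can return.
		 */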
2579 cooks = malloc(ncooks * sizeof(u_long), M_TEMP, M_WAITOK);
2584 * If this VFS supports the system attribute view interface; and
2585 * we're looking at an extended attribute directory; and we care
2586 * about normalization conflicts on this vfs; then we must check
2587 * for normalization conflicts with the sysattr name space.
2590 check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
2591 (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm &&
2592 (flags & V_RDDIR_ENTFLAGS);
2598 * Transform to file-system independent format
2601 while (outcount < bytes_wanted) {
2604 off64_t *next = NULL;
2607 * Special case `.', `..', and `.zfs'.
2610 (void) strcpy(zap.za_name, ".");
2611 zap.za_normalization_conflict = 0;
2614 } else if (offset == 1) {
2615 (void) strcpy(zap.za_name, "..");
2616 zap.za_normalization_conflict = 0;
2619 } else if (offset == 2 && zfs_show_ctldir(zp)) {
2620 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
2621 zap.za_normalization_conflict = 0;
2622 objnum = ZFSCTL_INO_ROOT;
2628 if (error = zap_cursor_retrieve(&zc, &zap)) {
2629 if ((*eofp = (error == ENOENT)) != 0)
2635 if (zap.za_integer_length != 8 ||
2636 zap.za_num_integers != 1) {
2637 cmn_err(CE_WARN, "zap_readdir: bad directory "
2638 "entry, obj = %lld, offset = %lld\n",
2639 (u_longlong_t)zp->z_id,
2640 (u_longlong_t)offset);
2641 error = SET_ERROR(ENXIO);
2645 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
2647 * MacOS X can extract the object type here such as:
2648 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
2650 type = ZFS_DIRENT_TYPE(zap.za_first_integer);
2652 if (check_sysattrs && !zap.za_normalization_conflict) {
2654 zap.za_normalization_conflict =
2655 xattr_sysattr_casechk(zap.za_name);
2657 panic("%s:%u: TODO", __func__, __LINE__);
2662 if (flags & V_RDDIR_ACCFILTER) {
2664 * If we have no access at all, don't include
2665 * this entry in the returned information
2668 if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0)
2670 if (!zfs_has_access(ezp, cr)) {
2677 if (flags & V_RDDIR_ENTFLAGS)
2678 reclen = EDIRENT_RECLEN(strlen(zap.za_name));
2680 reclen = DIRENT64_RECLEN(strlen(zap.za_name));
2683 * Will this entry fit in the buffer?
2685 if (outcount + reclen > bufsize) {
2687 * Did we manage to fit anything in the buffer?
2690 error = SET_ERROR(EINVAL);
2695 if (flags & V_RDDIR_ENTFLAGS) {
2697 * Add extended flag entry:
2699 eodp->ed_ino = objnum;
2700 eodp->ed_reclen = reclen;
2701 /* NOTE: ed_off is the offset for the *next* entry */
2702 next = &(eodp->ed_off);
2703 eodp->ed_eflags = zap.za_normalization_conflict ?
2704 ED_CASE_CONFLICT : 0;
2705 (void) strncpy(eodp->ed_name, zap.za_name,
2706 EDIRENT_NAMELEN(reclen));
2707 eodp = (edirent_t *)((intptr_t)eodp + reclen);
2712 odp->d_ino = objnum;
2713 odp->d_reclen = reclen;
2714 odp->d_namlen = strlen(zap.za_name);
2715 (void) strlcpy(odp->d_name, zap.za_name, odp->d_namlen + 1);
2717 odp = (dirent64_t *)((intptr_t)odp + reclen);
2721 ASSERT(outcount <= bufsize);
2723 /* Prefetch znode */
2725 dmu_prefetch(os, objnum, 0, 0, 0,
2726 ZIO_PRIORITY_SYNC_READ);
2730 * Move to the next entry, fill in the previous offset.
2732 if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
2733 zap_cursor_advance(&zc);
2734 offset = zap_cursor_serialize(&zc);
2739 if (cooks != NULL) {
2742 KASSERT(ncooks >= 0, ("ncookies=%d", ncooks));
2745 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
2747 /* Subtract unused cookies */
2748 if (ncookies != NULL)
2749 *ncookies -= ncooks;
2751 if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) {
2752 iovp->iov_base += outcount;
2753 iovp->iov_len -= outcount;
2754 uio->uio_resid -= outcount;
2755 } else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) {
2757 * Reset the pointer.
2759 offset = uio->uio_loffset;
2763 zap_cursor_fini(&zc);
2764 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
2765 kmem_free(outbuf, bufsize);
2767 if (error == ENOENT)
2770 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
2772 uio->uio_loffset = offset;
2774 if (error != 0 && cookies != NULL) {
2775 free(*cookies, M_TEMP);
2782 ulong_t zfs_fsync_sync_cnt = 4;
2785 zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
2787 znode_t *zp = VTOZ(vp);
2788 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2790 (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
2792 if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
2795 zil_commit(zfsvfs->z_log, zp->z_id);
2803 * Get the requested file attributes and place them in the provided
2806 * IN: vp - vnode of file.
2807 * vap - va_mask identifies requested attributes.
2808 * If AT_XVATTR set, then optional attrs are requested
2809 * flags - ATTR_NOACLCHECK (CIFS server context)
2810 * cr - credentials of caller.
2811 * ct - caller context
2813 * OUT: vap - attribute values.
2815 * RETURN: 0 (always succeeds).
2819 zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2820 caller_context_t *ct)
2822 znode_t *zp = VTOZ(vp);
2823 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2826 u_longlong_t nblocks;
2828 uint64_t mtime[2], ctime[2], crtime[2], rdev;
2829 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2830 xoptattr_t *xoap = NULL;
2831 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2832 sa_bulk_attr_t bulk[4];
2838 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
2840 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
2841 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
2842 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
2843 if (vp->v_type == VBLK || vp->v_type == VCHR)
2844 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
2847 if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
2853 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
2854 * Also, if we are the owner don't bother, since the owner should
2855 * always be allowed to read the basic attributes of the file.
2857 if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
2858 (vap->va_uid != crgetuid(cr))) {
2859 if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2867 * Return all attributes. It's cheaper to provide the answer
2868 * than to determine whether we were asked the question.
2871 mutex_enter(&zp->z_lock);
2872 vap->va_type = IFTOVT(zp->z_mode);
2873 vap->va_mode = zp->z_mode & ~S_IFMT;
2875 vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev;
2877 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
2879 vap->va_nodeid = zp->z_id;
2880 if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp))
2881 links = zp->z_links + 1;
2883 links = zp->z_links;
2884 vap->va_nlink = MIN(links, LINK_MAX); /* nlink_t limit! */
2885 vap->va_size = zp->z_size;
2887 vap->va_rdev = vp->v_rdev;
2889 if (vp->v_type == VBLK || vp->v_type == VCHR)
2890 vap->va_rdev = zfs_cmpldev(rdev);
2892 vap->va_seq = zp->z_seq;
2893 vap->va_flags = 0; /* FreeBSD: Reset chflags(2) flags. */
2894 vap->va_filerev = zp->z_seq;
2897 * Add in any requested optional attributes and the create time.
2898 * Also set the corresponding bits in the returned attribute bitmap.
2900 if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
2901 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2903 ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2904 XVA_SET_RTN(xvap, XAT_ARCHIVE);
2907 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2908 xoap->xoa_readonly =
2909 ((zp->z_pflags & ZFS_READONLY) != 0);
2910 XVA_SET_RTN(xvap, XAT_READONLY);
2913 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2915 ((zp->z_pflags & ZFS_SYSTEM) != 0);
2916 XVA_SET_RTN(xvap, XAT_SYSTEM);
2919 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2921 ((zp->z_pflags & ZFS_HIDDEN) != 0);
2922 XVA_SET_RTN(xvap, XAT_HIDDEN);
2925 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2926 xoap->xoa_nounlink =
2927 ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2928 XVA_SET_RTN(xvap, XAT_NOUNLINK);
2931 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2932 xoap->xoa_immutable =
2933 ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2934 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2937 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2938 xoap->xoa_appendonly =
2939 ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2940 XVA_SET_RTN(xvap, XAT_APPENDONLY);
2943 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2945 ((zp->z_pflags & ZFS_NODUMP) != 0);
2946 XVA_SET_RTN(xvap, XAT_NODUMP);
2949 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2951 ((zp->z_pflags & ZFS_OPAQUE) != 0);
2952 XVA_SET_RTN(xvap, XAT_OPAQUE);
2955 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2956 xoap->xoa_av_quarantined =
2957 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2958 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2961 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2962 xoap->xoa_av_modified =
2963 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2964 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2967 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2968 vp->v_type == VREG) {
2969 zfs_sa_get_scanstamp(zp, xvap);
2972 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
2975 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
2976 times, sizeof (times));
2977 ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
2978 XVA_SET_RTN(xvap, XAT_CREATETIME);
2981 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2982 xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2983 XVA_SET_RTN(xvap, XAT_REPARSE);
2985 if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2986 xoap->xoa_generation = zp->z_gen;
2987 XVA_SET_RTN(xvap, XAT_GEN);
2990 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2992 ((zp->z_pflags & ZFS_OFFLINE) != 0);
2993 XVA_SET_RTN(xvap, XAT_OFFLINE);
2996 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2998 ((zp->z_pflags & ZFS_SPARSE) != 0);
2999 XVA_SET_RTN(xvap, XAT_SPARSE);
3003 ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
3004 ZFS_TIME_DECODE(&vap->va_mtime, mtime);
3005 ZFS_TIME_DECODE(&vap->va_ctime, ctime);
3006 ZFS_TIME_DECODE(&vap->va_birthtime, crtime);
3008 mutex_exit(&zp->z_lock);
3010 sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
3011 vap->va_blksize = blksize;
3012 vap->va_bytes = nblocks << 9; /* nblocks * 512 */
3014 if (zp->z_blksz == 0) {
3016 * Block size hasn't been set; suggest maximal I/O transfers.
3018 vap->va_blksize = zfsvfs->z_max_blksz;
3026 * Set the file attributes to the values contained in the vattr structure.
3029 * IN: vp - vnode of file to be modified.
3030 * vap - new attribute values.
3031 * If AT_XVATTR set, then optional attrs are being set
3032 * flags - ATTR_UTIME set if non-default time values provided.
3033 * - ATTR_NOACLCHECK (CIFS context only).
3034 * cr - credentials of caller.
3035 * ct - caller context
3037 * RETURN: 0 on success, error code on failure.
3040 * vp - ctime updated, mtime updated if size changed.
3044 zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
3045 caller_context_t *ct)
3047 znode_t *zp = VTOZ(vp);
3048 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3053 uint_t mask = vap->va_mask;
3054 uint_t saved_mask = 0;
3055 uint64_t saved_mode;
3058 uint64_t new_uid, new_gid;
3060 uint64_t mtime[2], ctime[2];
3062 int need_policy = FALSE;
3064 zfs_fuid_info_t *fuidp = NULL;
3065 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
3068 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
3069 boolean_t fuid_dirtied = B_FALSE;
3070 sa_bulk_attr_t bulk[7], xattr_bulk[7];
3071 int count = 0, xattr_count = 0;
3076 if (mask & AT_NOSET)
3077 return (SET_ERROR(EINVAL));
3082 zilog = zfsvfs->z_log;
3085 * Make sure that if we have an ephemeral uid/gid or xvattr specified,
3086 * the file system is at the proper version level.
3089 if (zfsvfs->z_use_fuids == B_FALSE &&
3090 (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
3091 ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
3092 (mask & AT_XVATTR))) {
3094 return (SET_ERROR(EINVAL));
3097 if (mask & AT_SIZE && vp->v_type == VDIR) {
3099 return (SET_ERROR(EISDIR));
3102 if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
3104 return (SET_ERROR(EINVAL));
3108 * If this is an xvattr_t, then get a pointer to the structure of
3109 * optional attributes. If this is NULL, then we have a vattr_t.
3111 xoap = xva_getxoptattr(xvap);
3113 xva_init(&tmpxvattr);
3116 * For immutable files, only the immutable bit and atime may be altered.
3118 if ((zp->z_pflags & ZFS_IMMUTABLE) &&
3119 ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
3120 ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
3122 return (SET_ERROR(EPERM));
3125 if ((mask & AT_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
3127 return (SET_ERROR(EPERM));
3131 * Verify that the timestamps don't overflow 32 bits.
3132 * ZFS can handle large timestamps, but 32-bit syscalls can't
3133 * handle times greater than 2039. This check should be removed
3134 * once large timestamps are fully supported.
3136 if (mask & (AT_ATIME | AT_MTIME)) {
3137 if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
3138 ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
3140 return (SET_ERROR(EOVERFLOW));
3148 /* Can this be moved to before the top label? */
3149 if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
3151 return (SET_ERROR(EROFS));
3155 * First validate permissions
3158 if (mask & AT_SIZE) {
3160 * XXX - Note, we are not providing any open
3161 * mode flags here (like FNDELAY), so we may
3162 * block if there are locks present... this
3163 * should be addressed in openat().
3165 /* XXX - would it be OK to generate a log record here? */
3166 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
3173 if (mask & (AT_ATIME|AT_MTIME) ||
3174 ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
3175 XVA_ISSET_REQ(xvap, XAT_READONLY) ||
3176 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
3177 XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
3178 XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
3179 XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
3180 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
3181 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
3185 if (mask & (AT_UID|AT_GID)) {
3186 int idmask = (mask & (AT_UID|AT_GID));
3191 * NOTE: even if a new mode is being set,
3192 * we may clear S_ISUID/S_ISGID bits.
3195 if (!(mask & AT_MODE))
3196 vap->va_mode = zp->z_mode;
3199 * Take ownership or chgrp to group we are a member of
3202 take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
3203 take_group = (mask & AT_GID) &&
3204 zfs_groupmember(zfsvfs, vap->va_gid, cr);
3207 * If both AT_UID and AT_GID are set then take_owner and
3208 * take_group must both be set in order to allow taking ownership.
3211 * Otherwise, send the check through secpolicy_vnode_setattr()
3215 if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
3216 ((idmask == AT_UID) && take_owner) ||
3217 ((idmask == AT_GID) && take_group)) {
3218 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
3219 skipaclchk, cr) == 0) {
3221 * Remove setuid/setgid for non-privileged users
3223 secpolicy_setid_clear(vap, vp, cr);
3224 trim_mask = (mask & (AT_UID|AT_GID));
3233 mutex_enter(&zp->z_lock);
3234 oldva.va_mode = zp->z_mode;
3235 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
3236 if (mask & AT_XVATTR) {
3238 * Update xvattr mask to include only those attributes
3239 * that are actually changing.
3241 * The bits will be restored prior to actually setting
3242 * the attributes, so the caller still sees them as having been set.
3244 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
3245 if (xoap->xoa_appendonly !=
3246 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
3249 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
3250 XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
3254 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
3255 if (xoap->xoa_nounlink !=
3256 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
3259 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
3260 XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
3264 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
3265 if (xoap->xoa_immutable !=
3266 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
3269 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
3270 XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
3274 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
3275 if (xoap->xoa_nodump !=
3276 ((zp->z_pflags & ZFS_NODUMP) != 0)) {
3279 XVA_CLR_REQ(xvap, XAT_NODUMP);
3280 XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
3284 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
3285 if (xoap->xoa_av_modified !=
3286 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
3289 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
3290 XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
3294 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
3295 if ((vp->v_type != VREG &&
3296 xoap->xoa_av_quarantined) ||
3297 xoap->xoa_av_quarantined !=
3298 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
3301 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
3302 XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
3306 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
3307 mutex_exit(&zp->z_lock);
3309 return (SET_ERROR(EPERM));
3312 if (need_policy == FALSE &&
3313 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
3314 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
3319 mutex_exit(&zp->z_lock);
3321 if (mask & AT_MODE) {
3322 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
3323 err = secpolicy_setid_setsticky_clear(vp, vap,
3329 trim_mask |= AT_MODE;
3337 * If trim_mask is set then take ownership
3338 * has been granted or write_acl is present and user
3339 * has the ability to modify mode. In that case remove
3340 * UID|GID and/or MODE from mask so that
3341 * secpolicy_vnode_setattr() doesn't revoke it.
3345 saved_mask = vap->va_mask;
3346 vap->va_mask &= ~trim_mask;
3347 if (trim_mask & AT_MODE) {
3349 * Save the mode, as secpolicy_vnode_setattr()
3350 * will overwrite it with oldva.va_mode.
3352 saved_mode = vap->va_mode;
3355 err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
3356 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
3363 vap->va_mask |= saved_mask;
3364 if (trim_mask & AT_MODE) {
3366 * Recover the mode after
3367 * secpolicy_vnode_setattr().
3369 vap->va_mode = saved_mode;
3375 * secpolicy_vnode_setattr(), or taking ownership, may have changed va_mask, so reload it.
3378 mask = vap->va_mask;
3380 if ((mask & (AT_UID | AT_GID))) {
3381 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
3382 &xattr_obj, sizeof (xattr_obj));
3384 if (err == 0 && xattr_obj) {
3385 err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
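/*
 * When changing the owner or group, check the new id against the
 * corresponding user/group quota before creating the transaction.
 */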
3389 if (mask & AT_UID) {
3390 new_uid = zfs_fuid_create(zfsvfs,
3391 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
3392 if (new_uid != zp->z_uid &&
3393 zfs_fuid_overquota(zfsvfs, B_FALSE, new_uid)) {
3395 VN_RELE(ZTOV(attrzp));
3396 err = SET_ERROR(EDQUOT);
3401 if (mask & AT_GID) {
3402 new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
3403 cr, ZFS_GROUP, &fuidp);
3404 if (new_gid != zp->z_gid &&
3405 zfs_fuid_overquota(zfsvfs, B_TRUE, new_gid)) {
3407 VN_RELE(ZTOV(attrzp));
3408 err = SET_ERROR(EDQUOT);
3413 tx = dmu_tx_create(zfsvfs->z_os);
3415 if (mask & AT_MODE) {
3416 uint64_t pmode = zp->z_mode;
3418 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
3420 if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
3421 !(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
3422 err = SET_ERROR(EPERM);
3426 if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode))
3429 mutex_enter(&zp->z_lock);
3430 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
3432 * Are we upgrading the ACL from the old V0 format to the new V1 format?
3435 if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
3436 zfs_znode_acl_version(zp) ==
3437 ZFS_ACL_VERSION_INITIAL) {
3438 dmu_tx_hold_free(tx, acl_obj, 0,
3440 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3441 0, aclp->z_acl_bytes);
3443 dmu_tx_hold_write(tx, acl_obj, 0,
3446 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3447 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3448 0, aclp->z_acl_bytes);
3450 mutex_exit(&zp->z_lock);
3451 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3453 if ((mask & AT_XVATTR) &&
3454 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3455 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3457 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3461 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
3464 fuid_dirtied = zfsvfs->z_fuid_dirty;
3466 zfs_fuid_txhold(zfsvfs, tx);
3468 zfs_sa_upgrade_txholds(tx, zp);
3470 err = dmu_tx_assign(tx, TXG_WAIT);
3476 * Set each attribute requested.
3477 * We group settings according to the locks they need to acquire.
3479 * Note: you cannot set ctime directly, although it will be
3480 * updated as a side-effect of calling this function.
3484 if (mask & (AT_UID|AT_GID|AT_MODE))
3485 mutex_enter(&zp->z_acl_lock);
3486 mutex_enter(&zp->z_lock);
3488 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3489 &zp->z_pflags, sizeof (zp->z_pflags));
3492 if (mask & (AT_UID|AT_GID|AT_MODE))
3493 mutex_enter(&attrzp->z_acl_lock);
3494 mutex_enter(&attrzp->z_lock);
3495 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3496 SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
3497 sizeof (attrzp->z_pflags));
3500 if (mask & (AT_UID|AT_GID)) {
3502 if (mask & AT_UID) {
3503 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
3504 &new_uid, sizeof (new_uid));
3505 zp->z_uid = new_uid;
3507 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3508 SA_ZPL_UID(zfsvfs), NULL, &new_uid,
3510 attrzp->z_uid = new_uid;
3514 if (mask & AT_GID) {
3515 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
3516 NULL, &new_gid, sizeof (new_gid));
3517 zp->z_gid = new_gid;
3519 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3520 SA_ZPL_GID(zfsvfs), NULL, &new_gid,
3522 attrzp->z_gid = new_gid;
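/*
 * Note that SA_ADD_BULK_ATTR() only records the address of new_mode;
 * the value actually written by sa_bulk_update() is whatever new_mode
 * holds at that point, so the assignment below still takes effect.
 */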
3525 if (!(mask & AT_MODE)) {
3526 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
3527 NULL, &new_mode, sizeof (new_mode));
3528 new_mode = zp->z_mode;
3530 err = zfs_acl_chown_setattr(zp);
3533 err = zfs_acl_chown_setattr(attrzp);
3538 if (mask & AT_MODE) {
3539 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
3540 &new_mode, sizeof (new_mode));
3541 zp->z_mode = new_mode;
3542 ASSERT3U((uintptr_t)aclp, !=, 0);
3543 err = zfs_aclset_common(zp, aclp, cr, tx);
3545 if (zp->z_acl_cached)
3546 zfs_acl_free(zp->z_acl_cached);
3547 zp->z_acl_cached = aclp;
3552 if (mask & AT_ATIME) {
3553 ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
3554 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
3555 &zp->z_atime, sizeof (zp->z_atime));
3558 if (mask & AT_MTIME) {
3559 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
3560 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3561 mtime, sizeof (mtime));
3564 /* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
3565 if (mask & AT_SIZE && !(mask & AT_MTIME)) {
3566 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
3567 NULL, mtime, sizeof (mtime));
3568 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3569 &ctime, sizeof (ctime));
3570 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
3572 } else if (mask != 0) {
3573 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3574 &ctime, sizeof (ctime));
3575 zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
3578 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3579 SA_ZPL_CTIME(zfsvfs), NULL,
3580 &ctime, sizeof (ctime));
3581 zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
3582 mtime, ctime, B_TRUE);
3586 * Do this after setting timestamps to prevent timestamp
3587 * update from toggling the bit.
3590 if (xoap && (mask & AT_XVATTR)) {
3593 * Restore the trimmed-off masks
3594 * so that the return masks can be set for the caller.
3597 if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
3598 XVA_SET_REQ(xvap, XAT_APPENDONLY);
3600 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
3601 XVA_SET_REQ(xvap, XAT_NOUNLINK);
3603 if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
3604 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3606 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
3607 XVA_SET_REQ(xvap, XAT_NODUMP);
3609 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
3610 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3612 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
3613 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3616 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3617 ASSERT(vp->v_type == VREG);
3619 zfs_xvattr_set(zp, xvap, tx);
3623 zfs_fuid_sync(zfsvfs, tx);
3626 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
3628 mutex_exit(&zp->z_lock);
3629 if (mask & (AT_UID|AT_GID|AT_MODE))
3630 mutex_exit(&zp->z_acl_lock);
3633 if (mask & (AT_UID|AT_GID|AT_MODE))
3634 mutex_exit(&attrzp->z_acl_lock);
3635 mutex_exit(&attrzp->z_lock);
3638 if (err == 0 && attrzp) {
3639 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3645 VN_RELE(ZTOV(attrzp));
3651 zfs_fuid_info_free(fuidp);
3657 if (err == ERESTART)
3660 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
3665 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3666 zil_commit(zilog, 0);
3672 typedef struct zfs_zlock {
3673 krwlock_t *zl_rwlock; /* lock we acquired */
3674 znode_t *zl_znode; /* znode we held */
3675 struct zfs_zlock *zl_next; /* next in list */
3679 * Drop locks and release vnodes that were held by zfs_rename_lock().
3682 zfs_rename_unlock(zfs_zlock_t **zlpp)
3686 while ((zl = *zlpp) != NULL) {
3687 if (zl->zl_znode != NULL)
3688 VN_RELE(ZTOV(zl->zl_znode));
3689 rw_exit(zl->zl_rwlock);
3690 *zlpp = zl->zl_next;
3691 kmem_free(zl, sizeof (*zl));
3696 * Search back through the directory tree, using the ".." entries.
3697 * Lock each directory in the chain to prevent concurrent renames.
3698 * Fail any attempt to move a directory into one of its own descendants.
3699 * XXX - z_parent_lock can overlap with map or grow locks
3702 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
3706 uint64_t rootid = zp->z_zfsvfs->z_root;
3707 uint64_t oidp = zp->z_id;
3708 krwlock_t *rwlp = &szp->z_parent_lock;
3709 krw_t rw = RW_WRITER;
3712 * First pass write-locks szp and compares to zp->z_id.
3713 * Later passes read-lock zp and compare to zp->z_parent.
3716 if (!rw_tryenter(rwlp, rw)) {
3718 * Another thread is renaming in this path.
3719 * Note that if we are a WRITER, we don't have any
3720 * parent_locks held yet.
3722 if (rw == RW_READER && zp->z_id > szp->z_id) {
3724 * Drop our locks and restart
3726 zfs_rename_unlock(&zl);
3730 rwlp = &szp->z_parent_lock;
3735 * Wait for other thread to drop its locks
3741 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3742 zl->zl_rwlock = rwlp;
3743 zl->zl_znode = NULL;
3744 zl->zl_next = *zlpp;
3747 if (oidp == szp->z_id) /* We're a descendant of szp */
3748 return (SET_ERROR(EINVAL));
3750 if (oidp == rootid) /* We've hit the top */
3753 if (rw == RW_READER) { /* i.e. not the first pass */
3754 int error = zfs_zget(zp->z_zfsvfs, oidp, &zp);
3759 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zp->z_zfsvfs),
3760 &oidp, sizeof (oidp));
3761 rwlp = &zp->z_parent_lock;
3764 } while (zp->z_id != sdzp->z_id);
3770 * Move an entry from the provided source directory to the target
3771 * directory. Change the entry name as indicated.
3773 * IN: sdvp - Source directory containing the "old entry".
3774 * snm - Old entry name.
3775 * tdvp - Target directory to contain the "new entry".
3776 * tnm - New entry name.
3777 * cr - credentials of caller.
3778 * ct - caller context
3779 * flags - case flags
3781 * RETURN: 0 on success, error code on failure.
3784 * sdvp,tdvp - ctime|mtime updated
3788 zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
3789 caller_context_t *ct, int flags)
3791 znode_t *tdzp, *sdzp, *szp, *tzp;
3795 zfs_dirlock_t *sdl, *tdl;
3798 int cmp, serr, terr;
3801 boolean_t waited = B_FALSE;
3804 ZFS_VERIFY_ZP(tdzp);
3805 zfsvfs = tdzp->z_zfsvfs;
3807 zilog = zfsvfs->z_log;
3811 * In case sdzp is not valid, let's be sure to exit from the right zfsvfs_t.
3814 if (sdzp->z_sa_hdl == NULL) {
3816 return (SET_ERROR(EIO));
3820 * We check z_zfsvfs rather than v_vfsp here, because snapshots and the
3821 * ctldir appear to have the same v_vfsp.
3823 if (sdzp->z_zfsvfs != zfsvfs || zfsctl_is_node(tdvp)) {
3825 return (SET_ERROR(EXDEV));
3828 if (zfsvfs->z_utf8 && u8_validate(tnm,
3829 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3831 return (SET_ERROR(EILSEQ));
3834 if (flags & FIGNORECASE)
3843 * This is to prevent the creation of links into attribute space
3844 * by renaming a linked file into/out of an attribute directory.
3845 * See the comment in zfs_link() for why this is considered bad.
3847 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3849 return (SET_ERROR(EINVAL));
3853 * Lock source and target directory entries. To prevent deadlock,
3854 * a lock ordering must be defined. We lock the directory with
3855 * the smallest object id first, or if it's a tie, the one with
3856 * the lexically first name.
3858 if (sdzp->z_id < tdzp->z_id) {
3860 } else if (sdzp->z_id > tdzp->z_id) {
3864 * First compare the two name arguments without
3865 * considering any case folding.
3867 int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
3869 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3870 ASSERT(error == 0 || !zfsvfs->z_utf8);
3873 * POSIX: "If the old argument and the new argument
3874 * both refer to links to the same existing file,
3875 * the rename() function shall return successfully
3876 * and perform no other action."
3882 * If the file system is case-folding, then we may
3883 * have some more checking to do. A case-folding file
3884 * system is either supporting mixed case sensitivity
3885 * access or is completely case-insensitive. Note
3886 * that the file system is always case preserving.
3888 * In mixed sensitivity mode case sensitive behavior
3889 * is the default. FIGNORECASE must be used to
3890 * explicitly request case insensitive behavior.
3892 * If the source and target names provided differ only
3893 * by case (e.g., a request to rename 'tim' to 'Tim'),
3894 * we will treat this as a special case in the
3895 * case-insensitive mode: as long as the source name
3896 * is an exact match, we will allow this to proceed as
3897 * a name-change request.
3899 if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
3900 (zfsvfs->z_case == ZFS_CASE_MIXED &&
3901 flags & FIGNORECASE)) &&
3902 u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
3905 * This is a case-preserving rename request; require exact name matches.
3914 * If the source and destination directories are the same, we should
3915 * grab the z_name_lock of that directory only once.
3919 rw_enter(&sdzp->z_name_lock, RW_READER);
3923 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3924 ZEXISTS | zflg, NULL, NULL);
3925 terr = zfs_dirent_lock(&tdl,
3926 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3928 terr = zfs_dirent_lock(&tdl,
3929 tdzp, tnm, &tzp, zflg, NULL, NULL);
3930 serr = zfs_dirent_lock(&sdl,
3931 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3937 * Source entry invalid or not there.
3940 zfs_dirent_unlock(tdl);
3946 rw_exit(&sdzp->z_name_lock);
3949 * FreeBSD: In OpenSolaris they only check if rename source is
3950 * ".." here, because "." is handled in their lookup. This is
3951 * not the case for FreeBSD, so we check for "." explicitly.
3953 if (strcmp(snm, ".") == 0 || strcmp(snm, "..") == 0)
3954 serr = SET_ERROR(EINVAL);
3959 zfs_dirent_unlock(sdl);
3963 rw_exit(&sdzp->z_name_lock);
3965 if (strcmp(tnm, "..") == 0)
3966 terr = SET_ERROR(EINVAL);
3972 * Must have write access at the source to remove the old entry
3973 * and write access at the target to create the new entry.
3974 * Note that if target and source are the same, this can be
3975 * done in a single check.
3978 if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr))
3981 if (ZTOV(szp)->v_type == VDIR) {
3983 * Check to make sure rename is valid.
3984 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3986 if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl))
3991 * Does target exist?
3995 * Source and target must be the same type.
3997 if (ZTOV(szp)->v_type == VDIR) {
3998 if (ZTOV(tzp)->v_type != VDIR) {
3999 error = SET_ERROR(ENOTDIR);
4003 if (ZTOV(tzp)->v_type == VDIR) {
4004 error = SET_ERROR(EISDIR);
4009 * POSIX dictates that when the source and target
4010 * entries refer to the same file object, rename
4011 * must do nothing and exit without error.
4013 if (szp->z_id == tzp->z_id) {
4019 vnevent_rename_src(ZTOV(szp), sdvp, snm, ct);
4021 vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct);
4024 * Notify the target directory if it is not the same
4025 * as the source directory.
4028 vnevent_rename_dest_dir(tdvp, ct);
4031 tx = dmu_tx_create(zfsvfs->z_os);
4032 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
4033 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
4034 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
4035 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
4037 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
4038 zfs_sa_upgrade_txholds(tx, tdzp);
4041 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
4042 zfs_sa_upgrade_txholds(tx, tzp);
4045 zfs_sa_upgrade_txholds(tx, szp);
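/*
 * Also hold the unlinked-set ZAP: if an existing target loses its last
 * link below, zfs_link_destroy() moves it onto the unlinked set.
 */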
4046 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
4047 error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
4050 zfs_rename_unlock(&zl);
4051 zfs_dirent_unlock(sdl);
4052 zfs_dirent_unlock(tdl);
4055 rw_exit(&sdzp->z_name_lock);
4060 if (error == ERESTART) {
4071 if (tzp) /* Attempt to remove the existing target */
4072 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
4075 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
4077 szp->z_pflags |= ZFS_AV_MODIFIED;
4079 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
4080 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
4083 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
4085 zfs_log_rename(zilog, tx, TX_RENAME |
4086 (flags & FIGNORECASE ? TX_CI : 0), sdzp,
4087 sdl->dl_name, tdzp, tdl->dl_name, szp);
4090 * Update path information for the target vnode
4092 vn_renamepath(tdvp, ZTOV(szp), tnm,
4096 * At this point, we have successfully created
4097 * the target name, but have failed to remove
4098 * the source name. Since the create was done
4099 * with the ZRENAMING flag, there are
4100 * complications; for one, the link count is
4101 * wrong. The easiest way to deal with this
4102 * is to remove the newly created target, and
4103 * return the original error. This must
4104 * succeed; fortunately, it is very unlikely to
4105 * fail, since we just created it.
4107 VERIFY3U(zfs_link_destroy(tdl, szp, tx,
4108 ZRENAMING, NULL), ==, 0);
4111 #ifdef FREEBSD_NAMECACHE
4115 cache_purge(ZTOV(szp));
4117 cache_purge(ZTOV(tzp));
4125 zfs_rename_unlock(&zl);
4127 zfs_dirent_unlock(sdl);
4128 zfs_dirent_unlock(tdl);
4131 rw_exit(&sdzp->z_name_lock);
4138 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4139 zil_commit(zilog, 0);
4147 * Insert the indicated symbolic reference entry into the directory.
4149 * IN: dvp - Directory to contain new symbolic link.
4150 * link - Name for new symlink entry.
4151 * vap - Attributes of new entry.
4152 * cr - credentials of caller.
4153 * ct - caller context
4154 * flags - case flags
4156 * RETURN: 0 on success, error code on failure.
4159 * dvp - ctime|mtime updated
4163 zfs_symlink(vnode_t *dvp, vnode_t **vpp, char *name, vattr_t *vap, char *link,
4164 cred_t *cr, kthread_t *td)
4166 znode_t *zp, *dzp = VTOZ(dvp);
4169 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
4171 uint64_t len = strlen(link);
4174 zfs_acl_ids_t acl_ids;
4175 boolean_t fuid_dirtied;
4176 uint64_t txtype = TX_SYMLINK;
4177 boolean_t waited = B_FALSE;
4180 ASSERT(vap->va_type == VLNK);
4184 zilog = zfsvfs->z_log;
4186 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
4187 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4189 return (SET_ERROR(EILSEQ));
4191 if (flags & FIGNORECASE)
4194 if (len > MAXPATHLEN) {
4196 return (SET_ERROR(ENAMETOOLONG));
4199 if ((error = zfs_acl_ids_create(dzp, 0,
4200 vap, cr, NULL, &acl_ids)) != 0) {
4205 getnewvnode_reserve(1);
4209 * Attempt to lock directory; fail if entry already exists.
4211 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
4213 zfs_acl_ids_free(&acl_ids);
4214 getnewvnode_drop_reserve();
4219 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
4220 zfs_acl_ids_free(&acl_ids);
4221 zfs_dirent_unlock(dl);
4222 getnewvnode_drop_reserve();
4227 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
4228 zfs_acl_ids_free(&acl_ids);
4229 zfs_dirent_unlock(dl);
4230 getnewvnode_drop_reserve();
4232 return (SET_ERROR(EDQUOT));
4234 tx = dmu_tx_create(zfsvfs->z_os);
4235 fuid_dirtied = zfsvfs->z_fuid_dirty;
4236 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
4237 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4238 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
4239 ZFS_SA_BASE_ATTR_SIZE + len);
4240 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
4241 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
4242 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
4243 acl_ids.z_aclp->z_acl_bytes);
4246 zfs_fuid_txhold(zfsvfs, tx);
4247 error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
4249 zfs_dirent_unlock(dl);
4250 if (error == ERESTART) {
4256 zfs_acl_ids_free(&acl_ids);
4258 getnewvnode_drop_reserve();
4264 * Create a new object for the symlink.
4265 * For version 4 ZPL datasets the symlink will be an SA attribute.
4267 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
4270 zfs_fuid_sync(zfsvfs, tx);
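/*
 * Store the link target: SA-based znodes keep it as the SA_ZPL_SYMLINK
 * attribute, while older on-disk layouts fall back to zfs_sa_symlink().
 */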
4272 mutex_enter(&zp->z_lock);
4274 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
4277 zfs_sa_symlink(zp, link, len, tx);
4278 mutex_exit(&zp->z_lock);
4281 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
4282 &zp->z_size, sizeof (zp->z_size), tx);
4284 * Insert the new object into the directory.
4286 (void) zfs_link_create(dl, zp, tx, ZNEW);
4288 if (flags & FIGNORECASE)
4290 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
4293 zfs_acl_ids_free(&acl_ids);
4297 getnewvnode_drop_reserve();
4299 zfs_dirent_unlock(dl);
4301 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4302 zil_commit(zilog, 0);
4309 * Return, in the buffer contained in the provided uio structure,
4310 * the symbolic path referred to by vp.
4312 * IN: vp - vnode of symbolic link.
4313 * uio - structure to contain the link path.
4314 * cr - credentials of caller.
4315 * ct - caller context
4317 * OUT: uio - structure containing the link path.
4319 * RETURN: 0 on success, error code on failure.
4322 * vp - atime updated
4326 zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct)
4328 znode_t *zp = VTOZ(vp);
4329 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4335 mutex_enter(&zp->z_lock);
4337 error = sa_lookup_uio(zp->z_sa_hdl,
4338 SA_ZPL_SYMLINK(zfsvfs), uio);
4340 error = zfs_sa_readlink(zp, uio);
4341 mutex_exit(&zp->z_lock);
4343 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4350 * Insert a new entry into directory tdvp referencing svp.
4352 * IN: tdvp - Directory to contain new entry.
4353 * svp - vnode of new entry.
4354 * name - name of new entry.
4355 * cr - credentials of caller.
4356 * ct - caller context
4358 * RETURN: 0 on success, error code on failure.
4361 * tdvp - ctime|mtime updated
4362 * svp - ctime updated
4366 zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
4367 caller_context_t *ct, int flags)
4369 znode_t *dzp = VTOZ(tdvp);
4371 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
4380 boolean_t waited = B_FALSE;
4382 ASSERT(tdvp->v_type == VDIR);
4386 zilog = zfsvfs->z_log;
4388 if (VOP_REALVP(svp, &realvp, ct) == 0)
4392 * POSIX dictates that we return EPERM here.
4393 * Better choices include ENOTSUP or EISDIR.
4395 if (svp->v_type == VDIR) {
4397 return (SET_ERROR(EPERM));
4403 if (szp->z_pflags & (ZFS_APPENDONLY | ZFS_IMMUTABLE | ZFS_READONLY)) {
4405 return (SET_ERROR(EPERM));
4409 * We check z_zfsvfs rather than v_vfsp here, because snapshots and the
4410 * ctldir appear to have the same v_vfsp.
4412 if (szp->z_zfsvfs != zfsvfs || zfsctl_is_node(svp)) {
4414 return (SET_ERROR(EXDEV));
4417 /* Prevent links to .zfs/shares files */
4419 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
4420 &parent, sizeof (uint64_t))) != 0) {
4424 if (parent == zfsvfs->z_shares_dir) {
4426 return (SET_ERROR(EPERM));
4429 if (zfsvfs->z_utf8 && u8_validate(name,
4430 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4432 return (SET_ERROR(EILSEQ));
4434 if (flags & FIGNORECASE)
4438 * We do not support links between attributes and non-attributes
4439 * because of the potential security risk of creating links
4440 * into "normal" file space in order to circumvent restrictions
4441 * imposed in attribute space.
4443 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
4445 return (SET_ERROR(EINVAL));
4449 owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
4450 if (owner != crgetuid(cr) && secpolicy_basic_link(svp, cr) != 0) {
4452 return (SET_ERROR(EPERM));
4455 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
4462 * Attempt to lock directory; fail if entry already exists.
4464 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
4470 tx = dmu_tx_create(zfsvfs->z_os);
4471 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
4472 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4473 zfs_sa_upgrade_txholds(tx, szp);
4474 zfs_sa_upgrade_txholds(tx, dzp);
4475 error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
4477 zfs_dirent_unlock(dl);
4478 if (error == ERESTART) {
4489 error = zfs_link_create(dl, szp, tx, 0);
4492 uint64_t txtype = TX_LINK;
4493 if (flags & FIGNORECASE)
4495 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
4500 zfs_dirent_unlock(dl);
4503 vnevent_link(svp, ct);
4506 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4507 zil_commit(zilog, 0);
4515 * zfs_null_putapage() is used when the file system has been force
4516 * unmounted. It just drops the pages.
4520 zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
4521 size_t *lenp, int flags, cred_t *cr)
4523 pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR);
4528 * Push a page out to disk, klustering if possible.
4530 * IN: vp - file to push page to.
4531 * pp - page to push.
4532 * flags - additional flags.
4533 * cr - credentials of caller.
4535 * OUT: offp - start of range pushed.
4536 * lenp - len of range pushed.
4538 * RETURN: 0 on success, error code on failure.
4540 * NOTE: callers must have locked the page to be pushed. On
4541 * exit, the page (and all other pages in the kluster) must be unlocked.
4546 zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
4547 size_t *lenp, int flags, cred_t *cr)
4549 znode_t *zp = VTOZ(vp);
4550 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4552 u_offset_t off, koff;
4559 * If our blocksize is bigger than the page size, try to kluster
4560 * multiple pages so that we write a full block (thus avoiding
4561 * a read-modify-write).
4563 if (off < zp->z_size && zp->z_blksz > PAGESIZE) {
4564 klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
4565 koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0;
4566 ASSERT(koff <= zp->z_size);
4567 if (koff + klen > zp->z_size)
4568 klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE);
4569 pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags);
4571 ASSERT3U(btop(len), ==, btopr(len));
4574 * Can't push pages past end-of-file.
4576 if (off >= zp->z_size) {
4577 /* ignore all pages */
4580 } else if (off + len > zp->z_size) {
4581 int npages = btopr(zp->z_size - off);
4584 page_list_break(&pp, &trunc, npages);
4585 /* ignore pages past end of file */
4587 pvn_write_done(trunc, flags);
4588 len = zp->z_size - off;
4591 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
4592 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
4593 err = SET_ERROR(EDQUOT);
4596 tx = dmu_tx_create(zfsvfs->z_os);
4597 dmu_tx_hold_write(tx, zp->z_id, off, len);
4599 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4600 zfs_sa_upgrade_txholds(tx, zp);
4601 err = dmu_tx_assign(tx, TXG_WAIT);
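/*
 * For blocks no larger than a page, copy the data through a temporary
 * kernel mapping with dmu_write(); for larger blocks, hand the whole
 * page list to dmu_write_pages().
 */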
4607 if (zp->z_blksz <= PAGESIZE) {
4608 caddr_t va = zfs_map_page(pp, S_READ);
4609 ASSERT3U(len, <=, PAGESIZE);
4610 dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx);
4611 zfs_unmap_page(pp, va);
4613 err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx);
4617 uint64_t mtime[2], ctime[2];
4618 sa_bulk_attr_t bulk[3];
4621 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
4623 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
4625 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
4627 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
4629 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
4634 pvn_write_done(pp, (err ? B_ERROR : 0) | flags);
4644 * Copy the portion of the file indicated from pages into the file.
4645 * The pages are stored in a page list attached to the file's vnode.
4647 * IN: vp - vnode of file to push page data to.
4648 * off - position in file to put data.
4649 * len - amount of data to write.
4650 * flags - flags to control the operation.
4651 * cr - credentials of caller.
4652 * ct - caller context.
4654 * RETURN: 0 on success, error code on failure.
4657 * vp - ctime|mtime updated
4661 zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
4662 caller_context_t *ct)
4664 znode_t *zp = VTOZ(vp);
4665 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4677 * Align this request to the file block size in case we kluster.
4678 * XXX - this can result in pretty aggressive locking, which can
4679 * impact simultaneous read/write access. One option might be
4680 * to break up long requests (len == 0) into block-by-block
4681 * operations to get narrower locking.
4683 blksz = zp->z_blksz;
4685 io_off = P2ALIGN_TYPED(off, blksz, u_offset_t);
4688 if (len > 0 && ISP2(blksz))
4689 io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t);
4695 * Search the entire vp list for pages >= io_off.
4697 rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER);
4698 error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr);
4701 rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER);
4703 if (off > zp->z_size) {
4704 /* past end of file */
4705 zfs_range_unlock(rl);
4710 len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off);
4712 for (off = io_off; io_off < off + len; io_off += io_len) {
4713 if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
4714 pp = page_lookup(vp, io_off,
4715 (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED);
4717 pp = page_lookup_nowait(vp, io_off,
4718 (flags & B_FREE) ? SE_EXCL : SE_SHARED);
4721 if (pp != NULL && pvn_getdirty(pp, flags)) {
4725 * Found a dirty page to push
4727 err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
4735 zfs_range_unlock(rl);
4736 if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4737 zil_commit(zfsvfs->z_log, zp->z_id);
4741 #endif /* illumos */
4745 zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
4747 znode_t *zp = VTOZ(vp);
4748 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4751 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
4752 if (zp->z_sa_hdl == NULL) {
4754 * The fs has been unmounted, or we did a
4755 * suspend/resume and this file no longer exists.
4757 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4762 mutex_enter(&zp->z_lock);
4763 if (zp->z_unlinked) {
4765 * Fast path to recycle a vnode of a removed file.
4767 mutex_exit(&zp->z_lock);
4768 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4772 mutex_exit(&zp->z_lock);
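/*
 * If the in-core atime is dirty and the file has not been unlinked,
 * write it back to the SA now, before the vnode is recycled.
 */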
4774 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
4775 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
4777 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4778 zfs_sa_upgrade_txholds(tx, zp);
4779 error = dmu_tx_assign(tx, TXG_WAIT);
4783 mutex_enter(&zp->z_lock);
4784 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
4785 (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
4786 zp->z_atime_dirty = 0;
4787 mutex_exit(&zp->z_lock);
4791 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4796 * Bounds-check the seek operation.
4798 * IN: vp - vnode seeking within
4799 * ooff - old file offset
4800 * noffp - pointer to new file offset
4801 * ct - caller context
4803 * RETURN: 0 on success, EINVAL if new offset invalid.
4807 zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp,
4808 caller_context_t *ct)
4810 if (vp->v_type == VDIR)
4812 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4816 * Pre-filter the generic locking function to trap attempts to place
4817 * a mandatory lock on a memory mapped file.
4820 zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
4821 flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct)
4823 znode_t *zp = VTOZ(vp);
4824 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4830 * We are following the UFS semantics with respect to mapcnt
4831 * here: If we see that the file is mapped already, then we will
4832 * return an error, but we don't worry about races between this
4833 * function and zfs_map().
4835 if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) {
4837 return (SET_ERROR(EAGAIN));
4840 return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4844 * If we can't find a page in the cache, we will create a new page
4845 * and fill it with file data. For efficiency, we may try to fill
4846 * multiple pages at once (klustering) to fill up the supplied page
4847 * list. Note that the pages to be filled are held with an exclusive
4848 * lock to prevent access by other threads while they are being filled.
4851 zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg,
4852 caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw)
4854 znode_t *zp = VTOZ(vp);
4855 page_t *pp, *cur_pp;
4856 objset_t *os = zp->z_zfsvfs->z_os;
4857 u_offset_t io_off, total;
4861 if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) {
4863 * We only have a single page, don't bother klustering
4867 pp = page_create_va(vp, io_off, io_len,
4868 PG_EXCL | PG_WAIT, seg, addr);
4871 * Try to find enough pages to fill the page list
4873 pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
4874 &io_len, off, plsz, 0);
4878 * The page already exists, nothing to do here.
4885 * Fill the pages in the kluster.
4888 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4891 ASSERT3U(io_off, ==, cur_pp->p_offset);
4892 va = zfs_map_page(cur_pp, S_WRITE);
4893 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4895 zfs_unmap_page(cur_pp, va);
4897 /* On error, toss the entire kluster */
4898 pvn_read_done(pp, B_ERROR);
4899 /* convert checksum errors into IO errors */
4901 err = SET_ERROR(EIO);
4904 cur_pp = cur_pp->p_next;
4908 * Fill in the page list array from the kluster starting
4909 * from the desired offset `off'.
4910 * NOTE: the page list will always be null terminated.
4912 pvn_plist_init(pp, pl, plsz, off, io_len, rw);
4913 ASSERT(pl == NULL || (*pl)->p_offset == off);
4919 * Return pointers to the pages for the file region [off, off + len]
4920 * in the pl array. If plsz is greater than len, this function may
4921 * also return page pointers from after the specified region
4922 * (i.e. the region [off, off + plsz]). These additional pages are
4923 * only returned if they are already in the cache, or were created as
4924 * part of a klustered read.
4926 * IN: vp - vnode of file to get data from.
4927 * off - position in file to get data from.
4928 * len - amount of data to retrieve.
4929 * plsz - length of provided page list.
4930 * seg - segment to obtain pages for.
4931 * addr - virtual address of fault.
4932 * rw - mode of created pages.
4933 * cr - credentials of caller.
4934 * ct - caller context.
4936 * OUT: protp - protection mode of created pages.
4937 * pl - list of pages created.
4939 * RETURN: 0 on success, error code on failure.
4942 * vp - atime updated
4946 zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
4947 page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
4948 enum seg_rw rw, cred_t *cr, caller_context_t *ct)
4950 znode_t *zp = VTOZ(vp);
4951 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4955 /* we do our own caching, faultahead is unnecessary */
4958 else if (len > plsz)
4961 len = P2ROUNDUP(len, PAGESIZE);
4962 ASSERT(plsz >= len);
4971 * Loop through the requested range [off, off + len) looking
4972 * for pages. If we don't find a page, we will need to create
4973 * a new page and fill it with data from the file.
4976 if (*pl = page_lookup(vp, off, SE_SHARED))
4978 else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw))
4981 ASSERT3U((*pl)->p_offset, ==, off);
4985 ASSERT3U(len, >=, PAGESIZE);
4988 ASSERT3U(plsz, >=, PAGESIZE);
4995 * Fill out the page array with any pages already in the cache.
4998 (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) {
5005 * Release any pages we have previously locked.
5010 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
5020 * Request a memory map for a section of a file. This code interacts
5021 * with common code and the VM system as follows:
5023 * - common code calls mmap(), which ends up in smmap_common()
5024 * - this calls VOP_MAP(), which takes you into (say) zfs
5025 * - zfs_map() calls as_map(), passing segvn_create() as the callback
5026 * - segvn_create() creates the new segment and calls VOP_ADDMAP()
5027 * - zfs_addmap() updates z_mapcnt
5031 zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
5032 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
5033 caller_context_t *ct)
5035 znode_t *zp = VTOZ(vp);
5036 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5037 segvn_crargs_t vn_a;
5043 if ((prot & PROT_WRITE) && (zp->z_pflags &
5044 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
5046 return (SET_ERROR(EPERM));
5049 if ((prot & (PROT_READ | PROT_EXEC)) &&
5050 (zp->z_pflags & ZFS_AV_QUARANTINED)) {
5052 return (SET_ERROR(EACCES));
5055 if (vp->v_flag & VNOMAP) {
5057 return (SET_ERROR(ENOSYS));
5060 if (off < 0 || len > MAXOFFSET_T - off) {
5062 return (SET_ERROR(ENXIO));
5065 if (vp->v_type != VREG) {
5067 return (SET_ERROR(ENODEV));
5071 * If file is locked, disallow mapping.
5073 if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
5075 return (SET_ERROR(EAGAIN));
5079 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
5087 vn_a.offset = (u_offset_t)off;
5088 vn_a.type = flags & MAP_TYPE;
5090 vn_a.maxprot = maxprot;
5093 vn_a.flags = flags & ~MAP_TYPE;
5095 vn_a.lgrp_mem_policy_flags = 0;
5097 error = as_map(as, *addrp, len, segvn_create, &vn_a);
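/*
 * zfs_addmap() simply counts the newly mapped pages in z_mapcnt so that
 * zfs_frlock() and zfs_delmap() can tell whether the file is memory
 * mapped.
 */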
5106 zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
5107 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
5108 caller_context_t *ct)
5110 uint64_t pages = btopr(len);
5112 atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
5117 * The reason we push dirty pages as part of zfs_delmap() is so that we get a
5118 * more accurate mtime for the associated file. Since we don't have a way of
5119 * detecting when the data was actually modified, we have to resort to
5120 * heuristics. If an explicit msync() is done, then we mark the mtime when the
5121 * last page is pushed. The problem occurs when the msync() call is omitted,
5122 * which is by far the most common case:
5130 * putpage() via fsflush
5132 * If we wait until fsflush to come along, we can have a modification time that
5133 * is some arbitrary point in the future. In order to prevent this in the
5134 * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is removed.
5139 zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
5140 size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
5141 caller_context_t *ct)
5143 uint64_t pages = btopr(len);
5145 ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
5146 atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);
5148 if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
5149 vn_has_cached_data(vp))
5150 (void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct);
5156 * Free or allocate space in a file. Currently, this function only
5157 * supports the `F_FREESP' command. However, this command is somewhat
5158 * misnamed, as its functionality includes the ability to allocate as
5159 * well as free space.
5161 * IN: vp - vnode of file to free data in.
5162 * cmd - action to take (only F_FREESP supported).
5163 * bfp - section of file to free/alloc.
5164 * flag - current file open mode flags.
5165 * offset - current file offset.
5166 * cr - credentials of caller [UNUSED].
5167 * ct - caller context.
5169 * RETURN: 0 on success, error code on failure.
5172 * vp - ctime|mtime updated
5176 zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
5177 offset_t offset, cred_t *cr, caller_context_t *ct)
5179 znode_t *zp = VTOZ(vp);
5180 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5187 if (cmd != F_FREESP) {
5189 return (SET_ERROR(EINVAL));
5193 * In the case where vp->v_vfsp != zp->z_zfsvfs->z_vfs (e.g. snapshots), our
5194 * callers might not be able to properly detect that we are read-only,
5195 * so check it explicitly here.
5197 if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
5199 return (SET_ERROR(EROFS));
5202 if (error = convoff(vp, bfp, 0, offset)) {
5207 if (bfp->l_len < 0) {
5209 return (SET_ERROR(EINVAL));
5213 len = bfp->l_len; /* 0 means from off to end of file */
5215 error = zfs_freesp(zp, off, len, flag, TRUE);
5220 #endif /* illumos */
5222 CTASSERT(sizeof(struct zfid_short) <= sizeof(struct fid));
5223 CTASSERT(sizeof(struct zfid_long) <= sizeof(struct fid));
5227 zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
5229 znode_t *zp = VTOZ(vp);
5230 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5233 uint64_t object = zp->z_id;
5240 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
5241 &gen64, sizeof (uint64_t))) != 0) {
5246 gen = (uint32_t)gen64;
5248 size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
5251 if (fidp->fid_len < size) {
5252 fidp->fid_len = size;
5254 return (SET_ERROR(ENOSPC));
5257 fidp->fid_len = size;
5260 zfid = (zfid_short_t *)fidp;
5262 zfid->zf_len = size;
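/*
 * Pack the object number and generation into the fid one byte at a
 * time, least-significant byte first.
 */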
5264 for (i = 0; i < sizeof (zfid->zf_object); i++)
5265 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
5267 /* Must have a non-zero generation number to distinguish from .zfs */
5270 for (i = 0; i < sizeof (zfid->zf_gen); i++)
5271 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
5273 if (size == LONG_FID_LEN) {
5274 uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
5277 zlfid = (zfid_long_t *)fidp;
5279 for (i = 0; i < sizeof (zlfid->zf_setid); i++)
5280 zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
5282 /* XXX - this should be the generation number for the objset */
5283 for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
5284 zlfid->zf_setgen[i] = 0;
5292 zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
5293 caller_context_t *ct)
5305 case _PC_FILESIZEBITS:
5309 case _PC_XATTR_EXISTS:
5311 zfsvfs = zp->z_zfsvfs;
5315 error = zfs_dirent_lock(&dl, zp, "", &xzp,
5316 ZXATTR | ZEXISTS | ZSHARED, NULL, NULL);
5318 zfs_dirent_unlock(dl);
5319 if (!zfs_dirempty(xzp))
5322 } else if (error == ENOENT) {
5324 * If there aren't extended attributes, it's the
5325 * same as having zero of them.
5332 case _PC_SATTR_ENABLED:
5333 case _PC_SATTR_EXISTS:
5334 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
5335 (vp->v_type == VREG || vp->v_type == VDIR);
5338 case _PC_ACCESS_FILTERING:
5339 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_ACCESS_FILTER) &&
5343 case _PC_ACL_ENABLED:
5344 *valp = _ACL_ACE_ENABLED;
5346 #endif /* illumos */
5347 case _PC_MIN_HOLE_SIZE:
5348 *valp = (int)SPA_MINBLOCKSIZE;
5351 case _PC_TIMESTAMP_RESOLUTION:
5352 /* nanosecond timestamp resolution */
5356 case _PC_ACL_EXTENDED:
5364 case _PC_ACL_PATH_MAX:
5365 *valp = ACL_MAX_ENTRIES;
5369 return (EOPNOTSUPP);
5375 zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
5376 caller_context_t *ct)
5378 znode_t *zp = VTOZ(vp);
5379 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5381 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
5385 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
5393 zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
5394 caller_context_t *ct)
5396 znode_t *zp = VTOZ(vp);
5397 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5399 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
5400 zilog_t *zilog = zfsvfs->z_log;
5405 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
5407 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
5408 zil_commit(zilog, 0);
5416 * The smallest read for which we may consider loaning out an arcbuf.
5417 * This must be a power of 2.
5419 int zcr_blksz_min = (1 << 10); /* 1K */
5421 * If set to less than the file block size, allow loaning out of an
5422 * arcbuf for a partial block read. This must be a power of 2.
5424 int zcr_blksz_max = (1 << 17); /* 128K */
5428 zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
5429 caller_context_t *ct)
5431 znode_t *zp = VTOZ(vp);
5432 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5433 int max_blksz = zfsvfs->z_max_blksz;
5434 uio_t *uio = &xuio->xu_uio;
5435 ssize_t size = uio->uio_resid;
5436 offset_t offset = uio->uio_loffset;
5441 int preamble, postamble;
5443 if (xuio->xu_type != UIOTYPE_ZEROCOPY)
5444 return (SET_ERROR(EINVAL));
5451 * Loan out an arc_buf for write if write size is bigger than
5452 * max_blksz, and the file's block size is also max_blksz.
5455 if (size < blksz || zp->z_blksz != blksz) {
5457 return (SET_ERROR(EINVAL));
5460 * Caller requests buffers for write before knowing where the
5461 * write offset might be (e.g. NFS TCP write).
5466 preamble = P2PHASE(offset, blksz);
5468 preamble = blksz - preamble;
5473 postamble = P2PHASE(size, blksz);
5476 fullblk = size / blksz;
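/*
 * The request is split into an unaligned preamble, some number of full
 * blocks, and an unaligned postamble; one arc_buf is loaned out for
 * each piece that is present.
 */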
5477 (void) dmu_xuio_init(xuio,
5478 (preamble != 0) + fullblk + (postamble != 0));
5479 DTRACE_PROBE3(zfs_reqzcbuf_align, int, preamble,
5480 int, postamble, int,
5481 (preamble != 0) + fullblk + (postamble != 0));
5484 * Have to fix iov base/len for partial buffers. They
5485 * currently represent full arc_buf's.
5488 /* data begins in the middle of the arc_buf */
5489 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5492 (void) dmu_xuio_add(xuio, abuf,
5493 blksz - preamble, preamble);
5496 for (i = 0; i < fullblk; i++) {
5497 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5500 (void) dmu_xuio_add(xuio, abuf, 0, blksz);
5504 /* data ends in the middle of the arc_buf */
5505 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5508 (void) dmu_xuio_add(xuio, abuf, 0, postamble);
5513 * Loan out an arc_buf for read if the read size is larger than
5514 * the current file block size. Block alignment is not
5515 * considered. A partial arc_buf will be loaned out for the read.
5517 blksz = zp->z_blksz;
5518 if (blksz < zcr_blksz_min)
5519 blksz = zcr_blksz_min;
5520 if (blksz > zcr_blksz_max)
5521 blksz = zcr_blksz_max;
5522 /* avoid potential complexity of dealing with it */
5523 if (blksz > max_blksz) {
5525 return (SET_ERROR(EINVAL));
5528 maxsize = zp->z_size - uio->uio_loffset;
5532 if (size < blksz || vn_has_cached_data(vp)) {
5534 return (SET_ERROR(EINVAL));
5539 return (SET_ERROR(EINVAL));
5542 uio->uio_extflg = UIO_XUIO;
5543 XUIO_XUZC_RW(xuio) = ioflag;
5550 zfs_retzcbuf(vnode_t *vp, xuio_t *xuio, cred_t *cr, caller_context_t *ct)
5554 int ioflag = XUIO_XUZC_RW(xuio);
5556 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
5558 i = dmu_xuio_cnt(xuio);
5560 abuf = dmu_xuio_arcbuf(xuio, i);
5562 * if abuf == NULL, it must be a write buffer
5563 * that has been returned in zfs_write().
5566 dmu_return_arcbuf(abuf);
5567 ASSERT(abuf || ioflag == UIO_WRITE);
5570 dmu_xuio_fini(xuio);
5575  * Predeclare these here so that the compiler treats them as
5576  * "old style" function declarations that do not include
5577  * arguments, so we won't get type mismatch errors
5578  * in the initializations that follow.
5580 static int zfs_inval();
5581 static int zfs_isdir();
5586 return (SET_ERROR(EINVAL));
5592 return (SET_ERROR(EISDIR));
5595 * Directory vnode operations template
5597 vnodeops_t *zfs_dvnodeops;
5598 const fs_operation_def_t zfs_dvnodeops_template[] = {
5599 VOPNAME_OPEN, { .vop_open = zfs_open },
5600 VOPNAME_CLOSE, { .vop_close = zfs_close },
5601 VOPNAME_READ, { .error = zfs_isdir },
5602 VOPNAME_WRITE, { .error = zfs_isdir },
5603 VOPNAME_IOCTL, { .vop_ioctl = zfs_ioctl },
5604 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
5605 VOPNAME_SETATTR, { .vop_setattr = zfs_setattr },
5606 VOPNAME_ACCESS, { .vop_access = zfs_access },
5607 VOPNAME_LOOKUP, { .vop_lookup = zfs_lookup },
5608 VOPNAME_CREATE, { .vop_create = zfs_create },
5609 VOPNAME_REMOVE, { .vop_remove = zfs_remove },
5610 VOPNAME_LINK, { .vop_link = zfs_link },
5611 VOPNAME_RENAME, { .vop_rename = zfs_rename },
5612 VOPNAME_MKDIR, { .vop_mkdir = zfs_mkdir },
5613 VOPNAME_RMDIR, { .vop_rmdir = zfs_rmdir },
5614 VOPNAME_READDIR, { .vop_readdir = zfs_readdir },
5615 VOPNAME_SYMLINK, { .vop_symlink = zfs_symlink },
5616 VOPNAME_FSYNC, { .vop_fsync = zfs_fsync },
5617 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
5618 VOPNAME_FID, { .vop_fid = zfs_fid },
5619 VOPNAME_SEEK, { .vop_seek = zfs_seek },
5620 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
5621 VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr },
5622 VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr },
5623 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
5628 * Regular file vnode operations template
5630 vnodeops_t *zfs_fvnodeops;
5631 const fs_operation_def_t zfs_fvnodeops_template[] = {
5632 VOPNAME_OPEN, { .vop_open = zfs_open },
5633 VOPNAME_CLOSE, { .vop_close = zfs_close },
5634 VOPNAME_READ, { .vop_read = zfs_read },
5635 VOPNAME_WRITE, { .vop_write = zfs_write },
5636 VOPNAME_IOCTL, { .vop_ioctl = zfs_ioctl },
5637 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
5638 VOPNAME_SETATTR, { .vop_setattr = zfs_setattr },
5639 VOPNAME_ACCESS, { .vop_access = zfs_access },
5640 VOPNAME_LOOKUP, { .vop_lookup = zfs_lookup },
5641 VOPNAME_RENAME, { .vop_rename = zfs_rename },
5642 VOPNAME_FSYNC, { .vop_fsync = zfs_fsync },
5643 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
5644 VOPNAME_FID, { .vop_fid = zfs_fid },
5645 VOPNAME_SEEK, { .vop_seek = zfs_seek },
5646 VOPNAME_FRLOCK, { .vop_frlock = zfs_frlock },
5647 VOPNAME_SPACE, { .vop_space = zfs_space },
5648 VOPNAME_GETPAGE, { .vop_getpage = zfs_getpage },
5649 VOPNAME_PUTPAGE, { .vop_putpage = zfs_putpage },
5650 VOPNAME_MAP, { .vop_map = zfs_map },
5651 VOPNAME_ADDMAP, { .vop_addmap = zfs_addmap },
5652 VOPNAME_DELMAP, { .vop_delmap = zfs_delmap },
5653 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
5654 VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr },
5655 VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr },
5656 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
5657 VOPNAME_REQZCBUF, { .vop_reqzcbuf = zfs_reqzcbuf },
5658 VOPNAME_RETZCBUF, { .vop_retzcbuf = zfs_retzcbuf },
5663 * Symbolic link vnode operations template
5665 vnodeops_t *zfs_symvnodeops;
5666 const fs_operation_def_t zfs_symvnodeops_template[] = {
5667 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
5668 VOPNAME_SETATTR, { .vop_setattr = zfs_setattr },
5669 VOPNAME_ACCESS, { .vop_access = zfs_access },
5670 VOPNAME_RENAME, { .vop_rename = zfs_rename },
5671 VOPNAME_READLINK, { .vop_readlink = zfs_readlink },
5672 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
5673 VOPNAME_FID, { .vop_fid = zfs_fid },
5674 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
5675 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
5680 * special share hidden files vnode operations template
5682 vnodeops_t *zfs_sharevnodeops;
5683 const fs_operation_def_t zfs_sharevnodeops_template[] = {
5684 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
5685 VOPNAME_ACCESS, { .vop_access = zfs_access },
5686 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
5687 VOPNAME_FID, { .vop_fid = zfs_fid },
5688 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
5689 VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr },
5690 VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr },
5691 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
5696 * Extended attribute directory vnode operations template
5698  * This template is identical to the directory vnode
5699  * operations template except for restricted operations:
 *	VOP_MKDIR() and VOP_SYMLINK() (both return EINVAL here).
5703 * Note that there are other restrictions embedded in:
5704 * zfs_create() - restrict type to VREG
5705 * zfs_link() - no links into/out of attribute space
5706 * zfs_rename() - no moves into/out of attribute space
5708 vnodeops_t *zfs_xdvnodeops;
5709 const fs_operation_def_t zfs_xdvnodeops_template[] = {
5710 VOPNAME_OPEN, { .vop_open = zfs_open },
5711 VOPNAME_CLOSE, { .vop_close = zfs_close },
5712 VOPNAME_IOCTL, { .vop_ioctl = zfs_ioctl },
5713 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
5714 VOPNAME_SETATTR, { .vop_setattr = zfs_setattr },
5715 VOPNAME_ACCESS, { .vop_access = zfs_access },
5716 VOPNAME_LOOKUP, { .vop_lookup = zfs_lookup },
5717 VOPNAME_CREATE, { .vop_create = zfs_create },
5718 VOPNAME_REMOVE, { .vop_remove = zfs_remove },
5719 VOPNAME_LINK, { .vop_link = zfs_link },
5720 VOPNAME_RENAME, { .vop_rename = zfs_rename },
5721 VOPNAME_MKDIR, { .error = zfs_inval },
5722 VOPNAME_RMDIR, { .vop_rmdir = zfs_rmdir },
5723 VOPNAME_READDIR, { .vop_readdir = zfs_readdir },
5724 VOPNAME_SYMLINK, { .error = zfs_inval },
5725 VOPNAME_FSYNC, { .vop_fsync = zfs_fsync },
5726 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
5727 VOPNAME_FID, { .vop_fid = zfs_fid },
5728 VOPNAME_SEEK, { .vop_seek = zfs_seek },
5729 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
5730 VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr },
5731 VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr },
5732 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
5737 * Error vnode operations template
5739 vnodeops_t *zfs_evnodeops;
5740 const fs_operation_def_t zfs_evnodeops_template[] = {
5741 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
5742 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
5745 #endif /* illumos */
5748 ioflags(int ioflags)
5752 if (ioflags & IO_APPEND)
5754 if (ioflags & IO_NDELAY)
5756 if (ioflags & IO_SYNC)
5757 flags |= (FSYNC | FDSYNC | FRSYNC);
5763 zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
5765 znode_t *zp = VTOZ(vp);
5766 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5767 objset_t *os = zp->z_zfsvfs->z_os;
5768 vm_page_t mfirst, mlast, mreq;
5772 off_t startoff, endoff;
5774 vm_pindex_t reqstart, reqend;
5775 int pcount, lsize, reqsize, size;
5780 pcount = OFF_TO_IDX(round_page(count));
5782 object = mreq->object;
5785 KASSERT(vp->v_object == object, ("mismatching object"));
5787 if (pcount > 1 && zp->z_blksz > PAGESIZE) {
5788 startoff = rounddown(IDX_TO_OFF(mreq->pindex), zp->z_blksz);
5789 reqstart = OFF_TO_IDX(round_page(startoff));
5790 if (reqstart < m[0]->pindex)
5793 reqstart = reqstart - m[0]->pindex;
5794 endoff = roundup(IDX_TO_OFF(mreq->pindex) + PAGE_SIZE,
5796 reqend = OFF_TO_IDX(trunc_page(endoff)) - 1;
5797 if (reqend > m[pcount - 1]->pindex)
5798 reqend = m[pcount - 1]->pindex;
5799 reqsize = reqend - m[reqstart]->pindex + 1;
5800 KASSERT(reqstart <= reqpage && reqpage < reqstart + reqsize,
5801 ("reqpage beyond [reqstart, reqstart + reqsize[ bounds"));
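	/*
	 * Example of the expansion above (illustrative only; the
	 * numbers assume 4K pages and the usual pindex arithmetic):
	 * with zp->z_blksz = 128K, m[] covering pindexes 32..47 and
	 * mreq->pindex = 40, startoff rounds down to 128K (pindex 32)
	 * and endoff rounds up to 256K, so the run is widened to the
	 * whole 128K block containing the requested page and then
	 * clipped to the supplied page array.
	 */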
5806 mfirst = m[reqstart];
5807 mlast = m[reqstart + reqsize - 1];
5809 zfs_vmobject_wlock(object);
5811 for (i = 0; i < reqstart; i++) {
5814 vm_page_unlock(m[i]);
5816 for (i = reqstart + reqsize; i < pcount; i++) {
5819 vm_page_unlock(m[i]);
5822 if (mreq->valid && reqsize == 1) {
5823 if (mreq->valid != VM_PAGE_BITS_ALL)
5824 vm_page_zero_invalid(mreq, TRUE);
5825 zfs_vmobject_wunlock(object);
5827 return (zfs_vm_pagerret_ok);
5830 PCPU_INC(cnt.v_vnodein);
5831 PCPU_ADD(cnt.v_vnodepgsin, reqsize);
5833 if (IDX_TO_OFF(mreq->pindex) >= object->un_pager.vnp.vnp_size) {
5834 for (i = reqstart; i < reqstart + reqsize; i++) {
5838 vm_page_unlock(m[i]);
5841 zfs_vmobject_wunlock(object);
5843 return (zfs_vm_pagerret_bad);
5847 if (IDX_TO_OFF(mlast->pindex) + lsize > object->un_pager.vnp.vnp_size)
5848 lsize = object->un_pager.vnp.vnp_size - IDX_TO_OFF(mlast->pindex);
5850 zfs_vmobject_wunlock(object);
5852 for (i = reqstart; i < reqstart + reqsize; i++) {
5854 if (i == (reqstart + reqsize - 1))
5856 va = zfs_map_page(m[i], &sf);
5857 error = dmu_read(os, zp->z_id, IDX_TO_OFF(m[i]->pindex),
5858 size, va, DMU_READ_PREFETCH);
5859 if (size != PAGE_SIZE)
5860 bzero(va + size, PAGE_SIZE - size);
5866 zfs_vmobject_wlock(object);
5868 for (i = reqstart; i < reqstart + reqsize; i++) {
5870 m[i]->valid = VM_PAGE_BITS_ALL;
5871 KASSERT(m[i]->dirty == 0, ("zfs_getpages: page %p is dirty", m[i]));
5873 vm_page_readahead_finish(m[i]);
5876 zfs_vmobject_wunlock(object);
5878 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
5880 return (error ? zfs_vm_pagerret_error : zfs_vm_pagerret_ok);
5884 zfs_freebsd_getpages(ap)
5885 struct vop_getpages_args /* {
5890 vm_ooffset_t a_offset;
5894 return (zfs_getpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_reqpage));
5898 zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
5901 znode_t *zp = VTOZ(vp);
5902 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5911 vm_ooffset_t lo_off;
5922 object = vp->v_object;
5926 KASSERT(ma[0]->object == object, ("mismatching object"));
5927 KASSERT(len > 0 && (len & PAGE_MASK) == 0, ("unexpected length"));
5929 for (i = 0; i < pcount; i++)
5930 rtvals[i] = zfs_vm_pagerret_error;
5932 off = IDX_TO_OFF(ma[0]->pindex);
5933 blksz = zp->z_blksz;
5934 lo_off = rounddown(off, blksz);
5935 lo_len = roundup(len + (off - lo_off), blksz);
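	/*
	 * Illustrative example (not from the original source): with
	 * blksz = 128K, off = 8K and len = 16K, the range lock below
	 * covers lo_off = 0 through lo_len = 128K, i.e. the whole
	 * block(s) overlapped by the pages being pushed.
	 */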
5936 rl = zfs_range_lock(zp, lo_off, lo_len, RL_WRITER);
5938 zfs_vmobject_wlock(object);
5939 if (len + off > object->un_pager.vnp.vnp_size) {
5940 if (object->un_pager.vnp.vnp_size > off) {
5943 len = object->un_pager.vnp.vnp_size - off;
5945 if ((pgoff = (int)len & PAGE_MASK) != 0) {
5947 * If the object is locked and the following
5948 * conditions hold, then the page's dirty
5949  * field cannot be concurrently changed by a
 * pmap operation.
5953 vm_page_assert_sbusied(m);
5954 KASSERT(!pmap_page_is_write_mapped(m),
5955 ("zfs_putpages: page %p is not read-only", m));
5956 vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
5963 if (ncount < pcount) {
5964 for (i = ncount; i < pcount; i++) {
5965 rtvals[i] = zfs_vm_pagerret_bad;
5969 zfs_vmobject_wunlock(object);
5974 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
5975 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
5980 tx = dmu_tx_create(zfsvfs->z_os);
5981 dmu_tx_hold_write(tx, zp->z_id, off, len);
5983 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
5984 zfs_sa_upgrade_txholds(tx, zp);
5985 err = dmu_tx_assign(tx, TXG_NOWAIT);
5987 if (err == ERESTART) {
5996 if (zp->z_blksz < PAGE_SIZE) {
5998 for (i = 0; len > 0; off += tocopy, len -= tocopy, i++) {
5999 tocopy = len > PAGE_SIZE ? PAGE_SIZE : len;
6000 va = zfs_map_page(ma[i], &sf);
6001 dmu_write(zfsvfs->z_os, zp->z_id, off, tocopy, va, tx);
6005 err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, ma, tx);
6009 uint64_t mtime[2], ctime[2];
6010 sa_bulk_attr_t bulk[3];
6013 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
6015 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
6017 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
6019 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
6021 (void)sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
6022 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
6024 zfs_vmobject_wlock(object);
6025 for (i = 0; i < ncount; i++) {
6026 rtvals[i] = zfs_vm_pagerret_ok;
6027 vm_page_undirty(ma[i]);
6029 zfs_vmobject_wunlock(object);
6030 PCPU_INC(cnt.v_vnodeout);
6031 PCPU_ADD(cnt.v_vnodepgsout, ncount);
6036 zfs_range_unlock(rl);
6037 if ((flags & (zfs_vm_pagerput_sync | zfs_vm_pagerput_inval)) != 0 ||
6038 zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
6039 zil_commit(zfsvfs->z_log, zp->z_id);
6045 zfs_freebsd_putpages(ap)
6046 struct vop_putpages_args /* {
6052 vm_ooffset_t a_offset;
6056 return (zfs_putpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_sync,
6061 zfs_freebsd_bmap(ap)
6062 struct vop_bmap_args /* {
6065 struct bufobj **a_bop;
6072 if (ap->a_bop != NULL)
6073 *ap->a_bop = &ap->a_vp->v_bufobj;
6074 if (ap->a_bnp != NULL)
6075 *ap->a_bnp = ap->a_bn;
6076 if (ap->a_runp != NULL)
6078 if (ap->a_runb != NULL)
6085 zfs_freebsd_open(ap)
6086 struct vop_open_args /* {
6089 struct ucred *a_cred;
6090 struct thread *a_td;
6093 vnode_t *vp = ap->a_vp;
6094 znode_t *zp = VTOZ(vp);
6097 error = zfs_open(&vp, ap->a_mode, ap->a_cred, NULL);
6099 vnode_create_vobject(vp, zp->z_size, ap->a_td);
6104 zfs_freebsd_close(ap)
6105 struct vop_close_args /* {
6108 struct ucred *a_cred;
6109 struct thread *a_td;
6113 return (zfs_close(ap->a_vp, ap->a_fflag, 1, 0, ap->a_cred, NULL));
6117 zfs_freebsd_ioctl(ap)
6118 struct vop_ioctl_args /* {
6128 return (zfs_ioctl(ap->a_vp, ap->a_command, (intptr_t)ap->a_data,
6129 ap->a_fflag, ap->a_cred, NULL, NULL));
6133 zfs_freebsd_read(ap)
6134 struct vop_read_args /* {
6138 struct ucred *a_cred;
6142 return (zfs_read(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
6147 zfs_freebsd_write(ap)
6148 struct vop_write_args /* {
6152 struct ucred *a_cred;
6156 return (zfs_write(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
6161 zfs_freebsd_access(ap)
6162 struct vop_access_args /* {
6164 accmode_t a_accmode;
6165 struct ucred *a_cred;
6166 struct thread *a_td;
6169 vnode_t *vp = ap->a_vp;
6170 znode_t *zp = VTOZ(vp);
6175  * ZFS itself only knows about VREAD, VWRITE, VEXEC and VAPPEND.
6177 accmode = ap->a_accmode & (VREAD|VWRITE|VEXEC|VAPPEND);
6179 error = zfs_access(ap->a_vp, accmode, 0, ap->a_cred, NULL);
6182 * VADMIN has to be handled by vaccess().
6185 accmode = ap->a_accmode & ~(VREAD|VWRITE|VEXEC|VAPPEND);
6187 error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
6188 zp->z_gid, accmode, ap->a_cred, NULL);
6193  * For VEXEC, ensure that at least one execute bit is set for
 * non-directories.
6196 if (error == 0 && (ap->a_accmode & VEXEC) != 0 && vp->v_type != VDIR &&
6197 (zp->z_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
6205 zfs_freebsd_lookup(ap)
6206 struct vop_lookup_args /* {
6207 struct vnode *a_dvp;
6208 struct vnode **a_vpp;
6209 struct componentname *a_cnp;
6212 struct componentname *cnp = ap->a_cnp;
6213 char nm[NAME_MAX + 1];
6215 ASSERT(cnp->cn_namelen < sizeof(nm));
6216 strlcpy(nm, cnp->cn_nameptr, MIN(cnp->cn_namelen + 1, sizeof(nm)));
6218 return (zfs_lookup(ap->a_dvp, nm, ap->a_vpp, cnp, cnp->cn_nameiop,
6219 cnp->cn_cred, cnp->cn_thread, 0));
6223 zfs_freebsd_create(ap)
6224 struct vop_create_args /* {
6225 struct vnode *a_dvp;
6226 struct vnode **a_vpp;
6227 struct componentname *a_cnp;
6228 struct vattr *a_vap;
6231 struct componentname *cnp = ap->a_cnp;
6232 vattr_t *vap = ap->a_vap;
6235 ASSERT(cnp->cn_flags & SAVENAME);
6237 vattr_init_mask(vap);
6238 mode = vap->va_mode & ALLPERMS;
6240 error = zfs_create(ap->a_dvp, cnp->cn_nameptr, vap, !EXCL, mode,
6241 ap->a_vpp, cnp->cn_cred, cnp->cn_thread);
6242 #ifdef FREEBSD_NAMECACHE
6243 if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0)
6244 cache_enter(ap->a_dvp, *ap->a_vpp, cnp);
6250 zfs_freebsd_remove(ap)
6251 struct vop_remove_args /* {
6252 struct vnode *a_dvp;
6254 struct componentname *a_cnp;
6258 ASSERT(ap->a_cnp->cn_flags & SAVENAME);
6260 return (zfs_remove(ap->a_dvp, ap->a_cnp->cn_nameptr,
6261 ap->a_cnp->cn_cred, NULL, 0));
6265 zfs_freebsd_mkdir(ap)
6266 struct vop_mkdir_args /* {
6267 struct vnode *a_dvp;
6268 struct vnode **a_vpp;
6269 struct componentname *a_cnp;
6270 struct vattr *a_vap;
6273 vattr_t *vap = ap->a_vap;
6275 ASSERT(ap->a_cnp->cn_flags & SAVENAME);
6277 vattr_init_mask(vap);
6279 return (zfs_mkdir(ap->a_dvp, ap->a_cnp->cn_nameptr, vap, ap->a_vpp,
6280 ap->a_cnp->cn_cred, NULL, 0, NULL));
6284 zfs_freebsd_rmdir(ap)
6285 struct vop_rmdir_args /* {
6286 struct vnode *a_dvp;
6288 struct componentname *a_cnp;
6291 struct componentname *cnp = ap->a_cnp;
6293 ASSERT(cnp->cn_flags & SAVENAME);
6295 return (zfs_rmdir(ap->a_dvp, cnp->cn_nameptr, NULL, cnp->cn_cred, NULL, 0));
6299 zfs_freebsd_readdir(ap)
6300 struct vop_readdir_args /* {
6303 struct ucred *a_cred;
6310 return (zfs_readdir(ap->a_vp, ap->a_uio, ap->a_cred, ap->a_eofflag,
6311 ap->a_ncookies, ap->a_cookies));
6315 zfs_freebsd_fsync(ap)
6316 struct vop_fsync_args /* {
6319 struct thread *a_td;
6324 return (zfs_fsync(ap->a_vp, 0, ap->a_td->td_ucred, NULL));
6328 zfs_freebsd_getattr(ap)
6329 struct vop_getattr_args /* {
6331 struct vattr *a_vap;
6332 struct ucred *a_cred;
6335 vattr_t *vap = ap->a_vap;
6341 xvap.xva_vattr = *vap;
6342 xvap.xva_vattr.va_mask |= AT_XVATTR;
6344 /* Convert chflags into ZFS-type flags. */
6345  /* XXX: what about SF_SETTABLE? */
6346 XVA_SET_REQ(&xvap, XAT_IMMUTABLE);
6347 XVA_SET_REQ(&xvap, XAT_APPENDONLY);
6348 XVA_SET_REQ(&xvap, XAT_NOUNLINK);
6349 XVA_SET_REQ(&xvap, XAT_NODUMP);
6350 XVA_SET_REQ(&xvap, XAT_READONLY);
6351 XVA_SET_REQ(&xvap, XAT_ARCHIVE);
6352 XVA_SET_REQ(&xvap, XAT_SYSTEM);
6353 XVA_SET_REQ(&xvap, XAT_HIDDEN);
6354 XVA_SET_REQ(&xvap, XAT_REPARSE);
6355 XVA_SET_REQ(&xvap, XAT_OFFLINE);
6356 XVA_SET_REQ(&xvap, XAT_SPARSE);
6358 error = zfs_getattr(ap->a_vp, (vattr_t *)&xvap, 0, ap->a_cred, NULL);
6362 /* Convert ZFS xattr into chflags. */
6363 #define FLAG_CHECK(fflag, xflag, xfield) do { \
6364 if (XVA_ISSET_RTN(&xvap, (xflag)) && (xfield) != 0) \
6365 fflags |= (fflag); \
6367 FLAG_CHECK(SF_IMMUTABLE, XAT_IMMUTABLE,
6368 xvap.xva_xoptattrs.xoa_immutable);
6369 FLAG_CHECK(SF_APPEND, XAT_APPENDONLY,
6370 xvap.xva_xoptattrs.xoa_appendonly);
6371 FLAG_CHECK(SF_NOUNLINK, XAT_NOUNLINK,
6372 xvap.xva_xoptattrs.xoa_nounlink);
6373 FLAG_CHECK(UF_ARCHIVE, XAT_ARCHIVE,
6374 xvap.xva_xoptattrs.xoa_archive);
6375 FLAG_CHECK(UF_NODUMP, XAT_NODUMP,
6376 xvap.xva_xoptattrs.xoa_nodump);
6377 FLAG_CHECK(UF_READONLY, XAT_READONLY,
6378 xvap.xva_xoptattrs.xoa_readonly);
6379 FLAG_CHECK(UF_SYSTEM, XAT_SYSTEM,
6380 xvap.xva_xoptattrs.xoa_system);
6381 FLAG_CHECK(UF_HIDDEN, XAT_HIDDEN,
6382 xvap.xva_xoptattrs.xoa_hidden);
6383 FLAG_CHECK(UF_REPARSE, XAT_REPARSE,
6384 xvap.xva_xoptattrs.xoa_reparse);
6385 FLAG_CHECK(UF_OFFLINE, XAT_OFFLINE,
6386 xvap.xva_xoptattrs.xoa_offline);
6387 FLAG_CHECK(UF_SPARSE, XAT_SPARSE,
6388 xvap.xva_xoptattrs.xoa_sparse);
6391 *vap = xvap.xva_vattr;
6392 vap->va_flags = fflags;
6397 zfs_freebsd_setattr(ap)
6398 struct vop_setattr_args /* {
6400 struct vattr *a_vap;
6401 struct ucred *a_cred;
6404 vnode_t *vp = ap->a_vp;
6405 vattr_t *vap = ap->a_vap;
6406 cred_t *cred = ap->a_cred;
6411 vattr_init_mask(vap);
6412 vap->va_mask &= ~AT_NOSET;
6415 xvap.xva_vattr = *vap;
6417 zflags = VTOZ(vp)->z_pflags;
6419 if (vap->va_flags != VNOVAL) {
6420 zfsvfs_t *zfsvfs = VTOZ(vp)->z_zfsvfs;
6423 if (zfsvfs->z_use_fuids == B_FALSE)
6424 return (EOPNOTSUPP);
6426 fflags = vap->va_flags;
6429 * We need to figure out whether it makes sense to allow
6430 * UF_REPARSE through, since we don't really have other
6431 * facilities to handle reparse points and zfs_setattr()
6432 * doesn't currently allow setting that attribute anyway.
6434 if ((fflags & ~(SF_IMMUTABLE|SF_APPEND|SF_NOUNLINK|UF_ARCHIVE|
6435 UF_NODUMP|UF_SYSTEM|UF_HIDDEN|UF_READONLY|UF_REPARSE|
6436 UF_OFFLINE|UF_SPARSE)) != 0)
6437 return (EOPNOTSUPP);
6439 * Unprivileged processes are not permitted to unset system
6440 * flags, or modify flags if any system flags are set.
6441 * Privileged non-jail processes may not modify system flags
6442 * if securelevel > 0 and any existing system flags are set.
6443 * Privileged jail processes behave like privileged non-jail
6444  * processes if the security.jail.chflags_allowed sysctl is
6445  * non-zero; otherwise, they behave like unprivileged
 * processes.
6448 if (secpolicy_fs_owner(vp->v_mount, cred) == 0 ||
6449 priv_check_cred(cred, PRIV_VFS_SYSFLAGS, 0) == 0) {
6451 (ZFS_IMMUTABLE | ZFS_APPENDONLY | ZFS_NOUNLINK)) {
6452 error = securelevel_gt(cred, 0);
6458 * Callers may only modify the file flags on objects they
6459 * have VADMIN rights for.
6461 if ((error = VOP_ACCESS(vp, VADMIN, cred, curthread)) != 0)
6464 (ZFS_IMMUTABLE | ZFS_APPENDONLY | ZFS_NOUNLINK)) {
6468 (SF_IMMUTABLE | SF_APPEND | SF_NOUNLINK)) {
6473 #define FLAG_CHANGE(fflag, zflag, xflag, xfield) do { \
6474 if (((fflags & (fflag)) && !(zflags & (zflag))) || \
6475 ((zflags & (zflag)) && !(fflags & (fflag)))) { \
6476 XVA_SET_REQ(&xvap, (xflag)); \
6477 (xfield) = ((fflags & (fflag)) != 0); \
6480 /* Convert chflags into ZFS-type flags. */
6481  /* XXX: what about SF_SETTABLE? */
6482 FLAG_CHANGE(SF_IMMUTABLE, ZFS_IMMUTABLE, XAT_IMMUTABLE,
6483 xvap.xva_xoptattrs.xoa_immutable);
6484 FLAG_CHANGE(SF_APPEND, ZFS_APPENDONLY, XAT_APPENDONLY,
6485 xvap.xva_xoptattrs.xoa_appendonly);
6486 FLAG_CHANGE(SF_NOUNLINK, ZFS_NOUNLINK, XAT_NOUNLINK,
6487 xvap.xva_xoptattrs.xoa_nounlink);
6488 FLAG_CHANGE(UF_ARCHIVE, ZFS_ARCHIVE, XAT_ARCHIVE,
6489 xvap.xva_xoptattrs.xoa_archive);
6490 FLAG_CHANGE(UF_NODUMP, ZFS_NODUMP, XAT_NODUMP,
6491 xvap.xva_xoptattrs.xoa_nodump);
6492 FLAG_CHANGE(UF_READONLY, ZFS_READONLY, XAT_READONLY,
6493 xvap.xva_xoptattrs.xoa_readonly);
6494 FLAG_CHANGE(UF_SYSTEM, ZFS_SYSTEM, XAT_SYSTEM,
6495 xvap.xva_xoptattrs.xoa_system);
6496 FLAG_CHANGE(UF_HIDDEN, ZFS_HIDDEN, XAT_HIDDEN,
6497 xvap.xva_xoptattrs.xoa_hidden);
6498 FLAG_CHANGE(UF_REPARSE, ZFS_REPARSE, XAT_REPARSE,
6499  xvap.xva_xoptattrs.xoa_reparse);
6500 FLAG_CHANGE(UF_OFFLINE, ZFS_OFFLINE, XAT_OFFLINE,
6501 xvap.xva_xoptattrs.xoa_offline);
6502 FLAG_CHANGE(UF_SPARSE, ZFS_SPARSE, XAT_SPARSE,
6503 xvap.xva_xoptattrs.xoa_sparse);
6506 return (zfs_setattr(vp, (vattr_t *)&xvap, 0, cred, NULL));
6510 zfs_freebsd_rename(ap)
6511 struct vop_rename_args /* {
6512 struct vnode *a_fdvp;
6513 struct vnode *a_fvp;
6514 struct componentname *a_fcnp;
6515 struct vnode *a_tdvp;
6516 struct vnode *a_tvp;
6517 struct componentname *a_tcnp;
6520 vnode_t *fdvp = ap->a_fdvp;
6521 vnode_t *fvp = ap->a_fvp;
6522 vnode_t *tdvp = ap->a_tdvp;
6523 vnode_t *tvp = ap->a_tvp;
6526 ASSERT(ap->a_fcnp->cn_flags & (SAVENAME|SAVESTART));
6527 ASSERT(ap->a_tcnp->cn_flags & (SAVENAME|SAVESTART));
6530 * Check for cross-device rename.
6532 if ((fdvp->v_mount != tdvp->v_mount) ||
6533 (tvp && (fdvp->v_mount != tvp->v_mount)))
6536 error = zfs_rename(fdvp, ap->a_fcnp->cn_nameptr, tdvp,
6537 ap->a_tcnp->cn_nameptr, ap->a_fcnp->cn_cred, NULL, 0);
6551 zfs_freebsd_symlink(ap)
6552 struct vop_symlink_args /* {
6553 struct vnode *a_dvp;
6554 struct vnode **a_vpp;
6555 struct componentname *a_cnp;
6556 struct vattr *a_vap;
6560 struct componentname *cnp = ap->a_cnp;
6561 vattr_t *vap = ap->a_vap;
6563 ASSERT(cnp->cn_flags & SAVENAME);
6565 vap->va_type = VLNK; /* FreeBSD: Syscall only sets va_mode. */
6566 vattr_init_mask(vap);
6568 return (zfs_symlink(ap->a_dvp, ap->a_vpp, cnp->cn_nameptr, vap,
6569 ap->a_target, cnp->cn_cred, cnp->cn_thread));
6573 zfs_freebsd_readlink(ap)
6574 struct vop_readlink_args /* {
6577 struct ucred *a_cred;
6581 return (zfs_readlink(ap->a_vp, ap->a_uio, ap->a_cred, NULL));
6585 zfs_freebsd_link(ap)
6586 struct vop_link_args /* {
6587 struct vnode *a_tdvp;
6589 struct componentname *a_cnp;
6592 struct componentname *cnp = ap->a_cnp;
6593 vnode_t *vp = ap->a_vp;
6594 vnode_t *tdvp = ap->a_tdvp;
6596 if (tdvp->v_mount != vp->v_mount)
6599 ASSERT(cnp->cn_flags & SAVENAME);
6601 return (zfs_link(tdvp, vp, cnp->cn_nameptr, cnp->cn_cred, NULL, 0));
6605 zfs_freebsd_inactive(ap)
6606 struct vop_inactive_args /* {
6608 struct thread *a_td;
6611 vnode_t *vp = ap->a_vp;
6613 zfs_inactive(vp, ap->a_td->td_ucred, NULL);
6618 zfs_freebsd_reclaim(ap)
6619 struct vop_reclaim_args /* {
6621 struct thread *a_td;
6624 vnode_t *vp = ap->a_vp;
6625 znode_t *zp = VTOZ(vp);
6626 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
6630 /* Destroy the vm object and flush associated pages. */
6631 vnode_destroy_vobject(vp);
6634 * z_teardown_inactive_lock protects from a race with
6635 * zfs_znode_dmu_fini in zfsvfs_teardown during
6638 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
6639 if (zp->z_sa_hdl == NULL)
6643 rw_exit(&zfsvfs->z_teardown_inactive_lock);
6651 struct vop_fid_args /* {
6657 return (zfs_fid(ap->a_vp, (void *)ap->a_fid, NULL));
6661 zfs_freebsd_pathconf(ap)
6662 struct vop_pathconf_args /* {
6665 register_t *a_retval;
6671 error = zfs_pathconf(ap->a_vp, ap->a_name, &val, curthread->td_ucred, NULL);
6673 *ap->a_retval = val;
6674 else if (error == EOPNOTSUPP)
6675 error = vop_stdpathconf(ap);
6680 zfs_freebsd_fifo_pathconf(ap)
6681 struct vop_pathconf_args /* {
6684 register_t *a_retval;
6688 switch (ap->a_name) {
6689 case _PC_ACL_EXTENDED:
6691 case _PC_ACL_PATH_MAX:
6692 case _PC_MAC_PRESENT:
6693 return (zfs_freebsd_pathconf(ap));
6695 return (fifo_specops.vop_pathconf(ap));
6700  * FreeBSD's extended attribute namespace determines the file name prefix
6701  * used for a ZFS extended attribute name:
6704 * system freebsd:system:
6705 * user (none, can be used to access ZFS fsattr(5) attributes
6706 * created on Solaris)
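 *
 * For example (illustrative, following the mapping above): a "user"
 * namespace attribute called "foo" is stored under the plain name
 * "foo" (and is therefore also visible as a Solaris fsattr), while a
 * "system" namespace attribute called "foo" is stored as
 * "freebsd:system:foo".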
6709 zfs_create_attrname(int attrnamespace, const char *name, char *attrname,
6712 const char *namespace, *prefix, *suffix;
6714 /* We don't allow '/' character in attribute name. */
6715 if (strchr(name, '/') != NULL)
6717 /* We don't allow attribute names that start with "freebsd:" string. */
6718 if (strncmp(name, "freebsd:", 8) == 0)
6721 bzero(attrname, size);
6723 switch (attrnamespace) {
6724 case EXTATTR_NAMESPACE_USER:
6726 prefix = "freebsd:";
6727 namespace = EXTATTR_NAMESPACE_USER_STRING;
6731 * This is the default namespace by which we can access all
6732 * attributes created on Solaris.
6734 prefix = namespace = suffix = "";
6737 case EXTATTR_NAMESPACE_SYSTEM:
6738 prefix = "freebsd:";
6739 namespace = EXTATTR_NAMESPACE_SYSTEM_STRING;
6742 case EXTATTR_NAMESPACE_EMPTY:
6746 if (snprintf(attrname, size, "%s%s%s%s", prefix, namespace, suffix,
6748 return (ENAMETOOLONG);
6754  * Vnode operation to retrieve a named extended attribute.
6757 zfs_getextattr(struct vop_getextattr_args *ap)
6760 IN struct vnode *a_vp;
6761 IN int a_attrnamespace;
6762 IN const char *a_name;
6763 INOUT struct uio *a_uio;
6765 IN struct ucred *a_cred;
6766 IN struct thread *a_td;
6770 zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
6771 struct thread *td = ap->a_td;
6772 struct nameidata nd;
6775 vnode_t *xvp = NULL, *vp;
6778 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
6779 ap->a_cred, ap->a_td, VREAD);
6783 error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
6790 error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
6798 NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname,
6800 error = vn_open_cred(&nd, &flags, 0, 0, ap->a_cred, NULL);
6802 NDFREE(&nd, NDF_ONLY_PNBUF);
6805 if (error == ENOENT)
6810 if (ap->a_size != NULL) {
6811 error = VOP_GETATTR(vp, &va, ap->a_cred);
6813 *ap->a_size = (size_t)va.va_size;
6814 } else if (ap->a_uio != NULL)
6815 error = VOP_READ(vp, ap->a_uio, IO_UNIT, ap->a_cred);
6818 vn_close(vp, flags, ap->a_cred, td);
6825 * Vnode operation to remove a named attribute.
6828 zfs_deleteextattr(struct vop_deleteextattr_args *ap)
6831 IN struct vnode *a_vp;
6832 IN int a_attrnamespace;
6833 IN const char *a_name;
6834 IN struct ucred *a_cred;
6835 IN struct thread *a_td;
6839 zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
6840 struct thread *td = ap->a_td;
6841 struct nameidata nd;
6844 vnode_t *xvp = NULL, *vp;
6847 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
6848 ap->a_cred, ap->a_td, VWRITE);
6852 error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
6859 error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
6866 NDINIT_ATVP(&nd, DELETE, NOFOLLOW | LOCKPARENT | LOCKLEAF,
6867 UIO_SYSSPACE, attrname, xvp, td);
6872 NDFREE(&nd, NDF_ONLY_PNBUF);
6873 if (error == ENOENT)
6878 error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
6879 NDFREE(&nd, NDF_ONLY_PNBUF);
6882 if (vp == nd.ni_dvp)
6892 * Vnode operation to set a named attribute.
6895 zfs_setextattr(struct vop_setextattr_args *ap)
6898 IN struct vnode *a_vp;
6899 IN int a_attrnamespace;
6900 IN const char *a_name;
6901 INOUT struct uio *a_uio;
6902 IN struct ucred *a_cred;
6903 IN struct thread *a_td;
6907 zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
6908 struct thread *td = ap->a_td;
6909 struct nameidata nd;
6912 vnode_t *xvp = NULL, *vp;
6915 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
6916 ap->a_cred, ap->a_td, VWRITE);
6920 error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
6927 error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
6928 LOOKUP_XATTR | CREATE_XATTR_DIR);
6934 flags = FFLAGS(O_WRONLY | O_CREAT);
6935 NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname,
6937 error = vn_open_cred(&nd, &flags, 0600, 0, ap->a_cred, NULL);
6939 NDFREE(&nd, NDF_ONLY_PNBUF);
6947 error = VOP_SETATTR(vp, &va, ap->a_cred);
6949 VOP_WRITE(vp, ap->a_uio, IO_UNIT, ap->a_cred);
6952 vn_close(vp, flags, ap->a_cred, td);
6959 * Vnode operation to retrieve extended attributes on a vnode.
6962 zfs_listextattr(struct vop_listextattr_args *ap)
6965 IN struct vnode *a_vp;
6966 IN int a_attrnamespace;
6967 INOUT struct uio *a_uio;
6969 IN struct ucred *a_cred;
6970 IN struct thread *a_td;
6974 zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
6975 struct thread *td = ap->a_td;
6976 struct nameidata nd;
6977 char attrprefix[16];
6978 u_char dirbuf[sizeof(struct dirent)];
6981 struct uio auio, *uio = ap->a_uio;
6982 size_t *sizep = ap->a_size;
6984 vnode_t *xvp = NULL, *vp;
6985 int done, error, eof, pos;
6987 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
6988 ap->a_cred, ap->a_td, VREAD);
6992 error = zfs_create_attrname(ap->a_attrnamespace, "", attrprefix,
6993 sizeof(attrprefix));
6996 plen = strlen(attrprefix);
7003 error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
7008 * ENOATTR means that the EA directory does not yet exist,
7009 * i.e. there are no extended attributes there.
7011 if (error == ENOATTR)
7016 NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKSHARED,
7017 UIO_SYSSPACE, ".", xvp, td);
7020 NDFREE(&nd, NDF_ONLY_PNBUF);
7026 auio.uio_iov = &aiov;
7027 auio.uio_iovcnt = 1;
7028 auio.uio_segflg = UIO_SYSSPACE;
7030 auio.uio_rw = UIO_READ;
7031 auio.uio_offset = 0;
7036 aiov.iov_base = (void *)dirbuf;
7037 aiov.iov_len = sizeof(dirbuf);
7038 auio.uio_resid = sizeof(dirbuf);
7039 error = VOP_READDIR(vp, &auio, ap->a_cred, &eof, NULL, NULL);
7040 done = sizeof(dirbuf) - auio.uio_resid;
7043 for (pos = 0; pos < done;) {
7044 dp = (struct dirent *)(dirbuf + pos);
7045 pos += dp->d_reclen;
7047  * XXX: Temporarily we also accept DT_UNKNOWN, as this
7048  * is what we get when the attribute was created on Solaris.
7050 if (dp->d_type != DT_REG && dp->d_type != DT_UNKNOWN)
7052 if (plen == 0 && strncmp(dp->d_name, "freebsd:", 8) == 0)
7054 else if (strncmp(dp->d_name, attrprefix, plen) != 0)
7056 nlen = dp->d_namlen - plen;
7059 else if (uio != NULL) {
7061 * Format of extattr name entry is one byte for
7062 * length and the rest for name.
7064 error = uiomove(&nlen, 1, uio->uio_rw, uio);
7066 error = uiomove(dp->d_name + plen, nlen,
7073 } while (!eof && error == 0);
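	/*
	 * Illustrative layout of the list produced above (not from the
	 * original source; it assumes a single user-namespace attribute
	 * named "foo"): the uio receives the bytes { 0x03, 'f', 'o', 'o' },
	 * i.e. a one-byte length followed by the name with its namespace
	 * prefix stripped.
	 */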
7082 zfs_freebsd_getacl(ap)
7083 struct vop_getacl_args /* {
7092 vsecattr_t vsecattr;
7094 if (ap->a_type != ACL_TYPE_NFS4)
7097 vsecattr.vsa_mask = VSA_ACE | VSA_ACECNT;
7098 if (error = zfs_getsecattr(ap->a_vp, &vsecattr, 0, ap->a_cred, NULL))
7101 error = acl_from_aces(ap->a_aclp, vsecattr.vsa_aclentp, vsecattr.vsa_aclcnt);
7102 if (vsecattr.vsa_aclentp != NULL)
7103 kmem_free(vsecattr.vsa_aclentp, vsecattr.vsa_aclentsz);
7109 zfs_freebsd_setacl(ap)
7110 struct vop_setacl_args /* {
7119 vsecattr_t vsecattr;
7120 int aclbsize; /* size of acl list in bytes */
7123 if (ap->a_type != ACL_TYPE_NFS4)
7126 if (ap->a_aclp->acl_cnt < 1 || ap->a_aclp->acl_cnt > MAX_ACL_ENTRIES)
7130 * With NFSv4 ACLs, chmod(2) may need to add additional entries,
7131 * splitting every entry into two and appending "canonical six"
7132 * entries at the end. Don't allow for setting an ACL that would
7133 * cause chmod(2) to run out of ACL entries.
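 *
 * For example (illustrative; assuming ACL_MAX_ENTRIES is 254, its
 * stock FreeBSD value): an ACL with 124 entries passes the check
 * below (124 * 2 + 6 = 254), while one with 125 entries would be
 * rejected (125 * 2 + 6 = 256 > 254).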
7135 if (ap->a_aclp->acl_cnt * 2 + 6 > ACL_MAX_ENTRIES)
7138 error = acl_nfs4_check(ap->a_aclp, ap->a_vp->v_type == VDIR);
7142 vsecattr.vsa_mask = VSA_ACE;
7143 aclbsize = ap->a_aclp->acl_cnt * sizeof(ace_t);
7144 vsecattr.vsa_aclentp = kmem_alloc(aclbsize, KM_SLEEP);
7145 aaclp = vsecattr.vsa_aclentp;
7146 vsecattr.vsa_aclentsz = aclbsize;
7148 aces_from_acl(vsecattr.vsa_aclentp, &vsecattr.vsa_aclcnt, ap->a_aclp);
7149 error = zfs_setsecattr(ap->a_vp, &vsecattr, 0, ap->a_cred, NULL);
7150 kmem_free(aaclp, aclbsize);
7156 zfs_freebsd_aclcheck(ap)
7157 struct vop_aclcheck_args /* {
7166 return (EOPNOTSUPP);
7169 struct vop_vector zfs_vnodeops;
7170 struct vop_vector zfs_fifoops;
7171 struct vop_vector zfs_shareops;
7173 struct vop_vector zfs_vnodeops = {
7174 .vop_default = &default_vnodeops,
7175 .vop_inactive = zfs_freebsd_inactive,
7176 .vop_reclaim = zfs_freebsd_reclaim,
7177 .vop_access = zfs_freebsd_access,
7178 #ifdef FREEBSD_NAMECACHE
7179 .vop_lookup = vfs_cache_lookup,
7180 .vop_cachedlookup = zfs_freebsd_lookup,
7182 .vop_lookup = zfs_freebsd_lookup,
7184 .vop_getattr = zfs_freebsd_getattr,
7185 .vop_setattr = zfs_freebsd_setattr,
7186 .vop_create = zfs_freebsd_create,
7187 .vop_mknod = zfs_freebsd_create,
7188 .vop_mkdir = zfs_freebsd_mkdir,
7189 .vop_readdir = zfs_freebsd_readdir,
7190 .vop_fsync = zfs_freebsd_fsync,
7191 .vop_open = zfs_freebsd_open,
7192 .vop_close = zfs_freebsd_close,
7193 .vop_rmdir = zfs_freebsd_rmdir,
7194 .vop_ioctl = zfs_freebsd_ioctl,
7195 .vop_link = zfs_freebsd_link,
7196 .vop_symlink = zfs_freebsd_symlink,
7197 .vop_readlink = zfs_freebsd_readlink,
7198 .vop_read = zfs_freebsd_read,
7199 .vop_write = zfs_freebsd_write,
7200 .vop_remove = zfs_freebsd_remove,
7201 .vop_rename = zfs_freebsd_rename,
7202 .vop_pathconf = zfs_freebsd_pathconf,
7203 .vop_bmap = zfs_freebsd_bmap,
7204 .vop_fid = zfs_freebsd_fid,
7205 .vop_getextattr = zfs_getextattr,
7206 .vop_deleteextattr = zfs_deleteextattr,
7207 .vop_setextattr = zfs_setextattr,
7208 .vop_listextattr = zfs_listextattr,
7209 .vop_getacl = zfs_freebsd_getacl,
7210 .vop_setacl = zfs_freebsd_setacl,
7211 .vop_aclcheck = zfs_freebsd_aclcheck,
7212 .vop_getpages = zfs_freebsd_getpages,
7213 .vop_putpages = zfs_freebsd_putpages,
7216 struct vop_vector zfs_fifoops = {
7217 .vop_default = &fifo_specops,
7218 .vop_fsync = zfs_freebsd_fsync,
7219 .vop_access = zfs_freebsd_access,
7220 .vop_getattr = zfs_freebsd_getattr,
7221 .vop_inactive = zfs_freebsd_inactive,
7222 .vop_read = VOP_PANIC,
7223 .vop_reclaim = zfs_freebsd_reclaim,
7224 .vop_setattr = zfs_freebsd_setattr,
7225 .vop_write = VOP_PANIC,
7226 .vop_pathconf = zfs_freebsd_fifo_pathconf,
7227 .vop_fid = zfs_freebsd_fid,
7228 .vop_getacl = zfs_freebsd_getacl,
7229 .vop_setacl = zfs_freebsd_setacl,
7230 .vop_aclcheck = zfs_freebsd_aclcheck,
7234 * special share hidden files vnode operations template
7236 struct vop_vector zfs_shareops = {
7237 .vop_default = &default_vnodeops,
7238 .vop_access = zfs_freebsd_access,
7239 .vop_inactive = zfs_freebsd_inactive,
7240 .vop_reclaim = zfs_freebsd_reclaim,
7241 .vop_fid = zfs_freebsd_fid,
7242 .vop_pathconf = zfs_freebsd_pathconf,