4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
25 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
26 * Copyright 2017 Nexenta Systems, Inc.
29 /* Portions Copyright 2007 Jeremy Teo */
30 /* Portions Copyright 2010 Robert Milkowski */
33 #include <sys/types.h>
34 #include <sys/param.h>
36 #include <sys/sysmacros.h>
41 #include <sys/taskq.h>
43 #include <sys/vmsystm.h>
44 #include <sys/atomic.h>
45 #include <sys/pathname.h>
46 #include <sys/cmn_err.h>
47 #include <sys/errno.h>
48 #include <sys/zfs_dir.h>
49 #include <sys/zfs_acl.h>
50 #include <sys/zfs_ioctl.h>
51 #include <sys/fs/zfs.h>
53 #include <sys/dmu_objset.h>
59 #include <sys/policy.h>
60 #include <sys/sunddi.h>
63 #include <sys/zfs_ctldir.h>
64 #include <sys/zfs_fuid.h>
65 #include <sys/zfs_sa.h>
66 #include <sys/zfs_vnops.h>
67 #include <sys/zfs_rlock.h>
71 #include <sys/sa_impl.h>
76 * Each vnode op performs some logical unit of work. To do this, the ZPL must
77 * properly lock its in-core state, create a DMU transaction, do the work,
78 * record this work in the intent log (ZIL), commit the DMU transaction,
79 * and wait for the intent log to commit if it is a synchronous operation.
80 * Moreover, the vnode ops must work in both normal and log replay context.
81 * The ordering of events is important to avoid deadlocks and references
82 * to freed memory. The example below illustrates the following Big Rules:
84 * (1) A check must be made in each zfs thread for a mounted file system.
85 * This is done, while avoiding races, by using ZFS_ENTER(zfsvfs).
86 * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
87 * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
88 * can return EIO from the calling function.
90 * (2) iput() should always be the last thing except for zil_commit()
91 * (if necessary) and ZFS_EXIT(). This is for 3 reasons:
92 * First, if it's the last reference, the vnode/znode
93 * can be freed, so the zp may point to freed memory. Second, the last
94 * reference will call zfs_zinactive(), which may induce a lot of work --
95 * pushing cached pages (which acquires range locks) and syncing out
96 * cached atime changes. Third, zfs_zinactive() may require a new tx,
97 * which could deadlock the system if you were already holding one.
98 * If you must call iput() within a tx then use zfs_iput_async().
100 * (3) All range locks must be grabbed before calling dmu_tx_assign(),
101 * as they can span dmu_tx_assign() calls.
103 * (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
104 * dmu_tx_assign(). This is critical because we don't want to block
105 * while holding locks.
107 * If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
108 * reduces lock contention and CPU usage when we must wait (note that if
109 * throughput is constrained by the storage, nearly every transaction must wait).
112 * Note, in particular, that if a lock is sometimes acquired before
113 * the tx assigns, and sometimes after (e.g. z_lock), then failing
114 * to use a non-blocking assign can deadlock the system. The scenario:
116 * Thread A has grabbed a lock before calling dmu_tx_assign().
117 * Thread B is in an already-assigned tx, and blocks for this lock.
118 * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
119 * forever, because the previous txg can't quiesce until B's tx commits.
121 * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
122 * then drop all locks, call dmu_tx_wait(), and try again. On subsequent
123 * calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
124 * to indicate that this operation has already called dmu_tx_wait().
125 * This will ensure that we don't retry forever, waiting a short bit each time (see the sketch after the ordering example below).
128 * (5) If the operation succeeded, generate the intent log entry for it
129 * before dropping locks. This ensures that the ordering of events
130 * in the intent log matches the order in which they actually occurred.
131 * During ZIL replay the zfs_log_* functions will update the sequence
132 * number to indicate the zil transaction has replayed.
134 * (6) At the end of each vnode op, the DMU tx must always commit,
135 * regardless of whether there were any errors.
137 * (7) After dropping all locks, invoke zil_commit(zilog, foid)
138 * to ensure that synchronous semantics are provided when necessary.
140 * In general, this is how things should be ordered in each vnode op:
142 * ZFS_ENTER(zfsvfs); // exit if unmounted
144 * zfs_dirent_lock(&dl, ...) // lock directory entry (may igrab())
145 * rw_enter(...); // grab any other locks you need
146 * tx = dmu_tx_create(...); // get DMU tx
147 * dmu_tx_hold_*(); // hold each object you might modify
148 * error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
150 * rw_exit(...); // drop locks
151 * zfs_dirent_unlock(dl); // unlock directory entry
152 * iput(...); // release held vnodes
153 * if (error == ERESTART) {
159 * dmu_tx_abort(tx); // abort DMU tx
160 * ZFS_EXIT(zfsvfs); // finished in zfs
161 * return (error); // really out of space
163 * error = do_real_work(); // do whatever this VOP does
165 * zfs_log_*(...); // on success, make ZIL entry
166 * dmu_tx_commit(tx); // commit DMU tx -- error or not
167 * rw_exit(...); // drop locks
168 * zfs_dirent_unlock(dl); // unlock directory entry
169 * iput(...); // release held vnodes
170 * zil_commit(zilog, foid); // synchronous when necessary
171 * ZFS_EXIT(zfsvfs); // finished in zfs
172 * return (error); // done, report error
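 *
 * A minimal sketch (illustrative only, not copied verbatim from any one
 * operation in this file) of the rule (4) assign-and-retry pattern, for a
 * vnode op that must drop and reacquire its ZPL locks around the wait:
 *
 *	boolean_t waited = B_FALSE;
 * top:
 *	... grab ZPL locks ...
 *	tx = dmu_tx_create(zfsvfs->z_os);
 *	dmu_tx_hold_*();
 *	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
 *	if (error) {
 *		... drop ZPL locks ...
 *		if (error == ERESTART) {
 *			waited = B_TRUE;	// remember we already waited
 *			dmu_tx_wait(tx);	// wait with no locks held
 *			dmu_tx_abort(tx);
 *			goto top;		// retry the whole operation
 *		}
 *		dmu_tx_abort(tx);		// any other error: give up
 *		ZFS_EXIT(zfsvfs);
 *		return (error);
 *	}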
176 * Virus scanning is unsupported. It would be possible to add a hook
177 * here to perform the required virus scan. This could be done
178 * entirely in the kernel or potentially as an update to invoke a scanning service.
182 zfs_vscan(struct inode *ip, cred_t *cr, int async)
189 zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
191 znode_t *zp = ITOZ(ip);
192 zfsvfs_t *zfsvfs = ITOZSB(ip);
197 /* Honor ZFS_APPENDONLY file attribute */
198 if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
199 ((flag & O_APPEND) == 0)) {
201 return (SET_ERROR(EPERM));
204 /* Virus scan eligible files on open */
205 if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
206 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
207 if (zfs_vscan(ip, cr, 0) != 0) {
209 return (SET_ERROR(EACCES));
213 /* Keep a count of the synchronous opens in the znode */
215 atomic_inc_32(&zp->z_sync_cnt);
223 zfs_close(struct inode *ip, int flag, cred_t *cr)
225 znode_t *zp = ITOZ(ip);
226 zfsvfs_t *zfsvfs = ITOZSB(ip);
231 /* Decrement the synchronous opens in the znode */
233 atomic_dec_32(&zp->z_sync_cnt);
235 if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
236 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
237 VERIFY(zfs_vscan(ip, cr, 1) == 0);
243 #if defined(SEEK_HOLE) && defined(SEEK_DATA)
245 * Lseek support for finding holes (cmd == SEEK_HOLE) and
246 * data (cmd == SEEK_DATA). "off" is an in/out parameter.
249 zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
251 znode_t *zp = ITOZ(ip);
252 uint64_t noff = (uint64_t)*off; /* new offset */
257 file_sz = zp->z_size;
258 if (noff >= file_sz) {
259 return (SET_ERROR(ENXIO));
262 if (cmd == SEEK_HOLE)
267 error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
270 return (SET_ERROR(ENXIO));
272 /* file was dirty, so fall back to using generic logic */
273 if (error == EBUSY) {
281 * We could find a hole that begins after the logical end-of-file,
282 * because dmu_offset_next() only works on whole blocks. If the
283 * EOF falls mid-block, then indicate that the "virtual hole"
284 * at the end of the file begins at the logical EOF, rather than
285 * at the end of the last block.
287 if (noff > file_sz) {
299 zfs_holey(struct inode *ip, int cmd, loff_t *off)
301 znode_t *zp = ITOZ(ip);
302 zfsvfs_t *zfsvfs = ITOZSB(ip);
308 error = zfs_holey_common(ip, cmd, off);
313 #endif /* SEEK_HOLE && SEEK_DATA */
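
/*
 * For context only: user space reaches the SEEK_HOLE/SEEK_DATA support above
 * through lseek(2).  A minimal sketch ("fd" is a hypothetical descriptor for
 * an open file on a ZFS dataset):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	  // first data byte at/after 0
 *	off_t hole = lseek(fd, data, SEEK_HOLE);  // first hole at/after "data"
 *
 * ENXIO means there is no data (or no hole) at or after the given offset;
 * note the "virtual hole" at the logical EOF described in zfs_holey_common().
 */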
317 * When a file is memory mapped, we must keep the IO data synchronized
318 * between the DMU cache and the memory mapped pages. What this means:
320 * On Write: If we find a memory mapped page, we write to *both*
321 * the page and the dmu buffer.
324 update_pages(struct inode *ip, int64_t start, int len,
325 objset_t *os, uint64_t oid)
327 struct address_space *mp = ip->i_mapping;
333 off = start & (PAGE_SIZE-1);
334 for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
335 nbytes = MIN(PAGE_SIZE - off, len);
337 pp = find_lock_page(mp, start >> PAGE_SHIFT);
339 if (mapping_writably_mapped(mp))
340 flush_dcache_page(pp);
343 (void) dmu_read(os, oid, start+off, nbytes, pb+off,
347 if (mapping_writably_mapped(mp))
348 flush_dcache_page(pp);
350 mark_page_accessed(pp);
363 * When a file is memory mapped, we must keep the IO data synchronized
364 * between the DMU cache and the memory mapped pages. What this means:
366 * On Read: We "read" preferentially from memory mapped pages,
367 * otherwise we fall back to the dmu buffer.
369 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
370 * the file is memory mapped.
373 mappedread(struct inode *ip, int nbytes, uio_t *uio)
375 struct address_space *mp = ip->i_mapping;
377 znode_t *zp = ITOZ(ip);
384 start = uio->uio_loffset;
385 off = start & (PAGE_SIZE-1);
386 for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
387 bytes = MIN(PAGE_SIZE - off, len);
389 pp = find_lock_page(mp, start >> PAGE_SHIFT);
391 ASSERT(PageUptodate(pp));
395 error = uiomove(pb + off, bytes, UIO_READ, uio);
398 if (mapping_writably_mapped(mp))
399 flush_dcache_page(pp);
401 mark_page_accessed(pp);
404 error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
417 unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
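/*
 * Files larger than zfs_delete_blocks blocks are treated as "too big" by
 * zfs_remove(): the removing tx only holds a token amount to free, and the
 * object is reclaimed later through the unlinked set instead of inline.
 */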
418 unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;
421 * Read bytes from specified file into supplied buffer.
423 * IN: ip - inode of file to be read from.
424 * uio - structure supplying read location, range info,
426 * ioflag - FSYNC flags; used to provide FRSYNC semantics.
427 * O_DIRECT flag; used to bypass page cache.
428 * cr - credentials of caller.
430 * OUT: uio - updated offset and range, buffer filled.
432 * RETURN: 0 on success, error code on failure.
435 * inode - atime updated if byte count > 0
439 zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
442 boolean_t frsync = B_FALSE;
444 znode_t *zp = ITOZ(ip);
445 zfsvfs_t *zfsvfs = ITOZSB(ip);
449 if (zp->z_pflags & ZFS_AV_QUARANTINED) {
451 return (SET_ERROR(EACCES));
455 * Validate file offset
457 if (uio->uio_loffset < (offset_t)0) {
459 return (SET_ERROR(EINVAL));
463 * Fasttrack empty reads
465 if (uio->uio_resid == 0) {
472 * If we're in FRSYNC mode, sync out this znode before reading it.
473 * Only do this for non-snapshots.
475 * Some platforms do not support FRSYNC and instead map it
476 * to FSYNC, which results in unnecessary calls to zil_commit. We
477 * only honor FRSYNC requests on platforms which support it.
479 frsync = !!(ioflag & FRSYNC);
482 (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
483 zil_commit(zfsvfs->z_log, zp->z_id);
486 * Lock the range against changes.
488 locked_range_t *lr = rangelock_enter(&zp->z_rangelock,
489 uio->uio_loffset, uio->uio_resid, RL_READER);
492 * If we are reading past end-of-file we can skip
493 * to the end; but we might still need to set atime.
495 if (uio->uio_loffset >= zp->z_size) {
500 ASSERT(uio->uio_loffset < zp->z_size);
501 ssize_t n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
502 ssize_t start_resid = n;
504 #ifdef HAVE_UIO_ZEROCOPY
506 if ((uio->uio_extflg == UIO_XUIO) &&
507 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
509 int blksz = zp->z_blksz;
510 uint64_t offset = uio->uio_loffset;
512 xuio = (xuio_t *)uio;
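/* number of whole blocks spanned by [offset, offset + n) */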
514 nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
517 ASSERT(offset + n <= blksz);
520 (void) dmu_xuio_init(xuio, nblk);
522 if (vn_has_cached_data(ip)) {
524 * For simplicity, we always allocate a full buffer
525 * even if we only expect to read a portion of a block.
527 while (--nblk >= 0) {
528 (void) dmu_xuio_add(xuio,
529 dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
534 #endif /* HAVE_UIO_ZEROCOPY */
537 ssize_t nbytes = MIN(n, zfs_read_chunk_size -
538 P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
540 if (zp->z_is_mapped && !(ioflag & O_DIRECT)) {
541 error = mappedread(ip, nbytes, uio);
543 error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
548 /* convert checksum errors into IO errors */
550 error = SET_ERROR(EIO);
557 int64_t nread = start_resid - n;
558 dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
559 task_io_account_read(nread);
568 * Write the bytes to a file.
570 * IN: ip - inode of file to be written to.
571 * uio - structure supplying write location, range info,
573 * ioflag - FAPPEND flag set if in append mode.
574 * O_DIRECT flag; used to bypass page cache.
575 * cr - credentials of caller.
577 * OUT: uio - updated offset and range.
579 * RETURN: 0 if success
580 * error code if failure
583 * ip - ctime|mtime updated if byte count > 0
588 zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
591 ssize_t start_resid = uio->uio_resid;
594 * Fasttrack empty write
596 ssize_t n = start_resid;
600 rlim64_t limit = uio->uio_limit;
601 if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
604 znode_t *zp = ITOZ(ip);
605 zfsvfs_t *zfsvfs = ZTOZSB(zp);
609 sa_bulk_attr_t bulk[4];
611 uint64_t mtime[2], ctime[2];
612 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
613 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
614 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
616 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
620 * Callers might not be able to detect properly that we are read-only,
621 * so check it explicitly here.
623 if (zfs_is_readonly(zfsvfs)) {
625 return (SET_ERROR(EROFS));
629 * If immutable or not appending then return EPERM
631 if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
632 ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
633 (uio->uio_loffset < zp->z_size))) {
635 return (SET_ERROR(EPERM));
639 * Validate file offset
641 offset_t woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
644 return (SET_ERROR(EINVAL));
647 int max_blksz = zfsvfs->z_max_blksz;
651 * Pre-fault the pages to ensure slow (e.g. NFS) pages don't hold up the txg.
653 * Skip this if uio contains loaned arc_buf.
655 #ifdef HAVE_UIO_ZEROCOPY
656 if ((uio->uio_extflg == UIO_XUIO) &&
657 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
658 xuio = (xuio_t *)uio;
661 if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
663 return (SET_ERROR(EFAULT));
667 * If in append mode, set the io offset pointer to eof.
670 if (ioflag & FAPPEND) {
672 * Obtain an appending range lock to guarantee file append
673 * semantics. We reset the write offset once we have the lock.
675 lr = rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
676 woff = lr->lr_offset;
677 if (lr->lr_length == UINT64_MAX) {
679 * We overlocked the file because this write will cause
680 * the file block size to increase.
681 * Note that zp_size cannot change with this lock held.
685 uio->uio_loffset = woff;
688 * Note that if the file block size will change as a result of
689 * this write, then this range lock will lock the entire file
690 * so that we can re-write the block safely.
692 lr = rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
698 return (SET_ERROR(EFBIG));
701 if ((woff + n) > limit || woff > (limit - n))
704 /* Will this write extend the file length? */
705 int write_eof = (woff + n > zp->z_size);
707 uint64_t end_size = MAX(zp->z_size, woff + n);
708 zilog_t *zilog = zfsvfs->z_log;
709 #ifdef HAVE_UIO_ZEROCOPY
711 const iovec_t *iovp = uio->uio_iov;
712 ASSERTV(int iovcnt = uio->uio_iovcnt);
717 * Write the file in reasonable size chunks. Each chunk is written
718 * in a separate transaction; this keeps the intent log records small
719 * and allows us to do more fine-grained space accounting.
722 woff = uio->uio_loffset;
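/*
 * Stop and return EDQUOT if the file owner's user, group, or (when set)
 * project block quota has been exceeded.
 */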
724 if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
725 KUID_TO_SUID(ip->i_uid)) ||
726 zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
727 KGID_TO_SGID(ip->i_gid)) ||
728 (zp->z_projid != ZFS_DEFAULT_PROJID &&
729 zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
731 error = SET_ERROR(EDQUOT);
735 arc_buf_t *abuf = NULL;
736 const iovec_t *aiov = NULL;
738 #ifdef HAVE_UIO_ZEROCOPY
739 ASSERT(i_iov < iovcnt);
740 ASSERT3U(uio->uio_segflg, !=, UIO_BVEC);
742 abuf = dmu_xuio_arcbuf(xuio, i_iov);
743 dmu_xuio_clear(xuio, i_iov);
744 ASSERT((aiov->iov_base == abuf->b_data) ||
745 ((char *)aiov->iov_base - (char *)abuf->b_data +
746 aiov->iov_len == arc_buf_size(abuf)));
749 } else if (n >= max_blksz && woff >= zp->z_size &&
750 P2PHASE(woff, max_blksz) == 0 &&
751 zp->z_blksz == max_blksz) {
753 * This write covers a full block. "Borrow" a buffer
754 * from the dmu so that we can fill it before we enter
755 * a transaction. This avoids the possibility of
756 * holding up the transaction if the data copy hangs
757 * up on a pagefault (e.g., from an NFS server mapping).
761 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
763 ASSERT(abuf != NULL);
764 ASSERT(arc_buf_size(abuf) == max_blksz);
765 if ((error = uiocopy(abuf->b_data, max_blksz,
766 UIO_WRITE, uio, &cbytes))) {
767 dmu_return_arcbuf(abuf);
770 ASSERT(cbytes == max_blksz);
774 * Start a transaction.
776 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
777 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
778 dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
779 zfs_sa_upgrade_txholds(tx, zp);
780 error = dmu_tx_assign(tx, TXG_WAIT);
784 dmu_return_arcbuf(abuf);
789 * If rangelock_enter() over-locked we grow the blocksize
790 * and then reduce the lock range. This will only happen
791 * on the first iteration since rangelock_reduce() will
792 * shrink down lr_length to the appropriate size.
794 if (lr->lr_length == UINT64_MAX) {
797 if (zp->z_blksz > max_blksz) {
799 * File's blocksize is already larger than the
800 * "recordsize" property. Only let it grow to
801 * the next power of 2.
803 ASSERT(!ISP2(zp->z_blksz));
804 new_blksz = MIN(end_size,
805 1 << highbit64(zp->z_blksz));
807 new_blksz = MIN(end_size, max_blksz);
809 zfs_grow_blocksize(zp, new_blksz, tx);
810 rangelock_reduce(lr, woff, n);
814 * XXX - should we really limit each write to z_max_blksz?
815 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
817 ssize_t nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
821 tx_bytes = uio->uio_resid;
822 uio->uio_fault_disable = B_TRUE;
823 error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
825 uio->uio_fault_disable = B_FALSE;
826 if (error == EFAULT) {
828 if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
832 } else if (error != 0) {
836 tx_bytes -= uio->uio_resid;
839 ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
841 * If this is not a full block write, but we are
842 * extending the file past EOF and this data starts
843 * block-aligned, use assign_arcbuf(). Otherwise,
844 * write via dmu_write().
846 if (tx_bytes < max_blksz && (!write_eof ||
847 aiov->iov_base != abuf->b_data)) {
849 dmu_write(zfsvfs->z_os, zp->z_id, woff,
850 /* cppcheck-suppress nullPointer */
851 aiov->iov_len, aiov->iov_base, tx);
852 dmu_return_arcbuf(abuf);
853 xuio_stat_wbuf_copied();
855 ASSERT(xuio || tx_bytes == max_blksz);
856 error = dmu_assign_arcbuf_by_dbuf(
857 sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
859 dmu_return_arcbuf(abuf);
864 ASSERT(tx_bytes <= uio->uio_resid);
865 uioskip(uio, tx_bytes);
867 if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT)) {
868 update_pages(ip, woff,
869 tx_bytes, zfsvfs->z_os, zp->z_id);
873 * If we made no progress, we're done. If we made even
874 * partial progress, update the znode and ZIL accordingly.
877 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
878 (void *)&zp->z_size, sizeof (uint64_t), tx);
885 * Clear Set-UID/Set-GID bits on successful write if not
886 * privileged and at least one of the execute bits is set.
888 * It would be nice to do this after all writes have
889 * been done, but that would still expose the ISUID/ISGID
890 * to another app after the partial write is committed.
892 * Note: we don't call zfs_fuid_map_id() here because
893 * user 0 is not an ephemeral uid.
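 *
 * The mask below, S_IXUSR | (S_IXUSR >> 3) | (S_IXUSR >> 6), is simply
 * 0111, i.e. "any of the three execute bits is set".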
895 mutex_enter(&zp->z_acl_lock);
896 uint32_t uid = KUID_TO_SUID(ip->i_uid);
897 if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
898 (S_IXUSR >> 6))) != 0 &&
899 (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
900 secpolicy_vnode_setid_retain(cr,
901 ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
903 zp->z_mode &= ~(S_ISUID | S_ISGID);
904 ip->i_mode = newmode = zp->z_mode;
905 (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
906 (void *)&newmode, sizeof (uint64_t), tx);
908 mutex_exit(&zp->z_acl_lock);
910 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
913 * Update the file size (zp_size) if it has changed;
914 * account for possible concurrent updates.
916 while ((end_size = zp->z_size) < uio->uio_loffset) {
917 (void) atomic_cas_64(&zp->z_size, end_size,
922 * If we are replaying and eof is non-zero then force
923 * the file size to the specified eof. Note, there's no
924 * concurrency during replay.
926 if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
927 zp->z_size = zfsvfs->z_replay_eof;
929 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
931 zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
937 ASSERT(tx_bytes == nbytes);
940 if (!xuio && n > 0) {
941 if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
948 zfs_inode_update(zp);
952 * If we're in replay mode, or we made no progress, return error.
953 * Otherwise, it's at least a partial write, so it's successful.
955 if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
960 if (ioflag & (FSYNC | FDSYNC) ||
961 zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
962 zil_commit(zilog, zp->z_id);
964 int64_t nwritten = start_resid - uio->uio_resid;
965 dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
966 task_io_account_write(nwritten);
973 * Drop a reference on the passed inode asynchronously. This ensures
974 * that the caller will never drop the last reference on an inode in
975 * the current context. Doing so while holding open a tx could result
976 * in a deadlock if iput_final() re-enters the filesystem code.
979 zfs_iput_async(struct inode *ip)
981 objset_t *os = ITOZSB(ip)->z_os;
983 ASSERT(atomic_read(&ip->i_count) > 0);
986 if (atomic_read(&ip->i_count) == 1)
987 VERIFY(taskq_dispatch(dsl_pool_iput_taskq(dmu_objset_pool(os)),
988 (task_func_t *)iput, ip, TQ_SLEEP) != TASKQID_INVALID);
995 zfs_get_done(zgd_t *zgd, int error)
997 znode_t *zp = zgd->zgd_private;
1000 dmu_buf_rele(zgd->zgd_db, zgd);
1002 rangelock_exit(zgd->zgd_lr);
1005 * Release the vnode asynchronously as we currently have the
1006 * txg stopped from syncing.
1008 zfs_iput_async(ZTOI(zp));
1010 kmem_free(zgd, sizeof (zgd_t));
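
/*
 * Fault-injection knob (debug builds): when set, zfs_get_data() below fails
 * a single indirect write with EIO so the ZIL's error handling can be
 * exercised.
 */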
1014 static int zil_fault_io = 0;
1018 * Get data to generate a TX_WRITE intent log record.
1021 zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
1023 zfsvfs_t *zfsvfs = arg;
1024 objset_t *os = zfsvfs->z_os;
1026 uint64_t object = lr->lr_foid;
1027 uint64_t offset = lr->lr_offset;
1028 uint64_t size = lr->lr_length;
1033 ASSERT3P(lwb, !=, NULL);
1034 ASSERT3P(zio, !=, NULL);
1035 ASSERT3U(size, !=, 0);
1038 * Nothing to do if the file has been removed
1040 if (zfs_zget(zfsvfs, object, &zp) != 0)
1041 return (SET_ERROR(ENOENT));
1042 if (zp->z_unlinked) {
1044 * Release the vnode asynchronously as we currently have the
1045 * txg stopped from syncing.
1047 zfs_iput_async(ZTOI(zp));
1048 return (SET_ERROR(ENOENT));
1051 zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1053 zgd->zgd_private = zp;
1056 * Write records come in two flavors: immediate and indirect.
1057 * For small writes it's cheaper to store the data with the
1058 * log record (immediate); for large writes it's cheaper to
1059 * sync the data and get a pointer to it (indirect) so that
1060 * we don't have to write the data twice.
1062 if (buf != NULL) { /* immediate write */
1063 zgd->zgd_lr = rangelock_enter(&zp->z_rangelock,
1064 offset, size, RL_READER);
1065 /* test for truncation needs to be done while range locked */
1066 if (offset >= zp->z_size) {
1067 error = SET_ERROR(ENOENT);
1069 error = dmu_read(os, object, offset, size, buf,
1070 DMU_READ_NO_PREFETCH);
1072 ASSERT(error == 0 || error == ENOENT);
1073 } else { /* indirect write */
1075 * Have to lock the whole block to ensure when it's
1076 * written out and its checksum is being calculated
1077 * that no one can change the data. We need to re-check
1078 * blocksize after we get the lock in case it's changed!
1083 blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
1085 zgd->zgd_lr = rangelock_enter(&zp->z_rangelock,
1086 offset, size, RL_READER);
1087 if (zp->z_blksz == size)
1090 rangelock_exit(zgd->zgd_lr);
1092 /* test for truncation needs to be done while range locked */
1093 if (lr->lr_offset >= zp->z_size)
1094 error = SET_ERROR(ENOENT);
1097 error = SET_ERROR(EIO);
1102 error = dmu_buf_hold(os, object, offset, zgd, &db,
1103 DMU_READ_NO_PREFETCH);
1106 blkptr_t *bp = &lr->lr_blkptr;
1111 ASSERT(db->db_offset == offset);
1112 ASSERT(db->db_size == size);
1114 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1116 ASSERT(error || lr->lr_length <= size);
1119 * On success, we need to wait for the write I/O
1120 * initiated by dmu_sync() to complete before we can
1121 * release this dbuf. We will finish everything up
1122 * in the zfs_get_done() callback.
1127 if (error == EALREADY) {
1128 lr->lr_common.lrc_txtype = TX_WRITE2;
1130 * TX_WRITE2 relies on the data previously
1131 * written by the TX_WRITE that caused
1132 * EALREADY. We zero out the BP because
1133 * it is the old, currently-on-disk BP.
1142 zfs_get_done(zgd, error);
1149 zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
1151 znode_t *zp = ITOZ(ip);
1152 zfsvfs_t *zfsvfs = ITOZSB(ip);
1158 if (flag & V_ACE_MASK)
1159 error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
1161 error = zfs_zaccess_rwx(zp, mode, flag, cr);
1168 * Lookup an entry in a directory, or an extended attribute directory.
1169 * If it exists, return a held inode reference for it.
1171 * IN: dip - inode of directory to search.
1172 * nm - name of entry to lookup.
1173 * flags - LOOKUP_XATTR set if looking for an attribute.
1174 * cr - credentials of caller.
1175 * direntflags - directory lookup flags
1176 * realpnp - returned pathname.
1178 * OUT: ipp - inode of located entry, NULL if not found.
1180 * RETURN: 0 on success, error code on failure.
1187 zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
1188 cred_t *cr, int *direntflags, pathname_t *realpnp)
1190 znode_t *zdp = ITOZ(dip);
1191 zfsvfs_t *zfsvfs = ITOZSB(dip);
1195 * Fast path lookup; however, we must skip the DNLC lookup
1196 * for case folding or normalizing lookups because the
1197 * DNLC code only stores the passed in name. This means
1198 * creating 'a' and removing 'A' on a case insensitive
1199 * file system would work, but DNLC still thinks 'a'
1200 * exists and won't let you create it again on the next
1201 * pass through fast path.
1203 if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
1205 if (!S_ISDIR(dip->i_mode)) {
1206 return (SET_ERROR(ENOTDIR));
1207 } else if (zdp->z_sa_hdl == NULL) {
1208 return (SET_ERROR(EIO));
1211 if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
1212 error = zfs_fastaccesschk_execute(zdp, cr);
1227 if (flags & LOOKUP_XATTR) {
1229 * We don't allow recursive attributes.
1230 * Maybe someday we will.
1232 if (zdp->z_pflags & ZFS_XATTR) {
1234 return (SET_ERROR(EINVAL));
1237 if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {
1243 * Do we have permission to get into attribute directory?
1246 if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,
1256 if (!S_ISDIR(dip->i_mode)) {
1258 return (SET_ERROR(ENOTDIR));
1262 * Check accessibility of directory.
1265 if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
1270 if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
1271 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1273 return (SET_ERROR(EILSEQ));
1276 error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
1277 if ((error == 0) && (*ipp))
1278 zfs_inode_update(ITOZ(*ipp));
1285 * Attempt to create a new entry in a directory. If the entry
1286 * already exists, truncate the file if permissible, else return
1287 * an error. Return the ip of the created or trunc'd file.
1289 * IN: dip - inode of directory to put new file entry in.
1290 * name - name of new file entry.
1291 * vap - attributes of new file.
1292 * excl - flag indicating exclusive or non-exclusive mode.
1293 * mode - mode to open file with.
1294 * cr - credentials of caller.
1296 * vsecp - ACL to be set
1298 * OUT: ipp - inode of created or trunc'd entry.
1300 * RETURN: 0 on success, error code on failure.
1303 * dip - ctime|mtime updated if new entry created
1304 * ip - ctime|mtime always, atime if new
1309 zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
1310 int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
1312 znode_t *zp, *dzp = ITOZ(dip);
1313 zfsvfs_t *zfsvfs = ITOZSB(dip);
1321 zfs_acl_ids_t acl_ids;
1322 boolean_t fuid_dirtied;
1323 boolean_t have_acl = B_FALSE;
1324 boolean_t waited = B_FALSE;
1327 * If we have an ephemeral id, ACL, or XVATTR then
1328 * make sure file system is at proper version
1334 if (zfsvfs->z_use_fuids == B_FALSE &&
1335 (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1336 return (SET_ERROR(EINVAL));
1339 return (SET_ERROR(EINVAL));
1344 zilog = zfsvfs->z_log;
1346 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
1347 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1349 return (SET_ERROR(EILSEQ));
1352 if (vap->va_mask & ATTR_XVATTR) {
1353 if ((error = secpolicy_xvattr((xvattr_t *)vap,
1354 crgetuid(cr), cr, vap->va_mode)) != 0) {
1362 if (*name == '\0') {
1364 * Null component name refers to the directory itself.
1371 /* possible igrab(zp) */
1374 if (flag & FIGNORECASE)
1377 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1381 zfs_acl_ids_free(&acl_ids);
1382 if (strcmp(name, "..") == 0)
1383 error = SET_ERROR(EISDIR);
1391 uint64_t projid = ZFS_DEFAULT_PROJID;
1394 * Create a new file object and update the directory to reference it.
1397 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
1399 zfs_acl_ids_free(&acl_ids);
1404 * We only support the creation of regular files in
1405 * extended attribute directories.
1408 if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
1410 zfs_acl_ids_free(&acl_ids);
1411 error = SET_ERROR(EINVAL);
1415 if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
1416 cr, vsecp, &acl_ids)) != 0)
1420 if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
1421 projid = zfs_inherit_projid(dzp);
1422 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
1423 zfs_acl_ids_free(&acl_ids);
1424 error = SET_ERROR(EDQUOT);
1428 tx = dmu_tx_create(os);
1430 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1431 ZFS_SA_BASE_ATTR_SIZE);
1433 fuid_dirtied = zfsvfs->z_fuid_dirty;
1435 zfs_fuid_txhold(zfsvfs, tx);
1436 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
1437 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
1438 if (!zfsvfs->z_use_sa &&
1439 acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1440 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1441 0, acl_ids.z_aclp->z_acl_bytes);
1444 error = dmu_tx_assign(tx,
1445 (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
1447 zfs_dirent_unlock(dl);
1448 if (error == ERESTART) {
1454 zfs_acl_ids_free(&acl_ids);
1459 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1461 error = zfs_link_create(dl, zp, tx, ZNEW);
1464 * Since we failed to add the directory entry for it,
1465 * delete the newly created dnode.
1467 zfs_znode_delete(zp, tx);
1468 remove_inode_hash(ZTOI(zp));
1469 zfs_acl_ids_free(&acl_ids);
1475 zfs_fuid_sync(zfsvfs, tx);
1477 txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
1478 if (flag & FIGNORECASE)
1480 zfs_log_create(zilog, tx, txtype, dzp, zp, name,
1481 vsecp, acl_ids.z_fuidp, vap);
1482 zfs_acl_ids_free(&acl_ids);
1485 int aflags = (flag & FAPPEND) ? V_APPEND : 0;
1488 zfs_acl_ids_free(&acl_ids);
1492 * A directory entry already exists for this name.
1495 * Can't truncate an existing file if in exclusive mode.
1498 error = SET_ERROR(EEXIST);
1502 * Can't open a directory for writing.
1504 if (S_ISDIR(ZTOI(zp)->i_mode)) {
1505 error = SET_ERROR(EISDIR);
1509 * Verify requested access to file.
1511 if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
1515 mutex_enter(&dzp->z_lock);
1517 mutex_exit(&dzp->z_lock);
1520 * Truncate regular files if requested.
1522 if (S_ISREG(ZTOI(zp)->i_mode) &&
1523 (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
1524 /* we can't hold any locks when calling zfs_freesp() */
1526 zfs_dirent_unlock(dl);
1529 error = zfs_freesp(zp, 0, 0, mode, TRUE);
1535 zfs_dirent_unlock(dl);
1541 zfs_inode_update(dzp);
1542 zfs_inode_update(zp);
1546 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1547 zil_commit(zilog, 0);
1555 zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
1556 int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
1558 znode_t *zp = NULL, *dzp = ITOZ(dip);
1559 zfsvfs_t *zfsvfs = ITOZSB(dip);
1565 zfs_acl_ids_t acl_ids;
1566 uint64_t projid = ZFS_DEFAULT_PROJID;
1567 boolean_t fuid_dirtied;
1568 boolean_t have_acl = B_FALSE;
1569 boolean_t waited = B_FALSE;
1572 * If we have an ephemeral id, ACL, or XVATTR then
1573 * make sure file system is at proper version
1579 if (zfsvfs->z_use_fuids == B_FALSE &&
1580 (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1581 return (SET_ERROR(EINVAL));
1587 if (vap->va_mask & ATTR_XVATTR) {
1588 if ((error = secpolicy_xvattr((xvattr_t *)vap,
1589 crgetuid(cr), cr, vap->va_mode)) != 0) {
1599 * Create a new file object and update the directory to reference it.
1602 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
1604 zfs_acl_ids_free(&acl_ids);
1608 if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
1609 cr, vsecp, &acl_ids)) != 0)
1613 if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
1614 projid = zfs_inherit_projid(dzp);
1615 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
1616 zfs_acl_ids_free(&acl_ids);
1617 error = SET_ERROR(EDQUOT);
1621 tx = dmu_tx_create(os);
1623 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1624 ZFS_SA_BASE_ATTR_SIZE);
1625 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1627 fuid_dirtied = zfsvfs->z_fuid_dirty;
1629 zfs_fuid_txhold(zfsvfs, tx);
1630 if (!zfsvfs->z_use_sa &&
1631 acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1632 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1633 0, acl_ids.z_aclp->z_acl_bytes);
1635 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
1637 if (error == ERESTART) {
1643 zfs_acl_ids_free(&acl_ids);
1648 zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);
1651 zfs_fuid_sync(zfsvfs, tx);
1653 /* Add to unlinked set */
1655 zfs_unlinked_add(zp, tx);
1656 zfs_acl_ids_free(&acl_ids);
1664 zfs_inode_update(dzp);
1665 zfs_inode_update(zp);
1674 * Remove an entry from a directory.
1676 * IN: dip - inode of directory to remove entry from.
1677 * name - name of entry to remove.
1678 * cr - credentials of caller.
1680 * RETURN: 0 if success
1681 * error code if failure
1685 * ip - ctime (if nlink > 0)
1688 uint64_t null_xattr = 0;
1692 zfs_remove(struct inode *dip, char *name, cred_t *cr, int flags)
1694 znode_t *zp, *dzp = ITOZ(dip);
1697 zfsvfs_t *zfsvfs = ITOZSB(dip);
1699 uint64_t acl_obj, xattr_obj;
1700 uint64_t xattr_obj_unlinked = 0;
1705 boolean_t may_delete_now, delete_now = FALSE;
1706 boolean_t unlinked, toobig = FALSE;
1708 pathname_t *realnmp = NULL;
1712 boolean_t waited = B_FALSE;
1715 return (SET_ERROR(EINVAL));
1719 zilog = zfsvfs->z_log;
1721 if (flags & FIGNORECASE) {
1731 * Attempt to lock directory; fail if entry doesn't exist.
1733 if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1743 if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
1748 * Need to use rmdir for removing directories.
1750 if (S_ISDIR(ip->i_mode)) {
1751 error = SET_ERROR(EPERM);
1755 mutex_enter(&zp->z_lock);
1756 may_delete_now = atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped);
1757 mutex_exit(&zp->z_lock);
1760 * We may delete the znode now, or we may put it in the unlinked set;
1761 * it depends on whether we're the last link, and on whether there are
1762 * other holds on the inode. So we dmu_tx_hold() the right things to
1763 * allow for either case.
1766 tx = dmu_tx_create(zfsvfs->z_os);
1767 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1768 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1769 zfs_sa_upgrade_txholds(tx, zp);
1770 zfs_sa_upgrade_txholds(tx, dzp);
1771 if (may_delete_now) {
1772 toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
1773 /* if the file is too big, only hold_free a token amount */
1774 dmu_tx_hold_free(tx, zp->z_id, 0,
1775 (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
1778 /* are there any extended attributes? */
1779 error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1780 &xattr_obj, sizeof (xattr_obj));
1781 if (error == 0 && xattr_obj) {
1782 error = zfs_zget(zfsvfs, xattr_obj, &xzp);
1784 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
1785 dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
1788 mutex_enter(&zp->z_lock);
1789 if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
1790 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
1791 mutex_exit(&zp->z_lock);
1793 /* charge as an update -- would be nice not to charge at all */
1794 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1797 * Mark this transaction as typically resulting in a net free of space
1799 dmu_tx_mark_netfree(tx);
1801 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
1803 zfs_dirent_unlock(dl);
1804 if (error == ERESTART) {
1824 * Remove the directory entry.
1826 error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
1835 * Hold z_lock so that we can make sure that the ACL obj
1836 * hasn't changed. Could have been deleted due to
1839 mutex_enter(&zp->z_lock);
1840 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1841 &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
1842 delete_now = may_delete_now && !toobig &&
1843 atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped) &&
1844 xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
1849 if (xattr_obj_unlinked) {
1850 ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
1851 mutex_enter(&xzp->z_lock);
1852 xzp->z_unlinked = 1;
1853 clear_nlink(ZTOI(xzp));
1855 error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
1856 &links, sizeof (links), tx);
1857 ASSERT3U(error, ==, 0);
1858 mutex_exit(&xzp->z_lock);
1859 zfs_unlinked_add(xzp, tx);
1862 error = sa_remove(zp->z_sa_hdl,
1863 SA_ZPL_XATTR(zfsvfs), tx);
1865 error = sa_update(zp->z_sa_hdl,
1866 SA_ZPL_XATTR(zfsvfs), &null_xattr,
1867 sizeof (uint64_t), tx);
1871 * Add to the unlinked set because a new reference could be
1872 * taken concurrently resulting in a deferred destruction.
1874 zfs_unlinked_add(zp, tx);
1875 mutex_exit(&zp->z_lock);
1876 } else if (unlinked) {
1877 mutex_exit(&zp->z_lock);
1878 zfs_unlinked_add(zp, tx);
1882 if (flags & FIGNORECASE)
1884 zfs_log_remove(zilog, tx, txtype, dzp, name, obj);
1891 zfs_dirent_unlock(dl);
1892 zfs_inode_update(dzp);
1893 zfs_inode_update(zp);
1901 zfs_inode_update(xzp);
1902 zfs_iput_async(ZTOI(xzp));
1905 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1906 zil_commit(zilog, 0);
1913 * Create a new directory and insert it into dip using the name
1914 * provided. Return a pointer to the inserted directory.
1916 * IN: dip - inode of directory to add subdir to.
1917 * dirname - name of new directory.
1918 * vap - attributes of new directory.
1919 * cr - credentials of caller.
1920 * vsecp - ACL to be set
1922 * OUT: ipp - inode of created directory.
1924 * RETURN: 0 if success
1925 * error code if failure
1928 * dip - ctime|mtime updated
1929 * ipp - ctime|mtime|atime updated
1933 zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
1934 cred_t *cr, int flags, vsecattr_t *vsecp)
1936 znode_t *zp, *dzp = ITOZ(dip);
1937 zfsvfs_t *zfsvfs = ITOZSB(dip);
1945 gid_t gid = crgetgid(cr);
1946 zfs_acl_ids_t acl_ids;
1947 boolean_t fuid_dirtied;
1948 boolean_t waited = B_FALSE;
1950 ASSERT(S_ISDIR(vap->va_mode));
1953 * If we have an ephemeral id, ACL, or XVATTR then
1954 * make sure file system is at proper version
1958 if (zfsvfs->z_use_fuids == B_FALSE &&
1959 (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1960 return (SET_ERROR(EINVAL));
1962 if (dirname == NULL)
1963 return (SET_ERROR(EINVAL));
1967 zilog = zfsvfs->z_log;
1969 if (dzp->z_pflags & ZFS_XATTR) {
1971 return (SET_ERROR(EINVAL));
1974 if (zfsvfs->z_utf8 && u8_validate(dirname,
1975 strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1977 return (SET_ERROR(EILSEQ));
1979 if (flags & FIGNORECASE)
1982 if (vap->va_mask & ATTR_XVATTR) {
1983 if ((error = secpolicy_xvattr((xvattr_t *)vap,
1984 crgetuid(cr), cr, vap->va_mode)) != 0) {
1990 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
1991 vsecp, &acl_ids)) != 0) {
1996 * First make sure the new directory doesn't exist.
1998 * Existence is checked first to make sure we don't return
1999 * EACCES instead of EEXIST, which can cause some applications to fail.
2005 if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
2007 zfs_acl_ids_free(&acl_ids);
2012 if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
2013 zfs_acl_ids_free(&acl_ids);
2014 zfs_dirent_unlock(dl);
2019 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
2020 zfs_acl_ids_free(&acl_ids);
2021 zfs_dirent_unlock(dl);
2023 return (SET_ERROR(EDQUOT));
2027 * Add a new entry to the directory.
2029 tx = dmu_tx_create(zfsvfs->z_os);
2030 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
2031 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
2032 fuid_dirtied = zfsvfs->z_fuid_dirty;
2034 zfs_fuid_txhold(zfsvfs, tx);
2035 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
2036 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
2037 acl_ids.z_aclp->z_acl_bytes);
2040 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
2041 ZFS_SA_BASE_ATTR_SIZE);
2043 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
2045 zfs_dirent_unlock(dl);
2046 if (error == ERESTART) {
2052 zfs_acl_ids_free(&acl_ids);
2061 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
2064 * Now put new name in parent dir.
2066 error = zfs_link_create(dl, zp, tx, ZNEW);
2068 zfs_znode_delete(zp, tx);
2069 remove_inode_hash(ZTOI(zp));
2074 zfs_fuid_sync(zfsvfs, tx);
2078 txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
2079 if (flags & FIGNORECASE)
2081 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
2082 acl_ids.z_fuidp, vap);
2085 zfs_acl_ids_free(&acl_ids);
2089 zfs_dirent_unlock(dl);
2091 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2092 zil_commit(zilog, 0);
2097 zfs_inode_update(dzp);
2098 zfs_inode_update(zp);
2105 * Remove a directory subdir entry. If the current working
2106 * directory is the same as the subdir to be removed, the remove will fail.
2109 * IN: dip - inode of directory to remove from.
2110 * name - name of directory to be removed.
2111 * cwd - inode of current working directory.
2112 * cr - credentials of caller.
2113 * flags - case flags
2115 * RETURN: 0 on success, error code on failure.
2118 * dip - ctime|mtime updated
2122 zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
2125 znode_t *dzp = ITOZ(dip);
2128 zfsvfs_t *zfsvfs = ITOZSB(dip);
2134 boolean_t waited = B_FALSE;
2137 return (SET_ERROR(EINVAL));
2141 zilog = zfsvfs->z_log;
2143 if (flags & FIGNORECASE)
2149 * Attempt to lock directory; fail if entry doesn't exist.
2151 if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
2159 if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
2163 if (!S_ISDIR(ip->i_mode)) {
2164 error = SET_ERROR(ENOTDIR);
2169 error = SET_ERROR(EINVAL);
2174 * Grab a lock on the directory to make sure that no one is
2175 * trying to add (or lookup) entries while we are removing it.
2177 rw_enter(&zp->z_name_lock, RW_WRITER);
2180 * Grab a lock on the parent pointer to make sure we play well
2181 * with the treewalk and directory rename code.
2183 rw_enter(&zp->z_parent_lock, RW_WRITER);
2185 tx = dmu_tx_create(zfsvfs->z_os);
2186 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
2187 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2188 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2189 zfs_sa_upgrade_txholds(tx, zp);
2190 zfs_sa_upgrade_txholds(tx, dzp);
2191 dmu_tx_mark_netfree(tx);
2192 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
2194 rw_exit(&zp->z_parent_lock);
2195 rw_exit(&zp->z_name_lock);
2196 zfs_dirent_unlock(dl);
2197 if (error == ERESTART) {
2210 error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
2213 uint64_t txtype = TX_RMDIR;
2214 if (flags & FIGNORECASE)
2216 zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
2221 rw_exit(&zp->z_parent_lock);
2222 rw_exit(&zp->z_name_lock);
2224 zfs_dirent_unlock(dl);
2226 zfs_inode_update(dzp);
2227 zfs_inode_update(zp);
2230 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2231 zil_commit(zilog, 0);
2238 * Read as many directory entries as will fit into the provided
2239 * dirent buffer from the given directory cursor position.
2241 * IN: ip - inode of directory to read.
2242 * dirent - buffer for directory entries.
2244 * OUT: dirent - filler buffer of directory entries.
2246 * RETURN: 0 if success
2247 * error code if failure
2250 * ip - atime updated
2252 * Note that the low 4 bits of the cookie returned by zap are always zero.
2253 * This allows us to use the low range for "special" directory entries:
2254 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
2255 * we use the offset 2 for the '.zfs' directory.
2259 zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr)
2261 znode_t *zp = ITOZ(ip);
2262 zfsvfs_t *zfsvfs = ITOZSB(ip);
2265 zap_attribute_t zap;
2271 uint64_t offset; /* must be unsigned; checks for < 1 */
2276 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
2277 &parent, sizeof (parent))) != 0)
2281 * Quit if directory has been removed (posix)
2289 prefetch = zp->z_zn_prefetch;
2292 * Initialize the iterator cursor.
2296 * Start iteration from the beginning of the directory.
2298 zap_cursor_init(&zc, os, zp->z_id);
2301 * The offset is a serialized cursor.
2303 zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
2307 * Transform to file-system independent format
2312 * Special case `.', `..', and `.zfs'.
2315 (void) strcpy(zap.za_name, ".");
2316 zap.za_normalization_conflict = 0;
2319 } else if (offset == 1) {
2320 (void) strcpy(zap.za_name, "..");
2321 zap.za_normalization_conflict = 0;
2324 } else if (offset == 2 && zfs_show_ctldir(zp)) {
2325 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
2326 zap.za_normalization_conflict = 0;
2327 objnum = ZFSCTL_INO_ROOT;
2333 if ((error = zap_cursor_retrieve(&zc, &zap))) {
2334 if (error == ENOENT)
2341 * Allow multiple entries provided the first entry is
2342 * the object id. Non-zpl consumers may safely make
2343 * use of the additional space.
2345 * XXX: This should be a feature flag for compatibility
2347 if (zap.za_integer_length != 8 ||
2348 zap.za_num_integers == 0) {
2349 cmn_err(CE_WARN, "zap_readdir: bad directory "
2350 "entry, obj = %lld, offset = %lld, "
2351 "length = %d, num = %lld\n",
2352 (u_longlong_t)zp->z_id,
2353 (u_longlong_t)offset,
2354 zap.za_integer_length,
2355 (u_longlong_t)zap.za_num_integers);
2356 error = SET_ERROR(ENXIO);
2360 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
2361 type = ZFS_DIRENT_TYPE(zap.za_first_integer);
2364 done = !zpl_dir_emit(ctx, zap.za_name, strlen(zap.za_name),
2369 /* Prefetch znode */
2371 dmu_prefetch(os, objnum, 0, 0, 0,
2372 ZIO_PRIORITY_SYNC_READ);
2376 * Move to the next entry, fill in the previous offset.
2378 if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
2379 zap_cursor_advance(&zc);
2380 offset = zap_cursor_serialize(&zc);
2386 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
2389 zap_cursor_fini(&zc);
2390 if (error == ENOENT)
2398 ulong_t zfs_fsync_sync_cnt = 4;
2401 zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
2403 znode_t *zp = ITOZ(ip);
2404 zfsvfs_t *zfsvfs = ITOZSB(ip);
2406 (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
2408 if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
2411 zil_commit(zfsvfs->z_log, zp->z_id);
2414 tsd_set(zfs_fsyncer_key, NULL);
2421 * Get the requested file attributes and place them in the provided
2424 * IN: ip - inode of file.
2425 * vap - va_mask identifies requested attributes.
2426 * If ATTR_XVATTR set, then optional attrs are requested
2427 * flags - ATTR_NOACLCHECK (CIFS server context)
2428 * cr - credentials of caller.
2430 * OUT: vap - attribute values.
2432 * RETURN: 0 (always succeeds)
2436 zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
2438 znode_t *zp = ITOZ(ip);
2439 zfsvfs_t *zfsvfs = ITOZSB(ip);
2442 uint64_t atime[2], mtime[2], ctime[2];
2443 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2444 xoptattr_t *xoap = NULL;
2445 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2446 sa_bulk_attr_t bulk[3];
2452 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
2454 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
2455 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
2456 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
2458 if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
2464 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
2465 * Also, if we are the owner don't bother, since owner should
2466 * always be allowed to read basic attributes of file.
2468 if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
2469 (vap->va_uid != crgetuid(cr))) {
2470 if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2478 * Return all attributes. It's cheaper to provide the answer
2479 * than to determine whether we were asked the question.
2482 mutex_enter(&zp->z_lock);
2483 vap->va_type = vn_mode_to_vtype(zp->z_mode);
2484 vap->va_mode = zp->z_mode;
2485 vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
2486 vap->va_nodeid = zp->z_id;
2487 if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
2488 links = ZTOI(zp)->i_nlink + 1;
2490 links = ZTOI(zp)->i_nlink;
2491 vap->va_nlink = MIN(links, ZFS_LINK_MAX);
2492 vap->va_size = i_size_read(ip);
2493 vap->va_rdev = ip->i_rdev;
2494 vap->va_seq = ip->i_generation;
2497 * Add in any requested optional attributes and the create time.
2498 * Also set the corresponding bits in the returned attribute bitmap.
2500 if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
2501 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2503 ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2504 XVA_SET_RTN(xvap, XAT_ARCHIVE);
2507 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2508 xoap->xoa_readonly =
2509 ((zp->z_pflags & ZFS_READONLY) != 0);
2510 XVA_SET_RTN(xvap, XAT_READONLY);
2513 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2515 ((zp->z_pflags & ZFS_SYSTEM) != 0);
2516 XVA_SET_RTN(xvap, XAT_SYSTEM);
2519 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2521 ((zp->z_pflags & ZFS_HIDDEN) != 0);
2522 XVA_SET_RTN(xvap, XAT_HIDDEN);
2525 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2526 xoap->xoa_nounlink =
2527 ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2528 XVA_SET_RTN(xvap, XAT_NOUNLINK);
2531 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2532 xoap->xoa_immutable =
2533 ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2534 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2537 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2538 xoap->xoa_appendonly =
2539 ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2540 XVA_SET_RTN(xvap, XAT_APPENDONLY);
2543 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2545 ((zp->z_pflags & ZFS_NODUMP) != 0);
2546 XVA_SET_RTN(xvap, XAT_NODUMP);
2549 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2551 ((zp->z_pflags & ZFS_OPAQUE) != 0);
2552 XVA_SET_RTN(xvap, XAT_OPAQUE);
2555 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2556 xoap->xoa_av_quarantined =
2557 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2558 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2561 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2562 xoap->xoa_av_modified =
2563 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2564 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2567 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2568 S_ISREG(ip->i_mode)) {
2569 zfs_sa_get_scanstamp(zp, xvap);
2572 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
2575 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
2576 times, sizeof (times));
2577 ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
2578 XVA_SET_RTN(xvap, XAT_CREATETIME);
2581 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2582 xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2583 XVA_SET_RTN(xvap, XAT_REPARSE);
2585 if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2586 xoap->xoa_generation = ip->i_generation;
2587 XVA_SET_RTN(xvap, XAT_GEN);
2590 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2592 ((zp->z_pflags & ZFS_OFFLINE) != 0);
2593 XVA_SET_RTN(xvap, XAT_OFFLINE);
2596 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2598 ((zp->z_pflags & ZFS_SPARSE) != 0);
2599 XVA_SET_RTN(xvap, XAT_SPARSE);
2602 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
2603 xoap->xoa_projinherit =
2604 ((zp->z_pflags & ZFS_PROJINHERIT) != 0);
2605 XVA_SET_RTN(xvap, XAT_PROJINHERIT);
2608 if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
2609 xoap->xoa_projid = zp->z_projid;
2610 XVA_SET_RTN(xvap, XAT_PROJID);
2614 ZFS_TIME_DECODE(&vap->va_atime, atime);
2615 ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2616 ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2618 mutex_exit(&zp->z_lock);
2620 sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
2622 if (zp->z_blksz == 0) {
2624 * Block size hasn't been set; suggest maximal I/O transfers.
2626 vap->va_blksize = zfsvfs->z_max_blksz;
2634 * Get the basic file attributes and place them in the provided kstat
2635 * structure. The inode is assumed to be the authoritative source
2636 * for most of the attributes. However, the znode currently has the
2637 * authoritative atime, blksize, and block count.
2639 * IN: ip - inode of file.
2641 * OUT: sp - kstat values.
2643 * RETURN: 0 (always succeeds)
2647 zfs_getattr_fast(struct inode *ip, struct kstat *sp)
2649 znode_t *zp = ITOZ(ip);
2650 zfsvfs_t *zfsvfs = ITOZSB(ip);
2652 u_longlong_t nblocks;
2657 mutex_enter(&zp->z_lock);
2659 generic_fillattr(ip, sp);
2661 * +1 link count for root inode with visible '.zfs' directory.
2663 if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
2664 if (sp->nlink < ZFS_LINK_MAX)
2667 sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
2668 sp->blksize = blksize;
2669 sp->blocks = nblocks;
2671 if (unlikely(zp->z_blksz == 0)) {
2673 * Block size hasn't been set; suggest maximal I/O transfers.
2675 sp->blksize = zfsvfs->z_max_blksz;
2678 mutex_exit(&zp->z_lock);
2681 * Required to prevent NFS client from detecting different inode
2682 * numbers of snapshot root dentry before and after snapshot mount.
2684 if (zfsvfs->z_issnap) {
2685 if (ip->i_sb->s_root->d_inode == ip)
2686 sp->ino = ZFSCTL_INO_SNAPDIRS -
2687 dmu_objset_id(zfsvfs->z_os);
2696 * For the operation of changing file's user/group/project, we need to
2697 * handle not only the main object that is assigned to the file directly,
2698 * but also the ones that are used by the file via hidden xattr directory.
2700 * Because the xattr directory may contain many EA entries, it may be
2701 * impossible to change all of them within the same transaction that changes
2702 * the main object's user/group/project attributes. Instead we change them
2703 * one by one via multiple independent transactions. This is not an ideal
2704 * solution, but we have no better idea yet.
2707 zfs_setattr_dir(znode_t *dzp)
2709 struct inode *dxip = ZTOI(dzp);
2710 struct inode *xip = NULL;
2711 zfsvfs_t *zfsvfs = ITOZSB(dxip);
2712 objset_t *os = zfsvfs->z_os;
2714 zap_attribute_t zap;
2717 dmu_tx_t *tx = NULL;
2719 sa_bulk_attr_t bulk[4];
2723 zap_cursor_init(&zc, os, dzp->z_id);
2724 while ((err = zap_cursor_retrieve(&zc, &zap)) == 0) {
2726 if (zap.za_integer_length != 8 || zap.za_num_integers != 1) {
2731 err = zfs_dirent_lock(&dl, dzp, (char *)zap.za_name, &zp,
2732 ZEXISTS, NULL, NULL);
2739 if (KUID_TO_SUID(xip->i_uid) == KUID_TO_SUID(dxip->i_uid) &&
2740 KGID_TO_SGID(xip->i_gid) == KGID_TO_SGID(dxip->i_gid) &&
2741 zp->z_projid == dzp->z_projid)
2744 tx = dmu_tx_create(os);
2745 if (!(zp->z_pflags & ZFS_PROJID))
2746 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2748 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2750 err = dmu_tx_assign(tx, TXG_WAIT);
2754 mutex_enter(&dzp->z_lock);
2756 if (KUID_TO_SUID(xip->i_uid) != KUID_TO_SUID(dxip->i_uid)) {
2757 xip->i_uid = dxip->i_uid;
2758 uid = zfs_uid_read(dxip);
2759 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
2760 &uid, sizeof (uid));
2763 if (KGID_TO_SGID(xip->i_gid) != KGID_TO_SGID(dxip->i_gid)) {
2764 xip->i_gid = dxip->i_gid;
2765 gid = zfs_gid_read(dxip);
2766 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
2767 &gid, sizeof (gid));
2770 if (zp->z_projid != dzp->z_projid) {
2771 if (!(zp->z_pflags & ZFS_PROJID)) {
2772 zp->z_pflags |= ZFS_PROJID;
2773 SA_ADD_BULK_ATTR(bulk, count,
2774 SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags,
2775 sizeof (zp->z_pflags));
2778 zp->z_projid = dzp->z_projid;
2779 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PROJID(zfsvfs),
2780 NULL, &zp->z_projid, sizeof (zp->z_projid));
2783 mutex_exit(&dzp->z_lock);
2785 if (likely(count > 0)) {
2786 err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
2792 if (err != 0 && err != ENOENT)
2799 zfs_dirent_unlock(dl);
2801 zap_cursor_advance(&zc);
2808 zfs_dirent_unlock(dl);
2810 zap_cursor_fini(&zc);
2812 return (err == ENOENT ? 0 : err);
2816 * Set the file attributes to the values contained in the vattr structure.
2819 * IN: ip - inode of file to be modified.
2820 * vap - new attribute values.
2821 * If ATTR_XVATTR set, then optional attrs are being set
2822 * flags - ATTR_UTIME set if non-default time values provided.
2823 * - ATTR_NOACLCHECK (CIFS context only).
2824 * cr - credentials of caller.
2826 * RETURN: 0 if success
2827 * error code if failure
2830 * ip - ctime updated, mtime updated if size changed.
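/*
 * Illustrative caller sketch (hypothetical values, assuming a zeroed
 * vattr_t); a chmod-style update would set only the mode and the
 * corresponding mask bit:
 *
 *	vattr_t va = { 0 };
 *	va.va_mask = ATTR_MODE;
 *	va.va_mode = 0644;
 *	error = zfs_setattr(ip, &va, 0, cr);
 *
 * Optional attributes (xvattr_t, e.g. XAT_PROJID) are only honored when
 * ATTR_XVATTR is also present in va_mask, as checked at the top of the
 * function.
 */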
2834 zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
2836 znode_t *zp = ITOZ(ip);
2837 zfsvfs_t *zfsvfs = ITOZSB(ip);
2838 objset_t *os = zfsvfs->z_os;
2842 xvattr_t *tmpxvattr;
2843 uint_t mask = vap->va_mask;
2844 uint_t saved_mask = 0;
2847 uint64_t new_kuid = 0, new_kgid = 0, new_uid, new_gid;
2849 uint64_t mtime[2], ctime[2], atime[2];
2850 uint64_t projid = ZFS_INVALID_PROJID;
2852 int need_policy = FALSE;
2854 zfs_fuid_info_t *fuidp = NULL;
2855 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2858 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2859 boolean_t fuid_dirtied = B_FALSE;
2860 boolean_t handle_eadir = B_FALSE;
2861 sa_bulk_attr_t *bulk, *xattr_bulk;
2862 int count = 0, xattr_count = 0, bulks = 8;
2871 * If this is a xvattr_t, then get a pointer to the structure of
2872 * optional attributes. If this is NULL, then we have a vattr_t.
2874 xoap = xva_getxoptattr(xvap);
2875 if (xoap != NULL && (mask & ATTR_XVATTR)) {
2876 if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
2877 if (!dmu_objset_projectquota_enabled(os) ||
2878 (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode))) {
2880 return (SET_ERROR(ENOTSUP));
2883 projid = xoap->xoa_projid;
2884 if (unlikely(projid == ZFS_INVALID_PROJID)) {
2886 return (SET_ERROR(EINVAL));
2889 if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
2890 projid = ZFS_INVALID_PROJID;
2895 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
2896 (xoap->xoa_projinherit !=
2897 ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
2898 (!dmu_objset_projectquota_enabled(os) ||
2899 (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode)))) {
2901 return (SET_ERROR(ENOTSUP));
2905 zilog = zfsvfs->z_log;
2908 * Make sure that if we have an ephemeral uid/gid or an xvattr specified,
2909 * the file system is at the proper version level.
2912 if (zfsvfs->z_use_fuids == B_FALSE &&
2913 (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2914 ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2915 (mask & ATTR_XVATTR))) {
2917 return (SET_ERROR(EINVAL));
2920 if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
2922 return (SET_ERROR(EISDIR));
2925 if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
2927 return (SET_ERROR(EINVAL));
2930 tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
2931 xva_init(tmpxvattr);
2933 bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
2934 xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
2937 * Immutable files may only have their immutable bit and atime altered.
2939 if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2940 ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
2941 ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2942 err = SET_ERROR(EPERM);
2946 if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
2947 err = SET_ERROR(EPERM);
2952 * Verify the timestamps don't overflow 32 bits.
2953 * ZFS can handle large timestamps, but 32-bit syscalls can't
2954 * handle times greater than 2039. This check should be removed
2955 * once large timestamps are fully supported.
2957 if (mask & (ATTR_ATIME | ATTR_MTIME)) {
2958 if (((mask & ATTR_ATIME) &&
2959 TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2960 ((mask & ATTR_MTIME) &&
2961 TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2962 err = SET_ERROR(EOVERFLOW);
2971 /* Can this be moved to before the top label? */
2972 if (zfs_is_readonly(zfsvfs)) {
2973 err = SET_ERROR(EROFS);
2978 * First validate permissions
2981 if (mask & ATTR_SIZE) {
2982 err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
2987 * XXX - Note, we are not providing any open
2988 * mode flags here (like FNDELAY), so we may
2989 * block if there are locks present... this
2990 * should be addressed in openat().
2992 /* XXX - would it be OK to generate a log record here? */
2993 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2998 if (mask & (ATTR_ATIME|ATTR_MTIME) ||
2999 ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
3000 XVA_ISSET_REQ(xvap, XAT_READONLY) ||
3001 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
3002 XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
3003 XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
3004 XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
3005 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
3006 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
3010 if (mask & (ATTR_UID|ATTR_GID)) {
3011 int idmask = (mask & (ATTR_UID|ATTR_GID));
3016 * NOTE: even if a new mode is being set,
3017 * we may clear S_ISUID/S_ISGID bits.
3020 if (!(mask & ATTR_MODE))
3021 vap->va_mode = zp->z_mode;
3024 * Take ownership or chgrp to group we are a member of
3027 take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
3028 take_group = (mask & ATTR_GID) &&
3029 zfs_groupmember(zfsvfs, vap->va_gid, cr);
3032 * If both ATTR_UID and ATTR_GID are set then take_owner and
3033 * take_group must both be set in order to allow taking
3036 * Otherwise, send the check through secpolicy_vnode_setattr()
3040 if (((idmask == (ATTR_UID|ATTR_GID)) &&
3041 take_owner && take_group) ||
3042 ((idmask == ATTR_UID) && take_owner) ||
3043 ((idmask == ATTR_GID) && take_group)) {
3044 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
3045 skipaclchk, cr) == 0) {
3047 * Remove setuid/setgid for non-privileged users
3049 (void) secpolicy_setid_clear(vap, cr);
3050 trim_mask = (mask & (ATTR_UID|ATTR_GID));
3059 mutex_enter(&zp->z_lock);
3060 oldva.va_mode = zp->z_mode;
3061 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
3062 if (mask & ATTR_XVATTR) {
3064 * Update xvattr mask to include only those attributes
3065 * that are actually changing.
3067 * the bits will be restored prior to actually setting
3068 * the attributes so the caller thinks they were set.
3070 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
3071 if (xoap->xoa_appendonly !=
3072 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
3075 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
3076 XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
3080 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
3081 if (xoap->xoa_projinherit !=
3082 ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
3085 XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
3086 XVA_SET_REQ(tmpxvattr, XAT_PROJINHERIT);
3090 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
3091 if (xoap->xoa_nounlink !=
3092 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
3095 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
3096 XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
3100 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
3101 if (xoap->xoa_immutable !=
3102 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
3105 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
3106 XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
3110 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
3111 if (xoap->xoa_nodump !=
3112 ((zp->z_pflags & ZFS_NODUMP) != 0)) {
3115 XVA_CLR_REQ(xvap, XAT_NODUMP);
3116 XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
3120 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
3121 if (xoap->xoa_av_modified !=
3122 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
3125 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
3126 XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
3130 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
3131 if ((!S_ISREG(ip->i_mode) &&
3132 xoap->xoa_av_quarantined) ||
3133 xoap->xoa_av_quarantined !=
3134 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
3137 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
3138 XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
3142 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
3143 mutex_exit(&zp->z_lock);
3144 err = SET_ERROR(EPERM);
3148 if (need_policy == FALSE &&
3149 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
3150 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
3155 mutex_exit(&zp->z_lock);
3157 if (mask & ATTR_MODE) {
3158 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
3159 err = secpolicy_setid_setsticky_clear(ip, vap,
3164 trim_mask |= ATTR_MODE;
3172 * If trim_mask is set then take ownership
3173 * has been granted or write_acl is present and user
3174 * has the ability to modify mode. In that case remove
3175 * UID|GID and/or MODE from mask so that
3176 * secpolicy_vnode_setattr() doesn't revoke it.
3180 saved_mask = vap->va_mask;
3181 vap->va_mask &= ~trim_mask;
3183 err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
3184 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
3189 vap->va_mask |= saved_mask;
3193 * secpolicy_vnode_setattr, or take ownership, may have changed va_mask.
3196 mask = vap->va_mask;
3198 if ((mask & (ATTR_UID | ATTR_GID)) || projid != ZFS_INVALID_PROJID) {
3199 handle_eadir = B_TRUE;
3200 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
3201 &xattr_obj, sizeof (xattr_obj));
3203 if (err == 0 && xattr_obj) {
3204 err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
3208 if (mask & ATTR_UID) {
3209 new_kuid = zfs_fuid_create(zfsvfs,
3210 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
3211 if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
3212 zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
3216 err = SET_ERROR(EDQUOT);
3221 if (mask & ATTR_GID) {
3222 new_kgid = zfs_fuid_create(zfsvfs,
3223 (uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
3224 if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
3225 zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
3229 err = SET_ERROR(EDQUOT);
3234 if (projid != ZFS_INVALID_PROJID &&
3235 zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
3242 tx = dmu_tx_create(os);
3244 if (mask & ATTR_MODE) {
3245 uint64_t pmode = zp->z_mode;
3247 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
3249 zfs_acl_chmod_setattr(zp, &aclp, new_mode);
3251 mutex_enter(&zp->z_lock);
3252 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
3254 * Are we upgrading ACL from old V0 format
3257 if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
3258 zfs_znode_acl_version(zp) ==
3259 ZFS_ACL_VERSION_INITIAL) {
3260 dmu_tx_hold_free(tx, acl_obj, 0,
3262 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3263 0, aclp->z_acl_bytes);
3265 dmu_tx_hold_write(tx, acl_obj, 0,
3268 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3269 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3270 0, aclp->z_acl_bytes);
3272 mutex_exit(&zp->z_lock);
3273 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3275 if (((mask & ATTR_XVATTR) &&
3276 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
3277 (projid != ZFS_INVALID_PROJID &&
3278 !(zp->z_pflags & ZFS_PROJID)))
3279 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3281 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3285 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
3288 fuid_dirtied = zfsvfs->z_fuid_dirty;
3290 zfs_fuid_txhold(zfsvfs, tx);
3292 zfs_sa_upgrade_txholds(tx, zp);
3294 err = dmu_tx_assign(tx, TXG_WAIT);
3300 * Set each attribute requested.
3301 * We group settings according to the locks they need to acquire.
3303 * Note: you cannot set ctime directly, although it will be
3304 * updated as a side-effect of calling this function.
3307 if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
3309 * For an existing object that was upgraded from an old system,
3310 * the on-disk layout has no slot for the project ID attribute.
3311 * But the quota accounting logic needs to access the related slots
3312 * by offset directly, so we need to adjust the old object's layout
3313 * to place the project ID at a unified, fixed offset.
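/*
 * Note: sa_add_projid() below rewrites the SA layout; EEXIST simply means
 * the layout already has a project ID slot, in which case projid is reset
 * to ZFS_INVALID_PROJID so the explicit updates further down are skipped.
 */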
3316 err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
3318 err = sa_add_projid(zp->z_sa_hdl, tx, projid);
3320 if (unlikely(err == EEXIST))
3325 projid = ZFS_INVALID_PROJID;
3328 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3329 mutex_enter(&zp->z_acl_lock);
3330 mutex_enter(&zp->z_lock);
3332 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3333 &zp->z_pflags, sizeof (zp->z_pflags));
3336 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3337 mutex_enter(&attrzp->z_acl_lock);
3338 mutex_enter(&attrzp->z_lock);
3339 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3340 SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
3341 sizeof (attrzp->z_pflags));
3342 if (projid != ZFS_INVALID_PROJID) {
3343 attrzp->z_projid = projid;
3344 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3345 SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
3346 sizeof (attrzp->z_projid));
3350 if (mask & (ATTR_UID|ATTR_GID)) {
3352 if (mask & ATTR_UID) {
3353 ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid);
3354 new_uid = zfs_uid_read(ZTOI(zp));
3355 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
3356 &new_uid, sizeof (new_uid));
3358 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3359 SA_ZPL_UID(zfsvfs), NULL, &new_uid,
3361 ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid);
3365 if (mask & ATTR_GID) {
3366 ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid);
3367 new_gid = zfs_gid_read(ZTOI(zp));
3368 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
3369 NULL, &new_gid, sizeof (new_gid));
3371 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3372 SA_ZPL_GID(zfsvfs), NULL, &new_gid,
3374 ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid);
3377 if (!(mask & ATTR_MODE)) {
3378 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
3379 NULL, &new_mode, sizeof (new_mode));
3380 new_mode = zp->z_mode;
3382 err = zfs_acl_chown_setattr(zp);
3385 err = zfs_acl_chown_setattr(attrzp);
3390 if (mask & ATTR_MODE) {
3391 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
3392 &new_mode, sizeof (new_mode));
3393 zp->z_mode = ZTOI(zp)->i_mode = new_mode;
3394 ASSERT3P(aclp, !=, NULL);
3395 err = zfs_aclset_common(zp, aclp, cr, tx);
3397 if (zp->z_acl_cached)
3398 zfs_acl_free(zp->z_acl_cached);
3399 zp->z_acl_cached = aclp;
3403 if ((mask & ATTR_ATIME) || zp->z_atime_dirty) {
3404 zp->z_atime_dirty = 0;
3405 ZFS_TIME_ENCODE(&ip->i_atime, atime);
3406 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
3407 &atime, sizeof (atime));
3410 if (mask & (ATTR_MTIME | ATTR_SIZE)) {
3411 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
3412 ZTOI(zp)->i_mtime = zpl_inode_timespec_trunc(vap->va_mtime,
3413 ZTOI(zp)->i_sb->s_time_gran);
3415 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3416 mtime, sizeof (mtime));
3419 if (mask & (ATTR_CTIME | ATTR_SIZE)) {
3420 ZFS_TIME_ENCODE(&vap->va_ctime, ctime);
3421 ZTOI(zp)->i_ctime = zpl_inode_timespec_trunc(vap->va_ctime,
3422 ZTOI(zp)->i_sb->s_time_gran);
3423 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3424 ctime, sizeof (ctime));
3427 if (projid != ZFS_INVALID_PROJID) {
3428 zp->z_projid = projid;
3429 SA_ADD_BULK_ATTR(bulk, count,
3430 SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
3431 sizeof (zp->z_projid));
3434 if (attrzp && mask) {
3435 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3436 SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
3441 * Do this after setting timestamps to prevent timestamp
3442 * update from toggling bit
3445 if (xoap && (mask & ATTR_XVATTR)) {
3448 * Restore the trimmed-off masks
3449 * so that return masks can be set for the caller.
3452 if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
3453 XVA_SET_REQ(xvap, XAT_APPENDONLY);
3455 if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
3456 XVA_SET_REQ(xvap, XAT_NOUNLINK);
3458 if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
3459 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3461 if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
3462 XVA_SET_REQ(xvap, XAT_NODUMP);
3464 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
3465 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3467 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
3468 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3470 if (XVA_ISSET_REQ(tmpxvattr, XAT_PROJINHERIT)) {
3471 XVA_SET_REQ(xvap, XAT_PROJINHERIT);
3474 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3475 ASSERT(S_ISREG(ip->i_mode));
3477 zfs_xvattr_set(zp, xvap, tx);
3481 zfs_fuid_sync(zfsvfs, tx);
3484 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
3486 mutex_exit(&zp->z_lock);
3487 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3488 mutex_exit(&zp->z_acl_lock);
3491 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
3492 mutex_exit(&attrzp->z_acl_lock);
3493 mutex_exit(&attrzp->z_lock);
3496 if (err == 0 && xattr_count > 0) {
3497 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3506 zfs_fuid_info_free(fuidp);
3514 if (err == ERESTART)
3518 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
3521 if (err2 == 0 && handle_eadir)
3522 err2 = zfs_setattr_dir(attrzp);
3525 zfs_inode_update(zp);
3529 if (os->os_sync == ZFS_SYNC_ALWAYS)
3530 zil_commit(zilog, 0);
3533 kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * bulks);
3534 kmem_free(bulk, sizeof (sa_bulk_attr_t) * bulks);
3535 kmem_free(tmpxvattr, sizeof (xvattr_t));
3540 typedef struct zfs_zlock {
3541 krwlock_t *zl_rwlock; /* lock we acquired */
3542 znode_t *zl_znode; /* znode we held */
3543 struct zfs_zlock *zl_next; /* next in list */
3547 * Drop locks and release vnodes that were held by zfs_rename_lock().
3550 zfs_rename_unlock(zfs_zlock_t **zlpp)
3554 while ((zl = *zlpp) != NULL) {
3555 if (zl->zl_znode != NULL)
3556 zfs_iput_async(ZTOI(zl->zl_znode));
3557 rw_exit(zl->zl_rwlock);
3558 *zlpp = zl->zl_next;
3559 kmem_free(zl, sizeof (*zl));
3564 * Search back through the directory tree, using the ".." entries.
3565 * Lock each directory in the chain to prevent concurrent renames.
3566 * Fail any attempt to move a directory into one of its own descendants.
3567 * XXX - z_parent_lock can overlap with map or grow locks
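/*
 * For example, a rename of /usr/a/b into /usr/a/b/c/d must fail: walking
 * the ".." chain upward from the target directory reaches b (the source
 * znode) before the filesystem root, and the loop below returns EINVAL
 * when that happens.
 */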
3570 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
3574 uint64_t rootid = ZTOZSB(zp)->z_root;
3575 uint64_t oidp = zp->z_id;
3576 krwlock_t *rwlp = &szp->z_parent_lock;
3577 krw_t rw = RW_WRITER;
3580 * First pass write-locks szp and compares to zp->z_id.
3581 * Later passes read-lock zp and compare to zp->z_parent.
3584 if (!rw_tryenter(rwlp, rw)) {
3586 * Another thread is renaming in this path.
3587 * Note that if we are a WRITER, we don't have any
3588 * parent_locks held yet.
3590 if (rw == RW_READER && zp->z_id > szp->z_id) {
3592 * Drop our locks and restart
3594 zfs_rename_unlock(&zl);
3598 rwlp = &szp->z_parent_lock;
3603 * Wait for other thread to drop its locks
3609 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3610 zl->zl_rwlock = rwlp;
3611 zl->zl_znode = NULL;
3612 zl->zl_next = *zlpp;
3615 if (oidp == szp->z_id) /* We're a descendant of szp */
3616 return (SET_ERROR(EINVAL));
3618 if (oidp == rootid) /* We've hit the top */
3621 if (rw == RW_READER) { /* i.e. not the first pass */
3622 int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
3627 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
3628 &oidp, sizeof (oidp));
3629 rwlp = &zp->z_parent_lock;
3632 } while (zp->z_id != sdzp->z_id);
3638 * Move an entry from the provided source directory to the target
3639 * directory. Change the entry name as indicated.
3641 * IN: sdip - Source directory containing the "old entry".
3642 * snm - Old entry name.
3643 * tdip - Target directory to contain the "new entry".
3644 * tnm - New entry name.
3645 * cr - credentials of caller.
3646 * flags - case flags
3648 * RETURN: 0 on success, error code on failure.
3651 * sdip,tdip - ctime|mtime updated
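/*
 * Note that both directories must live on the same ZFS dataset; renames
 * that cross an i_sb boundary (for example into the .zfs control directory
 * or across a snapshot) are rejected with EXDEV below, and userspace tools
 * such as mv typically fall back to copy-and-remove in that case.
 */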
3655 zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
3656 cred_t *cr, int flags)
3658 znode_t *tdzp, *szp, *tzp;
3659 znode_t *sdzp = ITOZ(sdip);
3660 zfsvfs_t *zfsvfs = ITOZSB(sdip);
3662 zfs_dirlock_t *sdl, *tdl;
3665 int cmp, serr, terr;
3668 boolean_t waited = B_FALSE;
3670 if (snm == NULL || tnm == NULL)
3671 return (SET_ERROR(EINVAL));
3674 ZFS_VERIFY_ZP(sdzp);
3675 zilog = zfsvfs->z_log;
3678 ZFS_VERIFY_ZP(tdzp);
3681 * We check i_sb because snapshots and the ctldir must have different
3684 if (tdip->i_sb != sdip->i_sb || zfsctl_is_node(tdip)) {
3686 return (SET_ERROR(EXDEV));
3689 if (zfsvfs->z_utf8 && u8_validate(tnm,
3690 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3692 return (SET_ERROR(EILSEQ));
3695 if (flags & FIGNORECASE)
3704 * This is to prevent the creation of links into attribute space
3705 * by renaming a linked file into/out of an attribute directory.
3706 * See the comment in zfs_link() for why this is considered bad.
3708 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3710 return (SET_ERROR(EINVAL));
3714 * Lock source and target directory entries. To prevent deadlock,
3715 * a lock ordering must be defined. We lock the directory with
3716 * the smallest object id first, or if it's a tie, the one with
3717 * the lexically first name.
3719 if (sdzp->z_id < tdzp->z_id) {
3721 } else if (sdzp->z_id > tdzp->z_id) {
3725 * First compare the two name arguments without
3726 * considering any case folding.
3728 int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
3730 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3731 ASSERT(error == 0 || !zfsvfs->z_utf8);
3734 * POSIX: "If the old argument and the new argument
3735 * both refer to links to the same existing file,
3736 * the rename() function shall return successfully
3737 * and perform no other action."
3743 * If the file system is case-folding, then we may
3744 * have some more checking to do. A case-folding file
3745 * system is either supporting mixed case sensitivity
3746 * access or is completely case-insensitive. Note
3747 * that the file system is always case preserving.
3749 * In mixed sensitivity mode case sensitive behavior
3750 * is the default. FIGNORECASE must be used to
3751 * explicitly request case insensitive behavior.
3753 * If the source and target names provided differ only
3754 * by case (e.g., a request to rename 'tim' to 'Tim'),
3755 * we will treat this as a special case in the
3756 * case-insensitive mode: as long as the source name
3757 * is an exact match, we will allow this to proceed as
3758 * a name-change request.
3760 if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
3761 (zfsvfs->z_case == ZFS_CASE_MIXED &&
3762 flags & FIGNORECASE)) &&
3763 u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
3766 * case preserving rename request, require exact
3775 * If the source and destination directories are the same, we should
3776 * grab the z_name_lock of that directory only once.
3780 rw_enter(&sdzp->z_name_lock, RW_READER);
3784 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3785 ZEXISTS | zflg, NULL, NULL);
3786 terr = zfs_dirent_lock(&tdl,
3787 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3789 terr = zfs_dirent_lock(&tdl,
3790 tdzp, tnm, &tzp, zflg, NULL, NULL);
3791 serr = zfs_dirent_lock(&sdl,
3792 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3798 * Source entry invalid or not there.
3801 zfs_dirent_unlock(tdl);
3807 rw_exit(&sdzp->z_name_lock);
3809 if (strcmp(snm, "..") == 0)
3815 zfs_dirent_unlock(sdl);
3819 rw_exit(&sdzp->z_name_lock);
3821 if (strcmp(tnm, "..") == 0)
3828 * If we are using project inheritance, meaning the directory has
3829 * ZFS_PROJINHERIT set, then its descendant directories will inherit
3830 * not only the project ID but also the ZFS_PROJINHERIT flag. In that
3831 * case, we only allow renames into our tree when the project IDs match.
3834 if (tdzp->z_pflags & ZFS_PROJINHERIT &&
3835 tdzp->z_projid != szp->z_projid) {
3836 error = SET_ERROR(EXDEV);
3841 * Must have write access at the source to remove the old entry
3842 * and write access at the target to create the new entry.
3843 * Note that if target and source are the same, this can be
3844 * done in a single check.
3847 if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
3850 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3852 * Check to make sure rename is valid.
3853 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3855 if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
3860 * Does target exist?
3864 * Source and target must be the same type.
3866 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3867 if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
3868 error = SET_ERROR(ENOTDIR);
3872 if (S_ISDIR(ZTOI(tzp)->i_mode)) {
3873 error = SET_ERROR(EISDIR);
3878 * POSIX dictates that when the source and target
3879 * entries refer to the same file object, rename
3880 * must do nothing and exit without error.
3882 if (szp->z_id == tzp->z_id) {
3888 tx = dmu_tx_create(zfsvfs->z_os);
3889 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3890 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3891 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3892 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3894 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3895 zfs_sa_upgrade_txholds(tx, tdzp);
3898 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3899 zfs_sa_upgrade_txholds(tx, tzp);
3902 zfs_sa_upgrade_txholds(tx, szp);
3903 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3904 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
3907 zfs_rename_unlock(&zl);
3908 zfs_dirent_unlock(sdl);
3909 zfs_dirent_unlock(tdl);
3912 rw_exit(&sdzp->z_name_lock);
3914 if (error == ERESTART) {
3931 if (tzp) /* Attempt to remove the existing target */
3932 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3935 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3937 szp->z_pflags |= ZFS_AV_MODIFIED;
3938 if (tdzp->z_pflags & ZFS_PROJINHERIT)
3939 szp->z_pflags |= ZFS_PROJINHERIT;
3941 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3942 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3945 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3947 zfs_log_rename(zilog, tx, TX_RENAME |
3948 (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3949 sdl->dl_name, tdzp, tdl->dl_name, szp);
3952 * At this point, we have successfully created
3953 * the target name, but have failed to remove
3954 * the source name. Since the create was done
3955 * with the ZRENAMING flag, there are
3956 * complications; for one, the link count is
3957 * wrong. The easiest way to deal with this
3958 * is to remove the newly created target, and
3959 * return the original error. This must
3960 * succeed; fortunately, it is very unlikely to
3961 * fail, since we just created it.
3963 VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3964 ZRENAMING, NULL), ==, 0);
3968 * If we had removed the existing target, subsequent
3969 * call to zfs_link_create() to add back the same entry
3970 * but, the new dnode (szp) should not fail.
3972 ASSERT(tzp == NULL);
3979 zfs_rename_unlock(&zl);
3981 zfs_dirent_unlock(sdl);
3982 zfs_dirent_unlock(tdl);
3984 zfs_inode_update(sdzp);
3986 rw_exit(&sdzp->z_name_lock);
3989 zfs_inode_update(tdzp);
3991 zfs_inode_update(szp);
3994 zfs_inode_update(tzp);
3998 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3999 zil_commit(zilog, 0);
4006 * Insert the indicated symbolic reference entry into the directory.
4008 * IN: dip - Directory to contain new symbolic link.
4009 * name - Name for new symlink entry.
4010 * vap - Attributes of new entry.
4011 * link - Target path of new symlink.
4013 * cr - credentials of caller.
4014 * flags - case flags
4016 * RETURN: 0 on success, error code on failure.
4019 * dip - ctime|mtime updated
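/*
 * Illustrative call (hypothetical names): creating "alias" in dip pointing
 * at "/some/target" would pass name = "alias" and link = "/some/target".
 * The target string is stored either as the SA_ZPL_SYMLINK system
 * attribute or, for non-SA znodes, in the symlink's object data via
 * zfs_sa_symlink().
 */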
4023 zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
4024 struct inode **ipp, cred_t *cr, int flags)
4026 znode_t *zp, *dzp = ITOZ(dip);
4029 zfsvfs_t *zfsvfs = ITOZSB(dip);
4031 uint64_t len = strlen(link);
4034 zfs_acl_ids_t acl_ids;
4035 boolean_t fuid_dirtied;
4036 uint64_t txtype = TX_SYMLINK;
4037 boolean_t waited = B_FALSE;
4039 ASSERT(S_ISLNK(vap->va_mode));
4042 return (SET_ERROR(EINVAL));
4046 zilog = zfsvfs->z_log;
4048 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
4049 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4051 return (SET_ERROR(EILSEQ));
4053 if (flags & FIGNORECASE)
4056 if (len > MAXPATHLEN) {
4058 return (SET_ERROR(ENAMETOOLONG));
4061 if ((error = zfs_acl_ids_create(dzp, 0,
4062 vap, cr, NULL, &acl_ids)) != 0) {
4070 * Attempt to lock directory; fail if entry already exists.
4072 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
4074 zfs_acl_ids_free(&acl_ids);
4079 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
4080 zfs_acl_ids_free(&acl_ids);
4081 zfs_dirent_unlock(dl);
4086 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, ZFS_DEFAULT_PROJID)) {
4087 zfs_acl_ids_free(&acl_ids);
4088 zfs_dirent_unlock(dl);
4090 return (SET_ERROR(EDQUOT));
4092 tx = dmu_tx_create(zfsvfs->z_os);
4093 fuid_dirtied = zfsvfs->z_fuid_dirty;
4094 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
4095 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4096 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
4097 ZFS_SA_BASE_ATTR_SIZE + len);
4098 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
4099 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
4100 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
4101 acl_ids.z_aclp->z_acl_bytes);
4104 zfs_fuid_txhold(zfsvfs, tx);
4105 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
4107 zfs_dirent_unlock(dl);
4108 if (error == ERESTART) {
4114 zfs_acl_ids_free(&acl_ids);
4121 * Create a new object for the symlink.
4122 * For version 4 ZPL datasets the symlink will be an SA attribute.
4124 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
4127 zfs_fuid_sync(zfsvfs, tx);
4129 mutex_enter(&zp->z_lock);
4131 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
4134 zfs_sa_symlink(zp, link, len, tx);
4135 mutex_exit(&zp->z_lock);
4138 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
4139 &zp->z_size, sizeof (zp->z_size), tx);
4141 * Insert the new object into the directory.
4143 error = zfs_link_create(dl, zp, tx, ZNEW);
4145 zfs_znode_delete(zp, tx);
4146 remove_inode_hash(ZTOI(zp));
4148 if (flags & FIGNORECASE)
4150 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
4152 zfs_inode_update(dzp);
4153 zfs_inode_update(zp);
4156 zfs_acl_ids_free(&acl_ids);
4160 zfs_dirent_unlock(dl);
4165 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4166 zil_commit(zilog, 0);
4176 * Return, in the buffer contained in the provided uio structure,
4177 * the symbolic path referred to by ip.
4179 * IN: ip - inode of symbolic link
4180 * uio - structure to contain the link path.
4181 * cr - credentials of caller.
4183 * RETURN: 0 if success
4184 * error code if failure
4187 * ip - atime updated
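/*
 * The link target is copied out through the provided uio under zp->z_lock,
 * reading the SA_ZPL_SYMLINK attribute for SA-backed znodes and falling
 * back to zfs_sa_readlink() otherwise.
 */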
4191 zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr)
4193 znode_t *zp = ITOZ(ip);
4194 zfsvfs_t *zfsvfs = ITOZSB(ip);
4200 mutex_enter(&zp->z_lock);
4202 error = sa_lookup_uio(zp->z_sa_hdl,
4203 SA_ZPL_SYMLINK(zfsvfs), uio);
4205 error = zfs_sa_readlink(zp, uio);
4206 mutex_exit(&zp->z_lock);
4213 * Insert a new entry into directory tdip referencing sip.
4215 * IN: tdip - Directory to contain new entry.
4216 * sip - inode of new entry.
4217 * name - name of new entry.
4218 * cr - credentials of caller.
4220 * RETURN: 0 if success
4221 * error code if failure
4224 * tdip - ctime|mtime updated
4225 * sip - ctime updated
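/*
 * O_TMPFILE inodes (i_nlink == 0 with I_LINKABLE set) get special handling
 * below: linking one removes it from z_unlinkedobj, nothing is logged to
 * the ZIL for it, and the function instead waits for the transaction group
 * to sync before returning.
 */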
4229 zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr,
4232 znode_t *dzp = ITOZ(tdip);
4234 zfsvfs_t *zfsvfs = ITOZSB(tdip);
4242 boolean_t waited = B_FALSE;
4243 boolean_t is_tmpfile = 0;
4246 is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));
4248 ASSERT(S_ISDIR(tdip->i_mode));
4251 return (SET_ERROR(EINVAL));
4255 zilog = zfsvfs->z_log;
4258 * POSIX dictates that we return EPERM here.
4259 * Better choices include ENOTSUP or EISDIR.
4261 if (S_ISDIR(sip->i_mode)) {
4263 return (SET_ERROR(EPERM));
4270 * If we are using project inheritance, meaning the directory has
4271 * ZFS_PROJINHERIT set, then its descendant directories will inherit
4272 * not only the project ID but also the ZFS_PROJINHERIT flag. In that
4273 * case, we only allow hard link creation in our tree when the
4274 * project IDs are the same.
4276 if (dzp->z_pflags & ZFS_PROJINHERIT && dzp->z_projid != szp->z_projid) {
4278 return (SET_ERROR(EXDEV));
4282 * We check i_sb because snapshots and the ctldir must have different
4285 if (sip->i_sb != tdip->i_sb || zfsctl_is_node(sip)) {
4287 return (SET_ERROR(EXDEV));
4290 /* Prevent links to .zfs/shares files */
4292 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
4293 &parent, sizeof (uint64_t))) != 0) {
4297 if (parent == zfsvfs->z_shares_dir) {
4299 return (SET_ERROR(EPERM));
4302 if (zfsvfs->z_utf8 && u8_validate(name,
4303 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4305 return (SET_ERROR(EILSEQ));
4307 if (flags & FIGNORECASE)
4311 * We do not support links between attributes and non-attributes
4312 * because of the potential security risk of creating links
4313 * into "normal" file space in order to circumvent restrictions
4314 * imposed in attribute space.
4316 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
4318 return (SET_ERROR(EINVAL));
4321 owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
4323 if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
4325 return (SET_ERROR(EPERM));
4328 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
4335 * Attempt to lock directory; fail if entry already exists.
4337 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
4343 tx = dmu_tx_create(zfsvfs->z_os);
4344 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
4345 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4347 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
4349 zfs_sa_upgrade_txholds(tx, szp);
4350 zfs_sa_upgrade_txholds(tx, dzp);
4351 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
4353 zfs_dirent_unlock(dl);
4354 if (error == ERESTART) {
4364 /* unmark z_unlinked so zfs_link_create will not reject */
4366 szp->z_unlinked = 0;
4367 error = zfs_link_create(dl, szp, tx, 0);
4370 uint64_t txtype = TX_LINK;
4372 * A tmpfile is created in z_unlinkedobj, so remove it from there.
4373 * Also, we don't log to the ZIL, because all previous file
4374 * operations on the tmpfile are ignored by the ZIL. Instead we
4375 * always wait for the txg to sync to make sure all previous
4376 * operations are sync safe.
4379 VERIFY(zap_remove_int(zfsvfs->z_os,
4380 zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
4382 if (flags & FIGNORECASE)
4384 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
4386 } else if (is_tmpfile) {
4387 /* restore z_unlinked since linking failed */
4388 szp->z_unlinked = 1;
4390 txg = dmu_tx_get_txg(tx);
4393 zfs_dirent_unlock(dl);
4395 if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4396 zil_commit(zilog, 0);
4399 txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);
4401 zfs_inode_update(dzp);
4402 zfs_inode_update(szp);
4408 zfs_putpage_commit_cb(void *arg)
4410 struct page *pp = arg;
4413 end_page_writeback(pp);
4417 * Push a page out to disk, once the page is on stable storage the
4418 * registered commit callback will be run as notification of completion.
4420 * IN: ip - inode the page is mapped to.
4421 * pp - page to push (page is locked)
4422 * wbc - writeback control data
4424 * RETURN: 0 if success
4425 * error code if failure
4428 * ip - ctime|mtime updated
4432 zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
4434 znode_t *zp = ITOZ(ip);
4435 zfsvfs_t *zfsvfs = ITOZSB(ip);
4442 uint64_t mtime[2], ctime[2];
4443 sa_bulk_attr_t bulk[3];
4445 struct address_space *mapping;
4450 ASSERT(PageLocked(pp));
4452 pgoff = page_offset(pp); /* Page byte-offset in file */
4453 offset = i_size_read(ip); /* File length in bytes */
4454 pglen = MIN(PAGE_SIZE, /* Page length in bytes */
4455 P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
4457 /* Page is beyond end of file */
4458 if (pgoff >= offset) {
4464 /* Truncate page length to end of file */
4465 if (pgoff + pglen > offset)
4466 pglen = offset - pgoff;
4470 * FIXME: Allow mmap writes past its quota. The correct fix
4471 * is to register a page_mkwrite() handler to count the page
4472 * against its quota when it is about to be dirtied.
4474 if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
4475 KUID_TO_SUID(ip->i_uid)) ||
4476 zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
4477 KGID_TO_SGID(ip->i_gid)) ||
4478 (zp->z_projid != ZFS_DEFAULT_PROJID &&
4479 zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
4486 * The ordering here is critical and must adhere to the following
4487 * rules in order to avoid deadlocking in either zfs_read() or
4488 * zfs_free_range() due to a lock inversion.
4490 * 1) The page must be unlocked prior to acquiring the range lock.
4491 * This is critical because zfs_read() calls find_lock_page()
4492 * which may block on the page lock while holding the range lock.
4494 * 2) Before setting or clearing write back on a page the range lock
4495 * must be held in order to prevent a lock inversion with the
4496 * zfs_free_range() function.
4498 * This presents a problem because upon entering this function the
4499 * page lock is already held. To safely acquire the range lock the
4500 * page lock must be dropped. This creates a window where another
4501 * process could truncate, invalidate, dirty, or write out the page.
4503 * Therefore, after successfully reacquiring the range and page locks
4504 * the current page state is checked. In the common case everything
4505 * will be as is expected and it can be written out. However, if
4506 * the page state has changed it must be handled accordingly.
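/*
 * In outline, the steps below follow those rules roughly as:
 *
 *	redirty_page_for_writepage(wbc, pp);
 *	unlock_page(pp);
 *	lr = rangelock_enter(&zp->z_rangelock, pgoff, pglen, RL_WRITER);
 *	lock_page(pp);
 *	(re-check pp->mapping and PageDirty(pp) before doing any I/O)
 */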
4508 mapping = pp->mapping;
4509 redirty_page_for_writepage(wbc, pp);
4512 locked_range_t *lr = rangelock_enter(&zp->z_rangelock,
4513 pgoff, pglen, RL_WRITER);
4516 /* Page mapping changed or it was no longer dirty, we're done */
4517 if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
4524 /* Another process already started writeback; block if required */
4525 if (PageWriteback(pp)) {
4529 if (wbc->sync_mode != WB_SYNC_NONE) {
4530 if (PageWriteback(pp))
4531 wait_on_page_bit(pp, PG_writeback);
4538 /* Clear the dirty flag now that the required locks are held */
4539 if (!clear_page_dirty_for_io(pp)) {
4547 * Counterpart for redirty_page_for_writepage() above. This page
4548 * was in fact not skipped and should not be counted as if it were.
4550 wbc->pages_skipped--;
4551 set_page_writeback(pp);
4554 tx = dmu_tx_create(zfsvfs->z_os);
4555 dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
4556 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4557 zfs_sa_upgrade_txholds(tx, zp);
4559 err = dmu_tx_assign(tx, TXG_NOWAIT);
4561 if (err == ERESTART)
4565 __set_page_dirty_nobuffers(pp);
4567 end_page_writeback(pp);
4574 ASSERT3U(pglen, <=, PAGE_SIZE);
4575 dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx);
4578 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
4579 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
4580 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL,
4583 /* Preserve the mtime and ctime provided by the inode */
4584 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
4585 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
4586 zp->z_atime_dirty = 0;
4589 err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
4591 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
4592 zfs_putpage_commit_cb, pp);
4597 if (wbc->sync_mode != WB_SYNC_NONE) {
4599 * Note that this is rarely called under writepages(), because
4600 * writepages() normally handles the entire commit for
4601 * performance reasons.
4603 zil_commit(zfsvfs->z_log, zp->z_id);
4611 * Update the system attributes when the inode has been dirtied. For the
4612 * moment we only update the mode, atime, mtime, and ctime.
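/*
 * A single TXG_WAIT transaction below copies the mode, atime, mtime, and
 * ctime from the in-memory inode into the corresponding SA attributes with
 * one sa_bulk_update() call.
 */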
4615 zfs_dirty_inode(struct inode *ip, int flags)
4617 znode_t *zp = ITOZ(ip);
4618 zfsvfs_t *zfsvfs = ITOZSB(ip);
4620 uint64_t mode, atime[2], mtime[2], ctime[2];
4621 sa_bulk_attr_t bulk[4];
4625 if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
4633 * This is the lazytime semantic introduced in Linux 4.0.
4634 * This flag is only passed in from update_time() when lazytime is set.
4635 * (Note, I_DIRTY_SYNC will also be set if not lazytime.)
4636 * Fortunately mtime and ctime are managed within ZFS itself, so we
4637 * only need to dirty atime.
4639 if (flags == I_DIRTY_TIME) {
4640 zp->z_atime_dirty = 1;
4645 tx = dmu_tx_create(zfsvfs->z_os);
4647 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4648 zfs_sa_upgrade_txholds(tx, zp);
4650 error = dmu_tx_assign(tx, TXG_WAIT);
4656 mutex_enter(&zp->z_lock);
4657 zp->z_atime_dirty = 0;
4659 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
4660 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
4661 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
4662 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
4664 /* Preserve the mode, mtime and ctime provided by the inode */
4665 ZFS_TIME_ENCODE(&ip->i_atime, atime);
4666 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
4667 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
4672 error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
4673 mutex_exit(&zp->z_lock);
4683 zfs_inactive(struct inode *ip)
4685 znode_t *zp = ITOZ(ip);
4686 zfsvfs_t *zfsvfs = ITOZSB(ip);
4689 int need_unlock = 0;
4691 /* Only read lock if we haven't already write locked, e.g. rollback */
4692 if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
4694 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
4696 if (zp->z_sa_hdl == NULL) {
4698 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4702 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
4703 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
4705 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4706 zfs_sa_upgrade_txholds(tx, zp);
4707 error = dmu_tx_assign(tx, TXG_WAIT);
4711 ZFS_TIME_ENCODE(&ip->i_atime, atime);
4712 mutex_enter(&zp->z_lock);
4713 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
4714 (void *)&atime, sizeof (atime), tx);
4715 zp->z_atime_dirty = 0;
4716 mutex_exit(&zp->z_lock);
4723 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4727 * Bounds-check the seek operation.
4729 * IN: ip - inode seeking within
4730 * ooff - old file offset
4731 * noffp - pointer to new file offset
4732 * ct - caller context
4734 * RETURN: 0 if success
4735 * EINVAL if new offset invalid
4739 zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
4741 if (S_ISDIR(ip->i_mode))
4743 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4747 * Fill pages with data from the disk.
4750 zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
4752 znode_t *zp = ITOZ(ip);
4753 zfsvfs_t *zfsvfs = ITOZSB(ip);
4755 struct page *cur_pp;
4756 u_offset_t io_off, total;
4763 io_len = nr_pages << PAGE_SHIFT;
4764 i_size = i_size_read(ip);
4765 io_off = page_offset(pl[0]);
4767 if (io_off + io_len > i_size)
4768 io_len = i_size - io_off;
4771 * Iterate over list of pages and read each page individually.
4774 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4777 cur_pp = pl[page_idx++];
4779 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4783 /* convert checksum errors into IO errors */
4785 err = SET_ERROR(EIO);
4794 * Uses zfs_fillpage to read data from the file and fill the pages.
4796 * IN: ip - inode of file to get data from.
4797 * pl - list of pages to read
4798 * nr_pages - number of pages to read
4800 * RETURN: 0 on success, error code on failure.
4803 * ip - atime updated
4807 zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
4809 znode_t *zp = ITOZ(ip);
4810 zfsvfs_t *zfsvfs = ITOZSB(ip);
4819 err = zfs_fillpage(ip, pl, nr_pages);
4826 * Check ZFS specific permissions to memory map a section of a file.
4828 * IN: ip - inode of the file to mmap
4830 * addrp - start address in memory region
4831 * len - length of memory region
4832 * vm_flags- address flags
4834 * RETURN: 0 if success
4835 * error code if failure
4839 zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
4840 unsigned long vm_flags)
4842 znode_t *zp = ITOZ(ip);
4843 zfsvfs_t *zfsvfs = ITOZSB(ip);
4848 if ((vm_flags & VM_WRITE) && (zp->z_pflags &
4849 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4851 return (SET_ERROR(EPERM));
4854 if ((vm_flags & (VM_READ | VM_EXEC)) &&
4855 (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4857 return (SET_ERROR(EACCES));
4860 if (off < 0 || len > MAXOFFSET_T - off) {
4862 return (SET_ERROR(ENXIO));
4870 * convoff - converts the given data (start, whence) to the fixed format.
4874 convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
4879 if ((lckdat->l_whence == SEEK_END) || (whence == SEEK_END)) {
4880 if ((error = zfs_getattr(ip, &vap, 0, CRED())))
4884 switch (lckdat->l_whence) {
4886 lckdat->l_start += offset;
4889 lckdat->l_start += vap.va_size;
4894 return (SET_ERROR(EINVAL));
4897 if (lckdat->l_start < 0)
4898 return (SET_ERROR(EINVAL));
4902 lckdat->l_start -= offset;
4905 lckdat->l_start -= vap.va_size;
4910 return (SET_ERROR(EINVAL));
4913 lckdat->l_whence = (short)whence;
4918 * Free or allocate space in a file. Currently, this function only
4919 * supports the `F_FREESP' command. However, this command is somewhat
4920 * misnamed, as its functionality includes the ability to allocate as
4921 * well as free space.
4923 * IN: ip - inode of file to free data in.
4924 * cmd - action to take (only F_FREESP supported).
4925 * bfp - section of file to free/alloc.
4926 * flag - current file open mode flags.
4927 * offset - current file offset.
4928 * cr - credentials of caller.
4930 * RETURN: 0 on success, error code on failure.
4933 * ip - ctime|mtime updated
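/*
 * Illustrative example (hypothetical values): freeing everything from byte
 * offset 4096 to the end of the file could be requested as
 *
 *	flock64_t bf = { 0 };
 *	bf.l_whence = SEEK_SET;
 *	bf.l_start = 4096;
 *	bf.l_len = 0;
 *	error = zfs_space(ip, F_FREESP, &bf, flag, 0, cr);
 *
 * where l_len == 0 means "from l_start to the end of the file", as handled
 * below, and flag carries the caller's open-mode flags.
 */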
4937 zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
4938 offset_t offset, cred_t *cr)
4940 znode_t *zp = ITOZ(ip);
4941 zfsvfs_t *zfsvfs = ITOZSB(ip);
4948 if (cmd != F_FREESP) {
4950 return (SET_ERROR(EINVAL));
4954 * Callers might not be able to detect properly that we are read-only,
4955 * so check it explicitly here.
4957 if (zfs_is_readonly(zfsvfs)) {
4959 return (SET_ERROR(EROFS));
4962 if ((error = convoff(ip, bfp, SEEK_SET, offset))) {
4967 if (bfp->l_len < 0) {
4969 return (SET_ERROR(EINVAL));
4973 * Permissions aren't checked on Solaris because on this OS
4974 * zfs_space() can only be called with an opened file handle.
4975 * On Linux we can get here through truncate_range() which
4976 * operates directly on inodes, so we need to check access rights.
4978 if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
4984 len = bfp->l_len; /* 0 means from off to end of file */
4986 error = zfs_freesp(zp, off, len, flag, TRUE);
4994 zfs_fid(struct inode *ip, fid_t *fidp)
4996 znode_t *zp = ITOZ(ip);
4997 zfsvfs_t *zfsvfs = ITOZSB(ip);
5000 uint64_t object = zp->z_id;
5007 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
5008 &gen64, sizeof (uint64_t))) != 0) {
5013 gen = (uint32_t)gen64;
5015 size = SHORT_FID_LEN;
5017 zfid = (zfid_short_t *)fidp;
5019 zfid->zf_len = size;
5021 for (i = 0; i < sizeof (zfid->zf_object); i++)
5022 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
5024 /* Must have a non-zero generation number to distinguish from .zfs */
5027 for (i = 0; i < sizeof (zfid->zf_gen); i++)
5028 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
5036 zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
5038 znode_t *zp = ITOZ(ip);
5039 zfsvfs_t *zfsvfs = ITOZSB(ip);
5041 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
5045 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
5053 zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
5055 znode_t *zp = ITOZ(ip);
5056 zfsvfs_t *zfsvfs = ITOZSB(ip);
5058 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
5059 zilog_t *zilog = zfsvfs->z_log;
5064 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
5066 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
5067 zil_commit(zilog, 0);
5073 #ifdef HAVE_UIO_ZEROCOPY
5075 * Tunable, both must be a power of 2.
5077 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
5078 * zcr_blksz_max: if set to less than the file block size, allow loaning out of
5079 * an arcbuf for a partial block read
5081 int zcr_blksz_min = (1 << 10); /* 1K */
5082 int zcr_blksz_max = (1 << 17); /* 128K */
5086 zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
5088 znode_t *zp = ITOZ(ip);
5089 zfsvfs_t *zfsvfs = ITOZSB(ip);
5090 int max_blksz = zfsvfs->z_max_blksz;
5091 uio_t *uio = &xuio->xu_uio;
5092 ssize_t size = uio->uio_resid;
5093 offset_t offset = uio->uio_loffset;
5098 int preamble, postamble;
5100 if (xuio->xu_type != UIOTYPE_ZEROCOPY)
5101 return (SET_ERROR(EINVAL));
5108 * Loan out an arc_buf for write if write size is bigger than
5109 * max_blksz, and the file's block size is also max_blksz.
5112 if (size < blksz || zp->z_blksz != blksz) {
5114 return (SET_ERROR(EINVAL));
5117 * Caller requests buffers for write before knowing where the
5118 * write offset might be (e.g. NFS TCP write).
5123 preamble = P2PHASE(offset, blksz);
5125 preamble = blksz - preamble;
5130 postamble = P2PHASE(size, blksz);
5133 fullblk = size / blksz;
5134 (void) dmu_xuio_init(xuio,
5135 (preamble != 0) + fullblk + (postamble != 0));
5138 * Have to fix iov base/len for partial buffers. They
5139 * currently represent full arc_buf's.
5142 /* data begins in the middle of the arc_buf */
5143 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5146 (void) dmu_xuio_add(xuio, abuf,
5147 blksz - preamble, preamble);
5150 for (i = 0; i < fullblk; i++) {
5151 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5154 (void) dmu_xuio_add(xuio, abuf, 0, blksz);
5158 /* data ends in the middle of the arc_buf */
5159 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5162 (void) dmu_xuio_add(xuio, abuf, 0, postamble);
5167 * Loan out an arc_buf for read if the read size is larger than
5168 * the current file block size. Block alignment is not
5169 * considered. Partial arc_buf will be loaned out for read.
5171 blksz = zp->z_blksz;
5172 if (blksz < zcr_blksz_min)
5173 blksz = zcr_blksz_min;
5174 if (blksz > zcr_blksz_max)
5175 blksz = zcr_blksz_max;
5176 /* avoid potential complexity of dealing with it */
5177 if (blksz > max_blksz) {
5179 return (SET_ERROR(EINVAL));
5182 maxsize = zp->z_size - uio->uio_loffset;
5188 return (SET_ERROR(EINVAL));
5193 return (SET_ERROR(EINVAL));
5196 uio->uio_extflg = UIO_XUIO;
5197 XUIO_XUZC_RW(xuio) = ioflag;
5204 zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
5208 int ioflag = XUIO_XUZC_RW(xuio);
5210 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
5212 i = dmu_xuio_cnt(xuio);
5214 abuf = dmu_xuio_arcbuf(xuio, i);
5216 * if abuf == NULL, it must be a write buffer
5217 * that has been returned in zfs_write().
5220 dmu_return_arcbuf(abuf);
5221 ASSERT(abuf || ioflag == UIO_WRITE);
5224 dmu_xuio_fini(xuio);
5227 #endif /* HAVE_UIO_ZEROCOPY */
5229 #if defined(_KERNEL)
5230 EXPORT_SYMBOL(zfs_open);
5231 EXPORT_SYMBOL(zfs_close);
5232 EXPORT_SYMBOL(zfs_read);
5233 EXPORT_SYMBOL(zfs_write);
5234 EXPORT_SYMBOL(zfs_access);
5235 EXPORT_SYMBOL(zfs_lookup);
5236 EXPORT_SYMBOL(zfs_create);
5237 EXPORT_SYMBOL(zfs_tmpfile);
5238 EXPORT_SYMBOL(zfs_remove);
5239 EXPORT_SYMBOL(zfs_mkdir);
5240 EXPORT_SYMBOL(zfs_rmdir);
5241 EXPORT_SYMBOL(zfs_readdir);
5242 EXPORT_SYMBOL(zfs_fsync);
5243 EXPORT_SYMBOL(zfs_getattr);
5244 EXPORT_SYMBOL(zfs_getattr_fast);
5245 EXPORT_SYMBOL(zfs_setattr);
5246 EXPORT_SYMBOL(zfs_rename);
5247 EXPORT_SYMBOL(zfs_symlink);
5248 EXPORT_SYMBOL(zfs_readlink);
5249 EXPORT_SYMBOL(zfs_link);
5250 EXPORT_SYMBOL(zfs_inactive);
5251 EXPORT_SYMBOL(zfs_space);
5252 EXPORT_SYMBOL(zfs_fid);
5253 EXPORT_SYMBOL(zfs_getsecattr);
5254 EXPORT_SYMBOL(zfs_setsecattr);
5255 EXPORT_SYMBOL(zfs_getpage);
5256 EXPORT_SYMBOL(zfs_putpage);
5257 EXPORT_SYMBOL(zfs_dirty_inode);
5258 EXPORT_SYMBOL(zfs_map);
5261 module_param(zfs_delete_blocks, ulong, 0644);
5262 MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
5263 module_param(zfs_read_chunk_size, long, 0644);
5264 MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");