/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/uio_impl.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_crypt.h>
#include <sys/policy.h>
#include <sys/zfeature.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>

/*
 * Enable the experimental block cloning feature. If this setting is 0, then
 * even if feature@block_cloning is enabled, attempts to clone blocks will act
 * as though the feature is disabled.
 */
int zfs_bclone_enabled = 1;

/*
 * When set, zfs_clone_range() waits for dirty data to be written to disk.
 * This allows the clone operation to reliably succeed when a file is modified
 * and then immediately cloned. For small files this may be slower than making
 * a copy of the file and is therefore not the default. However, in certain
 * scenarios this behavior may be desirable, so a tunable is provided.
 */
static int zfs_bclone_wait_dirty = 0;

/*
 * Maximum bytes to read per chunk in zfs_read().
 */
static uint64_t zfs_vnops_read_chunk_size = 1024 * 1024;
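
/*
 * (Worked example: zfs_read() below sizes each chunk as
 * MIN(n, chunk - P2PHASE(offset, chunk)), so with the default 1 MiB chunk a
 * read starting at offset 1.5 MiB copies 512 KiB first; every subsequent
 * chunk is then aligned to a chunk-size boundary.)
 */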

int
zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
{
        int error = 0;
        zfsvfs_t *zfsvfs = ZTOZSB(zp);

        if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
                if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
                        return (error);
                atomic_inc_32(&zp->z_sync_writes_cnt);
                zil_commit(zfsvfs->z_log, zp->z_id);
                atomic_dec_32(&zp->z_sync_writes_cnt);
                zfs_exit(zfsvfs, FTAG);
        }
        return (error);
}

#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
 * Lseek support for finding holes (cmd == SEEK_HOLE) and
 * data (cmd == SEEK_DATA). "off" is an in/out parameter.
 */
static int
zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
{
        zfs_locked_range_t *lr;
        uint64_t noff = (uint64_t)*off; /* new offset */
        uint64_t file_sz;
        int error;
        boolean_t hole;

        file_sz = zp->z_size;
        if (noff >= file_sz) {
                return (SET_ERROR(ENXIO));
        }

        if (cmd == F_SEEK_HOLE)
                hole = B_TRUE;
        else
                hole = B_FALSE;

        /* Flush any mmap()'d data to disk */
        if (zn_has_cached_data(zp, 0, file_sz - 1))
                zn_flush_cached_data(zp, B_FALSE);

        lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_READER);
        error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
        zfs_rangelock_exit(lr);

        if (error == ESRCH)
                return (SET_ERROR(ENXIO));

        /* File was dirty, so fall back to using generic logic */
        if (error == EBUSY) {
                if (hole)
                        *off = file_sz;

                return (0);
        }

        /*
         * We could find a hole that begins after the logical end-of-file,
         * because dmu_offset_next() only works on whole blocks. If the
         * EOF falls mid-block, then indicate that the "virtual hole"
         * at the end of the file begins at the logical EOF, rather than
         * at the end of the last block.
         */
        if (noff > file_sz) {
                ASSERT(hole);
                noff = file_sz;
        }

        if (noff < *off)
                return (error);
        *off = noff;
        return (error);
}

int
zfs_holey(znode_t *zp, ulong_t cmd, loff_t *off)
{
        zfsvfs_t *zfsvfs = ZTOZSB(zp);
        int error;

        if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
                return (error);

        error = zfs_holey_common(zp, cmd, off);

        zfs_exit(zfsvfs, FTAG);
        return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
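
/*
 * Illustrative userspace sketch (hypothetical file name, not part of this
 * module): the SEEK_HOLE/SEEK_DATA support above is what makes a hole walk
 * like this work on ZFS.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd = open("sparse_file", O_RDONLY);
 *		off_t end = lseek(fd, 0, SEEK_END);
 *
 *		for (off_t off = 0; off < end; ) {
 *			off_t data = lseek(fd, off, SEEK_DATA);
 *			if (data < 0)
 *				break;	// ENXIO: no more data
 *			off_t hole = lseek(fd, data, SEEK_HOLE);
 *			printf("data [%lld, %lld)\n", (long long)data,
 *			    (long long)hole);
 *			off = hole;
 *		}
 *		close(fd);
 *		return (0);
 *	}
 */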

int
zfs_access(znode_t *zp, int mode, int flag, cred_t *cr)
{
        zfsvfs_t *zfsvfs = ZTOZSB(zp);
        int error;

        if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
                return (error);

        if (flag & V_ACE_MASK)
#if defined(__linux__)
                error = zfs_zaccess(zp, mode, flag, B_FALSE, cr,
                    zfs_init_idmap);
#else
                error = zfs_zaccess(zp, mode, flag, B_FALSE, cr,
                    NULL);
#endif
        else
#if defined(__linux__)
                error = zfs_zaccess_rwx(zp, mode, flag, cr, zfs_init_idmap);
#else
                error = zfs_zaccess_rwx(zp, mode, flag, cr, NULL);
#endif

        zfs_exit(zfsvfs, FTAG);
        return (error);
}

/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	zp	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and buffer for data.
 *		ioflag	- O_SYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Side Effects:
 *	inode - atime updated if byte count > 0
 */
int
zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
        (void) cr;
        int error = 0;
        boolean_t frsync = B_FALSE;

        zfsvfs_t *zfsvfs = ZTOZSB(zp);
        if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
                return (error);

        if (zp->z_pflags & ZFS_AV_QUARANTINED) {
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EACCES));
        }

        /* We don't copy out anything useful for directories. */
        if (Z_ISDIR(ZTOTYPE(zp))) {
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EISDIR));
        }

        /*
         * Validate file offset
         */
        if (zfs_uio_offset(uio) < (offset_t)0) {
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EINVAL));
        }

        /*
         * Fasttrack empty reads
         */
        if (zfs_uio_resid(uio) == 0) {
                zfs_exit(zfsvfs, FTAG);
                return (0);
        }

        /*
         * If we're in FRSYNC mode, sync out this znode before reading it.
         * Only do this for non-snapshots.
         *
         * Some platforms do not support FRSYNC and instead map it
         * to O_SYNC, which results in unnecessary calls to zil_commit. We
         * only honor FRSYNC requests on platforms which support it.
         */
        frsync = !!(ioflag & FRSYNC);

        if (zfsvfs->z_log &&
            (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
                zil_commit(zfsvfs->z_log, zp->z_id);

        /*
         * Lock the range against changes.
         */
        zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
            zfs_uio_offset(uio), zfs_uio_resid(uio), RL_READER);

        /*
         * If we are reading past end-of-file we can skip
         * to the end; but we might still need to set atime.
         */
        if (zfs_uio_offset(uio) >= zp->z_size) {
                error = 0;
                goto out;
        }

        ASSERT(zfs_uio_offset(uio) < zp->z_size);
#if defined(__linux__)
        ssize_t start_offset = zfs_uio_offset(uio);
#endif
        ssize_t n = MIN(zfs_uio_resid(uio), zp->z_size - zfs_uio_offset(uio));
        ssize_t start_resid = n;

        while (n > 0) {
                ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size -
                    P2PHASE(zfs_uio_offset(uio), zfs_vnops_read_chunk_size));
#ifdef UIO_NOCOPY
                if (zfs_uio_segflg(uio) == UIO_NOCOPY)
                        error = mappedread_sf(zp, nbytes, uio);
                else
#endif
                if (zn_has_cached_data(zp, zfs_uio_offset(uio),
                    zfs_uio_offset(uio) + nbytes - 1) && !(ioflag & O_DIRECT)) {
                        error = mappedread(zp, nbytes, uio);
                } else {
                        error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
                            uio, nbytes);
                }

                if (error) {
                        /* convert checksum errors into IO errors */
                        if (error == ECKSUM)
                                error = SET_ERROR(EIO);

#if defined(__linux__)
                        /*
                         * if we actually read some bytes, bubbling EFAULT
                         * up to become EAGAIN isn't what we want here...
                         *
                         * ...on Linux, at least. On FBSD, doing this breaks.
                         */
                        if (error == EFAULT &&
                            (zfs_uio_offset(uio) - start_offset) != 0)
                                error = 0;
#endif
                        break;
                }

                n -= nbytes;
        }

        int64_t nread = start_resid - n;
        dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
        task_io_account_read(nread);
out:
        zfs_rangelock_exit(lr);

        ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
        zfs_exit(zfsvfs, FTAG);
        return (error);
}

static void
zfs_clear_setid_bits_if_necessary(zfsvfs_t *zfsvfs, znode_t *zp, cred_t *cr,
    uint64_t *clear_setid_bits_txgp, dmu_tx_t *tx)
{
        zilog_t *zilog = zfsvfs->z_log;
        const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));

        ASSERT(clear_setid_bits_txgp != NULL);
        ASSERT(tx != NULL);

        /*
         * Clear Set-UID/Set-GID bits on successful write if not
         * privileged and at least one of the execute bits is set.
         *
         * It would be nice to do this after all writes have
         * been done, but that would still expose the ISUID/ISGID
         * to another app after the partial write is committed.
         *
         * Note: we don't call zfs_fuid_map_id() here because
         * user 0 is not an ephemeral uid.
         */
        mutex_enter(&zp->z_acl_lock);
        if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) | (S_IXUSR >> 6))) != 0 &&
            (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
            secpolicy_vnode_setid_retain(zp, cr,
            ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
                uint64_t newmode;

                zp->z_mode &= ~(S_ISUID | S_ISGID);
                newmode = zp->z_mode;
                (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
                    (void *)&newmode, sizeof (uint64_t), tx);

                mutex_exit(&zp->z_acl_lock);

                /*
                 * Make sure SUID/SGID bits will be removed when we replay the
                 * log. If the setid bits keep coming back, don't log more
                 * than one TX_SETATTR per transaction group.
                 */
                if (*clear_setid_bits_txgp != dmu_tx_get_txg(tx)) {
                        vattr_t va = {0};

                        va.va_mask = ATTR_MODE;
                        va.va_nodeid = zp->z_id;
                        va.va_mode = newmode;
                        zfs_log_setattr(zilog, tx, TX_SETATTR, zp, &va,
                            ATTR_MODE, NULL);
                        *clear_setid_bits_txgp = dmu_tx_get_txg(tx);
                }
        } else {
                mutex_exit(&zp->z_acl_lock);
        }
}
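
/*
 * Illustrative userspace sketch (hypothetical file name, not part of this
 * module): an unprivileged write to a set-uid executable drops the set-uid
 * bit, which is the behavior the function above implements.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd = open("suid_demo", O_CREAT | O_RDWR, 0755);
 *		struct stat st;
 *
 *		fchmod(fd, 04755);		// set-uid + exec bits
 *		(void) write(fd, "x", 1);	// non-privileged write
 *		fstat(fd, &st);
 *		// S_ISUID is now clear in st.st_mode.
 *		printf("%o\n", (unsigned)(st.st_mode & 07777));
 *		close(fd);
 *		return (0);
 *	}
 */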

/*
 * Write the bytes to a file.
 *
 *	IN:	zp	- znode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- O_APPEND flag set if in append mode.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated if byte count > 0
 */
int
zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
        int error = 0, error1;
        ssize_t start_resid = zfs_uio_resid(uio);
        uint64_t clear_setid_bits_txg = 0;

        /*
         * Fasttrack empty write
         */
        ssize_t n = start_resid;
        if (n == 0)
                return (0);

        zfsvfs_t *zfsvfs = ZTOZSB(zp);
        if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
                return (error);

        sa_bulk_attr_t bulk[4];
        int count = 0;
        uint64_t mtime[2], ctime[2];
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
            &zp->z_size, 8);
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
            &zp->z_pflags, 8);

        /*
         * Callers might not be able to detect properly that we are read-only,
         * so check it explicitly here.
         */
        if (zfs_is_readonly(zfsvfs)) {
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EROFS));
        }

        /*
         * If immutable or not appending then return EPERM.
         * Intentionally allow ZFS_READONLY through here.
         * See zfs_zaccess_common()
         */
        if ((zp->z_pflags & ZFS_IMMUTABLE) ||
            ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
            (zfs_uio_offset(uio) < zp->z_size))) {
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EPERM));
        }

        /*
         * Validate file offset
         */
        offset_t woff = ioflag & O_APPEND ? zp->z_size : zfs_uio_offset(uio);
        if (woff < 0) {
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EINVAL));
        }

        /*
         * Pre-fault the pages to ensure slow (e.g. NFS) pages
         * don't hold up txg.
         */
        ssize_t pfbytes = MIN(n, DMU_MAX_ACCESS >> 1);
        if (zfs_uio_prefaultpages(pfbytes, uio)) {
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EFAULT));
        }

        /*
         * If in append mode, set the io offset pointer to eof.
         */
        zfs_locked_range_t *lr;
        if (ioflag & O_APPEND) {
                /*
                 * Obtain an appending range lock to guarantee file append
                 * semantics. We reset the write offset once we have the lock.
                 */
                lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
                woff = lr->lr_offset;
                if (lr->lr_length == UINT64_MAX) {
                        /*
                         * We overlocked the file because this write will cause
                         * the file block size to increase.
                         * Note that zp_size cannot change with this lock held.
                         */
                        woff = zp->z_size;
                }
                zfs_uio_setoffset(uio, woff);
        } else {
                /*
                 * Note that if the file block size will change as a result of
                 * this write, then this range lock will lock the entire file
                 * so that we can re-write the block safely.
                 */
                lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
        }

        if (zn_rlimit_fsize_uio(zp, uio)) {
                zfs_rangelock_exit(lr);
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EFBIG));
        }

        const rlim64_t limit = MAXOFFSET_T;

        if (woff >= limit) {
                zfs_rangelock_exit(lr);
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EFBIG));
        }

        if (n > limit - woff)
                n = limit - woff;

        uint64_t end_size = MAX(zp->z_size, woff + n);
        zilog_t *zilog = zfsvfs->z_log;
        boolean_t commit = (ioflag & (O_SYNC | O_DSYNC)) ||
            (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS);

        const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
        const uint64_t gid = KGID_TO_SGID(ZTOGID(zp));
        const uint64_t projid = zp->z_projid;

        /*
         * Write the file in reasonable size chunks. Each chunk is written
         * in a separate transaction; this keeps the intent log records small
         * and allows us to do more fine-grained space accounting.
         */
        while (n > 0) {
                woff = zfs_uio_offset(uio);

                if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
                    zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
                    (projid != ZFS_DEFAULT_PROJID &&
                    zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
                    projid))) {
                        error = SET_ERROR(EDQUOT);
                        break;
                }

                uint64_t blksz;
                if (lr->lr_length == UINT64_MAX && zp->z_size <= zp->z_blksz) {
                        if (zp->z_blksz > zfsvfs->z_max_blksz &&
                            !ISP2(zp->z_blksz)) {
                                /*
                                 * File's blocksize is already larger than the
                                 * "recordsize" property. Only let it grow to
                                 * the next power of 2.
                                 */
                                blksz = 1 << highbit64(zp->z_blksz);
                        } else {
                                blksz = zfsvfs->z_max_blksz;
                        }
                        blksz = MIN(blksz, P2ROUNDUP(end_size,
                            SPA_MINBLOCKSIZE));
                        blksz = MAX(blksz, zp->z_blksz);
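                        /*
                         * (Worked example: if z_blksz is 96K -- not a power
                         * of 2 -- and the recordsize has since been lowered
                         * below it, 1 << highbit64(96K) = 128K, so the block
                         * may grow only to the next power of 2.)
                         */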
                } else {
                        blksz = zp->z_blksz;
                }

                arc_buf_t *abuf = NULL;
                ssize_t nbytes = n;
                if (n >= blksz && woff >= zp->z_size &&
                    P2PHASE(woff, blksz) == 0 &&
                    (blksz >= SPA_OLD_MAXBLOCKSIZE || n < 4 * blksz)) {
                        /*
                         * This write covers a full block. "Borrow" a buffer
                         * from the dmu so that we can fill it before we enter
                         * a transaction. This avoids the possibility of
                         * holding up the transaction if the data copy hangs
                         * up on a pagefault (e.g., from an NFS server mapping).
                         */
                        abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
                            blksz);
                        ASSERT(abuf != NULL);
                        ASSERT(arc_buf_size(abuf) == blksz);
                        if ((error = zfs_uiocopy(abuf->b_data, blksz,
                            UIO_WRITE, uio, &nbytes))) {
                                dmu_return_arcbuf(abuf);
                                break;
                        }
                        ASSERT3S(nbytes, ==, blksz);
                } else {
                        nbytes = MIN(n, (DMU_MAX_ACCESS >> 1) -
                            P2PHASE(woff, blksz));
                        if (pfbytes < nbytes) {
                                if (zfs_uio_prefaultpages(nbytes, uio)) {
                                        error = SET_ERROR(EFAULT);
                                        break;
                                }
                                pfbytes = nbytes;
                        }
                }

                /*
                 * Start a transaction.
                 */
                dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
                dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
                dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
                DB_DNODE_ENTER(db);
                dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff, nbytes);
                DB_DNODE_EXIT(db);
                zfs_sa_upgrade_txholds(tx, zp);
                error = dmu_tx_assign(tx, TXG_WAIT);
                if (error) {
                        dmu_tx_abort(tx);
                        if (abuf != NULL)
                                dmu_return_arcbuf(abuf);
                        break;
                }

                /*
                 * NB: We must call zfs_clear_setid_bits_if_necessary before
                 * committing the transaction!
                 */

                /*
                 * If rangelock_enter() over-locked we grow the blocksize
                 * and then reduce the lock range. This will only happen
                 * on the first iteration since rangelock_reduce() will
                 * shrink down lr_length to the appropriate size.
                 */
                if (lr->lr_length == UINT64_MAX) {
                        zfs_grow_blocksize(zp, blksz, tx);
                        zfs_rangelock_reduce(lr, woff, n);
                }

                ssize_t tx_bytes;
                if (abuf == NULL) {
                        tx_bytes = zfs_uio_resid(uio);
                        zfs_uio_fault_disable(uio, B_TRUE);
                        error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
                            uio, nbytes, tx);
                        zfs_uio_fault_disable(uio, B_FALSE);
#ifdef __linux__
                        if (error == EFAULT) {
                                zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
                                    cr, &clear_setid_bits_txg, tx);
                                dmu_tx_commit(tx);
                                /*
                                 * Account for partial writes before
                                 * continuing the loop.
                                 * Update needs to occur before the next
                                 * zfs_uio_prefaultpages, or prefaultpages may
                                 * error, and we may break the loop early.
                                 */
                                n -= tx_bytes - zfs_uio_resid(uio);
                                pfbytes -= tx_bytes - zfs_uio_resid(uio);
                                continue;
                        }
#endif
                        /*
                         * On FreeBSD, EFAULT should be propagated back to the
                         * VFS, which will handle faulting and will retry.
                         */
                        if (error != 0 && error != EFAULT) {
                                zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
                                    cr, &clear_setid_bits_txg, tx);
                                dmu_tx_commit(tx);
                                break;
                        }
                        tx_bytes -= zfs_uio_resid(uio);
                } else {
                        /*
                         * Thus, we're writing a full block at a block-aligned
                         * offset and extending the file past EOF.
                         *
                         * dmu_assign_arcbuf_by_dbuf() will directly assign the
                         * arc buffer to a dbuf.
                         */
                        error = dmu_assign_arcbuf_by_dbuf(
                            sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
                        if (error != 0) {
                                /*
                                 * XXX This might not be necessary if
                                 * dmu_assign_arcbuf_by_dbuf is guaranteed
                                 * to be atomic.
                                 */
                                zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
                                    cr, &clear_setid_bits_txg, tx);
                                dmu_return_arcbuf(abuf);
                                dmu_tx_commit(tx);
                                break;
                        }
                        ASSERT3S(nbytes, <=, zfs_uio_resid(uio));
                        zfs_uioskip(uio, nbytes);
                        tx_bytes = nbytes;
                }
                if (tx_bytes &&
                    zn_has_cached_data(zp, woff, woff + tx_bytes - 1) &&
                    !(ioflag & O_DIRECT)) {
                        update_pages(zp, woff, tx_bytes, zfsvfs->z_os);
                }

                /*
                 * If we made no progress, we're done. If we made even
                 * partial progress, update the znode and ZIL accordingly.
                 */
                if (tx_bytes == 0) {
                        (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
                            (void *)&zp->z_size, sizeof (uint64_t), tx);
                        dmu_tx_commit(tx);
                        ASSERT(error != 0);
                        break;
                }

                zfs_clear_setid_bits_if_necessary(zfsvfs, zp, cr,
                    &clear_setid_bits_txg, tx);

                zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);

                /*
                 * Update the file size (zp_size) if it has changed;
                 * account for possible concurrent updates.
                 */
                while ((end_size = zp->z_size) < zfs_uio_offset(uio)) {
                        (void) atomic_cas_64(&zp->z_size, end_size,
                            zfs_uio_offset(uio));
                        ASSERT(error == 0 || error == EFAULT);
                }
                /*
                 * If we are replaying and eof is non zero then force
                 * the file size to the specified eof. Note, there's no
                 * concurrency during replay.
                 */
                if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
                        zp->z_size = zfsvfs->z_replay_eof;

                error1 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
                if (error1 != 0)
                        /* Avoid clobbering EFAULT. */
                        error = error1;

                /*
                 * NB: During replay, the TX_SETATTR record logged by
                 * zfs_clear_setid_bits_if_necessary must precede any of
                 * the TX_WRITE records logged here.
                 */
                zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, commit,
                    NULL, NULL);

                dmu_tx_commit(tx);

                if (error != 0)
                        break;
                ASSERT3S(tx_bytes, ==, nbytes);
                n -= nbytes;
                pfbytes -= nbytes;
        }

        zfs_znode_update_vfs(zp);
        zfs_rangelock_exit(lr);

        /*
         * If we're in replay mode, or we made no progress, or the
         * uio data is inaccessible return an error. Otherwise, it's
         * at least a partial write, so it's successful.
         */
        if (zfsvfs->z_replay || zfs_uio_resid(uio) == start_resid ||
            error == EFAULT) {
                zfs_exit(zfsvfs, FTAG);
                return (error);
        }

        if (commit)
                zil_commit(zilog, zp->z_id);

        const int64_t nwritten = start_resid - zfs_uio_resid(uio);
        dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
        task_io_account_write(nwritten);

        zfs_exit(zfsvfs, FTAG);
        return (0);
}

int
zfs_getsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
        zfsvfs_t *zfsvfs = ZTOZSB(zp);
        int error;
        boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

        if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
                return (error);
        error = zfs_getacl(zp, vsecp, skipaclchk, cr);
        zfs_exit(zfsvfs, FTAG);

        return (error);
}

int
zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
        zfsvfs_t *zfsvfs = ZTOZSB(zp);
        int error;
        boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
        zilog_t *zilog;

        if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
                return (error);
        zilog = zfsvfs->z_log;
        error = zfs_setacl(zp, vsecp, skipaclchk, cr);

        if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
                zil_commit(zilog, 0);

        zfs_exit(zfsvfs, FTAG);
        return (error);
}

#ifdef ZFS_DEBUG
static int zil_fault_io = 0;
#endif

static void zfs_get_done(zgd_t *zgd, int error);

/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, uint64_t gen, lr_write_t *lr, char *buf,
    struct lwb *lwb, zio_t *zio)
{
        zfsvfs_t *zfsvfs = arg;
        objset_t *os = zfsvfs->z_os;
        znode_t *zp;
        uint64_t object = lr->lr_foid;
        uint64_t offset = lr->lr_offset;
        uint64_t size = lr->lr_length;
        dmu_buf_t *db;
        zgd_t *zgd;
        int error = 0;
        uint64_t zp_gen;

        ASSERT3P(lwb, !=, NULL);
        ASSERT3U(size, !=, 0);

        /*
         * Nothing to do if the file has been removed
         */
        if (zfs_zget(zfsvfs, object, &zp) != 0)
                return (SET_ERROR(ENOENT));
        if (zp->z_unlinked) {
                /*
                 * Release the vnode asynchronously as we currently have the
                 * txg stopped from syncing.
                 */
                zfs_zrele_async(zp);
                return (SET_ERROR(ENOENT));
        }
        /* check if generation number matches */
        if (sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
            sizeof (zp_gen)) != 0) {
                zfs_zrele_async(zp);
                return (SET_ERROR(EIO));
        }
        if (zp_gen != gen) {
                zfs_zrele_async(zp);
                return (SET_ERROR(ENOENT));
        }

        zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
        zgd->zgd_lwb = lwb;
        zgd->zgd_private = zp;

        /*
         * Write records come in two flavors: immediate and indirect.
         * For small writes it's cheaper to store the data with the
         * log record (immediate); for large writes it's cheaper to
         * sync the data and get a pointer to it (indirect) so that
         * we don't have to write the data twice.
         */
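        /*
         * (Illustrative note, an assumption about the wider ZIL code rather
         * than something stated here: the flavor chosen below surfaces as
         * the itx write state -- WR_COPIED for immediate records versus
         * WR_INDIRECT/WR_NEED_COPY for indirect ones; see zfs_log_write().)
         */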
        if (buf != NULL) { /* immediate write */
                zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
                    offset, size, RL_READER);
                /* test for truncation needs to be done while range locked */
                if (offset >= zp->z_size) {
                        error = SET_ERROR(ENOENT);
                } else {
                        error = dmu_read(os, object, offset, size, buf,
                            DMU_READ_NO_PREFETCH);
                }
                ASSERT(error == 0 || error == ENOENT);
        } else { /* indirect write */
                ASSERT3P(zio, !=, NULL);

                /*
                 * Have to lock the whole block to ensure when it's
                 * written out and its checksum is being calculated
                 * that no one can change the data. We need to re-check
                 * blocksize after we get the lock in case it's changed!
                 */
                for (;;) {
                        uint64_t blkoff;
                        size = zp->z_blksz;
                        blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
                        offset -= blkoff;
                        zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
                            offset, size, RL_READER);
                        if (zp->z_blksz == size)
                                break;
                        offset += blkoff;
                        zfs_rangelock_exit(zgd->zgd_lr);
                }
                /* test for truncation needs to be done while range locked */
                if (lr->lr_offset >= zp->z_size)
                        error = SET_ERROR(ENOENT);
#ifdef ZFS_DEBUG
                if (zil_fault_io) {
                        error = SET_ERROR(EIO);
                        zil_fault_io = 0;
                }
#endif
                if (error == 0)
                        error = dmu_buf_hold_noread(os, object, offset, zgd,
                            &db);

                if (error == 0) {
                        blkptr_t *bp = &lr->lr_blkptr;
                        zgd->zgd_db = db;
                        zgd->zgd_bp = bp;

                        ASSERT(db->db_offset == offset);
                        ASSERT(db->db_size == size);

                        error = dmu_sync(zio, lr->lr_common.lrc_txg,
                            zfs_get_done, zgd);
                        ASSERT(error || lr->lr_length <= size);

                        /*
                         * On success, we need to wait for the write I/O
                         * initiated by dmu_sync() to complete before we can
                         * release this dbuf. We will finish everything up
                         * in the zfs_get_done() callback.
                         */
                        if (error == 0)
                                return (0);

                        if (error == EALREADY) {
                                lr->lr_common.lrc_txtype = TX_WRITE2;
                                /*
                                 * TX_WRITE2 relies on the data previously
                                 * written by the TX_WRITE that caused
                                 * EALREADY. We zero out the BP because
                                 * it is the old, currently-on-disk BP.
                                 */
                                zgd->zgd_bp = NULL;
                                BP_ZERO(bp);
                                error = 0;
                        }
                }
        }

        zfs_get_done(zgd, error);

        return (error);
}

static void
zfs_get_done(zgd_t *zgd, int error)
{
        (void) error;
        znode_t *zp = zgd->zgd_private;

        if (zgd->zgd_db)
                dmu_buf_rele(zgd->zgd_db, zgd);

        zfs_rangelock_exit(zgd->zgd_lr);

        /*
         * Release the vnode asynchronously as we currently have the
         * txg stopped from syncing.
         */
        zfs_zrele_async(zp);

        kmem_free(zgd, sizeof (zgd_t));
}

static int
zfs_enter_two(zfsvfs_t *zfsvfs1, zfsvfs_t *zfsvfs2, const char *tag)
{
        int error;

        /* Swap. Not sure if the order of zfs_enter()s is important. */
        if (zfsvfs1 > zfsvfs2) {
                zfsvfs_t *tmpzfsvfs;

                tmpzfsvfs = zfsvfs2;
                zfsvfs2 = zfsvfs1;
                zfsvfs1 = tmpzfsvfs;
        }

        error = zfs_enter(zfsvfs1, tag);
        if (error != 0)
                return (error);
        if (zfsvfs1 != zfsvfs2) {
                error = zfs_enter(zfsvfs2, tag);
                if (error != 0) {
                        zfs_exit(zfsvfs1, tag);
                        return (error);
                }
        }

        return (0);
}

static void
zfs_exit_two(zfsvfs_t *zfsvfs1, zfsvfs_t *zfsvfs2, const char *tag)
{
        zfs_exit(zfsvfs1, tag);
        if (zfsvfs1 != zfsvfs2)
                zfs_exit(zfsvfs2, tag);
}
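
/*
 * (Illustrative note: taking the two zfs_enter()s in one globally consistent
 * order -- here, ascending zfsvfs pointer address -- is the standard guard
 * against ABBA deadlocks when two threads clone between the same pair of
 * datasets in opposite directions.)
 */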

/*
 * We split each clone request into chunks that can fit into a single ZIL
 * log entry. Each ZIL log entry can fit 130816 bytes for a block cloning
 * operation (see zil_max_log_data() and zfs_log_clone_range()). This gives
 * us room for storing 1022 block pointers.
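 * (Arithmetic check: 1022 block pointers * 128 bytes per blkptr_t =
 * 130816 bytes.)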
 *
 * On success, the function returns the number of bytes copied in *lenp.
 * Note that it does not return how many bytes are left to be copied.
 * On errors caused by any file system or BRT limitation, EINVAL is
 * returned. In most such cases the user requested bad parameters: it might
 * still be possible to clone the file, but some parameters don't match the
 * requirements.
 */
int
zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
    uint64_t *outoffp, uint64_t *lenp, cred_t *cr)
{
        zfsvfs_t *inzfsvfs, *outzfsvfs;
        objset_t *inos, *outos;
        zfs_locked_range_t *inlr, *outlr;
        dmu_buf_impl_t *db;
        dmu_tx_t *tx;
        zilog_t *zilog;
        uint64_t inoff, outoff, len, done;
        uint64_t outsize, size;
        int error;
        int count = 0;
        sa_bulk_attr_t bulk[3];
        uint64_t mtime[2], ctime[2];
        uint64_t uid, gid, projid;
        blkptr_t *bps;
        size_t maxblocks, nbps;
        uint64_t inblksz;
        uint64_t clear_setid_bits_txg = 0;
        uint64_t last_synced_txg = 0;

        inoff = *inoffp;
        outoff = *outoffp;
        len = *lenp;
        done = 0;

        inzfsvfs = ZTOZSB(inzp);
        outzfsvfs = ZTOZSB(outzp);

        /*
         * We need to call zfs_enter() potentially on two different datasets,
         * so we need a dedicated function for that.
         */
        error = zfs_enter_two(inzfsvfs, outzfsvfs, FTAG);
        if (error != 0)
                return (error);

        inos = inzfsvfs->z_os;
        outos = outzfsvfs->z_os;

        /*
         * Both source and destination have to belong to the same storage pool.
         */
        if (dmu_objset_spa(inos) != dmu_objset_spa(outos)) {
                zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
                return (SET_ERROR(EXDEV));
        }

        /*
         * outos and inos belong to the same storage pool, as verified just
         * above, so a single feature check suffices.
         */
        if (!spa_feature_is_enabled(dmu_objset_spa(outos),
            SPA_FEATURE_BLOCK_CLONING)) {
                zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
                return (SET_ERROR(EOPNOTSUPP));
        }

        ASSERT(!outzfsvfs->z_replay);

        /*
         * Block cloning from an unencrypted dataset into an encrypted
         * dataset and vice versa is not supported.
         */
        if (inos->os_encrypted != outos->os_encrypted) {
                zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
                return (SET_ERROR(EXDEV));
        }

        /*
         * Cloning across encrypted datasets is possible only if they
         * share the same master key.
         */
        if (inos != outos && inos->os_encrypted &&
            !dmu_objset_crypto_key_equal(inos, outos)) {
                zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
                return (SET_ERROR(EXDEV));
        }

        error = zfs_verify_zp(inzp);
        if (error == 0)
                error = zfs_verify_zp(outzp);
        if (error != 0) {
                zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
                return (error);
        }

        /*
         * We don't copy the source file's flags, which is why we don't allow
         * cloning of files that are in quarantine.
         */
        if (inzp->z_pflags & ZFS_AV_QUARANTINED) {
                zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
                return (SET_ERROR(EACCES));
        }

        if (inoff >= inzp->z_size) {
                *lenp = 0;
                zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
                return (0);
        }
        if (len > inzp->z_size - inoff) {
                len = inzp->z_size - inoff;
        }
        if (len == 0) {
                *lenp = 0;
                zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
                return (0);
        }

        /*
         * Callers might not be able to detect properly that we are read-only,
         * so check it explicitly here.
         */
        if (zfs_is_readonly(outzfsvfs)) {
                zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
                return (SET_ERROR(EROFS));
        }

        /*
         * If immutable or not appending then return EPERM.
         * Intentionally allow ZFS_READONLY through here.
         * See zfs_zaccess_common()
         */
        if ((outzp->z_pflags & ZFS_IMMUTABLE) != 0) {
                zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
                return (SET_ERROR(EPERM));
        }

        /*
         * No overlapping if we are cloning within the same file.
         */
        if (inzp == outzp) {
                if (inoff < outoff + len && outoff < inoff + len) {
                        zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
                        return (SET_ERROR(EINVAL));
                }
        }

        /*
         * Maintain predictable lock order.
         */
        if (inzp < outzp || (inzp == outzp && inoff < outoff)) {
                inlr = zfs_rangelock_enter(&inzp->z_rangelock, inoff, len,
                    RL_READER);
                outlr = zfs_rangelock_enter(&outzp->z_rangelock, outoff, len,
                    RL_WRITER);
        } else {
                outlr = zfs_rangelock_enter(&outzp->z_rangelock, outoff, len,
                    RL_WRITER);
                inlr = zfs_rangelock_enter(&inzp->z_rangelock, inoff, len,
                    RL_READER);
        }

        inblksz = inzp->z_blksz;

        /*
         * We cannot clone into a file with a different block size if we can't
         * grow it (block size is already bigger, it has more than one block,
         * or it is not locked for growth). There are other possible reasons
         * for the grow to fail, but we cover what we can before opening the
         * transaction and detect the rest after we try to do it.
         */
        if (inblksz < outzp->z_blksz) {
                error = SET_ERROR(EINVAL);
                goto unlock;
        }
        if (inblksz != outzp->z_blksz && (outzp->z_size > outzp->z_blksz ||
            outlr->lr_length != UINT64_MAX)) {
                error = SET_ERROR(EINVAL);
                goto unlock;
        }

        /*
         * Block size must be a power of 2 if the destination offset != 0.
         * There cannot be multiple blocks of non-power-of-2 size.
         */
        if (outoff != 0 && !ISP2(inblksz)) {
                error = SET_ERROR(EINVAL);
                goto unlock;
        }

        /*
         * Offsets and len must be at block boundaries.
         */
        if ((inoff % inblksz) != 0 || (outoff % inblksz) != 0) {
                error = SET_ERROR(EINVAL);
                goto unlock;
        }
        /*
         * Length must be a multiple of blksz, except for the end of the file.
         */
        if ((len % inblksz) != 0 &&
            (len < inzp->z_size - inoff || len < outzp->z_size - outoff)) {
                error = SET_ERROR(EINVAL);
                goto unlock;
        }

        /*
         * If we are copying only one block and it is smaller than the
         * recordsize property, do not allow the destination to grow beyond
         * one block if it is not there yet. Otherwise the destination will
         * be stuck with that block size forever, which can be as small as
         * 512 bytes, no matter how big the destination grows later.
         */
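        /*
         * (Worked example: with inblksz = 512, an empty destination, and
         * outoff = 512, len = 512, we get outoff + len = 1024 > 512; the
         * clone would need a second 512-byte block and pin the destination's
         * block size at 512 bytes, so the check below rejects it with
         * EINVAL.)
         */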
        if (len <= inblksz && inblksz < outzfsvfs->z_max_blksz &&
            outzp->z_size <= inblksz && outoff + len > inblksz) {
                error = SET_ERROR(EINVAL);
                goto unlock;
        }

        error = zn_rlimit_fsize(outoff + len);
        if (error != 0) {
                goto unlock;
        }

        if (inoff >= MAXOFFSET_T || outoff >= MAXOFFSET_T) {
                error = SET_ERROR(EFBIG);
                goto unlock;
        }

        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(outzfsvfs), NULL,
            &mtime, 16);
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(outzfsvfs), NULL,
            &ctime, 16);
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(outzfsvfs), NULL,
            &outzp->z_size, 8);

        zilog = outzfsvfs->z_log;
        maxblocks = zil_max_log_data(zilog, sizeof (lr_clone_range_t)) /
            sizeof (bps[0]);

        uid = KUID_TO_SUID(ZTOUID(outzp));
        gid = KGID_TO_SGID(ZTOGID(outzp));
        projid = outzp->z_projid;

        bps = vmem_alloc(sizeof (bps[0]) * maxblocks, KM_SLEEP);

        /*
         * Clone the file in reasonable size chunks. Each chunk is cloned
         * in a separate transaction; this keeps the intent log records small
         * and allows us to do more fine-grained space accounting.
         */
        while (len > 0) {
                size = MIN(inblksz * maxblocks, len);

                if (zfs_id_overblockquota(outzfsvfs, DMU_USERUSED_OBJECT,
                    uid) ||
                    zfs_id_overblockquota(outzfsvfs, DMU_GROUPUSED_OBJECT,
                    gid) ||
                    (projid != ZFS_DEFAULT_PROJID &&
                    zfs_id_overblockquota(outzfsvfs, DMU_PROJECTUSED_OBJECT,
                    projid))) {
                        error = SET_ERROR(EDQUOT);
                        break;
                }

                nbps = maxblocks;
                last_synced_txg = spa_last_synced_txg(dmu_objset_spa(inos));
                error = dmu_read_l0_bps(inos, inzp->z_id, inoff, size, bps,
                    &nbps);
                if (error != 0) {
                        /*
                         * If we are trying to clone a block that was created
                         * in the current transaction group, the error will be
                         * EAGAIN here. Based on zfs_bclone_wait_dirty either
                         * return a shortened range to the caller so it can
                         * fallback, or wait for the next TXG and check again.
                         */
                        if (error == EAGAIN && zfs_bclone_wait_dirty) {
                                txg_wait_synced(dmu_objset_pool(inos),
                                    last_synced_txg + 1);
                                continue;
                        }

                        break;
                }

                /*
                 * Start a transaction.
                 */
                tx = dmu_tx_create(outos);
                dmu_tx_hold_sa(tx, outzp->z_sa_hdl, B_FALSE);
                db = (dmu_buf_impl_t *)sa_get_db(outzp->z_sa_hdl);
                DB_DNODE_ENTER(db);
                dmu_tx_hold_clone_by_dnode(tx, DB_DNODE(db), outoff, size);
                DB_DNODE_EXIT(db);
                zfs_sa_upgrade_txholds(tx, outzp);
                error = dmu_tx_assign(tx, TXG_WAIT);
                if (error != 0) {
                        dmu_tx_abort(tx);
                        break;
                }

                /*
                 * Copy source znode's block size. This is done only if the
                 * whole znode is locked (see zfs_rangelock_cb()) and only
                 * on the first iteration since zfs_rangelock_reduce() will
                 * shrink down lr_length to the appropriate size.
                 */
                if (outlr->lr_length == UINT64_MAX) {
                        zfs_grow_blocksize(outzp, inblksz, tx);

                        /*
                         * Block growth may fail for many reasons we cannot
                         * predict here. If it happens the cloning is doomed.
                         */
                        if (inblksz != outzp->z_blksz) {
                                error = SET_ERROR(EINVAL);
                                dmu_tx_commit(tx);
                                break;
                        }

                        /*
                         * Round range lock up to the block boundary, so we
                         * prevent appends until we are done.
                         */
                        zfs_rangelock_reduce(outlr, outoff,
                            ((len - 1) / inblksz + 1) * inblksz);
                }

                error = dmu_brt_clone(outos, outzp->z_id, outoff, size, tx,
                    bps, nbps);
                if (error != 0) {
                        dmu_tx_commit(tx);
                        break;
                }

                if (zn_has_cached_data(outzp, outoff, outoff + size - 1)) {
                        update_pages(outzp, outoff, size, outos);
                }

                zfs_clear_setid_bits_if_necessary(outzfsvfs, outzp, cr,
                    &clear_setid_bits_txg, tx);

                zfs_tstamp_update_setup(outzp, CONTENT_MODIFIED, mtime, ctime);

                /*
                 * Update the file size (zp_size) if it has changed;
                 * account for possible concurrent updates.
                 */
                while ((outsize = outzp->z_size) < outoff + size) {
                        (void) atomic_cas_64(&outzp->z_size, outsize,
                            outoff + size);
                }

                error = sa_bulk_update(outzp->z_sa_hdl, bulk, count, tx);

                zfs_log_clone_range(zilog, tx, TX_CLONE_RANGE, outzp, outoff,
                    size, inblksz, bps, nbps);

                dmu_tx_commit(tx);

                if (error != 0)
                        break;

                inoff += size;
                outoff += size;
                len -= size;
                done += size;

                if (issig()) {
                        error = SET_ERROR(EINTR);
                        break;
                }
        }

        vmem_free(bps, sizeof (bps[0]) * maxblocks);
        zfs_znode_update_vfs(outzp);

unlock:
        zfs_rangelock_exit(outlr);
        zfs_rangelock_exit(inlr);

        if (done > 0) {
                /*
                 * If we have made at least partial progress, reset the error.
                 */
                error = 0;

                ZFS_ACCESSTIME_STAMP(inzfsvfs, inzp);

                if (outos->os_sync == ZFS_SYNC_ALWAYS) {
                        zil_commit(zilog, outzp->z_id);
                }

                *inoffp += done;
                *outoffp += done;
                *lenp = done;
        } else {
                /*
                 * If we made no progress, there must be a good reason.
                 * EOF is handled explicitly above, before the loop.
                 */
                ASSERT3S(error, !=, 0);
        }

        zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);

        return (error);
}
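
/*
 * Illustrative userspace sketch (hypothetical file names, not part of this
 * module): on Linux, zfs_clone_range() is typically reached through
 * copy_file_range(2) or the FICLONE/FICLONERANGE ioctls.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	clone_file(const char *src, const char *dst)
 *	{
 *		int in = open(src, O_RDONLY);
 *		int out = open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *		off_t in_off = 0, out_off = 0;
 *		ssize_t n;
 *
 *		do {
 *			n = copy_file_range(in, &in_off, out, &out_off,
 *			    1024 * 1024 * 1024, 0);
 *		} while (n > 0);
 *		close(in);
 *		close(out);
 *		return (n < 0 ? -1 : 0);
 *	}
 */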

/*
 * The usual pattern would be to call zfs_clone_range() from
 * zfs_replay_clone(), but we cannot do that, because when replaying we don't
 * have the source znode available. This is why we need a dedicated replay
 * function.
 */
int
zfs_clone_range_replay(znode_t *zp, uint64_t off, uint64_t len, uint64_t blksz,
    const blkptr_t *bps, size_t nbps)
{
        zfsvfs_t *zfsvfs;
        dmu_buf_impl_t *db;
        dmu_tx_t *tx;
        int error;
        int count = 0;
        sa_bulk_attr_t bulk[3];
        uint64_t mtime[2], ctime[2];

        ASSERT3U(off, <, MAXOFFSET_T);
        ASSERT3U(len, >, 0);
        ASSERT3U(nbps, >, 0);

        zfsvfs = ZTOZSB(zp);

        ASSERT(spa_feature_is_enabled(dmu_objset_spa(zfsvfs->z_os),
            SPA_FEATURE_BLOCK_CLONING));

        if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
                return (error);

        ASSERT(zfsvfs->z_replay);
        ASSERT(!zfs_is_readonly(zfsvfs));

        if ((off % blksz) != 0) {
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EINVAL));
        }

        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
            &zp->z_size, 8);

        /*
         * Start a transaction.
         */
        tx = dmu_tx_create(zfsvfs->z_os);

        dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
        db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
        DB_DNODE_ENTER(db);
        dmu_tx_hold_clone_by_dnode(tx, DB_DNODE(db), off, len);
        DB_DNODE_EXIT(db);
        zfs_sa_upgrade_txholds(tx, zp);
        error = dmu_tx_assign(tx, TXG_WAIT);
        if (error != 0) {
                dmu_tx_abort(tx);
                zfs_exit(zfsvfs, FTAG);
                return (error);
        }

        if (zp->z_blksz < blksz)
                zfs_grow_blocksize(zp, blksz, tx);

        dmu_brt_clone(zfsvfs->z_os, zp->z_id, off, len, tx, bps, nbps);

        zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);

        if (zp->z_size < off + len)
                zp->z_size = off + len;

        error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

        /*
         * zil_replaying() not only checks whether we are replaying the ZIL,
         * but also updates the ZIL header to record replay progress.
         */
        VERIFY(zil_replaying(zfsvfs->z_log, tx));

        dmu_tx_commit(tx);

        zfs_znode_update_vfs(zp);

        zfs_exit(zfsvfs, FTAG);

        return (error);
}

EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_holey);
EXPORT_SYMBOL(zfs_read);
EXPORT_SYMBOL(zfs_write);
EXPORT_SYMBOL(zfs_getsecattr);
EXPORT_SYMBOL(zfs_setsecattr);
EXPORT_SYMBOL(zfs_clone_range);
EXPORT_SYMBOL(zfs_clone_range_replay);

ZFS_MODULE_PARAM(zfs_vnops, zfs_vnops_, read_chunk_size, U64, ZMOD_RW,
        "Bytes to read per chunk");

ZFS_MODULE_PARAM(zfs, zfs_, bclone_enabled, INT, ZMOD_RW,
        "Enable block cloning");

ZFS_MODULE_PARAM(zfs, zfs_, bclone_wait_dirty, INT, ZMOD_RW,
        "Wait for dirty blocks when cloning");