/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/policy.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_quota.h>
static ulong_t zfs_fsync_sync_cnt = 4;
int
zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);

	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);
		zil_commit(zfsvfs->z_log, zp->z_id);
		ZFS_EXIT(zfsvfs);
	}
	tsd_set(zfs_fsyncer_key, NULL);

	return (0);
}
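/*
 * Illustrative sketch (not part of the original source): a platform
 * fsync entry point is assumed to resolve its handle to a znode and
 * call zfs_fsync().  All names below are hypothetical.
 *
 *	static int
 *	my_fsync_op(my_file_t *fp, int datasync, cred_t *cr)
 *	{
 *		znode_t *zp = my_file_to_znode(fp);
 *		return (zfs_fsync(zp, datasync, cr));
 *	}
 */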
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
 * Lseek support for finding holes (cmd == SEEK_HOLE) and
 * data (cmd == SEEK_DATA).  "off" is an in/out parameter.
 */
static int
zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
{
	uint64_t noff = (uint64_t)*off;	/* new offset */
	uint64_t file_sz = zp->z_size;
	boolean_t hole = (cmd == F_SEEK_HOLE);
	int error;

	if (noff >= file_sz)
		return (SET_ERROR(ENXIO));

	error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
	if (error == ESRCH)
		return (SET_ERROR(ENXIO));

	/* file was dirty, so fall back to using generic logic */
	if (error == EBUSY) {
		if (hole)
			*off = file_sz;
		return (0);
	}
	/*
	 * We could find a hole that begins after the logical end-of-file,
	 * because dmu_offset_next() only works on whole blocks.  If the
	 * EOF falls mid-block, then indicate that the "virtual hole"
	 * at the end of the file begins at the logical EOF, rather than
	 * at the end of the last block.
	 */
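	/*
	 * Worked example (illustrative, not from the original comment):
	 * with a 128K block size and a logical file size of 130K, the
	 * last block spans [128K, 256K).  A raw dmu_offset_next() probe
	 * would report the trailing hole at 256K; the clamp below
	 * reports it at 130K, the logical EOF, instead.
	 */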
	if (noff > file_sz) {
		ASSERT(hole);
		noff = file_sz;
	}

	if (noff < *off)
		return (error);
	*off = noff;
	return (error);
}

int
zfs_holey(znode_t *zp, ulong_t cmd, loff_t *off)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	error = zfs_holey_common(zp, cmd, off);

	ZFS_EXIT(zfsvfs);
	return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
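/*
 * Userspace sketch (assumed example, not part of this file): the
 * semantics above are what lseek(2) ends up exposing.
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	 // first data at/after 0
 *	off_t hole = lseek(fd, data, SEEK_HOLE); // next hole after that
 *
 * Either call fails with errno == ENXIO once the offset reaches
 * end-of-file, matching the ENXIO returns above.
 */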
/*ARGSUSED*/
int
zfs_access(znode_t *zp, int mode, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
	else
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

	ZFS_EXIT(zfsvfs);
	return (error);
}
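/*
 * Illustrative note (not from the original source): a caller that has
 * already translated its request into NFSv4-style ACE access bits sets
 * V_ACE_MASK, e.g.
 *
 *	error = zfs_access(zp, ACE_READ_DATA, V_ACE_MASK, cr);
 *
 * whereas a caller holding classic rwx mode bits passes flag == 0 and
 * is routed through zfs_zaccess_rwx() instead.
 */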
static unsigned long zfs_vnops_read_chunk_size = 1024 * 1024; /* Tunable */
/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	zp	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- O_SYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Side Effects:
 *	inode - atime updated if byte count > 0
 */
/* ARGSUSED */
int
zfs_read(struct znode *zp, uio_t *uio, int ioflag, cred_t *cr)
{
	int error = 0;
	boolean_t frsync = B_FALSE;

	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EACCES));
	}
	/* We don't copy out anything useful for directories. */
	if (Z_ISDIR(ZTOTYPE(zp))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EISDIR));
	}
	/*
	 * Validate file offset
	 */
	if (uio->uio_loffset < (offset_t)0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}
	/*
	 * Fasttrack empty reads
	 */
	if (uio->uio_resid == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}
	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 * Only do this for non-snapshots.
	 *
	 * Some platforms do not support FRSYNC and instead map it
	 * to O_SYNC, which results in unnecessary calls to zil_commit.  We
	 * only honor FRSYNC requests on platforms which support it.
	 */
#ifdef FRSYNC
	frsync = !!(ioflag & FRSYNC);
#endif
	if (zfsvfs->z_log &&
	    (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
		zil_commit(zfsvfs->z_log, zp->z_id);
	/*
	 * Lock the range against changes.
	 */
	zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
	    uio->uio_loffset, uio->uio_resid, RL_READER);
	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (uio->uio_loffset >= zp->z_size) {
		error = 0;
		goto out;
	}
	ASSERT(uio->uio_loffset < zp->z_size);
	ssize_t n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
	ssize_t start_resid = n;

	while (n > 0) {
		ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size -
		    P2PHASE(uio->uio_loffset, zfs_vnops_read_chunk_size));
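		/*
		 * Worked example (illustrative): with the default 1M chunk
		 * size, a 3M read starting at offset 1.5M is issued as
		 * chunks of 512K (up to the 2M boundary), then 1M, 1M,
		 * and a final 512K.
		 */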
#ifdef UIO_NOCOPY
		if (uio->uio_segflg == UIO_NOCOPY)
			error = mappedread_sf(zp, nbytes, uio);
		else
#endif
		if (zn_has_cached_data(zp) && !(ioflag & O_DIRECT)) {
			error = mappedread(zp, nbytes, uio);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes);
		}

		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}

		n -= nbytes;
	}
	int64_t nread = start_resid - n;
	dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
	task_io_account_read(nread);
out:
	zfs_rangelock_exit(lr);
	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
	ZFS_EXIT(zfsvfs);
	return (error);
}
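/*
 * Illustrative sketch (assumed, not part of the original source): a
 * platform read entry point wraps zfs_read() roughly like so, with the
 * uio construction being platform specific:
 *
 *	uio_t uio;
 *	// ... point uio at the caller's buffer, offset, and length ...
 *	error = zfs_read(zp, &uio, fflags, cr);
 *	// on return, uio_resid holds the bytes NOT transferred
 */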
/*
 * Write the bytes to a file.
 *
 *	IN:	zp	- znode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- O_APPEND flag set if in append mode.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated if byte count > 0
 */
/* ARGSUSED */
int
zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
{
	int error = 0;
	ssize_t start_resid = uio->uio_resid;

	/*
	 * Fasttrack empty write
	 */
	ssize_t n = start_resid;
	if (n == 0)
		return (0);
	rlim64_t limit = MAXOFFSET_T;

	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	sa_bulk_attr_t bulk[4];
	int count = 0;
	uint64_t mtime[2], ctime[2];
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);
	/*
	 * Callers might not be able to detect properly that we are read-only,
	 * so check it explicitly here.
	 */
	if (zfs_is_readonly(zfsvfs)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EROFS));
	}
	/*
	 * If immutable or not appending then return EPERM
	 */
	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
	    (uio->uio_loffset < zp->z_size))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}
	/*
	 * Validate file offset
	 */
	offset_t woff = ioflag & O_APPEND ? zp->z_size : uio->uio_loffset;
	if (woff < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}
	int max_blksz = zfsvfs->z_max_blksz;
	/*
	 * Pre-fault the pages to ensure slow (eg NFS) pages
	 * don't hold up txg.
	 * Skip this if uio contains loaned arc_buf.
	 */
	if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFAULT));
	}
	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	zfs_locked_range_t *lr;
	if (ioflag & O_APPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics.  We reset the write offset once we have the lock.
		 */
		lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
		woff = lr->lr_offset;
		if (lr->lr_length == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		uio->uio_loffset = woff;
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
	}
	if (zn_rlimit_fsize(zp, uio, uio->uio_td)) {
		zfs_rangelock_exit(lr);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFBIG));
	}

	if (woff >= limit) {
		zfs_rangelock_exit(lr);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFBIG));
	}
	if ((woff + n) > limit || woff > (limit - n))
		n = limit - woff;
	uint64_t end_size = MAX(zp->z_size, woff + n);
	zilog_t *zilog = zfsvfs->z_log;
	/*
	 * Write the file in reasonable size chunks.  Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
	while (n > 0) {
		woff = uio->uio_loffset;
		if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
		    KUID_TO_SUID(ZTOUID(zp))) ||
		    zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
		    KGID_TO_SGID(ZTOGID(zp))) ||
		    (zp->z_projid != ZFS_DEFAULT_PROJID &&
		    zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
		    zp->z_projid))) {
			error = SET_ERROR(EDQUOT);
			break;
		}
		arc_buf_t *abuf = NULL;
		if (n >= max_blksz && woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block.  "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction.  This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			size_t cbytes;

			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    max_blksz);
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
			if ((error = uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes))) {
				dmu_return_arcbuf(abuf);
				break;
			}
			ASSERT(cbytes == max_blksz);
		}
		/*
		 * Start a transaction.
		 */
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
		DB_DNODE_ENTER(db);
		dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff,
		    MIN(n, max_blksz));
		DB_DNODE_EXIT(db);
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}
		/*
		 * If rangelock_enter() over-locked we grow the blocksize
		 * and then reduce the lock range.  This will only happen
		 * on the first iteration since rangelock_reduce() will
		 * shrink down lr_length to the appropriate size.
		 */
		if (lr->lr_length == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				/*
				 * File's blocksize is already larger than the
				 * "recordsize" property.  Only let it grow to
				 * the next power of 2.
				 */
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size,
				    1 << highbit64(zp->z_blksz));
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_rangelock_reduce(lr, woff, n);
		}
		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		ssize_t nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
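		/*
		 * Worked example (illustrative): with a 128K recordsize, a
		 * 200K write at woff == 100K is split into a 28K chunk (up
		 * to the 128K boundary), a 128K chunk, and a 44K chunk,
		 * each in its own transaction.
		 */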
		ssize_t tx_bytes;
		if (abuf == NULL) {
			tx_bytes = uio->uio_resid;
			uio_fault_disable(uio, B_TRUE);
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			uio_fault_disable(uio, B_FALSE);
			if (error == EFAULT) {
				dmu_tx_commit(tx);
				/*
				 * Account for partial writes before
				 * continuing the loop.
				 * Update needs to occur before the next
				 * uio_prefaultpages, or prefaultpages may
				 * error, and we may break the loop early.
				 */
				if (tx_bytes != uio->uio_resid)
					n -= tx_bytes - uio->uio_resid;
				if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
					break;
				}
				continue;
			} else if (error != 0) {
				dmu_tx_commit(tx);
				break;
			}
			tx_bytes -= uio->uio_resid;
		} else {
			tx_bytes = nbytes;
			/*
			 * Is this block ever reached?
			 */

			/*
			 * If this is not a full block write, but we are
			 * extending the file past EOF and this data starts
			 * block-aligned, use assign_arcbuf().  Otherwise,
			 * write via dmu_write().
			 */
			if (tx_bytes == max_blksz) {
				error = dmu_assign_arcbuf_by_dbuf(
				    sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
				if (error != 0) {
					dmu_return_arcbuf(abuf);
					dmu_tx_commit(tx);
					break;
				}
			}
			ASSERT(tx_bytes <= uio->uio_resid);
			uioskip(uio, tx_bytes);
		}
		if (tx_bytes && zn_has_cached_data(zp) &&
		    !(ioflag & O_DIRECT)) {
			update_pages(zp, woff,
			    tx_bytes, zfsvfs->z_os, zp->z_id);
		}
		/*
		 * If we made no progress, we're done.  If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}
		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
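		/*
		 * For example (illustrative): a root-owned executable with
		 * mode 04755 that is overwritten by an unprivileged user
		 * drops to 0755 here, closing the window in which a
		 * partially rewritten setuid binary could be executed.
		 */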
		mutex_enter(&zp->z_acl_lock);
		uint32_t uid = KUID_TO_SUID(ZTOUID(zp));
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(zp, cr,
		    ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
			uint64_t newmode;

			zp->z_mode &= ~(S_ISUID | S_ISGID);
			newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
			    (void *)&newmode, sizeof (uint64_t), tx);
		}
		mutex_exit(&zp->z_acl_lock);
		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < uio->uio_loffset) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    uio->uio_loffset);
			ASSERT(error == 0);
		}
		/*
		 * If we are replaying and eof is non zero then force
		 * the file size to the specified eof. Note, there's no
		 * concurrency during replay.
		 */
		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
			zp->z_size = zfsvfs->z_replay_eof;
		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
		    NULL, NULL);
		dmu_tx_commit(tx);

		if (error != 0)
			break;
		ASSERT(tx_bytes == nbytes);
		n -= nbytes;
		if (n > 0) {
			if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
				error = SET_ERROR(EFAULT);
				break;
			}
		}
	}
	zfs_inode_update(zp);
	zfs_rangelock_exit(lr);
	/*
	 * If we're in replay mode, or we made no progress, return error.
	 * Otherwise, it's at least a partial write, so it's successful.
	 */
	if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (ioflag & (O_SYNC | O_DSYNC) ||
	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);
	int64_t nwritten = start_resid - uio->uio_resid;
	dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
	task_io_account_write(nwritten);

	ZFS_EXIT(zfsvfs);
	return (0);
}
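/*
 * Illustrative note (not from the original source): the "borrowed"
 * arc_buf fast path in the loop above fires only for block-sized,
 * block-aligned, file-extending writes.  E.g. with recordsize=128K, a
 * 128K write at offset 256K of a 256K file qualifies, while the same
 * write at offset 200K (misaligned) or offset 0 (not extending) falls
 * back to dmu_write_uio_dbuf().
 */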
/*ARGSUSED*/
int
zfs_getsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
	ZFS_EXIT(zfsvfs);

	return (error);
}
/*ARGSUSED*/
int
zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	zilog_t *zilog = zfsvfs->z_log;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	error = zfs_setacl(zp, vsecp, skipaclchk, cr);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_holey);
EXPORT_SYMBOL(zfs_read);
EXPORT_SYMBOL(zfs_write);
EXPORT_SYMBOL(zfs_getsecattr);
EXPORT_SYMBOL(zfs_setsecattr);
ZFS_MODULE_PARAM(zfs_vnops, zfs_vnops_, read_chunk_size, ULONG, ZMOD_RW,
	"Bytes to read per chunk");