/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/uio_impl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/policy.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
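
/*
 * Each fsync() arms a per-thread countdown (stored in the
 * zfs_fsyncer_key TSD below) that zfs_log_write() consults as a hint
 * that more fsync() calls from this thread are likely to follow soon.
 */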
static ulong_t zfs_fsync_sync_cnt = 4;
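
/*
 * Flush all dirty data for the given znode to stable storage by
 * committing its outstanding ZIL records (unless the dataset has
 * sync=disabled).
 */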
int
zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);

	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);
		zil_commit(zfsvfs->z_log, zp->z_id);
		ZFS_EXIT(zfsvfs);
	}
	tsd_set(zfs_fsyncer_key, NULL);

	return (0);
}
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
 * Lseek support for finding holes (cmd == SEEK_HOLE) and
 * data (cmd == SEEK_DATA). "off" is an in/out parameter.
 */
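/*
 * For illustration only (not part of this module), a minimal userland
 * sketch of how these semantics surface through lseek(2), assuming an
 * open file descriptor "fd" on a ZFS file:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	   // first data at/after 0
 *	off_t hole = lseek(fd, data, SEEK_HOLE);   // next hole after that
 *	if (data == -1 && errno == ENXIO)
 *		...;				   // no data at/after offset
 */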
static int
zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
{
	zfs_locked_range_t *lr;
	uint64_t noff = (uint64_t)*off; /* new offset */
	uint64_t file_sz;
	boolean_t hole;
	int error;

	file_sz = zp->z_size;
	if (noff >= file_sz) {
		return (SET_ERROR(ENXIO));
	}

	if (cmd == F_SEEK_HOLE)
		hole = B_TRUE;
	else
		hole = B_FALSE;

	/* Flush any mmap()'d data to disk */
	if (zn_has_cached_data(zp))
		zn_flush_cached_data(zp, B_FALSE);

	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, file_sz, RL_READER);
	error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
	zfs_rangelock_exit(lr);

	if (error == ESRCH)
		return (SET_ERROR(ENXIO));

	/* File was dirty, so fall back to using generic logic */
	if (error == EBUSY) {
		if (hole)
			*off = file_sz;
		return (0);
	}

	/*
	 * We could find a hole that begins after the logical end-of-file,
	 * because dmu_offset_next() only works on whole blocks.  If the
	 * EOF falls mid-block, then indicate that the "virtual hole"
	 * at the end of the file begins at the logical EOF, rather than
	 * at the end of the last block.
	 */
	if (noff > file_sz) {
		ASSERT(hole);
		noff = file_sz;
	}

	if (noff < *off)
		return (error);
	*off = noff;
	return (error);
}
int
zfs_holey(znode_t *zp, ulong_t cmd, loff_t *off)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	error = zfs_holey_common(zp, cmd, off);

	ZFS_EXIT(zfsvfs);
	return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
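
/*
 * Check access to the file in the requested mode; dispatches to
 * ACE-mask or rwx permission checking depending on the caller's flags.
 */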
int
zfs_access(znode_t *zp, int mode, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
	else
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

	ZFS_EXIT(zfsvfs);
	return (error);
}
static unsigned long zfs_vnops_read_chunk_size = 1024 * 1024; /* Tunable */
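
/*
 * A sketch of adjusting the chunk size at runtime on Linux, assuming
 * the module parameter name generated by ZFS_MODULE_PARAM() at the
 * bottom of this file:
 *
 *	echo 4194304 > /sys/module/zfs/parameters/zfs_vnops_read_chunk_size
 */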
/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	zp	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- O_SYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Side Effects:
 *	inode - atime updated if byte count > 0
 */
int
zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
	int error = 0;
	boolean_t frsync = B_FALSE;

	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EACCES));
	}

	/* We don't copy out anything useful for directories. */
	if (Z_ISDIR(ZTOTYPE(zp))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EISDIR));
	}

	/*
	 * Validate file offset
	 */
	if (zfs_uio_offset(uio) < (offset_t)0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Fasttrack empty reads
	 */
	if (zfs_uio_resid(uio) == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

#ifdef FRSYNC
	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 * Only do this for non-snapshots.
	 *
	 * Some platforms do not support FRSYNC and instead map it
	 * to O_SYNC, which results in unnecessary calls to zil_commit. We
	 * only honor FRSYNC requests on platforms which support it.
	 */
	frsync = !!(ioflag & FRSYNC);
#endif
	if (zfsvfs->z_log &&
	    (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
		zil_commit(zfsvfs->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
	    zfs_uio_offset(uio), zfs_uio_resid(uio), RL_READER);
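
	/*
	 * The reader lock just taken on [offset, offset + resid) allows
	 * concurrent readers while excluding overlapping writers and
	 * truncation for the duration of the copy.
	 */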
	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (zfs_uio_offset(uio) >= zp->z_size) {
		error = 0;
		goto out;
	}

	ASSERT(zfs_uio_offset(uio) < zp->z_size);
	ssize_t n = MIN(zfs_uio_resid(uio), zp->z_size - zfs_uio_offset(uio));
	ssize_t start_resid = n;

	while (n > 0) {
		ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size -
		    P2PHASE(zfs_uio_offset(uio), zfs_vnops_read_chunk_size));

#ifdef UIO_NOCOPY
		if (zfs_uio_segflg(uio) == UIO_NOCOPY)
			error = mappedread_sf(zp, nbytes, uio);
		else
#endif
		if (zn_has_cached_data(zp) && !(ioflag & O_DIRECT)) {
			error = mappedread(zp, nbytes, uio);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes);
		}

		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}

		n -= nbytes;
	}

	int64_t nread = start_resid - n;
	dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
	task_io_account_read(nread);
out:
	zfs_rangelock_exit(lr);

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Write the bytes to a file.
 *
 *	IN:	zp	- znode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *	ioflag	- O_APPEND flag set if in append mode.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated if byte count > 0
 */
int
zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
	int error = 0;
	ssize_t start_resid = zfs_uio_resid(uio);

	/*
	 * Fasttrack empty write
	 */
	ssize_t n = start_resid;
	if (n == 0)
		return (0);

	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	sa_bulk_attr_t bulk[4];
	int count = 0;
	uint64_t mtime[2], ctime[2];
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);

	/*
	 * Callers might not be able to detect properly that we are read-only,
	 * so check it explicitly here.
	 */
	if (zfs_is_readonly(zfsvfs)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EROFS));
	}

	/*
	 * If immutable or not appending then return EPERM.
	 * Intentionally allow ZFS_READONLY through here.
	 * See zfs_zaccess_common()
	 */
	if ((zp->z_pflags & ZFS_IMMUTABLE) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
	    (zfs_uio_offset(uio) < zp->z_size))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	/*
	 * Validate file offset
	 */
	offset_t woff = ioflag & O_APPEND ? zp->z_size : zfs_uio_offset(uio);
	if (woff < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	const uint64_t max_blksz = zfsvfs->z_max_blksz;
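	/* (z_max_blksz tracks the dataset's "recordsize" property.) */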
	/*
	 * Pre-fault the pages to ensure slow (eg NFS) pages
	 * don't hold up txg.
	 * Skip this if uio contains loaned arc_buf.
	 */
	if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFAULT));
	}

	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	zfs_locked_range_t *lr;
	if (ioflag & O_APPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics.  We reset the write offset once we have the lock.
		 */
		lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
		woff = lr->lr_offset;
		if (lr->lr_length == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		zfs_uio_setoffset(uio, woff);
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
	}

	if (zn_rlimit_fsize(zp, uio)) {
		zfs_rangelock_exit(lr);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFBIG));
	}

	const rlim64_t limit = MAXOFFSET_T;
	if (woff >= limit) {
		zfs_rangelock_exit(lr);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFBIG));
	}

	if (n > limit - woff)
		n = limit - woff;

	uint64_t end_size = MAX(zp->z_size, woff + n);
	zilog_t *zilog = zfsvfs->z_log;

	const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
	const uint64_t gid = KGID_TO_SGID(ZTOGID(zp));
	const uint64_t projid = zp->z_projid;

	/*
	 * Write the file in reasonable size chunks.  Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
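	/*
	 * Example: with a 128K recordsize, a 300K write at offset 0 commits
	 * as three transactions of 128K, 128K, and 44K.
	 */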
	while (n > 0) {
		woff = zfs_uio_offset(uio);

		if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
		    zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
		    (projid != ZFS_DEFAULT_PROJID &&
		    zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
		    projid))) {
			error = SET_ERROR(EDQUOT);
			break;
		}

		arc_buf_t *abuf = NULL;
		if (n >= max_blksz && woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block.  "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction.  This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			size_t cbytes;

			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    max_blksz);
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
			if ((error = zfs_uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes))) {
				dmu_return_arcbuf(abuf);
				break;
			}
			ASSERT3S(cbytes, ==, max_blksz);
		}

		/*
		 * Start a transaction.
		 */
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
		DB_DNODE_ENTER(db);
		dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff,
		    MIN(n, max_blksz));
		DB_DNODE_EXIT(db);
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}

		/*
		 * If rangelock_enter() over-locked we grow the blocksize
		 * and then reduce the lock range.  This will only happen
		 * on the first iteration since rangelock_reduce() will
		 * shrink down lr_length to the appropriate size.
		 */
		if (lr->lr_length == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				/*
				 * File's blocksize is already larger than the
				 * "recordsize" property.  Only let it grow to
				 * the next power of 2.
				 */
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size,
				    1 << highbit64(zp->z_blksz));
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_rangelock_reduce(lr, woff, n);
		}

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		const ssize_t nbytes =
		    MIN(n, max_blksz - P2PHASE(woff, max_blksz));

		ssize_t tx_bytes;
		if (abuf == NULL) {
			tx_bytes = zfs_uio_resid(uio);
			zfs_uio_fault_disable(uio, B_TRUE);
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			zfs_uio_fault_disable(uio, B_FALSE);

			if (error == EFAULT) {
				dmu_tx_commit(tx);
				/*
				 * Account for partial writes before
				 * continuing the loop.
				 * Update needs to occur before the next
				 * zfs_uio_prefaultpages, or prefaultpages may
				 * error, and we may break the loop early.
				 */
				if (tx_bytes != zfs_uio_resid(uio))
					n -= tx_bytes - zfs_uio_resid(uio);
				if (zfs_uio_prefaultpages(MIN(n, max_blksz),
				    uio)) {
					break;
				}
				continue;
			} else if (error != 0) {
				dmu_tx_commit(tx);
				break;
			}
			tx_bytes -= zfs_uio_resid(uio);
		} else {
			/* Implied by abuf != NULL: */
			ASSERT3S(n, >=, max_blksz);
			ASSERT0(P2PHASE(woff, max_blksz));
			/*
			 * We can simplify nbytes to MIN(n, max_blksz) since
			 * P2PHASE(woff, max_blksz) is 0, and knowing
			 * n >= max_blksz lets us simplify further:
			 */
			ASSERT3S(nbytes, ==, max_blksz);
			/*
			 * Thus, we're writing a full block at a block-aligned
			 * offset and extending the file past EOF.
			 *
			 * dmu_assign_arcbuf_by_dbuf() will directly assign the
			 * arc buffer to a dbuf.
			 */
			error = dmu_assign_arcbuf_by_dbuf(
			    sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
			if (error != 0) {
				dmu_return_arcbuf(abuf);
				dmu_tx_commit(tx);
				break;
			}
			ASSERT3S(nbytes, <=, zfs_uio_resid(uio));
			zfs_uioskip(uio, nbytes);
			tx_bytes = nbytes;
		}
		if (tx_bytes && zn_has_cached_data(zp) &&
		    !(ioflag & O_DIRECT)) {
			update_pages(zp, woff, tx_bytes, zfsvfs->z_os);
		}

		/*
		 * If we made no progress, we're done.  If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}

		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
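		/*
		 * Example: a mode-04755 binary written to by an unprivileged
		 * user drops to 0755 once this write commits.
		 */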
		mutex_enter(&zp->z_acl_lock);
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(zp, cr,
		    ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
			uint64_t newmode;

			zp->z_mode &= ~(S_ISUID | S_ISGID);
			newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
			    (void *)&newmode, sizeof (uint64_t), tx);
		}
		mutex_exit(&zp->z_acl_lock);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < zfs_uio_offset(uio)) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    zfs_uio_offset(uio));
			ASSERT(error == 0);
		}
		/*
		 * If we are replaying and eof is non zero then force
		 * the file size to the specified eof. Note, there's no
		 * concurrency during replay.
		 */
		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
			zp->z_size = zfsvfs->z_replay_eof;

		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
		    NULL, NULL);
		dmu_tx_commit(tx);

		if (error != 0)
			break;
		ASSERT3S(tx_bytes, ==, nbytes);
		n -= nbytes;

		if (n > 0) {
			if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) {
				error = SET_ERROR(EFAULT);
				break;
			}
		}
	}

	zfs_znode_update_vfs(zp);
	zfs_rangelock_exit(lr);

	/*
	 * If we're in replay mode, or we made no progress, or the
	 * uio data is inaccessible return an error.  Otherwise, it's
	 * at least a partial write, so it's successful.
	 */
	if (zfsvfs->z_replay || zfs_uio_resid(uio) == start_resid ||
	    error == EFAULT) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (ioflag & (O_SYNC | O_DSYNC) ||
	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);

	const int64_t nwritten = start_resid - zfs_uio_resid(uio);
	dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
	task_io_account_write(nwritten);

	ZFS_EXIT(zfsvfs);
	return (0);
}
int
zfs_getsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
	ZFS_EXIT(zfsvfs);

	return (error);
}

int
zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	zilog_t *zilog = zfsvfs->z_log;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	error = zfs_setacl(zp, vsecp, skipaclchk, cr);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
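
/*
 * Debug-only fault injection: when set, the next indirect-write lookup
 * in zfs_get_data() reports EIO, exercising the ZIL's error path.
 */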
#ifdef ZFS_DEBUG
static int zil_fault_io = 0;
#endif

static void zfs_get_done(zgd_t *zgd, int error);

/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, uint64_t gen, lr_write_t *lr, char *buf,
    struct lwb *lwb, zio_t *zio)
{
	zfsvfs_t *zfsvfs = arg;
	objset_t *os = zfsvfs->z_os;
	znode_t *zp;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error = 0;
	uint64_t zp_gen;

	ASSERT3P(lwb, !=, NULL);
	ASSERT3P(zio, !=, NULL);
	ASSERT3U(size, !=, 0);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zfsvfs, object, &zp) != 0)
		return (SET_ERROR(ENOENT));
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		zfs_zrele_async(zp);
		return (SET_ERROR(ENOENT));
	}
	/* check if generation number matches */
	if (sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
	    sizeof (zp_gen)) != 0) {
		zfs_zrele_async(zp);
		return (SET_ERROR(EIO));
	}
	if (zp_gen != gen) {
		zfs_zrele_async(zp);
		return (SET_ERROR(ENOENT));
	}

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_lwb = lwb;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
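	/*
	 * Note: the immediate-vs-indirect decision was already made by
	 * the ZIL when it built the record; a non-NULL "buf" here means
	 * the data is to be copied into the log record (immediate).
	 */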
	if (buf != NULL) { /* immediate write */
		zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
		    offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = SET_ERROR(ENOENT);
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		}
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data. We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
		for (;;) {
			uint64_t blkoff;
			size = zp->z_blksz;
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			offset -= blkoff;
			zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
			    offset, size, RL_READER);
			if (zp->z_blksz == size)
				break;
			offset += blkoff;
			zfs_rangelock_exit(zgd->zgd_lr);
		}
		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = SET_ERROR(ENOENT);
#ifdef ZFS_DEBUG
		if (zil_fault_io) {
			error = SET_ERROR(EIO);
			zil_fault_io = 0;
		}
#endif
		if (error == 0)
			error = dmu_buf_hold(os, object, offset, zgd, &db,
			    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			blkptr_t *bp = &lr->lr_blkptr;

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zfs_get_done, zgd);
			ASSERT(error || lr->lr_length <= size);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf.  We will finish everything up
			 * in the zfs_get_done() callback.
			 */
			if (error == 0)
				return (0);

			if (error == EALREADY) {
				lr->lr_common.lrc_txtype = TX_WRITE2;
				/*
				 * TX_WRITE2 relies on the data previously
				 * written by the TX_WRITE that caused
				 * EALREADY.  We zero out the BP because
				 * it is the old, currently-on-disk BP.
				 */
				zgd->zgd_bp = NULL;
				BP_ZERO(bp);
				error = 0;
			}
		}
	}

	zfs_get_done(zgd, error);

	return (error);
}
static void
zfs_get_done(zgd_t *zgd, int error)
{
	znode_t *zp = zgd->zgd_private;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_rangelock_exit(zgd->zgd_lr);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	zfs_zrele_async(zp);

	kmem_free(zgd, sizeof (zgd_t));
}
EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_holey);
EXPORT_SYMBOL(zfs_read);
EXPORT_SYMBOL(zfs_write);
EXPORT_SYMBOL(zfs_getsecattr);
EXPORT_SYMBOL(zfs_setsecattr);

ZFS_MODULE_PARAM(zfs_vnops, zfs_vnops_, read_chunk_size, ULONG, ZMOD_RW,
	"Bytes to read per chunk");