4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
27 /* All Rights Reserved */
30 * University Copyright- Copyright (c) 1982, 1986, 1988
31 * The Regents of the University of California
34 * University Acknowledgment- Portions of this document are derived from
35 * software developed by the University of California, Berkeley, and its
39 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
44 #include <sys/types.h>
45 #include <sys/uio_impl.h>
46 #include <sys/sysmacros.h>
47 #include <sys/string.h>
48 #include <linux/kmap_compat.h>
49 #include <linux/uaccess.h>
52 * Move "n" bytes at byte address "p"; "rw" indicates the direction
53 * of the move, and the I/O parameters are provided in "uio", which is
54 * updated to reflect the data which was moved. Returns 0 on success or
55 * a non-zero errno on failure.
58 zfs_uiomove_iov(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
60 const struct iovec *iov = uio->uio_iov;
61 size_t skip = uio->uio_skip;
64 while (n && uio->uio_resid) {
65 cnt = MIN(iov->iov_len - skip, n);
66 switch (uio->uio_segflg) {
69 * p = kernel data pointer
70 * iov->iov_base = user data pointer
73 if (copy_to_user(iov->iov_base+skip, p, cnt))
76 unsigned long b_left = 0;
77 if (uio->uio_fault_disable) {
78 if (!zfs_access_ok(VERIFY_READ,
79 (iov->iov_base + skip), cnt)) {
84 __copy_from_user_inatomic(p,
85 (iov->iov_base + skip), cnt);
90 (iov->iov_base + skip), cnt);
93 unsigned long c_bytes =
95 uio->uio_skip += c_bytes;
96 ASSERT3U(uio->uio_skip, <,
98 uio->uio_resid -= c_bytes;
99 uio->uio_loffset += c_bytes;
106 memcpy(iov->iov_base + skip, p, cnt);
108 memcpy(p, iov->iov_base + skip, cnt);
114 if (skip == iov->iov_len) {
116 uio->uio_iov = (++iov);
119 uio->uio_skip = skip;
120 uio->uio_resid -= cnt;
121 uio->uio_loffset += cnt;
122 p = (caddr_t)p + cnt;
129 zfs_uiomove_bvec(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
131 const struct bio_vec *bv = uio->uio_bvec;
132 size_t skip = uio->uio_skip;
135 while (n && uio->uio_resid) {
137 cnt = MIN(bv->bv_len - skip, n);
139 paddr = zfs_kmap_atomic(bv->bv_page);
141 memcpy(paddr + bv->bv_offset + skip, p, cnt);
143 memcpy(p, paddr + bv->bv_offset + skip, cnt);
144 zfs_kunmap_atomic(paddr);
147 if (skip == bv->bv_len) {
149 uio->uio_bvec = (++bv);
152 uio->uio_skip = skip;
153 uio->uio_resid -= cnt;
154 uio->uio_loffset += cnt;
155 p = (caddr_t)p + cnt;
161 #if defined(HAVE_VFS_IOV_ITER)
163 zfs_uiomove_iter(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio,
166 size_t cnt = MIN(n, uio->uio_resid);
169 iov_iter_advance(uio->uio_iter, uio->uio_skip);
172 cnt = copy_to_iter(p, cnt, uio->uio_iter);
174 cnt = copy_from_iter(p, cnt, uio->uio_iter);
177 * When operating on a full pipe no bytes are processed.
178 * In which case return EFAULT which is converted to EAGAIN
179 * by the kernel's generic_file_splice_read() function.
185 * Revert advancing the uio_iter. This is set by zfs_uiocopy()
186 * to avoid consuming the uio and its iov_iter structure.
189 iov_iter_revert(uio->uio_iter, cnt);
191 uio->uio_resid -= cnt;
192 uio->uio_loffset += cnt;
199 zfs_uiomove(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
201 if (uio->uio_segflg == UIO_BVEC)
202 return (zfs_uiomove_bvec(p, n, rw, uio));
203 #if defined(HAVE_VFS_IOV_ITER)
204 else if (uio->uio_segflg == UIO_ITER)
205 return (zfs_uiomove_iter(p, n, rw, uio, B_FALSE));
208 return (zfs_uiomove_iov(p, n, rw, uio));
210 EXPORT_SYMBOL(zfs_uiomove);
213 * Fault in the pages of the first n bytes specified by the uio structure.
214 * 1 byte in each page is touched and the uio struct is unmodified. Any
215 * error encountered will end this best-effort attempt to get
216 * the pages resident.
219 zfs_uio_prefaultpages(ssize_t n, zfs_uio_t *uio)
221 if (uio->uio_segflg == UIO_SYSSPACE || uio->uio_segflg == UIO_BVEC) {
222 /* There's never a need to fault in kernel pages */
224 #if defined(HAVE_VFS_IOV_ITER)
225 } else if (uio->uio_segflg == UIO_ITER) {
227 * At least a Linux 4.9 kernel, iov_iter_fault_in_readable()
228 * can be relied on to fault in user pages when referenced.
230 if (iov_iter_fault_in_readable(uio->uio_iter, n))
234 /* Fault in all user pages */
235 ASSERT3S(uio->uio_segflg, ==, UIO_USERSPACE);
236 const struct iovec *iov = uio->uio_iov;
237 int iovcnt = uio->uio_iovcnt;
238 size_t skip = uio->uio_skip;
242 for (; n > 0 && iovcnt > 0; iov++, iovcnt--, skip = 0) {
243 ulong_t cnt = MIN(iov->iov_len - skip, n);
248 /* touch each page in this segment. */
249 p = iov->iov_base + skip;
251 if (get_user(tmp, (uint8_t *)p))
253 ulong_t incr = MIN(cnt, PAGESIZE);
257 /* touch the last byte in case it straddles a page. */
259 if (get_user(tmp, (uint8_t *)p))
266 EXPORT_SYMBOL(zfs_uio_prefaultpages);
269 * The same as zfs_uiomove() but doesn't modify uio structure.
270 * return in cbytes how many bytes were copied.
273 zfs_uiocopy(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, size_t *cbytes)
278 memcpy(&uio_copy, uio, sizeof (zfs_uio_t));
280 if (uio->uio_segflg == UIO_BVEC)
281 ret = zfs_uiomove_bvec(p, n, rw, &uio_copy);
282 #if defined(HAVE_VFS_IOV_ITER)
283 else if (uio->uio_segflg == UIO_ITER)
284 ret = zfs_uiomove_iter(p, n, rw, &uio_copy, B_TRUE);
287 ret = zfs_uiomove_iov(p, n, rw, &uio_copy);
289 *cbytes = uio->uio_resid - uio_copy.uio_resid;
293 EXPORT_SYMBOL(zfs_uiocopy);
296 * Drop the next n chars out of *uio.
299 zfs_uioskip(zfs_uio_t *uio, size_t n)
301 if (n > uio->uio_resid)
304 if (uio->uio_segflg == UIO_BVEC) {
306 while (uio->uio_iovcnt &&
307 uio->uio_skip >= uio->uio_bvec->bv_len) {
308 uio->uio_skip -= uio->uio_bvec->bv_len;
312 #if defined(HAVE_VFS_IOV_ITER)
313 } else if (uio->uio_segflg == UIO_ITER) {
314 iov_iter_advance(uio->uio_iter, n);
318 while (uio->uio_iovcnt &&
319 uio->uio_skip >= uio->uio_iov->iov_len) {
320 uio->uio_skip -= uio->uio_iov->iov_len;
325 uio->uio_loffset += n;
328 EXPORT_SYMBOL(zfs_uioskip);