4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
27 /* All Rights Reserved */
30 * University Copyright- Copyright (c) 1982, 1986, 1988
31 * The Regents of the University of California
34 * University Acknowledgment- Portions of this document are derived from
35 * software developed by the University of California, Berkeley, and its
39 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
43 * The uio support from OpenSolaris has been added as a short term
44 * work around. The hope is to adopt native Linux type and drop the
45 * use of uio's entirely. Under Linux they only add overhead and
46 * when possible we want to use native APIs for the ZPL layer.
#include <sys/types.h>
#include <sys/uio_impl.h>
#include <sys/sysmacros.h>
#include <sys/strings.h>
#include <linux/kmap_compat.h>
#include <linux/uaccess.h>
58 * Move "n" bytes at byte address "p"; "rw" indicates the direction
59 * of the move, and the I/O parameters are provided in "uio", which is
60 * update to reflect the data which was moved. Returns 0 on success or
61 * a non-zero errno on failure.
64 uiomove_iov(void *p, size_t n, enum uio_rw rw, struct uio *uio)
66 const struct iovec *iov = uio->uio_iov;
67 size_t skip = uio->uio_skip;
70 while (n && uio->uio_resid) {
71 cnt = MIN(iov->iov_len - skip, n);
72 switch (uio->uio_segflg) {
76 * p = kernel data pointer
77 * iov->iov_base = user data pointer
80 if (copy_to_user(iov->iov_base+skip, p, cnt))
83 unsigned long b_left = 0;
84 if (uio->uio_fault_disable) {
85 if (!zfs_access_ok(VERIFY_READ,
86 (iov->iov_base + skip), cnt)) {
91 __copy_from_user_inatomic(p,
92 (iov->iov_base + skip), cnt);
97 (iov->iov_base + skip), cnt);
100 unsigned long c_bytes =
102 uio->uio_skip += c_bytes;
103 ASSERT3U(uio->uio_skip, <,
105 uio->uio_resid -= c_bytes;
106 uio->uio_loffset += c_bytes;
113 bcopy(p, iov->iov_base + skip, cnt);
115 bcopy(iov->iov_base + skip, p, cnt);
121 if (skip == iov->iov_len) {
123 uio->uio_iov = (++iov);
126 uio->uio_skip = skip;
127 uio->uio_resid -= cnt;
128 uio->uio_loffset += cnt;
129 p = (caddr_t)p + cnt;
136 uiomove_bvec(void *p, size_t n, enum uio_rw rw, struct uio *uio)
138 const struct bio_vec *bv = uio->uio_bvec;
139 size_t skip = uio->uio_skip;
142 while (n && uio->uio_resid) {
144 cnt = MIN(bv->bv_len - skip, n);
146 paddr = zfs_kmap_atomic(bv->bv_page, KM_USER1);
148 bcopy(p, paddr + bv->bv_offset + skip, cnt);
150 bcopy(paddr + bv->bv_offset + skip, p, cnt);
151 zfs_kunmap_atomic(paddr, KM_USER1);
154 if (skip == bv->bv_len) {
156 uio->uio_bvec = (++bv);
159 uio->uio_skip = skip;
160 uio->uio_resid -= cnt;
161 uio->uio_loffset += cnt;
162 p = (caddr_t)p + cnt;
169 uiomove(void *p, size_t n, enum uio_rw rw, struct uio *uio)
171 if (uio->uio_segflg != UIO_BVEC)
172 return (uiomove_iov(p, n, rw, uio));
174 return (uiomove_bvec(p, n, rw, uio));
176 EXPORT_SYMBOL(uiomove);
178 #define fuword8(uptr, vptr) get_user((*vptr), (uptr))
181 * Fault in the pages of the first n bytes specified by the uio structure.
182 * 1 byte in each page is touched and the uio struct is unmodified. Any
183 * error will terminate the process as this is only a best attempt to get
184 * the pages resident.
187 uio_prefaultpages(ssize_t n, struct uio *uio)
189 const struct iovec *iov;
196 /* no need to fault in kernel pages */
197 switch (uio->uio_segflg) {
209 iovcnt = uio->uio_iovcnt;
210 skip = uio->uio_skip;
212 for (; n > 0 && iovcnt > 0; iov++, iovcnt--, skip = 0) {
213 cnt = MIN(iov->iov_len - skip, n);
219 * touch each page in this segment.
221 p = iov->iov_base + skip;
223 if (fuword8((uint8_t *)p, &tmp))
225 incr = MIN(cnt, PAGESIZE);
230 * touch the last byte in case it straddles a page.
233 if (fuword8((uint8_t *)p, &tmp))
239 EXPORT_SYMBOL(uio_prefaultpages);
242 * same as uiomove() but doesn't modify uio structure.
243 * return in cbytes how many bytes were copied.
246 uiocopy(void *p, size_t n, enum uio_rw rw, struct uio *uio, size_t *cbytes)
251 bcopy(uio, &uio_copy, sizeof (struct uio));
252 ret = uiomove(p, n, rw, &uio_copy);
253 *cbytes = uio->uio_resid - uio_copy.uio_resid;
256 EXPORT_SYMBOL(uiocopy);
259 * Drop the next n chars out of *uiop.
262 uioskip(uio_t *uiop, size_t n)
264 if (n > uiop->uio_resid)
268 if (uiop->uio_segflg != UIO_BVEC) {
269 while (uiop->uio_iovcnt &&
270 uiop->uio_skip >= uiop->uio_iov->iov_len) {
271 uiop->uio_skip -= uiop->uio_iov->iov_len;
276 while (uiop->uio_iovcnt &&
277 uiop->uio_skip >= uiop->uio_bvec->bv_len) {
278 uiop->uio_skip -= uiop->uio_bvec->bv_len;
283 uiop->uio_loffset += n;
284 uiop->uio_resid -= n;
286 EXPORT_SYMBOL(uioskip);