/*	$OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $	*/

/*-
 * Copyright (c) 1999 Theo de Raadt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sdt.h>
#include <sys/uio.h>

#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <opencrypto/cryptodev.h>
53 SDT_PROVIDER_DECLARE(opencrypto);
/*
 * These macros are only for avoiding code duplication, as we need to skip
 * given number of bytes in the same way in several functions below.
 */

/*
 * Skip 'off' bytes of the iovec list ('iov'/'iol' in the caller's scope),
 * leaving 'off' as the residual offset into the current iovec.
 */
#define	CUIO_SKIP()	do {						\
	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
	while (off > 0) {						\
		KASSERT(iol >= 0, ("%s: empty in skip", __func__));	\
		if (off < iov->iov_len)					\
			break;						\
		off -= iov->iov_len;					\
		iol--;							\
		iov++;							\
	}								\
} while (0)
/*
 * Skip 'off' bytes of a vm_page_t list ('pages'/'processed' in the caller's
 * scope), advancing one page at a time.
 *
 * NOTE(review): the "PAGE_SIZE - off" arithmetic below is preserved from the
 * original source; for off >= PAGE_SIZE it looks suspect (the subtracted
 * quantity is non-positive) — verify against upstream before relying on
 * multi-page skips through this macro.
 */
#define CVM_PAGE_SKIP()	do {					\
	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));	\
	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));	\
	while (off > 0) {					\
		if (off < PAGE_SIZE)				\
			break;					\
		processed += PAGE_SIZE - off;			\
		off -= PAGE_SIZE - off;				\
		pages++;					\
	}							\
} while (0)
85 cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
87 struct iovec *iov = uio->uio_iov;
88 int iol = uio->uio_iovcnt;
93 KASSERT(iol >= 0, ("%s: empty", __func__));
94 count = min(iov->iov_len - off, len);
95 bcopy(((caddr_t)iov->iov_base) + off, cp, count);
105 cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp)
107 struct iovec *iov = uio->uio_iov;
108 int iol = uio->uio_iovcnt;
113 KASSERT(iol >= 0, ("%s: empty", __func__));
114 count = min(iov->iov_len - off, len);
115 bcopy(cp, ((caddr_t)iov->iov_base) + off, count);
125 * Return the index and offset of location in iovec list.
128 cuio_getptr(struct uio *uio, int loc, int *off)
133 while (loc >= 0 && ind < uio->uio_iovcnt) {
134 len = uio->uio_iov[ind].iov_len;
143 if (ind > 0 && loc == 0) {
145 *off = uio->uio_iov[ind].iov_len;
152 #if CRYPTO_MAY_HAVE_VMPAGE
154 * Apply function f to the data in a vm_page_t list starting "off" bytes from
155 * the beginning, continuing for "len" bytes.
158 cvm_page_apply(vm_page_t *pages, int off, int len,
159 int (*f)(void *, const void *, u_int), void *arg)
167 char *kaddr = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages));
168 count = min(PAGE_SIZE - off, len);
169 rval = (*f)(arg, kaddr + off, count);
181 cvm_page_contiguous_segment(vm_page_t *pages, size_t skip, int len)
183 if ((skip + len - 1) / PAGE_SIZE > skip / PAGE_SIZE)
186 pages += (skip / PAGE_SIZE);
187 skip -= rounddown(skip, PAGE_SIZE);
188 return (((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages))) + skip);
192 * Copy len bytes of data from the vm_page_t array, skipping the first off
193 * bytes, into the pointer cp. Return the number of bytes skipped and copied.
194 * Does not verify the length of the array.
197 cvm_page_copyback(vm_page_t *pages, int off, int len, c_caddr_t cp)
204 count = min(PAGE_SIZE - off, len);
205 bcopy(cp, (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off,
217 * Copy len bytes of data from the pointer cp into the vm_page_t array,
218 * skipping the first off bytes, Return the number of bytes skipped and copied.
219 * Does not verify the length of the array.
222 cvm_page_copydata(vm_page_t *pages, int off, int len, caddr_t cp)
229 count = min(PAGE_SIZE - off, len);
230 bcopy(((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off), cp,
240 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
243 * Given a starting page in an m_epg, determine the length of the
244 * current physically contiguous segment.
246 static __inline size_t
247 m_epg_pages_extent(struct mbuf *m, int idx, u_int pglen)
253 for (i = idx + 1; i < m->m_epg_npgs; i++) {
254 if (m->m_epg_pa[i - 1] + PAGE_SIZE != m->m_epg_pa[i])
256 len += m_epg_pagelen(m, i, 0);
261 static __inline void *
262 m_epg_segbase(struct mbuf *m, size_t offset)
264 u_int i, pglen, pgoff;
266 offset += mtod(m, vm_offset_t);
267 if (offset < m->m_epg_hdrlen)
268 return (m->m_epg_hdr + offset);
269 offset -= m->m_epg_hdrlen;
270 pgoff = m->m_epg_1st_off;
271 for (i = 0; i < m->m_epg_npgs; i++) {
272 pglen = m_epg_pagelen(m, i, pgoff);
274 return ((void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff +
279 KASSERT(offset <= m->m_epg_trllen, ("%s: offset beyond trailer",
281 return (m->m_epg_trail + offset);
284 static __inline size_t
285 m_epg_seglen(struct mbuf *m, size_t offset)
287 u_int i, pglen, pgoff;
289 offset += mtod(m, vm_offset_t);
290 if (offset < m->m_epg_hdrlen)
291 return (m->m_epg_hdrlen - offset);
292 offset -= m->m_epg_hdrlen;
293 pgoff = m->m_epg_1st_off;
294 for (i = 0; i < m->m_epg_npgs; i++) {
295 pglen = m_epg_pagelen(m, i, pgoff);
297 return (m_epg_pages_extent(m, i, pglen) - offset);
301 KASSERT(offset <= m->m_epg_trllen, ("%s: offset beyond trailer",
303 return (m->m_epg_trllen - offset);
306 static __inline void *
307 m_epg_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
309 u_int i, pglen, pgoff;
311 skip += mtod(m, vm_offset_t);
312 if (skip < m->m_epg_hdrlen) {
313 if (len > m->m_epg_hdrlen - skip)
315 return (m->m_epg_hdr + skip);
317 skip -= m->m_epg_hdrlen;
318 pgoff = m->m_epg_1st_off;
319 for (i = 0; i < m->m_epg_npgs; i++) {
320 pglen = m_epg_pagelen(m, i, pgoff);
322 if (len > m_epg_pages_extent(m, i, pglen) - skip)
324 return ((void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff +
330 KASSERT(skip <= m->m_epg_trllen && len <= m->m_epg_trllen - skip,
331 ("%s: segment beyond trailer", __func__));
332 return (m->m_epg_trail + skip);
336 crypto_cursor_init(struct crypto_buffer_cursor *cc,
337 const struct crypto_buffer *cb)
339 memset(cc, 0, sizeof(*cc));
340 cc->cc_type = cb->cb_type;
341 switch (cc->cc_type) {
342 case CRYPTO_BUF_CONTIG:
343 cc->cc_buf = cb->cb_buf;
344 cc->cc_buf_len = cb->cb_buf_len;
346 case CRYPTO_BUF_MBUF:
347 case CRYPTO_BUF_SINGLE_MBUF:
348 cc->cc_mbuf = cb->cb_mbuf;
350 case CRYPTO_BUF_VMPAGE:
351 cc->cc_vmpage = cb->cb_vm_page;
352 cc->cc_buf_len = cb->cb_vm_page_len;
353 cc->cc_offset = cb->cb_vm_page_offset;
356 cc->cc_iov = cb->cb_uio->uio_iov;
360 panic("%s: invalid buffer type %d", __func__, cb->cb_type);
366 SDT_PROBE_DEFINE2(opencrypto, criov, cursor_advance, vmpage, "struct crypto_buffer_cursor*", "size_t");
369 crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount)
373 switch (cc->cc_type) {
374 case CRYPTO_BUF_CONTIG:
375 MPASS(cc->cc_buf_len >= amount);
376 cc->cc_buf += amount;
377 cc->cc_buf_len -= amount;
379 case CRYPTO_BUF_MBUF:
381 remain = cc->cc_mbuf->m_len - cc->cc_offset;
382 if (amount < remain) {
383 cc->cc_offset += amount;
387 cc->cc_mbuf = cc->cc_mbuf->m_next;
393 case CRYPTO_BUF_SINGLE_MBUF:
394 MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + amount);
395 cc->cc_offset += amount;
397 case CRYPTO_BUF_VMPAGE:
399 SDT_PROBE2(opencrypto, criov, cursor_advance, vmpage,
401 remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
402 if (amount < remain) {
403 cc->cc_buf_len -= amount;
404 cc->cc_offset += amount;
407 cc->cc_buf_len -= remain;
411 if (amount == 0 || cc->cc_buf_len == 0)
417 remain = cc->cc_iov->iov_len - cc->cc_offset;
418 if (amount < remain) {
419 cc->cc_offset += amount;
431 panic("%s: invalid buffer type %d", __func__, cc->cc_type);
438 crypto_cursor_segbase(struct crypto_buffer_cursor *cc)
440 switch (cc->cc_type) {
441 case CRYPTO_BUF_CONTIG:
443 case CRYPTO_BUF_MBUF:
444 case CRYPTO_BUF_SINGLE_MBUF:
445 if (cc->cc_mbuf == NULL)
447 if (cc->cc_mbuf->m_flags & M_EXTPG)
448 return (m_epg_segbase(cc->cc_mbuf, cc->cc_offset));
449 return (mtod(cc->cc_mbuf, char *) + cc->cc_offset);
450 case CRYPTO_BUF_VMPAGE:
451 return ((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
452 *cc->cc_vmpage)) + cc->cc_offset);
454 return ((char *)cc->cc_iov->iov_base + cc->cc_offset);
457 panic("%s: invalid buffer type %d", __func__, cc->cc_type);
464 crypto_cursor_seglen(struct crypto_buffer_cursor *cc)
466 switch (cc->cc_type) {
467 case CRYPTO_BUF_CONTIG:
468 return (cc->cc_buf_len);
469 case CRYPTO_BUF_VMPAGE:
470 return (PAGE_SIZE - cc->cc_offset);
471 case CRYPTO_BUF_MBUF:
472 case CRYPTO_BUF_SINGLE_MBUF:
473 if (cc->cc_mbuf == NULL)
475 if (cc->cc_mbuf->m_flags & M_EXTPG)
476 return (m_epg_seglen(cc->cc_mbuf, cc->cc_offset));
477 return (cc->cc_mbuf->m_len - cc->cc_offset);
479 return (cc->cc_iov->iov_len - cc->cc_offset);
482 panic("%s: invalid buffer type %d", __func__, cc->cc_type);
489 crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size,
497 switch (cc->cc_type) {
498 case CRYPTO_BUF_CONTIG:
499 MPASS(cc->cc_buf_len >= size);
500 memcpy(cc->cc_buf, src, size);
502 cc->cc_buf_len -= size;
504 case CRYPTO_BUF_MBUF:
507 * This uses m_copyback() for individual
508 * mbufs so that cc_mbuf and cc_offset are
511 remain = cc->cc_mbuf->m_len - cc->cc_offset;
512 todo = MIN(remain, size);
513 m_copyback(cc->cc_mbuf, cc->cc_offset, todo, src);
516 cc->cc_offset += todo;
520 cc->cc_mbuf = cc->cc_mbuf->m_next;
526 case CRYPTO_BUF_SINGLE_MBUF:
527 MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
528 m_copyback(cc->cc_mbuf, cc->cc_offset, size, src);
529 cc->cc_offset += size;
531 case CRYPTO_BUF_VMPAGE:
533 dst = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
534 *cc->cc_vmpage)) + cc->cc_offset;
535 remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
536 todo = MIN(remain, size);
537 memcpy(dst, src, todo);
539 cc->cc_buf_len -= todo;
541 cc->cc_offset += todo;
553 dst = (char *)cc->cc_iov->iov_base + cc->cc_offset;
554 remain = cc->cc_iov->iov_len - cc->cc_offset;
555 todo = MIN(remain, size);
556 memcpy(dst, src, todo);
559 cc->cc_offset += todo;
571 panic("%s: invalid buffer type %d", __func__, cc->cc_type);
578 crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, void *vdst)
585 switch (cc->cc_type) {
586 case CRYPTO_BUF_CONTIG:
587 MPASS(cc->cc_buf_len >= size);
588 memcpy(dst, cc->cc_buf, size);
590 cc->cc_buf_len -= size;
592 case CRYPTO_BUF_MBUF:
595 * This uses m_copydata() for individual
596 * mbufs so that cc_mbuf and cc_offset are
599 remain = cc->cc_mbuf->m_len - cc->cc_offset;
600 todo = MIN(remain, size);
601 m_copydata(cc->cc_mbuf, cc->cc_offset, todo, dst);
604 cc->cc_offset += todo;
608 cc->cc_mbuf = cc->cc_mbuf->m_next;
614 case CRYPTO_BUF_SINGLE_MBUF:
615 MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
616 m_copydata(cc->cc_mbuf, cc->cc_offset, size, dst);
617 cc->cc_offset += size;
619 case CRYPTO_BUF_VMPAGE:
621 src = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
622 *cc->cc_vmpage)) + cc->cc_offset;
623 remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
624 todo = MIN(remain, size);
625 memcpy(dst, src, todo);
627 cc->cc_buf_len -= todo;
629 cc->cc_offset += todo;
641 src = (const char *)cc->cc_iov->iov_base +
643 remain = cc->cc_iov->iov_len - cc->cc_offset;
644 todo = MIN(remain, size);
645 memcpy(dst, src, todo);
648 cc->cc_offset += todo;
660 panic("%s: invalid buffer type %d", __func__, cc->cc_type);
667 * To avoid advancing 'cursor', make a local copy that gets advanced
671 crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size,
674 struct crypto_buffer_cursor copy;
677 crypto_cursor_copydata(©, size, vdst);
681 * Apply function f to the data in an iovec list starting "off" bytes from
682 * the beginning, continuing for "len" bytes.
685 cuio_apply(struct uio *uio, int off, int len,
686 int (*f)(void *, const void *, u_int), void *arg)
688 struct iovec *iov = uio->uio_iov;
689 int iol = uio->uio_iovcnt;
695 KASSERT(iol >= 0, ("%s: empty", __func__));
696 count = min(iov->iov_len - off, len);
697 rval = (*f)(arg, ((caddr_t)iov->iov_base) + off, count);
709 crypto_copyback(struct cryptop *crp, int off, int size, const void *src)
711 struct crypto_buffer *cb;
713 if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE)
717 switch (cb->cb_type) {
718 case CRYPTO_BUF_MBUF:
719 case CRYPTO_BUF_SINGLE_MBUF:
720 m_copyback(cb->cb_mbuf, off, size, src);
722 #if CRYPTO_MAY_HAVE_VMPAGE
723 case CRYPTO_BUF_VMPAGE:
724 MPASS(size <= cb->cb_vm_page_len);
726 cb->cb_vm_page_len + cb->cb_vm_page_offset);
727 cvm_page_copyback(cb->cb_vm_page,
728 off + cb->cb_vm_page_offset, size, src);
730 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
732 cuio_copyback(cb->cb_uio, off, size, src);
734 case CRYPTO_BUF_CONTIG:
735 MPASS(off + size <= cb->cb_buf_len);
736 bcopy(src, cb->cb_buf + off, size);
740 panic("invalid crp buf type %d", cb->cb_type);
747 crypto_copydata(struct cryptop *crp, int off, int size, void *dst)
750 switch (crp->crp_buf.cb_type) {
751 case CRYPTO_BUF_MBUF:
752 case CRYPTO_BUF_SINGLE_MBUF:
753 m_copydata(crp->crp_buf.cb_mbuf, off, size, dst);
755 #if CRYPTO_MAY_HAVE_VMPAGE
756 case CRYPTO_BUF_VMPAGE:
757 MPASS(size <= crp->crp_buf.cb_vm_page_len);
758 MPASS(size + off <= crp->crp_buf.cb_vm_page_len +
759 crp->crp_buf.cb_vm_page_offset);
760 cvm_page_copydata(crp->crp_buf.cb_vm_page,
761 off + crp->crp_buf.cb_vm_page_offset, size, dst);
763 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
765 cuio_copydata(crp->crp_buf.cb_uio, off, size, dst);
767 case CRYPTO_BUF_CONTIG:
768 MPASS(off + size <= crp->crp_buf.cb_buf_len);
769 bcopy(crp->crp_buf.cb_buf + off, dst, size);
773 panic("invalid crp buf type %d", crp->crp_buf.cb_type);
780 crypto_apply_buf(struct crypto_buffer *cb, int off, int len,
781 int (*f)(void *, const void *, u_int), void *arg)
785 switch (cb->cb_type) {
786 case CRYPTO_BUF_MBUF:
787 case CRYPTO_BUF_SINGLE_MBUF:
788 error = m_apply(cb->cb_mbuf, off, len,
789 (int (*)(void *, void *, u_int))f, arg);
792 error = cuio_apply(cb->cb_uio, off, len, f, arg);
794 #if CRYPTO_MAY_HAVE_VMPAGE
795 case CRYPTO_BUF_VMPAGE:
796 error = cvm_page_apply(cb->cb_vm_page,
797 off + cb->cb_vm_page_offset, len, f, arg);
799 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
800 case CRYPTO_BUF_CONTIG:
801 MPASS(off + len <= cb->cb_buf_len);
802 error = (*f)(arg, cb->cb_buf + off, len);
806 panic("invalid crypto buf type %d", cb->cb_type);
815 crypto_apply(struct cryptop *crp, int off, int len,
816 int (*f)(void *, const void *, u_int), void *arg)
818 return (crypto_apply_buf(&crp->crp_buf, off, len, f, arg));
822 m_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
826 MPASS(skip <= INT_MAX);
828 m = m_getptr(m, (int)skip, &rel_off);
834 if (skip + len > m->m_len)
837 if (m->m_flags & M_EXTPG)
838 return (m_epg_contiguous_subsegment(m, skip, len));
839 return (mtod(m, char*) + skip);
843 cuio_contiguous_segment(struct uio *uio, size_t skip, size_t len)
847 MPASS(skip <= INT_MAX);
848 idx = cuio_getptr(uio, (int)skip, &rel_off);
854 if (skip + len > uio->uio_iov[idx].iov_len)
856 return ((char *)uio->uio_iov[idx].iov_base + skip);
860 crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, size_t skip,
864 switch (cb->cb_type) {
865 case CRYPTO_BUF_MBUF:
866 case CRYPTO_BUF_SINGLE_MBUF:
867 return (m_contiguous_subsegment(cb->cb_mbuf, skip, len));
869 return (cuio_contiguous_segment(cb->cb_uio, skip, len));
870 #if CRYPTO_MAY_HAVE_VMPAGE
871 case CRYPTO_BUF_VMPAGE:
872 MPASS(skip + len <= cb->cb_vm_page_len);
873 return (cvm_page_contiguous_segment(cb->cb_vm_page,
874 skip + cb->cb_vm_page_offset, len));
875 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
876 case CRYPTO_BUF_CONTIG:
877 MPASS(skip + len <= cb->cb_buf_len);
878 return (cb->cb_buf + skip);
881 panic("invalid crp buf type %d", cb->cb_type);
888 crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len)
890 return (crypto_buffer_contiguous_subsegment(&crp->crp_buf, skip, len));