1 /* $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $ */
4 * Copyright (c) 1999 Theo de Raadt
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include <sys/cdefs.h>
31 #include <sys/param.h>
32 #include <sys/systm.h>
34 #include <sys/errno.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
39 #include <sys/limits.h>
43 #include <machine/vmparam.h>
46 #include <vm/vm_page.h>
49 #include <opencrypto/cryptodev.h>
51 SDT_PROVIDER_DECLARE(opencrypto);
54 * These macros are only for avoiding code duplication, as we need to skip
55 * given number of bytes in the same way in several functions below.
/*
 * CUIO_SKIP(): consume "off" bytes from the iovec list headed by the
 * local variables iov/iol at the expansion site, advancing iov and
 * decrementing iol as whole iovecs are skipped.  The KASSERTs guard
 * against negative offsets/lengths and running off the end of the list.
 * NOTE(review): this excerpt elides some continuation lines of the
 * macro body (loop framing, iov/iol advance) -- compare with the
 * complete source before editing.
 */
57 #define CUIO_SKIP() do { \
58 	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \
59 	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len)); \
61 	KASSERT(iol >= 0, ("%s: empty in skip", __func__)); \
62 	if (off < iov->iov_len) \
64 	off -= iov->iov_len; \
/*
 * CVM_PAGE_SKIP(): the vm_page analogue of CUIO_SKIP -- consume "off"
 * bytes from a vm_page_t array in PAGE_SIZE-sized steps, accumulating
 * the bytes stepped over in "processed".  Expects off/len/processed
 * (and the page pointer) to be in scope at the expansion site.
 * NOTE(review): excerpt elides continuation lines (loop framing and
 * page-pointer advance) -- compare with the complete source.
 */
70 #define CVM_PAGE_SKIP() do { \
71 	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \
72 	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len)); \
74 	if (off < PAGE_SIZE) \
76 	processed += PAGE_SIZE - off; \
77 	off -= PAGE_SIZE - off; \
/*
 * cuio_copydata: copy "len" bytes out of the uio's iovec chain into the
 * flat buffer "cp", starting "off" bytes into the chain.  Each
 * iteration bounds the bcopy() by the bytes remaining in the current
 * iovec (count = min(iov_len - off, len)).
 * NOTE(review): excerpt elides the loop framing and the iov advance --
 * verify against the full source.
 */
83 cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
85 	struct iovec *iov = uio->uio_iov;
	/* iol is consumed only by the KASSERT below on DIAGNOSTIC kernels. */
86 	int iol __diagused = uio->uio_iovcnt;
91 	KASSERT(iol >= 0, ("%s: empty", __func__));
92 	count = min(iov->iov_len - off, len);
93 	bcopy(((caddr_t)iov->iov_base) + off, cp, count);
/*
 * cuio_copyback: mirror of cuio_copydata -- copy "len" bytes from the
 * flat buffer "cp" into the uio's iovec chain, starting "off" bytes in.
 * NOTE(review): excerpt elides the loop framing and the iov advance --
 * verify against the full source.
 */
103 cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp)
105 	struct iovec *iov = uio->uio_iov;
	/* iol is consumed only by the KASSERT below on DIAGNOSTIC kernels. */
106 	int iol __diagused = uio->uio_iovcnt;
111 	KASSERT(iol >= 0, ("%s: empty", __func__));
112 	count = min(iov->iov_len - off, len);
113 	bcopy(cp, ((caddr_t)iov->iov_base) + off, count);
/*
 * cuio_getptr: map the absolute byte offset "loc" within the uio to an
 * iovec index (the return value) and a residual byte offset stored in
 * *off.  Walks the iovec array subtracting each iov_len from loc.
 * NOTE(review): excerpt elides the loop body/return paths -- the
 * ind > 0 && loc == 0 case appears to pin the offset to the end of the
 * previous iovec; confirm against the full source.
 */
123 * Return the index and offset of location in iovec list.
126 cuio_getptr(struct uio *uio, int loc, int *off)
131 	while (loc >= 0 && ind < uio->uio_iovcnt) {
132 		len = uio->uio_iov[ind].iov_len;
141 	if (ind > 0 && loc == 0) {
143 		*off = uio->uio_iov[ind].iov_len;
150 #if CRYPTO_MAY_HAVE_VMPAGE
152  * Apply function f to the data in a vm_page_t list starting "off" bytes from
153  * the beginning, continuing for "len" bytes.
/*
 * Each page is mapped through the direct map (PHYS_TO_DMAP) and f is
 * invoked on at most PAGE_SIZE - off bytes per iteration; a non-zero
 * return from f presumably aborts the walk -- TODO confirm, the loop
 * framing is elided from this excerpt.
 */
156 cvm_page_apply(vm_page_t *pages, int off, int len,
157 	int (*f)(void *, const void *, u_int), void *arg)
159 	int processed __unused;
166 		char *kaddr = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages));
167 		count = min(PAGE_SIZE - off, len);
168 		rval = (*f)(arg, kaddr + off, count);
/*
 * cvm_page_contiguous_segment: return a direct-map pointer to the range
 * [skip, skip + len) within the vm_page_t array, but only when the
 * range does not straddle a page boundary (the first/last byte fall in
 * the same page).  The boundary-crossing case's return value is elided
 * from this excerpt -- presumably NULL; TODO confirm.
 */
180 cvm_page_contiguous_segment(vm_page_t *pages, size_t skip, int len)
182 	if ((skip + len - 1) / PAGE_SIZE > skip / PAGE_SIZE)
	/* Step to the page containing "skip", then keep the in-page offset. */
185 	pages += (skip / PAGE_SIZE);
186 	skip -= rounddown(skip, PAGE_SIZE);
187 	return (((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages))) + skip);
191  * Copy len bytes of data from the vm_page_t array, skipping the first off
192  * bytes, into the pointer cp. Return the number of bytes skipped and copied.
193  * Does not verify the length of the array.
/*
 * NOTE(review): despite the comment above (copied from the adjacent
 * function), the bcopy() below writes INTO the pages from cp -- this is
 * the copy-back direction.  Loop framing and page advance are elided
 * from this excerpt.
 */
196 cvm_page_copyback(vm_page_t *pages, int off, int len, c_caddr_t cp)
203 		count = min(PAGE_SIZE - off, len);
204 		bcopy(cp, (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off,
216  * Copy len bytes of data from the pointer cp into the vm_page_t array,
217  * skipping the first off bytes, Return the number of bytes skipped and copied.
218  * Does not verify the length of the array.
/*
 * NOTE(review): the bcopy() below reads FROM the pages into cp -- the
 * copy-out direction (the comment above appears swapped with
 * cvm_page_copyback's).  Loop framing is elided from this excerpt.
 */
221 cvm_page_copydata(vm_page_t *pages, int off, int len, caddr_t cp)
228 		count = min(PAGE_SIZE - off, len);
229 		bcopy(((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off), cp,
239 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
242  * Given a starting page in an m_epg, determine the length of the
243  * current physically contiguous segment.
/*
 * Starting from page "idx" (whose in-mbuf length is "pglen"), extend
 * the segment across following pages only while their physical
 * addresses are exactly PAGE_SIZE apart, i.e. physically adjacent.
 * NOTE(review): initialization of "len" and the loop's break/return
 * lines are elided from this excerpt.
 */
245 static __inline size_t
246 m_epg_pages_extent(struct mbuf *m, int idx, u_int pglen)
252 	for (i = idx + 1; i < m->m_epg_npgs; i++) {
		/* Stop at the first physical discontinuity. */
253 		if (m->m_epg_pa[i - 1] + PAGE_SIZE != m->m_epg_pa[i])
255 		len += m_epg_pagelen(m, i, 0);
/*
 * m_epg_segment: for an unmapped (M_EXTPG) mbuf, translate a byte
 * offset into a kernel-virtual pointer and store the length of the
 * physically contiguous run starting there in *len.  The offset is
 * resolved against, in order: the epg header, the page array (via the
 * direct map), and finally the trailer.
 * NOTE(review): excerpt elides offset bookkeeping inside the page loop
 * and the trailer-offset adjustment -- compare with the full source.
 */
261 m_epg_segment(struct mbuf *m, size_t offset, size_t *len)
263 	u_int i, pglen, pgoff;
	/* Fold the mbuf's own data offset into the requested offset. */
265 	offset += mtod(m, vm_offset_t);
266 	if (offset < m->m_epg_hdrlen) {
267 		*len = m->m_epg_hdrlen - offset;
268 		return (m->m_epg_hdr + offset);
270 	offset -= m->m_epg_hdrlen;
271 	pgoff = m->m_epg_1st_off;
272 	for (i = 0; i < m->m_epg_npgs; i++) {
273 		pglen = m_epg_pagelen(m, i, pgoff);
274 		if (offset < pglen) {
			/* Contiguous extent from this page, minus our offset into it. */
275 			*len = m_epg_pages_extent(m, i, pglen) - offset;
276 			return ((void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff +
	/* Past header and all pages: the offset must land in the trailer. */
282 	KASSERT(offset <= m->m_epg_trllen, ("%s: offset beyond trailer",
284 	*len = m->m_epg_trllen - offset;
285 	return (m->m_epg_trail + offset);
/*
 * m_epg_contiguous_subsegment: return a pointer to [skip, skip + len)
 * within an M_EXTPG mbuf when that range is physically contiguous.
 * NOTE(review): the comparison of "seglen" against "len" and the
 * failure return are elided from this excerpt -- presumably returns
 * NULL when the run is too short; confirm against the full source.
 */
288 static __inline void *
289 m_epg_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
294 	base = m_epg_segment(m, skip, &seglen);
/*
 * crypto_cursor_init: position cursor "cc" at the start of crypto
 * buffer "cb", zeroing the cursor and copying the per-type state:
 * pointer+length for CONTIG, head mbuf for the mbuf types, page array /
 * length / offset for VMPAGE, and iovec list + resid for the uio case.
 * An unknown buffer type panics.
 * NOTE(review): excerpt elides break statements and at least the uio
 * case label -- compare with the full source.
 */
301 crypto_cursor_init(struct crypto_buffer_cursor *cc,
302     const struct crypto_buffer *cb)
304 	memset(cc, 0, sizeof(*cc));
305 	cc->cc_type = cb->cb_type;
306 	switch (cc->cc_type) {
307 	case CRYPTO_BUF_CONTIG:
308 		cc->cc_buf = cb->cb_buf;
309 		cc->cc_buf_len = cb->cb_buf_len;
311 	case CRYPTO_BUF_MBUF:
312 	case CRYPTO_BUF_SINGLE_MBUF:
313 		cc->cc_mbuf = cb->cb_mbuf;
315 	case CRYPTO_BUF_VMPAGE:
316 		cc->cc_vmpage = cb->cb_vm_page;
317 		cc->cc_buf_len = cb->cb_vm_page_len;
318 		cc->cc_offset = cb->cb_vm_page_offset;
	/* uio-backed buffers track the iovec array and total residual bytes. */
321 		cc->cc_iov = cb->cb_uio->uio_iov;
322 		cc->cc_buf_len = cb->cb_uio->uio_resid;
326 		panic("%s: invalid buffer type %d", __func__, cb->cb_type);
/* DTrace probe fired on each vmpage advance (cursor, amount). */
332 SDT_PROBE_DEFINE2(opencrypto, criov, cursor_advance, vmpage, "struct crypto_buffer_cursor*", "size_t");
/*
 * crypto_cursor_advance: move the cursor forward "amount" bytes,
 * crossing segment boundaries as needed: mbuf chains advance m_next,
 * vmpage buffers step page by page, uio buffers step iovec by iovec.
 * CONTIG and SINGLE_MBUF are a single segment, so only the offset (or
 * pointer/length pair) moves.  MPASS assertions check the caller does
 * not advance past the end.
 * NOTE(review): excerpt elides the per-type loop framing, offset
 * resets at segment boundaries, and break/return statements -- compare
 * with the full source before editing.
 */
335 crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount)
339 	switch (cc->cc_type) {
340 	case CRYPTO_BUF_CONTIG:
341 		MPASS(cc->cc_buf_len >= amount);
342 		cc->cc_buf += amount;
343 		cc->cc_buf_len -= amount;
345 	case CRYPTO_BUF_MBUF:
		/* Bytes left in the current mbuf; stay put if amount fits. */
347 			remain = cc->cc_mbuf->m_len - cc->cc_offset;
348 			if (amount < remain) {
349 				cc->cc_offset += amount;
353 			cc->cc_mbuf = cc->cc_mbuf->m_next;
359 	case CRYPTO_BUF_SINGLE_MBUF:
360 		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + amount);
361 		cc->cc_offset += amount;
363 	case CRYPTO_BUF_VMPAGE:
365 			SDT_PROBE2(opencrypto, criov, cursor_advance, vmpage,
		/* Bytes left in the current page, capped by total remaining. */
367 			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
368 			if (amount < remain) {
369 				cc->cc_buf_len -= amount;
370 				cc->cc_offset += amount;
373 			cc->cc_buf_len -= remain;
377 			if (amount == 0 || cc->cc_buf_len == 0)
	/* uio case: consume iovecs one at a time. */
383 			remain = cc->cc_iov->iov_len - cc->cc_offset;
384 			if (amount < remain) {
385 				cc->cc_offset += amount;
388 			cc->cc_buf_len -= remain;
398 		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
/*
 * crypto_cursor_segment: return a pointer to the current contiguous
 * run of bytes under the cursor and store its length in *len.  The
 * first switch handles exhausted cursors (zero remaining / NULL mbuf);
 * the second computes the pointer per buffer type.  Unmapped mbufs
 * (M_EXTPG) defer to m_epg_segment; vmpage buffers go through the
 * direct map.
 * NOTE(review): excerpt elides the exhausted-case returns, several
 * case labels (including the uio case), and break statements.
 */
405 crypto_cursor_segment(struct crypto_buffer_cursor *cc, size_t *len)
407 	switch (cc->cc_type) {
408 	case CRYPTO_BUF_CONTIG:
410 	case CRYPTO_BUF_VMPAGE:
411 		if (cc->cc_buf_len == 0) {
416 	case CRYPTO_BUF_MBUF:
417 	case CRYPTO_BUF_SINGLE_MBUF:
418 		if (cc->cc_mbuf == NULL) {
425 		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
431 	switch (cc->cc_type) {
432 	case CRYPTO_BUF_CONTIG:
433 		*len = cc->cc_buf_len;
435 	case CRYPTO_BUF_MBUF:
436 	case CRYPTO_BUF_SINGLE_MBUF:
		/* Unmapped mbufs need per-page translation. */
437 		if (cc->cc_mbuf->m_flags & M_EXTPG)
438 			return (m_epg_segment(cc->cc_mbuf, cc->cc_offset, len));
439 		*len = cc->cc_mbuf->m_len - cc->cc_offset;
440 		return (mtod(cc->cc_mbuf, char *) + cc->cc_offset);
441 	case CRYPTO_BUF_VMPAGE:
442 		*len = PAGE_SIZE - cc->cc_offset;
443 		return ((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
444 		    *cc->cc_vmpage)) + cc->cc_offset);
446 		*len = cc->cc_iov->iov_len - cc->cc_offset;
447 		return ((char *)cc->cc_iov->iov_base + cc->cc_offset);
449 	__assert_unreachable();
/*
 * crypto_cursor_copyback: copy "size" bytes from "src" into the buffer
 * at the cursor position, advancing the cursor past the copied bytes.
 * Multi-segment types (chained mbufs, vmpage arrays, iovec lists) loop
 * a segment at a time, copying MIN(remain, size) per step; CONTIG and
 * SINGLE_MBUF copy in one shot.
 * NOTE(review): excerpt elides loop framing, src/size bookkeeping
 * between iterations, segment-boundary resets, and break statements --
 * compare with the full source before editing.
 */
454 crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size,
462 	switch (cc->cc_type) {
463 	case CRYPTO_BUF_CONTIG:
464 		MPASS(cc->cc_buf_len >= size);
465 		memcpy(cc->cc_buf, src, size);
467 		cc->cc_buf_len -= size;
469 	case CRYPTO_BUF_MBUF:
472 		 * This uses m_copyback() for individual
473 		 * mbufs so that cc_mbuf and cc_offset are
476 			remain = cc->cc_mbuf->m_len - cc->cc_offset;
477 			todo = MIN(remain, size);
478 			m_copyback(cc->cc_mbuf, cc->cc_offset, todo, src);
481 				cc->cc_offset += todo;
485 				cc->cc_mbuf = cc->cc_mbuf->m_next;
491 	case CRYPTO_BUF_SINGLE_MBUF:
492 		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
493 		m_copyback(cc->cc_mbuf, cc->cc_offset, size, src);
494 		cc->cc_offset += size;
496 	case CRYPTO_BUF_VMPAGE:
		/* Destination is the current page mapped via the direct map. */
498 			dst = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
499 			    *cc->cc_vmpage)) + cc->cc_offset;
500 			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
501 			todo = MIN(remain, size);
502 			memcpy(dst, src, todo);
504 			cc->cc_buf_len -= todo;
506 				cc->cc_offset += todo;
	/* uio case: copy into the current iovec, bounded by its length. */
518 			dst = (char *)cc->cc_iov->iov_base + cc->cc_offset;
519 			remain = cc->cc_iov->iov_len - cc->cc_offset;
520 			todo = MIN(remain, size);
521 			memcpy(dst, src, todo);
523 			cc->cc_buf_len -= todo;
525 				cc->cc_offset += todo;
537 		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
/*
 * crypto_cursor_copydata: the read-side mirror of
 * crypto_cursor_copyback -- copy "size" bytes from the buffer at the
 * cursor position into "vdst", advancing the cursor.  Multi-segment
 * types loop a segment at a time (MIN(remain, size) per step); CONTIG
 * and SINGLE_MBUF copy in one shot.
 * NOTE(review): excerpt elides loop framing, dst/size bookkeeping
 * between iterations, segment-boundary resets, and break statements --
 * compare with the full source before editing.
 */
544 crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, void *vdst)
551 	switch (cc->cc_type) {
552 	case CRYPTO_BUF_CONTIG:
553 		MPASS(cc->cc_buf_len >= size);
554 		memcpy(dst, cc->cc_buf, size);
556 		cc->cc_buf_len -= size;
558 	case CRYPTO_BUF_MBUF:
561 		 * This uses m_copydata() for individual
562 		 * mbufs so that cc_mbuf and cc_offset are
565 			remain = cc->cc_mbuf->m_len - cc->cc_offset;
566 			todo = MIN(remain, size);
567 			m_copydata(cc->cc_mbuf, cc->cc_offset, todo, dst);
570 				cc->cc_offset += todo;
574 				cc->cc_mbuf = cc->cc_mbuf->m_next;
580 	case CRYPTO_BUF_SINGLE_MBUF:
581 		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
582 		m_copydata(cc->cc_mbuf, cc->cc_offset, size, dst);
583 		cc->cc_offset += size;
585 	case CRYPTO_BUF_VMPAGE:
		/* Source is the current page mapped via the direct map. */
587 			src = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
588 			    *cc->cc_vmpage)) + cc->cc_offset;
589 			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
590 			todo = MIN(remain, size);
591 			memcpy(dst, src, todo);
593 			cc->cc_buf_len -= todo;
595 				cc->cc_offset += todo;
	/* uio case: copy out of the current iovec, bounded by its length. */
607 			src = (const char *)cc->cc_iov->iov_base +
609 			remain = cc->cc_iov->iov_len - cc->cc_offset;
610 			todo = MIN(remain, size);
611 			memcpy(dst, src, todo);
613 			cc->cc_buf_len -= todo;
615 				cc->cc_offset += todo;
627 		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
634  * To avoid advancing 'cursor', make a local copy that gets advanced
/*
 * crypto_cursor_copydata_noadv: peek variant -- copy "size" bytes at
 * the cursor into "vdst" without moving the caller's cursor, by
 * operating on a stack copy of it.
 * NOTE(review): the line copying *cc into "copy" is elided from this
 * excerpt.
 */
638 crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size,
641 	struct crypto_buffer_cursor copy;
644 	crypto_cursor_copydata(&copy, size, vdst);
648  * Apply function f to the data in an iovec list starting "off" bytes from
649  * the beginning, continuing for "len" bytes.
/*
 * Walks the uio's iovec chain, invoking f(arg, ptr, count) on at most
 * one iovec's worth of data per iteration.  A non-zero return from f
 * presumably aborts the walk and is propagated -- TODO confirm; the
 * loop framing and return paths are elided from this excerpt.
 */
652 cuio_apply(struct uio *uio, int off, int len,
653     int (*f)(void *, const void *, u_int), void *arg)
655 	struct iovec *iov = uio->uio_iov;
	/* iol is consumed only by the KASSERT below on DIAGNOSTIC kernels. */
656 	int iol __diagused = uio->uio_iovcnt;
662 	KASSERT(iol >= 0, ("%s: empty", __func__));
663 	count = min(iov->iov_len - off, len);
664 	rval = (*f)(arg, ((caddr_t)iov->iov_base) + off, count);
/*
 * crypto_copyback: copy "size" bytes from "src" into the request's
 * data buffer at byte offset "off", dispatching on buffer type.  When
 * the request carries a separate output buffer (crp_obuf), that buffer
 * is used instead of the input buffer.  Unknown types panic.
 * NOTE(review): excerpt elides the cb assignment, break statements,
 * and the uio case label -- compare with the full source.
 */
676 crypto_copyback(struct cryptop *crp, int off, int size, const void *src)
678 	struct crypto_buffer *cb;
	/* Prefer the dedicated output buffer when one is attached. */
680 	if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE)
684 	switch (cb->cb_type) {
685 	case CRYPTO_BUF_MBUF:
686 	case CRYPTO_BUF_SINGLE_MBUF:
687 		m_copyback(cb->cb_mbuf, off, size, src);
689 #if CRYPTO_MAY_HAVE_VMPAGE
690 	case CRYPTO_BUF_VMPAGE:
691 		MPASS(size <= cb->cb_vm_page_len);
693 		    cb->cb_vm_page_len + cb->cb_vm_page_offset);
		/* vm_page offsets are relative to cb_vm_page_offset. */
694 		cvm_page_copyback(cb->cb_vm_page,
695 		    off + cb->cb_vm_page_offset, size, src);
697 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
699 		cuio_copyback(cb->cb_uio, off, size, src);
701 	case CRYPTO_BUF_CONTIG:
702 		MPASS(off + size <= cb->cb_buf_len);
703 		bcopy(src, cb->cb_buf + off, size);
707 		panic("invalid crp buf type %d", cb->cb_type);
/*
 * crypto_copydata: copy "size" bytes out of the request's input buffer
 * (crp_buf) at byte offset "off" into "dst", dispatching on buffer
 * type.  Always reads the input buffer (unlike crypto_copyback, which
 * may target crp_obuf).  Unknown types panic.
 * NOTE(review): excerpt elides break statements and the uio case
 * label -- compare with the full source.
 */
714 crypto_copydata(struct cryptop *crp, int off, int size, void *dst)
717 	switch (crp->crp_buf.cb_type) {
718 	case CRYPTO_BUF_MBUF:
719 	case CRYPTO_BUF_SINGLE_MBUF:
720 		m_copydata(crp->crp_buf.cb_mbuf, off, size, dst);
722 #if CRYPTO_MAY_HAVE_VMPAGE
723 	case CRYPTO_BUF_VMPAGE:
724 		MPASS(size <= crp->crp_buf.cb_vm_page_len);
725 		MPASS(size + off <= crp->crp_buf.cb_vm_page_len +
726 		    crp->crp_buf.cb_vm_page_offset);
		/* vm_page offsets are relative to cb_vm_page_offset. */
727 		cvm_page_copydata(crp->crp_buf.cb_vm_page,
728 		    off + crp->crp_buf.cb_vm_page_offset, size, dst);
730 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
732 		cuio_copydata(crp->crp_buf.cb_uio, off, size, dst);
734 	case CRYPTO_BUF_CONTIG:
735 		MPASS(off + size <= crp->crp_buf.cb_buf_len);
736 		bcopy(crp->crp_buf.cb_buf + off, dst, size);
740 		panic("invalid crp buf type %d", crp->crp_buf.cb_type);
/*
 * crypto_apply_buf: apply callback f to "len" bytes of the crypto
 * buffer starting at "off", delegating to the per-type walker
 * (m_apply, cuio_apply, cvm_page_apply, or a direct call for CONTIG).
 * The error from the walker/callback is captured in "error" --
 * presumably returned to the caller; the return statement is elided
 * from this excerpt.  Unknown types panic.
 * NOTE(review): break statements and the uio case label are elided.
 */
747 crypto_apply_buf(struct crypto_buffer *cb, int off, int len,
748     int (*f)(void *, const void *, u_int), void *arg)
752 	switch (cb->cb_type) {
753 	case CRYPTO_BUF_MBUF:
754 	case CRYPTO_BUF_SINGLE_MBUF:
		/* m_apply takes a non-const data pointer; cast adapts f's type. */
755 		error = m_apply(cb->cb_mbuf, off, len,
756 		    (int (*)(void *, void *, u_int))f, arg);
759 		error = cuio_apply(cb->cb_uio, off, len, f, arg);
761 #if CRYPTO_MAY_HAVE_VMPAGE
762 	case CRYPTO_BUF_VMPAGE:
763 		error = cvm_page_apply(cb->cb_vm_page,
764 		    off + cb->cb_vm_page_offset, len, f, arg);
766 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
767 	case CRYPTO_BUF_CONTIG:
768 		MPASS(off + len <= cb->cb_buf_len);
769 		error = (*f)(arg, cb->cb_buf + off, len);
773 		panic("invalid crypto buf type %d", cb->cb_type);
/*
 * crypto_apply: convenience wrapper applying f to the request's input
 * buffer (crp_buf) via crypto_apply_buf.
 */
782 crypto_apply(struct cryptop *crp, int off, int len,
783     int (*f)(void *, const void *, u_int), void *arg)
785 	return (crypto_apply_buf(&crp->crp_buf, off, len, f, arg));
/*
 * m_contiguous_subsegment: return a pointer to [skip, skip + len)
 * within an mbuf chain when the range lies entirely inside one mbuf
 * (after locating it with m_getptr); unmapped mbufs are handled by
 * m_epg_contiguous_subsegment.  Failure returns are elided from this
 * excerpt -- presumably NULL; confirm against the full source.
 */
789 m_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
	/* m_getptr takes an int offset; guard the size_t -> int narrowing. */
793 	MPASS(skip <= INT_MAX);
795 	m = m_getptr(m, (int)skip, &rel_off);
801 	if (skip + len > m->m_len)
804 	if (m->m_flags & M_EXTPG)
805 		return (m_epg_contiguous_subsegment(m, skip, len));
806 	return (mtod(m, char*) + skip);
/*
 * cuio_contiguous_segment: return a pointer to [skip, skip + len)
 * within a uio when the range lies entirely inside one iovec (located
 * with cuio_getptr); otherwise the failure return -- elided from this
 * excerpt, presumably NULL -- is taken.
 */
810 cuio_contiguous_segment(struct uio *uio, size_t skip, size_t len)
	/* cuio_getptr takes an int offset; guard the size_t -> int narrowing. */
814 	MPASS(skip <= INT_MAX);
815 	idx = cuio_getptr(uio, (int)skip, &rel_off);
821 	if (skip + len > uio->uio_iov[idx].iov_len)
823 	return ((char *)uio->uio_iov[idx].iov_base + skip);
/*
 * crypto_buffer_contiguous_subsegment: return a pointer to
 * [skip, skip + len) within the crypto buffer if that range is
 * contiguous in kernel virtual memory, dispatching to the per-type
 * helper (mbuf, uio, vm_page, or simple pointer arithmetic for
 * CONTIG).  Unknown types panic.
 * NOTE(review): excerpt elides the uio case label and break/return
 * framing -- compare with the full source.
 */
827 crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, size_t skip,
831 	switch (cb->cb_type) {
832 	case CRYPTO_BUF_MBUF:
833 	case CRYPTO_BUF_SINGLE_MBUF:
834 		return (m_contiguous_subsegment(cb->cb_mbuf, skip, len));
836 		return (cuio_contiguous_segment(cb->cb_uio, skip, len));
837 #if CRYPTO_MAY_HAVE_VMPAGE
838 	case CRYPTO_BUF_VMPAGE:
839 		MPASS(skip + len <= cb->cb_vm_page_len);
		/* vm_page offsets are relative to cb_vm_page_offset. */
840 		return (cvm_page_contiguous_segment(cb->cb_vm_page,
841 		    skip + cb->cb_vm_page_offset, len));
842 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
843 	case CRYPTO_BUF_CONTIG:
844 		MPASS(skip + len <= cb->cb_buf_len);
845 		return (cb->cb_buf + skip);
848 		panic("invalid crp buf type %d", cb->cb_type);
855 crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len)
857 return (crypto_buffer_contiguous_subsegment(&crp->crp_buf, skip, len));