2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
19 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <sys/param.h>
37 #include <sys/systm.h>
39 #include <sys/endian.h>
40 #include <sys/kernel.h>
42 #include <sys/malloc.h>
44 #include <sys/module.h>
45 #include <sys/mutex.h>
46 #include <sys/random.h>
49 #include <machine/_inttypes.h>
50 #include <machine/bus.h>
51 #include <machine/resource.h>
53 #include <opencrypto/cryptodev.h>
54 #include "cryptodev_if.h"
56 #include <dev/ofw/ofw_bus_subr.h>
57 #include <dev/sec/sec.h>
/*
 * Forward declarations: newbus device lifecycle, interrupt plumbing, DMA
 * helpers, opencrypto (OCF) entry points, and per-execution-unit (EU)
 * session/descriptor builders for AESU (AES), DEU (DES/3DES) and
 * MDEU (MD5/SHA hashing).
 * NOTE(review): this listing is missing interior lines; several prototypes
 * below are visibly truncated (e.g. sec_enqueue_desc's channel parameter).
 */
59 static int sec_probe(device_t dev);
60 static int sec_attach(device_t dev);
61 static int sec_detach(device_t dev);
62 static int sec_suspend(device_t dev);
63 static int sec_resume(device_t dev);
64 static int sec_shutdown(device_t dev);
65 static void sec_primary_intr(void *arg);
66 static void sec_secondary_intr(void *arg);
67 static int sec_setup_intr(struct sec_softc *sc, struct resource **ires,
68 void **ihand, int *irid, driver_intr_t handler, const char *iname);
69 static void sec_release_intr(struct sec_softc *sc, struct resource *ires,
70 void *ihand, int irid, const char *iname);
71 static int sec_controller_reset(struct sec_softc *sc);
72 static int sec_channel_reset(struct sec_softc *sc, int channel, int full);
73 static int sec_init(struct sec_softc *sc);
74 static int sec_alloc_dma_mem(struct sec_softc *sc,
75 struct sec_dma_mem *dma_mem, bus_size_t size);
76 static int sec_desc_map_dma(struct sec_softc *sc,
77 struct sec_dma_mem *dma_mem, void *mem, bus_size_t size, int type,
78 struct sec_desc_map_info *sdmi);
79 static void sec_free_dma_mem(struct sec_dma_mem *dma_mem);
80 static void sec_enqueue(struct sec_softc *sc);
81 static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc,
83 static int sec_eu_channel(struct sec_softc *sc, int eu);
84 static int sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
85 u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype);
86 static int sec_make_pointer_direct(struct sec_softc *sc,
87 struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize);
88 static int sec_alloc_session(struct sec_softc *sc);
89 static int sec_newsession(device_t dev, u_int32_t *sidp,
90 struct cryptoini *cri);
91 static int sec_freesession(device_t dev, uint64_t tid);
92 static int sec_process(device_t dev, struct cryptop *crp, int hint);
93 static int sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
94 struct cryptoini **mac);
95 static int sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
96 struct cryptodesc **mac);
97 static int sec_build_common_ns_desc(struct sec_softc *sc,
98 struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
99 struct cryptodesc *enc, int buftype);
100 static int sec_build_common_s_desc(struct sec_softc *sc,
101 struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
102 struct cryptodesc *enc, struct cryptodesc *mac, int buftype);
/* Session/descriptor lookup helpers (sessions and descriptors locks apply). */
104 static struct sec_session *sec_get_session(struct sec_softc *sc, u_int sid);
105 static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr);
/* AESU (AES execution unit) glue. */
108 static int sec_aesu_newsession(struct sec_softc *sc,
109 struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
110 static int sec_aesu_make_desc(struct sec_softc *sc,
111 struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
/* DEU (DES/3DES execution unit) glue. */
115 static int sec_deu_newsession(struct sec_softc *sc,
116 struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
117 static int sec_deu_make_desc(struct sec_softc *sc,
118 struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
/* MDEU (message digest execution unit) glue. */
122 static int sec_mdeu_can_handle(u_int alg);
123 static int sec_mdeu_config(struct cryptodesc *crd,
124 u_int *eu, u_int *mode, u_int *hashlen);
125 static int sec_mdeu_newsession(struct sec_softc *sc,
126 struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
127 static int sec_mdeu_make_desc(struct sec_softc *sc,
128 struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
/*
 * Newbus driver glue: device method table, driver descriptor, devclass,
 * and module registration (attaches to simplebus, depends on crypto(9)).
 */
131 static device_method_t sec_methods[] = {
132 /* Device interface */
133 DEVMETHOD(device_probe, sec_probe),
134 DEVMETHOD(device_attach, sec_attach),
135 DEVMETHOD(device_detach, sec_detach),
137 DEVMETHOD(device_suspend, sec_suspend),
138 DEVMETHOD(device_resume, sec_resume),
139 DEVMETHOD(device_shutdown, sec_shutdown),
/* Crypto device (OCF) interface */
142 DEVMETHOD(cryptodev_newsession, sec_newsession),
143 DEVMETHOD(cryptodev_freesession,sec_freesession),
144 DEVMETHOD(cryptodev_process, sec_process),
148 static driver_t sec_driver = {
151 sizeof(struct sec_softc),
154 static devclass_t sec_devclass;
155 DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0);
156 MODULE_DEPEND(sec, crypto, 1, 1, 1);
/* Table of per-EU method vectors, scanned in order by sec_newsession(). */
158 static struct sec_eu_methods sec_eus[] = {
/*
 * Synchronize a driver DMA region for CPU/device access with the given
 * bus_dmamap_sync() op.  A NULL dma_vaddr marks an unallocated/freed
 * region (see sec_free_dma_mem()), so such regions are skipped.
 */
175 sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
178 /* Sync only if dma memory is valid */
179 if (dma_mem->dma_vaddr != NULL)
180 bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
/*
 * Return a session slot to the free pool.  Takes the sessions lock;
 * the actual ss_used clearing is in lines missing from this listing.
 */
184 sec_free_session(struct sec_softc *sc, struct sec_session *ses)
187 SEC_LOCK(sc, sessions);
189 SEC_UNLOCK(sc, sessions);
/* Return the kernel virtual address mapped for descriptor pointer slot n. */
193 sec_get_pointer_data(struct sec_desc *desc, u_int n)
196 return (desc->sd_ptr_dmem[n].dma_vaddr);
/*
 * Probe: match the FDT node ("fsl,sec2.0" compatible), temporarily map the
 * register window, read the SEC_ID register to identify the engine revision
 * (2.0 / 3.0 / 3.1), set the device description and release the mapping.
 * Unknown IDs are reported and rejected.
 */
200 sec_probe(device_t dev)
202 struct sec_softc *sc;
205 if (!ofw_bus_status_okay(dev))
208 if (!ofw_bus_is_compatible(dev, "fsl,sec2.0"))
211 sc = device_get_softc(dev);
/* Map registers just long enough to read the ID. */
214 sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
217 if (sc->sc_rres == NULL)
220 sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
221 sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
223 id = SEC_READ(sc, SEC_ID);
225 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
229 device_set_desc(dev, "Freescale Security Engine 2.0");
233 device_set_desc(dev, "Freescale Security Engine 3.0");
237 device_set_desc(dev, "Freescale Security Engine 3.1");
241 device_printf(dev, "unknown SEC ID 0x%016"PRIx64"!\n", id);
/*
 * Attach: register with OCF, create locks, map registers, hook interrupts
 * (secondary IRQ only on SEC 3.x), allocate and wire the shared descriptor
 * and link-table DMA arenas, initialize the hardware, and advertise the
 * supported algorithms.  The tail (from original line ~400) is the unwind
 * path used on failure, in reverse order of acquisition.
 */
249 sec_attach(device_t dev)
251 struct sec_softc *sc;
252 struct sec_hw_lt *lt;
256 sc = device_get_softc(dev);
261 sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
262 if (sc->sc_cid < 0) {
263 device_printf(dev, "could not get crypto driver ID!\n");
268 mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
269 "SEC Controller lock", MTX_DEF);
270 mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
271 "SEC Descriptors lock", MTX_DEF);
272 mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev),
273 "SEC Sessions lock", MTX_DEF);
275 /* Allocate I/O memory for SEC registers */
277 sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
280 if (sc->sc_rres == NULL) {
281 device_printf(dev, "could not allocate I/O memory!\n");
285 sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
286 sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
288 /* Setup interrupts */
290 error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
291 &sc->sc_pri_irid, sec_primary_intr, "primary");
/* SEC 3.x exposes an additional (normally unused) secondary IRQ line. */
297 if (sc->sc_version == 3) {
299 error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
300 &sc->sc_sec_irid, sec_secondary_intr, "secondary");
306 /* Alloc DMA memory for descriptors and link tables */
307 error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
308 SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));
/* One extra link-table entry serves as the ring-closing jump below. */
313 error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
314 (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));
319 /* Fill in descriptors and link tables */
320 for (i = 0; i < SEC_DESCRIPTORS; i++) {
321 sc->sc_desc[i].sd_desc =
322 (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
323 sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
324 (i * sizeof(struct sec_hw_desc));
327 for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
329 (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
330 sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
331 (i * sizeof(struct sec_hw_lt));
334 /* Last entry in link table is used to create a circle */
335 lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
339 lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;
341 /* Init descriptor and link table queues pointers */
342 SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
343 SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
344 SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
345 SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
346 SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
347 SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
348 SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
349 SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);
351 /* Create masks for fast checks */
352 sc->sc_int_error_mask = 0;
353 for (i = 0; i < SEC_CHANNELS; i++)
/* NOTE(review): the "~0ULL &" below is a no-op mask and could be dropped. */
354 sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));
/* Per-version idle mask: CSR layout differs between SEC 2.x and 3.x. */
356 switch (sc->sc_version) {
358 sc->sc_channel_idle_mask =
359 (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
360 (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
361 (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
362 (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
365 sc->sc_channel_idle_mask =
366 (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
367 (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
368 (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
369 (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
374 error = sec_init(sc);
379 /* Register in OCF (AESU) */
380 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
382 /* Register in OCF (DEU) */
383 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
384 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
386 /* Register in OCF (MDEU) */
387 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
388 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
389 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
390 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
391 crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
/* Only SEC 3.x can produce digests wider than 256 bits. */
392 if (sc->sc_version >= 3) {
393 crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0);
394 crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0);
/* Error unwind: release in reverse order of acquisition. */
400 sec_free_dma_mem(&(sc->sc_lt_dmem));
402 sec_free_dma_mem(&(sc->sc_desc_dmem));
404 sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
405 sc->sc_sec_irid, "secondary");
407 sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
408 sc->sc_pri_irid, "primary");
410 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
412 mtx_destroy(&sc->sc_controller_lock);
413 mtx_destroy(&sc->sc_descriptors_lock);
414 mtx_destroy(&sc->sc_sessions_lock);
/*
 * Detach: mark the driver as shutting down, drain ready+queued work (bounded
 * by SEC_TIMEOUT), force-stop channel DMA, mask interrupts, unregister from
 * OCF, and release all DMA memory, IRQs, the register window and the locks.
 */
420 sec_detach(device_t dev)
422 struct sec_softc *sc = device_get_softc(dev);
423 int i, error, timeout = SEC_TIMEOUT;
425 /* Prepare driver to shutdown */
426 SEC_LOCK(sc, descriptors);
428 SEC_UNLOCK(sc, descriptors);
430 /* Wait until all queued processing finishes */
432 SEC_LOCK(sc, descriptors);
433 i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
434 SEC_UNLOCK(sc, descriptors);
440 device_printf(dev, "queue flush timeout!\n");
442 /* DMA can be still active - stop it */
443 for (i = 0; i < SEC_CHANNELS; i++)
444 sec_channel_reset(sc, i, 1);
453 /* Disable interrupts */
454 SEC_WRITE(sc, SEC_IER, 0);
456 /* Unregister from OCF */
457 crypto_unregister_all(sc->sc_cid);
459 /* Free DMA memory */
460 for (i = 0; i < SEC_DESCRIPTORS; i++)
461 SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));
463 sec_free_dma_mem(&(sc->sc_lt_dmem));
464 sec_free_dma_mem(&(sc->sc_desc_dmem));
466 /* Release interrupts */
467 sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
468 sc->sc_pri_irid, "primary");
469 sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
470 sc->sc_sec_irid, "secondary");
474 error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
477 device_printf(dev, "bus_release_resource() failed for"
478 " I/O memory, error %d\n", error);
483 mtx_destroy(&sc->sc_controller_lock);
484 mtx_destroy(&sc->sc_descriptors_lock);
485 mtx_destroy(&sc->sc_sessions_lock);
/* Power-management stubs; bodies are in lines missing from this listing. */
491 sec_suspend(device_t dev)
498 sec_resume(device_t dev)
505 sec_shutdown(device_t dev)
/*
 * Allocate an IRQ resource and install 'handler' as an MPSAFE ithread
 * handler.  On bus_setup_intr() failure the IRQ resource is released again
 * so the caller gets all-or-nothing behavior.  Returns 0 on success.
 */
512 sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
513 int *irid, driver_intr_t handler, const char *iname)
517 (*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
520 if ((*ires) == NULL) {
521 device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
/* No filter (NULL): handler runs in an interrupt thread context. */
525 error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
526 NULL, handler, sc, ihand);
529 device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
530 if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
531 device_printf(sc->sc_dev, "could not release %s IRQ\n",
/*
 * Tear down an interrupt handler and release its IRQ resource.
 * Failures are only logged ('iname' names the IRQ for diagnostics);
 * teardown proceeds regardless.
 */
542 sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
543 int irid, const char *iname)
550 error = bus_teardown_intr(sc->sc_dev, ires, ihand);
552 device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
553 " IRQ, error %d\n", iname, error);
555 error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
557 device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
558 " IRQ, error %d\n", iname, error);
/*
 * Primary interrupt handler.  Two phases:
 *  1. Under the controller lock: scan ISR for per-channel errors, mark the
 *     offending descriptor (looked up by the channel's current descriptor
 *     pointer) with EIO, partially reset the channel, then ack everything
 *     by writing all-ones to ICR.
 *  2. Under the descriptors lock: retire completed descriptors in queue
 *     order (shd_done == 0xFF or a recorded error), complete their crypto
 *     requests via crypto_done(), and recycle descriptor/link-table slots.
 * Finally, unless shutting down, unblock OCF if we had set CRYPTO_SYMQ.
 */
562 sec_primary_intr(void *arg)
564 struct sec_softc *sc = arg;
565 struct sec_desc *desc;
569 SEC_LOCK(sc, controller);
571 /* Check for errors */
572 isr = SEC_READ(sc, SEC_ISR);
573 if (isr & sc->sc_int_error_mask) {
574 /* Check each channel for error */
575 for (i = 0; i < SEC_CHANNELS; i++) {
576 if ((isr & SEC_INT_CH_ERR(i)) == 0)
579 device_printf(sc->sc_dev,
580 "I/O error on channel %i!\n", i);
582 /* Find and mark problematic descriptor */
583 desc = sec_find_desc(sc, SEC_READ(sc,
587 desc->sd_error = EIO;
589 /* Do partial channel reset */
590 sec_channel_reset(sc, i, 0);
/* Acknowledge (clear) all pending interrupt causes. */
595 SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);
597 SEC_UNLOCK(sc, controller);
598 SEC_LOCK(sc, descriptors);
600 /* Handle processed descriptors */
601 SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
603 while (SEC_QUEUED_DESC_CNT(sc) > 0) {
604 desc = SEC_GET_QUEUED_DESC(sc);
/* Stop at the first still-in-flight descriptor (in-order completion). */
606 if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
607 SEC_PUT_BACK_QUEUED_DESC(sc);
611 SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
612 BUS_DMASYNC_PREWRITE);
614 desc->sd_crp->crp_etype = desc->sd_error;
615 crypto_done(desc->sd_crp);
617 SEC_DESC_FREE_POINTERS(desc);
618 SEC_DESC_FREE_LT(sc, desc);
619 SEC_DESC_QUEUED2FREE(sc);
622 SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
624 if (!sc->sc_shutdown) {
625 wakeup = sc->sc_blocked;
629 SEC_UNLOCK(sc, descriptors);
631 /* Enqueue ready descriptors in hardware */
635 crypto_unblock(sc->sc_cid, wakeup);
/*
 * Secondary interrupt handler (SEC 3.x only).  Not expected to fire in
 * normal operation: log it and delegate to the primary handler.
 */
639 sec_secondary_intr(void *arg)
641 struct sec_softc *sc = arg;
643 device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
644 sec_primary_intr(arg);
/*
 * Software-reset the whole controller by setting MCR[SWR] and polling until
 * the hardware clears it, bounded by SEC_TIMEOUT.  Logs on timeout.
 */
648 sec_controller_reset(struct sec_softc *sc)
650 int timeout = SEC_TIMEOUT;
652 /* Reset Controller */
653 SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);
655 while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
660 device_printf(sc->sc_dev, "timeout while waiting for "
/*
 * Reset one channel: 'full' selects a full reset (CCR[R]) versus a
 * continue/partial reset (CCR[CON]).  Polls for self-clear bounded by
 * SEC_TIMEOUT, then reprograms the channel configuration (done-interrupt
 * enable, notification type, burst size, plus version-specific bits).
 */
670 sec_channel_reset(struct sec_softc *sc, int channel, int full)
672 int timeout = SEC_TIMEOUT;
673 uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
677 reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
678 SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);
680 while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
685 device_printf(sc->sc_dev, "timeout while waiting for "
/* Base config: channel-done IRQ, notification type, burst size. */
692 reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;
694 switch(sc->sc_version) {
696 reg |= SEC_CHAN_CCR_CDWE;
699 reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
703 SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
/*
 * Hardware init: reset the controller twice (clears latched interrupts),
 * fully reset every channel, then enable done+error interrupts for all
 * channels in IER.  Returns 0 on success.
 */
710 sec_init(struct sec_softc *sc)
715 /* Reset controller twice to clear all pending interrupts */
716 error = sec_controller_reset(sc);
720 error = sec_controller_reset(sc);
725 for (i = 0; i < SEC_CHANNELS; i++) {
726 error = sec_channel_reset(sc, i, 1);
731 /* Enable Interrupts */
733 for (i = 0; i < SEC_CHANNELS; i++)
734 reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);
736 SEC_WRITE(sc, SEC_IER, reg);
/*
 * bus_dmamap_load() callback for sec_alloc_dma_mem(): the arena is created
 * with nsegments == 1, so record the single segment's physical address.
 */
742 sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
744 struct sec_dma_mem *dma_mem = arg;
749 KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
750 dma_mem->dma_paddr = segs->ds_addr;
/*
 * bus_dmamap_load*() callback for sec_desc_map_dma(): walk the DMA segments,
 * skip sdmi_offset bytes, and build a chain of hardware link-table entries
 * covering exactly sdmi_size bytes.  Records first/last entries and the
 * count in *sdmi so sec_make_pointer() can finish the descriptor pointer.
 * Caller must hold the descriptors lock (link-table slots are shared).
 */
754 sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
757 struct sec_desc_map_info *sdmi = arg;
758 struct sec_softc *sc = sdmi->sdmi_sc;
759 struct sec_lt *lt = NULL;
764 SEC_LOCK_ASSERT(sc, descriptors);
769 for (i = 0; i < nseg; i++) {
770 addr = segs[i].ds_addr;
771 size = segs[i].ds_len;
773 /* Skip requested offset */
774 if (sdmi->sdmi_offset >= size) {
775 sdmi->sdmi_offset -= size;
/* Offset lands inside this segment: trim its head. */
779 addr += sdmi->sdmi_offset;
780 size -= sdmi->sdmi_offset;
781 sdmi->sdmi_offset = 0;
783 /* Do not link more than requested */
784 if (sdmi->sdmi_size < size)
785 size = sdmi->sdmi_size;
787 lt = SEC_ALLOC_LT_ENTRY(sc);
788 lt->sl_lt->shl_length = size;
789 lt->sl_lt->shl_r = 0;
790 lt->sl_lt->shl_n = 0;
791 lt->sl_lt->shl_ptr = addr;
793 if (sdmi->sdmi_lt_first == NULL)
794 sdmi->sdmi_lt_first = lt;
796 sdmi->sdmi_lt_used += 1;
798 if ((sdmi->sdmi_size -= size) == 0)
802 sdmi->sdmi_lt_last = lt;
/*
 * Adapter for bus_dmamap_load_mbuf()/load_uio(), whose callbacks receive an
 * extra mapped-size argument; forwards to sec_dma_map_desc_cb().
 */
806 sec_dma_map_desc_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
807 bus_size_t size, int error)
810 sec_dma_map_desc_cb(arg, segs, nseg, error);
/*
 * Allocate a contiguous, zeroed, 32-bit-addressable DMA arena of 'size'
 * bytes (single segment, SEC_DMA_ALIGNMENT aligned) and record tag, map,
 * KVA and physical address in *dma_mem.  A non-NULL dma_vaddr means the
 * region is already allocated, so the call is a no-op.  dma_is_map = 0
 * marks this as bus_dmamem_alloc()-backed memory (vs. a plain map), which
 * selects the matching release path in sec_free_dma_mem().  On failure all
 * partially-acquired resources are unwound and dma_vaddr reset to NULL.
 */
814 sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
819 if (dma_mem->dma_vaddr != NULL)
822 error = bus_dma_tag_create(NULL, /* parent */
823 SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */
824 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
825 BUS_SPACE_MAXADDR, /* highaddr */
826 NULL, NULL, /* filtfunc, filtfuncarg */
827 size, 1, /* maxsize, nsegments */
828 size, 0, /* maxsegsz, flags */
829 NULL, NULL, /* lockfunc, lockfuncarg */
830 &(dma_mem->dma_tag)); /* dmat */
833 device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
838 error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
839 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));
842 device_printf(sc->sc_dev, "failed to allocate DMA safe"
843 " memory, error %i!\n", error);
847 error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
848 dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
852 device_printf(sc->sc_dev, "cannot get address of the DMA"
853 " memory, error %i\n", error);
857 dma_mem->dma_is_map = 0;
/* Error unwind: free in reverse order, then mark region unallocated. */
861 bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
863 bus_dma_tag_destroy(dma_mem->dma_tag);
865 dma_mem->dma_vaddr = NULL;
/*
 * Map caller-owned request data (flat memory, uio, or mbuf chain, selected
 * by 'type') for device DMA, building link-table entries via the
 * sec_dma_map_desc_cb() callbacks captured in *sdmi.  The tag allows up to
 * SEC_FREE_LT_CNT(sc) segments so the chain cannot exhaust the shared
 * link-table pool.  dma_is_map = 1 marks this as a map over external
 * memory so sec_free_dma_mem() destroys the map rather than freeing it.
 * A non-NULL dma_vaddr means the slot is already in use (error).
 */
870 sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, void *mem,
871 bus_size_t size, int type, struct sec_desc_map_info *sdmi)
875 if (dma_mem->dma_vaddr != NULL)
/* Per-type size bound: uio capped by available LT entries, mbuf by chain. */
882 size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
885 size = m_length((struct mbuf*)mem, NULL);
891 error = bus_dma_tag_create(NULL, /* parent */
892 SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */
893 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
894 BUS_SPACE_MAXADDR, /* highaddr */
895 NULL, NULL, /* filtfunc, filtfuncarg */
897 SEC_FREE_LT_CNT(sc), /* nsegments */
898 SEC_MAX_DMA_BLOCK_SIZE, 0, /* maxsegsz, flags */
899 NULL, NULL, /* lockfunc, lockfuncarg */
900 &(dma_mem->dma_tag)); /* dmat */
903 device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
905 dma_mem->dma_vaddr = NULL;
909 error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));
912 device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
914 bus_dma_tag_destroy(dma_mem->dma_tag);
/* Load variant matches the buffer type; callbacks build the LT chain. */
920 error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
921 mem, size, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
924 error = bus_dmamap_load_uio(dma_mem->dma_tag, dma_mem->dma_map,
925 mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
928 error = bus_dmamap_load_mbuf(dma_mem->dma_tag, dma_mem->dma_map,
929 mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
934 device_printf(sc->sc_dev, "cannot get address of the DMA"
935 " memory, error %i!\n", error);
936 bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
937 bus_dma_tag_destroy(dma_mem->dma_tag);
941 dma_mem->dma_is_map = 1;
942 dma_mem->dma_vaddr = mem;
/*
 * Release a DMA region: unload the map, then either destroy the map
 * (dma_is_map set — external memory mapped by sec_desc_map_dma()) or free
 * the driver-allocated memory (sec_alloc_dma_mem()), destroy the tag, and
 * reset dma_vaddr to NULL so repeated calls are harmless.
 */
948 sec_free_dma_mem(struct sec_dma_mem *dma_mem)
951 /* Check for double free */
952 if (dma_mem->dma_vaddr == NULL)
955 bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);
957 if (dma_mem->dma_is_map)
958 bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
960 bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr,
963 bus_dma_tag_destroy(dma_mem->dma_tag);
964 dma_mem->dma_vaddr = NULL;
/*
 * Return which channel currently holds execution unit 'eu', decoded from
 * the EU assignment status register (EUASR).  Hardware reports channels
 * 1-based; the "- 1" converts to the driver's 0-based numbering, so a
 * free EU yields -1.  Caller must hold the controller lock.
 */
968 sec_eu_channel(struct sec_softc *sc, int eu)
973 SEC_LOCK_ASSERT(sc, controller);
975 reg = SEC_READ(sc, SEC_EUASR);
979 channel = SEC_EUASR_AFEU(reg);
982 channel = SEC_EUASR_DEU(reg);
986 channel = SEC_EUASR_MDEU(reg);
989 channel = SEC_EUASR_RNGU(reg);
992 channel = SEC_EUASR_PKEU(reg);
995 channel = SEC_EUASR_AESU(reg);
998 channel = SEC_EUASR_KEU(reg);
1001 channel = SEC_EUASR_CRCU(reg);
1005 return (channel - 1);
/*
 * Push one descriptor into a channel's fetch FIFO.  If 'channel' is
 * negative, pick the first idle channel (CSR state/FIFO bits clear under
 * sc_channel_idle_mask).  Rejects the enqueue when no channel is free or
 * the selected channel's FIFO is already at SEC_MAX_FIFO_LEVEL.
 * Caller must hold the controller lock.
 */
1009 sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel)
1011 u_int fflvl = SEC_MAX_FIFO_LEVEL;
1015 SEC_LOCK_ASSERT(sc, controller);
1017 /* Find free channel if have not got one */
1019 for (i = 0; i < SEC_CHANNELS; i++) {
1020 reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
1022 if ((reg & sc->sc_channel_idle_mask) == 0) {
1029 /* There is no free channel */
1033 /* Check FIFO level on selected channel */
1034 reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
/* FIFO-level field location differs between SEC 2.x and 3.x CSR layouts. */
1036 switch(sc->sc_version) {
1038 fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M;
1041 fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M;
1045 if (fflvl >= SEC_MAX_FIFO_LEVEL)
1048 /* Enqueue descriptor in channel */
1049 SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr);
/*
 * Drain the ready queue into hardware.  For each ready descriptor, route by
 * current EU occupancy (sec_eu_channel() for both selected EUs):
 *  - both EUs busy on the same channel -> that channel (ordering);
 *  - exactly one EU busy -> its channel;
 *  - both free -> any idle channel (-1 lets sec_enqueue_desc() choose).
 * A descriptor that cannot be placed is put back and the loop stops, to
 * preserve submission order.  Takes descriptors then controller locks.
 */
1055 sec_enqueue(struct sec_softc *sc)
1057 struct sec_desc *desc;
1060 SEC_LOCK(sc, descriptors);
1061 SEC_LOCK(sc, controller);
1063 while (SEC_READY_DESC_CNT(sc) > 0) {
1064 desc = SEC_GET_READY_DESC(sc);
1066 ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0);
1067 ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1);
1070 * Both EU are used by the same channel.
1071 * Enqueue descriptor in channel used by busy EUs.
1073 if (ch0 >= 0 && ch0 == ch1) {
1074 if (sec_enqueue_desc(sc, desc, ch0) >= 0) {
1075 SEC_DESC_READY2QUEUED(sc);
1081 * Only one EU is free.
1082 * Enqueue descriptor in channel used by busy EU.
1084 if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) {
1085 if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1)
1087 SEC_DESC_READY2QUEUED(sc);
1094 * Enqueue descriptor in first free channel.
1096 if (ch0 < 0 && ch1 < 0) {
1097 if (sec_enqueue_desc(sc, desc, -1) >= 0) {
1098 SEC_DESC_READY2QUEUED(sc);
1103 /* Current descriptor can not be queued at the moment */
1104 SEC_PUT_BACK_READY_DESC(sc);
1108 SEC_UNLOCK(sc, controller);
1109 SEC_UNLOCK(sc, descriptors);
1112 static struct sec_desc *
1113 sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
1115 struct sec_desc *desc = NULL;
1118 SEC_LOCK_ASSERT(sc, descriptors);
1120 for (i = 0; i < SEC_CHANNELS; i++) {
1121 if (sc->sc_desc[i].sd_desc_paddr == paddr) {
1122 desc = &(sc->sc_desc[i]);
/*
 * Fill hardware-descriptor pointer slot 'n' with a raw physical address and
 * length — no link table, no per-request DMA mapping.  Used for fields that
 * live inside the descriptor itself (IV, keys) and for NULL pointers.
 * Caller must hold the descriptors lock.
 */
1131 sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
1132 bus_addr_t data, bus_size_t dsize)
1134 struct sec_hw_desc_ptr *ptr;
1136 SEC_LOCK_ASSERT(sc, descriptors);
1138 ptr = &(desc->sd_desc->shd_pointer[n]);
1139 ptr->shdp_length = dsize;
1140 ptr->shdp_extent = 0;
1142 ptr->shdp_ptr = data;
/*
 * Fill pointer slot 'n' from caller data that needs DMA mapping: map
 * (doffset, dsize) of 'data' (flat/uio/mbuf per 'dtype') into a link-table
 * chain via sec_desc_map_dma(), terminate the chain (shl_r = 1 on the last
 * entry), and point the descriptor slot at the first chain entry with the
 * jump/extension bit implied by the link-table form.
 * Caller must hold the descriptors lock.
 */
1148 sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
1149 u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype)
1151 struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
1152 struct sec_hw_desc_ptr *ptr;
1155 SEC_LOCK_ASSERT(sc, descriptors);
1157 /* For flat memory map only requested region */
1158 if (dtype == SEC_MEMORY) {
1159 data = (uint8_t*)(data) + doffset;
1160 sdmi.sdmi_offset = 0;
1163 error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), data, dsize,
/* Mark the last link-table entry as return/end of chain. */
1169 sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
1170 desc->sd_lt_used += sdmi.sdmi_lt_used;
1172 ptr = &(desc->sd_desc->shd_pointer[n]);
1173 ptr->shdp_length = dsize;
1174 ptr->shdp_extent = 0;
1176 ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;
/*
 * Split a session-init chain into at most one cipher op (*enc) and one
 * digest op (*mac), classified by sec_mdeu_can_handle().  Chains with a
 * third op, or a second op the MDEU cannot hash, are rejected.
 */
1182 sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
1183 struct cryptoini **mac)
1185 struct cryptoini *e, *m;
1190 /* We can haldle only two operations */
1191 if (m && m->cri_next)
1194 if (sec_mdeu_can_handle(e->cri_alg)) {
1200 if (m && !sec_mdeu_can_handle(m->cri_alg))
/*
 * Per-request counterpart of sec_split_cri(): split a cryptop's descriptor
 * chain into at most one cipher (*enc) and one digest (*mac) operation.
 */
1210 sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
1211 struct cryptodesc **mac)
1213 struct cryptodesc *e, *m, *t;
1218 /* We can haldle only two operations */
1219 if (m && m->crd_next)
1222 if (sec_mdeu_can_handle(e->crd_alg)) {
1228 if (m && !sec_mdeu_can_handle(m->crd_alg))
/*
 * Claim the first unused slot in the fixed sc_sessions[] array under the
 * sessions lock.  The slot index doubles as the session ID; the return of
 * that index (or a failure value when the table is full) is in lines
 * missing from this listing.
 */
1238 sec_alloc_session(struct sec_softc *sc)
1240 struct sec_session *ses = NULL;
1244 SEC_LOCK(sc, sessions);
1246 for (i = 0; i < SEC_MAX_SESSIONS; i++) {
1247 if (sc->sc_sessions[i].ss_used == 0) {
1248 ses = &(sc->sc_sessions[i]);
1258 SEC_UNLOCK(sc, sessions);
/*
 * Look up an active session by ID.  Out-of-range IDs are rejected up front;
 * an in-range but unused slot is also rejected (handling visible at the
 * ss_used check — the NULL result path is in lines missing here).
 */
1263 static struct sec_session *
1264 sec_get_session(struct sec_softc *sc, u_int sid)
1266 struct sec_session *ses;
1268 if (sid >= SEC_MAX_SESSIONS)
1271 SEC_LOCK(sc, sessions);
1273 ses = &(sc->sc_sessions[sid]);
1275 if (ses->ss_used == 0)
1278 SEC_UNLOCK(sc, sessions);
/*
 * OCF newsession entry point: split the cryptoini chain, validate key
 * lengths (and reject >256-bit HMAC keys on pre-3.0 hardware), allocate a
 * session slot, then probe the sec_eus[] method table until an EU accepts
 * the algorithm combination.  On success the cipher/digest keys are cached
 * in the session for later descriptor builds; on no-match the slot is
 * freed and EINVAL returned.
 */
1284 sec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
1286 struct sec_softc *sc = device_get_softc(dev);
1287 struct sec_eu_methods *eu = sec_eus;
1288 struct cryptoini *enc = NULL;
1289 struct cryptoini *mac = NULL;
1290 struct sec_session *ses;
1294 error = sec_split_cri(cri, &enc, &mac);
1298 /* Check key lengths */
1299 if (enc && enc->cri_key && (enc->cri_klen / 8) > SEC_MAX_KEY_LEN)
1302 if (mac && mac->cri_key && (mac->cri_klen / 8) > SEC_MAX_KEY_LEN)
1305 /* Only SEC 3.0 supports digests larger than 256 bits */
1306 if (sc->sc_version < 3 && mac && mac->cri_klen > 256)
1309 sid = sec_alloc_session(sc);
1313 ses = sec_get_session(sc, sid);
1315 /* Find EU for this session */
1316 while (eu->sem_make_desc != NULL) {
1317 error = eu->sem_newsession(sc, ses, enc, mac);
1324 /* If not found, return EINVAL */
1326 sec_free_session(sc, ses);
1330 /* Save cipher key */
1331 if (enc && enc->cri_key) {
1332 ses->ss_klen = enc->cri_klen / 8;
1333 memcpy(ses->ss_key, enc->cri_key, ses->ss_klen);
1336 /* Save digest key */
1337 if (mac && mac->cri_key) {
1338 ses->ss_mklen = mac->cri_klen / 8;
1339 memcpy(ses->ss_mkey, mac->cri_key, ses->ss_mklen);
/*
 * OCF freesession entry point: resolve the local session ID from the
 * 64-bit handle and return the slot to the pool.
 */
1349 sec_freesession(device_t dev, uint64_t tid)
1351 struct sec_softc *sc = device_get_softc(dev);
1352 struct sec_session *ses;
1355 ses = sec_get_session(sc, CRYPTO_SESID2LID(tid));
1359 sec_free_session(sc, ses);
/*
 * OCF process entry point.  Validates session/length/operation mix, grabs a
 * free descriptor (returning ERESTART with CRYPTO_SYMQ blocked when none are
 * available or the driver is going down), prepares the IV (generate or copy,
 * honoring CRD_F_IV_EXPLICIT/CRD_F_IV_PRESENT for encrypt vs. extract for
 * decrypt), refreshes explicit per-request keys, copies keys into the
 * hardware descriptor, delegates to the session EU's sem_make_desc(), and
 * finally moves the descriptor to the ready queue and kicks sec_enqueue().
 * Completion is reported asynchronously via crypto_done() from the
 * interrupt handler.
 */
1365 sec_process(device_t dev, struct cryptop *crp, int hint)
1367 struct sec_softc *sc = device_get_softc(dev);
1368 struct sec_desc *desc = NULL;
1369 struct cryptodesc *mac, *enc;
1370 struct sec_session *ses;
1371 int buftype, error = 0;
1373 /* Check Session ID */
1374 ses = sec_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid));
1376 crp->crp_etype = EINVAL;
1381 /* Check for input length */
1382 if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) {
1383 crp->crp_etype = E2BIG;
1388 /* Get descriptors */
1389 if (sec_split_crp(crp, &enc, &mac)) {
1390 crp->crp_etype = EINVAL;
1395 SEC_LOCK(sc, descriptors);
1396 SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1398 /* Block driver if there is no free descriptors or we are going down */
1399 if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) {
1400 sc->sc_blocked |= CRYPTO_SYMQ;
1401 SEC_UNLOCK(sc, descriptors);
1405 /* Prepare descriptor */
1406 desc = SEC_GET_FREE_DESC(sc);
1407 desc->sd_lt_used = 0;
/* Classify the request buffer: iov, mbuf chain, or flat memory. */
1411 if (crp->crp_flags & CRYPTO_F_IOV)
1413 else if (crp->crp_flags & CRYPTO_F_IMBUF)
1416 buftype = SEC_MEMORY;
/* Encrypt: use caller IV or generate one; write it back unless present. */
1418 if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
1419 if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1420 memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
1423 arc4rand(desc->sd_desc->shd_iv, ses->ss_ivlen, 0);
1425 if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
1426 crypto_copyback(crp->crp_flags, crp->crp_buf,
1427 enc->crd_inject, ses->ss_ivlen,
1428 desc->sd_desc->shd_iv);
/* Decrypt: take the IV from the request or extract it from the buffer. */
1430 if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1431 memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
1434 crypto_copydata(crp->crp_flags, crp->crp_buf,
1435 enc->crd_inject, ses->ss_ivlen,
1436 desc->sd_desc->shd_iv);
/* Per-request key overrides (CRD_F_KEY_EXPLICIT) update the session copy. */
1439 if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
1440 if ((enc->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
1441 ses->ss_klen = enc->crd_klen / 8;
1442 memcpy(ses->ss_key, enc->crd_key, ses->ss_klen);
1447 if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
1448 if ((mac->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
1449 ses->ss_mklen = mac->crd_klen / 8;
1450 memcpy(ses->ss_mkey, mac->crd_key, ses->ss_mklen);
1456 memcpy(desc->sd_desc->shd_key, ses->ss_key, ses->ss_klen);
1457 memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, ses->ss_mklen);
1459 error = ses->ss_eu->sem_make_desc(sc, ses, desc, crp, buftype);
/* EU build failed: undo pointer/LT/descriptor allocations and report. */
1463 SEC_DESC_FREE_POINTERS(desc);
1464 SEC_DESC_PUT_BACK_LT(sc, desc);
1465 SEC_PUT_BACK_FREE_DESC(sc);
1466 SEC_UNLOCK(sc, descriptors);
1467 crp->crp_etype = error;
1473 * Skip DONE interrupt if this is not last request in burst, but only
1474 * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE
1475 * signaling on each descriptor.
1477 if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
1478 desc->sd_desc->shd_dn = 0;
1480 desc->sd_desc->shd_dn = 1;
1482 SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1483 SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
1484 BUS_DMASYNC_POSTWRITE);
1485 SEC_DESC_FREE2READY(sc);
1486 SEC_UNLOCK(sc, descriptors);
1488 /* Enqueue ready descriptors in hardware */
/*
 * Build a COMMON_NONSNOOP (cipher-only) hardware descriptor.  Fixed 7-slot
 * pointer layout: 0=NULL, 1=IV in (inside the descriptor), 2=cipher key
 * (inside the descriptor), 3=data in, 4=data out (same region, in-place),
 * 5=IV out (unused), 6=NULL.  Secondary EU is unused.
 */
1495 sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
1496 struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1499 struct sec_hw_desc *hd = desc->sd_desc;
1502 hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1503 hd->shd_eu_sel1 = SEC_EU_NONE;
1506 /* Pointer 0: NULL */
1507 error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1511 /* Pointer 1: IV IN */
1512 error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
1513 offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1517 /* Pointer 2: Cipher Key */
1518 error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1519 offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1523 /* Pointer 3: Data IN */
1524 error = sec_make_pointer(sc, desc, 3, crp->crp_buf, enc->crd_skip,
1525 enc->crd_len, buftype);
1529 /* Pointer 4: Data OUT */
1530 error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1531 enc->crd_len, buftype);
1535 /* Pointer 5: IV OUT (Not used: NULL) */
1536 error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
1540 /* Pointer 6: NULL */
1541 error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
/*
 * Build a common HMAC-snooping hardware descriptor (combined
 * cipher + HMAC request in a single pass).
 *
 * Layout constraints enforced up front: the MAC region must cover at
 * least the cipher region, and both regions must end at the same
 * offset (mac->crd_skip + mac->crd_len == enc->crd_skip + enc->crd_len),
 * so the "HMAC-only" prefix is exactly mac->crd_len - enc->crd_len
 * bytes before the ciphered data.
 *
 * The hash EU/mode are derived from the MAC descriptor via
 * sec_mdeu_config(); keys and IV come from the shadow copies embedded
 * in the hardware descriptor (shd_mkey / shd_key / shd_iv).
 *
 * NOTE(review): per-pointer error checks are elided in this excerpt.
 */
1547 sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
1548 struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1549 struct cryptodesc *mac, int buftype)
1551 struct sec_hw_desc *hd = desc->sd_desc;
1552 u_int eu, mode, hashlen;
/* MAC region must contain the cipher region ... */
1555 if (mac->crd_len < enc->crd_len)
/* ... and both must end at the same byte. */
1558 if (mac->crd_skip + mac->crd_len != enc->crd_skip + enc->crd_len)
/* Select MDEU unit and mode bits for the requested HMAC algorithm. */
1561 error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1565 hd->shd_desc_type = SEC_DT_HMAC_SNOOP;
1566 hd->shd_eu_sel1 = eu;
1567 hd->shd_mode1 = mode;
1569 /* Pointer 0: HMAC Key */
1570 error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
1571 offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen);
/* Authenticated-but-not-encrypted prefix (AAD-style region). */
1575 /* Pointer 1: HMAC-Only Data IN */
1576 error = sec_make_pointer(sc, desc, 1, crp->crp_buf, mac->crd_skip,
1577 mac->crd_len - enc->crd_len, buftype);
1581 /* Pointer 2: Cipher Key */
1582 error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1583 offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1587 /* Pointer 3: IV IN */
1588 error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
1589 offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1593 /* Pointer 4: Data IN */
1594 error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1595 enc->crd_len, buftype);
/* In-place: Data OUT aliases the Data IN region. */
1599 /* Pointer 5: Data OUT */
1600 error = sec_make_pointer(sc, desc, 5, crp->crp_buf, enc->crd_skip,
1601 enc->crd_len, buftype);
/* Digest is written at crd_inject in the request buffer. */
1605 /* Pointer 6: HMAC OUT */
1606 error = sec_make_pointer(sc, desc, 6, crp->crp_buf, mac->crd_inject,
/*
 * AESU session setup: accept only CRYPTO_AES_CBC and record the
 * AES block size as the session IV length.
 */
1615 sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses,
1616 struct cryptoini *enc, struct cryptoini *mac)
/* Reject anything other than AES-CBC (elided branch returns failure). */
1622 if (enc->cri_alg != CRYPTO_AES_CBC)
1625 ses->ss_ivlen = AES_BLOCK_LEN;
/*
 * Build a hardware descriptor for an AES-CBC request.
 *
 * Splits the crp chain into cipher (enc) and optional MAC (mac)
 * descriptors, programs EU0 for AESU in CBC mode (with the ED bit
 * set for encryption), then delegates to the snooping builder when a
 * MAC is present, or the non-snooping builder otherwise.
 */
1631 sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1632 struct sec_desc *desc, struct cryptop *crp, int buftype)
1634 struct sec_hw_desc *hd = desc->sd_desc;
1635 struct cryptodesc *enc, *mac;
1638 error = sec_split_crp(crp, &enc, &mac);
1645 hd->shd_eu_sel0 = SEC_EU_AESU;
1646 hd->shd_mode0 = SEC_AESU_MODE_CBC;
/* Session was created for AES-CBC only; reject mismatched requests. */
1648 if (enc->crd_alg != CRYPTO_AES_CBC)
/* ED bit selects encrypt direction; decrypt leaves it clear. */
1651 if (enc->crd_flags & CRD_F_ENCRYPT) {
1652 hd->shd_mode0 |= SEC_AESU_MODE_ED;
/* With a MAC: combined cipher+HMAC (snooping) descriptor ... */
1658 error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
/* ... without: cipher-only (non-snooping) descriptor. */
1661 error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
/*
 * DEU session setup: accept DES-CBC and 3DES-CBC; both use the DES
 * block size as the session IV length.
 */
1670 sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses,
1671 struct cryptoini *enc, struct cryptoini *mac)
1677 switch (enc->cri_alg) {
1678 case CRYPTO_DES_CBC:
1679 case CRYPTO_3DES_CBC:
1685 ses->ss_ivlen = DES_BLOCK_LEN;
/*
 * Build a hardware descriptor for a DES/3DES-CBC request.
 *
 * Mirrors sec_aesu_make_desc(): split the crp chain, program EU0 for
 * the DEU in CBC mode (TS bit adds triple-DES, ED bit selects
 * encryption), then build a snooping or non-snooping descriptor
 * depending on whether a MAC descriptor is present.
 */
1691 sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1692 struct sec_desc *desc, struct cryptop *crp, int buftype)
1694 struct sec_hw_desc *hd = desc->sd_desc;
1695 struct cryptodesc *enc, *mac;
1698 error = sec_split_crp(crp, &enc, &mac);
1705 hd->shd_eu_sel0 = SEC_EU_DEU;
1706 hd->shd_mode0 = SEC_DEU_MODE_CBC;
1708 switch (enc->crd_alg) {
1709 case CRYPTO_3DES_CBC:
/* 3DES adds the triple-switch bit, then falls through to DES. */
1710 hd->shd_mode0 |= SEC_DEU_MODE_TS;
1712 case CRYPTO_DES_CBC:
/* ED bit selects encrypt direction; decrypt leaves it clear. */
1718 if (enc->crd_flags & CRD_F_ENCRYPT) {
1719 hd->shd_mode0 |= SEC_DEU_MODE_ED;
/* With a MAC: combined cipher+HMAC (snooping) descriptor ... */
1725 error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
/* ... without: cipher-only (non-snooping) descriptor. */
1728 error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
/*
 * Return whether the MDEU supports the given opencrypto HMAC
 * algorithm (MD5/SHA1/SHA2-256/384/512 HMAC variants).
 */
1737 sec_mdeu_can_handle(u_int alg)
1742 case CRYPTO_MD5_HMAC:
1743 case CRYPTO_SHA1_HMAC:
1744 case CRYPTO_SHA2_256_HMAC:
1745 case CRYPTO_SHA2_384_HMAC:
1746 case CRYPTO_SHA2_512_HMAC:
/*
 * Translate an opencrypto hash descriptor into MDEU settings.
 *
 * Outputs:
 *   *eu      - execution unit to use (MDEU_A for MD5/SHA1/SHA256,
 *              MDEU_B for SHA384/SHA512);
 *   *mode    - MDEU mode bits (PD|INIT base, plus HMAC and the
 *              algorithm-select bit);
 *   *hashlen - digest length to emit; for plain digests the
 *              per-algorithm length, overridden to the fixed
 *              SEC_HMAC_HASH_LEN when HMAC mode is set.
 */
1754 sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen)
/* Base mode: pad (PD) and initialize (INIT) for every algorithm. */
1757 *mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
1760 switch (crd->crd_alg) {
1761 case CRYPTO_MD5_HMAC:
1762 *mode |= SEC_MDEU_MODE_HMAC;
/* NOTE(review): elided lines suggest a plain-MD5 case shares this
 * tail; hashlen below applies to the non-HMAC variant. */
1765 *eu = SEC_EU_MDEU_A;
1766 *mode |= SEC_MDEU_MODE_MD5;
1767 *hashlen = MD5_HASH_LEN;
1769 case CRYPTO_SHA1_HMAC:
1770 *mode |= SEC_MDEU_MODE_HMAC;
1773 *eu = SEC_EU_MDEU_A;
1774 *mode |= SEC_MDEU_MODE_SHA1;
1775 *hashlen = SHA1_HASH_LEN;
1777 case CRYPTO_SHA2_256_HMAC:
1778 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256;
1779 *eu = SEC_EU_MDEU_A;
/* SHA-384/512 require the second MDEU unit. */
1781 case CRYPTO_SHA2_384_HMAC:
1782 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384;
1783 *eu = SEC_EU_MDEU_B;
1785 case CRYPTO_SHA2_512_HMAC:
1786 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512;
1787 *eu = SEC_EU_MDEU_B;
/* HMAC output is truncated/fixed to SEC_HMAC_HASH_LEN. */
1793 if (*mode & SEC_MDEU_MODE_HMAC)
1794 *hashlen = SEC_HMAC_HASH_LEN;
/*
 * MDEU session setup: succeeds when a MAC descriptor is supplied and
 * its algorithm is one the MDEU supports.
 */
1800 sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses,
1801 struct cryptoini *enc, struct cryptoini *mac)
1804 if (mac && sec_mdeu_can_handle(mac->cri_alg))
1811 sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1812 struct sec_desc *desc, struct cryptop *crp, int buftype)
1814 struct cryptodesc *enc, *mac;
1815 struct sec_hw_desc *hd = desc->sd_desc;
1816 u_int eu, mode, hashlen;
1819 error = sec_split_crp(crp, &enc, &mac);
1826 error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1830 hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1831 hd->shd_eu_sel0 = eu;
1832 hd->shd_mode0 = mode;
1833 hd->shd_eu_sel1 = SEC_EU_NONE;
1836 /* Pointer 0: NULL */
1837 error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1841 /* Pointer 1: Context In (Not used: NULL) */
1842 error = sec_make_pointer_direct(sc, desc, 1, 0, 0);
1846 /* Pointer 2: HMAC Key (or NULL, depending on digest type) */
1847 if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
1848 error = sec_make_pointer_direct(sc, desc, 2,
1849 desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
1850 shd_mkey), ses->ss_mklen);
1852 error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
1857 /* Pointer 3: Input Data */
1858 error = sec_make_pointer(sc, desc, 3, crp->crp_buf, mac->crd_skip,
1859 mac->crd_len, buftype);
1863 /* Pointer 4: NULL */
1864 error = sec_make_pointer_direct(sc, desc, 4, 0, 0);
1868 /* Pointer 5: Hash out */
1869 error = sec_make_pointer(sc, desc, 5, crp->crp_buf,
1870 mac->crd_inject, hashlen, buftype);
1874 /* Pointer 6: NULL */
1875 error = sec_make_pointer_direct(sc, desc, 6, 0, 0);