2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
19 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and
29 * 3.0 are supported.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <sys/param.h>
37 #include <sys/systm.h>
39 #include <sys/endian.h>
40 #include <sys/kernel.h>
42 #include <sys/malloc.h>
44 #include <sys/module.h>
45 #include <sys/mutex.h>
46 #include <sys/random.h>
49 #include <machine/_inttypes.h>
50 #include <machine/bus.h>
51 #include <machine/resource.h>
53 #include <opencrypto/cryptodev.h>
54 #include <opencrypto/xform_auth.h>
55 #include "cryptodev_if.h"
57 #include <dev/ofw/ofw_bus_subr.h>
58 #include <dev/sec/sec.h>
60 static int sec_probe(device_t dev);
61 static int sec_attach(device_t dev);
62 static int sec_detach(device_t dev);
63 static int sec_suspend(device_t dev);
64 static int sec_resume(device_t dev);
65 static int sec_shutdown(device_t dev);
66 static void sec_primary_intr(void *arg);
67 static void sec_secondary_intr(void *arg);
68 static int sec_setup_intr(struct sec_softc *sc, struct resource **ires,
69 void **ihand, int *irid, driver_intr_t handler, const char *iname);
70 static void sec_release_intr(struct sec_softc *sc, struct resource *ires,
71 void *ihand, int irid, const char *iname);
72 static int sec_controller_reset(struct sec_softc *sc);
73 static int sec_channel_reset(struct sec_softc *sc, int channel, int full);
74 static int sec_init(struct sec_softc *sc);
75 static int sec_alloc_dma_mem(struct sec_softc *sc,
76 struct sec_dma_mem *dma_mem, bus_size_t size);
77 static int sec_desc_map_dma(struct sec_softc *sc,
78 struct sec_dma_mem *dma_mem, struct cryptop *crp, bus_size_t size,
79 struct sec_desc_map_info *sdmi);
80 static void sec_free_dma_mem(struct sec_dma_mem *dma_mem);
81 static void sec_enqueue(struct sec_softc *sc);
82 static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc,
84 static int sec_eu_channel(struct sec_softc *sc, int eu);
85 static int sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
86 u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize);
87 static int sec_make_pointer_direct(struct sec_softc *sc,
88 struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize);
89 static int sec_probesession(device_t dev,
90 const struct crypto_session_params *csp);
91 static int sec_newsession(device_t dev, crypto_session_t cses,
92 const struct crypto_session_params *csp);
93 static int sec_process(device_t dev, struct cryptop *crp, int hint);
94 static int sec_build_common_ns_desc(struct sec_softc *sc,
95 struct sec_desc *desc, const struct crypto_session_params *csp,
97 static int sec_build_common_s_desc(struct sec_softc *sc,
98 struct sec_desc *desc, const struct crypto_session_params *csp,
101 static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr);
104 static bool sec_aesu_newsession(const struct crypto_session_params *csp);
105 static int sec_aesu_make_desc(struct sec_softc *sc,
106 const struct crypto_session_params *csp, struct sec_desc *desc,
107 struct cryptop *crp);
110 static bool sec_mdeu_can_handle(u_int alg);
111 static int sec_mdeu_config(const struct crypto_session_params *csp,
112 u_int *eu, u_int *mode, u_int *hashlen);
113 static bool sec_mdeu_newsession(const struct crypto_session_params *csp);
114 static int sec_mdeu_make_desc(struct sec_softc *sc,
115 const struct crypto_session_params *csp, struct sec_desc *desc,
116 struct cryptop *crp);
118 static device_method_t sec_methods[] = {
119 /* Device interface */
120 DEVMETHOD(device_probe, sec_probe),
121 DEVMETHOD(device_attach, sec_attach),
122 DEVMETHOD(device_detach, sec_detach),
124 DEVMETHOD(device_suspend, sec_suspend),
125 DEVMETHOD(device_resume, sec_resume),
126 DEVMETHOD(device_shutdown, sec_shutdown),
129 DEVMETHOD(cryptodev_probesession, sec_probesession),
130 DEVMETHOD(cryptodev_newsession, sec_newsession),
131 DEVMETHOD(cryptodev_process, sec_process),
135 static driver_t sec_driver = {
138 sizeof(struct sec_softc),
141 static devclass_t sec_devclass;
142 DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0);
143 MODULE_DEPEND(sec, crypto, 1, 1, 1);
145 static struct sec_eu_methods sec_eus[] = {
158 sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
161 /* Sync only if dma memory is valid */
162 if (dma_mem->dma_vaddr != NULL)
163 bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
167 sec_get_pointer_data(struct sec_desc *desc, u_int n)
170 return (desc->sd_ptr_dmem[n].dma_vaddr);
174 sec_probe(device_t dev)
176 struct sec_softc *sc;
179 if (!ofw_bus_status_okay(dev))
182 if (!ofw_bus_is_compatible(dev, "fsl,sec2.0"))
185 sc = device_get_softc(dev);
188 sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
191 if (sc->sc_rres == NULL)
194 sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
195 sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
197 id = SEC_READ(sc, SEC_ID);
199 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
203 device_set_desc(dev, "Freescale Security Engine 2.0");
207 device_set_desc(dev, "Freescale Security Engine 3.0");
211 device_set_desc(dev, "Freescale Security Engine 3.1");
215 device_printf(dev, "unknown SEC ID 0x%016"PRIx64"!\n", id);
/*
 * device_attach(9) hook: registers with OpenCrypto, creates the
 * controller/descriptor mutexes, maps the register window, wires the
 * primary (and, on SEC 3.x, secondary) interrupt, allocates the
 * descriptor and link-table DMA rings, links the last link-table entry
 * back to the first to form a ring, initializes the free/ready/queued
 * counters and the per-version idle/error masks, then resets the engine.
 * NOTE(review): this extraction is missing interleaved lines (error
 * labels, returns, closing braces); code tokens below are unchanged.
 */
223 sec_attach(device_t dev)
225 struct sec_softc *sc;
226 struct sec_hw_lt *lt;
230 sc = device_get_softc(dev);
/* Register as a hardware crypto driver; OCF allocates sec_session per session. */
235 sc->sc_cid = crypto_get_driverid(dev, sizeof(struct sec_session),
236 CRYPTOCAP_F_HARDWARE);
237 if (sc->sc_cid < 0) {
238 device_printf(dev, "could not get crypto driver ID!\n");
243 mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
244 "SEC Controller lock", MTX_DEF);
245 mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
246 "SEC Descriptors lock", MTX_DEF);
248 /* Allocate I/O memory for SEC registers */
250 sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
253 if (sc->sc_rres == NULL) {
254 device_printf(dev, "could not allocate I/O memory!\n");
258 sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
259 sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
261 /* Setup interrupts */
263 error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
264 &sc->sc_pri_irid, sec_primary_intr, "primary");
/* SEC 3.x exposes a second interrupt line; SEC 2.x does not. */
270 if (sc->sc_version == 3) {
272 error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
273 &sc->sc_sec_irid, sec_secondary_intr, "secondary");
279 /* Alloc DMA memory for descriptors and link tables */
280 error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
281 SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));
286 error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
287 (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));
292 /* Fill in descriptors and link tables */
293 for (i = 0; i < SEC_DESCRIPTORS; i++) {
294 sc->sc_desc[i].sd_desc =
295 (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
296 sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
297 (i * sizeof(struct sec_hw_desc));
300 for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
302 (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
303 sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
304 (i * sizeof(struct sec_hw_lt));
307 /* Last entry in link table is used to create a circle */
308 lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
312 lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;
314 /* Init descriptor and link table queues pointers */
315 SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
316 SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
317 SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
318 SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
319 SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
320 SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
321 SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
322 SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);
324 /* Create masks for fast checks */
325 sc->sc_int_error_mask = 0;
326 for (i = 0; i < SEC_CHANNELS; i++)
327 sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));
/* Channel-status "busy" bits differ between the CSR2 and CSR3 layouts. */
329 switch (sc->sc_version) {
331 sc->sc_channel_idle_mask =
332 (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
333 (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
334 (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
335 (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
338 sc->sc_channel_idle_mask =
339 (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
340 (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
341 (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
342 (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
347 error = sec_init(sc);
/* Failure unwind: release resources in reverse order of acquisition. */
355 sec_free_dma_mem(&(sc->sc_lt_dmem));
357 sec_free_dma_mem(&(sc->sc_desc_dmem));
359 sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
360 sc->sc_sec_irid, "secondary");
362 sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
363 sc->sc_pri_irid, "primary");
365 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
367 mtx_destroy(&sc->sc_controller_lock);
368 mtx_destroy(&sc->sc_descriptors_lock);
/*
 * device_detach(9) hook: flags the driver as shutting down, waits (bounded
 * by SEC_TIMEOUT) for ready+queued descriptors to drain, force-resets all
 * channels, masks interrupts, unregisters from OpenCrypto and releases the
 * DMA rings, interrupts, register window and mutexes.
 * NOTE(review): intervening lines (timeout-loop body, returns, braces) are
 * missing from this extraction; code tokens below are unchanged.
 */
374 sec_detach(device_t dev)
376 struct sec_softc *sc = device_get_softc(dev);
377 int i, error, timeout = SEC_TIMEOUT;
379 /* Prepare driver to shutdown */
380 SEC_LOCK(sc, descriptors);
382 SEC_UNLOCK(sc, descriptors);
384 /* Wait until all queued processing finishes */
386 SEC_LOCK(sc, descriptors);
387 i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
388 SEC_UNLOCK(sc, descriptors);
394 device_printf(dev, "queue flush timeout!\n");
396 /* DMA can be still active - stop it */
397 for (i = 0; i < SEC_CHANNELS; i++)
398 sec_channel_reset(sc, i, 1);
407 /* Disable interrupts */
408 SEC_WRITE(sc, SEC_IER, 0);
410 /* Unregister from OCF */
411 crypto_unregister_all(sc->sc_cid);
413 /* Free DMA memory */
414 for (i = 0; i < SEC_DESCRIPTORS; i++)
415 SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));
417 sec_free_dma_mem(&(sc->sc_lt_dmem));
418 sec_free_dma_mem(&(sc->sc_desc_dmem));
420 /* Release interrupts */
421 sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
422 sc->sc_pri_irid, "primary");
423 sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
424 sc->sc_sec_irid, "secondary");
428 error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
431 device_printf(dev, "bus_release_resource() failed for"
432 " I/O memory, error %d\n", error);
437 mtx_destroy(&sc->sc_controller_lock);
438 mtx_destroy(&sc->sc_descriptors_lock);
444 sec_suspend(device_t dev)
451 sec_resume(device_t dev)
458 sec_shutdown(device_t dev)
465 sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
466 int *irid, driver_intr_t handler, const char *iname)
470 (*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
473 if ((*ires) == NULL) {
474 device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
478 error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
479 NULL, handler, sc, ihand);
482 device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
483 if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
484 device_printf(sc->sc_dev, "could not release %s IRQ\n",
495 sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
496 int irid, const char *iname)
503 error = bus_teardown_intr(sc->sc_dev, ires, ihand);
505 device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
506 " IRQ, error %d\n", iname, error);
508 error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
510 device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
511 " IRQ, error %d\n", iname, error);
/*
 * Primary interrupt handler.  Error path: for each channel flagged in the
 * interrupt status register, attribute the failure to the descriptor the
 * channel was processing (looked up by bus address), mark it EIO and do a
 * partial channel reset.  Completion path: retire queued descriptors whose
 * hardware done-marker is set, verifying or copying out the digest for
 * auth sessions, call crypto_done(), and recycle descriptor and link-table
 * entries.  Finally unblocks OCF and re-enqueues any ready descriptors.
 * NOTE(review): intervening lines are missing from this extraction; code
 * tokens below are unchanged.
 */
515 sec_primary_intr(void *arg)
517 struct sec_session *ses;
518 struct sec_softc *sc = arg;
519 struct sec_desc *desc;
522 uint8_t hash[HASH_MAX_LEN];
525 SEC_LOCK(sc, controller);
527 /* Check for errors */
528 isr = SEC_READ(sc, SEC_ISR);
529 if (isr & sc->sc_int_error_mask) {
530 /* Check each channel for error */
531 for (i = 0; i < SEC_CHANNELS; i++) {
532 if ((isr & SEC_INT_CH_ERR(i)) == 0)
535 device_printf(sc->sc_dev,
536 "I/O error on channel %i!\n", i);
538 /* Find and mark problematic descriptor */
539 desc = sec_find_desc(sc, SEC_READ(sc,
543 desc->sd_error = EIO;
545 /* Do partial channel reset */
546 sec_channel_reset(sc, i, 0);
/* Acknowledge all interrupt causes at once. */
551 SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);
553 SEC_UNLOCK(sc, controller);
554 SEC_LOCK(sc, descriptors);
556 /* Handle processed descriptors */
557 SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
559 while (SEC_QUEUED_DESC_CNT(sc) > 0) {
560 desc = SEC_GET_QUEUED_DESC(sc);
/* 0xFF is the hardware done-marker; stop at the first unfinished one. */
562 if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
563 SEC_PUT_BACK_QUEUED_DESC(sc);
567 SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
568 BUS_DMASYNC_PREWRITE);
571 crp->crp_etype = desc->sd_error;
572 if (crp->crp_etype == 0) {
573 ses = crypto_get_driver_session(crp->crp_session);
574 if (ses->ss_mlen != 0) {
/* Verify mode: compare computed digest; otherwise copy it out. */
575 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
577 crp->crp_digest_start,
580 desc->sd_desc->shd_digest,
581 hash, ses->ss_mlen) != 0)
582 crp->crp_etype = EBADMSG;
585 crp->crp_digest_start,
587 desc->sd_desc->shd_digest);
590 crypto_done(desc->sd_crp);
592 SEC_DESC_FREE_POINTERS(desc);
593 SEC_DESC_FREE_LT(sc, desc);
594 SEC_DESC_QUEUED2FREE(sc);
597 SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
599 if (!sc->sc_shutdown) {
600 wakeup = sc->sc_blocked;
604 SEC_UNLOCK(sc, descriptors);
606 /* Enqueue ready descriptors in hardware */
610 crypto_unblock(sc->sc_cid, wakeup);
614 sec_secondary_intr(void *arg)
616 struct sec_softc *sc = arg;
618 device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
619 sec_primary_intr(arg);
623 sec_controller_reset(struct sec_softc *sc)
625 int timeout = SEC_TIMEOUT;
627 /* Reset Controller */
628 SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);
630 while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
635 device_printf(sc->sc_dev, "timeout while waiting for "
645 sec_channel_reset(struct sec_softc *sc, int channel, int full)
647 int timeout = SEC_TIMEOUT;
648 uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
652 reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
653 SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);
655 while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
660 device_printf(sc->sc_dev, "timeout while waiting for "
667 reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;
669 switch(sc->sc_version) {
671 reg |= SEC_CHAN_CCR_CDWE;
674 reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
678 SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
685 sec_init(struct sec_softc *sc)
690 /* Reset controller twice to clear all pending interrupts */
691 error = sec_controller_reset(sc);
695 error = sec_controller_reset(sc);
700 for (i = 0; i < SEC_CHANNELS; i++) {
701 error = sec_channel_reset(sc, i, 1);
706 /* Enable Interrupts */
708 for (i = 0; i < SEC_CHANNELS; i++)
709 reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);
711 SEC_WRITE(sc, SEC_IER, reg);
717 sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
719 struct sec_dma_mem *dma_mem = arg;
724 KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
725 dma_mem->dma_paddr = segs->ds_addr;
/*
 * bus_dmamap_load_crp() callback: walks the DMA segment list, skips the
 * first sdmi->sdmi_offset bytes, and builds one hardware link-table entry
 * per (clipped) segment until sdmi->sdmi_size bytes are linked.  Records
 * the first/last entry and the entry count in *sdmi for the caller
 * (sec_make_pointer()) to finish the descriptor pointer.
 * NOTE(review): intervening lines (error early-return, continue/break,
 * braces) are missing from this extraction; code tokens are unchanged.
 */
729 sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
732 struct sec_desc_map_info *sdmi = arg;
733 struct sec_softc *sc = sdmi->sdmi_sc;
734 struct sec_lt *lt = NULL;
739 SEC_LOCK_ASSERT(sc, descriptors);
744 for (i = 0; i < nseg; i++) {
745 addr = segs[i].ds_addr;
746 size = segs[i].ds_len;
748 /* Skip requested offset */
749 if (sdmi->sdmi_offset >= size) {
750 sdmi->sdmi_offset -= size;
754 addr += sdmi->sdmi_offset;
755 size -= sdmi->sdmi_offset;
756 sdmi->sdmi_offset = 0;
758 /* Do not link more than requested */
759 if (sdmi->sdmi_size < size)
760 size = sdmi->sdmi_size;
/* r = return/last flag (set later by caller), n = next-entry present. */
762 lt = SEC_ALLOC_LT_ENTRY(sc);
763 lt->sl_lt->shl_length = size;
764 lt->sl_lt->shl_r = 0;
765 lt->sl_lt->shl_n = 0;
766 lt->sl_lt->shl_ptr = addr;
768 if (sdmi->sdmi_lt_first == NULL)
769 sdmi->sdmi_lt_first = lt;
771 sdmi->sdmi_lt_used += 1;
773 if ((sdmi->sdmi_size -= size) == 0)
777 sdmi->sdmi_lt_last = lt;
781 sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
786 if (dma_mem->dma_vaddr != NULL)
789 error = bus_dma_tag_create(NULL, /* parent */
790 SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */
791 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
792 BUS_SPACE_MAXADDR, /* highaddr */
793 NULL, NULL, /* filtfunc, filtfuncarg */
794 size, 1, /* maxsize, nsegments */
795 size, 0, /* maxsegsz, flags */
796 NULL, NULL, /* lockfunc, lockfuncarg */
797 &(dma_mem->dma_tag)); /* dmat */
800 device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
805 error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
806 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));
809 device_printf(sc->sc_dev, "failed to allocate DMA safe"
810 " memory, error %i!\n", error);
814 error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
815 dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
819 device_printf(sc->sc_dev, "cannot get address of the DMA"
820 " memory, error %i\n", error);
824 dma_mem->dma_is_map = 0;
828 bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
830 bus_dma_tag_destroy(dma_mem->dma_tag);
832 dma_mem->dma_vaddr = NULL;
/*
 * Map a crypto request buffer for DMA: computes an upper bound on the
 * mapping size from the buffer type, creates a multi-segment tag/map
 * bounded by the free link-table entries, and loads the crp buffer with
 * sec_dma_map_desc_cb() building the link-table chain.  On success the
 * sec_dma_mem is marked as a map (dma_is_map = 1) and dma_vaddr is set to
 * the crp so sec_free_dma_mem() knows how to tear it down.
 * NOTE(review): intervening lines are missing from this extraction
 * (return statements, the UIO/default switch cases, braces); the bare
 * `cb->cb_vm_page_offset` use suggests a local `cb` declared on a missing
 * line -- verify against the full source.  Code tokens are unchanged.
 */
837 sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
838 struct cryptop *crp, bus_size_t size, struct sec_desc_map_info *sdmi)
842 if (dma_mem->dma_vaddr != NULL)
/* Bound 'size' by what the buffer type can actually provide. */
845 switch (crp->crp_buf.cb_type) {
846 case CRYPTO_BUF_CONTIG:
849 size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
851 case CRYPTO_BUF_MBUF:
852 size = m_length(crp->crp_buf.cb_mbuf, NULL);
854 case CRYPTO_BUF_VMPAGE:
855 size = PAGE_SIZE - cb->cb_vm_page_offset;
861 error = bus_dma_tag_create(NULL, /* parent */
862 SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */
863 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
864 BUS_SPACE_MAXADDR, /* highaddr */
865 NULL, NULL, /* filtfunc, filtfuncarg */
867 SEC_FREE_LT_CNT(sc), /* nsegments */
868 SEC_MAX_DMA_BLOCK_SIZE, 0, /* maxsegsz, flags */
869 NULL, NULL, /* lockfunc, lockfuncarg */
870 &(dma_mem->dma_tag)); /* dmat */
873 device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
875 dma_mem->dma_vaddr = NULL;
879 error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));
882 device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
884 bus_dma_tag_destroy(dma_mem->dma_tag);
888 error = bus_dmamap_load_crp(dma_mem->dma_tag, dma_mem->dma_map, crp,
889 sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
892 device_printf(sc->sc_dev, "cannot get address of the DMA"
893 " memory, error %i!\n", error);
894 bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
895 bus_dma_tag_destroy(dma_mem->dma_tag);
/* dma_is_map == 1: borrowed crp buffer; unload/destroy but never free. */
899 dma_mem->dma_is_map = 1;
900 dma_mem->dma_vaddr = crp;
906 sec_free_dma_mem(struct sec_dma_mem *dma_mem)
909 /* Check for double free */
910 if (dma_mem->dma_vaddr == NULL)
913 bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);
915 if (dma_mem->dma_is_map)
916 bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
918 bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr,
921 bus_dma_tag_destroy(dma_mem->dma_tag);
922 dma_mem->dma_vaddr = NULL;
926 sec_eu_channel(struct sec_softc *sc, int eu)
931 SEC_LOCK_ASSERT(sc, controller);
933 reg = SEC_READ(sc, SEC_EUASR);
937 channel = SEC_EUASR_AFEU(reg);
940 channel = SEC_EUASR_DEU(reg);
944 channel = SEC_EUASR_MDEU(reg);
947 channel = SEC_EUASR_RNGU(reg);
950 channel = SEC_EUASR_PKEU(reg);
953 channel = SEC_EUASR_AESU(reg);
956 channel = SEC_EUASR_KEU(reg);
959 channel = SEC_EUASR_CRCU(reg);
963 return (channel - 1);
/*
 * Push a descriptor's bus address into a channel's fetch FIFO.  When
 * channel < 0, scans for an idle channel first.  Returns the channel used
 * or a negative value when no channel/FIFO slot is available (caller puts
 * the descriptor back on the ready queue).
 * NOTE(review): intervening lines (the channel<0 guard, break/returns,
 * braces) are missing from this extraction; code tokens are unchanged.
 */
967 sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel)
969 u_int fflvl = SEC_MAX_FIFO_LEVEL;
973 SEC_LOCK_ASSERT(sc, controller);
975 /* Find free channel if have not got one */
977 for (i = 0; i < SEC_CHANNELS; i++) {
/*
 * NOTE(review): this reads CSR(channel) inside the scan over i -- it
 * looks like it should be CSR(i); verify against the full source.
 */
978 reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
980 if ((reg & sc->sc_channel_idle_mask) == 0) {
987 /* There is no free channel */
991 /* Check FIFO level on selected channel */
992 reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
/* Fetch-FIFO level field lives at different offsets on SEC 2.x vs 3.x. */
994 switch(sc->sc_version) {
996 fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M;
999 fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M;
1003 if (fflvl >= SEC_MAX_FIFO_LEVEL)
1006 /* Enqueue descriptor in channel */
1007 SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr);
1013 sec_enqueue(struct sec_softc *sc)
1015 struct sec_desc *desc;
1018 SEC_LOCK(sc, descriptors);
1019 SEC_LOCK(sc, controller);
1021 while (SEC_READY_DESC_CNT(sc) > 0) {
1022 desc = SEC_GET_READY_DESC(sc);
1024 ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0);
1025 ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1);
1028 * Both EU are used by the same channel.
1029 * Enqueue descriptor in channel used by busy EUs.
1031 if (ch0 >= 0 && ch0 == ch1) {
1032 if (sec_enqueue_desc(sc, desc, ch0) >= 0) {
1033 SEC_DESC_READY2QUEUED(sc);
1039 * Only one EU is free.
1040 * Enqueue descriptor in channel used by busy EU.
1042 if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) {
1043 if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1)
1045 SEC_DESC_READY2QUEUED(sc);
1052 * Enqueue descriptor in first free channel.
1054 if (ch0 < 0 && ch1 < 0) {
1055 if (sec_enqueue_desc(sc, desc, -1) >= 0) {
1056 SEC_DESC_READY2QUEUED(sc);
1061 /* Current descriptor can not be queued at the moment */
1062 SEC_PUT_BACK_READY_DESC(sc);
1066 SEC_UNLOCK(sc, controller);
1067 SEC_UNLOCK(sc, descriptors);
1070 static struct sec_desc *
1071 sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
1073 struct sec_desc *desc = NULL;
1076 SEC_LOCK_ASSERT(sc, descriptors);
1078 for (i = 0; i < SEC_CHANNELS; i++) {
1079 if (sc->sc_desc[i].sd_desc_paddr == paddr) {
1080 desc = &(sc->sc_desc[i]);
1089 sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
1090 bus_addr_t data, bus_size_t dsize)
1092 struct sec_hw_desc_ptr *ptr;
1094 SEC_LOCK_ASSERT(sc, descriptors);
1096 ptr = &(desc->sd_desc->shd_pointer[n]);
1097 ptr->shdp_length = dsize;
1098 ptr->shdp_extent = 0;
1100 ptr->shdp_ptr = data;
1106 sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
1107 u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize)
1109 struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
1110 struct sec_hw_desc_ptr *ptr;
1113 SEC_LOCK_ASSERT(sc, descriptors);
1115 error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), crp, dsize,
1121 sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
1122 desc->sd_lt_used += sdmi.sdmi_lt_used;
1124 ptr = &(desc->sd_desc->shd_pointer[n]);
1125 ptr->shdp_length = dsize;
1126 ptr->shdp_extent = 0;
1128 ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;
1134 sec_cipher_supported(const struct crypto_session_params *csp)
1137 switch (csp->csp_cipher_alg) {
1138 case CRYPTO_AES_CBC:
1140 if (csp->csp_ivlen != AES_BLOCK_LEN)
1147 if (csp->csp_cipher_klen == 0 || csp->csp_cipher_klen > SEC_MAX_KEY_LEN)
1154 sec_auth_supported(struct sec_softc *sc,
1155 const struct crypto_session_params *csp)
1158 switch (csp->csp_auth_alg) {
1159 case CRYPTO_SHA2_384_HMAC:
1160 case CRYPTO_SHA2_512_HMAC:
1161 if (sc->sc_version < 3)
1164 case CRYPTO_SHA1_HMAC:
1165 case CRYPTO_SHA2_256_HMAC:
1166 if (csp->csp_auth_klen > SEC_MAX_KEY_LEN)
1178 sec_probesession(device_t dev, const struct crypto_session_params *csp)
1180 struct sec_softc *sc = device_get_softc(dev);
1182 if (csp->csp_flags != 0)
1184 switch (csp->csp_mode) {
1185 case CSP_MODE_DIGEST:
1186 if (!sec_auth_supported(sc, csp))
1189 case CSP_MODE_CIPHER:
1190 if (!sec_cipher_supported(csp))
1194 if (!sec_auth_supported(sc, csp) || !sec_cipher_supported(csp))
1200 return (CRYPTODEV_PROBE_HARDWARE);
1204 sec_newsession(device_t dev, crypto_session_t cses,
1205 const struct crypto_session_params *csp)
1207 struct sec_eu_methods *eu = sec_eus;
1208 struct sec_session *ses;
1210 ses = crypto_get_driver_session(cses);
1212 /* Find EU for this session */
1213 while (eu->sem_make_desc != NULL) {
1214 if (eu->sem_newsession(csp))
1218 KASSERT(eu->sem_make_desc != NULL, ("failed to find eu for session"));
1220 /* Save cipher key */
1221 if (csp->csp_cipher_key != NULL)
1222 memcpy(ses->ss_key, csp->csp_cipher_key, csp->csp_cipher_klen);
1224 /* Save digest key */
1225 if (csp->csp_auth_key != NULL)
1226 memcpy(ses->ss_mkey, csp->csp_auth_key, csp->csp_auth_klen);
1228 if (csp->csp_auth_alg != 0) {
1229 if (csp->csp_auth_mlen == 0)
1230 ses->ss_mlen = crypto_auth_hash(csp)->hashsize;
1232 ses->ss_mlen = csp->csp_auth_mlen;
/*
 * cryptodev_process hook: validates the request size, takes a free
 * descriptor (or blocks OCF when none are available / shutting down),
 * copies the IV and any per-request rekey material into the hardware
 * descriptor, lets the session's EU build the descriptor pointers, tunes
 * DONE-interrupt coalescing for SEC 3.x bursts, and moves the descriptor
 * to the ready queue for sec_enqueue().
 * NOTE(review): intervening lines (returns, unlocks, else branches,
 * braces) are missing from this extraction; code tokens are unchanged.
 */
1239 sec_process(device_t dev, struct cryptop *crp, int hint)
1241 struct sec_softc *sc = device_get_softc(dev);
1242 struct sec_desc *desc = NULL;
1243 const struct crypto_session_params *csp;
1244 struct sec_session *ses;
1247 ses = crypto_get_driver_session(crp->crp_session);
1248 csp = crypto_get_params(crp->crp_session);
1250 /* Check for input length */
1251 if (crypto_buffer_len(&crp->crp_buf) > SEC_MAX_DMA_BLOCK_SIZE) {
1252 crp->crp_etype = E2BIG;
1257 SEC_LOCK(sc, descriptors);
1258 SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1260 /* Block driver if there is no free descriptors or we are going down */
1261 if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) {
1262 sc->sc_blocked |= CRYPTO_SYMQ;
1263 SEC_UNLOCK(sc, descriptors);
1267 /* Prepare descriptor */
1268 desc = SEC_GET_FREE_DESC(sc);
1269 desc->sd_lt_used = 0;
1273 if (csp->csp_cipher_alg != 0)
1274 crypto_read_iv(crp, desc->sd_desc->shd_iv);
/* Per-request rekey: refresh the session copies before loading them. */
1276 if (crp->crp_cipher_key != NULL)
1277 memcpy(ses->ss_key, crp->crp_cipher_key, csp->csp_cipher_klen);
1279 if (crp->crp_auth_key != NULL)
1280 memcpy(ses->ss_mkey, crp->crp_auth_key, csp->csp_auth_klen);
1282 memcpy(desc->sd_desc->shd_key, ses->ss_key, csp->csp_cipher_klen);
1283 memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, csp->csp_auth_klen);
1285 error = ses->ss_eu->sem_make_desc(sc, csp, desc, crp);
/* On EU failure: unwind pointers, link-table entries and the descriptor. */
1288 SEC_DESC_FREE_POINTERS(desc);
1289 SEC_DESC_PUT_BACK_LT(sc, desc);
1290 SEC_PUT_BACK_FREE_DESC(sc);
1291 SEC_UNLOCK(sc, descriptors);
1292 crp->crp_etype = error;
1298 * Skip DONE interrupt if this is not last request in burst, but only
1299 * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE
1300 * signaling on each descriptor.
1302 if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
1303 desc->sd_desc->shd_dn = 0;
1305 desc->sd_desc->shd_dn = 1;
1307 SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1308 SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
1309 BUS_DMASYNC_POSTWRITE);
1310 SEC_DESC_FREE2READY(sc);
1311 SEC_UNLOCK(sc, descriptors);
1313 /* Enqueue ready descriptors in hardware */
1320 sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
1321 const struct crypto_session_params *csp, struct cryptop *crp)
1323 struct sec_hw_desc *hd = desc->sd_desc;
1326 hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1327 hd->shd_eu_sel1 = SEC_EU_NONE;
1330 /* Pointer 0: NULL */
1331 error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1335 /* Pointer 1: IV IN */
1336 error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
1337 offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen);
1341 /* Pointer 2: Cipher Key */
1342 error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1343 offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen);
1347 /* Pointer 3: Data IN */
1348 error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start,
1349 crp->crp_payload_length);
1353 /* Pointer 4: Data OUT */
1354 error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start,
1355 crp->crp_payload_length);
1359 /* Pointer 5: IV OUT (Not used: NULL) */
1360 error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
1364 /* Pointer 6: NULL */
1365 error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1371 sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
1372 const struct crypto_session_params *csp, struct cryptop *crp)
1374 struct sec_hw_desc *hd = desc->sd_desc;
1375 u_int eu, mode, hashlen;
1378 error = sec_mdeu_config(csp, &eu, &mode, &hashlen);
1382 hd->shd_desc_type = SEC_DT_HMAC_SNOOP;
1383 hd->shd_eu_sel1 = eu;
1384 hd->shd_mode1 = mode;
1386 /* Pointer 0: HMAC Key */
1387 error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
1388 offsetof(struct sec_hw_desc, shd_mkey), csp->csp_auth_klen);
1392 /* Pointer 1: HMAC-Only Data IN */
1393 error = sec_make_pointer(sc, desc, 1, crp, crp->crp_aad_start,
1394 crp->crp_aad_length);
1398 /* Pointer 2: Cipher Key */
1399 error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1400 offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen);
1404 /* Pointer 3: IV IN */
1405 error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
1406 offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen);
1410 /* Pointer 4: Data IN */
1411 error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start,
1412 crp->crp_payload_length);
1416 /* Pointer 5: Data OUT */
1417 error = sec_make_pointer(sc, desc, 5, crp, crp->crp_payload_start,
1418 crp->crp_payload_length);
1422 /* Pointer 6: HMAC OUT */
1423 error = sec_make_pointer_direct(sc, desc, 6, desc->sd_desc_paddr +
1424 offsetof(struct sec_hw_desc, shd_digest), hashlen);
1432 sec_aesu_newsession(const struct crypto_session_params *csp)
1435 return (csp->csp_cipher_alg == CRYPTO_AES_CBC);
1439 sec_aesu_make_desc(struct sec_softc *sc,
1440 const struct crypto_session_params *csp, struct sec_desc *desc,
1441 struct cryptop *crp)
1443 struct sec_hw_desc *hd = desc->sd_desc;
1446 hd->shd_eu_sel0 = SEC_EU_AESU;
1447 hd->shd_mode0 = SEC_AESU_MODE_CBC;
1449 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
1450 hd->shd_mode0 |= SEC_AESU_MODE_ED;
1455 if (csp->csp_mode == CSP_MODE_ETA)
1456 error = sec_build_common_s_desc(sc, desc, csp, crp);
1458 error = sec_build_common_ns_desc(sc, desc, csp, crp);
1466 sec_mdeu_can_handle(u_int alg)
1470 case CRYPTO_SHA1_HMAC:
1471 case CRYPTO_SHA2_256_HMAC:
1472 case CRYPTO_SHA2_384_HMAC:
1473 case CRYPTO_SHA2_512_HMAC:
1481 sec_mdeu_config(const struct crypto_session_params *csp, u_int *eu, u_int *mode,
1485 *mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
1488 switch (csp->csp_auth_alg) {
1489 case CRYPTO_SHA1_HMAC:
1490 *mode |= SEC_MDEU_MODE_HMAC;
1493 *eu = SEC_EU_MDEU_A;
1494 *mode |= SEC_MDEU_MODE_SHA1;
1495 *hashlen = SHA1_HASH_LEN;
1497 case CRYPTO_SHA2_256_HMAC:
1498 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256;
1499 *eu = SEC_EU_MDEU_A;
1501 case CRYPTO_SHA2_384_HMAC:
1502 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384;
1503 *eu = SEC_EU_MDEU_B;
1505 case CRYPTO_SHA2_512_HMAC:
1506 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512;
1507 *eu = SEC_EU_MDEU_B;
1513 if (*mode & SEC_MDEU_MODE_HMAC)
1514 *hashlen = SEC_HMAC_HASH_LEN;
1520 sec_mdeu_newsession(const struct crypto_session_params *csp)
1523 return (sec_mdeu_can_handle(csp->csp_auth_alg));
1527 sec_mdeu_make_desc(struct sec_softc *sc,
1528 const struct crypto_session_params *csp,
1529 struct sec_desc *desc, struct cryptop *crp)
1531 struct sec_hw_desc *hd = desc->sd_desc;
1532 u_int eu, mode, hashlen;
1535 error = sec_mdeu_config(csp, &eu, &mode, &hashlen);
1539 hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1540 hd->shd_eu_sel0 = eu;
1541 hd->shd_mode0 = mode;
1542 hd->shd_eu_sel1 = SEC_EU_NONE;
1545 /* Pointer 0: NULL */
1546 error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1550 /* Pointer 1: Context In (Not used: NULL) */
1551 error = sec_make_pointer_direct(sc, desc, 1, 0, 0);
1555 /* Pointer 2: HMAC Key (or NULL, depending on digest type) */
1556 if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
1557 error = sec_make_pointer_direct(sc, desc, 2,
1558 desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
1559 shd_mkey), csp->csp_auth_klen);
1561 error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
1566 /* Pointer 3: Input Data */
1567 error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start,
1568 crp->crp_payload_length);
1572 /* Pointer 4: NULL */
1573 error = sec_make_pointer_direct(sc, desc, 4, 0, 0);
1577 /* Pointer 5: Hash out */
1578 error = sec_make_pointer_direct(sc, desc, 5, desc->sd_desc_paddr +
1579 offsetof(struct sec_hw_desc, shd_digest), hashlen);
1583 /* Pointer 6: NULL */
1584 error = sec_make_pointer_direct(sc, desc, 6, 0, 0);