2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2020, 2021 Rubicon Communications, LLC (Netgate)
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
32 #include <sys/counter.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/mutex.h>
41 #include <sys/sglist.h>
42 #include <sys/sysctl.h>
44 #include <machine/atomic.h>
45 #include <machine/bus.h>
47 #include <crypto/rijndael/rijndael.h>
48 #include <opencrypto/cryptodev.h>
49 #include <opencrypto/xform.h>
51 #include <dev/ofw/ofw_bus.h>
52 #include <dev/ofw/ofw_bus_subr.h>
54 #include "cryptodev_if.h"
56 #include "safexcel_reg.h"
57 #include "safexcel_var.h"
/*
 * OFW "compatible" strings accepted by this driver; ocd_data carries the
 * EIP part number.  NOTE(review): the table's terminating sentinel entry
 * is not visible in this extraction.
 */
60 * We only support the EIP97 for now.
62 static struct ofw_compat_data safexcel_compat[] = {
63 { "inside-secure,safexcel-eip97ies", (uintptr_t)97 },
64 { "inside-secure,safexcel-eip97", (uintptr_t)97 },
/* MMIO register block base offsets for the EIP-97 hardware variant. */
68 const struct safexcel_reg_offsets eip97_regs_offset = {
69 .hia_aic = SAFEXCEL_EIP97_HIA_AIC_BASE,
70 .hia_aic_g = SAFEXCEL_EIP97_HIA_AIC_G_BASE,
71 .hia_aic_r = SAFEXCEL_EIP97_HIA_AIC_R_BASE,
72 .hia_aic_xdr = SAFEXCEL_EIP97_HIA_AIC_xDR_BASE,
73 .hia_dfe = SAFEXCEL_EIP97_HIA_DFE_BASE,
74 .hia_dfe_thr = SAFEXCEL_EIP97_HIA_DFE_THR_BASE,
75 .hia_dse = SAFEXCEL_EIP97_HIA_DSE_BASE,
76 .hia_dse_thr = SAFEXCEL_EIP97_HIA_DSE_THR_BASE,
77 .hia_gen_cfg = SAFEXCEL_EIP97_HIA_GEN_CFG_BASE,
78 .pe = SAFEXCEL_EIP97_PE_BASE,
/* MMIO register block base offsets for the EIP-197 hardware variant. */
81 const struct safexcel_reg_offsets eip197_regs_offset = {
82 .hia_aic = SAFEXCEL_EIP197_HIA_AIC_BASE,
83 .hia_aic_g = SAFEXCEL_EIP197_HIA_AIC_G_BASE,
84 .hia_aic_r = SAFEXCEL_EIP197_HIA_AIC_R_BASE,
85 .hia_aic_xdr = SAFEXCEL_EIP197_HIA_AIC_xDR_BASE,
86 .hia_dfe = SAFEXCEL_EIP197_HIA_DFE_BASE,
87 .hia_dfe_thr = SAFEXCEL_EIP197_HIA_DFE_THR_BASE,
88 .hia_dse = SAFEXCEL_EIP197_HIA_DSE_BASE,
89 .hia_dse_thr = SAFEXCEL_EIP197_HIA_DSE_THR_BASE,
90 .hia_gen_cfg = SAFEXCEL_EIP197_HIA_GEN_CFG_BASE,
91 .pe = SAFEXCEL_EIP197_PE_BASE,
/*
 * Return the request slot at the ring's current read position.
 * NOTE(review): the lines computing index "i" (presumably from
 * ring->cdr.read) are missing from this extraction; only the bounds
 * assertion and the return are visible.
 */
94 static struct safexcel_request *
95 safexcel_next_request(struct safexcel_ring *ring)
100 KASSERT(i >= 0 && i < SAFEXCEL_RING_SIZE,
101 ("%s: out of bounds request index %d", __func__, i));
102 return (&ring->requests[i]);
/*
 * Pop the next command descriptor from the CDR, advancing the read
 * index modulo the ring size.  NOTE(review): the empty-ring branch body
 * (write == read, presumably "return (NULL)") and the final return are
 * not visible in this extraction.
 */
105 static struct safexcel_cmd_descr *
106 safexcel_cmd_descr_next(struct safexcel_cmd_descr_ring *ring)
108 struct safexcel_cmd_descr *cdesc;
110 if (ring->write == ring->read)
112 cdesc = &ring->desc[ring->read];
113 ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
/*
 * Pop the next result descriptor from the RDR, advancing the read index
 * modulo the ring size.  Mirrors safexcel_cmd_descr_next(); the same
 * empty-ring branch body and final return are not visible here.
 */
117 static struct safexcel_res_descr *
118 safexcel_res_descr_next(struct safexcel_res_descr_ring *ring)
120 struct safexcel_res_descr *rdesc;
122 if (ring->write == ring->read)
124 rdesc = &ring->desc[ring->read];
125 ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
/*
 * Reserve the next free request slot, failing when the ring is full
 * (the slot after "i" would collide with the CDR read index; the
 * failure branch body is not visible here).  Caller must hold the
 * ring lock.
 */
129 static struct safexcel_request *
130 safexcel_alloc_request(struct safexcel_softc *sc, struct safexcel_ring *ring)
134 mtx_assert(&ring->mtx, MA_OWNED);
137 if ((i + 1) % SAFEXCEL_RING_SIZE == ring->cdr.read)
139 return (&ring->requests[i]);
/*
 * Release a completed request: unload its data DMA map if loaded, and
 * scrub sensitive material (context record contents and IV) from
 * memory.  Caller must hold the ring lock.
 */
143 safexcel_free_request(struct safexcel_ring *ring, struct safexcel_request *req)
145 struct safexcel_context_record *ctx;
147 mtx_assert(&ring->mtx, MA_OWNED);
149 if (req->dmap_loaded) {
150 bus_dmamap_unload(ring->data_dtag, req->dmap);
151 req->dmap_loaded = false;
153 ctx = (struct safexcel_context_record *)req->ctx.vaddr;
/* explicit_bzero() so the compiler cannot elide the wipes. */
154 explicit_bzero(ctx->data, sizeof(ctx->data));
155 explicit_bzero(req->iv, sizeof(req->iv));
/*
 * Result ring completion handler.  Reads the hardware's processed-packet
 * count, retires the command and result descriptors of each completed
 * request, translates per-descriptor error codes into cryptop error
 * status, acknowledges the processed work back to the hardware, and
 * finally completes the crypto requests outside the ring lock.
 */
159 safexcel_rdr_intr(struct safexcel_softc *sc, int ringidx)
161 TAILQ_HEAD(, cryptop) cq;
162 struct cryptop *crp, *tmp;
163 struct safexcel_cmd_descr *cdesc;
164 struct safexcel_res_descr *rdesc;
165 struct safexcel_request *req;
166 struct safexcel_ring *ring;
167 uint32_t blocked, error, i, ncdescs, nrdescs, nreqs;
170 ring = &sc->sc_ring[ringidx];
/* Number of packets the engine reports as processed on this ring. */
172 nreqs = SAFEXCEL_READ(sc,
173 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT);
174 nreqs >>= SAFEXCEL_xDR_PROC_xD_PKT_OFFSET;
175 nreqs &= SAFEXCEL_xDR_PROC_xD_PKT_MASK;
177 SAFEXCEL_DPRINTF(sc, 1,
178 "zero pending requests on ring %d\n", ringidx);
179 mtx_lock(&ring->mtx);
185 ring = &sc->sc_ring[ringidx];
/* Sync descriptor rings and token memory before the CPU reads them. */
186 bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
187 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
188 bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
189 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
190 bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
191 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
193 ncdescs = nrdescs = 0;
194 for (i = 0; i < nreqs; i++) {
195 req = safexcel_next_request(ring);
197 bus_dmamap_sync(req->ctx.tag, req->ctx.map,
198 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
199 bus_dmamap_sync(ring->data_dtag, req->dmap,
200 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/* Retire this request's command descriptor chain. */
202 ncdescs += req->cdescs;
203 while (req->cdescs-- > 0) {
204 cdesc = safexcel_cmd_descr_next(&ring->cdr);
205 KASSERT(cdesc != NULL,
206 ("%s: missing control descriptor", __func__));
207 if (req->cdescs == 0)
208 KASSERT(cdesc->last_seg,
209 ("%s: chain is not terminated", __func__));
/* Retire result descriptors, recording the first error seen. */
211 nrdescs += req->rdescs;
212 while (req->rdescs-- > 0) {
213 rdesc = safexcel_res_descr_next(&ring->rdr);
214 error = rdesc->result_data.error_code;
216 if (error == SAFEXCEL_RESULT_ERR_AUTH_FAILED &&
217 req->crp->crp_etype == 0) {
218 req->crp->crp_etype = EBADMSG;
220 SAFEXCEL_DPRINTF(sc, 1,
221 "error code %#x\n", error);
222 req->crp->crp_etype = EIO;
227 TAILQ_INSERT_TAIL(&cq, req->crp, crp_next);
230 mtx_lock(&ring->mtx);
232 KASSERT(ring->queued >= nreqs,
233 ("%s: request count underflow, %d queued %d completed",
234 __func__, ring->queued, nreqs));
235 ring->queued -= nreqs;
/* Acknowledge the processed packets/descriptors to the hardware. */
238 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT,
239 SAFEXCEL_xDR_PROC_xD_PKT(nreqs) |
240 (sc->sc_config.rd_offset * nrdescs * sizeof(uint32_t)));
241 blocked = ring->blocked;
245 if (ring->queued != 0) {
/* Re-arm the interrupt threshold for the still-queued work. */
247 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
248 SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | imin(ring->queued, 16));
250 mtx_unlock(&ring->mtx);
253 crypto_unblock(sc->sc_cid, blocked);
/* Complete requests without holding the ring lock. */
255 TAILQ_FOREACH_SAFE(crp, &cq, crp_next, tmp)
/*
 * Per-ring interrupt handler.  Reads the ring's enabled interrupt
 * status, acknowledges pending CDR and RDR status bits and the
 * ring-level IRQ, then reaps completions via safexcel_rdr_intr().
 * NOTE(review): several lines (softc/ring extraction from "arg",
 * error handling for SAFEXCEL_xDR_ERR) are missing from this view.
 */
260 safexcel_ring_intr(void *arg)
262 struct safexcel_softc *sc;
263 struct safexcel_intr_handle *ih;
264 uint32_t status, stat;
272 status = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
273 SAFEXCEL_HIA_AIC_R_ENABLED_STAT(ring));
/* Acknowledge command descriptor ring interrupts. */
275 if (status & SAFEXCEL_CDR_IRQ(ring)) {
276 stat = SAFEXCEL_READ(sc,
277 SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
279 SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
280 stat & SAFEXCEL_CDR_INTR_MASK);
/* Acknowledge result descriptor ring interrupts. */
284 if (status & SAFEXCEL_RDR_IRQ(ring)) {
285 stat = SAFEXCEL_READ(sc,
286 SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
287 if ((stat & SAFEXCEL_xDR_ERR) == 0)
290 SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
291 stat & SAFEXCEL_RDR_INTR_MASK);
/* Ack the ring interrupt at the AIC before reaping completions. */
294 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ACK(ring),
298 safexcel_rdr_intr(sc, ring);
/*
 * Discover and validate the hardware configuration.  Selects the
 * EIP-97 or EIP-197 register layout based on sc_type, counts the ring
 * interrupt controllers, requires 64-bit addressing with no alignment
 * constraints, and derives ring counts and per-descriptor sizing
 * (cd/rd/atok offsets, rounded to the host data-width mask) into
 * sc->sc_config.  Error-return lines are not visible in this view.
 */
302 safexcel_configure(struct safexcel_softc *sc)
304 uint32_t i, mask, pemask, reg;
307 if (sc->sc_type == 197) {
308 sc->sc_offsets = eip197_regs_offset;
309 pemask = SAFEXCEL_N_PES_MASK;
311 sc->sc_offsets = eip97_regs_offset;
312 pemask = EIP97_N_PES_MASK;
317 /* Scan for valid ring interrupt controllers. */
318 for (i = 0; i < SAFEXCEL_MAX_RING_AIC; i++) {
319 reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
320 SAFEXCEL_HIA_AIC_R_VERSION(i));
321 if (SAFEXCEL_REG_LO16(reg) != EIP201_VERSION_LE)
324 sc->sc_config.aic_rings = i;
325 if (sc->sc_config.aic_rings == 0)
328 reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_OPTIONS);
329 /* Check for 64bit addressing. */
330 if ((reg & SAFEXCEL_OPT_ADDR_64) == 0)
332 /* Check alignment constraints (which we do not support). */
333 if (((reg & SAFEXCEL_OPT_TGT_ALIGN_MASK) >>
334 SAFEXCEL_OPT_TGT_ALIGN_OFFSET) != 0)
/* Host data width, used to round descriptor offsets. */
338 (reg & SAFEXCEL_xDR_HDW_MASK) >> SAFEXCEL_xDR_HDW_OFFSET;
339 mask = (1 << sc->sc_config.hdw) - 1;
341 sc->sc_config.rings = reg & SAFEXCEL_N_RINGS_MASK;
342 /* Limit the number of rings to the number of the AIC Rings. */
343 sc->sc_config.rings = MIN(sc->sc_config.rings, sc->sc_config.aic_rings);
345 sc->sc_config.pes = (reg & pemask) >> SAFEXCEL_N_PES_OFFSET;
/* Descriptor sizes in 32-bit words, offsets rounded up to the mask. */
347 sc->sc_config.cd_size =
348 sizeof(struct safexcel_cmd_descr) / sizeof(uint32_t);
349 sc->sc_config.cd_offset = (sc->sc_config.cd_size + mask) & ~mask;
351 sc->sc_config.rd_size =
352 sizeof(struct safexcel_res_descr) / sizeof(uint32_t);
353 sc->sc_config.rd_offset = (sc->sc_config.rd_size + mask) & ~mask;
355 sc->sc_config.atok_offset =
356 (SAFEXCEL_MAX_ATOKENS * sizeof(struct safexcel_instr) + mask) &
/*
 * Configure host interface (HIA) bus access: enable byte swapping when
 * the HIA reports a big-endian version signature, and program the
 * read/write cache attributes in the master control register.
 */
363 safexcel_init_hia_bus_access(struct safexcel_softc *sc)
365 uint32_t version, val;
367 /* Determine endianness and configure byte swap. */
368 version = SAFEXCEL_READ(sc,
369 SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_VERSION);
370 val = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL);
371 if (SAFEXCEL_REG_HI16(version) == SAFEXCEL_HIA_VERSION_BE) {
372 val = SAFEXCEL_READ(sc,
373 SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL);
/* Toggle the byte-swap control bits (shifted into the high byte). */
374 val = val ^ (SAFEXCEL_MST_CTRL_NO_BYTE_SWAP >> 24);
376 SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL,
380 /* Configure wr/rd cache values. */
381 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_GEN_CFG(sc) + SAFEXCEL_HIA_MST_CTRL,
382 SAFEXCEL_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
383 SAFEXCEL_MST_CTRL_WD_CACHE(WR_CACHE_4BITS));
/*
 * Mask all global (AIC-G) interrupt sources and acknowledge anything
 * already pending.
 */
387 safexcel_disable_global_interrupts(struct safexcel_softc *sc)
389 /* Disable and clear pending interrupts. */
391 SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ENABLE_CTRL, 0);
393 SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
394 SAFEXCEL_AIC_G_ACK_ALL_MASK);
398 * Configure the data fetch engine. This component parses command descriptors
399 * and sets up DMA transfers from host memory to the corresponding processing
403 safexcel_configure_dfe_engine(struct safexcel_softc *sc, int pe)
405 /* Reset all DFE threads. */
407 SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
408 SAFEXCEL_DxE_THR_CTRL_RESET_PE);
410 /* Deassert the DFE reset. */
412 SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe), 0);
414 /* DMA transfer size to use. */
/* Sizes below are log2 values in hardware units. */
415 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE(sc) + SAFEXCEL_HIA_DFE_CFG(pe),
416 SAFEXCEL_HIA_DFE_CFG_DIS_DEBUG |
417 SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
418 SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(9) |
419 SAFEXCEL_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
420 SAFEXCEL_HIA_DxE_CFG_MAX_CTRL_SIZE(7) |
421 SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS) |
422 SAFEXCEL_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS));
424 /* Configure the PE DMA transfer thresholds. */
425 SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_DBUF_THRES(pe),
426 SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
427 SAFEXCEL_PE_IN_xBUF_THRES_MAX(9));
428 SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_TBUF_THRES(pe),
429 SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
430 SAFEXCEL_PE_IN_xBUF_THRES_MAX(7));
434 * Configure the data store engine. This component parses result descriptors
435 * and sets up DMA transfers from the processing engine to host memory.
438 safexcel_configure_dse(struct safexcel_softc *sc, int pe)
443 /* Disable and reset all DSE threads. */
445 SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
446 SAFEXCEL_DxE_THR_CTRL_RESET_PE);
448 /* Wait for a second for threads to go idle. */
/* Poll the RDR-ID field until all bits read back set (idle). */
450 val = SAFEXCEL_READ(sc,
451 SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_STAT(pe));
452 if ((val & SAFEXCEL_DSE_THR_RDR_ID_MASK) ==
453 SAFEXCEL_DSE_THR_RDR_ID_MASK)
455 if (count++ > 10000) {
456 device_printf(sc->sc_dev, "DSE reset timeout\n");
462 /* Exit the reset state. */
464 SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe), 0);
466 /* DMA transfer size to use */
467 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE(sc) + SAFEXCEL_HIA_DSE_CFG(pe),
468 SAFEXCEL_HIA_DSE_CFG_DIS_DEBUG |
469 SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
470 SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(8) |
471 SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS) |
472 SAFEXCEL_HIA_DSE_CFG_ALLWAYS_BUFFERABLE);
474 /* Configure the procesing engine thresholds */
476 SAFEXCEL_PE(sc) + SAFEXCEL_PE_OUT_DBUF_THRES(pe),
477 SAFEXCEL_PE_OUT_DBUF_THRES_MIN(7) |
478 SAFEXCEL_PE_OUT_DBUF_THRES_MAX(8));
/*
 * Bring each command/result descriptor ring pair to a clean state:
 * mask ring interrupts, disable external triggering, clear the
 * prepared/processed counters and pointers, and program the ring
 * sizes in 32-bit words.
 */
484 safexcel_hw_prepare_rings(struct safexcel_softc *sc)
488 for (i = 0; i < sc->sc_config.rings; i++) {
490 * Command descriptors.
493 /* Clear interrupts for this ring. */
495 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
496 SAFEXCEL_HIA_AIC_R_ENABLE_CLR_ALL_MASK);
498 /* Disable external triggering. */
500 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);
502 /* Clear the pending prepared counter. */
504 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
505 SAFEXCEL_xDR_PREP_CLR_COUNT);
507 /* Clear the pending processed counter. */
509 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
510 SAFEXCEL_xDR_PROC_CLR_COUNT);
513 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
515 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
518 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
519 SAFEXCEL_RING_SIZE * sc->sc_config.cd_offset *
523 * Result descriptors.
526 /* Disable external triggering. */
528 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);
530 /* Clear the pending prepared counter. */
532 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
533 SAFEXCEL_xDR_PREP_CLR_COUNT);
535 /* Clear the pending processed counter. */
537 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
538 SAFEXCEL_xDR_PROC_CLR_COUNT);
541 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
543 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
547 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
548 SAFEXCEL_RING_SIZE * sc->sc_config.rd_offset *
/*
 * Program each ring pair for operation: descriptor ring DMA base
 * addresses, descriptor size/offset/mode, fetch thresholds, DMA
 * read/write cache attributes, and finally enable the RDR interrupt
 * for each ring.
 */
554 safexcel_hw_setup_rings(struct safexcel_softc *sc)
556 struct safexcel_ring *ring;
557 uint32_t cd_size_rnd, mask, rd_size_rnd, val;
/* Round descriptor sizes up to host-data-width units for fetch cfg. */
560 mask = (1 << sc->sc_config.hdw) - 1;
561 cd_size_rnd = (sc->sc_config.cd_size + mask) >> sc->sc_config.hdw;
/* For the RDR, exclude the result-data portion of the descriptor. */
562 val = (sizeof(struct safexcel_res_descr) -
563 sizeof(struct safexcel_res_data)) / sizeof(uint32_t);
564 rd_size_rnd = (val + mask) >> sc->sc_config.hdw;
566 for (i = 0; i < sc->sc_config.rings; i++) {
567 ring = &sc->sc_ring[i];
570 * Command descriptors.
573 /* Ring base address. */
574 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
575 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
576 SAFEXCEL_ADDR_LO(ring->cdr.dma.paddr));
577 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
578 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
579 SAFEXCEL_ADDR_HI(ring->cdr.dma.paddr));
582 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
583 SAFEXCEL_xDR_DESC_MODE_64BIT | SAFEXCEL_CDR_DESC_MODE_ADCP |
584 (sc->sc_config.cd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
585 sc->sc_config.cd_size);
588 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
589 ((SAFEXCEL_FETCH_COUNT * (cd_size_rnd << sc->sc_config.hdw)) <<
590 SAFEXCEL_xDR_xD_FETCH_THRESH) |
591 (SAFEXCEL_FETCH_COUNT * sc->sc_config.cd_offset));
593 /* Configure DMA tx control. */
595 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
596 SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
597 SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS));
599 /* Clear any pending interrupt. */
601 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
602 SAFEXCEL_CDR_INTR_MASK);
605 * Result descriptors.
608 /* Ring base address. */
609 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
610 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
611 SAFEXCEL_ADDR_LO(ring->rdr.dma.paddr));
612 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
613 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
614 SAFEXCEL_ADDR_HI(ring->rdr.dma.paddr));
617 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
618 SAFEXCEL_xDR_DESC_MODE_64BIT |
619 (sc->sc_config.rd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
620 sc->sc_config.rd_size);
623 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
624 ((SAFEXCEL_FETCH_COUNT * (rd_size_rnd << sc->sc_config.hdw)) <<
625 SAFEXCEL_xDR_xD_FETCH_THRESH) |
626 (SAFEXCEL_FETCH_COUNT * sc->sc_config.rd_offset));
628 /* Configure DMA tx control. */
630 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
631 SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
632 SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS) |
633 SAFEXCEL_HIA_xDR_WR_RES_BUF | SAFEXCEL_HIA_xDR_WR_CTRL_BUF);
635 /* Clear any pending interrupt. */
637 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
638 SAFEXCEL_RDR_INTR_MASK);
640 /* Enable ring interrupt. */
642 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CTRL(i),
643 SAFEXCEL_RDR_IRQ(i));
647 /* Reset the command and result descriptor rings. */
649 safexcel_hw_reset_rings(struct safexcel_softc *sc)
653 for (i = 0; i < sc->sc_config.rings; i++) {
655 * Result descriptor ring operations.
658 /* Reset ring base address. */
659 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
660 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
661 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
662 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);
664 /* Clear the pending prepared counter. */
666 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
667 SAFEXCEL_xDR_PREP_CLR_COUNT);
669 /* Clear the pending processed counter. */
671 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
672 SAFEXCEL_xDR_PROC_CLR_COUNT);
675 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
677 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
680 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);
682 /* Clear any pending interrupt. */
684 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
685 SAFEXCEL_RDR_INTR_MASK);
687 /* Disable ring interrupt. */
689 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
690 SAFEXCEL_RDR_IRQ(i));
693 * Command descriptor ring operations.
696 /* Reset ring base address. */
697 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
698 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
699 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
700 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);
702 /* Clear the pending prepared counter. */
704 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
705 SAFEXCEL_xDR_PREP_CLR_COUNT);
707 /* Clear the pending processed counter. */
709 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
710 SAFEXCEL_xDR_PROC_CLR_COUNT);
713 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
715 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
718 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);
720 /* Clear any pending interrupt. */
722 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
723 SAFEXCEL_CDR_INTR_MASK);
/*
 * Enable one processing engine: build a mask of the configured rings
 * (the loop body accumulating ring_mask is not visible here), enable
 * the DFE and DSE threads for those rings, and clear any pending HIA
 * interrupts.
 */
728 safexcel_enable_pe_engine(struct safexcel_softc *sc, int pe)
732 for (ring_mask = 0, i = 0; i < sc->sc_config.rings; i++) {
737 /* Enable command descriptor rings. */
738 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
739 SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);
741 /* Enable result descriptor rings. */
742 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
743 SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);
745 /* Clear any HIA interrupt. */
746 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
747 SAFEXCEL_AIC_G_ACK_HIA_MASK);
/*
 * Hand prepared descriptors to the hardware.  When the caller hints
 * that more requests are coming (CRYPTO_HINT_MORE), just accumulate
 * the pending descriptor counts for batching; otherwise program the
 * interrupt threshold and write the prepared byte counts to the RDR
 * and CDR so the engine starts fetching.  Caller must hold the ring
 * lock.
 */
751 safexcel_execute(struct safexcel_softc *sc, struct safexcel_ring *ring,
752 struct safexcel_request *req, int hint)
754 int ringidx, ncdesc, nrdesc;
757 mtx_assert(&ring->mtx, MA_OWNED);
759 if ((hint & CRYPTO_HINT_MORE) != 0) {
761 ring->pending_cdesc += req->cdescs;
762 ring->pending_rdesc += req->rdescs;
766 ringidx = req->ringidx;
768 busy = ring->queued != 0;
769 ncdesc = ring->pending_cdesc + req->cdescs;
770 nrdesc = ring->pending_rdesc + req->rdescs;
771 ring->queued += ring->pending + 1;
/* Program the completion-interrupt threshold in packet mode. */
775 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
776 SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | ring->queued);
/* Tell the engine how many descriptor bytes were prepared. */
779 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
780 nrdesc * sc->sc_config.rd_offset * sizeof(uint32_t));
782 SAFEXCEL_HIA_CDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
783 ncdesc * sc->sc_config.cd_offset * sizeof(uint32_t));
785 ring->pending = ring->pending_cdesc = ring->pending_rdesc = 0;
/*
 * Initialize per-ring software state: the ring mutex, pending and
 * read/write indices, and the per-slot additional-token (atok) DMA
 * addresses pre-filled into each command descriptor.
 */
789 safexcel_init_rings(struct safexcel_softc *sc)
791 struct safexcel_cmd_descr *cdesc;
792 struct safexcel_ring *ring;
796 for (i = 0; i < sc->sc_config.rings; i++) {
797 ring = &sc->sc_ring[i];
799 snprintf(ring->lockname, sizeof(ring->lockname),
800 "safexcel_ring%d", i);
801 mtx_init(&ring->mtx, ring->lockname, NULL, MTX_DEF);
803 ring->pending = ring->pending_cdesc = ring->pending_rdesc = 0;
805 ring->cdr.read = ring->cdr.write = 0;
806 ring->rdr.read = ring->rdr.write = 0;
/* Each slot gets a fixed window of the atok DMA region. */
807 for (j = 0; j < SAFEXCEL_RING_SIZE; j++) {
808 cdesc = &ring->cdr.desc[j];
809 atok = ring->dma_atok.paddr +
810 sc->sc_config.atok_offset * j;
811 cdesc->atok_lo = SAFEXCEL_ADDR_LO(atok);
812 cdesc->atok_hi = SAFEXCEL_ADDR_HI(atok);
/*
 * bus_dmamap_load() callback: record the single segment's bus address
 * in the safexcel_dma_mem descriptor passed as "arg".
 */
818 safexcel_dma_alloc_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
821 struct safexcel_dma_mem *sdm;
826 KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg));
828 sdm->paddr = segs->ds_addr;
/*
 * Allocate a page-aligned, physically contiguous, coherent DMA buffer
 * of "size" bytes below 4GB and record its tag/map/vaddr/paddr in
 * *sdm.  On failure, previously acquired resources are released via
 * the (partially visible) error unwind at the bottom.
 */
832 safexcel_dma_alloc_mem(struct safexcel_softc *sc, struct safexcel_dma_mem *sdm,
837 KASSERT(sdm->vaddr == NULL,
838 ("%s: DMA memory descriptor in use.", __func__));
840 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
841 PAGE_SIZE, 0, /* alignment, boundary */
842 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
843 BUS_SPACE_MAXADDR, /* highaddr */
844 NULL, NULL, /* filtfunc, filtfuncarg */
845 size, 1, /* maxsize, nsegments */
846 size, BUS_DMA_COHERENT, /* maxsegsz, flags */
847 NULL, NULL, /* lockfunc, lockfuncarg */
848 &sdm->tag); /* dmat */
850 device_printf(sc->sc_dev,
851 "failed to allocate busdma tag, error %d\n", error);
855 error = bus_dmamem_alloc(sdm->tag, (void **)&sdm->vaddr,
856 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sdm->map);
858 device_printf(sc->sc_dev,
859 "failed to allocate DMA safe memory, error %d\n", error);
863 error = bus_dmamap_load(sdm->tag, sdm->map, sdm->vaddr, size,
864 safexcel_dma_alloc_mem_cb, sdm, BUS_DMA_NOWAIT);
866 device_printf(sc->sc_dev,
867 "cannot get address of the DMA memory, error %d\n", error);
/* Error unwind: free in reverse order of acquisition. */
873 bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
875 bus_dma_tag_destroy(sdm->tag);
/* Tear down a DMA buffer allocated by safexcel_dma_alloc_mem(). */
883 safexcel_dma_free_mem(struct safexcel_dma_mem *sdm)
885 bus_dmamap_unload(sdm->tag, sdm->map);
886 bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
887 bus_dma_tag_destroy(sdm->tag);
/*
 * Free all per-ring DMA resources (CDR/RDR rings, token memory and the
 * data DMA tag) and destroy each ring mutex.
 */
891 safexcel_dma_free_rings(struct safexcel_softc *sc)
893 struct safexcel_ring *ring;
896 for (i = 0; i < sc->sc_config.rings; i++) {
897 ring = &sc->sc_ring[i];
898 safexcel_dma_free_mem(&ring->cdr.dma);
899 safexcel_dma_free_mem(&ring->dma_atok);
900 safexcel_dma_free_mem(&ring->rdr.dma);
901 bus_dma_tag_destroy(ring->data_dtag);
902 mtx_destroy(&ring->mtx);
/*
 * Allocate per-ring DMA resources: a tag for request payload buffers
 * plus the CDR ring, additional-token region and RDR ring.  On any
 * failure, everything allocated so far is released via
 * safexcel_dma_free_rings() (the partially visible error path).
 */
907 safexcel_dma_init(struct safexcel_softc *sc)
909 struct safexcel_ring *ring;
913 for (i = 0; i < sc->sc_config.rings; i++) {
914 ring = &sc->sc_ring[i];
/* Tag for scatter/gather payload mappings (below 4GB). */
916 error = bus_dma_tag_create(
917 bus_get_dma_tag(sc->sc_dev),/* parent */
918 1, 0, /* alignment, boundary */
919 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
920 BUS_SPACE_MAXADDR, /* highaddr */
921 NULL, NULL, /* filtfunc, filtfuncarg */
922 SAFEXCEL_MAX_REQUEST_SIZE, /* maxsize */
923 SAFEXCEL_MAX_FRAGMENTS, /* nsegments */
924 SAFEXCEL_MAX_REQUEST_SIZE, /* maxsegsz */
925 BUS_DMA_COHERENT, /* flags */
926 NULL, NULL, /* lockfunc, lockfuncarg */
927 &ring->data_dtag); /* dmat */
929 device_printf(sc->sc_dev,
930 "bus_dma_tag_create main failed; error %d\n", error);
934 size = sizeof(uint32_t) * sc->sc_config.cd_offset *
936 error = safexcel_dma_alloc_mem(sc, &ring->cdr.dma, size);
938 device_printf(sc->sc_dev,
939 "failed to allocate CDR DMA memory, error %d\n",
944 (struct safexcel_cmd_descr *)ring->cdr.dma.vaddr;
946 /* Allocate additional CDR token memory. */
947 size = (bus_size_t)sc->sc_config.atok_offset *
949 error = safexcel_dma_alloc_mem(sc, &ring->dma_atok, size);
951 device_printf(sc->sc_dev,
952 "failed to allocate atoken DMA memory, error %d\n",
957 size = sizeof(uint32_t) * sc->sc_config.rd_offset *
959 error = safexcel_dma_alloc_mem(sc, &ring->rdr.dma, size);
961 device_printf(sc->sc_dev,
962 "failed to allocate RDR DMA memory, error %d\n",
967 (struct safexcel_res_descr *)ring->rdr.dma.vaddr;
972 safexcel_dma_free_rings(sc);
/* Reverse safexcel_init_hw(): reset the rings, then free DMA memory. */
977 safexcel_deinit_hw(struct safexcel_softc *sc)
979 safexcel_hw_reset_rings(sc);
980 safexcel_dma_free_rings(sc);
/*
 * Full hardware bring-up, following the vendor initialization sequence
 * (section numbers in the inline comments refer to the hardware
 * programming manual): probe configuration, allocate DMA rings, set up
 * bus access, mask global interrupts, configure the DFE/DSE per
 * processing engine, enable protocol features, and prepare/enable the
 * descriptor rings.
 */
984 safexcel_init_hw(struct safexcel_softc *sc)
988 /* 23.3.7 Initialization */
989 if (safexcel_configure(sc) != 0)
992 if (safexcel_dma_init(sc) != 0)
995 safexcel_init_rings(sc);
997 safexcel_init_hia_bus_access(sc);
999 /* 23.3.7.2 Disable EIP-97 global Interrupts */
1000 safexcel_disable_global_interrupts(sc);
1002 for (pe = 0; pe < sc->sc_config.pes; pe++) {
1003 /* 23.3.7.3 Configure Data Fetch Engine */
1004 safexcel_configure_dfe_engine(sc, pe);
1006 /* 23.3.7.4 Configure Data Store Engine */
1007 if (safexcel_configure_dse(sc, pe)) {
1008 safexcel_deinit_hw(sc);
1012 /* 23.3.7.5 1. Protocol enables */
1014 SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION_EN(pe),
1017 SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION2_EN(pe),
1021 safexcel_hw_prepare_rings(sc);
1023 /* 23.3.7.5 Configure the Processing Engine(s). */
1024 for (pe = 0; pe < sc->sc_config.pes; pe++)
1025 safexcel_enable_pe_engine(sc, pe);
1027 safexcel_hw_setup_rings(sc);
/*
 * Install one interrupt handler per ring IRQ resource and bind each to
 * a CPU round-robin; on failure, tear down the handlers installed so
 * far.
 */
1033 safexcel_setup_dev_interrupts(struct safexcel_softc *sc)
1037 for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++) {
1038 sc->sc_ih[i].sc = sc;
1039 sc->sc_ih[i].ring = i;
1041 if (bus_setup_intr(sc->sc_dev, sc->sc_intr[i],
1042 INTR_TYPE_NET | INTR_MPSAFE, NULL, safexcel_ring_intr,
1043 &sc->sc_ih[i], &sc->sc_ih[i].handle)) {
1044 device_printf(sc->sc_dev,
1045 "couldn't setup interrupt %d\n", i);
1049 error = bus_bind_intr(sc->sc_dev, sc->sc_intr[i], i % mp_ncpus);
1051 device_printf(sc->sc_dev,
1052 "failed to bind ring %d\n", error);
/* Error unwind: remove the handlers installed before the failure. */
1058 for (j = 0; j < i; j++)
1059 bus_teardown_intr(sc->sc_dev, sc->sc_intr[j],
1060 sc->sc_ih[j].handle);
/* Remove every installed ring interrupt handler. */
1066 safexcel_teardown_dev_interrupts(struct safexcel_softc *sc)
1070 for (i = 0; i < SAFEXCEL_MAX_RINGS; i++)
1071 bus_teardown_intr(sc->sc_dev, sc->sc_intr[i],
1072 sc->sc_ih[i].handle);
/*
 * Allocate bus resources: the memory-mapped register window and one
 * IRQ per ring, with the IRQ index looked up by its device-tree
 * "interrupt-names" entry ("ring%d").  The error unwind releases
 * whatever was allocated.  NOTE(review): several lines (locals, rid
 * assignment, loop exit) are missing from this extraction.
 */
1076 safexcel_alloc_dev_resources(struct safexcel_softc *sc)
1084 node = ofw_bus_get_node(dev);
1087 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1089 if (sc->sc_res == NULL) {
1090 device_printf(dev, "couldn't allocate memory resources\n");
1094 for (i = 0; i < SAFEXCEL_MAX_RINGS; i++) {
1095 (void)snprintf(name, sizeof(name), "ring%d", i);
1096 error = ofw_bus_find_string_index(node, "interrupt-names", name,
1101 sc->sc_intr[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1102 RF_ACTIVE | RF_SHAREABLE);
1103 if (sc->sc_intr[i] == NULL) {
1109 device_printf(dev, "couldn't allocate interrupt resources\n");
/* Error unwind: release the IRQs and the register window. */
1117 for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
1118 bus_release_resource(dev, SYS_RES_IRQ,
1119 rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
1120 bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->sc_res),
/* Release the ring IRQs and the register window, if allocated. */
1126 safexcel_free_dev_resources(struct safexcel_softc *sc)
1130 for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
1131 bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
1132 rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
1133 if (sc->sc_res != NULL)
1134 bus_release_resource(sc->sc_dev, SYS_RES_MEMORY,
1135 rman_get_rid(sc->sc_res), sc->sc_res);
/*
 * Newbus probe: match the device tree "compatible" string against
 * safexcel_compat and stash the EIP part number in the softc.
 */
1139 safexcel_probe(device_t dev)
1141 struct safexcel_softc *sc;
1143 if (!ofw_bus_status_okay(dev))
1146 sc = device_get_softc(dev);
1147 sc->sc_type = ofw_bus_search_compatible(dev, safexcel_compat)->ocd_data;
1148 if (sc->sc_type == 0)
1151 device_set_desc(dev, "SafeXcel EIP-97 crypto accelerator");
1153 return (BUS_PROBE_DEFAULT);
/*
 * Newbus attach: allocate bus resources and interrupts, initialize the
 * hardware, allocate per-ring sglists plus per-request DMA maps and
 * context records, publish debug/statistics sysctls, and register the
 * driver with the crypto framework.  Error unwind lines are only
 * partially visible in this extraction.
 */
1157 safexcel_attach(device_t dev)
1159 struct sysctl_ctx_list *ctx;
1160 struct sysctl_oid *oid;
1161 struct sysctl_oid_list *children;
1162 struct safexcel_softc *sc;
1163 struct safexcel_request *req;
1164 struct safexcel_ring *ring;
1167 sc = device_get_softc(dev);
1171 if (safexcel_alloc_dev_resources(sc))
1174 if (safexcel_setup_dev_interrupts(sc))
1177 if (safexcel_init_hw(sc))
1180 for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
1181 ring = &sc->sc_ring[ringidx];
1183 ring->cmd_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);
1184 ring->res_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);
/* Per-request DMA map and context record for each ring slot. */
1186 for (i = 0; i < SAFEXCEL_RING_SIZE; i++) {
1187 req = &ring->requests[i];
1189 req->ringidx = ringidx;
1190 if (bus_dmamap_create(ring->data_dtag,
1191 BUS_DMA_COHERENT, &req->dmap) != 0) {
1192 for (j = 0; j < i; j++)
1193 bus_dmamap_destroy(ring->data_dtag,
1194 ring->requests[j].dmap);
1197 if (safexcel_dma_alloc_mem(sc, &req->ctx,
1198 sizeof(struct safexcel_context_record)) != 0) {
1199 for (j = 0; j < i; j++) {
1200 bus_dmamap_destroy(ring->data_dtag,
1201 ring->requests[j].dmap);
1202 safexcel_dma_free_mem(
1203 &ring->requests[j].ctx);
/* Debug verbosity and statistics sysctls. */
1210 ctx = device_get_sysctl_ctx(dev);
1211 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1212 OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->sc_debug, 0,
1213 "Debug message verbosity");
1215 oid = device_get_sysctl_tree(sc->sc_dev);
1216 children = SYSCTL_CHILDREN(oid);
1217 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
1218 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
1219 children = SYSCTL_CHILDREN(oid);
1221 sc->sc_req_alloc_failures = counter_u64_alloc(M_WAITOK);
1222 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "req_alloc_failures",
1223 CTLFLAG_RD, &sc->sc_req_alloc_failures,
1224 "Number of request allocation failures");
1225 sc->sc_cdesc_alloc_failures = counter_u64_alloc(M_WAITOK);
1226 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cdesc_alloc_failures",
1227 CTLFLAG_RD, &sc->sc_cdesc_alloc_failures,
1228 "Number of command descriptor ring overflows");
1229 sc->sc_rdesc_alloc_failures = counter_u64_alloc(M_WAITOK);
1230 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "rdesc_alloc_failures",
1231 CTLFLAG_RD, &sc->sc_rdesc_alloc_failures,
1232 "Number of result descriptor ring overflows");
1234 sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safexcel_session),
1235 CRYPTOCAP_F_HARDWARE);
/* Error unwind labels (bodies partially missing from this view). */
1242 safexcel_teardown_dev_interrupts(sc);
1244 safexcel_free_dev_resources(sc);
/*
 * Newbus detach: unregister from the crypto framework, free statistics
 * counters, destroy per-request DMA maps and context records plus the
 * per-ring sglists, then reset the hardware and release interrupts and
 * bus resources.
 */
1250 safexcel_detach(device_t dev)
1252 struct safexcel_ring *ring;
1253 struct safexcel_softc *sc;
1256 sc = device_get_softc(dev);
1258 if (sc->sc_cid >= 0)
1259 crypto_unregister_all(sc->sc_cid);
1261 counter_u64_free(sc->sc_req_alloc_failures);
1262 counter_u64_free(sc->sc_cdesc_alloc_failures);
1263 counter_u64_free(sc->sc_rdesc_alloc_failures);
1265 for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
1266 ring = &sc->sc_ring[ringidx];
1267 for (i = 0; i < SAFEXCEL_RING_SIZE; i++) {
1268 bus_dmamap_destroy(ring->data_dtag,
1269 ring->requests[i].dmap);
1270 safexcel_dma_free_mem(&ring->requests[i].ctx);
1272 sglist_free(ring->cmd_data);
1273 sglist_free(ring->res_data);
1275 safexcel_deinit_hw(sc);
1276 safexcel_teardown_dev_interrupts(sc);
1277 safexcel_free_dev_resources(sc);
1283 * Pre-compute the hash key used in GHASH, which is a block of zeroes encrypted
1284 * using the cipher key.
1287 safexcel_setkey_ghash(const uint8_t *key, int klen, uint32_t *hashkey)
1289 uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)];
1290 uint8_t zeros[AES_BLOCK_LEN];
/* The GHASH subkey H is E_K(0^128): encrypt one all-zero block. */
1293 memset(zeros, 0, sizeof(zeros));
1295 rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY);
1296 rijndaelEncrypt(ks, rounds, zeros, (uint8_t *)hashkey);
/* The hardware expects the subkey as big-endian 32-bit words. */
1297 for (i = 0; i < GMAC_BLOCK_LEN / sizeof(uint32_t); i++)
1298 hashkey[i] = htobe32(hashkey[i]);
/* The expanded key schedule is key material; scrub it before return. */
1300 explicit_bzero(ks, sizeof(ks));
1304 * Pre-compute the combined CBC-MAC key, which consists of three keys K1, K2, K3
1305 * in the hardware implementation. K1 is the cipher key and comes last in the
1306 * buffer since K2 and K3 have a fixed size of AES_BLOCK_LEN. For now XCBC-MAC
1307 * is not implemented so K2 and K3 are fixed.
1310 safexcel_setkey_xcbcmac(const uint8_t *key, int klen, uint32_t *hashkey)
/* K2 and K3 occupy the first two AES blocks and are fixed at zero here. */
1314 memset(hashkey, 0, 2 * AES_BLOCK_LEN);
1315 off = 2 * AES_BLOCK_LEN / sizeof(uint32_t);
/*
 * K1 (the cipher key) follows, byte-swapped into the big-endian word
 * layout the engine expects.
 */
1316 for (i = 0; i < klen / sizeof(uint32_t); i++, key += 4)
1317 hashkey[i + off] = htobe32(le32dec(key));
/*
 * Copy the intermediate (post-key-block) hash state out of the software
 * auth context into "buf" in the byte order the hardware expects:
 * big-endian 32-bit words for SHA-1/224/256, big-endian 64-bit words for
 * SHA-384/512.  SHA-224 and SHA-384 use the state size of their wider
 * siblings since the truncation happens only at output time.
 */
1321 safexcel_setkey_hmac_digest(struct auth_hash *ahash, union authctx *ctx,
1326 switch (ahash->type) {
1327 case CRYPTO_SHA1_HMAC:
1328 hashwords = ahash->hashsize / sizeof(uint32_t);
1329 for (i = 0; i < hashwords; i++)
1330 ((uint32_t *)buf)[i] = htobe32(ctx->sha1ctx.h.b32[i]);
1332 case CRYPTO_SHA2_224_HMAC:
/* SHA-224 carries full SHA-256-sized internal state. */
1333 hashwords = auth_hash_hmac_sha2_256.hashsize / sizeof(uint32_t);
1334 for (i = 0; i < hashwords; i++)
1335 ((uint32_t *)buf)[i] = htobe32(ctx->sha224ctx.state[i]);
1337 case CRYPTO_SHA2_256_HMAC:
1338 hashwords = ahash->hashsize / sizeof(uint32_t);
1339 for (i = 0; i < hashwords; i++)
1340 ((uint32_t *)buf)[i] = htobe32(ctx->sha256ctx.state[i]);
1342 case CRYPTO_SHA2_384_HMAC:
/* SHA-384 carries full SHA-512-sized internal state. */
1343 hashwords = auth_hash_hmac_sha2_512.hashsize / sizeof(uint64_t);
1344 for (i = 0; i < hashwords; i++)
1345 ((uint64_t *)buf)[i] = htobe64(ctx->sha384ctx.state[i]);
1347 case CRYPTO_SHA2_512_HMAC:
1348 hashwords = ahash->hashsize / sizeof(uint64_t);
1349 for (i = 0; i < hashwords; i++)
1350 ((uint64_t *)buf)[i] = htobe64(ctx->sha512ctx.state[i]);
1356 * Pre-compute the inner and outer digests used in the HMAC algorithm.
1359 safexcel_setkey_hmac(const struct crypto_session_params *csp,
1360 const uint8_t *key, int klen, uint8_t *ipad, uint8_t *opad)
1363 struct auth_hash *ahash;
1365 ahash = crypto_auth_hash(csp);
/* Hash key XOR ipad, then key XOR opad, exporting each partial state. */
1366 hmac_init_ipad(ahash, key, klen, &ctx);
1367 safexcel_setkey_hmac_digest(ahash, &ctx, ipad);
1368 hmac_init_opad(ahash, key, klen, &ctx);
1369 safexcel_setkey_hmac_digest(ahash, &ctx, opad);
/* ctx holds key-derived state; scrub it before returning. */
1370 explicit_bzero(&ctx, ahash->ctxsize);
/*
 * Copy the XTS tweak key (the second half of the combined key) into the
 * context record.  NOTE(review): assumes the caller has already halved
 * klen so that key + klen points at the tweak half — confirm against
 * safexcel_set_context, which tests for CRYPTO_AES_XTS before use.
 */
1374 safexcel_setkey_xts(const uint8_t *key, int klen, uint8_t *tweakkey)
1376 memcpy(tweakkey, key + klen, klen);
1380 * Populate a context record with parameters from a session. Some consumers
1381 * specify per-request keys, in which case the context must be re-initialized
1385 safexcel_set_context(struct safexcel_context_record *ctx, int op,
1386 const uint8_t *ckey, const uint8_t *akey, struct safexcel_session *sess)
1388 const struct crypto_session_params *csp;
1390 uint32_t ctrl0, ctrl1;
1391 int aklen, alg, cklen, off;
1393 csp = crypto_get_params(sess->cses);
1394 aklen = csp->csp_auth_klen;
1395 cklen = csp->csp_cipher_klen;
/* XTS keys are split in half: cipher key + tweak key. */
1396 if (csp->csp_cipher_alg == CRYPTO_AES_XTS)
1399 ctrl0 = sess->alg | sess->digest | sess->hash;
/*
 * Lay out key material in the context record: the raw cipher key (or,
 * for plain GMAC, the auth key) comes first.
 */
1402 data = (uint8_t *)ctx->data;
1403 if (csp->csp_cipher_alg != 0) {
1404 memcpy(data, ckey, cklen);
1406 } else if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC) {
1407 memcpy(data, akey, aklen);
/* Append cipher-specific derived key material at offset "off". */
1413 switch (csp->csp_cipher_alg) {
1414 case CRYPTO_AES_NIST_GCM_16:
1415 safexcel_setkey_ghash(ckey, cklen, (uint32_t *)(data + off));
1416 off += GMAC_BLOCK_LEN;
1418 case CRYPTO_AES_CCM_16:
1419 safexcel_setkey_xcbcmac(ckey, cklen, (uint32_t *)(data + off));
1420 off += AES_BLOCK_LEN * 2 + cklen;
1422 case CRYPTO_AES_XTS:
1423 safexcel_setkey_xts(ckey, cklen, data + off);
/* Append auth-specific state: GHASH subkey, or HMAC ipad/opad digests. */
1427 switch (csp->csp_auth_alg) {
1428 case CRYPTO_AES_NIST_GMAC:
1429 safexcel_setkey_ghash(akey, aklen, (uint32_t *)(data + off));
1430 off += GMAC_BLOCK_LEN;
1432 case CRYPTO_SHA1_HMAC:
1433 case CRYPTO_SHA2_224_HMAC:
1434 case CRYPTO_SHA2_256_HMAC:
1435 case CRYPTO_SHA2_384_HMAC:
1436 case CRYPTO_SHA2_512_HMAC:
1437 safexcel_setkey_hmac(csp, akey, aklen,
1438 data + off, data + off + sess->statelen);
1439 off += sess->statelen * 2;
/* Record the total context payload size in 32-bit words. */
1442 ctrl0 |= SAFEXCEL_CONTROL0_SIZE(off / sizeof(uint32_t));
1444 alg = csp->csp_cipher_alg;
1446 alg = csp->csp_auth_alg;
/* Select engine data-flow type bits based on algorithm and direction. */
1449 case CRYPTO_AES_CCM_16:
1450 if (CRYPTO_OP_IS_ENCRYPT(op)) {
1451 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_ENCRYPT_OUT |
1452 SAFEXCEL_CONTROL0_KEY_EN;
1454 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_DECRYPT_HASH_IN |
1455 SAFEXCEL_CONTROL0_KEY_EN;
/* CCM loads all four IV words from the token. */
1457 ctrl1 |= SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
1458 SAFEXCEL_CONTROL1_IV2 | SAFEXCEL_CONTROL1_IV3;
1460 case CRYPTO_AES_CBC:
1461 case CRYPTO_AES_ICM:
1462 case CRYPTO_AES_XTS:
1463 if (CRYPTO_OP_IS_ENCRYPT(op)) {
1464 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
1465 SAFEXCEL_CONTROL0_KEY_EN;
/* Encrypt-then-authenticate when an auth algorithm is paired. */
1466 if (csp->csp_auth_alg != 0)
1468 SAFEXCEL_CONTROL0_TYPE_ENCRYPT_HASH_OUT;
1470 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
1471 SAFEXCEL_CONTROL0_KEY_EN;
1472 if (csp->csp_auth_alg != 0)
1473 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
1476 case CRYPTO_AES_NIST_GCM_16:
1477 case CRYPTO_AES_NIST_GMAC:
1478 if (CRYPTO_OP_IS_ENCRYPT(op) || csp->csp_auth_alg != 0) {
1479 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
1480 SAFEXCEL_CONTROL0_KEY_EN |
1481 SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
1483 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
1484 SAFEXCEL_CONTROL0_KEY_EN |
1485 SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
/* GCM proper: counter mode with the first three IV words from token. */
1487 if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) {
1488 ctrl1 |= SAFEXCEL_CONTROL1_COUNTER_MODE |
1489 SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
1490 SAFEXCEL_CONTROL1_IV2;
/* Plain (keyless) digests restart the hash from its IV. */
1494 case CRYPTO_SHA2_224:
1495 case CRYPTO_SHA2_256:
1496 case CRYPTO_SHA2_384:
1497 case CRYPTO_SHA2_512:
1498 ctrl0 |= SAFEXCEL_CONTROL0_RESTART_HASH;
1500 case CRYPTO_SHA1_HMAC:
1501 case CRYPTO_SHA2_224_HMAC:
1502 case CRYPTO_SHA2_256_HMAC:
1503 case CRYPTO_SHA2_384_HMAC:
1504 case CRYPTO_SHA2_512_HMAC:
1505 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
1509 ctx->control0 = ctrl0;
1510 ctx->control1 = ctrl1;
1516 * Construct a no-op instruction, used to pad input tokens.
1519 safexcel_instr_nop(struct safexcel_instr **instrp)
1521 struct safexcel_instr *instr;
1524 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
/*
 * NOTE(review): (1 << 2) looks like a hardware length encoding rather
 * than a literal byte count — confirm against the EIP-97 token format.
 */
1525 instr->length = (1 << 2);
1527 instr->instructions = 0;
/* Advance the caller's cursor past the instruction just written. */
1529 *instrp = instr + 1;
1533 * Insert the digest of the input payload. This is typically the last
1534 * instruction of a sequence.
1537 safexcel_instr_insert_digest(struct safexcel_instr **instrp, int len)
1539 struct safexcel_instr *instr;
/* Emit one INSERT instruction that appends the computed digest. */
1542 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1543 instr->length = len;
/* Marks end of both the hash stream and the packet. */
1544 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
1545 SAFEXCEL_INSTR_STATUS_LAST_PACKET;
1546 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1547 SAFEXCEL_INSTR_INSERT_HASH_DIGEST;
1549 *instrp = instr + 1;
1553 * Retrieve and verify a digest.
1556 safexcel_instr_retrieve_digest(struct safexcel_instr **instrp, int len)
1558 struct safexcel_instr *instr;
/* First instruction: pull the expected digest from the input stream. */
1561 instr->opcode = SAFEXCEL_INSTR_OPCODE_RETRIEVE;
1562 instr->length = len;
1563 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
1564 SAFEXCEL_INSTR_STATUS_LAST_PACKET;
1565 instr->instructions = SAFEXCEL_INSTR_INSERT_HASH_DIGEST;
/* Second instruction: have the engine compare it with its own result. */
1568 instr->opcode = SAFEXCEL_INSTR_OPCODE_VERIFY_FIELDS;
1569 instr->length = len | SAFEXCEL_INSTR_VERIFY_HASH;
1570 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
1571 SAFEXCEL_INSTR_STATUS_LAST_PACKET;
1572 instr->instructions = SAFEXCEL_INSTR_VERIFY_PADDING;
1574 *instrp = instr + 1;
/*
 * Emit an instruction pair that removes one AES block from the result
 * stream and re-inserts an AES block routed to both the output and the
 * crypto engine.  Used by the GCM/CCM/GMAC token builders below.
 */
1578 safexcel_instr_temp_aes_block(struct safexcel_instr **instrp)
1580 struct safexcel_instr *instr;
1583 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT_REMOVE_RESULT;
1586 instr->instructions = AES_BLOCK_LEN;
1589 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1590 instr->length = AES_BLOCK_LEN;
1592 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1593 SAFEXCEL_INSTR_DEST_CRYPTO;
1595 *instrp = instr + 1;
1599 * Handle a request for an unauthenticated block cipher.
1602 safexcel_instr_cipher(struct safexcel_request *req,
1603 struct safexcel_instr *instr, struct safexcel_cmd_descr *cdesc)
1605 struct cryptop *crp;
1609 /* Insert the payload. */
1610 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1611 instr->length = crp->crp_payload_length;
/* The whole payload is one segment: last of packet and of hash stream. */
1612 instr->status = SAFEXCEL_INSTR_STATUS_LAST_PACKET |
1613 SAFEXCEL_INSTR_STATUS_LAST_HASH;
1614 instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1615 SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_OUTPUT;
/* Exactly one token instruction follows the command descriptor. */
1617 cdesc->additional_cdata_size = 1;
/*
 * Build the token instruction sequence for an encrypt-then-authenticate
 * (ETA) request: AAD to the hash engine, payload to cipher+hash+output,
 * then insert or verify the digest depending on direction.
 */
1621 safexcel_instr_eta(struct safexcel_request *req, struct safexcel_instr *instr,
1622 struct safexcel_cmd_descr *cdesc)
1624 const struct crypto_session_params *csp;
1625 struct cryptop *crp;
1626 struct safexcel_instr *start;
1629 csp = crypto_get_params(crp->crp_session);
1632 /* Insert the AAD. */
1633 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1634 instr->length = crp->crp_aad_length;
/* If there is no payload, the AAD terminates the hash stream. */
1635 instr->status = crp->crp_payload_length == 0 ?
1636 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1637 instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1638 SAFEXCEL_INSTR_DEST_HASH;
1641 /* Encrypt any data left in the request. */
1642 if (crp->crp_payload_length > 0) {
1643 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1644 instr->length = crp->crp_payload_length;
1645 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1646 instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1647 SAFEXCEL_INSTR_DEST_CRYPTO |
1648 SAFEXCEL_INSTR_DEST_HASH |
1649 SAFEXCEL_INSTR_DEST_OUTPUT;
1654 * Compute the digest, or extract it and place it in the output stream.
1656 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1657 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1659 safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
/* Tell the engine how many extra token instructions were emitted. */
1660 cdesc->additional_cdata_size = instr - start;
/*
 * Build the inline token for a plain or HMAC SHA digest request: feed
 * the payload to the hash engine, emit the digest, and NOP-pad the
 * remaining inline instruction slots.
 */
1664 safexcel_instr_sha_hash(struct safexcel_request *req,
1665 struct safexcel_instr *instr)
1667 struct cryptop *crp;
1668 struct safexcel_instr *start;
1673 /* Pass the input data to the hash engine. */
1674 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1675 instr->length = crp->crp_payload_length;
1676 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1677 instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1680 /* Insert the hash result into the output stream. */
1681 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1683 /* Pad the rest of the inline instruction space. */
1684 while (instr != start + SAFEXCEL_MAX_ITOKENS)
1685 safexcel_instr_nop(&instr);
/*
 * Build the token instruction sequence for AES-CCM: construct the A0
 * counter block and B0 authentication block, insert AAD with its length
 * prefix and zero padding, pass the payload through cipher and hash,
 * pad to the block size, and insert or verify the digest.
 */
1689 safexcel_instr_ccm(struct safexcel_request *req, struct safexcel_instr *instr,
1690 struct safexcel_cmd_descr *cdesc)
1692 struct cryptop *crp;
1693 struct safexcel_instr *start;
1694 uint8_t *a0, *b0, *alenp, L;
1701 * Construct two blocks, A0 and B0, used in encryption and
1702 * authentication, respectively. A0 is embedded in the token
1703 * descriptor, and B0 is inserted directly into the data stream using
1704 * instructions below.
1706 * OCF seems to assume a 12-byte IV, fixing L (the payload length size)
1707 * at 3 bytes due to the layout of B0. This is fine since the driver
1708 * has a maximum request size of 65535 bytes anyway.
1710 blen = AES_BLOCK_LEN;
/* A0 lives in the token area of the command descriptor itself. */
1713 a0 = (uint8_t *)&cdesc->control_data.token[0];
1714 memset(a0, 0, blen);
1716 memcpy(&a0[1], req->iv, AES_CCM_IV_LEN);
1719 * Insert B0 and the AAD length into the input stream.
1721 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
/* Two extra bytes hold the big-endian AAD length when AAD is present. */
1722 instr->length = blen + (crp->crp_aad_length > 0 ? 2 : 0);
1724 instr->instructions = SAFEXCEL_INSTR_DEST_HASH |
1725 SAFEXCEL_INSTR_INSERT_IMMEDIATE;
/* B0 flags byte: L encoding, digest length, and AAD-present bit. */
1728 b0 = (uint8_t *)instr;
1729 memset(b0, 0, blen);
1731 (L - 1) | /* payload length size */
1732 ((CCM_CBC_MAX_DIGEST_LEN - 2) / 2) << 3 /* digest length */ |
1733 (crp->crp_aad_length > 0 ? 1 : 0) << 6 /* AAD present bit */;
1734 memcpy(&b0[1], req->iv, AES_CCM_IV_LEN);
/* Big-endian 16-bit payload length in the final two bytes of B0. */
1735 b0[14] = crp->crp_payload_length >> 8;
1736 b0[15] = crp->crp_payload_length & 0xff;
1737 instr += blen / sizeof(*instr);
1739 /* Insert the AAD length and data into the input stream. */
1740 if (crp->crp_aad_length > 0) {
1741 alenp = (uint8_t *)instr;
1742 alenp[0] = crp->crp_aad_length >> 8;
1743 alenp[1] = crp->crp_aad_length & 0xff;
1748 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1749 instr->length = crp->crp_aad_length;
1751 instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1754 /* Insert zero padding. */
/* Pad AAD + 2-byte length prefix up to the next block boundary. */
1755 aalign = (crp->crp_aad_length + 2) & (blen - 1);
1756 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1757 instr->length = aalign == 0 ? 0 :
1758 blen - ((crp->crp_aad_length + 2) & (blen - 1));
1759 instr->status = crp->crp_payload_length == 0 ?
1760 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1761 instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1765 safexcel_instr_temp_aes_block(&instr);
1767 /* Insert the cipher payload into the input stream. */
1768 if (crp->crp_payload_length > 0) {
1769 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1770 instr->length = crp->crp_payload_length;
/* Only the last hash block if the payload is block-aligned. */
1771 instr->status = (crp->crp_payload_length & (blen - 1)) == 0 ?
1772 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1773 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1774 SAFEXCEL_INSTR_DEST_CRYPTO |
1775 SAFEXCEL_INSTR_DEST_HASH |
1776 SAFEXCEL_INSTR_INS_LAST;
1779 /* Insert zero padding. */
1780 if (crp->crp_payload_length & (blen - 1)) {
1781 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1782 instr->length = blen -
1783 (crp->crp_payload_length & (blen - 1));
1784 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1785 instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1791 * Compute the digest, or extract it and place it in the output stream.
1793 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1794 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1796 safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
1798 cdesc->additional_cdata_size = instr - start;
/*
 * Build the token instruction sequence for AES-GCM: the J0 counter
 * block (IV || 0x00000001) is placed in the token, then AAD goes to the
 * hash engine, the payload to cipher+hash+output, and the digest is
 * inserted or verified depending on direction.
 */
1802 safexcel_instr_gcm(struct safexcel_request *req, struct safexcel_instr *instr,
1803 struct safexcel_cmd_descr *cdesc)
1805 struct cryptop *crp;
1806 struct safexcel_instr *start;
/* J0: 12-byte IV followed by a big-endian counter value of 1. */
1808 memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN);
1809 cdesc->control_data.token[3] = htobe32(1);
1814 /* Insert the AAD into the input stream. */
1815 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1816 instr->length = crp->crp_aad_length;
1817 instr->status = crp->crp_payload_length == 0 ?
1818 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1819 instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1820 SAFEXCEL_INSTR_DEST_HASH;
1823 safexcel_instr_temp_aes_block(&instr);
1825 /* Insert the cipher payload into the input stream. */
1826 if (crp->crp_payload_length > 0) {
1827 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1828 instr->length = crp->crp_payload_length;
1829 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1830 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1831 SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_HASH |
1832 SAFEXCEL_INSTR_INS_LAST;
1837 * Compute the digest, or extract it and place it in the output stream.
1839 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1840 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1842 safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
1844 cdesc->additional_cdata_size = instr - start;
/*
 * Build the token instruction sequence for AES-GMAC (authentication
 * only): all input goes to the hash engine and the computed digest is
 * appended to the output stream.
 */
1848 safexcel_instr_gmac(struct safexcel_request *req, struct safexcel_instr *instr,
1849 struct safexcel_cmd_descr *cdesc)
1851 struct cryptop *crp;
1852 struct safexcel_instr *start;
/* Same J0 construction as GCM: IV followed by counter value 1. */
1854 memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN);
1855 cdesc->control_data.token[3] = htobe32(1);
1860 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1861 instr->length = crp->crp_payload_length;
1862 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1863 instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1864 SAFEXCEL_INSTR_DEST_HASH;
1867 safexcel_instr_temp_aes_block(&instr);
1869 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1871 cdesc->additional_cdata_size = instr - start;
/*
 * Populate the context record and token for a request: select or build
 * the context record (per-request keys force a rebuild), then dispatch
 * to the per-algorithm token builders above.
 */
1875 safexcel_set_token(struct safexcel_request *req)
1877 const struct crypto_session_params *csp;
1878 struct cryptop *crp;
1879 struct safexcel_cmd_descr *cdesc;
1880 struct safexcel_context_record *ctx;
1881 struct safexcel_context_template *ctxtmp;
1882 struct safexcel_instr *instr;
1883 struct safexcel_softc *sc;
1884 const uint8_t *akey, *ckey;
1888 csp = crypto_get_params(crp->crp_session);
1891 ringidx = req->ringidx;
1893 akey = crp->crp_auth_key;
1894 ckey = crp->crp_cipher_key;
1895 if (akey != NULL || ckey != NULL) {
1897 * If we have a per-request key we have to generate the context
1898 * record on the fly.
/* Fall back to session keys for whichever key was not overridden. */
1901 akey = csp->csp_auth_key;
1903 ckey = csp->csp_cipher_key;
1904 ctx = (struct safexcel_context_record *)req->ctx.vaddr;
1905 (void)safexcel_set_context(ctx, crp->crp_op, ckey, akey,
1909 * Use the context record template computed at session
1910 * initialization time.
1912 ctxtmp = CRYPTO_OP_IS_ENCRYPT(crp->crp_op) ?
1913 &req->sess->encctx : &req->sess->decctx;
/* Copy the template payload past the two control words. */
1915 memcpy(req->ctx.vaddr + 2 * sizeof(uint32_t), ctx->data,
1918 cdesc->control_data.control0 = ctx->control0;
1919 cdesc->control_data.control1 = ctx->control1;
1922 * For keyless hash operations, the token instructions can be embedded
1923 * in the token itself. Otherwise we use an additional token descriptor
1924 * and the embedded instruction space is used to store the IV.
1926 if (csp->csp_cipher_alg == 0 &&
1927 csp->csp_auth_alg != CRYPTO_AES_NIST_GMAC) {
1928 instr = (void *)cdesc->control_data.token;
/* External token: locate this descriptor's slot in the atok area. */
1930 instr = (void *)(sc->sc_ring[ringidx].dma_atok.vaddr +
1931 sc->sc_config.atok_offset *
1932 (cdesc - sc->sc_ring[ringidx].cdr.desc));
1933 cdesc->control_data.options |= SAFEXCEL_OPTION_4_TOKEN_IV_CMD;
/* Dispatch on cipher algorithm first, then auth-only algorithms. */
1936 switch (csp->csp_cipher_alg) {
1937 case CRYPTO_AES_NIST_GCM_16:
1938 safexcel_instr_gcm(req, instr, cdesc);
1940 case CRYPTO_AES_CCM_16:
1941 safexcel_instr_ccm(req, instr, cdesc);
1943 case CRYPTO_AES_XTS:
1944 memcpy(cdesc->control_data.token, req->iv, AES_XTS_IV_LEN);
1945 memset(cdesc->control_data.token +
1946 AES_XTS_IV_LEN / sizeof(uint32_t), 0, AES_XTS_IV_LEN);
1948 safexcel_instr_cipher(req, instr, cdesc);
1950 case CRYPTO_AES_CBC:
1951 case CRYPTO_AES_ICM:
1952 memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_LEN);
1953 if (csp->csp_auth_alg != 0)
1954 safexcel_instr_eta(req, instr, cdesc);
1956 safexcel_instr_cipher(req, instr, cdesc);
1959 switch (csp->csp_auth_alg) {
1961 case CRYPTO_SHA1_HMAC:
1962 case CRYPTO_SHA2_224:
1963 case CRYPTO_SHA2_224_HMAC:
1964 case CRYPTO_SHA2_256:
1965 case CRYPTO_SHA2_256_HMAC:
1966 case CRYPTO_SHA2_384:
1967 case CRYPTO_SHA2_384_HMAC:
1968 case CRYPTO_SHA2_512:
1969 case CRYPTO_SHA2_512_HMAC:
1970 safexcel_instr_sha_hash(req, instr);
1972 case CRYPTO_AES_NIST_GMAC:
1973 safexcel_instr_gmac(req, instr, cdesc);
/* Session setup should have rejected anything else. */
1976 panic("unhandled auth request %d", csp->csp_auth_alg);
/*
 * Append one result descriptor to the ring.  Returns NULL when the ring
 * is full (one slot is kept free to distinguish full from empty).
 * Caller must hold the ring mutex.
 */
1982 static struct safexcel_res_descr *
1983 safexcel_res_descr_add(struct safexcel_ring *ring, bool first, bool last,
1984 bus_addr_t data, uint32_t len)
1986 struct safexcel_res_descr *rdesc;
1987 struct safexcel_res_descr_ring *rring;
1989 mtx_assert(&ring->mtx, MA_OWNED);
/* Full when advancing write would collide with read. */
1992 if ((rring->write + 1) % SAFEXCEL_RING_SIZE == rring->read)
1995 rdesc = &rring->desc[rring->write];
1996 rring->write = (rring->write + 1) % SAFEXCEL_RING_SIZE;
1998 rdesc->particle_size = len;
2000 rdesc->descriptor_overflow = 0;
2001 rdesc->buffer_overflow = 0;
2002 rdesc->last_seg = last;
2003 rdesc->first_seg = first;
2004 rdesc->result_size =
2005 sizeof(struct safexcel_res_data) / sizeof(uint32_t);
2007 rdesc->data_lo = SAFEXCEL_ADDR_LO(data);
2008 rdesc->data_hi = SAFEXCEL_ADDR_HI(data);
/* Clear result fields the hardware will fill in on completion. */
2011 rdesc->result_data.packet_length = 0;
2012 rdesc->result_data.error_code = 0;
/*
 * Append one command descriptor to the ring, pointing at a data segment
 * of "seglen" bytes within a request of "reqlen" total bytes, with its
 * context record at "context".  Returns NULL when the ring is full.
 * Caller must hold the ring mutex.
 */
2018 static struct safexcel_cmd_descr *
2019 safexcel_cmd_descr_add(struct safexcel_ring *ring, bool first, bool last,
2020 bus_addr_t data, uint32_t seglen, uint32_t reqlen, bus_addr_t context)
2022 struct safexcel_cmd_descr *cdesc;
2023 struct safexcel_cmd_descr_ring *cring;
2025 KASSERT(reqlen <= SAFEXCEL_MAX_REQUEST_SIZE,
2026 ("%s: request length %u too long", __func__, reqlen));
2027 mtx_assert(&ring->mtx, MA_OWNED);
/* Full when advancing write would collide with read. */
2030 if ((cring->write + 1) % SAFEXCEL_RING_SIZE == cring->read)
2033 cdesc = &cring->desc[cring->write];
2034 cring->write = (cring->write + 1) % SAFEXCEL_RING_SIZE;
2036 cdesc->particle_size = seglen;
2038 cdesc->last_seg = last;
2039 cdesc->first_seg = first;
2040 cdesc->additional_cdata_size = 0;
2042 cdesc->data_lo = SAFEXCEL_ADDR_LO(data);
2043 cdesc->data_hi = SAFEXCEL_ADDR_HI(data);
2045 cdesc->control_data.packet_length = reqlen;
2046 cdesc->control_data.options = SAFEXCEL_OPTION_IP |
2047 SAFEXCEL_OPTION_CP | SAFEXCEL_OPTION_CTX_CTRL_IN_CMD |
2048 SAFEXCEL_OPTION_RC_AUTO;
2049 cdesc->control_data.type = SAFEXCEL_TOKEN_TYPE_BYPASS;
/* Context pointer is split across two words; mark it as "small". */
2050 cdesc->control_data.context_lo = SAFEXCEL_ADDR_LO(context) |
2051 SAFEXCEL_CONTEXT_SMALL;
2052 cdesc->control_data.context_hi = SAFEXCEL_ADDR_HI(context);
/*
 * Undo the last "count" command-descriptor allocations by rewinding the
 * write index (with wraparound).  Caller must hold the ring mutex.
 */
2059 safexcel_cmd_descr_rollback(struct safexcel_ring *ring, int count)
2061 struct safexcel_cmd_descr_ring *cring;
2063 mtx_assert(&ring->mtx, MA_OWNED);
2066 cring->write -= count;
2067 if (cring->write < 0)
2068 cring->write += SAFEXCEL_RING_SIZE;
/*
 * Undo the last "count" result-descriptor allocations by rewinding the
 * write index (with wraparound).  Caller must hold the ring mutex.
 */
2072 safexcel_res_descr_rollback(struct safexcel_ring *ring, int count)
2074 struct safexcel_res_descr_ring *rring;
2076 mtx_assert(&ring->mtx, MA_OWNED);
2079 rring->write -= count;
2080 if (rring->write < 0)
2081 rring->write += SAFEXCEL_RING_SIZE;
/*
 * Append the byte range [start, start + len) of a bus_dma segment array
 * to an sglist, splitting on segment boundaries.  Panics if the segment
 * array is exhausted before "len" bytes have been covered.
 */
2085 safexcel_append_segs(bus_dma_segment_t *segs, int nseg, struct sglist *sg,
2088 bus_dma_segment_t *seg;
2092 for (i = 0; i < nseg && len > 0; i++) {
/* Skip whole segments that lie entirely before the start offset. */
2095 if (seg->ds_len <= start) {
2096 start -= seg->ds_len;
2100 seglen = MIN(len, seg->ds_len - start);
2101 error = sglist_append_phys(sg, seg->ds_addr + start, seglen);
2103 panic("%s: ran out of segments: %d", __func__, error);
/*
 * bus_dmamap_load callback: translate the request's DMA segments into
 * command (input) and result (output) descriptor chains, in the order
 * the engine requires (AAD, then payload, then digest).  On ring
 * exhaustion the partially built chain is rolled back and req->error is
 * set to ERESTART so the request can be retried.
 */
2110 safexcel_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg,
2113 const struct crypto_session_params *csp;
2114 struct cryptop *crp;
2115 struct safexcel_cmd_descr *cdesc;
2116 struct safexcel_request *req;
2117 struct safexcel_ring *ring;
2118 struct safexcel_session *sess;
2131 csp = crypto_get_params(crp->crp_session);
2133 ring = &req->sc->sc_ring[req->ringidx];
2135 mtx_assert(&ring->mtx, MA_OWNED);
2138 * Set up descriptors for input and output data.
2140 * The processing engine programs require that any AAD comes first,
2141 * followed by the cipher plaintext, followed by the digest. Some
2142 * consumers place the digest first in the input buffer, in which case
2143 * we have to create an extra descriptor.
2145 * As an optimization, unmodified data is not passed to the output
2148 sglist_reset(ring->cmd_data);
2149 sglist_reset(ring->res_data);
2150 if (crp->crp_aad_length != 0) {
2151 safexcel_append_segs(segs, nseg, ring->cmd_data,
2152 crp->crp_aad_start, crp->crp_aad_length);
2154 safexcel_append_segs(segs, nseg, ring->cmd_data,
2155 crp->crp_payload_start, crp->crp_payload_length);
/* Only cipher output (not digest-only requests) goes to res_data. */
2156 if (csp->csp_cipher_alg != 0) {
2157 safexcel_append_segs(segs, nseg, ring->res_data,
2158 crp->crp_payload_start, crp->crp_payload_length);
2160 if (sess->digestlen > 0) {
/* Verify: digest is an input; generate: digest is an output. */
2161 if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
2162 safexcel_append_segs(segs, nseg, ring->cmd_data,
2163 crp->crp_digest_start, sess->digestlen);
2165 safexcel_append_segs(segs, nseg, ring->res_data,
2166 crp->crp_digest_start, sess->digestlen);
2170 sg = ring->cmd_data;
2171 if (sg->sg_nseg == 0) {
2173 * Fake a segment for the command descriptor if the input has
2174 * length zero. The EIP97 apparently does not handle
2175 * zero-length packets properly since subsequent requests return
2176 * bogus errors, so provide a dummy segment using the context
2177 * descriptor. Also, we must allocate at least one command ring
2178 * entry per request to keep the request shadow ring in sync.
2180 (void)sglist_append_phys(sg, req->ctx.paddr, 1);
/* Total input length is carried in every command descriptor. */
2182 for (i = 0, inlen = 0; i < sg->sg_nseg; i++)
2183 inlen += sg->sg_segs[i].ss_len;
2184 for (i = 0; i < sg->sg_nseg; i++) {
2186 last = i == sg->sg_nseg - 1;
2188 cdesc = safexcel_cmd_descr_add(ring, first, last,
2189 sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len,
2190 (uint32_t)inlen, req->ctx.paddr);
2191 if (cdesc == NULL) {
/* Ring full: roll back what we added and ask for a retry. */
2192 safexcel_cmd_descr_rollback(ring, i);
2193 counter_u64_add(req->sc->sc_cdesc_alloc_failures, 1);
2194 req->error = ERESTART;
2200 req->cdescs = sg->sg_nseg;
2202 sg = ring->res_data;
2203 if (sg->sg_nseg == 0) {
2205 * We need a result descriptor even if the output stream will be
2206 * empty, for example when verifying an AAD digest.
2208 sg->sg_segs[0].ss_paddr = 0;
2209 sg->sg_segs[0].ss_len = 0;
2212 for (i = 0; i < sg->sg_nseg; i++) {
2214 last = i == sg->sg_nseg - 1;
2216 if (safexcel_res_descr_add(ring, first, last,
2217 sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len) == NULL) {
/* Roll back both chains so the rings stay consistent. */
2218 safexcel_cmd_descr_rollback(ring,
2219 ring->cmd_data->sg_nseg);
2220 safexcel_res_descr_rollback(ring, i);
2221 counter_u64_add(req->sc->sc_rdesc_alloc_failures, 1);
2222 req->error = ERESTART;
2226 req->rdescs = sg->sg_nseg;
/*
 * Load the request's crypto buffer for DMA; the actual descriptor chain
 * is built synchronously in safexcel_create_chain_cb (BUS_DMA_NOWAIT).
 * Any error detected inside the callback is surfaced via req->error.
 */
2230 safexcel_create_chain(struct safexcel_ring *ring, struct safexcel_request *req)
2235 req->cdescs = req->rdescs = 0;
2237 error = bus_dmamap_load_crp(ring->data_dtag, req->dmap, req->crp,
2238 safexcel_create_chain_cb, req, BUS_DMA_NOWAIT);
2240 req->dmap_loaded = true;
2242 if (req->error != 0)
/*
 * Validate the cipher parameters of a proposed session; used as a
 * boolean predicate by safexcel_probesession below.  Only AES-CBC/ICM
 * (block-size IV) and AES-XTS (XTS-size IV) are accepted here.
 */
2249 safexcel_probe_cipher(const struct crypto_session_params *csp)
2251 switch (csp->csp_cipher_alg) {
2252 case CRYPTO_AES_CBC:
2253 case CRYPTO_AES_ICM:
2254 if (csp->csp_ivlen != AES_BLOCK_LEN)
2257 case CRYPTO_AES_XTS:
2258 if (csp->csp_ivlen != AES_XTS_IV_LEN)
2269 * Determine whether the driver can implement a session with the requested
/*
 * CRYPTODEV probesession method: decide whether this driver can service
 * a session with the given parameters.  Checks mode, algorithm, and IV
 * length combinations; accepted sessions are claimed at hardware
 * priority.
 */
2273 safexcel_probesession(device_t dev, const struct crypto_session_params *csp)
2275 if (csp->csp_flags != 0)
2278 switch (csp->csp_mode) {
2279 case CSP_MODE_CIPHER:
2280 if (!safexcel_probe_cipher(csp))
2283 case CSP_MODE_DIGEST:
2284 switch (csp->csp_auth_alg) {
2285 case CRYPTO_AES_NIST_GMAC:
2286 if (csp->csp_ivlen != AES_GCM_IV_LEN)
2290 case CRYPTO_SHA1_HMAC:
2291 case CRYPTO_SHA2_224:
2292 case CRYPTO_SHA2_224_HMAC:
2293 case CRYPTO_SHA2_256:
2294 case CRYPTO_SHA2_256_HMAC:
2295 case CRYPTO_SHA2_384:
2296 case CRYPTO_SHA2_384_HMAC:
2297 case CRYPTO_SHA2_512:
2298 case CRYPTO_SHA2_512_HMAC:
/* AEAD modes: GCM and CCM with their standard IV sizes. */
2305 switch (csp->csp_cipher_alg) {
2306 case CRYPTO_AES_NIST_GCM_16:
2307 if (csp->csp_ivlen != AES_GCM_IV_LEN)
2310 case CRYPTO_AES_CCM_16:
2311 if (csp->csp_ivlen != AES_CCM_IV_LEN)
/* ETA mode: cipher plus a separate HMAC. */
2319 if (!safexcel_probe_cipher(csp))
2321 switch (csp->csp_cipher_alg) {
2322 case CRYPTO_AES_CBC:
2323 case CRYPTO_AES_ICM:
2325 * The EIP-97 does not support combining AES-XTS with
2328 if (csp->csp_auth_alg != CRYPTO_SHA1_HMAC &&
2329 csp->csp_auth_alg != CRYPTO_SHA2_224_HMAC &&
2330 csp->csp_auth_alg != CRYPTO_SHA2_256_HMAC &&
2331 csp->csp_auth_alg != CRYPTO_SHA2_384_HMAC &&
2332 csp->csp_auth_alg != CRYPTO_SHA2_512_HMAC)
2343 return (CRYPTODEV_PROBE_HARDWARE);
/*
 * Map an AES key length (in bytes) to the engine's cipher-algorithm
 * control bits; panics on an unsupported length, which session probing
 * should have rejected.
 */
2347 safexcel_aes_algid(int keylen)
2351 return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES128);
2353 return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES192);
2355 return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES256);
2357 panic("invalid AES key length %d", keylen);
/*
 * Map an AES key length (in bytes) to the engine's XCBC hash-algorithm
 * control bits used for CCM; panics on an unsupported length.
 */
2362 safexcel_aes_ccm_hashid(int keylen)
2366 return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC128);
2368 return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC192);
2370 return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC256);
2372 panic("invalid AES key length %d", keylen);
/*
 * Map an OCF SHA auth algorithm identifier to the engine's hash
 * algorithm control bits; plain and HMAC variants share the same bits.
 */
2377 safexcel_sha_hashid(int alg)
2381 case CRYPTO_SHA1_HMAC:
2382 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA1);
2383 case CRYPTO_SHA2_224:
2384 case CRYPTO_SHA2_224_HMAC:
2385 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA224);
2386 case CRYPTO_SHA2_256:
2387 case CRYPTO_SHA2_256_HMAC:
2388 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA256);
2389 case CRYPTO_SHA2_384:
2390 case CRYPTO_SHA2_384_HMAC:
2391 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA384);
2392 case CRYPTO_SHA2_512:
2393 case CRYPTO_SHA2_512_HMAC:
2394 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA512);
2396 __assert_unreachable();
/*
 * Map an OCF SHA auth algorithm identifier to its digest length in
 * bytes (the truncated output size for SHA-224/384).
 */
2401 safexcel_sha_hashlen(int alg)
2405 case CRYPTO_SHA1_HMAC:
2406 return (SHA1_HASH_LEN);
2407 case CRYPTO_SHA2_224:
2408 case CRYPTO_SHA2_224_HMAC:
2409 return (SHA2_224_HASH_LEN);
2410 case CRYPTO_SHA2_256:
2411 case CRYPTO_SHA2_256_HMAC:
2412 return (SHA2_256_HASH_LEN);
2413 case CRYPTO_SHA2_384:
2414 case CRYPTO_SHA2_384_HMAC:
2415 return (SHA2_384_HASH_LEN);
2416 case CRYPTO_SHA2_512:
2417 case CRYPTO_SHA2_512_HMAC:
2418 return (SHA2_512_HASH_LEN);
2420 __assert_unreachable();
/*
 * Map an OCF SHA auth algorithm identifier to the size of its internal
 * hash state in bytes.  SHA-224 shares SHA-256's state size and
 * SHA-384 shares SHA-512's, since truncation happens only at output.
 */
2425 safexcel_sha_statelen(int alg)
2429 case CRYPTO_SHA1_HMAC:
2430 return (SHA1_HASH_LEN);
2431 case CRYPTO_SHA2_224:
2432 case CRYPTO_SHA2_224_HMAC:
2433 case CRYPTO_SHA2_256:
2434 case CRYPTO_SHA2_256_HMAC:
2435 return (SHA2_256_HASH_LEN);
2436 case CRYPTO_SHA2_384:
2437 case CRYPTO_SHA2_384_HMAC:
2438 case CRYPTO_SHA2_512:
2439 case CRYPTO_SHA2_512_HMAC:
2440 return (SHA2_512_HASH_LEN);
2442 __assert_unreachable();
/*
 * CRYPTODEV newsession method: translate the OCF session parameters
 * into the engine's per-session control fields (digest type, hash and
 * cipher algorithm IDs, mode, digest/state lengths), and precompute the
 * encrypt/decrypt context record templates when the keys are already
 * known at session creation time.
 */
2447 safexcel_newsession(device_t dev, crypto_session_t cses,
2448 const struct crypto_session_params *csp)
2450 struct safexcel_session *sess;
2451 struct safexcel_softc *sc;
2453 sc = device_get_softc(dev);
2454 sess = crypto_get_driver_session(cses);
2457 switch (csp->csp_auth_alg) {
2459 case CRYPTO_SHA2_224:
2460 case CRYPTO_SHA2_256:
2461 case CRYPTO_SHA2_384:
2462 case CRYPTO_SHA2_512:
/* Plain digests: precomputed-IV mode, no key material. */
2463 sess->digest = SAFEXCEL_CONTROL0_DIGEST_PRECOMPUTED;
2464 sess->hash = safexcel_sha_hashid(csp->csp_auth_alg);
2465 sess->digestlen = safexcel_sha_hashlen(csp->csp_auth_alg);
2466 sess->statelen = safexcel_sha_statelen(csp->csp_auth_alg);
2468 case CRYPTO_SHA1_HMAC:
2469 case CRYPTO_SHA2_224_HMAC:
2470 case CRYPTO_SHA2_256_HMAC:
2471 case CRYPTO_SHA2_384_HMAC:
2472 case CRYPTO_SHA2_512_HMAC:
2473 sess->digest = SAFEXCEL_CONTROL0_DIGEST_HMAC;
2474 sess->hash = safexcel_sha_hashid(csp->csp_auth_alg);
2475 sess->digestlen = safexcel_sha_hashlen(csp->csp_auth_alg);
2476 sess->statelen = safexcel_sha_statelen(csp->csp_auth_alg);
2478 case CRYPTO_AES_NIST_GMAC:
2479 sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC;
2480 sess->digestlen = GMAC_DIGEST_LEN;
2481 sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH;
2482 sess->alg = safexcel_aes_algid(csp->csp_auth_klen);
2483 sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM;
2487 switch (csp->csp_cipher_alg) {
2488 case CRYPTO_AES_NIST_GCM_16:
2489 sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC;
2490 sess->digestlen = GMAC_DIGEST_LEN;
2491 sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH;
2492 sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2493 sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM;
2495 case CRYPTO_AES_CCM_16:
2496 sess->hash = safexcel_aes_ccm_hashid(csp->csp_cipher_klen);
2497 sess->digest = SAFEXCEL_CONTROL0_DIGEST_CCM;
2498 sess->digestlen = CCM_CBC_MAX_DIGEST_LEN;
2499 sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2500 sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CCM;
2502 case CRYPTO_AES_CBC:
2503 sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2504 sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CBC;
2506 case CRYPTO_AES_ICM:
2507 sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
2508 sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CTR;
2510 case CRYPTO_AES_XTS:
/* XTS key is two half-length keys; algid uses one half. */
2511 sess->alg = safexcel_aes_algid(csp->csp_cipher_klen / 2);
2512 sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_XTS;
/* Honor a caller-requested truncated MAC length. */
2516 if (csp->csp_auth_mlen != 0)
2517 sess->digestlen = csp->csp_auth_mlen;
/*
 * If all keys are known now, build both direction templates up front
 * so per-request token setup can just copy them.
 */
2519 if ((csp->csp_cipher_alg == 0 || csp->csp_cipher_key != NULL) &&
2520 (csp->csp_auth_alg == 0 || csp->csp_auth_key != NULL)) {
2521 sess->encctx.len = safexcel_set_context(&sess->encctx.ctx,
2522 CRYPTO_OP_ENCRYPT, csp->csp_cipher_key, csp->csp_auth_key,
2524 sess->decctx.len = safexcel_set_context(&sess->decctx.ctx,
2525 CRYPTO_OP_DECRYPT, csp->csp_cipher_key, csp->csp_auth_key,
/*
 * Cryptodev "process" entry point: submit one symmetric crypto request
 * (crp) to the engine.  Validates the buffer size, binds the request to
 * a per-CPU ring, builds the DMA descriptor chain and instruction token,
 * syncs all DMA memory for device access, and kicks the hardware.
 * Backpressure is signalled by marking the ring blocked for CRYPTO_SYMQ.
 */
2533 safexcel_process(device_t dev, struct cryptop *crp, int hint)
2535 const struct crypto_session_params *csp;
2536 struct safexcel_request *req;
2537 struct safexcel_ring *ring;
2538 struct safexcel_session *sess;
2539 struct safexcel_softc *sc;
2542 sc = device_get_softc(dev);
2543 sess = crypto_get_driver_session(crp->crp_session);
2544 csp = crypto_get_params(crp->crp_session);
/* The engine cannot DMA payloads larger than the advertised maximum. */
2546 if (__predict_false(crypto_buffer_len(&crp->crp_buf) >
2547 SAFEXCEL_MAX_REQUEST_SIZE)) {
2548 crp->crp_etype = E2BIG;
/*
 * Spread load across rings by current CPU; each ring has its own lock,
 * request pool, and descriptor rings.
 */
2553 ring = &sc->sc_ring[curcpu % sc->sc_config.rings];
2554 mtx_lock(&ring->mtx);
2555 req = safexcel_alloc_request(sc, ring);
2556 if (__predict_false(req == NULL)) {
/* Out of request slots: tell OCF to stop queueing symmetric ops. */
2557 ring->blocked = CRYPTO_SYMQ;
2558 mtx_unlock(&ring->mtx);
2559 counter_u64_add(sc->sc_req_alloc_failures, 1);
2566 crypto_read_iv(crp, req->iv);
2568 error = safexcel_create_chain(ring, req);
2569 if (__predict_false(error != 0)) {
2570 safexcel_free_request(ring, req);
/* ERESTART is a transient descriptor shortage: block and retry later. */
2571 if (error == ERESTART)
2572 ring->blocked = CRYPTO_SYMQ;
2573 mtx_unlock(&ring->mtx);
/* Any other error is fatal for this request; report it to the caller. */
2574 if (error != ERESTART) {
2575 crp->crp_etype = error;
2583 safexcel_set_token(req);
/*
 * Flush the data buffers, the per-request context record, and the
 * command/atoken/result rings so the device observes a coherent view
 * before the doorbell is rung in safexcel_execute().
 */
2585 bus_dmamap_sync(ring->data_dtag, req->dmap,
2586 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2587 bus_dmamap_sync(req->ctx.tag, req->ctx.map,
2588 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2589 bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
2590 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2591 bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
2592 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2593 bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
2594 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2596 safexcel_execute(sc, ring, req, hint);
2598 mtx_unlock(&ring->mtx);
/*
 * newbus method dispatch table: standard device lifecycle hooks plus the
 * cryptodev(9) session and request-processing interface.
 */
2603 static device_method_t safexcel_methods[] = {
2604 /* Device interface */
2605 DEVMETHOD(device_probe, safexcel_probe),
2606 DEVMETHOD(device_attach, safexcel_attach),
2607 DEVMETHOD(device_detach, safexcel_detach),
2609 /* Cryptodev interface */
2610 DEVMETHOD(cryptodev_probesession, safexcel_probesession),
2611 DEVMETHOD(cryptodev_newsession, safexcel_newsession),
2612 DEVMETHOD(cryptodev_process, safexcel_process),
2617 static devclass_t safexcel_devclass;
/* Driver definition; the bus allocates a softc of this size per instance. */
2619 static driver_t safexcel_driver = {
2621 .methods = safexcel_methods,
2622 .size = sizeof(struct safexcel_softc),
/*
 * Register under simplebus (the device is enumerated via FDT/OFW) and
 * declare a dependency on the opencrypto framework.
 */
2625 DRIVER_MODULE(safexcel, simplebus, safexcel_driver, safexcel_devclass, 0, 0);
2626 MODULE_VERSION(safexcel, 1);
2627 MODULE_DEPEND(safexcel, crypto, 1, 1, 1);