2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2020 Rubicon Communications, LLC (Netgate)
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
32 #include <sys/endian.h>
33 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/mutex.h>
39 #include <sys/sglist.h>
40 #include <sys/sysctl.h>
#include <sys/uio.h>
42 #include <machine/atomic.h>
43 #include <machine/bus.h>
45 #include <crypto/rijndael/rijndael.h>
46 #include <opencrypto/cryptodev.h>
47 #include <opencrypto/xform.h>
49 #include <dev/ofw/ofw_bus.h>
50 #include <dev/ofw/ofw_bus_subr.h>
52 #include "cryptodev_if.h"
54 #include "safexcel_reg.h"
55 #include "safexcel_var.h"
57 static MALLOC_DEFINE(M_SAFEXCEL, "safexcel_req", "safexcel request buffers");
60 * We only support the EIP97 for now.
62 static struct ofw_compat_data safexcel_compat[] = {
63 { "inside-secure,safexcel-eip97ies", (uintptr_t)97 },
64 { "inside-secure,safexcel-eip97", (uintptr_t)97 },
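/*
 * The ocd_data value above is saved as sc_type by safexcel_probe() and is
 * used by safexcel_configure() to select between the EIP-97 and EIP-197
 * register offset tables below.
 */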
68 const struct safexcel_reg_offsets eip97_regs_offset = {
69 .hia_aic = SAFEXCEL_EIP97_HIA_AIC_BASE,
70 .hia_aic_g = SAFEXCEL_EIP97_HIA_AIC_G_BASE,
71 .hia_aic_r = SAFEXCEL_EIP97_HIA_AIC_R_BASE,
72 .hia_aic_xdr = SAFEXCEL_EIP97_HIA_AIC_xDR_BASE,
73 .hia_dfe = SAFEXCEL_EIP97_HIA_DFE_BASE,
74 .hia_dfe_thr = SAFEXCEL_EIP97_HIA_DFE_THR_BASE,
75 .hia_dse = SAFEXCEL_EIP97_HIA_DSE_BASE,
76 .hia_dse_thr = SAFEXCEL_EIP97_HIA_DSE_THR_BASE,
77 .hia_gen_cfg = SAFEXCEL_EIP97_HIA_GEN_CFG_BASE,
78 .pe = SAFEXCEL_EIP97_PE_BASE,
81 const struct safexcel_reg_offsets eip197_regs_offset = {
82 .hia_aic = SAFEXCEL_EIP197_HIA_AIC_BASE,
83 .hia_aic_g = SAFEXCEL_EIP197_HIA_AIC_G_BASE,
84 .hia_aic_r = SAFEXCEL_EIP197_HIA_AIC_R_BASE,
85 .hia_aic_xdr = SAFEXCEL_EIP197_HIA_AIC_xDR_BASE,
86 .hia_dfe = SAFEXCEL_EIP197_HIA_DFE_BASE,
87 .hia_dfe_thr = SAFEXCEL_EIP197_HIA_DFE_THR_BASE,
88 .hia_dse = SAFEXCEL_EIP197_HIA_DSE_BASE,
89 .hia_dse_thr = SAFEXCEL_EIP197_HIA_DSE_THR_BASE,
90 .hia_gen_cfg = SAFEXCEL_EIP197_HIA_GEN_CFG_BASE,
91 .pe = SAFEXCEL_EIP197_PE_BASE,
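/*
 * The command and result descriptor rings are fixed-size circular buffers of
 * SAFEXCEL_RING_SIZE entries.  The "next" helpers below consume one completed
 * descriptor from the read side and return NULL once the read index has
 * caught up with the write index, i.e., the ring is empty.
 */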
94 static struct safexcel_cmd_descr *
95 safexcel_cmd_descr_next(struct safexcel_cmd_descr_ring *ring)
97 struct safexcel_cmd_descr *cdesc;
99 if (ring->write == ring->read)
101 cdesc = &ring->desc[ring->read];
102 ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
106 static struct safexcel_res_descr *
107 safexcel_res_descr_next(struct safexcel_res_descr_ring *ring)
109 struct safexcel_res_descr *rdesc;
111 if (ring->write == ring->read)
113 rdesc = &ring->desc[ring->read];
114 ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
118 static struct safexcel_request *
119 safexcel_alloc_request(struct safexcel_softc *sc, struct safexcel_ring *ring)
121 struct safexcel_request *req;
123 mtx_assert(&ring->mtx, MA_OWNED);
125 if ((req = STAILQ_FIRST(&ring->free_requests)) != NULL)
126 STAILQ_REMOVE_HEAD(&ring->free_requests, link);
131 safexcel_free_request(struct safexcel_ring *ring, struct safexcel_request *req)
133 struct safexcel_context_record *ctx;
135 mtx_assert(&ring->mtx, MA_OWNED);
137 if (req->dmap_loaded) {
138 bus_dmamap_unload(ring->data_dtag, req->dmap);
139 req->dmap_loaded = false;
141 ctx = (struct safexcel_context_record *)req->ctx.vaddr;
142 explicit_bzero(ctx->data, sizeof(ctx->data));
143 explicit_bzero(req->iv, sizeof(req->iv));
144 STAILQ_INSERT_TAIL(&ring->free_requests, req, link);
148 safexcel_enqueue_request(struct safexcel_softc *sc, struct safexcel_ring *ring,
149 struct safexcel_request *req)
151 mtx_assert(&ring->mtx, MA_OWNED);
153 STAILQ_INSERT_TAIL(&ring->ready_requests, req, link);
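	/*
	 * The request only joins the ready list at this point;
	 * safexcel_execute() later moves ready requests onto queued_requests
	 * and advertises their descriptors to the engine.
	 */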
157 safexcel_rdr_intr(struct safexcel_softc *sc, int ringidx)
159 struct safexcel_cmd_descr *cdesc;
160 struct safexcel_res_descr *rdesc;
161 struct safexcel_request *req;
162 struct safexcel_ring *ring;
163 uint32_t error, i, ncdescs, nrdescs, nreqs;
165 ring = &sc->sc_ring[ringidx];
167 mtx_lock(&ring->mtx);
168 nreqs = SAFEXCEL_READ(sc,
169 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT);
170 nreqs >>= SAFEXCEL_xDR_PROC_xD_PKT_OFFSET;
171 nreqs &= SAFEXCEL_xDR_PROC_xD_PKT_MASK;
173 SAFEXCEL_DPRINTF(sc, 1,
174 "zero pending requests on ring %d\n", ringidx);
178 ring = &sc->sc_ring[ringidx];
179 bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
180 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
181 bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
182 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
183 bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
184 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
186 ncdescs = nrdescs = 0;
187 for (i = 0; i < nreqs; i++) {
188 req = STAILQ_FIRST(&ring->queued_requests);
189 KASSERT(req != NULL, ("%s: expected %d pending requests",
191 STAILQ_REMOVE_HEAD(&ring->queued_requests, link);
192 mtx_unlock(&ring->mtx);
194 bus_dmamap_sync(req->ctx.tag, req->ctx.map,
195 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
196 bus_dmamap_sync(ring->data_dtag, req->dmap,
197 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
199 ncdescs += req->cdescs;
200 while (req->cdescs-- > 0) {
201 cdesc = safexcel_cmd_descr_next(&ring->cdr);
202 KASSERT(cdesc != NULL,
203 ("%s: missing control descriptor", __func__));
204 if (req->cdescs == 0)
205 KASSERT(cdesc->last_seg,
206 ("%s: chain is not terminated", __func__));
208 nrdescs += req->rdescs;
209 while (req->rdescs-- > 0) {
210 rdesc = safexcel_res_descr_next(&ring->rdr);
211 error = rdesc->result_data.error_code;
213 if (error == SAFEXCEL_RESULT_ERR_AUTH_FAILED &&
214 req->crp->crp_etype == 0) {
215 req->crp->crp_etype = EBADMSG;
217 SAFEXCEL_DPRINTF(sc, 1,
218 "error code %#x\n", error);
219 req->crp->crp_etype = EIO;
224 crypto_done(req->crp);
225 mtx_lock(&ring->mtx);
226 safexcel_free_request(ring, req);
231 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT,
232 SAFEXCEL_xDR_PROC_xD_PKT(nreqs) |
233 (sc->sc_config.rd_offset * nrdescs * sizeof(uint32_t)));
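	/*
	 * If requests are still outstanding, rearm the threshold so that
	 * another interrupt is raised once the next packet completes.
	 */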
236 if (!STAILQ_EMPTY(&ring->queued_requests)) {
238 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
239 SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | 1);
241 mtx_unlock(&ring->mtx);
245 safexcel_ring_intr(void *arg)
247 struct safexcel_softc *sc;
248 struct safexcel_intr_handle *ih;
249 uint32_t status, stat;
251 bool blocked, rdrpending;
257 status = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
258 SAFEXCEL_HIA_AIC_R_ENABLED_STAT(ring));
260 if (status & SAFEXCEL_CDR_IRQ(ring)) {
261 stat = SAFEXCEL_READ(sc,
262 SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
264 SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
265 stat & SAFEXCEL_CDR_INTR_MASK);
269 if (status & SAFEXCEL_RDR_IRQ(ring)) {
270 stat = SAFEXCEL_READ(sc,
271 SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
272 if ((stat & SAFEXCEL_xDR_ERR) == 0)
275 SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
276 stat & SAFEXCEL_RDR_INTR_MASK);
279 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ACK(ring),
283 safexcel_rdr_intr(sc, ring);
285 mtx_lock(&sc->sc_mtx);
286 blocked = sc->sc_blocked;
288 mtx_unlock(&sc->sc_mtx);
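	/*
	 * If submissions were blocked because ring resources were exhausted,
	 * tell the crypto framework that it may resume submitting requests now
	 * that descriptors have been reclaimed.
	 */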
291 crypto_unblock(sc->sc_cid, blocked);
295 safexcel_configure(struct safexcel_softc *sc)
297 uint32_t i, mask, pemask, reg;
300 if (sc->sc_type == 197) {
301 sc->sc_offsets = eip197_regs_offset;
302 pemask = SAFEXCEL_N_PES_MASK;
304 sc->sc_offsets = eip97_regs_offset;
305 pemask = EIP97_N_PES_MASK;
310 /* Scan for valid ring interrupt controllers. */
311 for (i = 0; i < SAFEXCEL_MAX_RING_AIC; i++) {
312 reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
313 SAFEXCEL_HIA_AIC_R_VERSION(i));
314 if (SAFEXCEL_REG_LO16(reg) != EIP201_VERSION_LE)
317 sc->sc_config.aic_rings = i;
318 if (sc->sc_config.aic_rings == 0)
321 reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_OPTIONS);
322 /* Check for 64bit addressing. */
323 if ((reg & SAFEXCEL_OPT_ADDR_64) == 0)
325 /* Check alignment constraints (which we do not support). */
326 if (((reg & SAFEXCEL_OPT_TGT_ALIGN_MASK) >>
327 SAFEXCEL_OPT_TGT_ALIGN_OFFSET) != 0)
331 (reg & SAFEXCEL_xDR_HDW_MASK) >> SAFEXCEL_xDR_HDW_OFFSET;
332 mask = (1 << sc->sc_config.hdw) - 1;
334 sc->sc_config.rings = reg & SAFEXCEL_N_RINGS_MASK;
335 /* Limit the number of rings to the number of ring interrupt controllers. */
336 sc->sc_config.rings = MIN(sc->sc_config.rings, sc->sc_config.aic_rings);
338 sc->sc_config.pes = (reg & pemask) >> SAFEXCEL_N_PES_OFFSET;
340 sc->sc_config.cd_size =
341 sizeof(struct safexcel_cmd_descr) / sizeof(uint32_t);
342 sc->sc_config.cd_offset = (sc->sc_config.cd_size + mask) & ~mask;
344 sc->sc_config.rd_size =
345 sizeof(struct safexcel_res_descr) / sizeof(uint32_t);
346 sc->sc_config.rd_offset = (sc->sc_config.rd_size + mask) & ~mask;
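	/*
	 * cd_offset and rd_offset round each descriptor up to a multiple of
	 * 2^hdw 32-bit words (the host data width field read above).  For
	 * example, hdw == 2 gives mask == 3, so a 10-word descriptor would
	 * occupy a 12-word slot in its ring.
	 */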
348 sc->sc_config.atok_offset =
349 (SAFEXCEL_MAX_ATOKENS * sizeof(struct safexcel_instr) + mask) &
356 safexcel_init_hia_bus_access(struct safexcel_softc *sc)
358 uint32_t version, val;
360 /* Determine endianness and configure byte swap. */
361 version = SAFEXCEL_READ(sc,
362 SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_VERSION);
363 val = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL);
364 if (SAFEXCEL_REG_HI16(version) == SAFEXCEL_HIA_VERSION_BE) {
365 val = SAFEXCEL_READ(sc,
366 SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL);
367 val = val ^ (SAFEXCEL_MST_CTRL_NO_BYTE_SWAP >> 24);
369 SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL,
373 /* Configure wr/rd cache values. */
374 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_GEN_CFG(sc) + SAFEXCEL_HIA_MST_CTRL,
375 SAFEXCEL_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
376 SAFEXCEL_MST_CTRL_WD_CACHE(WR_CACHE_4BITS));
380 safexcel_disable_global_interrupts(struct safexcel_softc *sc)
382 /* Disable and clear pending interrupts. */
384 SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ENABLE_CTRL, 0);
386 SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
387 SAFEXCEL_AIC_G_ACK_ALL_MASK);
391 * Configure the data fetch engine. This component parses command descriptors
392 * and sets up DMA transfers from host memory to the corresponding processing engine.
396 safexcel_configure_dfe_engine(struct safexcel_softc *sc, int pe)
398 /* Reset all DFE threads. */
400 SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
401 SAFEXCEL_DxE_THR_CTRL_RESET_PE);
403 /* Deassert the DFE reset. */
405 SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe), 0);
407 /* DMA transfer size to use. */
408 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE(sc) + SAFEXCEL_HIA_DFE_CFG(pe),
409 SAFEXCEL_HIA_DFE_CFG_DIS_DEBUG |
410 SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
411 SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(9) |
412 SAFEXCEL_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
413 SAFEXCEL_HIA_DxE_CFG_MAX_CTRL_SIZE(7) |
414 SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS) |
415 SAFEXCEL_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS));
417 /* Configure the PE DMA transfer thresholds. */
418 SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_DBUF_THRES(pe),
419 SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
420 SAFEXCEL_PE_IN_xBUF_THRES_MAX(9));
421 SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_TBUF_THRES(pe),
422 SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
423 SAFEXCEL_PE_IN_xBUF_THRES_MAX(7));
427 * Configure the data store engine. This component parses result descriptors
428 * and sets up DMA transfers from the processing engine to host memory.
431 safexcel_configure_dse(struct safexcel_softc *sc, int pe)
436 /* Disable and reset all DSE threads. */
438 SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
439 SAFEXCEL_DxE_THR_CTRL_RESET_PE);
441 /* Wait for a second for threads to go idle. */
443 val = SAFEXCEL_READ(sc,
444 SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_STAT(pe));
445 if ((val & SAFEXCEL_DSE_THR_RDR_ID_MASK) ==
446 SAFEXCEL_DSE_THR_RDR_ID_MASK)
448 if (count++ > 10000) {
449 device_printf(sc->sc_dev, "DSE reset timeout\n");
455 /* Exit the reset state. */
457 SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe), 0);
459 /* DMA transfer size to use. */
460 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE(sc) + SAFEXCEL_HIA_DSE_CFG(pe),
461 SAFEXCEL_HIA_DSE_CFG_DIS_DEBUG |
462 SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
463 SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(8) |
464 SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS) |
465 SAFEXCEL_HIA_DSE_CFG_ALLWAYS_BUFFERABLE);
467 /* Configure the processing engine thresholds. */
469 SAFEXCEL_PE(sc) + SAFEXCEL_PE_OUT_DBUF_THRES(pe),
470 SAFEXCEL_PE_OUT_DBUF_THRES_MIN(7) |
471 SAFEXCEL_PE_OUT_DBUF_THRES_MAX(8));
477 safexcel_hw_prepare_rings(struct safexcel_softc *sc)
481 for (i = 0; i < sc->sc_config.rings; i++) {
483 * Command descriptors.
486 /* Clear interrupts for this ring. */
488 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
489 SAFEXCEL_HIA_AIC_R_ENABLE_CLR_ALL_MASK);
491 /* Disable external triggering. */
493 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);
495 /* Clear the pending prepared counter. */
497 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
498 SAFEXCEL_xDR_PREP_CLR_COUNT);
500 /* Clear the pending processed counter. */
502 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
503 SAFEXCEL_xDR_PROC_CLR_COUNT);
506 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
508 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
511 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
512 SAFEXCEL_RING_SIZE * sc->sc_config.cd_offset *
516 * Result descriptors.
519 /* Disable external triggering. */
521 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);
523 /* Clear the pending prepared counter. */
525 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
526 SAFEXCEL_xDR_PREP_CLR_COUNT);
528 /* Clear the pending processed counter. */
530 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
531 SAFEXCEL_xDR_PROC_CLR_COUNT);
534 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
536 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
540 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
541 SAFEXCEL_RING_SIZE * sc->sc_config.rd_offset *
547 safexcel_hw_setup_rings(struct safexcel_softc *sc)
549 struct safexcel_ring *ring;
550 uint32_t cd_size_rnd, mask, rd_size_rnd, val;
553 mask = (1 << sc->sc_config.hdw) - 1;
554 cd_size_rnd = (sc->sc_config.cd_size + mask) >> sc->sc_config.hdw;
555 val = (sizeof(struct safexcel_res_descr) -
556 sizeof(struct safexcel_res_data)) / sizeof(uint32_t);
557 rd_size_rnd = (val + mask) >> sc->sc_config.hdw;
559 for (i = 0; i < sc->sc_config.rings; i++) {
560 ring = &sc->sc_ring[i];
563 * Command descriptors.
566 /* Ring base address. */
567 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
568 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
569 SAFEXCEL_ADDR_LO(ring->cdr.dma.paddr));
570 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
571 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
572 SAFEXCEL_ADDR_HI(ring->cdr.dma.paddr));
575 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
576 SAFEXCEL_xDR_DESC_MODE_64BIT | SAFEXCEL_CDR_DESC_MODE_ADCP |
577 (sc->sc_config.cd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
578 sc->sc_config.cd_size);
581 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
582 ((SAFEXCEL_FETCH_COUNT * (cd_size_rnd << sc->sc_config.hdw)) <<
583 SAFEXCEL_xDR_xD_FETCH_THRESH) |
584 (SAFEXCEL_FETCH_COUNT * sc->sc_config.cd_offset));
586 /* Configure DMA tx control. */
588 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
589 SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
590 SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS));
592 /* Clear any pending interrupt. */
594 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
595 SAFEXCEL_CDR_INTR_MASK);
598 * Result descriptors.
601 /* Ring base address. */
602 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
603 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
604 SAFEXCEL_ADDR_LO(ring->rdr.dma.paddr));
605 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
606 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
607 SAFEXCEL_ADDR_HI(ring->rdr.dma.paddr));
610 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
611 SAFEXCEL_xDR_DESC_MODE_64BIT |
612 (sc->sc_config.rd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
613 sc->sc_config.rd_size);
616 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
617 ((SAFEXCEL_FETCH_COUNT * (rd_size_rnd << sc->sc_config.hdw)) <<
618 SAFEXCEL_xDR_xD_FETCH_THRESH) |
619 (SAFEXCEL_FETCH_COUNT * sc->sc_config.rd_offset));
621 /* Configure DMA tx control. */
623 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
624 SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
625 SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS) |
626 SAFEXCEL_HIA_xDR_WR_RES_BUF | SAFEXCEL_HIA_xDR_WR_CTRL_BUF);
628 /* Clear any pending interrupt. */
630 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
631 SAFEXCEL_RDR_INTR_MASK);
633 /* Enable ring interrupt. */
635 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CTRL(i),
636 SAFEXCEL_RDR_IRQ(i));
640 /* Reset the command and result descriptor rings. */
642 safexcel_hw_reset_rings(struct safexcel_softc *sc)
646 for (i = 0; i < sc->sc_config.rings; i++) {
648 * Result descriptor ring operations.
651 /* Reset ring base address. */
652 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
653 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
654 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
655 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);
657 /* Clear the pending prepared counter. */
659 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
660 SAFEXCEL_xDR_PREP_CLR_COUNT);
662 /* Clear the pending processed counter. */
664 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
665 SAFEXCEL_xDR_PROC_CLR_COUNT);
668 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
670 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
673 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);
675 /* Clear any pending interrupt. */
677 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
678 SAFEXCEL_RDR_INTR_MASK);
680 /* Disable ring interrupt. */
682 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
683 SAFEXCEL_RDR_IRQ(i));
686 * Command descriptor ring operations.
689 /* Reset ring base address. */
690 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
691 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
692 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
693 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);
695 /* Clear the pending prepared counter. */
697 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
698 SAFEXCEL_xDR_PREP_CLR_COUNT);
700 /* Clear the pending processed counter. */
702 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
703 SAFEXCEL_xDR_PROC_CLR_COUNT);
706 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
708 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
711 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);
713 /* Clear any pending interrupt. */
715 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
716 SAFEXCEL_CDR_INTR_MASK);
721 safexcel_enable_pe_engine(struct safexcel_softc *sc, int pe)
725 for (ring_mask = 0, i = 0; i < sc->sc_config.rings; i++) {
730 /* Enable command descriptor rings. */
731 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
732 SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);
734 /* Enable result descriptor rings. */
735 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
736 SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);
738 /* Clear any HIA interrupt. */
739 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
740 SAFEXCEL_AIC_G_ACK_HIA_MASK);
744 safexcel_execute(struct safexcel_softc *sc, struct safexcel_ring *ring,
745 struct safexcel_request *req)
747 uint32_t ncdescs, nrdescs, nreqs;
751 mtx_assert(&ring->mtx, MA_OWNED);
753 ringidx = req->sess->ringidx;
754 if (STAILQ_EMPTY(&ring->ready_requests))
756 busy = !STAILQ_EMPTY(&ring->queued_requests);
757 ncdescs = nrdescs = nreqs = 0;
758 while ((req = STAILQ_FIRST(&ring->ready_requests)) != NULL &&
759 req->cdescs + ncdescs <= SAFEXCEL_MAX_BATCH_SIZE &&
760 req->rdescs + nrdescs <= SAFEXCEL_MAX_BATCH_SIZE) {
761 STAILQ_REMOVE_HEAD(&ring->ready_requests, link);
762 STAILQ_INSERT_TAIL(&ring->queued_requests, req, link);
763 ncdescs += req->cdescs;
764 nrdescs += req->rdescs;
770 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
771 SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | nreqs);
774 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
775 nrdescs * sc->sc_config.rd_offset * sizeof(uint32_t));
777 SAFEXCEL_HIA_CDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
778 ncdescs * sc->sc_config.cd_offset * sizeof(uint32_t));
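	/*
	 * The prepared-descriptor counts are byte counts; writing them hands
	 * the newly constructed command and result descriptors to the DMA
	 * engines.  The threshold write above requests an interrupt once all
	 * nreqs packets in this batch have been processed.
	 */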
782 safexcel_init_rings(struct safexcel_softc *sc)
784 struct safexcel_cmd_descr *cdesc;
785 struct safexcel_ring *ring;
789 for (i = 0; i < sc->sc_config.rings; i++) {
790 ring = &sc->sc_ring[i];
792 snprintf(ring->lockname, sizeof(ring->lockname),
793 "safexcel_ring%d", i);
794 mtx_init(&ring->mtx, ring->lockname, NULL, MTX_DEF);
795 STAILQ_INIT(&ring->free_requests);
796 STAILQ_INIT(&ring->ready_requests);
797 STAILQ_INIT(&ring->queued_requests);
799 ring->cdr.read = ring->cdr.write = 0;
800 ring->rdr.read = ring->rdr.write = 0;
801 for (j = 0; j < SAFEXCEL_RING_SIZE; j++) {
802 cdesc = &ring->cdr.desc[j];
803 atok = ring->dma_atok.paddr +
804 sc->sc_config.atok_offset * j;
805 cdesc->atok_lo = SAFEXCEL_ADDR_LO(atok);
806 cdesc->atok_hi = SAFEXCEL_ADDR_HI(atok);
812 safexcel_dma_alloc_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
815 struct safexcel_dma_mem *sdm;
820 KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg));
822 sdm->paddr = segs->ds_addr;
826 safexcel_dma_alloc_mem(struct safexcel_softc *sc, struct safexcel_dma_mem *sdm,
831 KASSERT(sdm->vaddr == NULL,
832 ("%s: DMA memory descriptor in use.", __func__));
834 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
835 PAGE_SIZE, 0, /* alignment, boundary */
836 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
837 BUS_SPACE_MAXADDR, /* highaddr */
838 NULL, NULL, /* filtfunc, filtfuncarg */
839 size, 1, /* maxsize, nsegments */
840 size, BUS_DMA_COHERENT, /* maxsegsz, flags */
841 NULL, NULL, /* lockfunc, lockfuncarg */
842 &sdm->tag); /* dmat */
844 device_printf(sc->sc_dev,
845 "failed to allocate busdma tag, error %d\n", error);
849 error = bus_dmamem_alloc(sdm->tag, (void **)&sdm->vaddr,
850 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sdm->map);
852 device_printf(sc->sc_dev,
853 "failed to allocate DMA safe memory, error %d\n", error);
857 error = bus_dmamap_load(sdm->tag, sdm->map, sdm->vaddr, size,
858 safexcel_dma_alloc_mem_cb, sdm, BUS_DMA_NOWAIT);
860 device_printf(sc->sc_dev,
861 "cannot get address of the DMA memory, error %d\n", error);
867 bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
869 bus_dma_tag_destroy(sdm->tag);
877 safexcel_dma_free_mem(struct safexcel_dma_mem *sdm)
879 bus_dmamap_unload(sdm->tag, sdm->map);
880 bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
881 bus_dma_tag_destroy(sdm->tag);
885 safexcel_dma_free_rings(struct safexcel_softc *sc)
887 struct safexcel_ring *ring;
890 for (i = 0; i < sc->sc_config.rings; i++) {
891 ring = &sc->sc_ring[i];
892 safexcel_dma_free_mem(&ring->cdr.dma);
893 safexcel_dma_free_mem(&ring->dma_atok);
894 safexcel_dma_free_mem(&ring->rdr.dma);
895 bus_dma_tag_destroy(ring->data_dtag);
896 mtx_destroy(&ring->mtx);
901 safexcel_dma_init(struct safexcel_softc *sc)
903 struct safexcel_ring *ring;
907 for (i = 0; i < sc->sc_config.rings; i++) {
908 ring = &sc->sc_ring[i];
910 error = bus_dma_tag_create(
911 bus_get_dma_tag(sc->sc_dev),/* parent */
912 1, 0, /* alignment, boundary */
913 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
914 BUS_SPACE_MAXADDR, /* highaddr */
915 NULL, NULL, /* filtfunc, filtfuncarg */
916 SAFEXCEL_MAX_REQUEST_SIZE, /* maxsize */
917 SAFEXCEL_MAX_FRAGMENTS, /* nsegments */
918 SAFEXCEL_MAX_REQUEST_SIZE, /* maxsegsz */
919 BUS_DMA_COHERENT, /* flags */
920 NULL, NULL, /* lockfunc, lockfuncarg */
921 &ring->data_dtag); /* dmat */
923 device_printf(sc->sc_dev,
924 "bus_dma_tag_create main failed; error %d\n", error);
928 size = sizeof(uint32_t) * sc->sc_config.cd_offset *
930 error = safexcel_dma_alloc_mem(sc, &ring->cdr.dma, size);
932 device_printf(sc->sc_dev,
933 "failed to allocate CDR DMA memory, error %d\n",
938 (struct safexcel_cmd_descr *)ring->cdr.dma.vaddr;
940 /* Allocate additional CDR token memory. */
941 size = (bus_size_t)sc->sc_config.atok_offset *
943 error = safexcel_dma_alloc_mem(sc, &ring->dma_atok, size);
945 device_printf(sc->sc_dev,
946 "failed to allocate atoken DMA memory, error %d\n",
951 size = sizeof(uint32_t) * sc->sc_config.rd_offset *
953 error = safexcel_dma_alloc_mem(sc, &ring->rdr.dma, size);
955 device_printf(sc->sc_dev,
956 "failed to allocate RDR DMA memory, error %d\n",
961 (struct safexcel_res_descr *)ring->rdr.dma.vaddr;
966 safexcel_dma_free_rings(sc);
971 safexcel_deinit_hw(struct safexcel_softc *sc)
973 safexcel_hw_reset_rings(sc);
974 safexcel_dma_free_rings(sc);
978 safexcel_init_hw(struct safexcel_softc *sc)
982 /* 23.3.7 Initialization */
983 if (safexcel_configure(sc) != 0)
986 if (safexcel_dma_init(sc) != 0)
989 safexcel_init_rings(sc);
991 safexcel_init_hia_bus_access(sc);
993 /* 23.3.7.2 Disable EIP-97 global Interrupts */
994 safexcel_disable_global_interrupts(sc);
996 for (pe = 0; pe < sc->sc_config.pes; pe++) {
997 /* 23.3.7.3 Configure Data Fetch Engine */
998 safexcel_configure_dfe_engine(sc, pe);
1000 /* 23.3.7.4 Configure Data Store Engine */
1001 if (safexcel_configure_dse(sc, pe)) {
1002 safexcel_deinit_hw(sc);
1006 /* 23.3.7.5 1. Protocol enables */
1008 SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION_EN(pe),
1011 SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION2_EN(pe),
1015 safexcel_hw_prepare_rings(sc);
1017 /* 23.3.7.5 Configure the Processing Engine(s). */
1018 for (pe = 0; pe < sc->sc_config.pes; pe++)
1019 safexcel_enable_pe_engine(sc, pe);
1021 safexcel_hw_setup_rings(sc);
1027 safexcel_setup_dev_interrupts(struct safexcel_softc *sc)
1031 for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++) {
1032 sc->sc_ih[i].sc = sc;
1033 sc->sc_ih[i].ring = i;
1035 if (bus_setup_intr(sc->sc_dev, sc->sc_intr[i],
1036 INTR_TYPE_NET | INTR_MPSAFE, NULL, safexcel_ring_intr,
1037 &sc->sc_ih[i], &sc->sc_ih[i].handle)) {
1038 device_printf(sc->sc_dev,
1039 "couldn't setup interrupt %d\n", i);
1047 for (j = 0; j < i; j++)
1048 bus_teardown_intr(sc->sc_dev, sc->sc_intr[j],
1049 sc->sc_ih[j].handle);
1055 safexcel_teardown_dev_interrupts(struct safexcel_softc *sc)
1059 for (i = 0; i < SAFEXCEL_MAX_RINGS; i++)
1060 bus_teardown_intr(sc->sc_dev, sc->sc_intr[i],
1061 sc->sc_ih[i].handle);
1065 safexcel_alloc_dev_resources(struct safexcel_softc *sc)
1073 node = ofw_bus_get_node(dev);
1076 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1078 if (sc->sc_res == NULL) {
1079 device_printf(dev, "couldn't allocate memory resources\n");
1083 for (i = 0; i < SAFEXCEL_MAX_RINGS; i++) {
1084 (void)snprintf(name, sizeof(name), "ring%d", i);
1085 error = ofw_bus_find_string_index(node, "interrupt-names", name,
1090 sc->sc_intr[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1091 RF_ACTIVE | RF_SHAREABLE);
1092 if (sc->sc_intr[i] == NULL) {
1098 device_printf(dev, "couldn't allocate interrupt resources\n");
1103 mtx_init(&sc->sc_mtx, "safexcel softc", NULL, MTX_DEF);
1108 for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
1109 bus_release_resource(dev, SYS_RES_IRQ,
1110 rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
1111 bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->sc_res),
1117 safexcel_free_dev_resources(struct safexcel_softc *sc)
1121 mtx_destroy(&sc->sc_mtx);
1123 for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
1124 bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
1125 rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
1126 if (sc->sc_res != NULL)
1127 bus_release_resource(sc->sc_dev, SYS_RES_MEMORY,
1128 rman_get_rid(sc->sc_res), sc->sc_res);
1132 safexcel_probe(device_t dev)
1134 struct safexcel_softc *sc;
1136 if (!ofw_bus_status_okay(dev))
1139 sc = device_get_softc(dev);
1140 sc->sc_type = ofw_bus_search_compatible(dev, safexcel_compat)->ocd_data;
1141 if (sc->sc_type == 0)
1144 device_set_desc(dev, "SafeXcel EIP-97 crypto accelerator");
1146 return (BUS_PROBE_DEFAULT);
1150 safexcel_crypto_register(struct safexcel_softc *sc, int alg)
1152 (void)crypto_register(sc->sc_cid, alg, SAFEXCEL_MAX_REQUEST_SIZE, 0);
1156 safexcel_attach(device_t dev)
1158 struct sysctl_ctx_list *sctx;
1159 struct safexcel_softc *sc;
1160 struct safexcel_request *req;
1161 struct safexcel_ring *ring;
1164 sc = device_get_softc(dev);
1169 if (safexcel_alloc_dev_resources(sc))
1172 if (safexcel_setup_dev_interrupts(sc))
1175 if (safexcel_init_hw(sc))
1178 for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
1179 ring = &sc->sc_ring[ringidx];
1181 ring->cmd_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);
1182 ring->res_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);
1184 ring->requests = mallocarray(SAFEXCEL_REQUESTS_PER_RING,
1185 sizeof(struct safexcel_request), M_SAFEXCEL,
1188 for (i = 0; i < SAFEXCEL_REQUESTS_PER_RING; i++) {
1189 req = &ring->requests[i];
1191 if (bus_dmamap_create(ring->data_dtag,
1192 BUS_DMA_COHERENT, &req->dmap) != 0) {
1193 for (j = 0; j < i; j++)
1194 bus_dmamap_destroy(ring->data_dtag,
1195 ring->requests[j].dmap);
1198 if (safexcel_dma_alloc_mem(sc, &req->ctx,
1199 sizeof(struct safexcel_context_record)) != 0) {
1200 for (j = 0; j < i; j++) {
1201 bus_dmamap_destroy(ring->data_dtag,
1202 ring->requests[j].dmap);
1203 safexcel_dma_free_mem(
1204 &ring->requests[j].ctx);
1208 STAILQ_INSERT_TAIL(&ring->free_requests, req, link);
1212 sctx = device_get_sysctl_ctx(dev);
1213 SYSCTL_ADD_INT(sctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1214 OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->sc_debug, 0,
1215 "Debug message verbosity");
1217 sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safexcel_session),
1218 CRYPTOCAP_F_HARDWARE);
1222 safexcel_crypto_register(sc, CRYPTO_AES_CBC);
1223 safexcel_crypto_register(sc, CRYPTO_AES_ICM);
1224 safexcel_crypto_register(sc, CRYPTO_AES_XTS);
1225 safexcel_crypto_register(sc, CRYPTO_AES_CCM_16);
1226 safexcel_crypto_register(sc, CRYPTO_AES_CCM_CBC_MAC);
1227 safexcel_crypto_register(sc, CRYPTO_AES_NIST_GCM_16);
1228 safexcel_crypto_register(sc, CRYPTO_AES_128_NIST_GMAC);
1229 safexcel_crypto_register(sc, CRYPTO_AES_192_NIST_GMAC);
1230 safexcel_crypto_register(sc, CRYPTO_AES_256_NIST_GMAC);
1231 safexcel_crypto_register(sc, CRYPTO_SHA1);
1232 safexcel_crypto_register(sc, CRYPTO_SHA1_HMAC);
1233 safexcel_crypto_register(sc, CRYPTO_SHA2_224);
1234 safexcel_crypto_register(sc, CRYPTO_SHA2_224_HMAC);
1235 safexcel_crypto_register(sc, CRYPTO_SHA2_256);
1236 safexcel_crypto_register(sc, CRYPTO_SHA2_256_HMAC);
1237 safexcel_crypto_register(sc, CRYPTO_SHA2_384);
1238 safexcel_crypto_register(sc, CRYPTO_SHA2_384_HMAC);
1239 safexcel_crypto_register(sc, CRYPTO_SHA2_512);
1240 safexcel_crypto_register(sc, CRYPTO_SHA2_512_HMAC);
1245 safexcel_teardown_dev_interrupts(sc);
1247 safexcel_free_dev_resources(sc);
1253 safexcel_detach(device_t dev)
1255 struct safexcel_ring *ring;
1256 struct safexcel_softc *sc;
1259 sc = device_get_softc(dev);
1261 if (sc->sc_cid >= 0)
1262 crypto_unregister_all(sc->sc_cid);
1263 for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
1264 ring = &sc->sc_ring[ringidx];
1265 for (i = 0; i < SAFEXCEL_REQUESTS_PER_RING; i++) {
1266 bus_dmamap_destroy(ring->data_dtag,
1267 ring->requests[i].dmap);
1268 safexcel_dma_free_mem(&ring->requests[i].ctx);
1270 free(ring->requests, M_SAFEXCEL);
1271 sglist_free(ring->cmd_data);
1272 sglist_free(ring->res_data);
1274 safexcel_deinit_hw(sc);
1275 safexcel_teardown_dev_interrupts(sc);
1276 safexcel_free_dev_resources(sc);
1282 * Populate the request's context record with pre-computed key material.
1285 safexcel_set_context(struct safexcel_request *req)
1287 struct cryptop *crp;
1288 struct safexcel_context_record *ctx;
1289 struct safexcel_session *sess;
1296 ctx = (struct safexcel_context_record *)req->ctx.vaddr;
1297 data = (uint8_t *)ctx->data;
1298 if (req->enc != NULL) {
1299 if ((req->enc->crd_flags & CRD_F_KEY_EXPLICIT) != 0)
1300 memcpy(data, req->enc->crd_key, sess->klen);
1302 memcpy(data, sess->key, sess->klen);
1308 if (req->enc != NULL) {
1309 switch (req->enc->crd_alg) {
1310 case CRYPTO_AES_NIST_GCM_16:
1311 memcpy(data + off, sess->ghash_key, GMAC_BLOCK_LEN);
1312 off += GMAC_BLOCK_LEN;
1314 case CRYPTO_AES_CCM_16:
1315 memcpy(data + off, sess->xcbc_key,
1316 AES_BLOCK_LEN * 2 + sess->klen);
1317 off += AES_BLOCK_LEN * 2 + sess->klen;
1319 case CRYPTO_AES_XTS:
1320 memcpy(data + off, sess->tweak_key, sess->klen);
1326 if (req->mac != NULL) {
1327 switch (req->mac->crd_alg) {
1328 case CRYPTO_SHA1_HMAC:
1329 case CRYPTO_SHA2_224_HMAC:
1330 case CRYPTO_SHA2_256_HMAC:
1331 case CRYPTO_SHA2_384_HMAC:
1332 case CRYPTO_SHA2_512_HMAC:
1333 memcpy(data + off, sess->hmac_ipad, sess->statelen);
1334 off += sess->statelen;
1335 memcpy(data + off, sess->hmac_opad, sess->statelen);
1336 off += sess->statelen;
1345 * Populate fields in the first command descriptor of the chain used to encode
1346 * the specified request. These fields indicate the algorithms used, the size
1347 * of the key material stored in the associated context record, the primitive
1348 * operations to be performed on input data, and the location of the IV if any.
1351 safexcel_set_command(struct safexcel_request *req,
1352 struct safexcel_cmd_descr *cdesc)
1354 struct cryptop *crp;
1355 struct safexcel_session *sess;
1356 uint32_t ctrl0, ctrl1, ctxr_len;
1362 ctrl0 = sess->alg | sess->digest | sess->hash;
1365 ctxr_len = safexcel_set_context(req) / sizeof(uint32_t);
1366 ctrl0 |= SAFEXCEL_CONTROL0_SIZE(ctxr_len);
1368 if (req->enc != NULL)
1369 alg = req->enc->crd_alg;
1371 alg = req->mac->crd_alg;
1374 case CRYPTO_AES_CCM_16:
1375 if ((req->enc->crd_flags & CRD_F_ENCRYPT) != 0) {
1376 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_ENCRYPT_OUT |
1377 SAFEXCEL_CONTROL0_KEY_EN;
1379 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_DECRYPT_HASH_IN |
1380 SAFEXCEL_CONTROL0_KEY_EN;
1382 ctrl1 |= SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
1383 SAFEXCEL_CONTROL1_IV2 | SAFEXCEL_CONTROL1_IV3;
1385 case CRYPTO_AES_CBC:
1386 case CRYPTO_AES_ICM:
1387 case CRYPTO_AES_XTS:
1388 if ((req->enc->crd_flags & CRD_F_ENCRYPT) != 0) {
1389 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
1390 SAFEXCEL_CONTROL0_KEY_EN;
1391 if (req->mac != NULL)
1393 SAFEXCEL_CONTROL0_TYPE_ENCRYPT_HASH_OUT;
1395 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
1396 SAFEXCEL_CONTROL0_KEY_EN;
1397 if (req->mac != NULL) {
1398 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
1399 ctrl1 |= SAFEXCEL_CONTROL1_HASH_STORE;
1403 case CRYPTO_AES_NIST_GCM_16:
1404 if ((req->enc->crd_flags & CRD_F_ENCRYPT) != 0) {
1405 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
1406 SAFEXCEL_CONTROL0_KEY_EN |
1407 SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
1409 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
1410 SAFEXCEL_CONTROL0_KEY_EN |
1411 SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
1413 if (req->enc != NULL &&
1414 req->enc->crd_alg == CRYPTO_AES_NIST_GCM_16) {
1415 ctrl1 |= SAFEXCEL_CONTROL1_COUNTER_MODE |
1416 SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
1417 SAFEXCEL_CONTROL1_IV2;
1421 case CRYPTO_SHA2_224:
1422 case CRYPTO_SHA2_256:
1423 case CRYPTO_SHA2_384:
1424 case CRYPTO_SHA2_512:
1425 ctrl0 |= SAFEXCEL_CONTROL0_RESTART_HASH;
1427 case CRYPTO_SHA1_HMAC:
1428 case CRYPTO_SHA2_224_HMAC:
1429 case CRYPTO_SHA2_256_HMAC:
1430 case CRYPTO_SHA2_384_HMAC:
1431 case CRYPTO_SHA2_512_HMAC:
1432 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
1436 cdesc->control_data.control0 = ctrl0;
1437 cdesc->control_data.control1 = ctrl1;
1441 * Construct a no-op instruction, used to pad input tokens.
1444 safexcel_instr_nop(struct safexcel_instr **instrp)
1446 struct safexcel_instr *instr;
1449 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1450 instr->length = (1 << 2);
1452 instr->instructions = 0;
1454 *instrp = instr + 1;
1458 * Insert the digest of the input payload. This is typically the last
1459 * instruction of a sequence.
1462 safexcel_instr_insert_digest(struct safexcel_instr **instrp, int len)
1464 struct safexcel_instr *instr;
1467 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1468 instr->length = len;
1469 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
1470 SAFEXCEL_INSTR_STATUS_LAST_PACKET;
1471 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1472 SAFEXCEL_INSTR_INSERT_HASH_DIGEST;
1474 *instrp = instr + 1;
1478 safexcel_instr_retrieve_digest(struct safexcel_instr **instrp, struct safexcel_request *req, int len)
1480 struct safexcel_instr *instr;
1484 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1485 instr->length = len;
1486 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
1487 SAFEXCEL_INSTR_STATUS_LAST_PACKET;
1488 instr->instructions = SAFEXCEL_INSTR_INSERT_HASH_DIGEST |
1489 SAFEXCEL_INSTR_DEST_OUTPUT;
1491 *instrp = instr + 1;
1495 * Retrieve and verify a digest.
1498 safexcel_instr_verify_digest(struct safexcel_instr **instrp, int len)
1500 struct safexcel_instr *instr;
1503 instr->opcode = SAFEXCEL_INSTR_OPCODE_RETRIEVE;
1504 instr->length = len;
1505 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
1506 SAFEXCEL_INSTR_STATUS_LAST_PACKET;
1507 instr->instructions = SAFEXCEL_INSTR_INSERT_HASH_DIGEST;
1510 instr->opcode = SAFEXCEL_INSTR_OPCODE_VERIFY_FIELDS;
1511 instr->length = len | SAFEXCEL_INSTR_VERIFY_HASH;
1512 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
1513 SAFEXCEL_INSTR_STATUS_LAST_PACKET;
1514 instr->instructions = SAFEXCEL_INSTR_VERIFY_PADDING;
1516 *instrp = instr + 1;
1520 safexcel_instr_temp_aes_block(struct safexcel_instr **instrp)
1522 struct safexcel_instr *instr;
1525 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT_REMOVE_RESULT;
1528 instr->instructions = AES_BLOCK_LEN;
1531 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1532 instr->length = AES_BLOCK_LEN;
1534 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1535 SAFEXCEL_INSTR_DEST_CRYPTO;
1537 *instrp = instr + 1;
1541 * Handle a request for an unauthenticated block cipher.
1544 safexcel_instr_cipher(struct safexcel_request *req,
1545 struct safexcel_instr *instr, struct safexcel_cmd_descr *cdesc)
1547 /* Insert the payload. */
1548 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1549 instr->length = req->enc->crd_len;
1550 instr->status = SAFEXCEL_INSTR_STATUS_LAST_PACKET |
1551 SAFEXCEL_INSTR_STATUS_LAST_HASH;
1552 instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1553 SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_OUTPUT;
1555 cdesc->additional_cdata_size = 1;
1559 safexcel_instr_eta(struct safexcel_request *req, struct safexcel_instr *instr,
1560 struct safexcel_cmd_descr *cdesc)
1562 struct safexcel_instr *start;
1566 /* Encrypt any data left in the request. */
1567 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1568 instr->length = req->enc->crd_len;
1569 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1570 instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1571 SAFEXCEL_INSTR_DEST_CRYPTO |
1572 SAFEXCEL_INSTR_DEST_HASH |
1573 SAFEXCEL_INSTR_DEST_OUTPUT;
1577 * Compute the digest, or extract it and place it in the output stream.
1579 if ((req->enc->crd_flags & CRD_F_ENCRYPT) != 0)
1580 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1582 safexcel_instr_retrieve_digest(&instr, req, req->sess->digestlen);
1583 cdesc->additional_cdata_size = instr - start;
1587 safexcel_instr_sha_hash(struct safexcel_request *req,
1588 struct safexcel_instr *instr)
1590 struct cryptop *crp;
1591 struct safexcel_instr *start;
1596 /* Pass the input data to the hash engine. */
1597 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1598 instr->length = req->mac->crd_len;
1599 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1600 instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1603 /* Insert the hash result into the output stream. */
1604 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1606 /* Pad the rest of the inline instruction space. */
1607 while (instr != start + SAFEXCEL_MAX_ITOKENS)
1608 safexcel_instr_nop(&instr);
1612 safexcel_instr_ccm(struct safexcel_request *req, struct safexcel_instr *instr,
1613 struct safexcel_cmd_descr *cdesc)
1615 struct cryptop *crp;
1616 struct safexcel_instr *start;
1617 uint8_t *a0, *b0, *alenp, L;
1624 * Construct two blocks, A0 and B0, used in encryption and
1625 * authentication, respectively. A0 is embedded in the token
1626 * descriptor, and B0 is inserted directly into the data stream using
1627 * instructions below.
1629 * OCF seems to assume a 12-byte IV, fixing L (the payload length size)
1630 * at 3 bytes due to the layout of B0. This is fine since the driver
1631 * has a maximum of 65535 bytes anyway.
1633 blen = AES_BLOCK_LEN;
1636 a0 = (uint8_t *)&cdesc->control_data.token[0];
1637 memset(a0, 0, blen);
1639 memcpy(&a0[1], req->iv, AES_CCM_IV_LEN);
1642 * Insert B0 and the AAD length into the input stream.
1644 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1645 instr->length = blen + (req->mac->crd_len > 0 ? 2 : 0);
1647 instr->instructions = SAFEXCEL_INSTR_DEST_HASH |
1648 SAFEXCEL_INSTR_INSERT_IMMEDIATE;
1651 b0 = (uint8_t *)instr;
1652 memset(b0, 0, blen);
1654 b0[0] = (L - 1) | /* payload length size */
1655 ((CCM_CBC_MAX_DIGEST_LEN - 2) / 2) << 3 /* digest length */ |
1656 (req->mac->crd_len > 0 ? 1 : 0) << 6 /* AAD present bit */;
1657 memcpy(&b0[1], req->iv, AES_CCM_IV_LEN);
1658 b0[14] = req->enc->crd_len >> 8;
1659 b0[15] = req->enc->crd_len & 0xff;
1660 instr += blen / sizeof(*instr);
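	/*
	 * With a 12-byte nonce, B0 is laid out as: flags (byte 0), nonce
	 * (bytes 1-12) and message length (bytes 13-15).  Only the low two
	 * length bytes can be non-zero given the 65535-byte request limit
	 * noted above.
	 */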
1662 /* Insert the AAD length and data into the input stream. */
1663 if (req->mac->crd_len > 0) {
1664 alenp = (uint8_t *)instr;
1665 alenp[0] = req->mac->crd_len >> 8;
1666 alenp[1] = req->mac->crd_len & 0xff;
1671 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1672 instr->length = req->mac->crd_len;
1674 instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1677 /* Insert zero padding. */
1678 aalign = (req->mac->crd_len + 2) & (blen - 1);
1679 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1680 instr->length = aalign == 0 ? 0 :
1681 blen - ((req->mac->crd_len + 2) & (blen - 1));
1682 instr->status = req->enc->crd_len == 0 ?
1683 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1684 instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1688 safexcel_instr_temp_aes_block(&instr);
1690 /* Insert the cipher payload into the input stream. */
1691 if (req->enc->crd_len > 0) {
1692 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1693 instr->length = req->enc->crd_len;
1694 instr->status = (req->enc->crd_len & (blen - 1)) == 0 ?
1695 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1696 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1697 SAFEXCEL_INSTR_DEST_CRYPTO |
1698 SAFEXCEL_INSTR_DEST_HASH |
1699 SAFEXCEL_INSTR_INS_LAST;
1702 /* Insert zero padding. */
1703 if (req->enc->crd_len & (blen - 1)) {
1704 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1705 instr->length = blen -
1706 (req->enc->crd_len & (blen - 1));
1707 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1708 instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1714 * Compute and insert the digest, or verify it against the digest supplied in the input stream.
1716 if ((req->enc->crd_flags & CRD_F_ENCRYPT) != 0)
1717 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1719 safexcel_instr_verify_digest(&instr, req->sess->digestlen);
1721 cdesc->additional_cdata_size = instr - start;
1725 safexcel_instr_gcm(struct safexcel_request *req, struct safexcel_instr *instr,
1726 struct safexcel_cmd_descr *cdesc)
1728 struct cryptop *crp;
1729 struct safexcel_instr *start;
1731 memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN);
1732 cdesc->control_data.token[3] = htobe32(1);
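	/*
	 * With GCM's 96-bit IV, the initial counter block J0 is the IV
	 * followed by a 32-bit counter equal to 1, which is exactly what the
	 * token now holds.
	 */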
1737 /* Insert the AAD into the input stream. */
1738 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1739 instr->length = req->mac->crd_len;
1740 instr->status = req->enc->crd_len == 0 ?
1741 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1742 instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1743 SAFEXCEL_INSTR_DEST_HASH;
1746 safexcel_instr_temp_aes_block(&instr);
1748 /* Insert the cipher payload into the input stream. */
1749 if (req->enc->crd_len > 0) {
1750 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1751 instr->length = req->enc->crd_len;
1752 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1753 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1754 SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_HASH |
1755 SAFEXCEL_INSTR_INS_LAST;
1760 * Compute and insert the digest, or verify it against the digest supplied in the input stream.
1762 if ((req->enc->crd_flags & CRD_F_ENCRYPT) != 0)
1763 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1765 safexcel_instr_verify_digest(&instr, req->sess->digestlen);
1767 cdesc->additional_cdata_size = instr - start;
1771 safexcel_set_token(struct safexcel_request *req)
1773 struct safexcel_cmd_descr *cdesc;
1774 struct safexcel_instr *instr;
1775 struct safexcel_softc *sc;
1780 ringidx = req->sess->ringidx;
1782 safexcel_set_command(req, cdesc);
1785 * For keyless hash operations, the token instructions can be embedded
1786 * in the token itself. Otherwise we use an additional token descriptor
1787 * and the embedded instruction space is used to store the IV.
1789 if (req->enc == NULL) {
1790 instr = (void *)cdesc->control_data.token;
1792 instr = (void *)(sc->sc_ring[ringidx].dma_atok.vaddr +
1793 sc->sc_config.atok_offset *
1794 (cdesc - sc->sc_ring[ringidx].cdr.desc));
1795 cdesc->control_data.options |= SAFEXCEL_OPTION_4_TOKEN_IV_CMD;
1798 if (req->enc != NULL) {
1799 switch (req->enc->crd_alg) {
1800 case CRYPTO_AES_NIST_GCM_16:
1801 safexcel_instr_gcm(req, instr, cdesc);
1803 case CRYPTO_AES_CCM_16:
1804 safexcel_instr_ccm(req, instr, cdesc);
1806 case CRYPTO_AES_XTS:
1807 memcpy(cdesc->control_data.token, req->iv, AES_XTS_IV_LEN);
1808 memset(cdesc->control_data.token +
1809 AES_XTS_IV_LEN / sizeof(uint32_t), 0, AES_XTS_IV_LEN);
1811 safexcel_instr_cipher(req, instr, cdesc);
1813 case CRYPTO_AES_CBC:
1814 case CRYPTO_AES_ICM:
1815 memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_LEN);
1816 if (req->mac != NULL)
1817 safexcel_instr_eta(req, instr, cdesc);
1819 safexcel_instr_cipher(req, instr, cdesc);
1823 switch (req->mac->crd_alg) {
1825 case CRYPTO_SHA1_HMAC:
1826 case CRYPTO_SHA2_224:
1827 case CRYPTO_SHA2_224_HMAC:
1828 case CRYPTO_SHA2_256:
1829 case CRYPTO_SHA2_256_HMAC:
1830 case CRYPTO_SHA2_384:
1831 case CRYPTO_SHA2_384_HMAC:
1832 case CRYPTO_SHA2_512:
1833 case CRYPTO_SHA2_512_HMAC:
1834 safexcel_instr_sha_hash(req, instr);
1837 panic("unhandled auth request %d", req->mac->crd_alg);
1842 static struct safexcel_res_descr *
1843 safexcel_res_descr_add(struct safexcel_ring *ring, bool first, bool last,
1844 bus_addr_t data, uint32_t len)
1846 struct safexcel_res_descr *rdesc;
1847 struct safexcel_res_descr_ring *rring;
1849 mtx_assert(&ring->mtx, MA_OWNED);
1852 if ((rring->write + 1) % SAFEXCEL_RING_SIZE == rring->read)
1855 rdesc = &rring->desc[rring->write];
1856 rring->write = (rring->write + 1) % SAFEXCEL_RING_SIZE;
1858 rdesc->particle_size = len;
1860 rdesc->descriptor_overflow = 0;
1861 rdesc->buffer_overflow = 0;
1862 rdesc->last_seg = last;
1863 rdesc->first_seg = first;
1864 rdesc->result_size =
1865 sizeof(struct safexcel_res_data) / sizeof(uint32_t);
1867 rdesc->data_lo = SAFEXCEL_ADDR_LO(data);
1868 rdesc->data_hi = SAFEXCEL_ADDR_HI(data);
1871 rdesc->result_data.packet_length = 0;
1872 rdesc->result_data.error_code = 0;
1878 static struct safexcel_cmd_descr *
1879 safexcel_cmd_descr_add(struct safexcel_ring *ring, bool first, bool last,
1880 bus_addr_t data, uint32_t seglen, uint32_t reqlen, bus_addr_t context)
1882 struct safexcel_cmd_descr *cdesc;
1883 struct safexcel_cmd_descr_ring *cring;
1885 KASSERT(reqlen <= SAFEXCEL_MAX_REQUEST_SIZE,
1886 ("%s: request length %u too long", __func__, reqlen));
1887 mtx_assert(&ring->mtx, MA_OWNED);
1890 if ((cring->write + 1) % SAFEXCEL_RING_SIZE == cring->read)
1893 cdesc = &cring->desc[cring->write];
1894 cring->write = (cring->write + 1) % SAFEXCEL_RING_SIZE;
1896 cdesc->particle_size = seglen;
1898 cdesc->last_seg = last;
1899 cdesc->first_seg = first;
1900 cdesc->additional_cdata_size = 0;
1902 cdesc->data_lo = SAFEXCEL_ADDR_LO(data);
1903 cdesc->data_hi = SAFEXCEL_ADDR_HI(data);
1905 cdesc->control_data.packet_length = reqlen;
1906 cdesc->control_data.options = SAFEXCEL_OPTION_IP |
1907 SAFEXCEL_OPTION_CP | SAFEXCEL_OPTION_CTX_CTRL_IN_CMD |
1908 SAFEXCEL_OPTION_RC_AUTO;
1909 cdesc->control_data.type = SAFEXCEL_TOKEN_TYPE_BYPASS;
1910 cdesc->control_data.context_lo = SAFEXCEL_ADDR_LO(context) |
1911 SAFEXCEL_CONTEXT_SMALL;
1912 cdesc->control_data.context_hi = SAFEXCEL_ADDR_HI(context);
1919 safexcel_cmd_descr_rollback(struct safexcel_ring *ring, int count)
1921 struct safexcel_cmd_descr_ring *cring;
1923 mtx_assert(&ring->mtx, MA_OWNED);
1926 cring->write -= count;
1927 if (cring->write < 0)
1928 cring->write += SAFEXCEL_RING_SIZE;
1932 safexcel_res_descr_rollback(struct safexcel_ring *ring, int count)
1934 struct safexcel_res_descr_ring *rring;
1936 mtx_assert(&ring->mtx, MA_OWNED);
1939 rring->write -= count;
1940 if (rring->write < 0)
1941 rring->write += SAFEXCEL_RING_SIZE;
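/*
 * The rollback helpers above undo a partially constructed descriptor chain
 * when a ring fills up mid-request; safexcel_create_chain_cb() uses them
 * before failing the request with EAGAIN.
 */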
1945 safexcel_append_segs(bus_dma_segment_t *segs, int nseg, struct sglist *sg,
1948 bus_dma_segment_t *seg;
1952 for (i = 0; i < nseg && len > 0; i++) {
1955 if (seg->ds_len <= start) {
1956 start -= seg->ds_len;
1960 seglen = MIN(len, seg->ds_len - start);
1961 error = sglist_append_phys(sg, seg->ds_addr + start, seglen);
1963 panic("%s: ran out of segments: %d", __func__, error);
1968 KASSERT(len == 0, ("%s: %d residual bytes", __func__, len));
1972 safexcel_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg,
1975 struct cryptop *crp;
1976 struct safexcel_cmd_descr *cdesc;
1977 struct safexcel_request *req;
1978 struct safexcel_ring *ring;
1979 struct safexcel_session *sess;
1993 ring = &req->sc->sc_ring[sess->ringidx];
1995 mtx_assert(&ring->mtx, MA_OWNED);
1998 * Set up descriptors for input and output data.
2000 * The processing engine programs require that any AAD comes first,
2001 * followed by the cipher plaintext, followed by the digest. Some
2002 * consumers place the digest first in the input buffer, in which case
2003 * we have to create an extra descriptor.
2005 * As an optimization, unmodified data is not passed to the output stream.
2008 sglist_reset(ring->cmd_data);
2009 sglist_reset(ring->res_data);
2010 if (req->mac != NULL && (req->enc == NULL ||
2011 req->enc->crd_alg == CRYPTO_AES_NIST_GCM_16 ||
2012 req->enc->crd_alg == CRYPTO_AES_CCM_16)) {
2013 safexcel_append_segs(segs, nseg, ring->cmd_data,
2014 req->mac->crd_skip, req->mac->crd_len);
2016 if (req->enc != NULL) {
2017 safexcel_append_segs(segs, nseg, ring->cmd_data,
2018 req->enc->crd_skip, req->enc->crd_len);
2019 safexcel_append_segs(segs, nseg, ring->res_data,
2020 req->enc->crd_skip, req->enc->crd_len);
2022 if (sess->digestlen > 0) {
2023 if (req->enc == NULL ||
2024 (req->enc->crd_flags & CRD_F_ENCRYPT) != 0)
2025 safexcel_append_segs(segs, nseg, ring->res_data,
2026 req->mac->crd_inject, sess->digestlen);
2027 else if (req->enc->crd_alg == CRYPTO_AES_NIST_GCM_16 ||
2028 req->enc->crd_alg == CRYPTO_AES_CCM_16) {
2029 safexcel_append_segs(segs, nseg, ring->cmd_data,
2030 req->mac->crd_inject, sess->digestlen);
2032 safexcel_append_segs(segs, nseg, ring->res_data,
2033 req->mac->crd_inject, sess->digestlen);
2037 sg = ring->cmd_data;
2038 if (sg->sg_nseg == 0) {
2040 * Fake a segment for the command descriptor if the input has
2041 * length zero. The EIP97 apparently does not handle
2042 * zero-length packets properly since subsequent requests return
2043 * bogus errors, so provide a dummy segment using the context record's address.
2046 (void)sglist_append_phys(sg, req->ctx.paddr, 1);
2048 for (i = 0, inlen = 0; i < sg->sg_nseg; i++)
2049 inlen += sg->sg_segs[i].ss_len;
2050 for (i = 0; i < sg->sg_nseg; i++) {
2052 last = i == sg->sg_nseg - 1;
2054 cdesc = safexcel_cmd_descr_add(ring, first, last,
2055 sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len,
2056 (uint32_t)inlen, req->ctx.paddr);
2057 if (cdesc == NULL) {
2058 safexcel_cmd_descr_rollback(ring, i);
2059 req->error = EAGAIN;
2065 req->cdescs = sg->sg_nseg;
2067 sg = ring->res_data;
2068 if (sg->sg_nseg == 0) {
2070 * We need a result descriptor even if the output stream will be
2071 * empty, for example when verifying an AAD digest.
2073 sg->sg_segs[0].ss_paddr = 0;
2074 sg->sg_segs[0].ss_len = 0;
2077 for (i = 0; i < sg->sg_nseg; i++) {
2079 last = i == sg->sg_nseg - 1;
2081 if (safexcel_res_descr_add(ring, first, last,
2082 sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len) == NULL) {
2083 safexcel_cmd_descr_rollback(ring,
2084 ring->cmd_data->sg_nseg);
2085 safexcel_res_descr_rollback(ring, i);
2086 req->error = EAGAIN;
2090 req->rdescs = sg->sg_nseg;
2094 safexcel_create_chain_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
2095 bus_size_t mapsize __unused, int error)
2097 safexcel_create_chain_cb(arg, segs, nseg, error);
2102 safexcel_create_chain(struct safexcel_ring *ring, struct safexcel_request *req)
2104 struct cryptop *crp;
2108 req->cdescs = req->rdescs = 0;
2111 if ((crp->crp_flags & CRYPTO_F_IOV) != 0) {
2112 error = bus_dmamap_load_uio(ring->data_dtag, req->dmap,
2113 (struct uio *)crp->crp_buf, safexcel_create_chain_cb2,
2114 req, BUS_DMA_NOWAIT);
2115 } else if ((crp->crp_flags & CRYPTO_F_IMBUF) != 0) {
2116 error = bus_dmamap_load_mbuf(ring->data_dtag, req->dmap,
2117 (struct mbuf *)crp->crp_buf, safexcel_create_chain_cb2,
2118 req, BUS_DMA_NOWAIT);
2120 error = bus_dmamap_load(ring->data_dtag, req->dmap,
2121 crp->crp_buf, crp->crp_ilen,
2122 safexcel_create_chain_cb, req, BUS_DMA_NOWAIT);
2125 req->dmap_loaded = true;
2126 else if (req->error != 0)
2132 * Determine whether the driver can implement a session with the requested parameters.
2136 safexcel_probesession(struct cryptoini *enc, struct cryptoini *mac)
2139 switch (enc->cri_alg) {
2140 case CRYPTO_AES_NIST_GCM_16:
2142 (mac->cri_alg != CRYPTO_AES_128_NIST_GMAC &&
2143 mac->cri_alg != CRYPTO_AES_192_NIST_GMAC &&
2144 mac->cri_alg != CRYPTO_AES_256_NIST_GMAC))
2147 case CRYPTO_AES_CCM_16:
2149 mac->cri_alg != CRYPTO_AES_CCM_CBC_MAC)
2152 case CRYPTO_AES_CBC:
2153 case CRYPTO_AES_ICM:
2155 mac->cri_alg != CRYPTO_SHA1_HMAC &&
2156 mac->cri_alg != CRYPTO_SHA2_224_HMAC &&
2157 mac->cri_alg != CRYPTO_SHA2_256_HMAC &&
2158 mac->cri_alg != CRYPTO_SHA2_384_HMAC &&
2159 mac->cri_alg != CRYPTO_SHA2_512_HMAC)
2162 case CRYPTO_AES_XTS:
2170 switch (mac->cri_alg) {
2172 case CRYPTO_SHA1_HMAC:
2173 case CRYPTO_SHA2_224:
2174 case CRYPTO_SHA2_224_HMAC:
2175 case CRYPTO_SHA2_256:
2176 case CRYPTO_SHA2_256_HMAC:
2177 case CRYPTO_SHA2_384:
2178 case CRYPTO_SHA2_384_HMAC:
2179 case CRYPTO_SHA2_512:
2180 case CRYPTO_SHA2_512_HMAC:
2191 * Pre-compute the hash key used in GHASH, which is a block of zeroes encrypted
2192 * using the cipher key.
2195 safexcel_setkey_ghash(struct safexcel_session *sess, const uint8_t *key,
2198 uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)];
2199 uint8_t zeros[AES_BLOCK_LEN];
2202 memset(zeros, 0, sizeof(zeros));
2204 rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY);
2205 rijndaelEncrypt(ks, rounds, zeros, (uint8_t *)sess->ghash_key);
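	/*
	 * The result is the GHASH subkey H = E_K(0^128) defined by the GCM
	 * specification; the engine consumes it as big-endian 32-bit words,
	 * hence the byte swap below.
	 */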
2206 for (i = 0; i < GMAC_BLOCK_LEN / sizeof(uint32_t); i++)
2207 sess->ghash_key[i] = htobe32(sess->ghash_key[i]);
2209 explicit_bzero(ks, sizeof(ks));
2213 * Pre-compute the combined CBC-MAC key, which consists of three keys K1, K2, K3
2214 * in the hardware implementation. K1 is the cipher key and comes last in the
2215 * buffer since K2 and K3 have a fixed size of AES_BLOCK_LEN. For now XCBC-MAC
2216 * is not implemented so K2 and K3 are fixed.
2219 safexcel_setkey_xcbcmac(struct safexcel_session *sess, const uint8_t *key,
2224 memset(sess->xcbc_key, 0, sizeof(sess->xcbc_key));
2225 off = 2 * AES_BLOCK_LEN / sizeof(uint32_t);
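	/*
	 * K2 and K3 occupy the two zero-filled AES blocks just cleared at the
	 * start of the buffer; the cipher key (K1) is copied after them as
	 * big-endian 32-bit words.
	 */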
2226 for (i = 0; i < klen / sizeof(uint32_t); i++, key += 4)
2227 sess->xcbc_key[i + off] = htobe32(le32dec(key));
static void
safexcel_setkey_hmac_digest(struct auth_hash *ahash, union authctx *ctx,
    char *buf)
{
	int hashwords, i;

	switch (ahash->type) {
	case CRYPTO_SHA1_HMAC:
		hashwords = ahash->hashsize / sizeof(uint32_t);
		for (i = 0; i < hashwords; i++)
			((uint32_t *)buf)[i] = htobe32(ctx->sha1ctx.h.b32[i]);
		break;
	case CRYPTO_SHA2_224_HMAC:
		hashwords = auth_hash_hmac_sha2_256.hashsize / sizeof(uint32_t);
		for (i = 0; i < hashwords; i++)
			((uint32_t *)buf)[i] = htobe32(ctx->sha224ctx.state[i]);
		break;
	case CRYPTO_SHA2_256_HMAC:
		hashwords = ahash->hashsize / sizeof(uint32_t);
		for (i = 0; i < hashwords; i++)
			((uint32_t *)buf)[i] = htobe32(ctx->sha256ctx.state[i]);
		break;
	case CRYPTO_SHA2_384_HMAC:
		hashwords = auth_hash_hmac_sha2_512.hashsize / sizeof(uint64_t);
		for (i = 0; i < hashwords; i++)
			((uint64_t *)buf)[i] = htobe64(ctx->sha384ctx.state[i]);
		break;
	case CRYPTO_SHA2_512_HMAC:
		hashwords = ahash->hashsize / sizeof(uint64_t);
		for (i = 0; i < hashwords; i++)
			((uint64_t *)buf)[i] = htobe64(ctx->sha512ctx.state[i]);
		break;
	}
}
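
/*
 * Initialize an HMAC context with the inner or outer pad: XOR the key with
 * the pad value and absorb one block into the hash state.
 */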
static void
safexcel_hmac_init_pad(struct auth_hash *axf, const char *key, int klen,
    union authctx *auth_ctx, uint8_t padval)
{
	uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
	u_int i;

	memset(hmac_key, 0, sizeof(hmac_key));
	if (klen > axf->blocksize) {
		axf->Init(auth_ctx);
		axf->Update(auth_ctx, key, klen);
		axf->Final(hmac_key, auth_ctx);
		klen = axf->hashsize;
	} else
		memcpy(hmac_key, key, klen);

	for (i = 0; i < axf->blocksize; i++)
		hmac_key[i] ^= padval;

	axf->Init(auth_ctx);
	axf->Update(auth_ctx, hmac_key, axf->blocksize);
	explicit_bzero(hmac_key, sizeof(hmac_key));
}

/*
 * Pre-compute the inner and outer digests used in the HMAC algorithm.
 */
static void
safexcel_setkey_hmac(struct safexcel_session *sess, int alg, const uint8_t *key,
    int klen)
{
	union authctx ctx;
	struct auth_hash *ahash;

	switch (alg) {
	case CRYPTO_SHA1_HMAC:
		ahash = &auth_hash_hmac_sha1;
		break;
	case CRYPTO_SHA2_224_HMAC:
		ahash = &auth_hash_hmac_sha2_224;
		break;
	case CRYPTO_SHA2_256_HMAC:
		ahash = &auth_hash_hmac_sha2_256;
		break;
	case CRYPTO_SHA2_384_HMAC:
		ahash = &auth_hash_hmac_sha2_384;
		break;
	case CRYPTO_SHA2_512_HMAC:
		ahash = &auth_hash_hmac_sha2_512;
		break;
	default:
		panic("%s: unknown algorithm %d", __func__, alg);
	}

	safexcel_hmac_init_pad(ahash, key, klen, &ctx, HMAC_IPAD_VAL);
	safexcel_setkey_hmac_digest(ahash, &ctx, sess->hmac_ipad);
	safexcel_hmac_init_pad(ahash, key, klen, &ctx, HMAC_OPAD_VAL);
	safexcel_setkey_hmac_digest(ahash, &ctx, sess->hmac_opad);
	explicit_bzero(&ctx, ahash->ctxsize);
}
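
/*
 * The XTS tweak key is the second half of the key supplied by the framework.
 */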
static void
safexcel_setkey_xts(struct safexcel_session *sess, const uint8_t *key, int klen)
{
	memcpy(sess->tweak_key, key + klen / 2, klen / 2);
}

static uint32_t
safexcel_aes_algid(int keylen)
{
	switch (keylen) {
	case 128:
		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES128);
	case 192:
		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES192);
	case 256:
		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES256);
	default:
		panic("invalid AES key length %d", keylen);
	}
}

static uint32_t
safexcel_aes_ccm_hashid(int keylen)
{
	switch (keylen) {
	case 128:
		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC128);
	case 192:
		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC192);
	case 256:
		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC256);
	default:
		panic("invalid AES key length %d", keylen);
	}
}
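
/*
 * Map an opencrypto SHA algorithm identifier to the corresponding hardware
 * hash algorithm selector.
 */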
static uint32_t
safexcel_sha_hashid(int alg)
{
	switch (alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA1);
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_224_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA224);
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA256);
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA384);
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA512);
	default:
		__assert_unreachable();
	}
}

static int
safexcel_sha_hashlen(int alg)
{
	switch (alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
		return (SHA1_HASH_LEN);
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_224_HMAC:
		return (SHA2_224_HASH_LEN);
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
		return (SHA2_256_HASH_LEN);
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
		return (SHA2_384_HASH_LEN);
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
		return (SHA2_512_HASH_LEN);
	default:
		__assert_unreachable();
	}
}

static int
safexcel_sha_statelen(int alg)
{
	switch (alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
		return (SHA1_HASH_LEN);
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
		return (SHA2_256_HASH_LEN);
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
		return (SHA2_512_HASH_LEN);
	default:
		__assert_unreachable();
	}
}
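
/*
 * Return true if the algorithm is handled as a hash or MAC rather than a
 * cipher.
 */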
static bool
safexcel_is_hash(int alg)
{
	switch (alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_AES_128_NIST_GMAC:
	case CRYPTO_AES_192_NIST_GMAC:
	case CRYPTO_AES_256_NIST_GMAC:
	case CRYPTO_AES_CCM_CBC_MAC:
		return (true);
	default:
		return (false);
	}
}
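
/*
 * Set up a new session: classify the requested transforms, program the
 * per-session cipher and hash parameters, pre-compute key material, and bind
 * the session to a ring.
 */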
static int
safexcel_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
{
	struct safexcel_session *sess;
	struct safexcel_softc *sc;
	struct cryptoini *enc, *mac;
	int error;

	sc = device_get_softc(dev);
	sess = crypto_get_driver_session(cses);

	enc = mac = NULL;
	if (safexcel_is_hash(cri->cri_alg))
		mac = cri;
	else
		enc = cri;
	cri = cri->cri_next;

	if (cri != NULL) {
		if (enc == NULL && !safexcel_is_hash(cri->cri_alg))
			enc = cri;
		if (mac == NULL && safexcel_is_hash(cri->cri_alg))
			mac = cri;
		if (cri->cri_next != NULL || !(enc != NULL && mac != NULL))
			return (EINVAL);
	}

	error = safexcel_probesession(enc, mac);
	if (error != 0)
		return (error);

	if (mac != NULL) {
		switch (mac->cri_alg) {
		case CRYPTO_SHA1:
		case CRYPTO_SHA2_224:
		case CRYPTO_SHA2_256:
		case CRYPTO_SHA2_384:
		case CRYPTO_SHA2_512:
			sess->digest = SAFEXCEL_CONTROL0_DIGEST_PRECOMPUTED;
			sess->hash = safexcel_sha_hashid(mac->cri_alg);
			sess->digestlen = safexcel_sha_hashlen(mac->cri_alg);
			sess->statelen = safexcel_sha_statelen(mac->cri_alg);
			break;
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_224_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
			sess->digest = SAFEXCEL_CONTROL0_DIGEST_HMAC;
			sess->hash = safexcel_sha_hashid(mac->cri_alg);
			sess->digestlen = safexcel_sha_hashlen(mac->cri_alg);
			sess->statelen = safexcel_sha_statelen(mac->cri_alg);
			break;
		}
	}

	if (enc != NULL) {
		switch (enc->cri_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC;
			sess->digestlen = GMAC_DIGEST_LEN;
			sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH;
			sess->alg = safexcel_aes_algid(enc->cri_klen);
			sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM;
			sess->ivlen = AES_GCM_IV_LEN;
			break;
		case CRYPTO_AES_CCM_16:
			sess->hash = safexcel_aes_ccm_hashid(enc->cri_klen);
			sess->digest = SAFEXCEL_CONTROL0_DIGEST_CCM;
			sess->digestlen = CCM_CBC_MAX_DIGEST_LEN;
			sess->alg = safexcel_aes_algid(enc->cri_klen);
			sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CCM;
			sess->ivlen = AES_CCM_IV_LEN;
			break;
		case CRYPTO_AES_CBC:
			sess->alg = safexcel_aes_algid(enc->cri_klen);
			sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CBC;
			sess->ivlen = AES_BLOCK_LEN;
			break;
		case CRYPTO_AES_ICM:
			sess->alg = safexcel_aes_algid(enc->cri_klen);
			sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CTR;
			sess->ivlen = AES_BLOCK_LEN;
			break;
		case CRYPTO_AES_XTS:
			sess->alg = safexcel_aes_algid(enc->cri_klen / 2);
			sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_XTS;
			sess->ivlen = AES_XTS_IV_LEN;
			break;
		}
	}

	if (mac != NULL && mac->cri_mlen != 0)
		sess->digestlen = mac->cri_mlen;

	if (enc != NULL) {
		if (enc->cri_key != NULL) {
			sess->klen = enc->cri_klen / 8;
			memcpy(sess->key, enc->cri_key, sess->klen);
			switch (enc->cri_alg) {
			case CRYPTO_AES_NIST_GCM_16:
				safexcel_setkey_ghash(sess, sess->key,
				    sess->klen);
				break;
			case CRYPTO_AES_CCM_16:
				safexcel_setkey_xcbcmac(sess, sess->key,
				    sess->klen);
				break;
			case CRYPTO_AES_XTS:
				safexcel_setkey_xts(sess, sess->key,
				    sess->klen);
				break;
			}
		}
	}

	if (mac != NULL && mac->cri_key != NULL) {
		switch (mac->cri_alg) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_224_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
			safexcel_setkey_hmac(sess, mac->cri_alg, mac->cri_key,
			    mac->cri_klen / 8);
			break;
		}
	}

	/* Bind each session to a fixed ring to minimize lock contention. */
	sess->ringidx = atomic_fetchadd_int(&sc->sc_ringidx, 1);
	sess->ringidx %= sc->sc_config.rings;

	return (0);
}
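
/*
 * Handle a symmetric crypto request: validate it, refresh keys if the request
 * carries new ones, allocate a request from the session's assigned ring, set
 * up the IV, build the descriptor chain and hand the request to the hardware.
 */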
static int
safexcel_process(device_t dev, struct cryptop *crp, int hint)
{
	struct safexcel_request *req;
	struct safexcel_ring *ring;
	struct safexcel_session *sess;
	struct safexcel_softc *sc;
	struct cryptodesc *crd, *enc, *mac;
	int error;

	sc = device_get_softc(dev);
	sess = crypto_get_driver_session(crp->crp_session);

	if (crp->crp_ilen > SAFEXCEL_MAX_REQUEST_SIZE ||
	    crp->crp_olen > SAFEXCEL_MAX_REQUEST_SIZE) {
		crp->crp_etype = E2BIG;
		crypto_done(crp);
		return (0);
	}

	crd = crp->crp_desc;
	enc = mac = NULL;
	if (safexcel_is_hash(crd->crd_alg))
		mac = crd;
	else
		enc = crd;
	crd = crd->crd_next;

	if (crd != NULL) {
		if (enc == NULL && !safexcel_is_hash(crd->crd_alg))
			enc = crd;
		if (mac == NULL && safexcel_is_hash(crd->crd_alg))
			mac = crd;
		if (crd->crd_next != NULL || !(enc != NULL && mac != NULL))
			return (EINVAL);
	}

	if ((enc != NULL && (enc->crd_flags & CRD_F_KEY_EXPLICIT) != 0) ||
	    (mac != NULL && (mac->crd_flags & CRD_F_KEY_EXPLICIT) != 0)) {
		if (enc != NULL) {
			switch (enc->crd_alg) {
			case CRYPTO_AES_NIST_GCM_16:
				safexcel_setkey_ghash(sess, enc->crd_key,
				    enc->crd_klen / 8);
				break;
			case CRYPTO_AES_CCM_16:
				safexcel_setkey_xcbcmac(sess, enc->crd_key,
				    enc->crd_klen / 8);
				break;
			case CRYPTO_AES_XTS:
				safexcel_setkey_xts(sess, enc->crd_key,
				    enc->crd_klen / 8);
				break;
			}
		}
		if (mac != NULL) {
			switch (mac->crd_alg) {
			case CRYPTO_SHA1_HMAC:
			case CRYPTO_SHA2_224_HMAC:
			case CRYPTO_SHA2_256_HMAC:
			case CRYPTO_SHA2_384_HMAC:
			case CRYPTO_SHA2_512_HMAC:
				safexcel_setkey_hmac(sess, mac->crd_alg,
				    mac->crd_key, mac->crd_klen / 8);
				break;
			}
		}
	}

	ring = &sc->sc_ring[sess->ringidx];
	mtx_lock(&ring->mtx);
	req = safexcel_alloc_request(sc, ring);
	if (__predict_false(req == NULL)) {
		mtx_lock(&sc->sc_mtx);
		mtx_unlock(&ring->mtx);
		sc->sc_blocked = CRYPTO_SYMQ;
		mtx_unlock(&sc->sc_mtx);
		return (ERESTART);
	}

	req->crp = crp;
	req->sess = sess;

	if (enc != NULL && (enc->crd_flags & CRD_F_ENCRYPT) != 0) {
		if ((enc->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			memcpy(req->iv, enc->crd_iv, sess->ivlen);
		else
			arc4rand(req->iv, sess->ivlen, 0);

		if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0) {
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enc->crd_inject, sess->ivlen, req->iv);
		}
	} else if (enc != NULL) {
		if ((enc->crd_flags & CRD_F_IV_EXPLICIT) != 0) {
			memcpy(req->iv, enc->crd_iv, sess->ivlen);
		} else {
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enc->crd_inject, sess->ivlen, req->iv);
		}
	}

	error = safexcel_create_chain(ring, req);
	if (__predict_false(error != 0)) {
		safexcel_free_request(ring, req);
		mtx_unlock(&ring->mtx);
		crp->crp_etype = error;
		crypto_done(crp);
		return (0);
	}

	safexcel_set_token(req);

	bus_dmamap_sync(ring->data_dtag, req->dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(req->ctx.tag, req->ctx.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	safexcel_enqueue_request(sc, ring, req);

	if ((hint & CRYPTO_HINT_MORE) == 0)
		safexcel_execute(sc, ring, req);
	mtx_unlock(&ring->mtx);

	return (0);
}

static device_method_t safexcel_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		safexcel_probe),
	DEVMETHOD(device_attach,	safexcel_attach),
	DEVMETHOD(device_detach,	safexcel_detach),

	/* Cryptodev interface */
	DEVMETHOD(cryptodev_newsession,	safexcel_newsession),
	DEVMETHOD(cryptodev_process,	safexcel_process),

	DEVMETHOD_END
};

static devclass_t safexcel_devclass;

static driver_t safexcel_driver = {
	.name		= "safexcel",
	.methods	= safexcel_methods,
	.size		= sizeof(struct safexcel_softc),
};

DRIVER_MODULE(safexcel, simplebus, safexcel_driver, safexcel_devclass, 0, 0);
MODULE_VERSION(safexcel, 1);
MODULE_DEPEND(safexcel, crypto, 1, 1, 1);