1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2020 Rubicon Communications, LLC (Netgate)
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <sys/param.h>
31 #include <sys/bus.h>
32 #include <sys/endian.h>
33 #include <sys/kernel.h>
34 #include <sys/lock.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/mutex.h>
38 #include <sys/rman.h>
39 #include <sys/sglist.h>
40 #include <sys/sysctl.h>
41
42 #include <machine/atomic.h>
43 #include <machine/bus.h>
44
45 #include <crypto/rijndael/rijndael.h>
46 #include <opencrypto/cryptodev.h>
47 #include <opencrypto/xform.h>
48
49 #include <dev/ofw/ofw_bus.h>
50 #include <dev/ofw/ofw_bus_subr.h>
51
52 #include "cryptodev_if.h"
53
54 #include "safexcel_reg.h"
55 #include "safexcel_var.h"
56
57 static MALLOC_DEFINE(M_SAFEXCEL, "safexcel_req", "safexcel request buffers");
58
59 /*
60  * We only support the EIP97 for now.
61  */
62 static struct ofw_compat_data safexcel_compat[] = {
63         { "inside-secure,safexcel-eip97ies",    (uintptr_t)97 },
64         { "inside-secure,safexcel-eip97",       (uintptr_t)97 },
65         { NULL,                                 0 }
66 };
67
68 const struct safexcel_reg_offsets eip97_regs_offset = {
69         .hia_aic        = SAFEXCEL_EIP97_HIA_AIC_BASE,
70         .hia_aic_g      = SAFEXCEL_EIP97_HIA_AIC_G_BASE,
71         .hia_aic_r      = SAFEXCEL_EIP97_HIA_AIC_R_BASE,
72         .hia_aic_xdr    = SAFEXCEL_EIP97_HIA_AIC_xDR_BASE,
73         .hia_dfe        = SAFEXCEL_EIP97_HIA_DFE_BASE,
74         .hia_dfe_thr    = SAFEXCEL_EIP97_HIA_DFE_THR_BASE,
75         .hia_dse        = SAFEXCEL_EIP97_HIA_DSE_BASE,
76         .hia_dse_thr    = SAFEXCEL_EIP97_HIA_DSE_THR_BASE,
77         .hia_gen_cfg    = SAFEXCEL_EIP97_HIA_GEN_CFG_BASE,
78         .pe             = SAFEXCEL_EIP97_PE_BASE,
79 };
80
81 const struct safexcel_reg_offsets eip197_regs_offset = {
82         .hia_aic        = SAFEXCEL_EIP197_HIA_AIC_BASE,
83         .hia_aic_g      = SAFEXCEL_EIP197_HIA_AIC_G_BASE,
84         .hia_aic_r      = SAFEXCEL_EIP197_HIA_AIC_R_BASE,
85         .hia_aic_xdr    = SAFEXCEL_EIP197_HIA_AIC_xDR_BASE,
86         .hia_dfe        = SAFEXCEL_EIP197_HIA_DFE_BASE,
87         .hia_dfe_thr    = SAFEXCEL_EIP197_HIA_DFE_THR_BASE,
88         .hia_dse        = SAFEXCEL_EIP197_HIA_DSE_BASE,
89         .hia_dse_thr    = SAFEXCEL_EIP197_HIA_DSE_THR_BASE,
90         .hia_gen_cfg    = SAFEXCEL_EIP197_HIA_GEN_CFG_BASE,
91         .pe             = SAFEXCEL_EIP197_PE_BASE,
92 };
93
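/*
 * Fetch the next outstanding command descriptor from the ring, advancing the
 * read index.  Returns NULL if the ring is empty.
 */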
94 static struct safexcel_cmd_descr *
95 safexcel_cmd_descr_next(struct safexcel_cmd_descr_ring *ring)
96 {
97         struct safexcel_cmd_descr *cdesc;
98
99         if (ring->write == ring->read)
100                 return (NULL);
101         cdesc = &ring->desc[ring->read];
102         ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
103         return (cdesc);
104 }
105
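/*
 * Fetch the next outstanding result descriptor from the ring, advancing the
 * read index.  Returns NULL if the ring is empty.
 */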
106 static struct safexcel_res_descr *
107 safexcel_res_descr_next(struct safexcel_res_descr_ring *ring)
108 {
109         struct safexcel_res_descr *rdesc;
110
111         if (ring->write == ring->read)
112                 return (NULL);
113         rdesc = &ring->desc[ring->read];
114         ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
115         return (rdesc);
116 }
117
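/*
 * Grab a request structure off the ring's free list.  The ring lock must be
 * held by the caller.
 */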
118 static struct safexcel_request *
119 safexcel_alloc_request(struct safexcel_softc *sc, struct safexcel_ring *ring)
120 {
121         struct safexcel_request *req;
122
123         mtx_assert(&ring->mtx, MA_OWNED);
124
125         if ((req = STAILQ_FIRST(&ring->free_requests)) != NULL)
126                 STAILQ_REMOVE_HEAD(&ring->free_requests, link);
127         return (req);
128 }
129
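/*
 * Return a completed request to the ring's free list, unloading its data DMA
 * map and scrubbing any key material and IV left behind in the context
 * record.
 */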
130 static void
131 safexcel_free_request(struct safexcel_ring *ring, struct safexcel_request *req)
132 {
133         struct safexcel_context_record *ctx;
134
135         mtx_assert(&ring->mtx, MA_OWNED);
136
137         if (req->dmap_loaded) {
138                 bus_dmamap_unload(ring->data_dtag, req->dmap);
139                 req->dmap_loaded = false;
140         }
141         ctx = (struct safexcel_context_record *)req->ctx.vaddr;
142         explicit_bzero(ctx->data, sizeof(ctx->data));
143         explicit_bzero(req->iv, sizeof(req->iv));
144         STAILQ_INSERT_TAIL(&ring->free_requests, req, link);
145 }
146
147 static void
148 safexcel_enqueue_request(struct safexcel_softc *sc, struct safexcel_ring *ring,
149     struct safexcel_request *req)
150 {
151         mtx_assert(&ring->mtx, MA_OWNED);
152
153         STAILQ_INSERT_TAIL(&ring->ready_requests, req, link);
154 }
155
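/*
 * Handle result descriptor ring completions: retire the command and result
 * descriptors of each finished request, propagate per-descriptor errors to
 * the crypto request, and acknowledge the processed packets to the hardware.
 */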
156 static void
157 safexcel_rdr_intr(struct safexcel_softc *sc, int ringidx)
158 {
159         struct safexcel_cmd_descr *cdesc;
160         struct safexcel_res_descr *rdesc;
161         struct safexcel_request *req;
162         struct safexcel_ring *ring;
163         uint32_t error, i, ncdescs, nrdescs, nreqs;
164
165         ring = &sc->sc_ring[ringidx];
166
167         mtx_lock(&ring->mtx);
168         nreqs = SAFEXCEL_READ(sc,
169             SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT);
170         nreqs >>= SAFEXCEL_xDR_PROC_xD_PKT_OFFSET;
171         nreqs &= SAFEXCEL_xDR_PROC_xD_PKT_MASK;
172         if (nreqs == 0) {
173                 SAFEXCEL_DPRINTF(sc, 1,
174                     "zero pending requests on ring %d\n", ringidx);
175                 goto out;
176         }
177
179         bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
180             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
181         bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
182             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
183         bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
184             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
185
186         ncdescs = nrdescs = 0;
187         for (i = 0; i < nreqs; i++) {
188                 req = STAILQ_FIRST(&ring->queued_requests);
189                 KASSERT(req != NULL, ("%s: expected %d pending requests",
190                     __func__, nreqs));
191                 STAILQ_REMOVE_HEAD(&ring->queued_requests, link);
192                 mtx_unlock(&ring->mtx);
193
194                 bus_dmamap_sync(req->ctx.tag, req->ctx.map,
195                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
196                 bus_dmamap_sync(ring->data_dtag, req->dmap,
197                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
198
199                 ncdescs += req->cdescs;
200                 while (req->cdescs-- > 0) {
201                         cdesc = safexcel_cmd_descr_next(&ring->cdr);
202                         KASSERT(cdesc != NULL,
203                             ("%s: missing control descriptor", __func__));
204                         if (req->cdescs == 0)
205                                 KASSERT(cdesc->last_seg,
206                                     ("%s: chain is not terminated", __func__));
207                 }
208                 nrdescs += req->rdescs;
209                 while (req->rdescs-- > 0) {
210                         rdesc = safexcel_res_descr_next(&ring->rdr);
211                         error = rdesc->result_data.error_code;
212                         if (error != 0) {
213                                 if (error == SAFEXCEL_RESULT_ERR_AUTH_FAILED &&
214                                     req->crp->crp_etype == 0) {
215                                         req->crp->crp_etype = EBADMSG;
216                                 } else {
217                                         SAFEXCEL_DPRINTF(sc, 1,
218                                             "error code %#x\n", error);
219                                         req->crp->crp_etype = EIO;
220                                 }
221                         }
222                 }
223
224                 crypto_done(req->crp);
225                 mtx_lock(&ring->mtx);
226                 safexcel_free_request(ring, req);
227         }
228
229         if (nreqs != 0) {
230                 SAFEXCEL_WRITE(sc,
231                     SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT,
232                     SAFEXCEL_xDR_PROC_xD_PKT(nreqs) |
233                     (sc->sc_config.rd_offset * nrdescs * sizeof(uint32_t)));
234         }
235 out:
236         if (!STAILQ_EMPTY(&ring->queued_requests)) {
237                 SAFEXCEL_WRITE(sc,
238                     SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
239                     SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | 1);
240         }
241         mtx_unlock(&ring->mtx);
242 }
243
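/*
 * Per-ring interrupt handler.  Acknowledge CDR and RDR interrupts, process
 * any completed requests on the result ring, and unblock the opencrypto
 * layer if the driver had previously reported itself blocked.
 */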
244 static void
245 safexcel_ring_intr(void *arg)
246 {
247         struct safexcel_softc *sc;
248         struct safexcel_intr_handle *ih;
249         uint32_t status, stat;
250         int ring;
251         bool blocked, rdrpending;
252
253         ih = arg;
254         sc = ih->sc;
255         ring = ih->ring;
256
257         status = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
258             SAFEXCEL_HIA_AIC_R_ENABLED_STAT(ring));
259         /* CDR interrupts */
260         if (status & SAFEXCEL_CDR_IRQ(ring)) {
261                 stat = SAFEXCEL_READ(sc,
262                     SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
263                 SAFEXCEL_WRITE(sc,
264                     SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
265                     stat & SAFEXCEL_CDR_INTR_MASK);
266         }
267         /* RDR interrupts */
268         rdrpending = false;
269         if (status & SAFEXCEL_RDR_IRQ(ring)) {
270                 stat = SAFEXCEL_READ(sc,
271                     SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
272                 if ((stat & SAFEXCEL_xDR_ERR) == 0)
273                         rdrpending = true;
274                 SAFEXCEL_WRITE(sc,
275                     SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
276                     stat & SAFEXCEL_RDR_INTR_MASK);
277         }
278         SAFEXCEL_WRITE(sc,
279             SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ACK(ring),
280             status);
281
282         if (rdrpending)
283                 safexcel_rdr_intr(sc, ring);
284
285         mtx_lock(&sc->sc_mtx);
286         blocked = sc->sc_blocked;
287         sc->sc_blocked = 0;
288         mtx_unlock(&sc->sc_mtx);
289
290         if (blocked)
291                 crypto_unblock(sc->sc_cid, blocked);
292 }
293
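/*
 * Probe the hardware configuration: select the EIP-97 or EIP-197 register
 * layout, count the ring interrupt controllers, verify 64-bit addressing,
 * and compute descriptor sizes and offsets.  The offsets are descriptor
 * strides in 32-bit words, rounded up to a multiple of the host data width.
 */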
294 static int
295 safexcel_configure(struct safexcel_softc *sc)
296 {
297         uint32_t i, mask, pemask, reg;
298         device_t dev;
299
300         if (sc->sc_type == 197) {
301                 sc->sc_offsets = eip197_regs_offset;
302                 pemask = SAFEXCEL_N_PES_MASK;
303         } else {
304                 sc->sc_offsets = eip97_regs_offset;
305                 pemask = EIP97_N_PES_MASK;
306         }
307
308         dev = sc->sc_dev;
309
310         /* Scan for valid ring interrupt controllers. */
311         for (i = 0; i < SAFEXCEL_MAX_RING_AIC; i++) {
312                 reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
313                     SAFEXCEL_HIA_AIC_R_VERSION(i));
314                 if (SAFEXCEL_REG_LO16(reg) != EIP201_VERSION_LE)
315                         break;
316         }
317         sc->sc_config.aic_rings = i;
318         if (sc->sc_config.aic_rings == 0)
319                 return (-1);
320
321         reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_OPTIONS);
322         /* Check for 64-bit addressing. */
323         if ((reg & SAFEXCEL_OPT_ADDR_64) == 0)
324                 return (-1);
325         /* Check alignment constraints (which we do not support). */
326         if (((reg & SAFEXCEL_OPT_TGT_ALIGN_MASK) >>
327             SAFEXCEL_OPT_TGT_ALIGN_OFFSET) != 0)
328                 return (-1);
329
330         sc->sc_config.hdw =
331             (reg & SAFEXCEL_xDR_HDW_MASK) >> SAFEXCEL_xDR_HDW_OFFSET;
332         mask = (1 << sc->sc_config.hdw) - 1;
333
334         sc->sc_config.rings = reg & SAFEXCEL_N_RINGS_MASK;
335         /* Limit the number of rings to the number of ring AICs. */
336         sc->sc_config.rings = MIN(sc->sc_config.rings, sc->sc_config.aic_rings);
337
338         sc->sc_config.pes = (reg & pemask) >> SAFEXCEL_N_PES_OFFSET;
339
340         sc->sc_config.cd_size =
341             sizeof(struct safexcel_cmd_descr) / sizeof(uint32_t);
342         sc->sc_config.cd_offset = (sc->sc_config.cd_size + mask) & ~mask;
343
344         sc->sc_config.rd_size =
345             sizeof(struct safexcel_res_descr) / sizeof(uint32_t);
346         sc->sc_config.rd_offset = (sc->sc_config.rd_size + mask) & ~mask;
347
348         sc->sc_config.atok_offset =
349             (SAFEXCEL_MAX_ATOKENS * sizeof(struct safexcel_instr) + mask) &
350             ~mask;
351
352         return (0);
353 }
354
355 static void
356 safexcel_init_hia_bus_access(struct safexcel_softc *sc)
357 {
358         uint32_t version, val;
359
360         /* Determine endianness and configure byte swap. */
361         version = SAFEXCEL_READ(sc,
362             SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_VERSION);
363         val = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL);
364         if (SAFEXCEL_REG_HI16(version) == SAFEXCEL_HIA_VERSION_BE) {
365                 val = SAFEXCEL_READ(sc,
366                     SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL);
367                 val = val ^ (SAFEXCEL_MST_CTRL_NO_BYTE_SWAP >> 24);
368                 SAFEXCEL_WRITE(sc,
369                     SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL,
370                     val);
371         }
372
373         /* Configure wr/rd cache values. */
374         SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_GEN_CFG(sc) + SAFEXCEL_HIA_MST_CTRL,
375             SAFEXCEL_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
376             SAFEXCEL_MST_CTRL_WD_CACHE(WR_CACHE_4BITS));
377 }
378
379 static void
380 safexcel_disable_global_interrupts(struct safexcel_softc *sc)
381 {
382         /* Disable and clear pending interrupts. */
383         SAFEXCEL_WRITE(sc,
384             SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ENABLE_CTRL, 0);
385         SAFEXCEL_WRITE(sc,
386             SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
387             SAFEXCEL_AIC_G_ACK_ALL_MASK);
388 }
389
390 /*
391  * Configure the data fetch engine.  This component parses command descriptors
392  * and sets up DMA transfers from host memory to the corresponding processing
393  * engine.
394  */
395 static void
396 safexcel_configure_dfe_engine(struct safexcel_softc *sc, int pe)
397 {
398         /* Reset all DFE threads. */
399         SAFEXCEL_WRITE(sc,
400             SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
401             SAFEXCEL_DxE_THR_CTRL_RESET_PE);
402
403         /* Deassert the DFE reset. */
404         SAFEXCEL_WRITE(sc,
405             SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe), 0);
406
407         /* DMA transfer size to use. */
408         SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE(sc) + SAFEXCEL_HIA_DFE_CFG(pe),
409             SAFEXCEL_HIA_DFE_CFG_DIS_DEBUG |
410             SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
411             SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(9) |
412             SAFEXCEL_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
413             SAFEXCEL_HIA_DxE_CFG_MAX_CTRL_SIZE(7) |
414             SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS) |
415             SAFEXCEL_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS));
416
417         /* Configure the PE DMA transfer thresholds. */
418         SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_DBUF_THRES(pe),
419             SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
420             SAFEXCEL_PE_IN_xBUF_THRES_MAX(9));
421         SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_TBUF_THRES(pe),
422             SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
423             SAFEXCEL_PE_IN_xBUF_THRES_MAX(7));
424 }
425
426 /*
427  * Configure the data store engine.  This component parses result descriptors
428  * and sets up DMA transfers from the processing engine to host memory.
429  */
430 static int
431 safexcel_configure_dse(struct safexcel_softc *sc, int pe)
432 {
433         uint32_t val;
434         int count;
435
436         /* Disable and reset all DSE threads. */
437         SAFEXCEL_WRITE(sc,
438             SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
439             SAFEXCEL_DxE_THR_CTRL_RESET_PE);
440
441         /* Wait up to one second for the threads to go idle. */
442         for (count = 0;;) {
443                 val = SAFEXCEL_READ(sc,
444                     SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_STAT(pe));
445                 if ((val & SAFEXCEL_DSE_THR_RDR_ID_MASK) ==
446                     SAFEXCEL_DSE_THR_RDR_ID_MASK)
447                         break;
448                 if (count++ > 10000) {
449                         device_printf(sc->sc_dev, "DSE reset timeout\n");
450                         return (-1);
451                 }
452                 DELAY(100);
453         }
454
455         /* Exit the reset state. */
456         SAFEXCEL_WRITE(sc,
457             SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe), 0);
458
459         /* DMA transfer size to use */
460         SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE(sc) + SAFEXCEL_HIA_DSE_CFG(pe),
461             SAFEXCEL_HIA_DSE_CFG_DIS_DEBUG |
462             SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
463             SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(8) |
464             SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS) |
465             SAFEXCEL_HIA_DSE_CFG_ALLWAYS_BUFFERABLE);
466
467         /* Configure the processing engine thresholds. */
468         SAFEXCEL_WRITE(sc,
469             SAFEXCEL_PE(sc) + SAFEXCEL_PE_OUT_DBUF_THRES(pe),
470             SAFEXCEL_PE_OUT_DBUF_THRES_MIN(7) |
471             SAFEXCEL_PE_OUT_DBUF_THRES_MAX(8));
472
473         return (0);
474 }
475
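/*
 * Prepare the command and result descriptor rings: mask ring interrupts,
 * disable external triggering, clear the prepared and processed counters and
 * pointers, and program the ring sizes.
 */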
476 static void
477 safexcel_hw_prepare_rings(struct safexcel_softc *sc)
478 {
479         int i;
480
481         for (i = 0; i < sc->sc_config.rings; i++) {
482                 /*
483                  * Command descriptors.
484                  */
485
486                 /* Clear interrupts for this ring. */
487                 SAFEXCEL_WRITE(sc,
488                     SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
489                     SAFEXCEL_HIA_AIC_R_ENABLE_CLR_ALL_MASK);
490
491                 /* Disable external triggering. */
492                 SAFEXCEL_WRITE(sc,
493                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);
494
495                 /* Clear the pending prepared counter. */
496                 SAFEXCEL_WRITE(sc,
497                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
498                     SAFEXCEL_xDR_PREP_CLR_COUNT);
499
500                 /* Clear the pending processed counter. */
501                 SAFEXCEL_WRITE(sc,
502                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
503                     SAFEXCEL_xDR_PROC_CLR_COUNT);
504
505                 SAFEXCEL_WRITE(sc,
506                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
507                 SAFEXCEL_WRITE(sc,
508                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
509
510                 SAFEXCEL_WRITE(sc,
511                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
512                     SAFEXCEL_RING_SIZE * sc->sc_config.cd_offset *
513                     sizeof(uint32_t));
514
515                 /*
516                  * Result descriptors.
517                  */
518
519                 /* Disable external triggering. */
520                 SAFEXCEL_WRITE(sc,
521                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);
522
523                 /* Clear the pending prepared counter. */
524                 SAFEXCEL_WRITE(sc,
525                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
526                     SAFEXCEL_xDR_PREP_CLR_COUNT);
527
528                 /* Clear the pending processed counter. */
529                 SAFEXCEL_WRITE(sc,
530                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
531                     SAFEXCEL_xDR_PROC_CLR_COUNT);
532
533                 SAFEXCEL_WRITE(sc,
534                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
535                 SAFEXCEL_WRITE(sc,
536                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
537
538                 /* Ring size. */
539                 SAFEXCEL_WRITE(sc,
540                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
541                     SAFEXCEL_RING_SIZE * sc->sc_config.rd_offset *
542                     sizeof(uint32_t));
543         }
544 }
545
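/*
 * Program the per-ring registers: descriptor ring base addresses, descriptor
 * sizes and offsets, fetch thresholds, and DMA cache attributes.  Result
 * ring interrupts are enabled here as well.
 */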
546 static void
547 safexcel_hw_setup_rings(struct safexcel_softc *sc)
548 {
549         struct safexcel_ring *ring;
550         uint32_t cd_size_rnd, mask, rd_size_rnd, val;
551         int i;
552
553         mask = (1 << sc->sc_config.hdw) - 1;
554         cd_size_rnd = (sc->sc_config.cd_size + mask) >> sc->sc_config.hdw;
555         val = (sizeof(struct safexcel_res_descr) -
556             sizeof(struct safexcel_res_data)) / sizeof(uint32_t);
557         rd_size_rnd = (val + mask) >> sc->sc_config.hdw;
558
559         for (i = 0; i < sc->sc_config.rings; i++) {
560                 ring = &sc->sc_ring[i];
561
562                 /*
563                  * Command descriptors.
564                  */
565
566                 /* Ring base address. */
567                 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
568                     SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
569                     SAFEXCEL_ADDR_LO(ring->cdr.dma.paddr));
570                 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
571                     SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
572                     SAFEXCEL_ADDR_HI(ring->cdr.dma.paddr));
573
574                 SAFEXCEL_WRITE(sc,
575                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
576                     SAFEXCEL_xDR_DESC_MODE_64BIT | SAFEXCEL_CDR_DESC_MODE_ADCP |
577                     (sc->sc_config.cd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
578                     sc->sc_config.cd_size);
579
580                 SAFEXCEL_WRITE(sc,
581                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
582                     ((SAFEXCEL_FETCH_COUNT * (cd_size_rnd << sc->sc_config.hdw)) <<
583                       SAFEXCEL_xDR_xD_FETCH_THRESH) |
584                     (SAFEXCEL_FETCH_COUNT * sc->sc_config.cd_offset));
585
586                 /* Configure DMA tx control. */
587                 SAFEXCEL_WRITE(sc,
588                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
589                     SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
590                     SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS));
591
592                 /* Clear any pending interrupt. */
593                 SAFEXCEL_WRITE(sc,
594                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
595                     SAFEXCEL_CDR_INTR_MASK);
596
597                 /*
598                  * Result descriptors.
599                  */
600
601                 /* Ring base address. */
602                 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
603                     SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
604                     SAFEXCEL_ADDR_LO(ring->rdr.dma.paddr));
605                 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
606                     SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
607                     SAFEXCEL_ADDR_HI(ring->rdr.dma.paddr));
608
609                 SAFEXCEL_WRITE(sc,
610                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
611                     SAFEXCEL_xDR_DESC_MODE_64BIT |
612                     (sc->sc_config.rd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
613                     sc->sc_config.rd_size);
614
615                 SAFEXCEL_WRITE(sc,
616                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
617                     ((SAFEXCEL_FETCH_COUNT * (rd_size_rnd << sc->sc_config.hdw)) <<
618                     SAFEXCEL_xDR_xD_FETCH_THRESH) |
619                     (SAFEXCEL_FETCH_COUNT * sc->sc_config.rd_offset));
620
621                 /* Configure DMA tx control. */
622                 SAFEXCEL_WRITE(sc,
623                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
624                     SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
625                     SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS) |
626                     SAFEXCEL_HIA_xDR_WR_RES_BUF | SAFEXCEL_HIA_xDR_WR_CTRL_BUF);
627
628                 /* Clear any pending interrupt. */
629                 SAFEXCEL_WRITE(sc,
630                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
631                     SAFEXCEL_RDR_INTR_MASK);
632
633                 /* Enable ring interrupt. */
634                 SAFEXCEL_WRITE(sc,
635                     SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CTRL(i),
636                     SAFEXCEL_RDR_IRQ(i));
637         }
638 }
639
640 /* Reset the command and result descriptor rings. */
641 static void
642 safexcel_hw_reset_rings(struct safexcel_softc *sc)
643 {
644         int i;
645
646         for (i = 0; i < sc->sc_config.rings; i++) {
647                 /*
648                  * Result descriptor ring operations.
649                  */
650
651                 /* Reset ring base address. */
652                 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
653                     SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
654                 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
655                     SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);
656
657                 /* Clear the pending prepared counter. */
658                 SAFEXCEL_WRITE(sc,
659                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
660                     SAFEXCEL_xDR_PREP_CLR_COUNT);
661
662                 /* Clear the pending processed counter. */
663                 SAFEXCEL_WRITE(sc,
664                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
665                     SAFEXCEL_xDR_PROC_CLR_COUNT);
666
667                 SAFEXCEL_WRITE(sc,
668                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
669                 SAFEXCEL_WRITE(sc,
670                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
671
672                 SAFEXCEL_WRITE(sc,
673                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);
674
675                 /* Clear any pending interrupt. */
676                 SAFEXCEL_WRITE(sc,
677                     SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
678                     SAFEXCEL_RDR_INTR_MASK);
679
680                 /* Disable ring interrupt. */
681                 SAFEXCEL_WRITE(sc,
682                     SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
683                     SAFEXCEL_RDR_IRQ(i));
684
685                 /*
686                  * Command descriptor ring operations.
687                  */
688
689                 /* Reset ring base address. */
690                 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
691                     SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
692                 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
693                     SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);
694
695                 /* Clear the pending prepared counter. */
696                 SAFEXCEL_WRITE(sc,
697                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
698                     SAFEXCEL_xDR_PREP_CLR_COUNT);
699
700                 /* Clear the pending processed counter. */
701                 SAFEXCEL_WRITE(sc,
702                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
703                     SAFEXCEL_xDR_PROC_CLR_COUNT);
704
705                 SAFEXCEL_WRITE(sc,
706                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
707                 SAFEXCEL_WRITE(sc,
708                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
709
710                 SAFEXCEL_WRITE(sc,
711                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);
712
713                 /* Clear any pending interrupt. */
714                 SAFEXCEL_WRITE(sc,
715                     SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
716                     SAFEXCEL_CDR_INTR_MASK);
717         }
718 }
719
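/*
 * Attach all rings to the data fetch and data store engine threads of the
 * given processing engine, and acknowledge any stale HIA interrupts.
 */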
720 static void
721 safexcel_enable_pe_engine(struct safexcel_softc *sc, int pe)
722 {
723         int i, ring_mask;
724
725         for (ring_mask = 0, i = 0; i < sc->sc_config.rings; i++) {
726                 ring_mask <<= 1;
727                 ring_mask |= 1;
728         }
729
730         /* Enable command descriptor rings. */
731         SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
732             SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);
733
734         /* Enable result descriptor rings. */
735         SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
736             SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);
737
738         /* Clear any HIA interrupt. */
739         SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
740             SAFEXCEL_AIC_G_ACK_HIA_MASK);
741 }
742
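/*
 * Submit pending requests to the hardware.  Ready requests are moved to the
 * queued list, bounded by the maximum batch size, and the hardware is kicked
 * by advancing the CDR and RDR prepared-descriptor counts.  If the ring was
 * idle, the result threshold interrupt is armed in packet mode.
 */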
743 static void
744 safexcel_execute(struct safexcel_softc *sc, struct safexcel_ring *ring,
745     struct safexcel_request *req)
746 {
747         uint32_t ncdescs, nrdescs, nreqs;
748         int ringidx;
749         bool busy;
750
751         mtx_assert(&ring->mtx, MA_OWNED);
752
753         ringidx = req->sess->ringidx;
754         if (STAILQ_EMPTY(&ring->ready_requests))
755                 return;
756         busy = !STAILQ_EMPTY(&ring->queued_requests);
757         ncdescs = nrdescs = nreqs = 0;
758         while ((req = STAILQ_FIRST(&ring->ready_requests)) != NULL &&
759             req->cdescs + ncdescs <= SAFEXCEL_MAX_BATCH_SIZE &&
760             req->rdescs + nrdescs <= SAFEXCEL_MAX_BATCH_SIZE) {
761                 STAILQ_REMOVE_HEAD(&ring->ready_requests, link);
762                 STAILQ_INSERT_TAIL(&ring->queued_requests, req, link);
763                 ncdescs += req->cdescs;
764                 nrdescs += req->rdescs;
765                 nreqs++;
766         }
767
768         if (!busy) {
769                 SAFEXCEL_WRITE(sc,
770                     SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
771                     SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | nreqs);
772         }
773         SAFEXCEL_WRITE(sc,
774             SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
775             nrdescs * sc->sc_config.rd_offset * sizeof(uint32_t));
776         SAFEXCEL_WRITE(sc,
777             SAFEXCEL_HIA_CDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
778             ncdescs * sc->sc_config.cd_offset * sizeof(uint32_t));
779 }
780
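/*
 * Initialize the software state of each ring: the ring lock, the request
 * lists, the descriptor ring indices, and the per-descriptor pointers into
 * the additional-token buffer.
 */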
781 static void
782 safexcel_init_rings(struct safexcel_softc *sc)
783 {
784         struct safexcel_cmd_descr *cdesc;
785         struct safexcel_ring *ring;
786         uint64_t atok;
787         int i, j;
788
789         for (i = 0; i < sc->sc_config.rings; i++) {
790                 ring = &sc->sc_ring[i];
791
792                 snprintf(ring->lockname, sizeof(ring->lockname),
793                     "safexcel_ring%d", i);
794                 mtx_init(&ring->mtx, ring->lockname, NULL, MTX_DEF);
795                 STAILQ_INIT(&ring->free_requests);
796                 STAILQ_INIT(&ring->ready_requests);
797                 STAILQ_INIT(&ring->queued_requests);
798
799                 ring->cdr.read = ring->cdr.write = 0;
800                 ring->rdr.read = ring->rdr.write = 0;
801                 for (j = 0; j < SAFEXCEL_RING_SIZE; j++) {
802                         cdesc = &ring->cdr.desc[j];
803                         atok = ring->dma_atok.paddr +
804                             sc->sc_config.atok_offset * j;
805                         cdesc->atok_lo = SAFEXCEL_ADDR_LO(atok);
806                         cdesc->atok_hi = SAFEXCEL_ADDR_HI(atok);
807                 }
808         }
809 }
810
811 static void
812 safexcel_dma_alloc_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
813     int error)
814 {
815         struct safexcel_dma_mem *sdm;
816
817         if (error != 0)
818                 return;
819
820         KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg));
821         sdm = arg;
822         sdm->paddr = segs->ds_addr;
823 }
824
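/*
 * Allocate a single contiguous block of coherent DMA memory and record its
 * bus address in the descriptor.
 */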
825 static int
826 safexcel_dma_alloc_mem(struct safexcel_softc *sc, struct safexcel_dma_mem *sdm,
827     bus_size_t size)
828 {
829         int error;
830
831         KASSERT(sdm->vaddr == NULL,
832             ("%s: DMA memory descriptor in use.", __func__));
833
834         error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
835             PAGE_SIZE, 0,               /* alignment, boundary */
836             BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
837             BUS_SPACE_MAXADDR,          /* highaddr */
838             NULL, NULL,                 /* filtfunc, filtfuncarg */
839             size, 1,                    /* maxsize, nsegments */
840             size, BUS_DMA_COHERENT,     /* maxsegsz, flags */
841             NULL, NULL,                 /* lockfunc, lockfuncarg */
842             &sdm->tag);                 /* dmat */
843         if (error != 0) {
844                 device_printf(sc->sc_dev,
845                     "failed to allocate busdma tag, error %d\n", error);
846                 goto err1;
847         }
848
849         error = bus_dmamem_alloc(sdm->tag, (void **)&sdm->vaddr,
850             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sdm->map);
851         if (error != 0) {
852                 device_printf(sc->sc_dev,
853                     "failed to allocate DMA safe memory, error %d\n", error);
854                 goto err2;
855         }
856
857         error = bus_dmamap_load(sdm->tag, sdm->map, sdm->vaddr, size,
858             safexcel_dma_alloc_mem_cb, sdm, BUS_DMA_NOWAIT);
859         if (error != 0) {
860                 device_printf(sc->sc_dev,
861                     "cannot get address of the DMA memory, error %d\n", error);
862                 goto err3;
863         }
864
865         return (0);
866 err3:
867         bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
868 err2:
869         bus_dma_tag_destroy(sdm->tag);
870 err1:
871         sdm->vaddr = NULL;
872
873         return (error);
874 }
875
876 static void
877 safexcel_dma_free_mem(struct safexcel_dma_mem *sdm)
878 {
879         bus_dmamap_unload(sdm->tag, sdm->map);
880         bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
881         bus_dma_tag_destroy(sdm->tag);
882 }
883
884 static void
885 safexcel_dma_free_rings(struct safexcel_softc *sc)
886 {
887         struct safexcel_ring *ring;
888         int i;
889
890         for (i = 0; i < sc->sc_config.rings; i++) {
891                 ring = &sc->sc_ring[i];
892                 safexcel_dma_free_mem(&ring->cdr.dma);
893                 safexcel_dma_free_mem(&ring->dma_atok);
894                 safexcel_dma_free_mem(&ring->rdr.dma);
895                 bus_dma_tag_destroy(ring->data_dtag);
896                 mtx_destroy(&ring->mtx);
897         }
898 }
899
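/*
 * Create the per-ring data DMA tag and allocate DMA memory for the command,
 * additional-token and result descriptor rings.
 */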
900 static int
901 safexcel_dma_init(struct safexcel_softc *sc)
902 {
903         struct safexcel_ring *ring;
904         bus_size_t size;
905         int error, i;
906
907         for (i = 0; i < sc->sc_config.rings; i++) {
908                 ring = &sc->sc_ring[i];
909
910                 error = bus_dma_tag_create(
911                     bus_get_dma_tag(sc->sc_dev),/* parent */
912                     1, 0,                       /* alignment, boundary */
913                     BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
914                     BUS_SPACE_MAXADDR,          /* highaddr */
915                     NULL, NULL,                 /* filtfunc, filtfuncarg */
916                     SAFEXCEL_MAX_REQUEST_SIZE,  /* maxsize */
917                     SAFEXCEL_MAX_FRAGMENTS,     /* nsegments */
918                     SAFEXCEL_MAX_REQUEST_SIZE,  /* maxsegsz */
919                     BUS_DMA_COHERENT,           /* flags */
920                     NULL, NULL,                 /* lockfunc, lockfuncarg */
921                     &ring->data_dtag);          /* dmat */
922                 if (error != 0) {
923                         device_printf(sc->sc_dev,
924                             "bus_dma_tag_create main failed; error %d\n", error);
925                         return (error);
926                 }
927
928                 size = sizeof(uint32_t) * sc->sc_config.cd_offset *
929                     SAFEXCEL_RING_SIZE;
930                 error = safexcel_dma_alloc_mem(sc, &ring->cdr.dma, size);
931                 if (error != 0) {
932                         device_printf(sc->sc_dev,
933                             "failed to allocate CDR DMA memory, error %d\n",
934                             error);
935                         goto err;
936                 }
937                 ring->cdr.desc =
938                     (struct safexcel_cmd_descr *)ring->cdr.dma.vaddr;
939
940                 /* Allocate additional CDR token memory. */
941                 size = (bus_size_t)sc->sc_config.atok_offset *
942                     SAFEXCEL_RING_SIZE;
943                 error = safexcel_dma_alloc_mem(sc, &ring->dma_atok, size);
944                 if (error != 0) {
945                         device_printf(sc->sc_dev,
946                             "failed to allocate atoken DMA memory, error %d\n",
947                             error);
948                         goto err;
949                 }
950
951                 size = sizeof(uint32_t) * sc->sc_config.rd_offset *
952                     SAFEXCEL_RING_SIZE;
953                 error = safexcel_dma_alloc_mem(sc, &ring->rdr.dma, size);
954                 if (error) {
955                         device_printf(sc->sc_dev,
956                             "failed to allocate RDR DMA memory, error %d\n",
957                             error);
958                         goto err;
959                 }
960                 ring->rdr.desc =
961                     (struct safexcel_res_descr *)ring->rdr.dma.vaddr;
962         }
963
964         return (0);
965 err:
966         safexcel_dma_free_rings(sc);
967         return (error);
968 }
969
970 static void
971 safexcel_deinit_hw(struct safexcel_softc *sc)
972 {
973         safexcel_hw_reset_rings(sc);
974         safexcel_dma_free_rings(sc);
975 }
976
977 static int
978 safexcel_init_hw(struct safexcel_softc *sc)
979 {
980         int pe;
981
982         /* 23.3.7 Initialization */
983         if (safexcel_configure(sc) != 0)
984                 return (EINVAL);
985
986         if (safexcel_dma_init(sc) != 0)
987                 return (ENOMEM);
988
989         safexcel_init_rings(sc);
990
991         safexcel_init_hia_bus_access(sc);
992
993         /* 23.3.7.2 Disable EIP-97 global Interrupts */
994         safexcel_disable_global_interrupts(sc);
995
996         for (pe = 0; pe < sc->sc_config.pes; pe++) {
997                 /* 23.3.7.3 Configure Data Fetch Engine */
998                 safexcel_configure_dfe_engine(sc, pe);
999
1000                 /* 23.3.7.4 Configure Data Store Engine */
1001                 if (safexcel_configure_dse(sc, pe)) {
1002                         safexcel_deinit_hw(sc);
1003                         return (-1);
1004                 }
1005
1006                 /* 23.3.7.5 1. Protocol enables */
1007                 SAFEXCEL_WRITE(sc,
1008                     SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION_EN(pe),
1009                     0xffffffff);
1010                 SAFEXCEL_WRITE(sc,
1011                     SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION2_EN(pe),
1012                     0xffffffff);
1013         }
1014
1015         safexcel_hw_prepare_rings(sc);
1016
1017         /* 23.3.7.5 Configure the Processing Engine(s). */
1018         for (pe = 0; pe < sc->sc_config.pes; pe++)
1019                 safexcel_enable_pe_engine(sc, pe);
1020
1021         safexcel_hw_setup_rings(sc);
1022
1023         return (0);
1024 }
1025
1026 static int
1027 safexcel_setup_dev_interrupts(struct safexcel_softc *sc)
1028 {
1029         int i, j;
1030
1031         for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++) {
1032                 sc->sc_ih[i].sc = sc;
1033                 sc->sc_ih[i].ring = i;
1034
1035                 if (bus_setup_intr(sc->sc_dev, sc->sc_intr[i],
1036                     INTR_TYPE_NET | INTR_MPSAFE, NULL, safexcel_ring_intr,
1037                     &sc->sc_ih[i], &sc->sc_ih[i].handle)) {
1038                         device_printf(sc->sc_dev,
1039                             "couldn't setup interrupt %d\n", i);
1040                         goto err;
1041                 }
1042         }
1043
1044         return (0);
1045
1046 err:
1047         for (j = 0; j < i; j++)
1048                 bus_teardown_intr(sc->sc_dev, sc->sc_intr[j],
1049                     sc->sc_ih[j].handle);
1050
1051         return (ENXIO);
1052 }
1053
1054 static void
1055 safexcel_teardown_dev_interrupts(struct safexcel_softc *sc)
1056 {
1057         int i;
1058
1059         for (i = 0; i < SAFEXCEL_MAX_RINGS; i++)
1060                 bus_teardown_intr(sc->sc_dev, sc->sc_intr[i],
1061                     sc->sc_ih[i].handle);
1062 }
1063
1064 static int
1065 safexcel_alloc_dev_resources(struct safexcel_softc *sc)
1066 {
1067         char name[16];
1068         device_t dev;
1069         phandle_t node;
1070         int error, i, rid;
1071
1072         dev = sc->sc_dev;
1073         node = ofw_bus_get_node(dev);
1074
1075         rid = 0;
1076         sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1077             RF_ACTIVE);
1078         if (sc->sc_res == NULL) {
1079                 device_printf(dev, "couldn't allocate memory resources\n");
1080                 return (ENXIO);
1081         }
1082
1083         for (i = 0; i < SAFEXCEL_MAX_RINGS; i++) {
1084                 (void)snprintf(name, sizeof(name), "ring%d", i);
1085                 error = ofw_bus_find_string_index(node, "interrupt-names", name,
1086                     &rid);
1087                 if (error != 0)
1088                         break;
1089
1090                 sc->sc_intr[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1091                     RF_ACTIVE | RF_SHAREABLE);
1092                 if (sc->sc_intr[i] == NULL) {
1093                         error = ENXIO;
1094                         goto out;
1095                 }
1096         }
1097         if (i == 0) {
1098                 device_printf(dev, "couldn't allocate interrupt resources\n");
1099                 error = ENXIO;
1100                 goto out;
1101         }
1102
1103         mtx_init(&sc->sc_mtx, "safexcel softc", NULL, MTX_DEF);
1104
1105         return (0);
1106
1107 out:
1108         for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
1109                 bus_release_resource(dev, SYS_RES_IRQ,
1110                     rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
1111         bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->sc_res),
1112             sc->sc_res);
1113         return (error);
1114 }
1115
1116 static void
1117 safexcel_free_dev_resources(struct safexcel_softc *sc)
1118 {
1119         int i;
1120
1121         mtx_destroy(&sc->sc_mtx);
1122
1123         for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
1124                 bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
1125                     rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
1126         if (sc->sc_res != NULL)
1127                 bus_release_resource(sc->sc_dev, SYS_RES_MEMORY,
1128                     rman_get_rid(sc->sc_res), sc->sc_res);
1129 }
1130
1131 static int
1132 safexcel_probe(device_t dev)
1133 {
1134         struct safexcel_softc *sc;
1135
1136         if (!ofw_bus_status_okay(dev))
1137                 return (ENXIO);
1138
1139         sc = device_get_softc(dev);
1140         sc->sc_type = ofw_bus_search_compatible(dev, safexcel_compat)->ocd_data;
1141         if (sc->sc_type == 0)
1142                 return (ENXIO);
1143
1144         device_set_desc(dev, "SafeXcel EIP-97 crypto accelerator");
1145
1146         return (BUS_PROBE_DEFAULT);
1147 }
1148
1149 static void
1150 safexcel_crypto_register(struct safexcel_softc *sc, int alg)
1151 {
1152         (void)crypto_register(sc->sc_cid, alg, SAFEXCEL_MAX_REQUEST_SIZE, 0);
1153 }
1154
1155 static int
1156 safexcel_attach(device_t dev)
1157 {
1158         struct sysctl_ctx_list *sctx;
1159         struct safexcel_softc *sc;
1160         struct safexcel_request *req;
1161         struct safexcel_ring *ring;
1162         int i, j, ringidx;
1163
1164         sc = device_get_softc(dev);
1165         sc->sc_dev = dev;
1166         sc->sc_blocked = 0;
1167         sc->sc_cid = -1;
1168
1169         if (safexcel_alloc_dev_resources(sc))
1170                 goto err;
1171
1172         if (safexcel_setup_dev_interrupts(sc))
1173                 goto err1;
1174
1175         if (safexcel_init_hw(sc))
1176                 goto err2;
1177
1178         for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
1179                 ring = &sc->sc_ring[ringidx];
1180
1181                 ring->cmd_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);
1182                 ring->res_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);
1183
1184                 ring->requests = mallocarray(SAFEXCEL_REQUESTS_PER_RING,
1185                     sizeof(struct safexcel_request), M_SAFEXCEL,
1186                     M_WAITOK | M_ZERO);
1187
1188                 for (i = 0; i < SAFEXCEL_REQUESTS_PER_RING; i++) {
1189                         req = &ring->requests[i];
1190                         req->sc = sc;
1191                         if (bus_dmamap_create(ring->data_dtag,
1192                             BUS_DMA_COHERENT, &req->dmap) != 0) {
1193                                 for (j = 0; j < i; j++)
1194                                         bus_dmamap_destroy(ring->data_dtag,
1195                                             ring->requests[j].dmap);
1196                                 goto err2;
1197                         }
1198                         if (safexcel_dma_alloc_mem(sc, &req->ctx,
1199                             sizeof(struct safexcel_context_record)) != 0) {
1200                                 for (j = 0; j < i; j++) {
1201                                         bus_dmamap_destroy(ring->data_dtag,
1202                                             ring->requests[j].dmap);
1203                                         safexcel_dma_free_mem(
1204                                             &ring->requests[j].ctx);
1205                                 }
1206                                 goto err2;
1207                         }
1208                         STAILQ_INSERT_TAIL(&ring->free_requests, req, link);
1209                 }
1210         }
1211
1212         sctx = device_get_sysctl_ctx(dev);
1213         SYSCTL_ADD_INT(sctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1214             OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->sc_debug, 0,
1215             "Debug message verbosity");
1216
1217         sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safexcel_session),
1218             CRYPTOCAP_F_HARDWARE);
1219         if (sc->sc_cid < 0)
1220                 goto err2;
1221
1222         safexcel_crypto_register(sc, CRYPTO_AES_CBC);
1223         safexcel_crypto_register(sc, CRYPTO_AES_ICM);
1224         safexcel_crypto_register(sc, CRYPTO_AES_XTS);
1225         safexcel_crypto_register(sc, CRYPTO_AES_CCM_16);
1226         safexcel_crypto_register(sc, CRYPTO_AES_CCM_CBC_MAC);
1227         safexcel_crypto_register(sc, CRYPTO_AES_NIST_GCM_16);
1228         safexcel_crypto_register(sc, CRYPTO_AES_128_NIST_GMAC);
1229         safexcel_crypto_register(sc, CRYPTO_AES_192_NIST_GMAC);
1230         safexcel_crypto_register(sc, CRYPTO_AES_256_NIST_GMAC);
1231         safexcel_crypto_register(sc, CRYPTO_SHA1);
1232         safexcel_crypto_register(sc, CRYPTO_SHA1_HMAC);
1233         safexcel_crypto_register(sc, CRYPTO_SHA2_224);
1234         safexcel_crypto_register(sc, CRYPTO_SHA2_224_HMAC);
1235         safexcel_crypto_register(sc, CRYPTO_SHA2_256);
1236         safexcel_crypto_register(sc, CRYPTO_SHA2_256_HMAC);
1237         safexcel_crypto_register(sc, CRYPTO_SHA2_384);
1238         safexcel_crypto_register(sc, CRYPTO_SHA2_384_HMAC);
1239         safexcel_crypto_register(sc, CRYPTO_SHA2_512);
1240         safexcel_crypto_register(sc, CRYPTO_SHA2_512_HMAC);
1241
1242         return (0);
1243
1244 err2:
1245         safexcel_teardown_dev_interrupts(sc);
1246 err1:
1247         safexcel_free_dev_resources(sc);
1248 err:
1249         return (ENXIO);
1250 }
1251
1252 static int
1253 safexcel_detach(device_t dev)
1254 {
1255         struct safexcel_ring *ring;
1256         struct safexcel_softc *sc;
1257         int i, ringidx;
1258
1259         sc = device_get_softc(dev);
1260
1261         if (sc->sc_cid >= 0)
1262                 crypto_unregister_all(sc->sc_cid);
1263         for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
1264                 ring = &sc->sc_ring[ringidx];
1265                 for (i = 0; i < SAFEXCEL_REQUESTS_PER_RING; i++) {
1266                         bus_dmamap_destroy(ring->data_dtag,
1267                             ring->requests[i].dmap);
1268                         safexcel_dma_free_mem(&ring->requests[i].ctx);
1269                 }
1270                 free(ring->requests, M_SAFEXCEL);
1271                 sglist_free(ring->cmd_data);
1272                 sglist_free(ring->res_data);
1273         }
1274         safexcel_deinit_hw(sc);
1275         safexcel_teardown_dev_interrupts(sc);
1276         safexcel_free_dev_resources(sc);
1277
1278         return (0);
1279 }
1280
1281 /*
1282  * Populate the request's context record with pre-computed key material.
1283  */
1284 static int
1285 safexcel_set_context(struct safexcel_request *req)
1286 {
1287         struct cryptop *crp;
1288         struct safexcel_context_record *ctx;
1289         struct safexcel_session *sess;
1290         uint8_t *data;
1291         int off;
1292
1293         crp = req->crp;
1294         sess = req->sess;
1295
1296         ctx = (struct safexcel_context_record *)req->ctx.vaddr;
1297         data = (uint8_t *)ctx->data;
1298         if (req->enc != NULL) {
1299                 if ((req->enc->crd_flags & CRD_F_KEY_EXPLICIT) != 0)
1300                         memcpy(data, req->enc->crd_key, sess->klen);
1301                 else
1302                         memcpy(data, sess->key, sess->klen);
1303                 off = sess->klen;
1304         } else {
1305                 off = 0;
1306         }
1307
1308         if (req->enc != NULL) {
1309                 switch (req->enc->crd_alg) {
1310                 case CRYPTO_AES_NIST_GCM_16:
1311                         memcpy(data + off, sess->ghash_key, GMAC_BLOCK_LEN);
1312                         off += GMAC_BLOCK_LEN;
1313                         break;
1314                 case CRYPTO_AES_CCM_16:
1315                         memcpy(data + off, sess->xcbc_key,
1316                             AES_BLOCK_LEN * 2 + sess->klen);
1317                         off += AES_BLOCK_LEN * 2 + sess->klen;
1318                         break;
1319                 case CRYPTO_AES_XTS:
1320                         memcpy(data + off, sess->tweak_key, sess->klen);
1321                         off += sess->klen;
1322                         break;
1323                 }
1324         }
1325
1326         if (req->mac != NULL) {
1327                 switch (req->mac->crd_alg) {
1328                 case CRYPTO_SHA1_HMAC:
1329                 case CRYPTO_SHA2_224_HMAC:
1330                 case CRYPTO_SHA2_256_HMAC:
1331                 case CRYPTO_SHA2_384_HMAC:
1332                 case CRYPTO_SHA2_512_HMAC:
1333                         memcpy(data + off, sess->hmac_ipad, sess->statelen);
1334                         off += sess->statelen;
1335                         memcpy(data + off, sess->hmac_opad, sess->statelen);
1336                         off += sess->statelen;
1337                         break;
1338                 }
1339         }
1340
1341         return (off);
1342 }
1343
1344 /*
1345  * Populate fields in the first command descriptor of the chain used to encode
1346  * the specified request.  These fields indicate the algorithms used, the size
1347  * of the key material stored in the associated context record, the primitive
1348  * operations to be performed on input data, and the location of the IV if any.
1349  */
1350 static void
1351 safexcel_set_command(struct safexcel_request *req,
1352     struct safexcel_cmd_descr *cdesc)
1353 {
1354         struct cryptop *crp;
1355         struct safexcel_session *sess;
1356         uint32_t ctrl0, ctrl1, ctxr_len;
1357         int alg;
1358
1359         crp = req->crp;
1360         sess = req->sess;
1361
1362         ctrl0 = sess->alg | sess->digest | sess->hash;
1363         ctrl1 = sess->mode;
1364
1365         ctxr_len = safexcel_set_context(req) / sizeof(uint32_t);
1366         ctrl0 |= SAFEXCEL_CONTROL0_SIZE(ctxr_len);
1367
1368         if (req->enc != NULL)
1369                 alg = req->enc->crd_alg;
1370         else
1371                 alg = req->mac->crd_alg;
1372
1373         switch (alg) {
1374         case CRYPTO_AES_CCM_16:
1375                 if ((req->enc->crd_flags & CRD_F_ENCRYPT) != 0) {
1376                         ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_ENCRYPT_OUT |
1377                             SAFEXCEL_CONTROL0_KEY_EN;
1378                 } else {
1379                         ctrl0 |= SAFEXCEL_CONTROL0_TYPE_DECRYPT_HASH_IN |
1380                             SAFEXCEL_CONTROL0_KEY_EN;
1381                 }
1382                 ctrl1 |= SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
1383                     SAFEXCEL_CONTROL1_IV2 | SAFEXCEL_CONTROL1_IV3;
1384                 break;
1385         case CRYPTO_AES_CBC:
1386         case CRYPTO_AES_ICM:
1387         case CRYPTO_AES_XTS:
1388                 if ((req->enc->crd_flags & CRD_F_ENCRYPT) != 0) {
1389                         ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
1390                             SAFEXCEL_CONTROL0_KEY_EN;
1391                         if (req->mac != NULL)
1392                                 ctrl0 |=
1393                                     SAFEXCEL_CONTROL0_TYPE_ENCRYPT_HASH_OUT;
1394                 } else {
1395                         ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
1396                             SAFEXCEL_CONTROL0_KEY_EN;
1397                         if (req->mac != NULL) {
1398                                 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
1399                                 ctrl1 |= SAFEXCEL_CONTROL1_HASH_STORE;
1400                         }
1401                 }
1402                 break;
1403         case CRYPTO_AES_NIST_GCM_16:
1404                 if ((req->enc->crd_flags & CRD_F_ENCRYPT) != 0) {
1405                         ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
1406                             SAFEXCEL_CONTROL0_KEY_EN |
1407                             SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
1408                 } else {
1409                         ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
1410                             SAFEXCEL_CONTROL0_KEY_EN |
1411                             SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
1412                 }
1413                 if (req->enc != NULL &&
1414                     req->enc->crd_alg == CRYPTO_AES_NIST_GCM_16) {
1415                         ctrl1 |= SAFEXCEL_CONTROL1_COUNTER_MODE |
1416                             SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
1417                             SAFEXCEL_CONTROL1_IV2;
1418                 }
1419                 break;
1420         case CRYPTO_SHA1:
1421         case CRYPTO_SHA2_224:
1422         case CRYPTO_SHA2_256:
1423         case CRYPTO_SHA2_384:
1424         case CRYPTO_SHA2_512:
1425                 ctrl0 |= SAFEXCEL_CONTROL0_RESTART_HASH;
1426                 /* FALLTHROUGH */
1427         case CRYPTO_SHA1_HMAC:
1428         case CRYPTO_SHA2_224_HMAC:
1429         case CRYPTO_SHA2_256_HMAC:
1430         case CRYPTO_SHA2_384_HMAC:
1431         case CRYPTO_SHA2_512_HMAC:
1432                 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
1433                 break;
1434         }
1435
1436         cdesc->control_data.control0 = ctrl0;
1437         cdesc->control_data.control1 = ctrl1;
1438 }
1439
1440 /*
1441  * Construct a no-op instruction, used to pad input tokens.
1442  */
1443 static void
1444 safexcel_instr_nop(struct safexcel_instr **instrp)
1445 {
1446         struct safexcel_instr *instr;
1447
1448         instr = *instrp;
1449         instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1450         instr->length = (1 << 2);
1451         instr->status = 0;
1452         instr->instructions = 0;
1453
1454         *instrp = instr + 1;
1455 }
1456
1457 /*
1458  * Insert the digest of the input payload.  This is typically the last
1459  * instruction of a sequence.
1460  */
1461 static void
1462 safexcel_instr_insert_digest(struct safexcel_instr **instrp, int len)
1463 {
1464         struct safexcel_instr *instr;
1465
1466         instr = *instrp;
1467         instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1468         instr->length = len;
1469         instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
1470             SAFEXCEL_INSTR_STATUS_LAST_PACKET;
1471         instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1472             SAFEXCEL_INSTR_INSERT_HASH_DIGEST;
1473
1474         *instrp = instr + 1;
1475 }
1476
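/*
 * Insert the computed digest into the output stream.  This is the
 * counterpart of safexcel_instr_insert_digest(), used on the decryption
 * side of an encrypt-then-authenticate request.
 */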
1477 static void
1478 safexcel_instr_retrieve_digest(struct safexcel_instr **instrp, int len)
1479 {
1480         struct safexcel_instr *instr;
1481
1482         instr = *instrp;
1483
1484         instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1485         instr->length = len;
1486         instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
1487             SAFEXCEL_INSTR_STATUS_LAST_PACKET;
1488         instr->instructions = SAFEXCEL_INSTR_INSERT_HASH_DIGEST |
1489             SAFEXCEL_INSTR_DEST_OUTPUT;
1490
1491         *instrp = instr + 1;
1492 }
1493
1494 /*
1495  * Retrieve and verify a digest.
1496  */
1497 static void
1498 safexcel_instr_verify_digest(struct safexcel_instr **instrp, int len)
1499 {
1500         struct safexcel_instr *instr;
1501
1502         instr = *instrp;
1503         instr->opcode = SAFEXCEL_INSTR_OPCODE_RETRIEVE;
1504         instr->length = len;
1505         instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
1506             SAFEXCEL_INSTR_STATUS_LAST_PACKET;
1507         instr->instructions = SAFEXCEL_INSTR_INSERT_HASH_DIGEST;
1508         instr++;
1509
1510         instr->opcode = SAFEXCEL_INSTR_OPCODE_VERIFY_FIELDS;
1511         instr->length = len | SAFEXCEL_INSTR_VERIFY_HASH;
1512         instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
1513             SAFEXCEL_INSTR_STATUS_LAST_PACKET;
1514         instr->instructions = SAFEXCEL_INSTR_VERIFY_PADDING;
1515
1516         *instrp = instr + 1;
1517 }
1518
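/*
 * Emit an insert-remove-result instruction followed by the insertion of a
 * temporary AES block destined for both the cipher and the output stream.
 * The GCM and CCM handlers use this between the AAD and the cipher payload.
 */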
1519 static void
1520 safexcel_instr_temp_aes_block(struct safexcel_instr **instrp)
1521 {
1522         struct safexcel_instr *instr;
1523
1524         instr = *instrp;
1525         instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT_REMOVE_RESULT;
1526         instr->length = 0;
1527         instr->status = 0;
1528         instr->instructions = AES_BLOCK_LEN;
1529         instr++;
1530
1531         instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1532         instr->length = AES_BLOCK_LEN;
1533         instr->status = 0;
1534         instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1535             SAFEXCEL_INSTR_DEST_CRYPTO;
1536
1537         *instrp = instr + 1;
1538 }
1539
1540 /*
1541  * Handle a request for an unauthenticated block cipher.
1542  */
1543 static void
1544 safexcel_instr_cipher(struct safexcel_request *req,
1545     struct safexcel_instr *instr, struct safexcel_cmd_descr *cdesc)
1546 {
1547         /* Insert the payload. */
1548         instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1549         instr->length = req->enc->crd_len;
1550         instr->status = SAFEXCEL_INSTR_STATUS_LAST_PACKET |
1551             SAFEXCEL_INSTR_STATUS_LAST_HASH;
1552         instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1553             SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_OUTPUT;
1554
1555         cdesc->additional_cdata_size = 1;
1556 }
1557
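/*
 * Handle a request for an authenticated block cipher
 * (encrypt-then-authenticate).
 */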
1558 static void
1559 safexcel_instr_eta(struct safexcel_request *req, struct safexcel_instr *instr,
1560     struct safexcel_cmd_descr *cdesc)
1561 {
1562         struct safexcel_instr *start;
1563
1564         start = instr;
1565
1566         /* Encrypt any data left in the request. */
1567         instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1568         instr->length = req->enc->crd_len;
1569         instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1570         instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1571             SAFEXCEL_INSTR_DEST_CRYPTO |
1572             SAFEXCEL_INSTR_DEST_HASH |
1573             SAFEXCEL_INSTR_DEST_OUTPUT;
1574         instr++;
1575
1576         /*
1577          * Compute the digest, or extract it and place it in the output stream.
1578          */
1579         if ((req->enc->crd_flags & CRD_F_ENCRYPT) != 0)
1580                 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1581         else
1582                 safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
1583         cdesc->additional_cdata_size = instr - start;
1584 }
1585
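/*
 * Handle a plain or HMAC SHA hash request.
 */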
1586 static void
1587 safexcel_instr_sha_hash(struct safexcel_request *req,
1588     struct safexcel_instr *instr)
1589 {
1590         struct cryptop *crp;
1591         struct safexcel_instr *start;
1592
1593         crp = req->crp;
1594         start = instr;
1595
1596         /* Pass the input data to the hash engine. */
1597         instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1598         instr->length = req->mac->crd_len;
1599         instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1600         instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1601         instr++;
1602
1603         /* Insert the hash result into the output stream. */
1604         safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1605
1606         /* Pad the rest of the inline instruction space. */
1607         while (instr != start + SAFEXCEL_MAX_ITOKENS)
1608                 safexcel_instr_nop(&instr);
1609 }
1610
1611 static void
1612 safexcel_instr_ccm(struct safexcel_request *req, struct safexcel_instr *instr,
1613     struct safexcel_cmd_descr *cdesc)
1614 {
1615         struct cryptop *crp;
1616         struct safexcel_instr *start;
1617         uint8_t *a0, *b0, *alenp, L;
1618         int aalign, blen;
1619
1620         crp = req->crp;
1621         start = instr;
1622
1623         /*
1624          * Construct two blocks, A0 and B0, used in encryption and
1625          * authentication, respectively.  A0 is embedded in the token
1626          * descriptor, and B0 is inserted directly into the data stream using
1627          * instructions below.
1628          *
1629          * OCF seems to assume a 12-byte IV, fixing L (the payload length size)
1630          * at 3 bytes due to the layout of B0.  This is fine since the driver
1631          * only supports requests of up to 65535 bytes anyway.
1632          */
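        /*
         * For reference, B0 is laid out as specified by RFC 3610: byte 0
         * holds the flags (the AAD present bit, the encoded tag length and
         * L - 1), bytes 1-12 hold the nonce, and the trailing L bytes hold
         * the message length.
         */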
1633         blen = AES_BLOCK_LEN;
1634         L = 3;
1635
1636         a0 = (uint8_t *)&cdesc->control_data.token[0];
1637         memset(a0, 0, blen);
1638         a0[0] = L - 1;
1639         memcpy(&a0[1], req->iv, AES_CCM_IV_LEN);
1640
1641         /*
1642          * Insert B0 and the AAD length into the input stream.
1643          */
1644         instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1645         instr->length = blen + (req->mac->crd_len > 0 ? 2 : 0);
1646         instr->status = 0;
1647         instr->instructions = SAFEXCEL_INSTR_DEST_HASH |
1648             SAFEXCEL_INSTR_INSERT_IMMEDIATE;
1649         instr++;
1650
1651         b0 = (uint8_t *)instr;
1652         memset(b0, 0, blen);
1653         b0[0] =
1654             (L - 1) | /* payload length size */
1655             ((CCM_CBC_MAX_DIGEST_LEN - 2) / 2) << 3 /* digest length */ |
1656             (req->mac->crd_len > 0 ? 1 : 0) << 6 /* AAD present bit */;
1657         memcpy(&b0[1], req->iv, AES_CCM_IV_LEN);
1658         b0[14] = req->enc->crd_len >> 8;
1659         b0[15] = req->enc->crd_len & 0xff;
1660         instr += blen / sizeof(*instr);
1661
1662         /* Insert the AAD length and data into the input stream. */
1663         if (req->mac->crd_len > 0) {
1664                 alenp = (uint8_t *)instr;
1665                 alenp[0] = req->mac->crd_len >> 8;
1666                 alenp[1] = req->mac->crd_len & 0xff;
1667                 alenp[2] = 0;
1668                 alenp[3] = 0;
1669                 instr++;
1670
1671                 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1672                 instr->length = req->mac->crd_len;
1673                 instr->status = 0;
1674                 instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1675                 instr++;
1676
1677                 /* Insert zero padding. */
1678                 aalign = (req->mac->crd_len + 2) & (blen - 1);
1679                 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1680                 instr->length = aalign == 0 ? 0 :
1681                     blen - ((req->mac->crd_len + 2) & (blen - 1));
1682                 instr->status = req->enc->crd_len == 0 ?
1683                     SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1684                 instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1685                 instr++;
1686         }
1687
1688         safexcel_instr_temp_aes_block(&instr);
1689
1690         /* Insert the cipher payload into the input stream. */
1691         if (req->enc->crd_len > 0) {
1692                 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1693                 instr->length = req->enc->crd_len;
1694                 instr->status = (req->enc->crd_len & (blen - 1)) == 0 ?
1695                     SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1696                 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1697                     SAFEXCEL_INSTR_DEST_CRYPTO |
1698                     SAFEXCEL_INSTR_DEST_HASH |
1699                     SAFEXCEL_INSTR_INS_LAST;
1700                 instr++;
1701
1702                 /* Insert zero padding. */
1703                 if (req->enc->crd_len & (blen - 1)) {
1704                         instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1705                         instr->length = blen -
1706                             (req->enc->crd_len & (blen - 1));
1707                         instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1708                         instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1709                         instr++;
1710                 }
1711         }
1712
1713         /*
1714          * Compute the digest, or extract it and place it in the output stream.
1715          */
1716         if ((req->enc->crd_flags & CRD_F_ENCRYPT) != 0)
1717                 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1718         else
1719                 safexcel_instr_verify_digest(&instr, req->sess->digestlen);
1720
1721         cdesc->additional_cdata_size = instr - start;
1722 }
1723
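/*
 * Handle an AES-GCM request.  The 12-byte IV followed by an initial block
 * counter of 1 is placed in the token to form the initial counter block (J0).
 */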
1724 static void
1725 safexcel_instr_gcm(struct safexcel_request *req, struct safexcel_instr *instr,
1726     struct safexcel_cmd_descr *cdesc)
1727 {
1728         struct cryptop *crp;
1729         struct safexcel_instr *start;
1730
1731         memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN);
1732         cdesc->control_data.token[3] = htobe32(1);
1733
1734         crp = req->crp;
1735         start = instr;
1736
1737         /* Insert the AAD into the input stream. */
1738         instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1739         instr->length = req->mac->crd_len;
1740         instr->status = req->enc->crd_len == 0 ?
1741             SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1742         instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1743             SAFEXCEL_INSTR_DEST_HASH;
1744         instr++;
1745
1746         safexcel_instr_temp_aes_block(&instr);
1747
1748         /* Insert the cipher payload into the input stream. */
1749         if (req->enc->crd_len > 0) {
1750                 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1751                 instr->length = req->enc->crd_len;
1752                 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1753                 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1754                     SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_HASH |
1755                     SAFEXCEL_INSTR_INS_LAST;
1756                 instr++;
1757         }
1758
1759         /*
1760          * Compute the digest, or extract it and place it in the output stream.
1761          */
1762         if ((req->enc->crd_flags & CRD_F_ENCRYPT) != 0)
1763                 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1764         else
1765                 safexcel_instr_verify_digest(&instr, req->sess->digestlen);
1766
1767         cdesc->additional_cdata_size = instr - start;
1768 }
1769
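/*
 * Construct the token describing how the packet engine is to process the
 * request: the control words set up by safexcel_set_command() plus a
 * sequence of processing instructions.
 */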
1770 static void
1771 safexcel_set_token(struct safexcel_request *req)
1772 {
1773         struct safexcel_cmd_descr *cdesc;
1774         struct safexcel_instr *instr;
1775         struct safexcel_softc *sc;
1776         int ringidx;
1777
1778         cdesc = req->cdesc;
1779         sc = req->sc;
1780         ringidx = req->sess->ringidx;
1781
1782         safexcel_set_command(req, cdesc);
1783
1784         /*
1785          * For keyless hash operations, the token instructions can be embedded
1786          * in the token itself.  Otherwise we use an additional token descriptor
1787          * and the embedded instruction space is used to store the IV.
1788          */
1789         if (req->enc == NULL) {
1790                 instr = (void *)cdesc->control_data.token;
1791         } else {
1792                 instr = (void *)(sc->sc_ring[ringidx].dma_atok.vaddr +
1793                     sc->sc_config.atok_offset *
1794                     (cdesc - sc->sc_ring[ringidx].cdr.desc));
1795                 cdesc->control_data.options |= SAFEXCEL_OPTION_4_TOKEN_IV_CMD;
1796         }
1797
1798         if (req->enc != NULL) {
1799                 switch (req->enc->crd_alg) {
1800                 case CRYPTO_AES_NIST_GCM_16:
1801                         safexcel_instr_gcm(req, instr, cdesc);
1802                         break;
1803                 case CRYPTO_AES_CCM_16:
1804                         safexcel_instr_ccm(req, instr, cdesc);
1805                         break;
1806                 case CRYPTO_AES_XTS:
1807                         memcpy(cdesc->control_data.token, req->iv, AES_XTS_IV_LEN);
1808                         memset(cdesc->control_data.token +
1809                             AES_XTS_IV_LEN / sizeof(uint32_t), 0, AES_XTS_IV_LEN);
1810
1811                         safexcel_instr_cipher(req, instr, cdesc);
1812                         break;
1813                 case CRYPTO_AES_CBC:
1814                 case CRYPTO_AES_ICM:
1815                         memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_LEN);
1816                         if (req->mac != NULL)
1817                                 safexcel_instr_eta(req, instr, cdesc);
1818                         else
1819                                 safexcel_instr_cipher(req, instr, cdesc);
1820                         break;
1821                 }
1822         } else {
1823                 switch (req->mac->crd_alg) {
1824                 case CRYPTO_SHA1:
1825                 case CRYPTO_SHA1_HMAC:
1826                 case CRYPTO_SHA2_224:
1827                 case CRYPTO_SHA2_224_HMAC:
1828                 case CRYPTO_SHA2_256:
1829                 case CRYPTO_SHA2_256_HMAC:
1830                 case CRYPTO_SHA2_384:
1831                 case CRYPTO_SHA2_384_HMAC:
1832                 case CRYPTO_SHA2_512:
1833                 case CRYPTO_SHA2_512_HMAC:
1834                         safexcel_instr_sha_hash(req, instr);
1835                         break;
1836                 default:
1837                         panic("unhandled auth request %d", req->mac->crd_alg);
1838                 }
1839         }
1840 }
1841
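/*
 * Reserve and initialize the next descriptor in the result descriptor ring,
 * or return NULL if the ring is full.
 */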
1842 static struct safexcel_res_descr *
1843 safexcel_res_descr_add(struct safexcel_ring *ring, bool first, bool last,
1844     bus_addr_t data, uint32_t len)
1845 {
1846         struct safexcel_res_descr *rdesc;
1847         struct safexcel_res_descr_ring *rring;
1848
1849         mtx_assert(&ring->mtx, MA_OWNED);
1850
1851         rring = &ring->rdr;
1852         if ((rring->write + 1) % SAFEXCEL_RING_SIZE == rring->read)
1853                 return (NULL);
1854
1855         rdesc = &rring->desc[rring->write];
1856         rring->write = (rring->write + 1) % SAFEXCEL_RING_SIZE;
1857
1858         rdesc->particle_size = len;
1859         rdesc->rsvd0 = 0;
1860         rdesc->descriptor_overflow = 0;
1861         rdesc->buffer_overflow = 0;
1862         rdesc->last_seg = last;
1863         rdesc->first_seg = first;
1864         rdesc->result_size =
1865             sizeof(struct safexcel_res_data) / sizeof(uint32_t);
1866         rdesc->rsvd1 = 0;
1867         rdesc->data_lo = SAFEXCEL_ADDR_LO(data);
1868         rdesc->data_hi = SAFEXCEL_ADDR_HI(data);
1869
1870         if (first) {
1871                 rdesc->result_data.packet_length = 0;
1872                 rdesc->result_data.error_code = 0;
1873         }
1874
1875         return (rdesc);
1876 }
1877
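/*
 * Reserve and initialize the next descriptor in the command descriptor ring,
 * or return NULL if the ring is full.
 */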
1878 static struct safexcel_cmd_descr *
1879 safexcel_cmd_descr_add(struct safexcel_ring *ring, bool first, bool last,
1880     bus_addr_t data, uint32_t seglen, uint32_t reqlen, bus_addr_t context)
1881 {
1882         struct safexcel_cmd_descr *cdesc;
1883         struct safexcel_cmd_descr_ring *cring;
1884
1885         KASSERT(reqlen <= SAFEXCEL_MAX_REQUEST_SIZE,
1886             ("%s: request length %u too long", __func__, reqlen));
1887         mtx_assert(&ring->mtx, MA_OWNED);
1888
1889         cring = &ring->cdr;
1890         if ((cring->write + 1) % SAFEXCEL_RING_SIZE == cring->read)
1891                 return (NULL);
1892
1893         cdesc = &cring->desc[cring->write];
1894         cring->write = (cring->write + 1) % SAFEXCEL_RING_SIZE;
1895
1896         cdesc->particle_size = seglen;
1897         cdesc->rsvd0 = 0;
1898         cdesc->last_seg = last;
1899         cdesc->first_seg = first;
1900         cdesc->additional_cdata_size = 0;
1901         cdesc->rsvd1 = 0;
1902         cdesc->data_lo = SAFEXCEL_ADDR_LO(data);
1903         cdesc->data_hi = SAFEXCEL_ADDR_HI(data);
1904         if (first) {
1905                 cdesc->control_data.packet_length = reqlen;
1906                 cdesc->control_data.options = SAFEXCEL_OPTION_IP |
1907                     SAFEXCEL_OPTION_CP | SAFEXCEL_OPTION_CTX_CTRL_IN_CMD |
1908                     SAFEXCEL_OPTION_RC_AUTO;
1909                 cdesc->control_data.type = SAFEXCEL_TOKEN_TYPE_BYPASS;
1910                 cdesc->control_data.context_lo = SAFEXCEL_ADDR_LO(context) |
1911                     SAFEXCEL_CONTEXT_SMALL;
1912                 cdesc->control_data.context_hi = SAFEXCEL_ADDR_HI(context);
1913         }
1914
1915         return (cdesc);
1916 }
1917
1918 static void
1919 safexcel_cmd_descr_rollback(struct safexcel_ring *ring, int count)
1920 {
1921         struct safexcel_cmd_descr_ring *cring;
1922
1923         mtx_assert(&ring->mtx, MA_OWNED);
1924
1925         cring = &ring->cdr;
1926         cring->write -= count;
1927         if (cring->write < 0)
1928                 cring->write += SAFEXCEL_RING_SIZE;
1929 }
1930
1931 static void
1932 safexcel_res_descr_rollback(struct safexcel_ring *ring, int count)
1933 {
1934         struct safexcel_res_descr_ring *rring;
1935
1936         mtx_assert(&ring->mtx, MA_OWNED);
1937
1938         rring = &ring->rdr;
1939         rring->write -= count;
1940         if (rring->write < 0)
1941                 rring->write += SAFEXCEL_RING_SIZE;
1942 }
1943
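/*
 * Append the subrange [start, start + len) of the given busdma segment array
 * to a scatter/gather list.
 */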
1944 static void
1945 safexcel_append_segs(bus_dma_segment_t *segs, int nseg, struct sglist *sg,
1946     int start, int len)
1947 {
1948         bus_dma_segment_t *seg;
1949         size_t seglen;
1950         int error, i;
1951
1952         for (i = 0; i < nseg && len > 0; i++) {
1953                 seg = &segs[i];
1954
1955                 if (seg->ds_len <= start) {
1956                         start -= seg->ds_len;
1957                         continue;
1958                 }
1959
1960                 seglen = MIN(len, seg->ds_len - start);
1961                 error = sglist_append_phys(sg, seg->ds_addr + start, seglen);
1962                 if (error != 0)
1963                         panic("%s: ran out of segments: %d", __func__, error);
1964                 len -= seglen;
1965                 start = 0;
1966         }
1967
1968         KASSERT(len == 0, ("%s: %d residual bytes", __func__, len));
1969 }
1970
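/*
 * Callback invoked by busdma once the request's data buffer has been loaded;
 * convert the resulting segments into command and result descriptors.
 */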
1971 static void
1972 safexcel_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg,
1973     int error)
1974 {
1975         struct cryptop *crp;
1976         struct safexcel_cmd_descr *cdesc;
1977         struct safexcel_request *req;
1978         struct safexcel_ring *ring;
1979         struct safexcel_session *sess;
1980         struct sglist *sg;
1981         size_t inlen;
1982         int i;
1983         bool first, last;
1984
1985         req = arg;
1986         if (error != 0) {
1987                 req->error = error;
1988                 return;
1989         }
1990
1991         crp = req->crp;
1992         sess = req->sess;
1993         ring = &req->sc->sc_ring[sess->ringidx];
1994
1995         mtx_assert(&ring->mtx, MA_OWNED);
1996
1997         /*
1998          * Set up descriptors for input and output data.
1999          *
2000          * The processing engine programs require that any AAD comes first,
2001          * followed by the cipher payload, followed by the digest.  Some
2002          * consumers place the digest first in the input buffer, in which case
2003          * we have to create an extra descriptor.
2004          *
2005          * As an optimization, unmodified data is not passed to the output
2006          * stream.
2007          */
2008         sglist_reset(ring->cmd_data);
2009         sglist_reset(ring->res_data);
2010         if (req->mac != NULL && (req->enc == NULL ||
2011             req->enc->crd_alg == CRYPTO_AES_NIST_GCM_16 ||
2012             req->enc->crd_alg == CRYPTO_AES_CCM_16)) {
2013                 safexcel_append_segs(segs, nseg, ring->cmd_data,
2014                     req->mac->crd_skip, req->mac->crd_len);
2015         }
2016         if (req->enc != NULL) {
2017                 safexcel_append_segs(segs, nseg, ring->cmd_data,
2018                     req->enc->crd_skip, req->enc->crd_len);
2019                 safexcel_append_segs(segs, nseg, ring->res_data,
2020                     req->enc->crd_skip, req->enc->crd_len);
2021         }
2022         if (sess->digestlen > 0) {
2023                 if (req->enc == NULL ||
2024                     (req->enc->crd_flags & CRD_F_ENCRYPT) != 0)
2025                         safexcel_append_segs(segs, nseg, ring->res_data,
2026                             req->mac->crd_inject, sess->digestlen);
2027                 else if (req->enc->crd_alg == CRYPTO_AES_NIST_GCM_16 ||
2028                     req->enc->crd_alg == CRYPTO_AES_CCM_16) {
2029                         safexcel_append_segs(segs, nseg, ring->cmd_data,
2030                             req->mac->crd_inject, sess->digestlen);
2031                 } else {
2032                         safexcel_append_segs(segs, nseg, ring->res_data,
2033                             req->mac->crd_inject, sess->digestlen);
2034                 }
2035         }
2036
2037         sg = ring->cmd_data;
2038         if (sg->sg_nseg == 0) {
2039                 /*
2040                  * Fake a segment for the command descriptor if the input has
2041                  * length zero.  The EIP97 apparently does not handle
2042                  * zero-length packets properly since subsequent requests return
2043                  * bogus errors, so provide a dummy segment using the context
2044                  * descriptor.
2045                  */
2046                 (void)sglist_append_phys(sg, req->ctx.paddr, 1);
2047         }
2048         for (i = 0, inlen = 0; i < sg->sg_nseg; i++)
2049                 inlen += sg->sg_segs[i].ss_len;
2050         for (i = 0; i < sg->sg_nseg; i++) {
2051                 first = i == 0;
2052                 last = i == sg->sg_nseg - 1;
2053
2054                 cdesc = safexcel_cmd_descr_add(ring, first, last,
2055                     sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len,
2056                     (uint32_t)inlen, req->ctx.paddr);
2057                 if (cdesc == NULL) {
2058                         safexcel_cmd_descr_rollback(ring, i);
2059                         req->error = EAGAIN;
2060                         return;
2061                 }
2062                 if (i == 0)
2063                         req->cdesc = cdesc;
2064         }
2065         req->cdescs = sg->sg_nseg;
2066
2067         sg = ring->res_data;
2068         if (sg->sg_nseg == 0) {
2069                 /*
2070                  * We need a result descriptor even if the output stream will be
2071                  * empty, for example when verifying an AAD digest.
2072                  */
2073                 sg->sg_segs[0].ss_paddr = 0;
2074                 sg->sg_segs[0].ss_len = 0;
2075                 sg->sg_nseg = 1;
2076         }
2077         for (i = 0; i < sg->sg_nseg; i++) {
2078                 first = i == 0;
2079                 last = i == sg->sg_nseg - 1;
2080
2081                 if (safexcel_res_descr_add(ring, first, last,
2082                     sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len) == NULL) {
2083                         safexcel_cmd_descr_rollback(ring,
2084                             ring->cmd_data->sg_nseg);
2085                         safexcel_res_descr_rollback(ring, i);
2086                         req->error = EAGAIN;
2087                         return;
2088                 }
2089         }
2090         req->rdescs = sg->sg_nseg;
2091 }
2092
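/*
 * Adapt the callback signature used by bus_dmamap_load_mbuf() and
 * bus_dmamap_load_uio(), which supply an extra mapsize argument, to
 * safexcel_create_chain_cb().
 */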
2093 static void
2094 safexcel_create_chain_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
2095     bus_size_t mapsize __unused, int error)
2096 {
2097         safexcel_create_chain_cb(arg, segs, nseg, error);
2098 }
2099
2100 #include <sys/uio.h>
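/*
 * Load the request's data buffer for DMA and construct the command and result
 * descriptor chains.
 */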
2101 static int
2102 safexcel_create_chain(struct safexcel_ring *ring, struct safexcel_request *req)
2103 {
2104         struct cryptop *crp;
2105         int error;
2106
2107         req->error = 0;
2108         req->cdescs = req->rdescs = 0;
2109         crp = req->crp;
2110
2111         if ((crp->crp_flags & CRYPTO_F_IOV) != 0) {
2112                 error = bus_dmamap_load_uio(ring->data_dtag, req->dmap,
2113                     (struct uio *)crp->crp_buf, safexcel_create_chain_cb2,
2114                     req, BUS_DMA_NOWAIT);
2115         } else if ((crp->crp_flags & CRYPTO_F_IMBUF) != 0) {
2116                 error = bus_dmamap_load_mbuf(ring->data_dtag, req->dmap,
2117                     (struct mbuf *)crp->crp_buf, safexcel_create_chain_cb2,
2118                     req, BUS_DMA_NOWAIT);
2119         } else {
2120                 error = bus_dmamap_load(ring->data_dtag, req->dmap,
2121                     crp->crp_buf, crp->crp_ilen,
2122                     safexcel_create_chain_cb, req, BUS_DMA_NOWAIT);
2123         }
2124         if (error == 0)
2125                 req->dmap_loaded = true;
2126         else if (req->error != 0)
2127                 error = req->error;
2128         return (error);
2129 }
2130
2131 /*
2132  * Determine whether the driver can implement a session with the requested
2133  * parameters.
2134  */
2135 static int
2136 safexcel_probesession(struct cryptoini *enc, struct cryptoini *mac)
2137 {
2138         if (enc != NULL) {
2139                 switch (enc->cri_alg) {
2140                 case CRYPTO_AES_NIST_GCM_16:
2141                         if (mac == NULL ||
2142                             (mac->cri_alg != CRYPTO_AES_128_NIST_GMAC &&
2143                              mac->cri_alg != CRYPTO_AES_192_NIST_GMAC &&
2144                              mac->cri_alg != CRYPTO_AES_256_NIST_GMAC))
2145                                 return (EINVAL);
2146                         break;
2147                 case CRYPTO_AES_CCM_16:
2148                         if (mac == NULL ||
2149                             mac->cri_alg != CRYPTO_AES_CCM_CBC_MAC)
2150                                 return (EINVAL);
2151                         break;
2152                 case CRYPTO_AES_CBC:
2153                 case CRYPTO_AES_ICM:
2154                         if (mac != NULL &&
2155                             mac->cri_alg != CRYPTO_SHA1_HMAC &&
2156                             mac->cri_alg != CRYPTO_SHA2_224_HMAC &&
2157                             mac->cri_alg != CRYPTO_SHA2_256_HMAC &&
2158                             mac->cri_alg != CRYPTO_SHA2_384_HMAC &&
2159                             mac->cri_alg != CRYPTO_SHA2_512_HMAC)
2160                                 return (EINVAL);
2161                         break;
2162                 case CRYPTO_AES_XTS:
2163                         if (mac != NULL)
2164                                 return (EINVAL);
2165                         break;
2166                 default:
2167                         return (EINVAL);
2168                 }
2169         } else {
2170                 switch (mac->cri_alg) {
2171                 case CRYPTO_SHA1:
2172                 case CRYPTO_SHA1_HMAC:
2173                 case CRYPTO_SHA2_224:
2174                 case CRYPTO_SHA2_224_HMAC:
2175                 case CRYPTO_SHA2_256:
2176                 case CRYPTO_SHA2_256_HMAC:
2177                 case CRYPTO_SHA2_384:
2178                 case CRYPTO_SHA2_384_HMAC:
2179                 case CRYPTO_SHA2_512:
2180                 case CRYPTO_SHA2_512_HMAC:
2181                         break;
2182                 default:
2183                         return (EINVAL);
2184                 }
2185         }
2186
2187         return (0);
2188 }
2189
2190 /*
2191  * Pre-compute the hash key used in GHASH, which is a block of zeroes encrypted
2192  * using the cipher key.
2193  */
2194 static void
2195 safexcel_setkey_ghash(struct safexcel_session *sess, const uint8_t *key,
2196     int klen)
2197 {
2198         uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)];
2199         uint8_t zeros[AES_BLOCK_LEN];
2200         int i, rounds;
2201
2202         memset(zeros, 0, sizeof(zeros));
2203
2204         rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY);
2205         rijndaelEncrypt(ks, rounds, zeros, (uint8_t *)sess->ghash_key);
2206         for (i = 0; i < GMAC_BLOCK_LEN / sizeof(uint32_t); i++)
2207                 sess->ghash_key[i] = htobe32(sess->ghash_key[i]);
2208
2209         explicit_bzero(ks, sizeof(ks));
2210 }
2211
2212 /*
2213  * Pre-compute the combined CBC-MAC key, which consists of three keys K1, K2, K3
2214  * in the hardware implementation.  K1 is the cipher key and comes last in the
2215  * buffer since K2 and K3 have a fixed size of AES_BLOCK_LEN.  For now XCBC-MAC
2216  * is not implemented, so K2 and K3 are simply left zeroed.
2217  */
2218 static void
2219 safexcel_setkey_xcbcmac(struct safexcel_session *sess, const uint8_t *key,
2220     int klen)
2221 {
2222         int i, off;
2223
2224         memset(sess->xcbc_key, 0, sizeof(sess->xcbc_key));
2225         off = 2 * AES_BLOCK_LEN / sizeof(uint32_t);
2226         for (i = 0; i < klen / sizeof(uint32_t); i++, key += 4)
2227                 sess->xcbc_key[i + off] = htobe32(le32dec(key));
2228 }
2229
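/*
 * Export an intermediate hash state from the software auth context in the
 * big-endian layout expected by the packet engine.
 */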
2230 static void
2231 safexcel_setkey_hmac_digest(struct auth_hash *ahash, union authctx *ctx,
2232     char *buf)
2233 {
2234         int hashwords, i;
2235
2236         switch (ahash->type) {
2237         case CRYPTO_SHA1_HMAC:
2238                 hashwords = ahash->hashsize / sizeof(uint32_t);
2239                 for (i = 0; i < hashwords; i++)
2240                         ((uint32_t *)buf)[i] = htobe32(ctx->sha1ctx.h.b32[i]);
2241                 break;
2242         case CRYPTO_SHA2_224_HMAC:
2243                 hashwords = auth_hash_hmac_sha2_256.hashsize / sizeof(uint32_t);
2244                 for (i = 0; i < hashwords; i++)
2245                         ((uint32_t *)buf)[i] = htobe32(ctx->sha224ctx.state[i]);
2246                 break;
2247         case CRYPTO_SHA2_256_HMAC:
2248                 hashwords = ahash->hashsize / sizeof(uint32_t);
2249                 for (i = 0; i < hashwords; i++)
2250                         ((uint32_t *)buf)[i] = htobe32(ctx->sha256ctx.state[i]);
2251                 break;
2252         case CRYPTO_SHA2_384_HMAC:
2253                 hashwords = auth_hash_hmac_sha2_512.hashsize / sizeof(uint64_t);
2254                 for (i = 0; i < hashwords; i++)
2255                         ((uint64_t *)buf)[i] = htobe64(ctx->sha384ctx.state[i]);
2256                 break;
2257         case CRYPTO_SHA2_512_HMAC:
2258                 hashwords = ahash->hashsize / sizeof(uint64_t);
2259                 for (i = 0; i < hashwords; i++)
2260                         ((uint64_t *)buf)[i] = htobe64(ctx->sha512ctx.state[i]);
2261                 break;
2262         }
2263 }
2264
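/*
 * Initialize a hash context with the HMAC key XORed with the specified pad
 * byte, hashing the key down to one block first if it is too long.
 */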
2265 static void
2266 safexcel_hmac_init_pad(struct auth_hash *axf, const char *key, int klen,
2267     union authctx *auth_ctx, uint8_t padval)
2268 {
2269         uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
2270         u_int i;
2271
2272         memset(hmac_key, 0, sizeof(hmac_key));
2273         if (klen > axf->blocksize) {
2274                 axf->Init(auth_ctx);
2275                 axf->Update(auth_ctx, key, klen);
2276                 axf->Final(hmac_key, auth_ctx);
2277                 klen = axf->hashsize;
2278         } else {
2279                 memcpy(hmac_key, key, klen);
2280         }
2281
2282         for (i = 0; i < axf->blocksize; i++)
2283                 hmac_key[i] ^= padval;
2284
2285         axf->Init(auth_ctx);
2286         axf->Update(auth_ctx, hmac_key, axf->blocksize);
2287         explicit_bzero(hmac_key, sizeof(hmac_key));
2288 }
2289
2290 /*
2291  * Pre-compute the inner and outer digests used in the HMAC algorithm.
2292  */
2293 static void
2294 safexcel_setkey_hmac(struct safexcel_session *sess, int alg, const uint8_t *key,
2295     int klen)
2296 {
2297         union authctx ctx;
2298         struct auth_hash *ahash;
2299
2300         switch (alg) {
2301         case CRYPTO_SHA1_HMAC:
2302                 ahash = &auth_hash_hmac_sha1;
2303                 break;
2304         case CRYPTO_SHA2_224_HMAC:
2305                 ahash = &auth_hash_hmac_sha2_224;
2306                 break;
2307         case CRYPTO_SHA2_256_HMAC:
2308                 ahash = &auth_hash_hmac_sha2_256;
2309                 break;
2310         case CRYPTO_SHA2_384_HMAC:
2311                 ahash = &auth_hash_hmac_sha2_384;
2312                 break;
2313         case CRYPTO_SHA2_512_HMAC:
2314                 ahash = &auth_hash_hmac_sha2_512;
2315                 break;
2316         default:
2317                 panic("%s: unknown algorithm %d", __func__, alg);
2318         }
2319
2320         safexcel_hmac_init_pad(ahash, key, klen, &ctx, HMAC_IPAD_VAL);
2321         safexcel_setkey_hmac_digest(ahash, &ctx, sess->hmac_ipad);
2322         safexcel_hmac_init_pad(ahash, key, klen, &ctx, HMAC_OPAD_VAL);
2323         safexcel_setkey_hmac_digest(ahash, &ctx, sess->hmac_opad);
2324         explicit_bzero(&ctx, ahash->ctxsize);
2325 }
2326
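/*
 * The XTS tweak key is simply the second half of the supplied key.
 */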
2327 static void
2328 safexcel_setkey_xts(struct safexcel_session *sess, const uint8_t *key, int klen)
2329 {
2330         memcpy(sess->tweak_key, key + klen / 2, klen / 2);
2331 }
2332
2333 static uint32_t
2334 safexcel_aes_algid(int keylen)
2335 {
2336         switch (keylen) {
2337         case 128:
2338                 return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES128);
2339         case 192:
2340                 return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES192);
2341         case 256:
2342                 return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES256);
2343         default:
2344                 panic("invalid AES key length %d", keylen);
2345         }
2346 }
2347
2348 static uint32_t
2349 safexcel_aes_ccm_hashid(int keylen)
2350 {
2351         switch (keylen) {
2352         case 128:
2353                 return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC128);
2354         case 192:
2355                 return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC192);
2356         case 256:
2357                 return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC256);
2358         default:
2359                 panic("invalid AES key length %d", keylen);
2360         }
2361 }
2362
2363 static uint32_t
2364 safexcel_sha_hashid(int alg)
2365 {
2366         switch (alg) {
2367         case CRYPTO_SHA1:
2368         case CRYPTO_SHA1_HMAC:
2369                 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA1);
2370         case CRYPTO_SHA2_224:
2371         case CRYPTO_SHA2_224_HMAC:
2372                 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA224);
2373         case CRYPTO_SHA2_256:
2374         case CRYPTO_SHA2_256_HMAC:
2375                 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA256);
2376         case CRYPTO_SHA2_384:
2377         case CRYPTO_SHA2_384_HMAC:
2378                 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA384);
2379         case CRYPTO_SHA2_512:
2380         case CRYPTO_SHA2_512_HMAC:
2381                 return (SAFEXCEL_CONTROL0_HASH_ALG_SHA512);
2382         default:
2383                 __assert_unreachable();
2384         }
2385 }
2386
2387 static int
2388 safexcel_sha_hashlen(int alg)
2389 {
2390         switch (alg) {
2391         case CRYPTO_SHA1:
2392         case CRYPTO_SHA1_HMAC:
2393                 return (SHA1_HASH_LEN);
2394         case CRYPTO_SHA2_224:
2395         case CRYPTO_SHA2_224_HMAC:
2396                 return (SHA2_224_HASH_LEN);
2397         case CRYPTO_SHA2_256:
2398         case CRYPTO_SHA2_256_HMAC:
2399                 return (SHA2_256_HASH_LEN);
2400         case CRYPTO_SHA2_384:
2401         case CRYPTO_SHA2_384_HMAC:
2402                 return (SHA2_384_HASH_LEN);
2403         case CRYPTO_SHA2_512:
2404         case CRYPTO_SHA2_512_HMAC:
2405                 return (SHA2_512_HASH_LEN);
2406         default:
2407                 __assert_unreachable();
2408         }
2409 }
2410
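/*
 * Return the size of the intermediate hash state stored in the context
 * record.  SHA-224 and SHA-384 use the SHA-256 and SHA-512 state sizes,
 * respectively.
 */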
2411 static int
2412 safexcel_sha_statelen(int alg)
2413 {
2414         switch (alg) {
2415         case CRYPTO_SHA1:
2416         case CRYPTO_SHA1_HMAC:
2417                 return (SHA1_HASH_LEN);
2418         case CRYPTO_SHA2_224:
2419         case CRYPTO_SHA2_224_HMAC:
2420         case CRYPTO_SHA2_256:
2421         case CRYPTO_SHA2_256_HMAC:
2422                 return (SHA2_256_HASH_LEN);
2423         case CRYPTO_SHA2_384:
2424         case CRYPTO_SHA2_384_HMAC:
2425         case CRYPTO_SHA2_512:
2426         case CRYPTO_SHA2_512_HMAC:
2427                 return (SHA2_512_HASH_LEN);
2428         default:
2429                 __assert_unreachable();
2430         }
2431 }
2432
2433 static bool
2434 safexcel_is_hash(int alg)
2435 {
2436         switch (alg) {
2437         case CRYPTO_SHA1:
2438         case CRYPTO_SHA1_HMAC:
2439         case CRYPTO_SHA2_224:
2440         case CRYPTO_SHA2_224_HMAC:
2441         case CRYPTO_SHA2_256:
2442         case CRYPTO_SHA2_256_HMAC:
2443         case CRYPTO_SHA2_384:
2444         case CRYPTO_SHA2_384_HMAC:
2445         case CRYPTO_SHA2_512:
2446         case CRYPTO_SHA2_512_HMAC:
2447         case CRYPTO_AES_128_NIST_GMAC:
2448         case CRYPTO_AES_192_NIST_GMAC:
2449         case CRYPTO_AES_256_NIST_GMAC:
2450         case CRYPTO_AES_CCM_CBC_MAC:
2451                 return (true);
2452         default:
2453                 return (false);
2454         }
2455 }
2456
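/*
 * Initialize driver-specific session state: algorithm identifiers,
 * pre-computed keys and the ring to which the session's requests will be
 * submitted.
 */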
2457 static int
2458 safexcel_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
2459 {
2460         struct safexcel_session *sess;
2461         struct safexcel_softc *sc;
2462         struct cryptoini *enc, *mac;
2463         int error;
2464
2465         sc = device_get_softc(dev);
2466         sess = crypto_get_driver_session(cses);
2467
2468         enc = mac = NULL;
2469         if (safexcel_is_hash(cri->cri_alg))
2470                 mac = cri;
2471         else
2472                 enc = cri;
2473         cri = cri->cri_next;
2474
2475         if (cri != NULL) {
2476                 if (enc == NULL && !safexcel_is_hash(cri->cri_alg))
2477                         enc = cri;
2478                 if (mac == NULL && safexcel_is_hash(cri->cri_alg))
2479                         mac = cri;
2480                 if (cri->cri_next != NULL || !(enc != NULL && mac != NULL))
2481                         return (EINVAL);
2482         }
2483
2484         error = safexcel_probesession(enc, mac);
2485         if (error != 0)
2486                 return (error);
2487
2488         if (mac != NULL) {
2489                 switch (mac->cri_alg) {
2490                 case CRYPTO_SHA1:
2491                 case CRYPTO_SHA2_224:
2492                 case CRYPTO_SHA2_256:
2493                 case CRYPTO_SHA2_384:
2494                 case CRYPTO_SHA2_512:
2495                         sess->digest = SAFEXCEL_CONTROL0_DIGEST_PRECOMPUTED;
2496                         sess->hash = safexcel_sha_hashid(mac->cri_alg);
2497                         sess->digestlen = safexcel_sha_hashlen(mac->cri_alg);
2498                         sess->statelen = safexcel_sha_statelen(mac->cri_alg);
2499                         break;
2500                 case CRYPTO_SHA1_HMAC:
2501                 case CRYPTO_SHA2_224_HMAC:
2502                 case CRYPTO_SHA2_256_HMAC:
2503                 case CRYPTO_SHA2_384_HMAC:
2504                 case CRYPTO_SHA2_512_HMAC:
2505                         sess->digest = SAFEXCEL_CONTROL0_DIGEST_HMAC;
2506                         sess->hash = safexcel_sha_hashid(mac->cri_alg);
2507                         sess->digestlen = safexcel_sha_hashlen(mac->cri_alg);
2508                         sess->statelen = safexcel_sha_statelen(mac->cri_alg);
2509                         break;
2510                 }
2511         }
2512
2513         if (enc != NULL) {
2514                 switch (enc->cri_alg) {
2515                 case CRYPTO_AES_NIST_GCM_16:
2516                         sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC;
2517                         sess->digestlen = GMAC_DIGEST_LEN;
2518                         sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH;
2519                         sess->alg = safexcel_aes_algid(enc->cri_klen);
2520                         sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM;
2521                         sess->ivlen = AES_GCM_IV_LEN;
2522                         break;
2523                 case CRYPTO_AES_CCM_16:
2524                         sess->hash = safexcel_aes_ccm_hashid(enc->cri_klen);
2525                         sess->digest = SAFEXCEL_CONTROL0_DIGEST_CCM;
2526                         sess->digestlen = CCM_CBC_MAX_DIGEST_LEN;
2527                         sess->alg = safexcel_aes_algid(enc->cri_klen);
2528                         sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CCM;
2529                         sess->ivlen = AES_CCM_IV_LEN;
2530                         break;
2531                 case CRYPTO_AES_CBC:
2532                         sess->alg = safexcel_aes_algid(enc->cri_klen);
2533                         sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CBC;
2534                         sess->ivlen = AES_BLOCK_LEN;
2535                         break;
2536                 case CRYPTO_AES_ICM:
2537                         sess->alg = safexcel_aes_algid(enc->cri_klen);
2538                         sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CTR;
2539                         sess->ivlen = AES_BLOCK_LEN;
2540                         break;
2541                 case CRYPTO_AES_XTS:
2542                         sess->alg = safexcel_aes_algid(enc->cri_klen / 2);
2543                         sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_XTS;
2544                         sess->ivlen = AES_XTS_IV_LEN;
2545                         break;
2546                 }
2547         }
2548
2549         if (mac != NULL && mac->cri_mlen != 0)
2550                 sess->digestlen = mac->cri_mlen;
2551
2552         if (enc != NULL) {
2553                 if (enc->cri_key != NULL) {
2554                         sess->klen = enc->cri_klen / 8;
2555                         memcpy(sess->key, enc->cri_key, sess->klen);
2556                         switch (enc->cri_alg) {
2557                         case CRYPTO_AES_NIST_GCM_16:
2558                                 safexcel_setkey_ghash(sess, sess->key,
2559                                     sess->klen);
2560                                 break;
2561                         case CRYPTO_AES_CCM_16:
2562                                 safexcel_setkey_xcbcmac(sess, sess->key,
2563                                     sess->klen);
2564                                 break;
2565                         case CRYPTO_AES_XTS:
2566                                 safexcel_setkey_xts(sess, sess->key,
2567                                     sess->klen);
2568                                 sess->klen /= 2;
2569                                 break;
2570                         }
2571                 }
2572         }
2573
2574         if (mac != NULL) {
2575                 switch (mac->cri_alg) {
2576                 case CRYPTO_SHA1_HMAC:
2577                 case CRYPTO_SHA2_224_HMAC:
2578                 case CRYPTO_SHA2_256_HMAC:
2579                 case CRYPTO_SHA2_384_HMAC:
2580                 case CRYPTO_SHA2_512_HMAC:
2581                         safexcel_setkey_hmac(sess, mac->cri_alg, mac->cri_key,
2582                             mac->cri_klen / 8);
2583                         break;
2584                 }
2585         }
2586
2587         /* Bind each session to a fixed ring to minimize lock contention. */
2588         sess->ringidx = atomic_fetchadd_int(&sc->sc_ringidx, 1);
2589         sess->ringidx %= sc->sc_config.rings;
2590
2591         return (0);
2592 }
2593
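/*
 * Dispatch a crypto request: build the descriptor chains and the token, then
 * hand the request to the packet engine.
 */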
2594 static int
2595 safexcel_process(device_t dev, struct cryptop *crp, int hint)
2596 {
2597         struct safexcel_request *req;
2598         struct safexcel_ring *ring;
2599         struct safexcel_session *sess;
2600         struct safexcel_softc *sc;
2601         struct cryptodesc *crd, *enc, *mac;
2602         int error;
2603
2604         sc = device_get_softc(dev);
2605         sess = crypto_get_driver_session(crp->crp_session);
2606
2607         if (crp->crp_ilen > SAFEXCEL_MAX_REQUEST_SIZE ||
2608             crp->crp_olen > SAFEXCEL_MAX_REQUEST_SIZE) {
2609                 crp->crp_etype = E2BIG;
2610                 crypto_done(crp);
2611                 return (0);
2612         }
2613
2614         crd = crp->crp_desc;
2615
2616         enc = mac = NULL;
2617         if (safexcel_is_hash(crd->crd_alg))
2618                 mac = crd;
2619         else
2620                 enc = crd;
2621         crd = crd->crd_next;
2622
2623         if (crd != NULL) {
2624                 if (enc == NULL && !safexcel_is_hash(crd->crd_alg))
2625                         enc = crd;
2626                 if (mac == NULL && safexcel_is_hash(crd->crd_alg))
2627                         mac = crd;
2628                 if (crd->crd_next != NULL || !(enc != NULL && mac != NULL))
2629                         return (EINVAL);
2630         }
2631
2632         if ((enc != NULL && (enc->crd_flags & CRD_F_KEY_EXPLICIT) != 0) ||
2633             (mac != NULL && (mac->crd_flags & CRD_F_KEY_EXPLICIT) != 0)) {
2634                 if (enc != NULL) {
2635                         switch (enc->crd_alg) {
2636                         case CRYPTO_AES_NIST_GCM_16:
2637                                 safexcel_setkey_ghash(sess, enc->crd_key,
2638                                     enc->crd_klen / 8);
2639                                 break;
2640                         case CRYPTO_AES_CCM_16:
2641                                 safexcel_setkey_xcbcmac(sess, enc->crd_key,
2642                                     enc->crd_klen / 8);
2643                                 break;
2644                         case CRYPTO_AES_XTS:
2645                                 safexcel_setkey_xts(sess, enc->crd_key,
2646                                     enc->crd_klen / 8);
2647                                 break;
2648                         }
2649                 }
2650
2651                 if (mac != NULL) {
2652                         switch (mac->crd_alg) {
2653                         case CRYPTO_SHA1_HMAC:
2654                         case CRYPTO_SHA2_224_HMAC:
2655                         case CRYPTO_SHA2_256_HMAC:
2656                         case CRYPTO_SHA2_384_HMAC:
2657                         case CRYPTO_SHA2_512_HMAC:
2658                                 safexcel_setkey_hmac(sess, mac->crd_alg,
2659                                     mac->crd_key, mac->crd_klen / 8);
2660                                 break;
2661                         }
2662                 }
2663         }
2664
2665         ring = &sc->sc_ring[sess->ringidx];
2666         mtx_lock(&ring->mtx);
2667         req = safexcel_alloc_request(sc, ring);
2668         if (__predict_false(req == NULL)) {
2669                 mtx_lock(&sc->sc_mtx);
2670                 mtx_unlock(&ring->mtx);
2671                 sc->sc_blocked = CRYPTO_SYMQ;
2672                 mtx_unlock(&sc->sc_mtx);
2673                 return (ERESTART);
2674         }
2675
2676         req->crp = crp;
2677         req->enc = enc;
2678         req->mac = mac;
2679         req->sess = sess;
2680
2681         if (enc != NULL && (enc->crd_flags & CRD_F_ENCRYPT) != 0) {
2682                 if ((enc->crd_flags & CRD_F_IV_EXPLICIT) != 0)
2683                         memcpy(req->iv, enc->crd_iv, sess->ivlen);
2684                 else
2685                         arc4rand(req->iv, sess->ivlen, 0);
2686
2687                 if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0) {
2688                         crypto_copyback(crp->crp_flags, crp->crp_buf,
2689                             enc->crd_inject, sess->ivlen, req->iv);
2690                 }
2691         } else if (enc != NULL) {
2692                 if ((enc->crd_flags & CRD_F_IV_EXPLICIT) != 0) {
2693                         memcpy(req->iv, enc->crd_iv, sess->ivlen);
2694                 } else {
2695                         crypto_copydata(crp->crp_flags, crp->crp_buf,
2696                             enc->crd_inject, sess->ivlen, req->iv);
2697                 }
2698         }
2699
2700         error = safexcel_create_chain(ring, req);
2701         if (__predict_false(error != 0)) {
2702                 safexcel_free_request(ring, req);
2703                 mtx_unlock(&ring->mtx);
2704                 crp->crp_etype = error;
2705                 crypto_done(crp);
2706                 return (0);
2707         }
2708
2709         safexcel_set_token(req);
2710
2711         bus_dmamap_sync(ring->data_dtag, req->dmap,
2712             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2713         bus_dmamap_sync(req->ctx.tag, req->ctx.map,
2714             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2715         bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
2716             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2717         bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
2718             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2719         bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
2720             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2721
2722         safexcel_enqueue_request(sc, ring, req);
2723
2724         if ((hint & CRYPTO_HINT_MORE) == 0)
2725                 safexcel_execute(sc, ring, req);
2726         mtx_unlock(&ring->mtx);
2727
2728         return (0);
2729 }
2730
2731 static device_method_t safexcel_methods[] = {
2732         /* Device interface */
2733         DEVMETHOD(device_probe,         safexcel_probe),
2734         DEVMETHOD(device_attach,        safexcel_attach),
2735         DEVMETHOD(device_detach,        safexcel_detach),
2736
2737         /* Cryptodev interface */
2738         DEVMETHOD(cryptodev_newsession, safexcel_newsession),
2739         DEVMETHOD(cryptodev_process,    safexcel_process),
2740
2741         DEVMETHOD_END
2742 };
2743
2744 static devclass_t safexcel_devclass;
2745
2746 static driver_t safexcel_driver = {
2747         .name           = "safexcel",
2748         .methods        = safexcel_methods,
2749         .size           = sizeof(struct safexcel_softc),
2750 };
2751
2752 DRIVER_MODULE(safexcel, simplebus, safexcel_driver, safexcel_devclass, 0, 0);
2753 MODULE_VERSION(safexcel, 1);
2754 MODULE_DEPEND(safexcel, crypto, 1, 1, 1);