2 * Copyright (C) 2009-2011 Semihalf.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * CESA SRAM Memory Map:
30 * +------------------------+ <= sc->sc_sram_base_va + CESA_SRAM_SIZE
34 * +------------------------+ <= sc->sc_sram_base_va + CESA_DATA(0)
35 * | struct cesa_sa_data |
36 * +------------------------+
37 * | struct cesa_sa_hdesc |
38 * +------------------------+ <= sc->sc_sram_base_va
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
44 #include <sys/param.h>
45 #include <sys/systm.h>
47 #include <sys/endian.h>
48 #include <sys/kernel.h>
51 #include <sys/module.h>
52 #include <sys/mutex.h>
55 #include <machine/bus.h>
56 #include <machine/intr.h>
57 #include <machine/resource.h>
58 #include <machine/fdt.h>
60 #include <dev/fdt/fdt_common.h>
61 #include <dev/ofw/ofw_bus.h>
62 #include <dev/ofw/ofw_bus_subr.h>
65 #include <crypto/sha1.h>
66 #include <crypto/sha2/sha256.h>
67 #include <crypto/rijndael/rijndael.h>
68 #include <opencrypto/cryptodev.h>
69 #include "cryptodev_if.h"
71 #include <arm/mv/mvreg.h>
72 #include <arm/mv/mvvar.h>
/*
 * Forward declarations: newbus device entry points (probe/attach/detach),
 * the interrupt handler, and the opencrypto (OCF) driver callbacks.
 */
75 static int cesa_probe(device_t);
76 static int cesa_attach(device_t);
77 static int cesa_detach(device_t);
78 static void cesa_intr(void *);
79 static int cesa_newsession(device_t, u_int32_t *, struct cryptoini *);
80 static int cesa_freesession(device_t, u_int64_t);
81 static int cesa_process(device_t, struct cryptop *, int);
/*
 * Bus resources claimed by the device: two memory windows (register banks)
 * and one shareable interrupt line.
 */
83 static struct resource_spec cesa_res_spec[] = {
84 { SYS_RES_MEMORY, 0, RF_ACTIVE },
85 { SYS_RES_MEMORY, 1, RF_ACTIVE },
86 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
/*
 * Method table: standard device interface plus the cryptodev interface
 * used by the opencrypto framework (see cryptodev_if.h).
 */
90 static device_method_t cesa_methods[] = {
91 /* Device interface */
92 DEVMETHOD(device_probe, cesa_probe),
93 DEVMETHOD(device_attach, cesa_attach),
94 DEVMETHOD(device_detach, cesa_detach),
96 /* Crypto device methods */
97 DEVMETHOD(cryptodev_newsession, cesa_newsession),
98 DEVMETHOD(cryptodev_freesession,cesa_freesession),
99 DEVMETHOD(cryptodev_process, cesa_process),
/*
 * Driver registration: attach under simplebus (FDT) and declare a runtime
 * dependency on the crypto framework module.
 */
104 static driver_t cesa_driver = {
107 sizeof (struct cesa_softc)
109 static devclass_t cesa_devclass;
111 DRIVER_MODULE(cesa, simplebus, cesa_driver, cesa_devclass, 0, 0);
112 MODULE_DEPEND(cesa, crypto, 1, 1, 1);
/*
 * Debug helper: dump every field of a CESA SA hardware descriptor.
 * NOTE(review): `dev` is presumably sc->sc_dev, assigned in a line not
 * visible here — confirm.
 */
115 cesa_dump_cshd(struct cesa_softc *sc, struct cesa_sa_hdesc *cshd)
121 device_printf(dev, "CESA SA Hardware Descriptor:\n");
122 device_printf(dev, "\t\tconfig: 0x%08X\n", cshd->cshd_config);
123 device_printf(dev, "\t\te_src: 0x%08X\n", cshd->cshd_enc_src);
124 device_printf(dev, "\t\te_dst: 0x%08X\n", cshd->cshd_enc_dst);
125 device_printf(dev, "\t\te_dlen: 0x%08X\n", cshd->cshd_enc_dlen);
126 device_printf(dev, "\t\te_key: 0x%08X\n", cshd->cshd_enc_key);
127 device_printf(dev, "\t\te_iv_1: 0x%08X\n", cshd->cshd_enc_iv);
128 device_printf(dev, "\t\te_iv_2: 0x%08X\n", cshd->cshd_enc_iv_buf);
129 device_printf(dev, "\t\tm_src: 0x%08X\n", cshd->cshd_mac_src);
130 device_printf(dev, "\t\tm_dst: 0x%08X\n", cshd->cshd_mac_dst);
131 device_printf(dev, "\t\tm_dlen: 0x%08X\n", cshd->cshd_mac_dlen);
132 device_printf(dev, "\t\tm_tlen: 0x%08X\n", cshd->cshd_mac_total_dlen);
133 device_printf(dev, "\t\tm_iv_i: 0x%08X\n", cshd->cshd_mac_iv_in);
134 device_printf(dev, "\t\tm_iv_o: 0x%08X\n", cshd->cshd_mac_iv_out);
/*
 * bus_dmamap_load() callback for cesa_alloc_dma_mem(): the memory was
 * created with nsegments == 1, so record the single segment's physical
 * address in the cesa_dma_mem descriptor passed as `arg`.
 */
139 cesa_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
141 struct cesa_dma_mem *cdm;
146 KASSERT(nseg == 1, ("Got wrong number of DMA segments, should be 1."));
148 cdm->cdm_paddr = segs->ds_addr;
/*
 * Allocate a zeroed, page-aligned, single-segment DMA-safe buffer of
 * `size` bytes and record both its KVA (cdm_vaddr) and physical address
 * (cdm_paddr, via cesa_alloc_dma_mem_cb) in `cdm`.  On failure the
 * partially created resources are torn down in reverse order and
 * cdm_vaddr is reset to NULL so the descriptor can be reused.
 */
152 cesa_alloc_dma_mem(struct cesa_softc *sc, struct cesa_dma_mem *cdm,
157 KASSERT(cdm->cdm_vaddr == NULL,
158 ("%s(): DMA memory descriptor in use.", __func__));
160 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
161 PAGE_SIZE, 0, /* alignment, boundary */
162 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
163 BUS_SPACE_MAXADDR, /* highaddr */
164 NULL, NULL, /* filtfunc, filtfuncarg */
165 size, 1, /* maxsize, nsegments */
166 size, 0, /* maxsegsz, flags */
167 NULL, NULL, /* lockfunc, lockfuncarg */
168 &cdm->cdm_tag); /* dmat */
170 device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
176 error = bus_dmamem_alloc(cdm->cdm_tag, &cdm->cdm_vaddr,
177 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &cdm->cdm_map);
179 device_printf(sc->sc_dev, "failed to allocate DMA safe"
180 " memory, error %i!\n", error);
185 error = bus_dmamap_load(cdm->cdm_tag, cdm->cdm_map, cdm->cdm_vaddr,
186 size, cesa_alloc_dma_mem_cb, cdm, BUS_DMA_NOWAIT);
188 device_printf(sc->sc_dev, "cannot get address of the DMA"
189 " memory, error %i\n", error);
/* Error unwind: free in the reverse order of creation. */
196 bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
198 bus_dma_tag_destroy(cdm->cdm_tag);
200 cdm->cdm_vaddr = NULL;
/*
 * Release a buffer obtained from cesa_alloc_dma_mem(): unload the map,
 * free the memory, destroy the tag, and mark the descriptor free by
 * clearing cdm_vaddr.
 */
205 cesa_free_dma_mem(struct cesa_dma_mem *cdm)
208 bus_dmamap_unload(cdm->cdm_tag, cdm->cdm_map);
209 bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
210 bus_dma_tag_destroy(cdm->cdm_tag);
211 cdm->cdm_vaddr = NULL;
/*
 * Perform a bus_dmamap_sync() with the given op on the descriptor's
 * memory, but only if the descriptor currently holds a valid allocation.
 */
215 cesa_sync_dma_mem(struct cesa_dma_mem *cdm, bus_dmasync_op_t op)
218 /* Sync only if dma memory is valid */
219 if (cdm->cdm_vaddr != NULL)
220 bus_dmamap_sync(cdm->cdm_tag, cdm->cdm_map, op);
/*
 * Sync all three descriptor pools (TDMA descriptors, SA descriptors and
 * per-request data) with a single call.
 */
224 cesa_sync_desc(struct cesa_softc *sc, bus_dmasync_op_t op)
227 cesa_sync_dma_mem(&sc->sc_tdesc_cdm, op);
228 cesa_sync_dma_mem(&sc->sc_sdesc_cdm, op);
229 cesa_sync_dma_mem(&sc->sc_requests_cdm, op);
/*
 * Pop a free session from the sessions pool under the sessions lock.
 * Presumably returns NULL when the pool is exhausted — the return path
 * is not visible here.
 */
232 static struct cesa_session *
233 cesa_alloc_session(struct cesa_softc *sc)
235 struct cesa_session *cs;
237 CESA_GENERIC_ALLOC_LOCKED(sc, cs, sessions);
/*
 * Translate a session id into its session structure; ids at or beyond
 * CESA_SESSIONS are rejected (NULL return path not visible here).
 */
242 static struct cesa_session *
243 cesa_get_session(struct cesa_softc *sc, uint32_t sid)
246 if (sid >= CESA_SESSIONS)
249 return (&sc->sc_sessions[sid]);
/* Return a session to the free pool under the sessions lock. */
253 cesa_free_session(struct cesa_softc *sc, struct cesa_session *cs)
256 CESA_GENERIC_FREE_LOCKED(sc, cs, sessions);
/*
 * Pop a free request from the requests pool and initialize its TDMA and
 * SA descriptor lists to empty.
 */
259 static struct cesa_request *
260 cesa_alloc_request(struct cesa_softc *sc)
262 struct cesa_request *cr;
264 CESA_GENERIC_ALLOC_LOCKED(sc, cr, requests);
268 STAILQ_INIT(&cr->cr_tdesc);
269 STAILQ_INIT(&cr->cr_sdesc);
/*
 * Release everything a request holds: its TDMA descriptors, its SA
 * descriptors, its loaded data DMA map (if any), and finally the request
 * structure itself back to the requests pool.
 */
275 cesa_free_request(struct cesa_softc *sc, struct cesa_request *cr)
278 /* Free TDMA descriptors assigned to this request */
279 CESA_LOCK(sc, tdesc);
280 STAILQ_CONCAT(&sc->sc_free_tdesc, &cr->cr_tdesc);
281 CESA_UNLOCK(sc, tdesc);
283 /* Free SA descriptors assigned to this request */
284 CESA_LOCK(sc, sdesc);
285 STAILQ_CONCAT(&sc->sc_free_sdesc, &cr->cr_sdesc);
286 CESA_UNLOCK(sc, sdesc);
288 /* Unload DMA memory associated with request */
289 if (cr->cr_dmap_loaded) {
290 bus_dmamap_unload(sc->sc_data_dtag, cr->cr_dmap);
291 cr->cr_dmap_loaded = 0;
294 CESA_GENERIC_FREE_LOCKED(sc, cr, requests);
/*
 * Append a fully built request to the ready list; cesa_execute() later
 * moves ready requests to the hardware queue.
 */
298 cesa_enqueue_request(struct cesa_softc *sc, struct cesa_request *cr)
301 CESA_LOCK(sc, requests);
302 STAILQ_INSERT_TAIL(&sc->sc_ready_requests, cr, cr_stq);
303 CESA_UNLOCK(sc, requests);
/*
 * Pop a free TDMA descriptor from the pool; warn when the pool runs dry
 * (the NULL return path is not visible here).
 */
306 static struct cesa_tdma_desc *
307 cesa_alloc_tdesc(struct cesa_softc *sc)
309 struct cesa_tdma_desc *ctd;
311 CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc);
314 device_printf(sc->sc_dev, "TDMA descriptors pool exhaused. "
315 "Consider increasing CESA_TDMA_DESCRIPTORS.\n");
/*
 * Pop a free SA descriptor, attach it to the request's SA list and
 * pre-fill the hardware descriptor with the fixed SRAM offsets of the
 * per-request data (key, IVs, hash state); the data-dependent source /
 * destination / length fields start out zero and are filled by the
 * chain builder.
 */
320 static struct cesa_sa_desc *
321 cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr)
323 struct cesa_sa_desc *csd;
325 CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc);
327 device_printf(sc->sc_dev, "SA descriptors pool exhaused. "
328 "Consider increasing CESA_SA_DESCRIPTORS.\n");
332 STAILQ_INSERT_TAIL(&cr->cr_sdesc, csd, csd_stq);
334 /* Fill-in SA descriptor with default values */
335 csd->csd_cshd->cshd_enc_key = CESA_SA_DATA(csd_key);
336 csd->csd_cshd->cshd_enc_iv = CESA_SA_DATA(csd_iv);
337 csd->csd_cshd->cshd_enc_iv_buf = CESA_SA_DATA(csd_iv);
338 csd->csd_cshd->cshd_enc_src = 0;
339 csd->csd_cshd->cshd_enc_dst = 0;
340 csd->csd_cshd->cshd_enc_dlen = 0;
341 csd->csd_cshd->cshd_mac_dst = CESA_SA_DATA(csd_hash);
342 csd->csd_cshd->cshd_mac_iv_in = CESA_SA_DATA(csd_hiv_in);
343 csd->csd_cshd->cshd_mac_iv_out = CESA_SA_DATA(csd_hiv_out);
344 csd->csd_cshd->cshd_mac_src = 0;
345 csd->csd_cshd->cshd_mac_dlen = 0;
/*
 * Allocate a TDMA descriptor describing a copy of `size` bytes from
 * `src` to `dst`.  A descriptor with all-zero dst/src/size acts as a
 * special control packet and gets the OWNED flag instead (the condition
 * separating the two flag assignments is not visible here — presumably
 * it tests for the zero-size control case; confirm against upstream).
 */
350 static struct cesa_tdma_desc *
351 cesa_tdma_copy(struct cesa_softc *sc, bus_addr_t dst, bus_addr_t src,
354 struct cesa_tdma_desc *ctd;
356 ctd = cesa_alloc_tdesc(sc);
360 ctd->ctd_cthd->cthd_dst = dst;
361 ctd->ctd_cthd->cthd_src = src;
362 ctd->ctd_cthd->cthd_byte_count = size;
364 /* Handle special control packet */
366 ctd->ctd_cthd->cthd_flags = CESA_CTHD_OWNED;
368 ctd->ctd_cthd->cthd_flags = 0;
/*
 * Build a TDMA descriptor that copies the request's cesa_sa_data from
 * host memory into SRAM, right after the SA hardware descriptor (see the
 * SRAM memory map at the top of this file).
 */
373 static struct cesa_tdma_desc *
374 cesa_tdma_copyin_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
377 return (cesa_tdma_copy(sc, sc->sc_sram_base_pa +
378 sizeof(struct cesa_sa_hdesc), cr->cr_csd_paddr,
379 sizeof(struct cesa_sa_data)));
/*
 * Build a TDMA descriptor that copies the cesa_sa_data back from SRAM
 * to the request's host memory (reverse of cesa_tdma_copyin_sa_data).
 */
382 static struct cesa_tdma_desc *
383 cesa_tdma_copyout_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
386 return (cesa_tdma_copy(sc, cr->cr_csd_paddr, sc->sc_sram_base_pa +
387 sizeof(struct cesa_sa_hdesc), sizeof(struct cesa_sa_data)));
/*
 * Build a TDMA descriptor that copies one SA hardware descriptor into
 * the beginning of SRAM, where the engine expects it.
 */
390 static struct cesa_tdma_desc *
391 cesa_tdma_copy_sdesc(struct cesa_softc *sc, struct cesa_sa_desc *csd)
394 return (cesa_tdma_copy(sc, sc->sc_sram_base_pa, csd->csd_cshd_paddr,
395 sizeof(struct cesa_sa_hdesc)));
/*
 * Append a TDMA descriptor to the request's chain: link the previous
 * tail's hardware next-pointer to the new descriptor's physical address,
 * terminate the new descriptor (cthd_next = 0) and insert it at the tail
 * of the software list.
 */
399 cesa_append_tdesc(struct cesa_request *cr, struct cesa_tdma_desc *ctd)
401 struct cesa_tdma_desc *ctd_prev;
403 if (!STAILQ_EMPTY(&cr->cr_tdesc)) {
404 ctd_prev = STAILQ_LAST(&cr->cr_tdesc, cesa_tdma_desc, ctd_stq);
405 ctd_prev->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr;
408 ctd->ctd_cthd->cthd_next = 0;
409 STAILQ_INSERT_TAIL(&cr->cr_tdesc, ctd, ctd_stq);
/*
 * Flush one assembled packet into the request's TDMA chain, in hardware
 * execution order: (1) the copy of the SA descriptor into SRAM, (2) the
 * copy-in descriptors for the input data, (3) a zero control descriptor
 * that triggers crypto processing, (4) the copy-out descriptors for the
 * results.  The packet's copyin/copyout lists are reset afterwards.
 * Error-return paths after the allocation calls are not visible here.
 */
413 cesa_append_packet(struct cesa_softc *sc, struct cesa_request *cr,
414 struct cesa_packet *cp, struct cesa_sa_desc *csd)
416 struct cesa_tdma_desc *ctd, *tmp;
418 /* Copy SA descriptor for this packet */
419 ctd = cesa_tdma_copy_sdesc(sc, csd);
423 cesa_append_tdesc(cr, ctd);
425 /* Copy data to be processed */
426 STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyin, ctd_stq, tmp)
427 cesa_append_tdesc(cr, ctd);
428 STAILQ_INIT(&cp->cp_copyin);
430 /* Insert control descriptor */
431 ctd = cesa_tdma_copy(sc, 0, 0, 0);
435 cesa_append_tdesc(cr, ctd);
437 /* Copy back results */
438 STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyout, ctd_stq, tmp)
439 cesa_append_tdesc(cr, ctd);
440 STAILQ_INIT(&cp->cp_copyout);
/*
 * Precompute the HMAC inner/outer hash states for a session MAC key.
 * Standard HMAC key schedule: XOR the key into ipad/opad (the XOR loop
 * body is not visible here), run one compression-function block of the
 * selected hash over each pad, and store the intermediate states in
 * cs_hiv_in / cs_hiv_out.  The states are byte-swapped to big endian at
 * the end, as the engine expects.
 */
446 cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen)
448 uint8_t ipad[CESA_MAX_HMAC_BLOCK_LEN];
449 uint8_t opad[CESA_MAX_HMAC_BLOCK_LEN];
451 SHA256_CTX sha256ctx;
457 memset(ipad, HMAC_IPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN);
458 memset(opad, HMAC_OPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN);
459 for (i = 0; i < mklen; i++) {
464 hin = (uint32_t *)cs->cs_hiv_in;
465 hout = (uint32_t *)cs->cs_hiv_out;
468 case CRYPTO_MD5_HMAC:
470 MD5Update(&md5ctx, ipad, MD5_HMAC_BLOCK_LEN);
471 memcpy(hin, md5ctx.state, sizeof(md5ctx.state));
473 MD5Update(&md5ctx, opad, MD5_HMAC_BLOCK_LEN);
474 memcpy(hout, md5ctx.state, sizeof(md5ctx.state));
476 case CRYPTO_SHA1_HMAC:
478 SHA1Update(&sha1ctx, ipad, SHA1_HMAC_BLOCK_LEN);
479 memcpy(hin, sha1ctx.h.b32, sizeof(sha1ctx.h.b32));
481 SHA1Update(&sha1ctx, opad, SHA1_HMAC_BLOCK_LEN);
482 memcpy(hout, sha1ctx.h.b32, sizeof(sha1ctx.h.b32));
484 case CRYPTO_SHA2_256_HMAC:
485 SHA256_Init(&sha256ctx);
486 SHA256_Update(&sha256ctx, ipad, SHA2_256_HMAC_BLOCK_LEN);
487 memcpy(hin, sha256ctx.state, sizeof(sha256ctx.state));
488 SHA256_Init(&sha256ctx);
489 SHA256_Update(&sha256ctx, opad, SHA2_256_HMAC_BLOCK_LEN);
490 memcpy(hout, sha256ctx.state, sizeof(sha256ctx.state));
/* Engine expects the intermediate hash state in big-endian words. */
496 for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) {
497 hin[i] = htobe32(hin[i]);
498 hout[i] = htobe32(hout[i]);
/*
 * Derive the AES decryption key for the session: run the Rijndael
 * encryption key schedule and extract the last round keys (which the
 * hardware uses as the decryption key), stored big-endian in
 * cs_aes_dkey.  Also set the AES key-length bits in cs_config based on
 * cs_klen (16/24/32 bytes — the case labels are not visible here).
 */
505 cesa_prep_aes_key(struct cesa_session *cs)
507 uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
511 rijndaelKeySetupEnc(ek, cs->cs_key, cs->cs_klen * 8);
513 cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK;
514 dkey = (uint32_t *)cs->cs_aes_dkey;
516 switch (cs->cs_klen) {
518 cs->cs_config |= CESA_CSH_AES_KLEN_128;
519 for (i = 0; i < 4; i++)
520 *dkey++ = htobe32(ek[4 * 10 + i]);
523 cs->cs_config |= CESA_CSH_AES_KLEN_192;
524 for (i = 0; i < 4; i++)
525 *dkey++ = htobe32(ek[4 * 12 + i]);
526 for (i = 0; i < 2; i++)
527 *dkey++ = htobe32(ek[4 * 11 + 2 + i]);
530 cs->cs_config |= CESA_CSH_AES_KLEN_256;
531 for (i = 0; i < 4; i++)
532 *dkey++ = htobe32(ek[4 * 14 + i]);
533 for (i = 0; i < 4; i++)
534 *dkey++ = htobe32(ek[4 * 13 + i]);
/*
 * Return whether `alg` is a hash/HMAC algorithm (as opposed to a
 * cipher); the return statements are not visible in this view.
 */
544 cesa_is_hash(int alg)
549 case CRYPTO_MD5_HMAC:
551 case CRYPTO_SHA1_HMAC:
552 case CRYPTO_SHA2_256_HMAC:
/*
 * Begin assembling a new packet of at most `size` bytes: reset its
 * copy-in and copy-out descriptor lists.
 */
560 cesa_start_packet(struct cesa_packet *cp, unsigned int size)
565 STAILQ_INIT(&cp->cp_copyin);
566 STAILQ_INIT(&cp->cp_copyout);
/*
 * Consume as much of the DMA segment as fits in the current packet:
 * queue a copy-in descriptor (host -> SRAM data area) and a matching
 * copy-out descriptor (SRAM -> host), then advance the segment address/
 * length and the packet offset by the copied size.  The segment is
 * modified in place so the caller can keep filling packets from it.
 */
570 cesa_fill_packet(struct cesa_softc *sc, struct cesa_packet *cp,
571 bus_dma_segment_t *seg)
573 struct cesa_tdma_desc *ctd;
576 /* Calculate size of block copy */
577 bsize = MIN(seg->ds_len, cp->cp_size - cp->cp_offset);
580 ctd = cesa_tdma_copy(sc, sc->sc_sram_base_pa +
581 CESA_DATA(cp->cp_offset), seg->ds_addr, bsize);
585 STAILQ_INSERT_TAIL(&cp->cp_copyin, ctd, ctd_stq);
587 ctd = cesa_tdma_copy(sc, seg->ds_addr, sc->sc_sram_base_pa +
588 CESA_DATA(cp->cp_offset), bsize);
592 STAILQ_INSERT_TAIL(&cp->cp_copyout, ctd, ctd_stq);
594 seg->ds_len -= bsize;
595 seg->ds_addr += bsize;
596 cp->cp_offset += bsize;
/*
 * busdma load callback that converts the request's scatter/gather list
 * into a chain of CESA packets.
 *
 * If the encryption and MAC regions of a combined ENC+MAC request are
 * not aligned to the cipher block / MAC block sizes relative to each
 * other (or do not overlap at all), the hardware cannot process them in
 * one pass; the function then recurses into itself twice, once with only
 * the MAC descriptor and once with only the ENC descriptor, in an order
 * chosen by the original operation mode (MAC_AND_ENC vs ENC_AND_MAC).
 *
 * Otherwise it walks the DMA segments, packing data into packets of at
 * most mpsize bytes (CESA_MAX_PACKET_SIZE rounded down to the common
 * cipher/MAC block alignment).  For each full packet it allocates an SA
 * descriptor, fills in per-packet enc/mac source, destination and
 * lengths, sets fragmentation flags (FIRST/MIDDLE/LAST) when the whole
 * request does not fit in one packet, and appends the packet to the
 * request.  Leftover copy-in/copy-out descriptors are moved onto the
 * request at the end so they are freed with it; the result is reported
 * through cci->cci_error.  Several lines (error checks, loop framing)
 * are not visible in this view.
 */
603 cesa_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
605 unsigned int mpsize, fragmented;
606 unsigned int mlen, mskip, tmlen;
607 struct cesa_chain_info *cci;
608 unsigned int elen, eskip;
609 unsigned int skip, len;
610 struct cesa_sa_desc *csd;
611 struct cesa_request *cr;
612 struct cesa_softc *sc;
613 struct cesa_packet cp;
614 bus_dma_segment_t seg;
623 cci->cci_error = error;
627 elen = cci->cci_enc ? cci->cci_enc->crd_len : 0;
628 eskip = cci->cci_enc ? cci->cci_enc->crd_skip : 0;
629 mlen = cci->cci_mac ? cci->cci_mac->crd_len : 0;
630 mskip = cci->cci_mac ? cci->cci_mac->crd_skip : 0;
633 ((eskip > mskip && ((eskip - mskip) & (cr->cr_cs->cs_ivlen - 1))) ||
634 (mskip > eskip && ((mskip - eskip) & (cr->cr_cs->cs_mblen - 1))) ||
635 (eskip > (mskip + mlen)) || (mskip > (eskip + elen)))) {
637 * Data alignment in the request does not meet CESA requiremnts
638 * for combined encryption/decryption and hashing. We have to
639 * split the request to separate operations and process them
642 config = cci->cci_config;
643 if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) {
644 config &= ~CESA_CSHD_OP_MASK;
646 cci->cci_config = config | CESA_CSHD_MAC;
648 cci->cci_mac = cr->cr_mac;
649 cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
651 cci->cci_config = config | CESA_CSHD_ENC;
652 cci->cci_enc = cr->cr_enc;
654 cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
656 config &= ~CESA_CSHD_OP_MASK;
658 cci->cci_config = config | CESA_CSHD_ENC;
659 cci->cci_enc = cr->cr_enc;
661 cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
663 cci->cci_config = config | CESA_CSHD_MAC;
665 cci->cci_mac = cr->cr_mac;
666 cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
674 mpsize = CESA_MAX_PACKET_SIZE;
675 mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1));
678 skip = MIN(eskip, mskip);
679 len = MAX(elen + eskip, mlen + mskip) - skip;
688 /* Start first packet in chain */
689 cesa_start_packet(&cp, MIN(mpsize, len));
691 while (nseg-- && len > 0) {
695 * Skip data in buffer on which neither ENC nor MAC operation
699 size = MIN(skip, seg.ds_len);
717 * Fill in current packet with data. Break if there is
718 * no more data in current DMA segment or an error
721 size = cesa_fill_packet(sc, &cp, &seg);
729 /* If packet is full, append it to the chain */
730 if (cp.cp_size == cp.cp_offset) {
731 csd = cesa_alloc_sdesc(sc, cr);
737 /* Create SA descriptor for this packet */
738 csd->csd_cshd->cshd_config = cci->cci_config;
739 csd->csd_cshd->cshd_mac_total_dlen = tmlen;
742 * Enable fragmentation if request will not fit
748 csd->csd_cshd->cshd_config |=
749 CESA_CSHD_FRAG_FIRST;
751 csd->csd_cshd->cshd_config |=
752 CESA_CSHD_FRAG_MIDDLE;
753 } else if (fragmented)
754 csd->csd_cshd->cshd_config |=
757 if (eskip < cp.cp_size && elen > 0) {
758 csd->csd_cshd->cshd_enc_src =
760 csd->csd_cshd->cshd_enc_dst =
762 csd->csd_cshd->cshd_enc_dlen =
763 MIN(elen, cp.cp_size - eskip);
766 if (mskip < cp.cp_size && mlen > 0) {
767 csd->csd_cshd->cshd_mac_src =
769 csd->csd_cshd->cshd_mac_dlen =
770 MIN(mlen, cp.cp_size - mskip);
773 elen -= csd->csd_cshd->cshd_enc_dlen;
774 eskip -= MIN(eskip, cp.cp_size);
775 mlen -= csd->csd_cshd->cshd_mac_dlen;
776 mskip -= MIN(mskip, cp.cp_size);
778 cesa_dump_cshd(sc, csd->csd_cshd);
780 /* Append packet to the request */
781 error = cesa_append_packet(sc, cr, &cp, csd);
785 /* Start a new packet, as current is full */
786 cesa_start_packet(&cp, MIN(mpsize, len));
796 * Move all allocated resources to the request. They will be
799 STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyin);
800 STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyout);
801 cci->cci_error = error;
/*
 * Adapter for bus_dmamap_load_uio/_mbuf, which pass a total size
 * argument; forwards to cesa_create_chain_cb dropping `size`.
 */
806 cesa_create_chain_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
807 bus_size_t size, int error)
810 cesa_create_chain_cb(arg, segs, nseg, error);
/*
 * Build the complete TDMA descriptor chain for a request:
 *  1. Fill the per-request cesa_sa_data (cipher key — the AES decryption
 *     key when decrypting with AES-CBC — and the precomputed HMAC
 *     inner/outer states) and prepend a copy-in descriptor for it.
 *  2. Derive the SA config word from the session config plus the
 *     operation direction/mode (ENC, MAC, or combined).
 *  3. Load the crypto buffer (uio, mbuf, or flat buffer) with busdma;
 *     the load callback (cesa_create_chain_cb) creates the data packets.
 *  4. Append a copy-out descriptor for the result metadata.
 * Called with the sessions lock held.  Error-return lines are not
 * visible in this view.
 */
814 cesa_create_chain(struct cesa_softc *sc, struct cesa_request *cr)
816 struct cesa_chain_info cci;
817 struct cesa_tdma_desc *ctd;
822 CESA_LOCK_ASSERT(sc, sessions);
824 /* Create request metadata */
826 if (cr->cr_enc->crd_alg == CRYPTO_AES_CBC &&
827 (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0)
828 memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey,
831 memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key,
836 memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in,
838 memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out,
842 ctd = cesa_tdma_copyin_sa_data(sc, cr);
846 cesa_append_tdesc(cr, ctd);
848 /* Prepare SA configuration */
849 config = cr->cr_cs->cs_config;
851 if (cr->cr_enc && (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0)
852 config |= CESA_CSHD_DECRYPT;
853 if (cr->cr_enc && !cr->cr_mac)
854 config |= CESA_CSHD_ENC;
855 if (!cr->cr_enc && cr->cr_mac)
856 config |= CESA_CSHD_MAC;
857 if (cr->cr_enc && cr->cr_mac)
858 config |= (config & CESA_CSHD_DECRYPT) ? CESA_CSHD_MAC_AND_ENC :
859 CESA_CSHD_ENC_AND_MAC;
861 /* Create data packets */
864 cci.cci_enc = cr->cr_enc;
865 cci.cci_mac = cr->cr_mac;
866 cci.cci_config = config;
869 if (cr->cr_crp->crp_flags & CRYPTO_F_IOV)
870 error = bus_dmamap_load_uio(sc->sc_data_dtag,
871 cr->cr_dmap, (struct uio *)cr->cr_crp->crp_buf,
872 cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT);
873 else if (cr->cr_crp->crp_flags & CRYPTO_F_IMBUF)
874 error = bus_dmamap_load_mbuf(sc->sc_data_dtag,
875 cr->cr_dmap, (struct mbuf *)cr->cr_crp->crp_buf,
876 cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT);
878 error = bus_dmamap_load(sc->sc_data_dtag,
879 cr->cr_dmap, cr->cr_crp->crp_buf,
880 cr->cr_crp->crp_ilen, cesa_create_chain_cb, &cci,
884 cr->cr_dmap_loaded = 1;
887 error = cci.cci_error;
892 /* Read back request metadata */
893 ctd = cesa_tdma_copyout_sa_data(sc, cr);
897 cesa_append_tdesc(cr, ctd);
/*
 * Kick the hardware: move all ready requests to the queued list, splice
 * their individual TDMA chains into one by linking each request's last
 * descriptor to the next request's first (with a POSTREAD/POSTWRITE +
 * PREREAD/PREWRITE sync bracket around the host-side edits), write the
 * head physical address to the TDMA next-descriptor register and issue
 * the activate command.  Does nothing if there is nothing ready or the
 * engine is already busy with queued requests.  Armada 38x SoCs
 * (88F68xx) additionally need the SHA2 bit set in the command register.
 */
903 cesa_execute(struct cesa_softc *sc)
905 struct cesa_tdma_desc *prev_ctd, *ctd;
906 struct cesa_request *prev_cr, *cr;
908 CESA_LOCK(sc, requests);
911 * If ready list is empty, there is nothing to execute. If queued list
912 * is not empty, the hardware is busy and we cannot start another
915 if (STAILQ_EMPTY(&sc->sc_ready_requests) ||
916 !STAILQ_EMPTY(&sc->sc_queued_requests)) {
917 CESA_UNLOCK(sc, requests);
921 /* Move all ready requests to queued list */
922 STAILQ_CONCAT(&sc->sc_queued_requests, &sc->sc_ready_requests);
923 STAILQ_INIT(&sc->sc_ready_requests);
925 /* Create one execution chain from all requests on the list */
926 if (STAILQ_FIRST(&sc->sc_queued_requests) !=
927 STAILQ_LAST(&sc->sc_queued_requests, cesa_request, cr_stq)) {
929 cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_POSTREAD |
930 BUS_DMASYNC_POSTWRITE);
932 STAILQ_FOREACH(cr, &sc->sc_queued_requests, cr_stq) {
934 ctd = STAILQ_FIRST(&cr->cr_tdesc);
935 prev_ctd = STAILQ_LAST(&prev_cr->cr_tdesc,
936 cesa_tdma_desc, ctd_stq);
938 prev_ctd->ctd_cthd->cthd_next =
945 cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_PREREAD |
946 BUS_DMASYNC_PREWRITE);
949 /* Start chain execution in hardware */
950 cr = STAILQ_FIRST(&sc->sc_queued_requests);
951 ctd = STAILQ_FIRST(&cr->cr_tdesc);
953 CESA_TDMA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr);
955 if (sc->sc_soc_id == MV_DEV_88F6828 ||
956 sc->sc_soc_id == MV_DEV_88F6820 ||
957 sc->sc_soc_id == MV_DEV_88F6810)
958 CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE | CESA_SA_CMD_SHA2);
960 CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE);
962 CESA_UNLOCK(sc, requests);
/*
 * Resolve the engine's SRAM window from the FDT: follow the
 * "sram-handle" phandle to the SRAM node, read its "reg" property for
 * the physical base and size, and — on Armada 38x (88F68xx), where the
 * platform code does not premap it — map it with pmap_mapdev() and
 * record the KVA for later unmapping in detach.  Error returns after
 * the OF_getencprop() calls are not visible in this view.
 */
966 cesa_setup_sram(struct cesa_softc *sc)
969 ihandle_t sram_ihandle;
970 pcell_t sram_handle, sram_reg[2];
974 rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "sram-handle",
975 (void *)&sram_handle, sizeof(sram_handle));
979 sram_ihandle = (ihandle_t)sram_handle;
980 sram_node = OF_instance_to_package(sram_ihandle);
982 rv = OF_getencprop(sram_node, "reg", (void *)sram_reg, sizeof(sram_reg));
986 sc->sc_sram_base_pa = sram_reg[0];
987 /* Store SRAM size to be able to unmap in detach() */
988 sc->sc_sram_size = sram_reg[1];
990 if (sc->sc_soc_id != MV_DEV_88F6828 &&
991 sc->sc_soc_id != MV_DEV_88F6820 &&
992 sc->sc_soc_id != MV_DEV_88F6810)
995 /* SRAM memory was not mapped in platform_sram_devmap(), map it now */
996 sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size);
999 sc->sc_sram_base_va = (vm_offset_t)sram_va;
/*
 * Newbus probe: match the FDT "mrvl,cesa" compatible string on enabled
 * nodes and set the device description.
 */
1005 cesa_probe(device_t dev)
1008 if (!ofw_bus_status_okay(dev))
1011 if (!ofw_bus_is_compatible(dev, "mrvl,cesa"))
1014 device_set_desc(dev, "Marvell Cryptographic Engine and Security "
1017 return (BUS_PROBE_DEFAULT);
/*
 * Newbus attach: bring the engine fully up.
 *
 * Sequence: identify the SoC and check that the crypto block is powered
 * (note the power check sense differs between the Kirkwood 88F628x and
 * Discovery MV78100 branches — presumably matching each SoC's register
 * semantics; confirm against the SoC datasheets); initialize the five
 * mutexes; allocate bus resources; resolve/map SRAM; hook the
 * interrupt; create the data DMA tag; carve the TDMA descriptor, SA
 * descriptor and request pools out of DMA-safe memory, wiring each
 * pool entry's virtual and physical addresses and preallocating a DMA
 * map per request; initialize the session pool; program the TDMA
 * control register (128-byte bursts, outstanding reads, plus the
 * Armada 38x outstanding-count bits) and the SA control registers
 * (descriptor at SRAM start, multi-packet mode, TDMA cooperation);
 * clear and unmask interrupts; finally register with OCF and advertise
 * the supported algorithms (SHA2-256-HMAC only on Armada 38x).
 *
 * The tail is a classic reverse-order error-unwind ladder; its goto
 * labels are not visible in this view.
 */
1021 cesa_attach(device_t dev)
1023 struct cesa_softc *sc;
1028 sc = device_get_softc(dev);
1036 case MV_DEV_88F6281:
1037 case MV_DEV_88F6282:
1038 /* Check if CESA peripheral device has power turned on */
1039 if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) ==
1040 CPU_PM_CTRL_CRYPTO) {
1041 device_printf(dev, "not powered on\n");
1046 case MV_DEV_88F6828:
1047 case MV_DEV_88F6820:
1048 case MV_DEV_88F6810:
1051 case MV_DEV_MV78100:
1052 case MV_DEV_MV78100_Z0:
1053 /* Check if CESA peripheral device has power turned on */
1054 if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) !=
1055 CPU_PM_CTRL_CRYPTO) {
1056 device_printf(dev, "not powered on\n");
1059 sc->sc_tperr = CESA_ICR_TPERR;
1067 /* Initialize mutexes */
1068 mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev),
1069 "CESA Shared Data", MTX_DEF);
1070 mtx_init(&sc->sc_tdesc_lock, device_get_nameunit(dev),
1071 "CESA TDMA Descriptors Pool", MTX_DEF);
1072 mtx_init(&sc->sc_sdesc_lock, device_get_nameunit(dev),
1073 "CESA SA Descriptors Pool", MTX_DEF);
1074 mtx_init(&sc->sc_requests_lock, device_get_nameunit(dev),
1075 "CESA Requests Pool", MTX_DEF);
1076 mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev),
1077 "CESA Sessions Pool", MTX_DEF);
1079 /* Allocate I/O and IRQ resources */
1080 error = bus_alloc_resources(dev, cesa_res_spec, sc->sc_res);
1082 device_printf(dev, "could not allocate resources\n");
1086 /* Acquire SRAM base address */
1087 error = cesa_setup_sram(sc);
1089 device_printf(dev, "could not setup SRAM\n");
1093 /* Setup interrupt handler */
1094 error = bus_setup_intr(dev, sc->sc_res[RES_CESA_IRQ], INTR_TYPE_NET |
1095 INTR_MPSAFE, NULL, cesa_intr, sc, &(sc->sc_icookie));
1097 device_printf(dev, "could not setup engine completion irq\n");
1101 /* Create DMA tag for processed data */
1102 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1103 1, 0, /* alignment, boundary */
1104 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1105 BUS_SPACE_MAXADDR, /* highaddr */
1106 NULL, NULL, /* filtfunc, filtfuncarg */
1107 CESA_MAX_REQUEST_SIZE, /* maxsize */
1108 CESA_MAX_FRAGMENTS, /* nsegments */
1109 CESA_MAX_REQUEST_SIZE, 0, /* maxsegsz, flags */
1110 NULL, NULL, /* lockfunc, lockfuncarg */
1111 &sc->sc_data_dtag); /* dmat */
1115 /* Initialize data structures: TDMA Descriptors Pool */
1116 error = cesa_alloc_dma_mem(sc, &sc->sc_tdesc_cdm,
1117 CESA_TDMA_DESCRIPTORS * sizeof(struct cesa_tdma_hdesc));
1121 STAILQ_INIT(&sc->sc_free_tdesc);
1122 for (i = 0; i < CESA_TDMA_DESCRIPTORS; i++) {
1123 sc->sc_tdesc[i].ctd_cthd =
1124 (struct cesa_tdma_hdesc *)(sc->sc_tdesc_cdm.cdm_vaddr) + i;
1125 sc->sc_tdesc[i].ctd_cthd_paddr = sc->sc_tdesc_cdm.cdm_paddr +
1126 (i * sizeof(struct cesa_tdma_hdesc));
1127 STAILQ_INSERT_TAIL(&sc->sc_free_tdesc, &sc->sc_tdesc[i],
1131 /* Initialize data structures: SA Descriptors Pool */
1132 error = cesa_alloc_dma_mem(sc, &sc->sc_sdesc_cdm,
1133 CESA_SA_DESCRIPTORS * sizeof(struct cesa_sa_hdesc));
1137 STAILQ_INIT(&sc->sc_free_sdesc);
1138 for (i = 0; i < CESA_SA_DESCRIPTORS; i++) {
1139 sc->sc_sdesc[i].csd_cshd =
1140 (struct cesa_sa_hdesc *)(sc->sc_sdesc_cdm.cdm_vaddr) + i;
1141 sc->sc_sdesc[i].csd_cshd_paddr = sc->sc_sdesc_cdm.cdm_paddr +
1142 (i * sizeof(struct cesa_sa_hdesc));
1143 STAILQ_INSERT_TAIL(&sc->sc_free_sdesc, &sc->sc_sdesc[i],
1147 /* Initialize data structures: Requests Pool */
1148 error = cesa_alloc_dma_mem(sc, &sc->sc_requests_cdm,
1149 CESA_REQUESTS * sizeof(struct cesa_sa_data));
1153 STAILQ_INIT(&sc->sc_free_requests);
1154 STAILQ_INIT(&sc->sc_ready_requests);
1155 STAILQ_INIT(&sc->sc_queued_requests);
1156 for (i = 0; i < CESA_REQUESTS; i++) {
1157 sc->sc_requests[i].cr_csd =
1158 (struct cesa_sa_data *)(sc->sc_requests_cdm.cdm_vaddr) + i;
1159 sc->sc_requests[i].cr_csd_paddr =
1160 sc->sc_requests_cdm.cdm_paddr +
1161 (i * sizeof(struct cesa_sa_data));
1163 /* Preallocate DMA maps */
1164 error = bus_dmamap_create(sc->sc_data_dtag, 0,
1165 &sc->sc_requests[i].cr_dmap);
1166 if (error && i > 0) {
1169 bus_dmamap_destroy(sc->sc_data_dtag,
1170 sc->sc_requests[i].cr_dmap);
1176 STAILQ_INSERT_TAIL(&sc->sc_free_requests, &sc->sc_requests[i],
1180 /* Initialize data structures: Sessions Pool */
1181 STAILQ_INIT(&sc->sc_free_sessions);
1182 for (i = 0; i < CESA_SESSIONS; i++) {
1183 sc->sc_sessions[i].cs_sid = i;
1184 STAILQ_INSERT_TAIL(&sc->sc_free_sessions, &sc->sc_sessions[i],
1190 * - Burst limit: 128 bytes,
1191 * - Outstanding reads enabled,
1194 val = CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 |
1195 CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE;
1197 if (sc->sc_soc_id == MV_DEV_88F6828 ||
1198 sc->sc_soc_id == MV_DEV_88F6820 ||
1199 sc->sc_soc_id == MV_DEV_88F6810)
1200 val |= CESA_TDMA_NUM_OUTSTAND;
1202 CESA_TDMA_WRITE(sc, CESA_TDMA_CR, val);
1206 * - SA descriptor is present at beginning of CESA SRAM,
1207 * - Multi-packet chain mode,
1208 * - Cooperation with TDMA enabled.
1210 CESA_REG_WRITE(sc, CESA_SA_DPR, 0);
1211 CESA_REG_WRITE(sc, CESA_SA_CR, CESA_SA_CR_ACTIVATE_TDMA |
1212 CESA_SA_CR_WAIT_FOR_TDMA | CESA_SA_CR_MULTI_MODE);
1214 /* Unmask interrupts */
1215 CESA_REG_WRITE(sc, CESA_ICR, 0);
1216 CESA_REG_WRITE(sc, CESA_ICM, CESA_ICM_ACCTDMA | sc->sc_tperr);
1217 CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0);
1218 CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, CESA_TDMA_EMR_MISS |
1219 CESA_TDMA_EMR_DOUBLE_HIT | CESA_TDMA_EMR_BOTH_HIT |
1220 CESA_TDMA_EMR_DATA_ERROR);
1222 /* Register in OCF */
1223 sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
1224 if (sc->sc_cid < 0) {
1225 device_printf(dev, "could not get crypto driver id\n");
1229 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
1230 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
1231 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
1232 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
1233 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
1234 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
1235 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
1236 if (sc->sc_soc_id == MV_DEV_88F6828 ||
1237 sc->sc_soc_id == MV_DEV_88F6820 ||
1238 sc->sc_soc_id == MV_DEV_88F6810)
1239 crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
/* Error unwind: release resources in reverse order of acquisition. */
1243 for (i = 0; i < CESA_REQUESTS; i++)
1244 bus_dmamap_destroy(sc->sc_data_dtag,
1245 sc->sc_requests[i].cr_dmap);
1247 cesa_free_dma_mem(&sc->sc_requests_cdm);
1249 cesa_free_dma_mem(&sc->sc_sdesc_cdm);
1251 cesa_free_dma_mem(&sc->sc_tdesc_cdm);
1253 bus_dma_tag_destroy(sc->sc_data_dtag);
1255 bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie);
1257 if (sc->sc_soc_id == MV_DEV_88F6828 ||
1258 sc->sc_soc_id == MV_DEV_88F6820 ||
1259 sc->sc_soc_id == MV_DEV_88F6810)
1260 pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size);
1262 bus_release_resources(dev, cesa_res_spec, sc->sc_res);
1264 mtx_destroy(&sc->sc_sessions_lock);
1265 mtx_destroy(&sc->sc_requests_lock);
1266 mtx_destroy(&sc->sc_sdesc_lock);
1267 mtx_destroy(&sc->sc_tdesc_lock);
1268 mtx_destroy(&sc->sc_sc_lock);
/*
 * Newbus detach: quiesce and release everything attach acquired, in
 * reverse order — mask interrupts, unregister from OCF, destroy the
 * per-request DMA maps, free the descriptor/request pools, destroy the
 * data DMA tag, tear down the interrupt, release bus resources, unmap
 * SRAM on Armada 38x (where attach mapped it), and destroy the mutexes.
 */
1273 cesa_detach(device_t dev)
1275 struct cesa_softc *sc;
1278 sc = device_get_softc(dev);
1280 /* TODO: Wait for queued requests completion before shutdown. */
1282 /* Mask interrupts */
1283 CESA_REG_WRITE(sc, CESA_ICM, 0);
1284 CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, 0);
1286 /* Unregister from OCF */
1287 crypto_unregister_all(sc->sc_cid);
1290 for (i = 0; i < CESA_REQUESTS; i++)
1291 bus_dmamap_destroy(sc->sc_data_dtag,
1292 sc->sc_requests[i].cr_dmap);
1294 /* Free DMA Memory */
1295 cesa_free_dma_mem(&sc->sc_requests_cdm);
1296 cesa_free_dma_mem(&sc->sc_sdesc_cdm);
1297 cesa_free_dma_mem(&sc->sc_tdesc_cdm);
1300 bus_dma_tag_destroy(sc->sc_data_dtag);
1302 /* Stop interrupt */
1303 bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie);
1305 /* Relase I/O and IRQ resources */
1306 bus_release_resources(dev, cesa_res_spec, sc->sc_res);
1308 /* Unmap SRAM memory */
1309 if (sc->sc_soc_id == MV_DEV_88F6828 ||
1310 sc->sc_soc_id == MV_DEV_88F6820 ||
1311 sc->sc_soc_id == MV_DEV_88F6810)
1312 pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size);
1314 /* Destroy mutexes */
1315 mtx_destroy(&sc->sc_sessions_lock);
1316 mtx_destroy(&sc->sc_requests_lock);
1317 mtx_destroy(&sc->sc_sdesc_lock);
1318 mtx_destroy(&sc->sc_tdesc_lock);
1319 mtx_destroy(&sc->sc_sc_lock);
/*
 * Interrupt handler: read and acknowledge (clear) the TDMA error cause
 * and CESA interrupt cause registers, log any TDMA/SRAM-parity errors
 * (presumably also latching sc->sc_error — the assignments are not
 * visible here), and on an accelerator-completion interrupt
 * (CESA_ICR_ACCTDMA): detach the queued request list under the lock,
 * start any waiting ready requests, then for each completed request
 * sync its data map, set crp_etype from the accumulated error, copy the
 * computed digest back into the crypto buffer for MAC requests, call
 * crypto_done() and free the request.  Finally re-arm the request pool
 * sync and, if the driver had returned ERESTART to OCF, unblock it via
 * crypto_unblock().
 */
1325 cesa_intr(void *arg)
1327 STAILQ_HEAD(, cesa_request) requests;
1328 struct cesa_request *cr, *tmp;
1329 struct cesa_softc *sc;
1336 ecr = CESA_TDMA_READ(sc, CESA_TDMA_ECR);
1337 CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0);
1338 icr = CESA_REG_READ(sc, CESA_ICR);
1339 CESA_REG_WRITE(sc, CESA_ICR, 0);
1341 /* Check for TDMA errors */
1342 if (ecr & CESA_TDMA_ECR_MISS) {
1343 device_printf(sc->sc_dev, "TDMA Miss error detected!\n");
1347 if (ecr & CESA_TDMA_ECR_DOUBLE_HIT) {
1348 device_printf(sc->sc_dev, "TDMA Double Hit error detected!\n");
1352 if (ecr & CESA_TDMA_ECR_BOTH_HIT) {
1353 device_printf(sc->sc_dev, "TDMA Both Hit error detected!\n");
1357 if (ecr & CESA_TDMA_ECR_DATA_ERROR) {
1358 device_printf(sc->sc_dev, "TDMA Data error detected!\n");
1362 /* Check for CESA errors */
1363 if (icr & sc->sc_tperr) {
1364 device_printf(sc->sc_dev, "CESA SRAM Parity error detected!\n");
1368 /* If there is nothing more to do, return */
1369 if ((icr & CESA_ICR_ACCTDMA) == 0)
1372 /* Get all finished requests */
1373 CESA_LOCK(sc, requests);
1374 STAILQ_INIT(&requests);
1375 STAILQ_CONCAT(&requests, &sc->sc_queued_requests);
1376 STAILQ_INIT(&sc->sc_queued_requests);
1377 CESA_UNLOCK(sc, requests);
1379 /* Execute all ready requests */
1382 /* Process completed requests */
1383 cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_POSTREAD |
1384 BUS_DMASYNC_POSTWRITE);
1386 STAILQ_FOREACH_SAFE(cr, &requests, cr_stq, tmp) {
1387 bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap,
1388 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1390 cr->cr_crp->crp_etype = sc->sc_error;
1392 crypto_copyback(cr->cr_crp->crp_flags,
1393 cr->cr_crp->crp_buf, cr->cr_mac->crd_inject,
1394 cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash);
1396 crypto_done(cr->cr_crp);
1397 cesa_free_request(sc, cr);
1400 cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_PREREAD |
1401 BUS_DMASYNC_PREWRITE);
1405 /* Unblock driver if it ran out of resources */
1407 blocked = sc->sc_blocked;
1409 CESA_UNLOCK(sc, sc);
1412 crypto_unblock(sc->sc_cid, blocked);
/*
 * OCF newsession entry point.
 *
 * Walks the cryptoini chain and requires exactly one cipher descriptor
 * ('enc') plus one hash/HMAC descriptor ('mac') -- any other shape is
 * rejected -- then allocates a cesa_session, builds the hardware config
 * word (cs_config), records IV/digest lengths and copies in the keys.
 * On any failure the freshly allocated session is released.
 *
 * NOTE(review): this excerpt is elided -- the 'enc'/'mac' assignments,
 * error returns, switch 'break's and the *sidp store are not visible
 * here; confirm against the full source.
 */
1416 cesa_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
1418 struct cesa_session *cs;
1419 struct cesa_softc *sc;
1420 struct cryptoini *enc;
1421 struct cryptoini *mac;
1424 sc = device_get_softc(dev);
1429 /* Check and parse input */
1430 if (cesa_is_hash(cri->cri_alg))
1435 cri = cri->cri_next;
/* Exactly one cipher and one hash descriptor must be present. */
1438 if (!enc && !cesa_is_hash(cri->cri_alg))
1441 if (!mac && cesa_is_hash(cri->cri_alg))
1444 if (cri->cri_next || !(enc && mac))
/* Key lengths arrive in bits (cri_klen); hardware limits are bytes. */
1448 if ((enc && (enc->cri_klen / 8) > CESA_MAX_KEY_LEN) ||
1449 (mac && (mac->cri_klen / 8) > CESA_MAX_MKEY_LEN))
1452 /* Allocate session */
1453 cs = cesa_alloc_session(sc);
1457 /* Prepare CESA configuration */
/* Cipher selection: all modes here are CBC variants. */
1463 switch (enc->cri_alg) {
1464 case CRYPTO_AES_CBC:
1465 cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC;
1466 cs->cs_ivlen = AES_BLOCK_LEN;
1468 case CRYPTO_DES_CBC:
1469 cs->cs_config |= CESA_CSHD_DES | CESA_CSHD_CBC;
1470 cs->cs_ivlen = DES_BLOCK_LEN;
1472 case CRYPTO_3DES_CBC:
1473 cs->cs_config |= CESA_CSHD_3DES | CESA_CSHD_3DES_EDE |
1475 cs->cs_ivlen = DES3_BLOCK_LEN;
/*
 * Auth selection: cs_hlen defaults to the algorithm's full digest
 * length when the caller did not request a truncated MAC (cri_mlen
 * == 0); a 96-bit truncation additionally sets the HMAC-trunc flag.
 */
1483 if (!error && mac) {
1484 switch (mac->cri_alg) {
1487 cs->cs_hlen = (mac->cri_mlen == 0) ? MD5_HASH_LEN :
1489 cs->cs_config |= CESA_CSHD_MD5;
1491 case CRYPTO_MD5_HMAC:
1492 cs->cs_mblen = MD5_HMAC_BLOCK_LEN;
1493 cs->cs_hlen = (mac->cri_mlen == 0) ? MD5_HASH_LEN :
1495 cs->cs_config |= CESA_CSHD_MD5_HMAC;
1496 if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN)
1497 cs->cs_config |= CESA_CSHD_96_BIT_HMAC;
1501 cs->cs_hlen = (mac->cri_mlen == 0) ? SHA1_HASH_LEN :
1503 cs->cs_config |= CESA_CSHD_SHA1;
1505 case CRYPTO_SHA1_HMAC:
1506 cs->cs_mblen = SHA1_HMAC_BLOCK_LEN;
1507 cs->cs_hlen = (mac->cri_mlen == 0) ? SHA1_HASH_LEN :
1509 cs->cs_config |= CESA_CSHD_SHA1_HMAC;
1510 if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN)
1511 cs->cs_config |= CESA_CSHD_96_BIT_HMAC;
1513 case CRYPTO_SHA2_256_HMAC:
1514 cs->cs_mblen = SHA2_256_HMAC_BLOCK_LEN;
1515 cs->cs_hlen = (mac->cri_mlen == 0) ? SHA2_256_HASH_LEN :
1517 cs->cs_config |= CESA_CSHD_SHA2_256_HMAC;
1525 /* Save cipher key */
1526 if (!error && enc && enc->cri_key) {
1527 cs->cs_klen = enc->cri_klen / 8;
1528 memcpy(cs->cs_key, enc->cri_key, cs->cs_klen);
/* AES needs a precomputed decryption key schedule. */
1529 if (enc->cri_alg == CRYPTO_AES_CBC)
1530 error = cesa_prep_aes_key(cs);
1533 /* Save digest key */
1534 if (!error && mac && mac->cri_key)
1535 error = cesa_set_mkey(cs, mac->cri_alg, mac->cri_key,
/* Error path: give the session back to the pool. */
1539 cesa_free_session(sc, cs);
/*
 * OCF freesession entry point: look up the session addressed by the
 * local session id embedded in 'tid' and return it to the pool.
 *
 * NOTE(review): the NULL-check on 'cs' and the return statement are
 * elided from this excerpt -- confirm against the full source.
 */
1549 cesa_freesession(device_t dev, uint64_t tid)
1551 struct cesa_session *cs;
1552 struct cesa_softc *sc;
1554 sc = device_get_softc(dev);
/* CRYPTO_SESID2LID extracts the driver-local session index. */
1555 cs = cesa_get_session(sc, CRYPTO_SESID2LID(tid));
1560 cesa_free_session(sc, cs);
1566 cesa_process(device_t dev, struct cryptop *crp, int hint)
1568 struct cesa_request *cr;
1569 struct cesa_session *cs;
1570 struct cryptodesc *crd;
1571 struct cryptodesc *enc;
1572 struct cryptodesc *mac;
1573 struct cesa_softc *sc;
1576 sc = device_get_softc(dev);
1577 crd = crp->crp_desc;
1582 /* Check session ID */
1583 cs = cesa_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid));
1585 crp->crp_etype = EINVAL;
1590 /* Check and parse input */
1591 if (crp->crp_ilen > CESA_MAX_REQUEST_SIZE) {
1592 crp->crp_etype = E2BIG;
1597 if (cesa_is_hash(crd->crd_alg))
1602 crd = crd->crd_next;
1605 if (!enc && !cesa_is_hash(crd->crd_alg))
1608 if (!mac && cesa_is_hash(crd->crd_alg))
1611 if (crd->crd_next || !(enc && mac)) {
1612 crp->crp_etype = EINVAL;
1619 * Get request descriptor. Block driver if there is no free
1620 * descriptors in pool.
1622 cr = cesa_alloc_request(sc);
1625 sc->sc_blocked = CRYPTO_SYMQ;
1626 CESA_UNLOCK(sc, sc);
1630 /* Prepare request */
1636 CESA_LOCK(sc, sessions);
1637 cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1639 if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
1640 if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1641 memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen);
1643 arc4rand(cr->cr_csd->csd_iv, cs->cs_ivlen, 0);
1645 if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
1646 crypto_copyback(crp->crp_flags, crp->crp_buf,
1647 enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv);
1649 if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1650 memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen);
1652 crypto_copydata(crp->crp_flags, crp->crp_buf,
1653 enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv);
1656 if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
1657 if ((enc->crd_klen / 8) <= CESA_MAX_KEY_LEN) {
1658 cs->cs_klen = enc->crd_klen / 8;
1659 memcpy(cs->cs_key, enc->crd_key, cs->cs_klen);
1660 if (enc->crd_alg == CRYPTO_AES_CBC)
1661 error = cesa_prep_aes_key(cs);
1666 if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
1667 if ((mac->crd_klen / 8) <= CESA_MAX_MKEY_LEN)
1668 error = cesa_set_mkey(cs, mac->crd_alg, mac->crd_key,
1674 /* Convert request to chain of TDMA and SA descriptors */
1676 error = cesa_create_chain(sc, cr);
1678 cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1679 CESA_UNLOCK(sc, sessions);
1682 cesa_free_request(sc, cr);
1683 crp->crp_etype = error;
1688 bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_PREREAD |
1689 BUS_DMASYNC_PREWRITE);
1691 /* Enqueue request to execution */
1692 cesa_enqueue_request(sc, cr);
1694 /* Start execution, if we have no more requests in queue */
1695 if ((hint & CRYPTO_HINT_MORE) == 0)