/*-
 * Copyright (c) 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2003 Global Technology Associates, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SafeNet SafeXcel-1141 hardware crypto accelerator
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/endian.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <crypto/sha1.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <sys/random.h>

#include "cryptodev_if.h"

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <dev/rndtest/rndtest.h>

#include <dev/safe/safereg.h>
#include <dev/safe/safevar.h>

/*
 * Prototypes and count for the pci_device structure
 */
static	int safe_probe(device_t);
static	int safe_attach(device_t);
static	int safe_detach(device_t);
static	int safe_suspend(device_t);
static	int safe_resume(device_t);
static	int safe_shutdown(device_t);

static	int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
static	int safe_freesession(device_t, u_int64_t);
static	int safe_process(device_t, struct cryptop *, int);

static device_method_t safe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		safe_probe),
	DEVMETHOD(device_attach,	safe_attach),
	DEVMETHOD(device_detach,	safe_detach),
	DEVMETHOD(device_suspend,	safe_suspend),
	DEVMETHOD(device_resume,	safe_resume),
	DEVMETHOD(device_shutdown,	safe_shutdown),

	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	safe_newsession),
	DEVMETHOD(cryptodev_freesession,safe_freesession),
	DEVMETHOD(cryptodev_process,	safe_process),

static driver_t safe_driver = {
	sizeof (struct safe_softc)
static devclass_t safe_devclass;

DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0);
MODULE_DEPEND(safe, crypto, 1, 1, 1);
MODULE_DEPEND(safe, rndtest, 1, 1, 1);

static	void safe_intr(void *);
static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
static	void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
static	void safe_rng_init(struct safe_softc *);
static	void safe_rng(void *);
#endif /* SAFE_NO_RNG */
static	int safe_dma_malloc(struct safe_softc *, bus_size_t,
		struct safe_dma_alloc *, int);
#define	safe_dma_sync(_dma, _flags) \
	bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
static	void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
static	int safe_dmamap_aligned(const struct safe_operand *);
static	int safe_dmamap_uniform(const struct safe_operand *);

static	void safe_reset_board(struct safe_softc *);
static	void safe_init_board(struct safe_softc *);
static	void safe_init_pciregs(device_t dev);
static	void safe_cleanchip(struct safe_softc *);
static	void safe_totalreset(struct safe_softc *);

static	int safe_free_entry(struct safe_softc *, struct safe_ringentry *);

static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD, 0,
    "SafeNet driver parameters");

static	void safe_dump_dmastatus(struct safe_softc *, const char *);
static	void safe_dump_ringstate(struct safe_softc *, const char *);
static	void safe_dump_intrstate(struct safe_softc *, const char *);
static	void safe_dump_request(struct safe_softc *, const char *,
		struct safe_ringentry *);

static	struct safe_softc *safec;	/* for use by hw.safe.dump */

static	int safe_debug = 0;
SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug,
    0, "control debugging msgs");
#define	DPRINTF(_x)	if (safe_debug) printf _x
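/*
 * NB (explanatory note, added): DPRINTF takes a parenthesized
 * argument list so the macro expands to a single printf call,
 * e.g. (as used in safe_intr below):
 *
 *	DPRINTF(("%s: wakeup crypto %x\n", __func__, wakeup));
 */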
#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))
#define	WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

struct safe_stats safestats;
SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats,
    safe_stats, "driver statistics");

static	int safe_rnginterval = 1;	/* poll once a second */
SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval,
    0, "RNG polling interval (secs)");
static	int safe_rngbufsize = 16;	/* 64 bytes each poll */
SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize,
    0, "RNG polling buffer size (32-bit words)");
static	int safe_rngmaxalarm = 8;	/* max alarms before reset */
SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm,
    0, "RNG max alarms before reset");
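/*
 * Illustrative note (added): these knobs are plain sysctl(8) tunables;
 * e.g., from userland:
 *
 *	sysctl hw.safe.rnginterval=2	# poll the RNG every 2 seconds
 *	sysctl hw.safe.rngbufsize=32	# fetch 32 words (128 bytes) per poll
 */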
#endif /* SAFE_NO_RNG */

safe_probe(device_t dev)
	if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET &&
	    pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL)
		return (BUS_PROBE_DEFAULT);

safe_partname(struct safe_softc *sc)
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_SAFENET:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141";
		return "SafeNet unknown-part";
	return "Unknown-vendor unknown-part";

default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
	random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE);
#endif /* SAFE_NO_RNG */

safe_attach(device_t dev)
	struct safe_softc *sc = device_get_softc(dev);
	u_int32_t cmd, i, devinfo;

	bzero(sc, sizeof (*sc));

	/* XXX handle power management */

	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	cmd |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 4);
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);

	if (!(cmd & PCIM_CMD_MEMEN)) {
		device_printf(dev, "failed to enable memory mapping\n");
	if (!(cmd & PCIM_CMD_BUSMASTEREN)) {
		device_printf(dev, "failed to enable bus mastering\n");

	/*
	 * Set up memory mapping of PCI registers.
	 */
	sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	if (sc->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
	sc->sc_st = rman_get_bustag(sc->sc_sr);
	sc->sc_sh = rman_get_bushandle(sc->sc_sr);

	/*
	 * Arrange the interrupt line.
	 */
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is mapped appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, safe_intr, sc, &sc->sc_ih)) {
		device_printf(dev, "could not establish interrupt\n");

	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");

	sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
	    (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);

	/*
	 * Set up the DMA descriptor area.
	 */
	if (bus_dma_tag_create(NULL,		/* parent */
	    SAFE_DMA_BOUNDARY,			/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    SAFE_MAX_DMA,			/* maxsize */
	    SAFE_MAX_PART,			/* nsegments */
	    SAFE_MAX_SSIZE,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* locking */
		device_printf(dev, "cannot allocate DMA tag\n");
	if (bus_dma_tag_create(NULL,		/* parent */
	    SAFE_MAX_DSIZE,			/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    SAFE_MAX_DMA,			/* maxsize */
	    SAFE_MAX_PART,			/* nsegments */
	    SAFE_MAX_DSIZE,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* locking */
		device_printf(dev, "cannot allocate DMA tag\n");

	/*
	 * Allocate packet engine descriptors.
	 */
	if (safe_dma_malloc(sc,
	    SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
	    &sc->sc_ringalloc, 0)) {
		device_printf(dev, "cannot allocate PE descriptor ring\n");
		bus_dma_tag_destroy(sc->sc_srcdmat);

	/*
	 * Hook up the static portion of all our data structures.
	 */
	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
	sc->sc_front = sc->sc_ring;
	sc->sc_back = sc->sc_ring;
	raddr = sc->sc_ringalloc.dma_paddr;
	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
		struct safe_ringentry *re = &sc->sc_ring[i];

		re->re_desc.d_sa = raddr +
		    offsetof(struct safe_ringentry, re_sa);
		re->re_sa.sa_staterec = raddr +
		    offsetof(struct safe_ringentry, re_sastate);
		raddr += sizeof (struct safe_ringentry);
	mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev),
	    "packet engine ring", MTX_DEF);
	/*
	 * Allocate scatter and gather particle descriptors.
	 */
	if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
	    &sc->sc_spalloc, 0)) {
		device_printf(dev, "cannot allocate source particle "
		    "descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_srcdmat);
	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
	sc->sc_spfree = sc->sc_spring;
	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));

	if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
	    &sc->sc_dpalloc, 0)) {
		device_printf(dev, "cannot allocate destination particle "
		    "descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_spalloc);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_dstdmat);
	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
	sc->sc_dpfree = sc->sc_dpring;
	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));

	device_printf(sc->sc_dev, "%s", safe_partname(sc));

	devinfo = READ_REG(sc, SAFE_DEVINFO);
	if (devinfo & SAFE_DEVINFO_RNG) {
		sc->sc_flags |= SAFE_FLAGS_RNG;
	if (devinfo & SAFE_DEVINFO_PKEY) {
		sc->sc_flags |= SAFE_FLAGS_KEY;
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
	if (devinfo & SAFE_DEVINFO_DES) {
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	if (devinfo & SAFE_DEVINFO_AES) {
		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	if (devinfo & SAFE_DEVINFO_MD5) {
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
	if (devinfo & SAFE_DEVINFO_SHA1) {
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
	/* XXX other supported algorithms */

	safe_reset_board(sc);		/* reset h/w */
	safe_init_pciregs(dev);		/* init pci settings */
	safe_init_board(sc);		/* init h/w */

	if (sc->sc_flags & SAFE_FLAGS_RNG) {
		sc->sc_rndtest = rndtest_attach(dev);
			sc->sc_harvest = rndtest_harvest;
			sc->sc_harvest = default_harvest;
		sc->sc_harvest = default_harvest;

		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
		callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc);
#endif /* SAFE_NO_RNG */

	safec = sc;			/* for use by hw.safe.dump */

	crypto_unregister_all(sc->sc_cid);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

/*
 * Detach a device that successfully probed.
 */
safe_detach(device_t dev)
	struct safe_softc *sc = device_get_softc(dev);

	/* XXX wait/abort active ops */

	WRITE_REG(sc, SAFE_HI_MASK, 0);	/* disable interrupts */

	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

		rndtest_detach(sc->sc_rndtest);

	safe_dma_free(sc, &sc->sc_dpalloc);
	safe_dma_free(sc, &sc->sc_spalloc);
	mtx_destroy(&sc->sc_ringmtx);
	safe_dma_free(sc, &sc->sc_ringalloc);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_srcdmat);
	bus_dma_tag_destroy(sc->sc_dstdmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
safe_shutdown(device_t dev)
	safe_stop(device_get_softc(dev));

/*
 * Device suspend routine.
 */
safe_suspend(device_t dev)
	struct safe_softc *sc = device_get_softc(dev);

	/* XXX stop the device and save PCI settings */

	sc->sc_suspended = 1;

safe_resume(device_t dev)
	struct safe_softc *sc = device_get_softc(dev);

	/* XXX restore PCI settings and start the device */

	sc->sc_suspended = 0;
/*
 * SafeXcel interrupt routine.
 */
	struct safe_softc *sc = arg;
	volatile u_int32_t stat;

	stat = READ_REG(sc, SAFE_HM_STAT);
	if (stat == 0)			/* shared irq, not for us */

	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */

	if ((stat & SAFE_INT_PE_DDONE)) {
		/*
		 * Descriptor(s) done; scan the ring and
		 * process completed operations.
		 */
		mtx_lock(&sc->sc_ringmtx);
		while (sc->sc_back != sc->sc_front) {
			struct safe_ringentry *re = sc->sc_back;

			safe_dump_ringstate(sc, __func__);
			safe_dump_request(sc, __func__, re);

			/*
			 * safe_process marks ring entries that were allocated
			 * but not used with a csr of zero.  This ensures the
			 * ring front pointer never needs to be set backwards
			 * in the event that an entry is allocated but not used
			 * because of a setup error.
			 */
			if (re->re_desc.d_csr != 0) {
				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
				safe_callback(sc, re);
			if (++(sc->sc_back) == sc->sc_ringtop)
				sc->sc_back = sc->sc_ring;
		mtx_unlock(&sc->sc_ringmtx);

	/*
	 * Check to see if we got any DMA errors.
	 */
	if (stat & SAFE_INT_PE_ERROR) {
		DPRINTF(("dmaerr dmastat %08x\n",
		    READ_REG(sc, SAFE_PE_DMASTAT)));
		safestats.st_dmaerr++;

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
		DPRINTF(("%s: wakeup crypto %x\n", __func__,
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);

/*
 * safe_feed() - post a request to the chip.
 */
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
	if (re->re_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_PREREAD);
	/* XXX have no smaller granularity */
	safe_dma_sync(&sc->sc_ringalloc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);

	safe_dump_ringstate(sc, __func__);
	safe_dump_request(sc, __func__, re);

	if (sc->sc_nqchip > safestats.st_maxqchip)
		safestats.st_maxqchip = sc->sc_nqchip;
	/* poke h/w to check descriptor ring, any value can be written */
	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);

#define	N(a)	(sizeof(a) / sizeof (a[0]))
safe_setup_enckey(struct safe_session *ses, caddr_t key)
	bcopy(key, ses->ses_key, ses->ses_klen / 8);

	/* PE is little-endian; ensure proper byte order */
	for (i = 0; i < N(ses->ses_key); i++)
		ses->ses_key[i] = htole32(ses->ses_key[i]);

safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_IPAD_VAL;

	if (algo == CRYPTO_MD5_HMAC) {
		MD5Update(&md5ctx, key, klen);
		MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
		bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state));
		SHA1Update(&sha1ctx, key, klen);
		SHA1Update(&sha1ctx, hmac_ipad_buffer,
		    SHA1_HMAC_BLOCK_LEN - klen);
		bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));

	for (i = 0; i < klen; i++)
		key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

	if (algo == CRYPTO_MD5_HMAC) {
		MD5Update(&md5ctx, key, klen);
		MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
		bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state));
		SHA1Update(&sha1ctx, key, klen);
		SHA1Update(&sha1ctx, hmac_opad_buffer,
		    SHA1_HMAC_BLOCK_LEN - klen);
		bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_OPAD_VAL;

	/* PE is little-endian; ensure proper byte order */
	for (i = 0; i < N(ses->ses_hminner); i++) {
		ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
		ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
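	/*
	 * NB (explanatory note, added): the loops above precompute the
	 * inner and outer HMAC states,
	 *
	 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
	 *
	 * by hashing one block each of K^ipad and K^opad.  With the
	 * intermediate states saved in ses_hminner/ses_hmouter, the
	 * per-request work reduces to hashing the message data itself.
	 */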
/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
	struct safe_softc *sc = device_get_softc(dev);
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct safe_session *ses = NULL;

	if (sidp == NULL || cri == NULL || sc == NULL)

	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC ||
		    c->cri_alg == CRYPTO_SHA1_HMAC ||
		    c->cri_alg == CRYPTO_NULL_HMAC) {
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC ||
		    c->cri_alg == CRYPTO_AES_CBC ||
		    c->cri_alg == CRYPTO_NULL_CBC) {
	if (encini == NULL && macini == NULL)
	if (encini) {			/* validate key length */
		switch (encini->cri_alg) {
			if (encini->cri_klen != 64)
		case CRYPTO_3DES_CBC:
			if (encini->cri_klen != 192)
			if (encini->cri_klen != 128 &&
			    encini->cri_klen != 192 &&
			    encini->cri_klen != 256)

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = (struct safe_session *)malloc(
		    sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
		sc->sc_nsessions = 1;
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
			sesn = sc->sc_nsessions;
			ses = (struct safe_session *)malloc((sesn + 1) *
			    sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
			bcopy(sc->sc_sessions, ses, sesn *
			    sizeof(struct safe_session));
			bzero(sc->sc_sessions, sesn *
			    sizeof(struct safe_session));
			free(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];

	bzero(ses, sizeof(struct safe_session));

		/* XXX may read fewer than requested */
		read_random(ses->ses_iv, sizeof(ses->ses_iv));

		ses->ses_klen = encini->cri_klen;
		if (encini->cri_key != NULL)
			safe_setup_enckey(ses, encini->cri_key);

		ses->ses_mlen = macini->cri_mlen;
		if (ses->ses_mlen == 0) {
			if (macini->cri_alg == CRYPTO_MD5_HMAC)
				ses->ses_mlen = MD5_HASH_LEN;
				ses->ses_mlen = SHA1_HASH_LEN;

		if (macini->cri_key != NULL) {
			safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
			    macini->cri_klen / 8);

	*sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
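	/*
	 * NB (explanatory note, added): SAFE_SID encodes the device unit
	 * and the session index into the returned sid; SAFE_SESSION()
	 * (used in safe_freesession and safe_process below) recovers
	 * the index.
	 */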
/*
 * Deallocate a session.
 */
safe_freesession(device_t dev, u_int64_t tid)
	struct safe_softc *sc = device_get_softc(dev);
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	session = SAFE_SESSION(sid);
	if (session < sc->sc_nsessions) {
		bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));

safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
	struct safe_operand *op = arg;

	DPRINTF(("%s: mapsize %u nsegs %d error %d\n", __func__,
	    (u_int) mapsize, nsegs, error));
	op->mapsize = mapsize;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));

safe_process(device_t dev, struct cryptop *crp, int hint)
	struct safe_softc *sc = device_get_softc(dev);
	int err = 0, i, nicealign, uniform;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int bypass, oplen, ivsize;
	struct safe_session *ses;
	struct safe_ringentry *re;
	struct safe_sarec *sa;
	struct safe_pdesc *pd;
	u_int32_t cmd0, cmd1, staterec;

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		safestats.st_invalid++;
	if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		safestats.st_badsession++;

	mtx_lock(&sc->sc_ringmtx);
	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
		safestats.st_ringfull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		mtx_unlock(&sc->sc_ringmtx);

	staterec = re->re_sa.sa_staterec;	/* save */
	/* NB: zero everything but the PE descriptor */
	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
	re->re_sa.sa_staterec = staterec;	/* restore */

	re->re_sesn = SAFE_SESSION(crp->crp_sid);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		re->re_src_m = (struct mbuf *)crp->crp_buf;
		re->re_dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		re->re_src_io = (struct uio *)crp->crp_buf;
		re->re_dst_io = (struct uio *)crp->crp_buf;
		safestats.st_badflags++;
		goto errout;	/* XXX we don't handle contiguous blocks! */

	ses = &sc->sc_sessions[re->re_sesn];

	crd1 = crp->crp_desc;
		safestats.st_nodesc++;
	crd2 = crd1->crd_next;

	cmd0 = SAFE_SA_CMD0_BASIC;	/* basic group operation */

		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_NULL_HMAC) {
			cmd0 |= SAFE_SA_CMD0_OP_HASH;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_NULL_CBC) {
			cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
			safestats.st_badalg++;

		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		     crd1->crd_alg == CRYPTO_NULL_HMAC) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		     crd2->crd_alg == CRYPTO_3DES_CBC ||
		     crd2->crd_alg == CRYPTO_AES_CBC ||
		     crd2->crd_alg == CRYPTO_NULL_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		     crd1->crd_alg == CRYPTO_3DES_CBC ||
		     crd1->crd_alg == CRYPTO_AES_CBC ||
		     crd1->crd_alg == CRYPTO_NULL_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
		     crd2->crd_alg == CRYPTO_NULL_HMAC) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			safestats.st_badalg++;
		cmd0 |= SAFE_SA_CMD0_OP_BOTH;

		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
			safe_setup_enckey(ses, enccrd->crd_key);

		if (enccrd->crd_alg == CRYPTO_DES_CBC) {
			cmd0 |= SAFE_SA_CMD0_DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			ivsize = 2*sizeof(u_int32_t);
		} else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
			cmd0 |= SAFE_SA_CMD0_3DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			ivsize = 2*sizeof(u_int32_t);
		} else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
			cmd0 |= SAFE_SA_CMD0_AES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			if (ses->ses_klen == 128)
				cmd1 |= SAFE_SA_CMD1_AES128;
			else if (ses->ses_klen == 192)
				cmd1 |= SAFE_SA_CMD1_AES192;
				cmd1 |= SAFE_SA_CMD1_AES256;
			ivsize = 4*sizeof(u_int32_t);
			cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;

		/*
		 * Set up encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			cmd0 |= SAFE_SA_CMD0_OUTBOUND;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				iv = enccrd->crd_iv;
				iv = (caddr_t) ses->ses_iv;
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, ivsize, iv);
			bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
			cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
			re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
			cmd0 |= SAFE_SA_CMD0_INBOUND;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				bcopy(enccrd->crd_iv,
				    re->re_sastate.sa_saved_iv, ivsize);
				crypto_copydata(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, ivsize,
				    (caddr_t)re->re_sastate.sa_saved_iv);
			cmd0 |= SAFE_SA_CMD0_IVLD_STATE;

		/*
		 * For basic encryption use the zero pad algorithm.
		 * This pads results to an 8-byte boundary and
		 * suppresses padding verification for inbound (i.e.
		 * decrypt) operations.
		 *
		 * NB: Not sure if the 8-byte pad boundary is a problem.
		 */
		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;

		/* XXX assert key bufs have the same size */
		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));

		if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
			safe_setup_mackey(ses, maccrd->crd_alg,
			    maccrd->crd_key, maccrd->crd_klen / 8);

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
			cmd0 |= SAFE_SA_CMD0_MD5;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
		} else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
			cmd0 |= SAFE_SA_CMD0_SHA1;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
			cmd0 |= SAFE_SA_CMD0_HASH_NULL;
		/*
		 * Digest data is loaded from the SA and the hash
		 * result is saved to the state block where we
		 * retrieve it for return to the caller.
		 */
		/* XXX assert digest bufs have the same size */
		bcopy(ses->ses_hminner, sa->sa_indigest,
		    sizeof(sa->sa_indigest));
		bcopy(ses->ses_hmouter, sa->sa_outdigest,
		    sizeof(sa->sa_outdigest));

		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;

	if (enccrd && maccrd) {
		/*
		 * The offset from hash data to the start of
		 * crypt data is the difference in the skips.
		 */
		bypass = maccrd->crd_skip;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
			DPRINTF(("%s: hash does not precede crypt; "
			    "mac skip %u enc skip %u\n",
			    __func__, maccrd->crd_skip, enccrd->crd_skip));
			safestats.st_skipmismatch++;
		oplen = enccrd->crd_skip + enccrd->crd_len;
		if (maccrd->crd_skip + maccrd->crd_len != oplen) {
			DPRINTF(("%s: hash amount %u != crypt amount %u\n",
			    __func__, maccrd->crd_skip + maccrd->crd_len,
			safestats.st_lenmismatch++;

			printf("mac: skip %d, len %d, inject %d\n",
			    maccrd->crd_skip, maccrd->crd_len,
			    maccrd->crd_inject);
			printf("enc: skip %d, len %d, inject %d\n",
			    enccrd->crd_skip, enccrd->crd_len,
			    enccrd->crd_inject);
			printf("bypass %d coffset %d oplen %d\n",
			    bypass, coffset, oplen);
		if (coffset & 3) {	/* offset must be 32-bit aligned */
			DPRINTF(("%s: coffset %u misaligned\n",
			    __func__, coffset));
			safestats.st_coffmisaligned++;
		if (coffset > 255) {	/* offset must be <256 dwords */
			DPRINTF(("%s: coffset %u too big\n",
			    __func__, coffset));
			safestats.st_cofftoobig++;
		/*
		 * Tell the hardware to copy the header to the output.
		 * The header is defined as the data from the end of
		 * the bypass to the start of data to be encrypted.
		 * Typically this is the inline IV.  Note that you need
		 * to do this even if src+dst are the same; it appears
		 * that w/o this bit the crypted data is written
		 * immediately after the bypass data.
		 */
		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
		/*
		 * Disable IP header mutable bit handling.  This is
		 * needed to get correct HMAC calculations.
		 */
		cmd1 |= SAFE_SA_CMD1_MUTABLE;
		bypass = enccrd->crd_skip;
		oplen = bypass + enccrd->crd_len;
		bypass = maccrd->crd_skip;
		oplen = bypass + maccrd->crd_len;

	/* XXX verify multiple of 4 when using s/g */
	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
		safestats.st_bypasstoobig++;

	if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) {
		safestats.st_nomap++;
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_srcdmat, re->re_src_map,
		    re->re_src_m, safe_op_cb,
		    &re->re_src, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
			re->re_src_map = NULL;
			safestats.st_noload++;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_srcdmat, re->re_src_map,
		    re->re_src_io, safe_op_cb,
		    &re->re_src, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
			re->re_src_map = NULL;
			safestats.st_noload++;
	nicealign = safe_dmamap_aligned(&re->re_src);
	uniform = safe_dmamap_uniform(&re->re_src);

	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
	    nicealign, uniform, re->re_src.nsegs));
	if (re->re_src.nsegs > 1) {
		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
		    ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
		for (i = 0; i < re->re_src_nsegs; i++) {
			/* NB: no need to check if there's space */
			if (++(sc->sc_spfree) == sc->sc_springtop)
				sc->sc_spfree = sc->sc_spring;

			KASSERT((pd->pd_flags&3) == 0 ||
			    (pd->pd_flags&3) == SAFE_PD_DONE,
			    ("bogus source particle descriptor; flags %x",
			pd->pd_addr = re->re_src_segs[i].ds_addr;
			pd->pd_size = re->re_src_segs[i].ds_len;
			pd->pd_flags = SAFE_PD_READY;
		cmd0 |= SAFE_SA_CMD0_IGATHER;
		/*
		 * No need for gather; reference the operand directly.
		 */
		re->re_desc.d_src = re->re_src_segs[0].ds_addr;

	if (enccrd == NULL && maccrd != NULL) {
		/*
		 * Hash op; no destination needed.
		 */
		if (crp->crp_flags & CRYPTO_F_IOV) {
				safestats.st_iovmisaligned++;
			/*
			 * Source is not suitable for direct use as
			 * the destination.  Create a new scatter/gather
			 * list based on the destination requirements
			 * and check if that's ok.
			 */
			if (bus_dmamap_create(sc->sc_dstdmat,
			    BUS_DMA_NOWAIT, &re->re_dst_map)) {
				safestats.st_nomap++;
			if (bus_dmamap_load_uio(sc->sc_dstdmat,
			    re->re_dst_map, re->re_dst_io,
			    safe_op_cb, &re->re_dst,
			    BUS_DMA_NOWAIT) != 0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				re->re_dst_map = NULL;
				safestats.st_noload++;
			uniform = safe_dmamap_uniform(&re->re_dst);
				/*
				 * There's no way to handle the DMA
				 * requirements with this uio.  We
				 * could create a separate DMA area for
				 * the result and then copy it back,
				 * but for now we just bail and return
				 * an error.  Note that uio requests
				 * > SAFE_MAX_DSIZE are handled because
				 * the DMA map and segment list for the
				 * destination will result in a
				 * destination particle list that does
				 * the necessary scatter DMA.
				 */
				safestats.st_iovnotuniform++;
			re->re_dst = re->re_src;
	} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (nicealign && uniform == 1) {
			/*
			 * Source layout is suitable for direct
			 * sharing of the DMA map and segment list.
			 */
			re->re_dst = re->re_src;
		} else if (nicealign && uniform == 2) {
			/*
			 * The source is properly aligned but requires a
			 * different particle list to handle DMA of the
			 * result.  Create a new map and do the load to
			 * create the segment list.  The particle
			 * descriptor setup code below will handle the
			 */
			if (bus_dmamap_create(sc->sc_dstdmat,
			    BUS_DMA_NOWAIT, &re->re_dst_map)) {
				safestats.st_nomap++;
			if (bus_dmamap_load_mbuf(sc->sc_dstdmat,
			    re->re_dst_map, re->re_dst_m,
			    safe_op_cb, &re->re_dst,
			    BUS_DMA_NOWAIT) != 0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				re->re_dst_map = NULL;
				safestats.st_noload++;
		} else {		/* !(aligned and/or uniform) */
			struct mbuf *m, *top, **mp;

			/*
			 * DMA constraints require that we allocate a
			 * new mbuf chain for the destination.  We
			 * allocate an entire new set of mbufs of
			 * optimal/required size and then tell the
			 * hardware to copy any bits that are not
			 * created as a byproduct of the operation.
			 */
				safestats.st_unaligned++;
				safestats.st_notuniform++;
			totlen = re->re_src_mapsize;
			if (re->re_src_m->m_flags & M_PKTHDR) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m && !m_dup_pkthdr(m, re->re_src_m,
				MGET(m, M_DONTWAIT, MT_DATA);
				safestats.st_nombuf++;
				err = sc->sc_nqchip ? ERESTART : ENOMEM;
			if (totlen >= MINCLSIZE) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					safestats.st_nomcl++;
					err = sc->sc_nqchip ?
			while (totlen > 0) {
					MGET(m, M_DONTWAIT, MT_DATA);
					safestats.st_nombuf++;
					err = sc->sc_nqchip ?
				if (top && totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						safestats.st_nomcl++;
						err = sc->sc_nqchip ?
				m->m_len = len = min(totlen, len);

			if (bus_dmamap_create(sc->sc_dstdmat,
			    BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
				safestats.st_nomap++;
			if (bus_dmamap_load_mbuf(sc->sc_dstdmat,
			    re->re_dst_map, re->re_dst_m,
			    safe_op_cb, &re->re_dst,
			    BUS_DMA_NOWAIT) != 0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				re->re_dst_map = NULL;
				safestats.st_noload++;
			if (re->re_src.mapsize > oplen) {
				/*
				 * There's data following what the
				 * hardware will copy for us.  If this
				 * isn't just the ICV (that's going to
				 * be written on completion), copy it
				 */
				    (re->re_src.mapsize-oplen) == 12 &&
				    maccrd->crd_inject == oplen))
					safe_mcopy(re->re_src_m,
					safestats.st_noicvcopy++;
		safestats.st_badflags++;

	if (re->re_dst.nsegs > 1) {
		re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
		    ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
		for (i = 0; i < re->re_dst_nsegs; i++) {
			KASSERT((pd->pd_flags&3) == 0 ||
			    (pd->pd_flags&3) == SAFE_PD_DONE,
			    ("bogus dest particle descriptor; flags %x",
			if (++(sc->sc_dpfree) == sc->sc_dpringtop)
				sc->sc_dpfree = sc->sc_dpring;
			pd->pd_addr = re->re_dst_segs[i].ds_addr;
			pd->pd_flags = SAFE_PD_READY;
		cmd0 |= SAFE_SA_CMD0_OSCATTER;
		/*
		 * No need for scatter; reference the operand directly.
		 */
		re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;

	/*
	 * All done with setup; fill in the SA command words
	 * and the packet engine descriptor.  The operation
	 * is now ready for submission to the hardware.
	 */
	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
	    | (coffset << SAFE_SA_CMD1_OFFSET_S)
	    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
	    | SAFE_SA_CMD1_SRPCI
	/*
	 * NB: the order of writes is important here.  In case the
	 * chip is scanning the ring because of an outstanding request
	 * it might nab this one too.  In that case we need to make
	 * sure the setup is complete before we write the length
	 * field of the descriptor as it signals the descriptor is
	 * ready for processing.
	 */
	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
	re->re_desc.d_len = oplen
	    | (bypass << SAFE_PE_LEN_BYPASS_S)

	safestats.st_ipackets++;
	safestats.st_ibytes += oplen;

	if (++(sc->sc_front) == sc->sc_ringtop)
		sc->sc_front = sc->sc_ring;

	/* XXX honor batching */
	mtx_unlock(&sc->sc_ringmtx);

	if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
		m_freem(re->re_dst_m);
	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	if (re->re_src_map != NULL) {
		bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
	mtx_unlock(&sc->sc_ringmtx);
	if (err != ERESTART) {
		crp->crp_etype = err;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
	struct cryptop *crp = (struct cryptop *)re->re_crp;
	struct cryptodesc *crd;

	safestats.st_opackets++;
	safestats.st_obytes += re->re_dst.mapsize;

	safe_dma_sync(&sc->sc_ringalloc,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
		    re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
		safestats.st_peoperr++;
		crp->crp_etype = EIO;	/* something more meaningful? */

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
	bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);

	/*
	 * If the result was written to a different mbuf chain, swap
	 * it in as the return value and reclaim the original.
	 */
	if ((crp->crp_flags & CRYPTO_F_IMBUF) && re->re_src_m != re->re_dst_m) {
		m_freem(re->re_src_m);
		crp->crp_buf = (caddr_t)re->re_dst_m;

	if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
		/* copy out IV for future use */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg == CRYPTO_DES_CBC ||
			    crd->crd_alg == CRYPTO_3DES_CBC) {
				ivsize = 2*sizeof(u_int32_t);
			} else if (crd->crd_alg == CRYPTO_AES_CBC) {
				ivsize = 4*sizeof(u_int32_t);
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivsize, ivsize,
			    (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);

	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
		/* copy out ICV result */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC ||
			    crd->crd_alg == CRYPTO_NULL_HMAC))
			if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
				/*
				 * SHA-1 ICVs are byte-swapped; fix 'em up
				 * before copying them to their destination.
				 */
				re->re_sastate.sa_saved_indigest[0] =
				    bswap32(re->re_sastate.sa_saved_indigest[0]);
				re->re_sastate.sa_saved_indigest[1] =
				    bswap32(re->re_sastate.sa_saved_indigest[1]);
				re->re_sastate.sa_saved_indigest[2] =
				    bswap32(re->re_sastate.sa_saved_indigest[2]);
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    sc->sc_sessions[re->re_sesn].ses_mlen,
			    (caddr_t)re->re_sastate.sa_saved_indigest);
/*
 * Copy all data past offset from srcm to dstm.
 */
safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
	u_int j, dlen, slen;

	/*
	 * Advance src and dst to offset.
	 */
		if (srcm->m_len > j)
		srcm = srcm->m_next;
	sptr = mtod(srcm, caddr_t) + j;
	slen = srcm->m_len - j;
		if (dstm->m_len > j)
		dstm = dstm->m_next;
	dptr = mtod(dstm, caddr_t) + j;
	dlen = dstm->m_len - j;

	/*
	 * Copy everything that remains.
	 */
		j = min(slen, dlen);
		bcopy(sptr, dptr, j);
			srcm = srcm->m_next;
			sptr = srcm->m_data;
			sptr += j, slen -= j;
			dstm = dstm->m_next;
			dptr = dstm->m_data;
			dptr += j, dlen -= j;
#define	SAFE_RNG_MAXWAIT	1000

safe_rng_init(struct safe_softc *sc)
	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
	/* use default value according to the manual */
	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	/*
	 * There is a bug in rev 1.0 of the 1140: when the RNG
	 * is brought out of reset the ready status flag does not
	 * work until the RNG has finished its internal initialization.
	 *
	 * So in order to determine that the device is through its
	 * initialization we must read the data register, using the
	 * status reg in the read in case it is initialized.  Then read
	 * the data register until it changes from the first read.
	 * Once it changes, read the data register until it changes
	 * again.  At that point the RNG is considered initialized.
	 * This could take between 750ms and 1000ms.
	 */
	w = READ_REG(sc, SAFE_RNG_OUT);
		v = READ_REG(sc, SAFE_RNG_OUT);
	} while (++i < SAFE_RNG_MAXWAIT);

	/* Wait until the data changes again */
		v = READ_REG(sc, SAFE_RNG_OUT);
	} while (++i < SAFE_RNG_MAXWAIT);

static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
	WRITE_REG(sc, SAFE_RNG_CTRL,
	    READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);

static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
	WRITE_REG(sc, SAFE_RNG_CTRL,
	    READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);

static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
	return READ_REG(sc, SAFE_RNG_OUT);

	struct safe_softc *sc = arg;
	u_int32_t buf[SAFE_RNG_MAXBUFSIZ];	/* NB: maybe move to softc */

	/*
	 * Fetch the next block of data.
	 */
	maxwords = safe_rngbufsize;
	if (maxwords > SAFE_RNG_MAXBUFSIZ)
		maxwords = SAFE_RNG_MAXBUFSIZ;
	for (i = 0; i < maxwords; i++)
		buf[i] = safe_rng_read(sc);
	/*
	 * Check the comparator alarm count and reset the h/w if
	 * it exceeds our threshold.  This guards against the
	 * hardware oscillators resonating with external signals.
	 */
	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
		u_int32_t freq_inc, w;

		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
		    READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
		safestats.st_rngalarm++;
		safe_rng_enable_short_cycle(sc);
		for (i = 0; i < 64; i++) {
			w = READ_REG(sc, SAFE_RNG_CNFG);
			freq_inc = ((w + freq_inc) & 0x3fL);
			w = ((w & ~0x3fL) | freq_inc);
			WRITE_REG(sc, SAFE_RNG_CNFG, w);
			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
			(void) safe_rng_read(sc);
			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
				safe_rng_disable_short_cycle(sc);
		safe_rng_disable_short_cycle(sc);
		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	(*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
	callout_reset(&sc->sc_rngto,
	    hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
#endif /* SAFE_NO_RNG */
safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;

	struct safe_softc *sc,
	struct safe_dma_alloc *dma,

	r = bus_dma_tag_create(NULL,		/* parent */
	    sizeof(u_int32_t), 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    size,				/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* locking */
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dma_tag_create failed; error %u\n", r);

	r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map);
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dmamap_create failed; error %u\n", r);

	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
	    BUS_DMA_NOWAIT, &dma->dma_map);
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dmamem_alloc failed; size %zu, error %u\n",

	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    mapflags | BUS_DMA_NOWAIT);
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dmamap_load failed; error %u\n", r);

	dma->dma_size = size;

	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
	dma->dma_map = NULL;
	dma->dma_tag = NULL;

safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
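/*
 * Usage sketch (illustrative, added; see safe_attach/safe_detach for
 * the real call sites): storage obtained with safe_dma_malloc must be
 * released with safe_dma_free, e.g.
 *
 *	struct safe_dma_alloc a;
 *
 *	if (safe_dma_malloc(sc, size, &a, 0) == 0) {
 *		...use a.dma_vaddr (KVA) and a.dma_paddr (bus address)...
 *		safe_dma_free(sc, &a);
 *	}
 */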
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
safe_reset_board(struct safe_softc *sc)
	/*
	 * Reset the device.  The manual says no delay
	 * is needed between marking and clearing reset.
	 */
	v = READ_REG(sc, SAFE_PE_DMACFG) &~
	    (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
	     SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v
	    | SAFE_PE_DMACFG_PERESET
	    | SAFE_PE_DMACFG_PDRRESET
	    | SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v);

/*
 * Initialize registers we need to touch only once.
 */
safe_init_board(struct safe_softc *sc)
	u_int32_t v, dwords;

	v = READ_REG(sc, SAFE_PE_DMACFG);
	v &=~ SAFE_PE_DMACFG_PEMODE;
	v |= SAFE_PE_DMACFG_FSENA	/* failsafe enable */
	  |  SAFE_PE_DMACFG_GPRPCI	/* gather ring on PCI */
	  |  SAFE_PE_DMACFG_SPRPCI	/* scatter ring on PCI */
	  |  SAFE_PE_DMACFG_ESDESC	/* endian-swap descriptors */
	  |  SAFE_PE_DMACFG_ESSA	/* endian-swap SA's */
	  |  SAFE_PE_DMACFG_ESPDESC	/* endian-swap part. desc's */
	WRITE_REG(sc, SAFE_PE_DMACFG, v);

	/* XXX select byte swap based on host byte order */
	WRITE_REG(sc, SAFE_ENDIAN, 0x1b);

	if (sc->sc_chiprev == SAFE_REV(1,0)) {
		/*
		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
		 * "target mode transfers" done while the chip is DMA'ing
		 * >1020 bytes cause the hardware to lock up.  To avoid this
		 * we reduce the max PCI transfer size and use small source
		 * particle descriptors (<= 256 bytes).
		 */
		WRITE_REG(sc, SAFE_DMA_CFG, 256);
		device_printf(sc->sc_dev,
		    "Reduce max DMA size to %u words for rev %u.%u WAR\n",
		    (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
		    SAFE_REV_MAJ(sc->sc_chiprev),
		    SAFE_REV_MIN(sc->sc_chiprev));

	/* NB: operands+results are overlaid */
	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
	/*
	 * Configure ring entry size and number of items in the ring.
	 */
	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
	    ("PE ring entry not 32-bit aligned!"));
	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
	WRITE_REG(sc, SAFE_PE_RINGCFG,
	    (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */

	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_PARTSIZE,
	    (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
	/*
	 * NB: destination particles are fixed size.  We use
	 *     an mbuf cluster and require all results go to
	 *     clusters or smaller.
	 */
	WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);

	/* it's now safe to enable PE mode, do it */
	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);

	/*
	 * Configure hardware to use level-triggered interrupts and
	 * to interrupt after each descriptor is processed.
	 */
	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);

/*
 * Init PCI registers.
 */
safe_init_pciregs(device_t dev)

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp().
 */
safe_cleanchip(struct safe_softc *sc)
	if (sc->sc_nqchip != 0) {
		struct safe_ringentry *re = sc->sc_back;

		while (re != sc->sc_front) {
			if (re->re_desc.d_csr != 0)
				safe_free_entry(sc, re);
			if (++re == sc->sc_ringtop)

/*
 * It is assumed that the caller is within splimp().
 */
safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
	struct cryptop *crp;

	if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
		m_freem(re->re_dst_m);

	crp = (struct cryptop *)re->re_crp;

	re->re_desc.d_csr = 0;

	crp->crp_etype = EFAULT;

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp().
 */
safe_totalreset(struct safe_softc *sc)
	safe_reset_board(sc);
	safe_init_board(sc);

/*
 * Is the operand suitably aligned for direct DMA?  Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
safe_dmamap_aligned(const struct safe_operand *op)
	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))

/*
 * Is the operand suitable for direct DMA as the destination
 * of an operation?  The hardware requires that each ``particle''
 * but the last in an operation result have the same size.  We
 * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
 * 0 if some segment is not a multiple of this size, 1 if all
 * segments are exactly this size, or 2 if segments are at worst
 * a multiple of this size.
 */
safe_dmamap_uniform(const struct safe_operand *op)
	if (op->nsegs > 0) {
		for (i = 0; i < op->nsegs-1; i++) {
			if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
			if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
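/*
 * Worked example (explanatory note, added) for safe_dmamap_uniform,
 * with non-final segment lengths checked against SAFE_MAX_DSIZE:
 *
 *	{ SAFE_MAX_DSIZE, SAFE_MAX_DSIZE, 100 }  -> 1 (all exact)
 *	{ 2*SAFE_MAX_DSIZE, SAFE_MAX_DSIZE, 40 } -> 2 (multiples only)
 *	{ SAFE_MAX_DSIZE, 100, 40 }              -> 0 (misfit segment)
 *
 * Only the final segment is exempt from the size constraint.
 */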
safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
	printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
	    , READ_REG(sc, SAFE_DMA_ENDIAN)
	    , READ_REG(sc, SAFE_DMA_SRCADDR)
	    , READ_REG(sc, SAFE_DMA_DSTADDR)
	    , READ_REG(sc, SAFE_DMA_STAT)

safe_dump_intrstate(struct safe_softc *sc, const char *tag)
	printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
	    , READ_REG(sc, SAFE_HI_CFG)
	    , READ_REG(sc, SAFE_HI_MASK)
	    , READ_REG(sc, SAFE_HI_DESC_CNT)
	    , READ_REG(sc, SAFE_HU_STAT)
	    , READ_REG(sc, SAFE_HM_STAT)

safe_dump_ringstate(struct safe_softc *sc, const char *tag)
	u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);

	/* NB: assume caller has lock on ring */
	printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
	    estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
	    (unsigned long)(sc->sc_back - sc->sc_ring),
	    (unsigned long)(sc->sc_front - sc->sc_ring));

safe_dump_request(struct safe_softc *sc, const char *tag, struct safe_ringentry *re)
	ix = re - sc->sc_ring;
	printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
	if (re->re_src.nsegs > 1) {
		ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
		    sizeof(struct safe_pdesc);
		for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
			printf(" spd[%u] %p: %p size %u flags %x"
			    , ix, &sc->sc_spring[ix]
			    , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
			    , sc->sc_spring[ix].pd_size
			    , sc->sc_spring[ix].pd_flags
			if (sc->sc_spring[ix].pd_size == 0)
			if (++ix == SAFE_TOTAL_SPART)
	if (re->re_dst.nsegs > 1) {
		ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
		    sizeof(struct safe_pdesc);
		for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
			printf(" dpd[%u] %p: %p flags %x\n"
			    , ix, &sc->sc_dpring[ix]
			    , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
			    , sc->sc_dpring[ix].pd_flags
			if (++ix == SAFE_TOTAL_DPART)
	printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
	    re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
	printf("sa: key %x %x %x %x %x %x %x %x\n"
	    , re->re_sa.sa_key[0]
	    , re->re_sa.sa_key[1]
	    , re->re_sa.sa_key[2]
	    , re->re_sa.sa_key[3]
	    , re->re_sa.sa_key[4]
	    , re->re_sa.sa_key[5]
	    , re->re_sa.sa_key[6]
	    , re->re_sa.sa_key[7]
	printf("sa: indigest %x %x %x %x %x\n"
	    , re->re_sa.sa_indigest[0]
	    , re->re_sa.sa_indigest[1]
	    , re->re_sa.sa_indigest[2]
	    , re->re_sa.sa_indigest[3]
	    , re->re_sa.sa_indigest[4]
	printf("sa: outdigest %x %x %x %x %x\n"
	    , re->re_sa.sa_outdigest[0]
	    , re->re_sa.sa_outdigest[1]
	    , re->re_sa.sa_outdigest[2]
	    , re->re_sa.sa_outdigest[3]
	    , re->re_sa.sa_outdigest[4]
	printf("sr: iv %x %x %x %x\n"
	    , re->re_sastate.sa_saved_iv[0]
	    , re->re_sastate.sa_saved_iv[1]
	    , re->re_sastate.sa_saved_iv[2]
	    , re->re_sastate.sa_saved_iv[3]
	printf("sr: hashbc %u indigest %x %x %x %x %x\n"
	    , re->re_sastate.sa_saved_hashbc
	    , re->re_sastate.sa_saved_indigest[0]
	    , re->re_sastate.sa_saved_indigest[1]
	    , re->re_sastate.sa_saved_indigest[2]
	    , re->re_sastate.sa_saved_indigest[3]
	    , re->re_sastate.sa_saved_indigest[4]

safe_dump_ring(struct safe_softc *sc, const char *tag)
	mtx_lock(&sc->sc_ringmtx);
	printf("\nSafeNet Ring State:\n");
	safe_dump_intrstate(sc, tag);
	safe_dump_dmastatus(sc, tag);
	safe_dump_ringstate(sc, tag);
	if (sc->sc_nqchip) {
		struct safe_ringentry *re = sc->sc_back;
			safe_dump_request(sc, tag, re);
			if (++re == sc->sc_ringtop)
		} while (re != sc->sc_front);
	mtx_unlock(&sc->sc_ringmtx);

sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
	strncpy(dmode, "", sizeof(dmode) - 1);
	dmode[sizeof(dmode) - 1] = '\0';
	error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);

	if (error == 0 && req->newptr != NULL) {
		struct safe_softc *sc = safec;

		if (strncmp(dmode, "dma", 3) == 0)
			safe_dump_dmastatus(sc, "safe0");
		else if (strncmp(dmode, "int", 3) == 0)
			safe_dump_intrstate(sc, "safe0");
		else if (strncmp(dmode, "ring", 4) == 0)
			safe_dump_ring(sc, "safe0");
SYSCTL_PROC(_hw_safe, OID_AUTO, dump, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_hw_safe_dump, "A", "Dump driver state");
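/*
 * Illustrative note (added): with SAFE_DEBUG compiled in, driver
 * state can be dumped from userland via sysctl(8):
 *
 *	sysctl hw.safe.dump=dma		# DMA status registers
 *	sysctl hw.safe.dump=int		# interrupt state
 *	sysctl hw.safe.dump=ring	# full ring state
 */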
#endif /* SAFE_DEBUG */