1 /*      $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $  */
2
3 /*-
4  * SPDX-License-Identifier: BSD-3-Clause
5  *
6  * Invertex AEON / Hifn 7751 driver
7  * Copyright (c) 1999 Invertex Inc. All rights reserved.
8  * Copyright (c) 1999 Theo de Raadt
9  * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10  *                      http://www.netsec.net
11  * Copyright (c) 2003 Hifn Inc.
12  *
13  * This driver is based on a previous driver by Invertex, for which they
14  * requested:  Please send any comments, feedback, bug-fixes, or feature
15  * requests to software@invertex.com.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  *
21  * 1. Redistributions of source code must retain the above copyright
22  *   notice, this list of conditions and the following disclaimer.
23  * 2. Redistributions in binary form must reproduce the above copyright
24  *   notice, this list of conditions and the following disclaimer in the
25  *   documentation and/or other materials provided with the distribution.
26  * 3. The name of the author may not be used to endorse or promote products
27  *   derived from this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  *
40  * Effort sponsored in part by the Defense Advanced Research Projects
41  * Agency (DARPA) and Air Force Research Laboratory, Air Force
42  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43  */
44
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47
48 /*
49  * Driver for various Hifn encryption processors.
50  */
51 #include "opt_hifn.h"
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/proc.h>
56 #include <sys/errno.h>
57 #include <sys/malloc.h>
58 #include <sys/kernel.h>
59 #include <sys/module.h>
60 #include <sys/mbuf.h>
61 #include <sys/lock.h>
62 #include <sys/mutex.h>
63 #include <sys/sysctl.h>
64 #include <sys/uio.h>
65
66 #include <vm/vm.h>
67 #include <vm/pmap.h>
68
69 #include <machine/bus.h>
70 #include <machine/resource.h>
71 #include <sys/bus.h>
72 #include <sys/rman.h>
73
74 #include <opencrypto/cryptodev.h>
75 #include <opencrypto/xform_auth.h>
76 #include <sys/random.h>
77 #include <sys/kobj.h>
78
79 #include "cryptodev_if.h"
80
81 #include <dev/pci/pcivar.h>
82 #include <dev/pci/pcireg.h>
83
84 #ifdef HIFN_RNDTEST
85 #include <dev/rndtest/rndtest.h>
86 #endif
87 #include <dev/hifn/hifn7751reg.h>
88 #include <dev/hifn/hifn7751var.h>
89
90 #ifdef HIFN_VULCANDEV
91 #include <sys/conf.h>
92 #include <sys/uio.h>
93
94 static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
95 #endif
96
97 /*
98  * Prototypes for the device and crypto driver methods
99  */
100 static  int hifn_probe(device_t);
101 static  int hifn_attach(device_t);
102 static  int hifn_detach(device_t);
103 static  int hifn_suspend(device_t);
104 static  int hifn_resume(device_t);
105 static  int hifn_shutdown(device_t);
106
107 static  int hifn_probesession(device_t, const struct crypto_session_params *);
108 static  int hifn_newsession(device_t, crypto_session_t,
109     const struct crypto_session_params *);
110 static  int hifn_process(device_t, struct cryptop *, int);
111
112 static device_method_t hifn_methods[] = {
113         /* Device interface */
114         DEVMETHOD(device_probe,         hifn_probe),
115         DEVMETHOD(device_attach,        hifn_attach),
116         DEVMETHOD(device_detach,        hifn_detach),
117         DEVMETHOD(device_suspend,       hifn_suspend),
118         DEVMETHOD(device_resume,        hifn_resume),
119         DEVMETHOD(device_shutdown,      hifn_shutdown),
120
121         /* crypto device methods */
122         DEVMETHOD(cryptodev_probesession, hifn_probesession),
123         DEVMETHOD(cryptodev_newsession, hifn_newsession),
124         DEVMETHOD(cryptodev_process,    hifn_process),
125
126         DEVMETHOD_END
127 };
128 static driver_t hifn_driver = {
129         "hifn",
130         hifn_methods,
131         sizeof (struct hifn_softc)
132 };
133 static devclass_t hifn_devclass;
134
135 DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
136 MODULE_DEPEND(hifn, crypto, 1, 1, 1);
137 #ifdef HIFN_RNDTEST
138 MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
139 #endif
140
141 static  void hifn_reset_board(struct hifn_softc *, int);
142 static  void hifn_reset_puc(struct hifn_softc *);
143 static  void hifn_puc_wait(struct hifn_softc *);
144 static  int hifn_enable_crypto(struct hifn_softc *);
145 static  void hifn_set_retry(struct hifn_softc *sc);
146 static  void hifn_init_dma(struct hifn_softc *);
147 static  void hifn_init_pci_registers(struct hifn_softc *);
148 static  int hifn_sramsize(struct hifn_softc *);
149 static  int hifn_dramsize(struct hifn_softc *);
150 static  int hifn_ramtype(struct hifn_softc *);
151 static  void hifn_sessions(struct hifn_softc *);
152 static  void hifn_intr(void *);
153 static  u_int hifn_write_command(struct hifn_command *, u_int8_t *);
154 static  u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
155 static  void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
156 static  int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
157 static  int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
158 static  int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
159 static  int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
160 static  int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
161 static  int hifn_init_pubrng(struct hifn_softc *);
162 static  void hifn_rng(void *);
163 static  void hifn_tick(void *);
164 static  void hifn_abort(struct hifn_softc *);
165 static  void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
166
167 static  void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
168 static  void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
169
170 static __inline u_int32_t
171 READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
172 {
173     u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
174     sc->sc_bar0_lastreg = (bus_size_t) -1;
175     return (v);
176 }
177 #define WRITE_REG_0(sc, reg, val)       hifn_write_reg_0(sc, reg, val)
178
179 static __inline u_int32_t
180 READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
181 {
182     u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
183     sc->sc_bar1_lastreg = (bus_size_t) -1;
184     return (v);
185 }
186 #define WRITE_REG_1(sc, reg, val)       hifn_write_reg_1(sc, reg, val)
187
188 static SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
189     "Hifn driver parameters");
190
191 #ifdef HIFN_DEBUG
192 static  int hifn_debug = 0;
193 SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
194             0, "control debugging msgs");
195 #endif
196
197 static  struct hifn_stats hifnstats;
198 SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
199             hifn_stats, "driver statistics");
200 static  int hifn_maxbatch = 1;
201 SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
202             0, "max ops to batch w/o interrupt");
203
204 /*
205  * Probe for a supported device.  The PCI vendor and device
206  * IDs are used to detect devices we know how to handle.
207  */
208 static int
209 hifn_probe(device_t dev)
210 {
211         if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
212             pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
213                 return (BUS_PROBE_DEFAULT);
214         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
215             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
216              pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
217              pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
218              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
219              pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
220                 return (BUS_PROBE_DEFAULT);
221         if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
222             pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
223                 return (BUS_PROBE_DEFAULT);
224         return (ENXIO);
225 }
226
227 static void
228 hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
229 {
230         bus_addr_t *paddr = (bus_addr_t*) arg;
231         *paddr = segs->ds_addr;
232 }
233
234 static const char*
235 hifn_partname(struct hifn_softc *sc)
236 {
237         /* XXX sprintf numbers when not decoded */
238         switch (pci_get_vendor(sc->sc_dev)) {
239         case PCI_VENDOR_HIFN:
240                 switch (pci_get_device(sc->sc_dev)) {
241                 case PCI_PRODUCT_HIFN_6500:     return "Hifn 6500";
242                 case PCI_PRODUCT_HIFN_7751:     return "Hifn 7751";
243                 case PCI_PRODUCT_HIFN_7811:     return "Hifn 7811";
244                 case PCI_PRODUCT_HIFN_7951:     return "Hifn 7951";
245                 case PCI_PRODUCT_HIFN_7955:     return "Hifn 7955";
246                 case PCI_PRODUCT_HIFN_7956:     return "Hifn 7956";
247                 }
248                 return "Hifn unknown-part";
249         case PCI_VENDOR_INVERTEX:
250                 switch (pci_get_device(sc->sc_dev)) {
251                 case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
252                 }
253                 return "Invertex unknown-part";
254         case PCI_VENDOR_NETSEC:
255                 switch (pci_get_device(sc->sc_dev)) {
256                 case PCI_PRODUCT_NETSEC_7751:   return "NetSec 7751";
257                 }
258                 return "NetSec unknown-part";
259         }
260         return "Unknown-vendor unknown-part";
261 }
262
263 static void
264 default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
265 {
266         /* MarkM: FIX!! Check that this does not swamp the harvester! */
267         random_harvest_queue(buf, count, RANDOM_PURE_HIFN);
268 }
269
270 static u_int
271 checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max)
272 {
273         if (v > max) {
274                 device_printf(dev, "Warning, %s %u out of range, "
275                         "using max %u\n", what, v, max);
276                 v = max;
277         } else if (v < min) {
278                 device_printf(dev, "Warning, %s %u out of range, "
279                         "using min %u\n", what, v, min);
280                 v = min;
281         }
282         return v;
283 }
284
285 /*
286  * Select PLL configuration for 795x parts.  This is complicated in
287  * that we cannot determine the optimal parameters without user input.
288  * The reference clock is derived from an external clock through a
289  * multiplier.  The external clock is either the host bus (i.e. PCI)
290  * or an external clock generator.  When using the PCI bus we assume
291  * the clock is either 33 or 66 MHz; for an external source we cannot
292  * tell the speed.
293  *
294  * PLL configuration is done with a string: "pci" for the PCI bus, or
295  * "ext" for an external source, followed by the frequency.  We
296  * calculate the appropriate multiplier and PLL register contents
297  * accordingly.  A setting of "pci66" always lets the card work; a
298  * card that is using the PCI bus clock in a 33 MHz slot will simply
299  * run at half speed until the correct information is provided.
300  *
301  * When no configuration is given we default to "ext66" because,
302  * according to Mike Ham of HiFn, almost every board in existence has
303  * an external crystal populated at 66 MHz.  Using the PCI clock can
304  * be a problem on modern motherboards, because PCI33 can have clocks
305  * from 0 to 33 MHz, and some have non-PCI-compliant spread-spectrum
306  * clocks, which can confuse the PLL.
307  */
308 static void
309 hifn_getpllconfig(device_t dev, u_int *pll)
310 {
311         const char *pllspec;
312         u_int freq, mul, fl, fh;
313         u_int32_t pllconfig;
314         char *nxt;
315
316         if (resource_string_value("hifn", device_get_unit(dev),
317             "pllconfig", &pllspec))
318                 pllspec = "ext66";
319         fl = 33, fh = 66;
320         pllconfig = 0;
321         if (strncmp(pllspec, "ext", 3) == 0) {
322                 pllspec += 3;
323                 pllconfig |= HIFN_PLL_REF_SEL;
324                 switch (pci_get_device(dev)) {
325                 case PCI_PRODUCT_HIFN_7955:
326                 case PCI_PRODUCT_HIFN_7956:
327                         fl = 20, fh = 100;
328                         break;
329 #ifdef notyet
330                 case PCI_PRODUCT_HIFN_7954:
331                         fl = 20, fh = 66;
332                         break;
333 #endif
334                 }
335         } else if (strncmp(pllspec, "pci", 3) == 0)
336                 pllspec += 3;
337         freq = strtoul(pllspec, &nxt, 10);
338         if (nxt == pllspec)
339                 freq = 66;
340         else
341                 freq = checkmaxmin(dev, "frequency", freq, fl, fh);
342         /*
343          * Calculate multiplier.  We target a Fck of 266 MHz,
344          * allowing only even values, possibly rounded down.
345          * Multipliers > 8 must set the charge pump current.
346          */
347         mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
348         pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
349         if (mul > 8)
350                 pllconfig |= HIFN_PLL_IS;
351         *pll = pllconfig;
352 }
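
/*
 * Editor's sketch (not part of the driver): the multiplier selection
 * above, pulled out for illustration, plus a few worked examples of
 * how a pllconfig hint maps to the PLL register fields.  The helper
 * name is hypothetical.
 */
#if 0
static u_int
hifn_example_pll_mul(u_int freq)
{
        u_int mul;

        /* Target Fck is 266 MHz; only even multipliers, clamped to 2..12. */
        mul = (266 / freq) & ~1;
        if (mul < 2)
                mul = 2;
        else if (mul > 12)
                mul = 12;
        return (mul);
}
/*
 * hint.hifn.0.pllconfig="ext66": mul = (266/66) & ~1 = 4,  ND field 1, IS clear.
 * hint.hifn.0.pllconfig="pci33": mul = (266/33) & ~1 = 8,  ND field 3, IS clear.
 * hint.hifn.0.pllconfig="ext20": mul = 12 (clamped), ND field 5, IS set (mul > 8).
 */
#endif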
353
354 /*
355  * Attach an interface that successfully probed.
356  */
357 static int 
358 hifn_attach(device_t dev)
359 {
360         struct hifn_softc *sc = device_get_softc(dev);
361         caddr_t kva;
362         int rseg, rid;
363         char rbase;
364         uint16_t rev;
365
366         sc->sc_dev = dev;
367
368         mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF);
369
370         /* XXX handle power management */
371
372         /*
373          * The 7951 and 795x have a random number generator and
374          * public key support; note this.
375          */
376         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
377             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
378              pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
379              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
380                 sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
381         /*
382          * The 7811 has a random number generator and
383          * we also note its identity because of some quirks.
384          */
385         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
386             pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
387                 sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
388
389         /*
390          * The 795x parts support AES.
391          */
392         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
393             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
394              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
395                 sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
396                 /*
397                  * Select PLL configuration.  This depends on the
398                  * bus and board design and must be manually configured
399                  * if the default setting is unacceptable.
400                  */
401                 hifn_getpllconfig(dev, &sc->sc_pllconfig);
402         }
403
404         /*
405          * Setup PCI resources. Note that we record the bus
406          * tag and handle for each register mapping, this is
407          * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
408          * and WRITE_REG_1 macros throughout the driver.
409          */
410         pci_enable_busmaster(dev);
411
412         rid = HIFN_BAR0;
413         sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
414                                                 RF_ACTIVE);
415         if (sc->sc_bar0res == NULL) {
416                 device_printf(dev, "cannot map bar%d register space\n", 0);
417                 goto fail_pci;
418         }
419         sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
420         sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
421         sc->sc_bar0_lastreg = (bus_size_t) -1;
422
423         rid = HIFN_BAR1;
424         sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
425                                                 RF_ACTIVE);
426         if (sc->sc_bar1res == NULL) {
427                 device_printf(dev, "cannot map bar%d register space\n", 1);
428                 goto fail_io0;
429         }
430         sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
431         sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
432         sc->sc_bar1_lastreg = (bus_size_t) -1;
433
434         hifn_set_retry(sc);
435
436         /*
437          * Set up the area where the Hifn DMA descriptors
438          * and associated data structures live.
439          */
440         if (bus_dma_tag_create(bus_get_dma_tag(dev),    /* PCI parent */
441                                1, 0,                    /* alignment,boundary */
442                                BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
443                                BUS_SPACE_MAXADDR,       /* highaddr */
444                                NULL, NULL,              /* filter, filterarg */
445                                HIFN_MAX_DMALEN,         /* maxsize */
446                                MAX_SCATTER,             /* nsegments */
447                                HIFN_MAX_SEGLEN,         /* maxsegsize */
448                                BUS_DMA_ALLOCNOW,        /* flags */
449                                NULL,                    /* lockfunc */
450                                NULL,                    /* lockarg */
451                                &sc->sc_dmat)) {
452                 device_printf(dev, "cannot allocate DMA tag\n");
453                 goto fail_io1;
454         }
455         if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
456                 device_printf(dev, "cannot create dma map\n");
457                 bus_dma_tag_destroy(sc->sc_dmat);
458                 goto fail_io1;
459         }
460         if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
461                 device_printf(dev, "cannot alloc dma buffer\n");
462                 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
463                 bus_dma_tag_destroy(sc->sc_dmat);
464                 goto fail_io1;
465         }
466         if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
467                              sizeof (*sc->sc_dma),
468                              hifn_dmamap_cb, &sc->sc_dma_physaddr,
469                              BUS_DMA_NOWAIT)) {
470                 device_printf(dev, "cannot load dma map\n");
471                 bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
472                 bus_dma_tag_destroy(sc->sc_dmat);
473                 goto fail_io1;
474         }
475         sc->sc_dma = (struct hifn_dma *)kva;
476         bzero(sc->sc_dma, sizeof(*sc->sc_dma));
477
478         KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
479         KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
480         KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
481         KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));
482
483         /*
484          * Reset the board and do the ``secret handshake''
485          * to enable the crypto support.  Then complete the
486          * initialization procedure by setting up the interrupt
487          * and hooking in to the system crypto support so we'll
488          * get used for system services like the crypto device,
489          * IPsec, RNG device, etc.
490          */
491         hifn_reset_board(sc, 0);
492
493         if (hifn_enable_crypto(sc) != 0) {
494                 device_printf(dev, "crypto enabling failed\n");
495                 goto fail_mem;
496         }
497         hifn_reset_puc(sc);
498
499         hifn_init_dma(sc);
500         hifn_init_pci_registers(sc);
501
502         /* XXX can't dynamically determine ram type for 795x; force dram */
503         if (sc->sc_flags & HIFN_IS_7956)
504                 sc->sc_drammodel = 1;
505         else if (hifn_ramtype(sc))
506                 goto fail_mem;
507
508         if (sc->sc_drammodel == 0)
509                 hifn_sramsize(sc);
510         else
511                 hifn_dramsize(sc);
512
513         /*
514          * Workaround for NetSec 7751 rev A: halve the ram size because
515          * two of the address lines were left floating.
516          */
517         if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
518             pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
519             pci_get_revid(dev) == 0x61) /*XXX???*/
520                 sc->sc_ramsize >>= 1;
521
522         /*
523          * Arrange the interrupt line.
524          */
525         rid = 0;
526         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
527                                             RF_SHAREABLE|RF_ACTIVE);
528         if (sc->sc_irq == NULL) {
529                 device_printf(dev, "could not map interrupt\n");
530                 goto fail_mem;
531         }
532         /*
533          * NB: Network code assumes we are blocked with splimp()
534          *     so make sure the IRQ is marked appropriately.
535          */
536         if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
537                            NULL, hifn_intr, sc, &sc->sc_intrhand)) {
538                 device_printf(dev, "could not setup interrupt\n");
539                 goto fail_intr2;
540         }
541
542         hifn_sessions(sc);
543
544         /*
545          * NB: Keep only the low 16 bits; this masks the chip id
546          *     from the 7951.
547          */
548         rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
549
550         rseg = sc->sc_ramsize / 1024;
551         rbase = 'K';
552         if (sc->sc_ramsize >= (1024 * 1024)) {
553                 rbase = 'M';
554                 rseg /= 1024;
555         }
556         device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
557                 hifn_partname(sc), rev,
558                 rseg, rbase, sc->sc_drammodel ? 'd' : 's');
559         if (sc->sc_flags & HIFN_IS_7956)
560                 printf(", pll=0x%x<%s clk, %ux mult>",
561                         sc->sc_pllconfig,
562                         sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
563                         2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
564         printf("\n");
565
566         WRITE_REG_0(sc, HIFN_0_PUCNFG,
567             READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
568         sc->sc_ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
569
570         switch (sc->sc_ena) {
571         case HIFN_PUSTAT_ENA_2:
572         case HIFN_PUSTAT_ENA_1:
573                 sc->sc_cid = crypto_get_driverid(dev,
574                     sizeof(struct hifn_session), CRYPTOCAP_F_HARDWARE);
575                 if (sc->sc_cid < 0) {
576                         device_printf(dev, "could not get crypto driver id\n");
577                         goto fail_intr;
578                 }
579                 break;
580         }
581                 
582         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
583             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
584
585         if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
586                 hifn_init_pubrng(sc);
587
588         callout_init(&sc->sc_tickto, 1);
589         callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
590
591         return (0);
592
593 fail_intr:
594         bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
595 fail_intr2:
596         /* XXX don't store rid */
597         bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
598 fail_mem:
599         bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
600         bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
601         bus_dma_tag_destroy(sc->sc_dmat);
602
603         /* Turn off DMA polling */
604         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
605             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
606 fail_io1:
607         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
608 fail_io0:
609         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
610 fail_pci:
611         mtx_destroy(&sc->sc_mtx);
612         return (ENXIO);
613 }
614
615 /*
616  * Detach an interface that successfully probed.
617  */
618 static int 
619 hifn_detach(device_t dev)
620 {
621         struct hifn_softc *sc = device_get_softc(dev);
622
623         KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
624
625         /* disable interrupts */
626         WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
627
628         /*XXX other resources */
629         callout_stop(&sc->sc_tickto);
630         callout_stop(&sc->sc_rngto);
631 #ifdef HIFN_RNDTEST
632         if (sc->sc_rndtest)
633                 rndtest_detach(sc->sc_rndtest);
634 #endif
635
636         /* Turn off DMA polling */
637         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
638             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
639
640         crypto_unregister_all(sc->sc_cid);
641
642         bus_generic_detach(dev);        /*XXX should be no children, right? */
643
644         bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
645         /* XXX don't store rid */
646         bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
647
648         bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
649         bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
650         bus_dma_tag_destroy(sc->sc_dmat);
651
652         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
653         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
654
655         mtx_destroy(&sc->sc_mtx);
656
657         return (0);
658 }
659
660 /*
661  * Stop all chip I/O so that the kernel's probe routines don't
662  * get confused by errant DMAs when rebooting.
663  */
664 static int
665 hifn_shutdown(device_t dev)
666 {
667 #ifdef notyet
668         hifn_stop(device_get_softc(dev));
669 #endif
670         return (0);
671 }
672
673 /*
674  * Device suspend routine.  Stop the interface and save some PCI
675  * settings in case the BIOS doesn't restore them properly on
676  * resume.
677  */
678 static int
679 hifn_suspend(device_t dev)
680 {
681         struct hifn_softc *sc = device_get_softc(dev);
682 #ifdef notyet
683         hifn_stop(sc);
684 #endif
685         sc->sc_suspended = 1;
686
687         return (0);
688 }
689
690 /*
691  * Device resume routine.  Restore some PCI settings in case the BIOS
692  * doesn't, re-enable busmastering, and restart the interface if
693  * appropriate.
694  */
695 static int
696 hifn_resume(device_t dev)
697 {
698         struct hifn_softc *sc = device_get_softc(dev);
699 #ifdef notyet
700         /* reinitialize interface if necessary */
701         if (ifp->if_flags & IFF_UP)
702                 rl_init(sc);
703 #endif
704         sc->sc_suspended = 0;
705
706         return (0);
707 }
708
709 static int
710 hifn_init_pubrng(struct hifn_softc *sc)
711 {
712         u_int32_t r;
713         int i;
714
715 #ifdef HIFN_RNDTEST
716         sc->sc_rndtest = rndtest_attach(sc->sc_dev);
717         if (sc->sc_rndtest)
718                 sc->sc_harvest = rndtest_harvest;
719         else
720                 sc->sc_harvest = default_harvest;
721 #else
722         sc->sc_harvest = default_harvest;
723 #endif
724         if ((sc->sc_flags & HIFN_IS_7811) == 0) {
725                 /* Reset 7951 public key/rng engine */
726                 WRITE_REG_1(sc, HIFN_1_PUB_RESET,
727                     READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
728
729                 for (i = 0; i < 100; i++) {
730                         DELAY(1000);
731                         if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
732                             HIFN_PUBRST_RESET) == 0)
733                                 break;
734                 }
735
736                 if (i == 100) {
737                         device_printf(sc->sc_dev, "public key init failed\n");
738                         return (1);
739                 }
740         }
741
742         /* Enable the rng, if available */
743         if (sc->sc_flags & HIFN_HAS_RNG) {
744                 if (sc->sc_flags & HIFN_IS_7811) {
745                         r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
746                         if (r & HIFN_7811_RNGENA_ENA) {
747                                 r &= ~HIFN_7811_RNGENA_ENA;
748                                 WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
749                         }
750                         WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
751                             HIFN_7811_RNGCFG_DEFL);
752                         r |= HIFN_7811_RNGENA_ENA;
753                         WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
754                 } else
755                         WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
756                             READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
757                             HIFN_RNGCFG_ENA);
758
759                 sc->sc_rngfirst = 1;
760                 if (hz >= 100)
761                         sc->sc_rnghz = hz / 100;
762                 else
763                         sc->sc_rnghz = 1;
764                 callout_init(&sc->sc_rngto, 1);
765                 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
766         }
767
768         /* Enable public key engine, if available */
769         if (sc->sc_flags & HIFN_HAS_PUBLIC) {
770                 WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
771                 sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
772                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
773 #ifdef HIFN_VULCANDEV
774                 sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0, 
775                                         UID_ROOT, GID_WHEEL, 0666,
776                                         "vulcanpk");
777                 sc->sc_pkdev->si_drv1 = sc;
778 #endif
779         }
780
781         return (0);
782 }
783
784 static void
785 hifn_rng(void *vsc)
786 {
787 #define RANDOM_BITS(n)  (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
788         struct hifn_softc *sc = vsc;
789         u_int32_t sts, num[2];
790         int i;
791
792         if (sc->sc_flags & HIFN_IS_7811) {
793                 /* ONLY VALID ON 7811!!!! */
794                 for (i = 0; i < 5; i++) {
795                         sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
796                         if (sts & HIFN_7811_RNGSTS_UFL) {
797                                 device_printf(sc->sc_dev,
798                                               "RNG underflow: disabling\n");
799                                 return;
800                         }
801                         if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
802                                 break;
803
804                         /*
805                          * There are at least two words in the RNG FIFO
806                          * at this point.
807                          */
808                         num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
809                         num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
810                         /* NB: discard first data read */
811                         if (sc->sc_rngfirst)
812                                 sc->sc_rngfirst = 0;
813                         else
814                                 (*sc->sc_harvest)(sc->sc_rndtest,
815                                         num, sizeof (num));
816                 }
817         } else {
818                 num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);
819
820                 /* NB: discard first data read */
821                 if (sc->sc_rngfirst)
822                         sc->sc_rngfirst = 0;
823                 else
824                         (*sc->sc_harvest)(sc->sc_rndtest,
825                                 num, sizeof (num[0]));
826         }
827
828         callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
829 #undef RANDOM_BITS
830 }
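
/*
 * Editor's note (rough numbers derived from the code above): the RNG
 * callout fires every hz/100 ticks, i.e. roughly 100 times a second.
 * The non-7811 path harvests one 32-bit word per poll (about 400
 * bytes/s); the 7811 path harvests up to five pairs of words per poll
 * while the FIFO reports ready (at most about 4KB/s).
 */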
831
832 static void
833 hifn_puc_wait(struct hifn_softc *sc)
834 {
835         int i;
836         int reg = HIFN_0_PUCTRL;
837
838         if (sc->sc_flags & HIFN_IS_7956) {
839                 reg = HIFN_0_PUCTRL2;
840         }
841
842         for (i = 5000; i > 0; i--) {
843                 DELAY(1);
844                 if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
845                         break;
846         }
847         if (!i)
848                 device_printf(sc->sc_dev, "proc unit did not reset\n");
849 }
850
851 /*
852  * Reset the processing unit.
853  */
854 static void
855 hifn_reset_puc(struct hifn_softc *sc)
856 {
857         /* Reset processing unit */
858         int reg = HIFN_0_PUCTRL;
859
860         if (sc->sc_flags & HIFN_IS_7956) {
861                 reg = HIFN_0_PUCTRL2;
862         }
863         WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
864
865         hifn_puc_wait(sc);
866 }
867
868 /*
869  * Set the Retry and TRDY registers; note that we set them to
870  * zero because the 7811 locks up when forced to retry (section
871  * 3.6 of "Specification Update SU-0014-04").  Not clear if we
872  * should do this for all Hifn parts, but it doesn't seem to hurt.
873  */
874 static void
875 hifn_set_retry(struct hifn_softc *sc)
876 {
877         /* NB: RETRY only responds to 8-bit reads/writes */
878         pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
879         pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 1);
880 }
881
882 /*
883  * Resets the board.  Values in the registers are left as is
884  * from the reset (i.e. initial values are assigned elsewhere).
885  */
886 static void
887 hifn_reset_board(struct hifn_softc *sc, int full)
888 {
889         u_int32_t reg;
890
891         /*
892          * Set polling in the DMA configuration register to zero.  0x7 avoids
893          * resetting the board and zeros out the other fields.
894          */
895         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
896             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
897
898         /*
899          * Now that polling has been disabled, we have to wait 1 ms
900          * before resetting the board.
901          */
902         DELAY(1000);
903
904         /* Reset the DMA unit */
905         if (full) {
906                 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
907                 DELAY(1000);
908         } else {
909                 WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
910                     HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
911                 hifn_reset_puc(sc);
912         }
913
914         KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
915         bzero(sc->sc_dma, sizeof(*sc->sc_dma));
916
917         /* Bring dma unit out of reset */
918         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
919             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
920
921         hifn_puc_wait(sc);
922         hifn_set_retry(sc);
923
924         if (sc->sc_flags & HIFN_IS_7811) {
925                 for (reg = 0; reg < 1000; reg++) {
926                         if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
927                             HIFN_MIPSRST_CRAMINIT)
928                                 break;
929                         DELAY(1000);
930                 }
931                 if (reg == 1000)
932                         printf(": cram init timeout\n");
933         } else {
934                 /* set up DMA configuration register #2 */
935                 /* turn off all PK and BAR0 swaps */
936                 WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
937                     (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT) |
938                     (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT) |
939                     (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT) |
940                     (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
941         }
942
943 }
944
945 static u_int32_t
946 hifn_next_signature(u_int32_t a, u_int cnt)
947 {
948         int i;
949         u_int32_t v;
950
951         for (i = 0; i < cnt; i++) {
952
953                 /* get the parity */
954                 v = a & 0x80080125;
955                 v ^= v >> 16;
956                 v ^= v >> 8;
957                 v ^= v >> 4;
958                 v ^= v >> 2;
959                 v ^= v >> 1;
960
961                 a = (v & 1) ^ (a << 1);
962         }
963
964         return a;
965 }
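
/*
 * Editor's note: the loop above is effectively a 32-bit Fibonacci-style
 * LFSR.  The shift cascade folds the parity of (a & 0x80080125), i.e.
 * of tap bits 31, 19, 8, 5, 2 and 0, down into bit 0; each round then
 * shifts `a' left by one and feeds that parity bit back in.  The unlock
 * code below clocks it (offtbl[i] + 0x101) times per key byte.
 */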
966
967 struct pci2id {
968         u_short         pci_vendor;
969         u_short         pci_prod;
970         char            card_id[13];
971 };
972 static struct pci2id pci2id[] = {
973         {
974                 PCI_VENDOR_HIFN,
975                 PCI_PRODUCT_HIFN_7951,
976                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
977                   0x00, 0x00, 0x00, 0x00, 0x00 }
978         }, {
979                 PCI_VENDOR_HIFN,
980                 PCI_PRODUCT_HIFN_7955,
981                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
982                   0x00, 0x00, 0x00, 0x00, 0x00 }
983         }, {
984                 PCI_VENDOR_HIFN,
985                 PCI_PRODUCT_HIFN_7956,
986                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
987                   0x00, 0x00, 0x00, 0x00, 0x00 }
988         }, {
989                 PCI_VENDOR_NETSEC,
990                 PCI_PRODUCT_NETSEC_7751,
991                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
992                   0x00, 0x00, 0x00, 0x00, 0x00 }
993         }, {
994                 PCI_VENDOR_INVERTEX,
995                 PCI_PRODUCT_INVERTEX_AEON,
996                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
997                   0x00, 0x00, 0x00, 0x00, 0x00 }
998         }, {
999                 PCI_VENDOR_HIFN,
1000                 PCI_PRODUCT_HIFN_7811,
1001                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1002                   0x00, 0x00, 0x00, 0x00, 0x00 }
1003         }, {
1004                 /*
1005                  * Other vendors share this PCI ID as well, such as
1006                  * http://www.powercrypt.com, and obviously they also
1007                  * use the same key.
1008                  */
1009                 PCI_VENDOR_HIFN,
1010                 PCI_PRODUCT_HIFN_7751,
1011                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1012                   0x00, 0x00, 0x00, 0x00, 0x00 }
1013         },
1014 };
1015
1016 /*
1017  * Check whether crypto is already enabled.  If it isn't, perform the
1018  * unlock handshake to enable it.  The check is important, as running
1019  * the unlock sequence twice locks the board until the next reboot.
1020  */
1021 static int 
1022 hifn_enable_crypto(struct hifn_softc *sc)
1023 {
1024         u_int32_t dmacfg, ramcfg, encl, addr, i;
1025         char *offtbl = NULL;
1026
1027         for (i = 0; i < nitems(pci2id); i++) {
1028                 if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
1029                     pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
1030                         offtbl = pci2id[i].card_id;
1031                         break;
1032                 }
1033         }
1034         if (offtbl == NULL) {
1035                 device_printf(sc->sc_dev, "Unknown card!\n");
1036                 return (1);
1037         }
1038
1039         ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1040         dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
1041
1042         /*
1043          * The RAM config register's encrypt level bit needs to be set before
1044          * every read performed on the encryption level register.
1045          */
1046         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1047
1048         encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1049
1050         /*
1051          * Make sure we don't re-unlock.  Two unlocks kill the chip until the
1052          * next reboot.
1053          */
1054         if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
1055 #ifdef HIFN_DEBUG
1056                 if (hifn_debug)
1057                         device_printf(sc->sc_dev,
1058                             "Strong crypto already enabled!\n");
1059 #endif
1060                 goto report;
1061         }
1062
1063         if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
1064 #ifdef HIFN_DEBUG
1065                 if (hifn_debug)
1066                         device_printf(sc->sc_dev,
1067                               "Unknown encryption level 0x%x\n", encl);
1068 #endif
1069                 return 1;
1070         }
1071
1072         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
1073             HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
1074         DELAY(1000);
1075         addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
1076         DELAY(1000);
1077         WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
1078         DELAY(1000);
1079
1080         for (i = 0; i <= 12; i++) {
1081                 addr = hifn_next_signature(addr, offtbl[i] + 0x101);
1082                 WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
1083
1084                 DELAY(1000);
1085         }
1086
1087         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1088         encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1089
1090 #ifdef HIFN_DEBUG
1091         if (hifn_debug) {
1092                 if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
1093                         device_printf(sc->sc_dev, "Engine is permanently "
1094                                 "locked until next system reset!\n");
1095                 else
1096                         device_printf(sc->sc_dev, "Engine enabled "
1097                                 "successfully!\n");
1098         }
1099 #endif
1100
1101 report:
1102         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
1103         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
1104
1105         switch (encl) {
1106         case HIFN_PUSTAT_ENA_1:
1107         case HIFN_PUSTAT_ENA_2:
1108                 break;
1109         case HIFN_PUSTAT_ENA_0:
1110         default:
1111                 device_printf(sc->sc_dev, "disabled");
1112                 break;
1113         }
1114
1115         return 0;
1116 }
1117
1118 /*
1119  * Give initial values to the registers listed in the "Register Space"
1120  * section of the HIFN Software Development reference manual.
1121  */
1122 static void 
1123 hifn_init_pci_registers(struct hifn_softc *sc)
1124 {
1125         /* write fixed values needed by the Initialization registers */
1126         WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
1127         WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
1128         WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
1129
1130         /* write all 4 ring address registers */
1131         WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
1132             offsetof(struct hifn_dma, cmdr[0]));
1133         WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
1134             offsetof(struct hifn_dma, srcr[0]));
1135         WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
1136             offsetof(struct hifn_dma, dstr[0]));
1137         WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
1138             offsetof(struct hifn_dma, resr[0]));
1139
1140         DELAY(2000);
1141
1142         /* write status register */
1143         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1144             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
1145             HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
1146             HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
1147             HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
1148             HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
1149             HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
1150             HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
1151             HIFN_DMACSR_S_WAIT |
1152             HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
1153             HIFN_DMACSR_C_WAIT |
1154             HIFN_DMACSR_ENGINE |
1155             ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
1156                 HIFN_DMACSR_PUBDONE : 0) |
1157             ((sc->sc_flags & HIFN_IS_7811) ?
1158                 HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
1159
1160         sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
1161         sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
1162             HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
1163             HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
1164             ((sc->sc_flags & HIFN_IS_7811) ?
1165                 HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
1166         sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
1167         WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1168
1169
1170         if (sc->sc_flags & HIFN_IS_7956) {
1171                 u_int32_t pll;
1172
1173                 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1174                     HIFN_PUCNFG_TCALLPHASES |
1175                     HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
1176
1177                 /* turn off the clocks and ensure bypass is set */
1178                 pll = READ_REG_1(sc, HIFN_1_PLL);
1179                 pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
1180                   | HIFN_PLL_BP | HIFN_PLL_MBSET;
1181                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1182                 DELAY(10*1000);         /* 10ms */
1183
1184                 /* change configuration */
1185                 pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
1186                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1187                 DELAY(10*1000);         /* 10ms */
1188
1189                 /* disable bypass */
1190                 pll &= ~HIFN_PLL_BP;
1191                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1192                 /* enable clocks with new configuration */
1193                 pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
1194                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1195         } else {
1196                 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1197                     HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
1198                     HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
1199                     (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
1200         }
1201
1202         WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
1203         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
1204             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
1205             ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
1206             ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
1207 }
1208
1209 /*
1210  * The maximum number of sessions supported by the card
1211  * is dependent on the amount of context ram, which
1212  * encryption algorithms are enabled, and how compression
1213  * is configured.  This should be configured before this
1214  * routine is called.
1215  */
1216 static void
1217 hifn_sessions(struct hifn_softc *sc)
1218 {
1219         u_int32_t pucnfg;
1220         int ctxsize;
1221
1222         pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1223
1224         if (pucnfg & HIFN_PUCNFG_COMPSING) {
1225                 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1226                         ctxsize = 128;
1227                 else
1228                         ctxsize = 512;
1229                 /*
1230                  * The 7955/7956 have 32K of internal context memory.
1231                  */
1232                 if (sc->sc_flags & HIFN_IS_7956)
1233                         sc->sc_maxses = 32768 / ctxsize;
1234                 else
1235                         sc->sc_maxses = 1 +
1236                             ((sc->sc_ramsize - 32768) / ctxsize);
1237         } else
1238                 sc->sc_maxses = sc->sc_ramsize / 16384;
1239
1240         if (sc->sc_maxses > 2048)
1241                 sc->sc_maxses = 2048;
1242 }
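
/*
 * Editor's note, a worked example of the sizing above: a board with
 * 512KB of ram and 512-byte contexts yields 1 + (524288 - 32768) / 512
 * = 961 sessions, while a 7955/7956 with 128-byte contexts yields
 * 32768 / 128 = 256.  Larger results are capped at 2048.
 */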
1243
1244 /*
1245  * Determine ram type (sram or dram).  Board should be just out of a reset
1246  * state when this is called.
1247  */
1248 static int
1249 hifn_ramtype(struct hifn_softc *sc)
1250 {
1251         u_int8_t data[8], dataexpect[8];
1252         int i;
1253
1254         for (i = 0; i < sizeof(data); i++)
1255                 data[i] = dataexpect[i] = 0x55;
1256         if (hifn_writeramaddr(sc, 0, data))
1257                 return (-1);
1258         if (hifn_readramaddr(sc, 0, data))
1259                 return (-1);
1260         if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1261                 sc->sc_drammodel = 1;
1262                 return (0);
1263         }
1264
1265         for (i = 0; i < sizeof(data); i++)
1266                 data[i] = dataexpect[i] = 0xaa;
1267         if (hifn_writeramaddr(sc, 0, data))
1268                 return (-1);
1269         if (hifn_readramaddr(sc, 0, data))
1270                 return (-1);
1271         if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1272                 sc->sc_drammodel = 1;
1273                 return (0);
1274         }
1275
1276         return (0);
1277 }
1278
1279 #define HIFN_SRAM_MAX           (32 << 20)
1280 #define HIFN_SRAM_STEP_SIZE     16384
1281 #define HIFN_SRAM_GRANULARITY   (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1282
1283 static int
1284 hifn_sramsize(struct hifn_softc *sc)
1285 {
1286         u_int32_t a;
1287         u_int8_t data[8];
1288         u_int8_t dataexpect[sizeof(data)];
1289         int32_t i;
1290
1291         for (i = 0; i < sizeof(data); i++)
1292                 data[i] = dataexpect[i] = i ^ 0x5a;
1293
1294         for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1295                 a = i * HIFN_SRAM_STEP_SIZE;
1296                 bcopy(&i, data, sizeof(i));
1297                 hifn_writeramaddr(sc, a, data);
1298         }
1299
1300         for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1301                 a = i * HIFN_SRAM_STEP_SIZE;
1302                 bcopy(&i, dataexpect, sizeof(i));
1303                 if (hifn_readramaddr(sc, a, data) < 0)
1304                         return (0);
1305                 if (bcmp(data, dataexpect, sizeof(data)) != 0)
1306                         return (0);
1307                 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1308         }
1309
1310         return (0);
1311 }
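
/*
 * Editor's note on the probe above: the 32MB maximum is covered in
 * 16KB steps (2048 probe points).  Each step is tagged with its own
 * index, written from the top of the range downward so that aliased
 * (wrapped) writes are overwritten by the later low-address writes.
 * Reading back from the bottom up, the first tag that fails to match
 * marks the end of real sram; sc_ramsize is left at the last matching
 * address plus one step.
 */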
1312
1313 /*
1314  * XXX For dram boards, one should really try all of the
1315  * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
1316  * is already set up correctly.
1317  */
1318 static int
1319 hifn_dramsize(struct hifn_softc *sc)
1320 {
1321         u_int32_t cnfg;
1322
1323         if (sc->sc_flags & HIFN_IS_7956) {
1324                 /*
1325                  * 7955/7956 have a fixed internal ram of only 32K.
1326                  */
1327                 sc->sc_ramsize = 32768;
1328         } else {
1329                 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1330                     HIFN_PUCNFG_DRAMMASK;
1331                 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1332         }
1333         return (0);
1334 }
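
/*
 * Editor's note: the DRAM size is decoded directly from the PUCNFG
 * DSZ field above; e.g. a field value of 0 gives 1 << 18 = 256KB and
 * a value of 3 gives 1 << 21 = 2MB.
 */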
1335
1336 static void
1337 hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
1338 {
1339         struct hifn_dma *dma = sc->sc_dma;
1340
1341         if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) {
1342                 sc->sc_cmdi = 0;
1343                 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
1344                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1345                 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1346                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1347         }
1348         *cmdp = sc->sc_cmdi++;
1349         sc->sc_cmdk = sc->sc_cmdi;
1350
1351         if (sc->sc_srci == HIFN_D_SRC_RSIZE) {
1352                 sc->sc_srci = 0;
1353                 dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
1354                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1355                 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1356                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1357         }
1358         *srcp = sc->sc_srci++;
1359         sc->sc_srck = sc->sc_srci;
1360
1361         if (sc->sc_dsti == HIFN_D_DST_RSIZE) {
1362                 sc->sc_dsti = 0;
1363                 dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
1364                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1365                 HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
1366                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1367         }
1368         *dstp = sc->sc_dsti++;
1369         sc->sc_dstk = sc->sc_dsti;
1370
1371         if (sc->sc_resi == HIFN_D_RES_RSIZE) {
1372                 sc->sc_resi = 0;
1373                 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
1374                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1375                 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
1376                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1377         }
1378         *resp = sc->sc_resi++;
1379         sc->sc_resk = sc->sc_resi;
1380 }
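
/*
 * Editor's note: each of the four rings carries one extra descriptor at
 * index *_RSIZE that is used as a JUMP back to entry 0 (its address is
 * filled in at ring-initialization time).  When an index reaches the
 * end of its ring, the code above re-arms that jump descriptor and
 * wraps the index, so the chip walks the ring as a circular list.
 */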
1381
1382 static int
1383 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1384 {
1385         struct hifn_dma *dma = sc->sc_dma;
1386         hifn_base_command_t wc;
1387         const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1388         int r, cmdi, resi, srci, dsti;
1389
1390         wc.masks = htole16(3 << 13);
1391         wc.session_num = htole16(addr >> 14);
1392         wc.total_source_count = htole16(8);
1393         wc.total_dest_count = htole16(addr & 0x3fff);
1394
1395         hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1396
1397         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1398             HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1399             HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1400
1401         /* build write command */
1402         bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1403         *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1404         bcopy(data, &dma->test_src, sizeof(dma->test_src));
1405
1406         dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1407             + offsetof(struct hifn_dma, test_src));
1408         dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1409             + offsetof(struct hifn_dma, test_dst));
1410
1411         dma->cmdr[cmdi].l = htole32(16 | masks);
1412         dma->srcr[srci].l = htole32(8 | masks);
1413         dma->dstr[dsti].l = htole32(4 | masks);
1414         dma->resr[resi].l = htole32(4 | masks);
1415
1416         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1417             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1418
1419         for (r = 10000; r >= 0; r--) {
1420                 DELAY(10);
1421                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1422                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1423                 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1424                         break;
1425                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1426                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1427         }
1428         if (r == 0) {
1429                 device_printf(sc->sc_dev, "writeramaddr -- "
1430                     "result[%d](addr %d) still valid\n", resi, addr);
1431                 r = -1;
1432                 return (-1);
1433         } else
1434                 r = 0;
1435
1436         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1437             HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1438             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1439
1440         return (r);
1441 }
1442
1443 static int
1444 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1445 {
1446         struct hifn_dma *dma = sc->sc_dma;
1447         hifn_base_command_t rc;
1448         const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1449         int r, cmdi, srci, dsti, resi;
1450
1451         rc.masks = htole16(2 << 13);
1452         rc.session_num = htole16(addr >> 14);
1453         rc.total_source_count = htole16(addr & 0x3fff);
1454         rc.total_dest_count = htole16(8);
1455
1456         hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1457
1458         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1459             HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1460             HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1461
1462         bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1463         *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1464
1465         dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1466             offsetof(struct hifn_dma, test_src));
1467         dma->test_src = 0;
1468         dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
1469             offsetof(struct hifn_dma, test_dst));
1470         dma->test_dst = 0;
1471         dma->cmdr[cmdi].l = htole32(8 | masks);
1472         dma->srcr[srci].l = htole32(8 | masks);
1473         dma->dstr[dsti].l = htole32(8 | masks);
1474         dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1475
1476         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1477             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1478
1479         for (r = 10000; r > 0; r--) {
1480                 DELAY(10);
1481                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1482                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1483                 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1484                         break;
1485                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1486                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1487         }
1488         if (r == 0) {
1489                 device_printf(sc->sc_dev, "readramaddr -- "
1490                     "result[%d](addr %d) still valid\n", resi, addr);
1491                 r = -1;
1492         } else {
1493                 r = 0;
1494                 bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1495         }
1496
1497         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1498             HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1499             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1500
1501         return (r);
1502 }
1503
1504 /*
1505  * Initialize the descriptor rings.
1506  */
1507 static void 
1508 hifn_init_dma(struct hifn_softc *sc)
1509 {
1510         struct hifn_dma *dma = sc->sc_dma;
1511         int i;
1512
1513         hifn_set_retry(sc);
1514
1515         /* initialize static pointer values */
1516         for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1517                 dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
1518                     offsetof(struct hifn_dma, command_bufs[i][0]));
1519         for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1520                 dma->resr[i].p = htole32(sc->sc_dma_physaddr +
1521                     offsetof(struct hifn_dma, result_bufs[i][0]));
1522
1523         dma->cmdr[HIFN_D_CMD_RSIZE].p =
1524             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
1525         dma->srcr[HIFN_D_SRC_RSIZE].p =
1526             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
1527         dma->dstr[HIFN_D_DST_RSIZE].p =
1528             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
1529         dma->resr[HIFN_D_RES_RSIZE].p =
1530             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
1531
1532         sc->sc_cmdu = sc->sc_srcu = sc->sc_dstu = sc->sc_resu = 0;
1533         sc->sc_cmdi = sc->sc_srci = sc->sc_dsti = sc->sc_resi = 0;
1534         sc->sc_cmdk = sc->sc_srck = sc->sc_dstk = sc->sc_resk = 0;
1535 }
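/*
 * Each ring in the shared hifn_dma block consists of *_RSIZE real
 * descriptors plus one trailing descriptor whose pointer is fixed here
 * to the start of the ring; marking that trailing entry VALID|JUMP at
 * wrap time makes the chip loop back to slot 0 on its own.
 */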
1536
1537 /*
1538  * Writes out the raw command buffer space.  Returns the
1539  * command buffer size.
1540  */
1541 static u_int
1542 hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
1543 {
1544         struct cryptop *crp;
1545         u_int8_t *buf_pos;
1546         hifn_base_command_t *base_cmd;
1547         hifn_mac_command_t *mac_cmd;
1548         hifn_crypt_command_t *cry_cmd;
1549         int using_mac, using_crypt, len, ivlen;
1550         u_int32_t dlen, slen;
1551
1552         crp = cmd->crp;
1553         buf_pos = buf;
1554         using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
1555         using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
1556
1557         base_cmd = (hifn_base_command_t *)buf_pos;
1558         base_cmd->masks = htole16(cmd->base_masks);
1559         slen = cmd->src_mapsize;
1560         if (cmd->sloplen)
1561                 dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
1562         else
1563                 dlen = cmd->dst_mapsize;
1564         base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
1565         base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1566         dlen >>= 16;
1567         slen >>= 16;
1568         base_cmd->session_num = htole16(
1569             ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1570             ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1571         buf_pos += sizeof(hifn_base_command_t);
1572
1573         if (using_mac) {
1574                 mac_cmd = (hifn_mac_command_t *)buf_pos;
1575                 dlen = crp->crp_aad_length + crp->crp_payload_length;
1576                 mac_cmd->source_count = htole16(dlen & 0xffff);
1577                 dlen >>= 16;
1578                 mac_cmd->masks = htole16(cmd->mac_masks |
1579                     ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
1580                 if (crp->crp_aad_length != 0)
1581                         mac_cmd->header_skip = htole16(crp->crp_aad_start);
1582                 else
1583                         mac_cmd->header_skip = htole16(crp->crp_payload_start);
1584                 mac_cmd->reserved = 0;
1585                 buf_pos += sizeof(hifn_mac_command_t);
1586         }
1587
1588         if (using_crypt) {
1589                 cry_cmd = (hifn_crypt_command_t *)buf_pos;
1590                 dlen = crp->crp_payload_length;
1591                 cry_cmd->source_count = htole16(dlen & 0xffff);
1592                 dlen >>= 16;
1593                 cry_cmd->masks = htole16(cmd->cry_masks |
1594                     ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
1595                 cry_cmd->header_skip = htole16(crp->crp_payload_start);
1596                 cry_cmd->reserved = 0;
1597                 buf_pos += sizeof(hifn_crypt_command_t);
1598         }
1599
1600         if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
1601                 bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
1602                 buf_pos += HIFN_MAC_KEY_LENGTH;
1603         }
1604
1605         if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
1606                 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1607                 case HIFN_CRYPT_CMD_ALG_3DES:
1608                         bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
1609                         buf_pos += HIFN_3DES_KEY_LENGTH;
1610                         break;
1611                 case HIFN_CRYPT_CMD_ALG_DES:
1612                         bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
1613                         buf_pos += HIFN_DES_KEY_LENGTH;
1614                         break;
1615                 case HIFN_CRYPT_CMD_ALG_RC4:
1616                         len = 256;
1617                         do {
1618                                 int clen;
1619
1620                                 clen = MIN(cmd->cklen, len);
1621                                 bcopy(cmd->ck, buf_pos, clen);
1622                                 len -= clen;
1623                                 buf_pos += clen;
1624                         } while (len > 0);
1625                         bzero(buf_pos, 4);
1626                         buf_pos += 4;
1627                         break;
1628                 case HIFN_CRYPT_CMD_ALG_AES:
1629                         /*
1630                          * AES keys are variable 128, 192 and
1631                          * 256 bits (16, 24 and 32 bytes).
1632                          */
1633                         bcopy(cmd->ck, buf_pos, cmd->cklen);
1634                         buf_pos += cmd->cklen;
1635                         break;
1636                 }
1637         }
1638
1639         if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
1640                 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1641                 case HIFN_CRYPT_CMD_ALG_AES:
1642                         ivlen = HIFN_AES_IV_LENGTH;
1643                         break;
1644                 default:
1645                         ivlen = HIFN_IV_LENGTH;
1646                         break;
1647                 }
1648                 bcopy(cmd->iv, buf_pos, ivlen);
1649                 buf_pos += ivlen;
1650         }
1651
1652         if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
1653                 bzero(buf_pos, 8);
1654                 buf_pos += 8;
1655         }
1656
1657         return (buf_pos - buf);
1658 }
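/*
 * In outline, the command buffer produced above is laid out as:
 *
 *      base command (8 bytes)
 *      [ MAC command (8 bytes)     if HIFN_BASE_CMD_MAC      ]
 *      [ crypt command (8 bytes)   if HIFN_BASE_CMD_CRYPT    ]
 *      [ MAC key                   if HIFN_MAC_CMD_NEW_KEY   ]
 *      [ cipher key                if HIFN_CRYPT_CMD_NEW_KEY ]
 *      [ IV (8 or 16 bytes)        if HIFN_CRYPT_CMD_NEW_IV  ]
 *      [ 8 zero bytes              if neither MAC nor CRYPT  ]
 *
 * The sizes come from the command structures in hifn7751reg.h; the
 * 8-byte figures assume their usual packed layout.
 */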
1659
1660 static int
1661 hifn_dmamap_aligned(struct hifn_operand *op)
1662 {
1663         int i;
1664
1665         for (i = 0; i < op->nsegs; i++) {
1666                 if (op->segs[i].ds_addr & 3)
1667                         return (0);
1668                 if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1669                         return (0);
1670         }
1671         return (1);
1672 }
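/*
 * NB: a loaded map is usable as-is only when every segment starts on a
 * 4-byte boundary and all but the last segment are a multiple of 4
 * bytes long; otherwise hifn_crypto() bounces mbuf chains through a
 * copy built from freshly allocated mbufs.
 */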
1673
1674 static __inline int
1675 hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
1676 {
1677         struct hifn_dma *dma = sc->sc_dma;
1678
1679         if (++idx == HIFN_D_DST_RSIZE) {
1680                 dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1681                     HIFN_D_MASKDONEIRQ);
1682                 HIFN_DSTR_SYNC(sc, idx,
1683                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1684                 idx = 0;
1685         }
1686         return (idx);
1687 }
1688
1689 static int
1690 hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
1691 {
1692         struct hifn_dma *dma = sc->sc_dma;
1693         struct hifn_operand *dst = &cmd->dst;
1694         u_int32_t p, l;
1695         int idx, used = 0, i;
1696
1697         idx = sc->sc_dsti;
1698         for (i = 0; i < dst->nsegs - 1; i++) {
1699                 dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1700                 dma->dstr[idx].l = htole32(HIFN_D_VALID |
1701                     HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
1702                 HIFN_DSTR_SYNC(sc, idx,
1703                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1704                 used++;
1705
1706                 idx = hifn_dmamap_dstwrap(sc, idx);
1707         }
1708
1709         if (cmd->sloplen == 0) {
1710                 p = dst->segs[i].ds_addr;
1711                 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1712                     dst->segs[i].ds_len;
1713         } else {
1714                 p = sc->sc_dma_physaddr +
1715                     offsetof(struct hifn_dma, slop[cmd->slopidx]);
1716                 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1717                     sizeof(u_int32_t);
1718
1719                 if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
1720                         dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1721                         dma->dstr[idx].l = htole32(HIFN_D_VALID |
1722                             HIFN_D_MASKDONEIRQ |
1723                             (dst->segs[i].ds_len - cmd->sloplen));
1724                         HIFN_DSTR_SYNC(sc, idx,
1725                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1726                         used++;
1727
1728                         idx = hifn_dmamap_dstwrap(sc, idx);
1729                 }
1730         }
1731         dma->dstr[idx].p = htole32(p);
1732         dma->dstr[idx].l = htole32(l);
1733         HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1734         used++;
1735
1736         idx = hifn_dmamap_dstwrap(sc, idx);
1737
1738         sc->sc_dsti = idx;
1739         sc->sc_dstu += used;
1740         return (idx);
1741 }
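/*
 * When the request length is not a multiple of 4, the final 1-3 "slop"
 * bytes are steered into a per-request 32-bit word in the shared DMA
 * block (dma->slop[]) instead of the caller's buffer; hifn_callback()
 * copies them back out once the result is reaped.
 */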
1742
1743 static __inline int
1744 hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
1745 {
1746         struct hifn_dma *dma = sc->sc_dma;
1747
1748         if (++idx == HIFN_D_SRC_RSIZE) {
1749                 dma->srcr[idx].l = htole32(HIFN_D_VALID |
1750                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1751                 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1752                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1753                 idx = 0;
1754         }
1755         return (idx);
1756 }
1757
1758 static int
1759 hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1760 {
1761         struct hifn_dma *dma = sc->sc_dma;
1762         struct hifn_operand *src = &cmd->src;
1763         int idx, i;
1764         u_int32_t last = 0;
1765
1766         idx = sc->sc_srci;
1767         for (i = 0; i < src->nsegs; i++) {
1768                 if (i == src->nsegs - 1)
1769                         last = HIFN_D_LAST;
1770
1771                 dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1772                 dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1773                     HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1774                 HIFN_SRCR_SYNC(sc, idx,
1775                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1776
1777                 idx = hifn_dmamap_srcwrap(sc, idx);
1778         }
1779         sc->sc_srci = idx;
1780         sc->sc_srcu += src->nsegs;
1781         return (idx);
1782
1783 }
1784 static bus_size_t
1785 hifn_crp_length(struct cryptop *crp)
1786 {
1787
1788         switch (crp->crp_buf_type) {
1789         case CRYPTO_BUF_MBUF:
1790                 return (crp->crp_mbuf->m_pkthdr.len);
1791         case CRYPTO_BUF_UIO:
1792                 return (crp->crp_uio->uio_resid);
1793         case CRYPTO_BUF_CONTIG:
1794                 return (crp->crp_ilen);
1795         default:
1796                 panic("bad crp buffer type");
1797         }
1798 }
1799
1800 static void
1801 hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, int error)
1802 {
1803         struct hifn_operand *op = arg;
1804
1805         KASSERT(nsegs <= MAX_SCATTER,
1806                 ("hifn_op_cb: too many DMA segments (%u > %u) "
1807                  "returned when mapping operand", nsegs, MAX_SCATTER));
1808         op->nsegs = nsegs;
1809         bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
1810 }
1811
1812 static int 
1813 hifn_crypto(
1814         struct hifn_softc *sc,
1815         struct hifn_command *cmd,
1816         struct cryptop *crp,
1817         int hint)
1818 {
1819         struct  hifn_dma *dma = sc->sc_dma;
1820         u_int32_t cmdlen, csr;
1821         int cmdi, resi, err = 0;
1822
1823         /*
1824          * need 1 cmd, and 1 res
1825          *
1826          * NB: check this first since it's easy.
1827          */
1828         HIFN_LOCK(sc);
1829         if ((sc->sc_cmdu + 1) > HIFN_D_CMD_RSIZE ||
1830             (sc->sc_resu + 1) > HIFN_D_RES_RSIZE) {
1831 #ifdef HIFN_DEBUG
1832                 if (hifn_debug) {
1833                         device_printf(sc->sc_dev,
1834                                 "cmd/result exhaustion, cmdu %u resu %u\n",
1835                                 sc->sc_cmdu, sc->sc_resu);
1836                 }
1837 #endif
1838                 hifnstats.hst_nomem_cr++;
1839                 HIFN_UNLOCK(sc);
1840                 return (ERESTART);
1841         }
1842
1843         if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
1844                 hifnstats.hst_nomem_map++;
1845                 HIFN_UNLOCK(sc);
1846                 return (ENOMEM);
1847         }
1848
1849         if (bus_dmamap_load_crp(sc->sc_dmat, cmd->src_map, crp, hifn_op_cb,
1850             &cmd->src, BUS_DMA_NOWAIT)) {
1851                 hifnstats.hst_nomem_load++;
1852                 err = ENOMEM;
1853                 goto err_srcmap1;
1854         }
1855         cmd->src_mapsize = hifn_crp_length(crp);
1856
1857         if (hifn_dmamap_aligned(&cmd->src)) {
1858                 cmd->sloplen = cmd->src_mapsize & 3;
1859                 cmd->dst = cmd->src;
1860         } else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) {
1861                 int totlen, len;
1862                 struct mbuf *m, *m0, *mlast;
1863
1864                 KASSERT(cmd->dst_m == NULL,
1865                     ("hifn_crypto: dst_m initialized improperly"));
1866                 hifnstats.hst_unaligned++;
1867
1868                 /*
1869                  * Source is not aligned on a longword boundary.
1870                  * Copy the data to ensure alignment.  If we fail
1871                  * to allocate mbufs or clusters while doing this
1872                  * we return ERESTART so the operation is requeued
1873                  * at the crypto layer, but only if there are
1874                  * ops already posted to the hardware; otherwise we
1875                  * have no guarantee that we'll be re-entered.
1876                  */
1877                 totlen = cmd->src_mapsize;
1878                 if (crp->crp_mbuf->m_flags & M_PKTHDR) {
1879                         len = MHLEN;
1880                         MGETHDR(m0, M_NOWAIT, MT_DATA);
1881                         if (m0 && !m_dup_pkthdr(m0, crp->crp_mbuf, M_NOWAIT)) {
1882                                 m_free(m0);
1883                                 m0 = NULL;
1884                         }
1885                 } else {
1886                         len = MLEN;
1887                         MGET(m0, M_NOWAIT, MT_DATA);
1888                 }
1889                 if (m0 == NULL) {
1890                         hifnstats.hst_nomem_mbuf++;
1891                         err = sc->sc_cmdu ? ERESTART : ENOMEM;
1892                         goto err_srcmap;
1893                 }
1894                 if (totlen >= MINCLSIZE) {
1895                         if (!(MCLGET(m0, M_NOWAIT))) {
1896                                 hifnstats.hst_nomem_mcl++;
1897                                 err = sc->sc_cmdu ? ERESTART : ENOMEM;
1898                                 m_freem(m0);
1899                                 goto err_srcmap;
1900                         }
1901                         len = MCLBYTES;
1902                 }
1903                 totlen -= len;
1904                 m0->m_pkthdr.len = m0->m_len = len;
1905                 mlast = m0;
1906
1907                 while (totlen > 0) {
1908                         MGET(m, M_NOWAIT, MT_DATA);
1909                         if (m == NULL) {
1910                                 hifnstats.hst_nomem_mbuf++;
1911                                 err = sc->sc_cmdu ? ERESTART : ENOMEM;
1912                                 m_freem(m0);
1913                                 goto err_srcmap;
1914                         }
1915                         len = MLEN;
1916                         if (totlen >= MINCLSIZE) {
1917                                 if (!(MCLGET(m, M_NOWAIT))) {
1918                                         hifnstats.hst_nomem_mcl++;
1919                                         err = sc->sc_cmdu ? ERESTART : ENOMEM;
1920                                         mlast->m_next = m;
1921                                         m_freem(m0);
1922                                         goto err_srcmap;
1923                                 }
1924                                 len = MCLBYTES;
1925                         }
1926
1927                         m->m_len = len;
1928                         m0->m_pkthdr.len += len;
1929                         totlen -= len;
1930
1931                         mlast->m_next = m;
1932                         mlast = m;
1933                 }
1934                 cmd->dst_m = m0;
1935
1936                 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
1937                     &cmd->dst_map)) {
1938                         hifnstats.hst_nomem_map++;
1939                         err = ENOMEM;
1940                         goto err_srcmap;
1941                 }
1942
1943                 if (bus_dmamap_load_mbuf_sg(sc->sc_dmat, cmd->dst_map, m0,
1944                     cmd->dst_segs, &cmd->dst_nsegs, 0)) {
1945                         hifnstats.hst_nomem_map++;
1946                         err = ENOMEM;
1947                         goto err_dstmap1;
1948                 }
1949                 cmd->dst_mapsize = m0->m_pkthdr.len;
1950         } else {
1951                 err = EINVAL;
1952                 goto err_srcmap;
1953         }
1954
1955 #ifdef HIFN_DEBUG
1956         if (hifn_debug) {
1957                 device_printf(sc->sc_dev,
1958                     "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
1959                     READ_REG_1(sc, HIFN_1_DMA_CSR),
1960                     READ_REG_1(sc, HIFN_1_DMA_IER),
1961                     sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu,
1962                     cmd->src_nsegs, cmd->dst_nsegs);
1963         }
1964 #endif
1965
1966         if (cmd->src_map == cmd->dst_map) {
1967                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1968                     BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1969         } else {
1970                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1971                     BUS_DMASYNC_PREWRITE);
1972                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
1973                     BUS_DMASYNC_PREREAD);
1974         }
1975
1976         /*
1977          * need N src, and N dst
1978          */
1979         if ((sc->sc_srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
1980             (sc->sc_dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
1981 #ifdef HIFN_DEBUG
1982                 if (hifn_debug) {
1983                         device_printf(sc->sc_dev,
1984                                 "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
1985                                 sc->sc_srcu, cmd->src_nsegs,
1986                                 sc->sc_dstu, cmd->dst_nsegs);
1987                 }
1988 #endif
1989                 hifnstats.hst_nomem_sd++;
1990                 err = ERESTART;
1991                 goto err_dstmap;
1992         }
1993
1994         if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) {
1995                 sc->sc_cmdi = 0;
1996                 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
1997                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1998                 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1999                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2000         }
2001         cmdi = sc->sc_cmdi++;
2002         cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2003         HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2004
2005         /* .p for command/result already set */
2006         dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2007             HIFN_D_MASKDONEIRQ);
2008         HIFN_CMDR_SYNC(sc, cmdi,
2009             BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2010         sc->sc_cmdu++;
2011
2012         /*
2013          * We don't worry about missing an interrupt (which a "command wait"
2014          * interrupt salvages us from), unless there is more than one command
2015          * in the queue.
2016          */
2017         if (sc->sc_cmdu > 1) {
2018                 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2019                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2020         }
2021
2022         hifnstats.hst_ipackets++;
2023         hifnstats.hst_ibytes += cmd->src_mapsize;
2024
2025         hifn_dmamap_load_src(sc, cmd);
2026
2027         /*
2028          * Unlike other descriptors, we normally don't mask the done
2029          * interrupt from the result descriptor; only batched requests do.
2030          */
2031 #ifdef HIFN_DEBUG
2032         if (hifn_debug)
2033                 printf("load res\n");
2034 #endif
2035         if (sc->sc_resi == HIFN_D_RES_RSIZE) {
2036                 sc->sc_resi = 0;
2037                 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2038                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2039                 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2040                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2041         }
2042         resi = sc->sc_resi++;
2043         KASSERT(sc->sc_hifn_commands[resi] == NULL,
2044                 ("hifn_crypto: command slot %u busy", resi));
2045         sc->sc_hifn_commands[resi] = cmd;
2046         HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2047         if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
2048                 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2049                     HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
2050                 sc->sc_curbatch++;
2051                 if (sc->sc_curbatch > hifnstats.hst_maxbatch)
2052                         hifnstats.hst_maxbatch = sc->sc_curbatch;
2053                 hifnstats.hst_totbatch++;
2054         } else {
2055                 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2056                     HIFN_D_VALID | HIFN_D_LAST);
2057                 sc->sc_curbatch = 0;
2058         }
2059         HIFN_RESR_SYNC(sc, resi,
2060             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2061         sc->sc_resu++;
2062
2063         if (cmd->sloplen)
2064                 cmd->slopidx = resi;
2065
2066         hifn_dmamap_load_dst(sc, cmd);
2067
2068         csr = 0;
2069         if (sc->sc_c_busy == 0) {
2070                 csr |= HIFN_DMACSR_C_CTRL_ENA;
2071                 sc->sc_c_busy = 1;
2072         }
2073         if (sc->sc_s_busy == 0) {
2074                 csr |= HIFN_DMACSR_S_CTRL_ENA;
2075                 sc->sc_s_busy = 1;
2076         }
2077         if (sc->sc_r_busy == 0) {
2078                 csr |= HIFN_DMACSR_R_CTRL_ENA;
2079                 sc->sc_r_busy = 1;
2080         }
2081         if (sc->sc_d_busy == 0) {
2082                 csr |= HIFN_DMACSR_D_CTRL_ENA;
2083                 sc->sc_d_busy = 1;
2084         }
2085         if (csr)
2086                 WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
2087
2088 #ifdef HIFN_DEBUG
2089         if (hifn_debug) {
2090                 device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
2091                     READ_REG_1(sc, HIFN_1_DMA_CSR),
2092                     READ_REG_1(sc, HIFN_1_DMA_IER));
2093         }
2094 #endif
2095
2096         sc->sc_active = 5;
2097         HIFN_UNLOCK(sc);
2098         KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
2099         return (err);           /* success */
2100
2101 err_dstmap:
2102         if (cmd->src_map != cmd->dst_map)
2103                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2104 err_dstmap1:
2105         if (cmd->src_map != cmd->dst_map)
2106                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2107 err_srcmap:
2108         if (crp->crp_buf_type == CRYPTO_BUF_MBUF) {
2109                 if (cmd->dst_m != NULL)
2110                         m_freem(cmd->dst_m);
2111         }
2112         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2113 err_srcmap1:
2114         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2115         HIFN_UNLOCK(sc);
2116         return (err);
2117 }
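/*
 * Submission order used above, in outline: reserve a command and a
 * result slot, write the command buffer, enable the "command wait"
 * interrupt once more than one command is outstanding, load the source
 * descriptors, post the result descriptor (its done interrupt is masked
 * only when batching under CRYPTO_HINT_MORE), load the destination
 * descriptors, and finally enable any DMA engines that were idle.
 */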
2118
2119 static void
2120 hifn_tick(void* vsc)
2121 {
2122         struct hifn_softc *sc = vsc;
2123
2124         HIFN_LOCK(sc);
2125         if (sc->sc_active == 0) {
2126                 u_int32_t r = 0;
2127
2128                 if (sc->sc_cmdu == 0 && sc->sc_c_busy) {
2129                         sc->sc_c_busy = 0;
2130                         r |= HIFN_DMACSR_C_CTRL_DIS;
2131                 }
2132                 if (sc->sc_srcu == 0 && sc->sc_s_busy) {
2133                         sc->sc_s_busy = 0;
2134                         r |= HIFN_DMACSR_S_CTRL_DIS;
2135                 }
2136                 if (sc->sc_dstu == 0 && sc->sc_d_busy) {
2137                         sc->sc_d_busy = 0;
2138                         r |= HIFN_DMACSR_D_CTRL_DIS;
2139                 }
2140                 if (sc->sc_resu == 0 && sc->sc_r_busy) {
2141                         sc->sc_r_busy = 0;
2142                         r |= HIFN_DMACSR_R_CTRL_DIS;
2143                 }
2144                 if (r)
2145                         WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
2146         } else
2147                 sc->sc_active--;
2148         HIFN_UNLOCK(sc);
2149         callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
2150 }
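/*
 * hifn_tick() runs once a second; sc_active is reset to 5 on every
 * submission, so a DMA engine is disabled only after its ring has
 * drained and the part has been idle for roughly five seconds.
 */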
2151
2152 static void 
2153 hifn_intr(void *arg)
2154 {
2155         struct hifn_softc *sc = arg;
2156         struct hifn_dma *dma;
2157         u_int32_t dmacsr, restart;
2158         int i, u;
2159
2160         dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
2161
2162         /* Nothing in the DMA unit interrupted */
2163         if ((dmacsr & sc->sc_dmaier) == 0)
2164                 return;
2165
2166         HIFN_LOCK(sc);
2167
2168         dma = sc->sc_dma;
2169
2170 #ifdef HIFN_DEBUG
2171         if (hifn_debug) {
2172                 device_printf(sc->sc_dev,
2173                     "irq: stat %08x ien %08x dmaier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
2174                     dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
2175                     sc->sc_cmdi, sc->sc_srci, sc->sc_dsti, sc->sc_resi,
2176                     sc->sc_cmdk, sc->sc_srck, sc->sc_dstk, sc->sc_resk,
2177                     sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
2178         }
2179 #endif
2180
2181         WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
2182
2183         if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
2184             (dmacsr & HIFN_DMACSR_PUBDONE))
2185                 WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
2186                     READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
2187
2188         restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
2189         if (restart)
2190                 device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
2191
2192         if (sc->sc_flags & HIFN_IS_7811) {
2193                 if (dmacsr & HIFN_DMACSR_ILLR)
2194                         device_printf(sc->sc_dev, "illegal read\n");
2195                 if (dmacsr & HIFN_DMACSR_ILLW)
2196                         device_printf(sc->sc_dev, "illegal write\n");
2197         }
2198
2199         restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
2200             HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
2201         if (restart) {
2202                 device_printf(sc->sc_dev, "abort, resetting.\n");
2203                 hifnstats.hst_abort++;
2204                 hifn_abort(sc);
2205                 HIFN_UNLOCK(sc);
2206                 return;
2207         }
2208
2209         if ((dmacsr & HIFN_DMACSR_C_WAIT) && (sc->sc_cmdu == 0)) {
2210                 /*
2211                  * If no slots to process and we receive a "waiting on
2212                  * command" interrupt, we disable the "waiting on command"
2213                  * interrupt by clearing its enable bit.
2214                  */
2215                 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
2216                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2217         }
2218
2219         /* clear the rings */
2220         i = sc->sc_resk; u = sc->sc_resu;
2221         while (u != 0) {
2222                 HIFN_RESR_SYNC(sc, i,
2223                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2224                 if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
2225                         HIFN_RESR_SYNC(sc, i,
2226                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2227                         break;
2228                 }
2229
2230                 if (i != HIFN_D_RES_RSIZE) {
2231                         struct hifn_command *cmd;
2232                         u_int8_t *macbuf = NULL;
2233
2234                         HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
2235                         cmd = sc->sc_hifn_commands[i];
2236                         KASSERT(cmd != NULL,
2237                                 ("hifn_intr: null command slot %u", i));
2238                         sc->sc_hifn_commands[i] = NULL;
2239
2240                         if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2241                                 macbuf = dma->result_bufs[i];
2242                                 macbuf += 12;
2243                         }
2244
2245                         hifn_callback(sc, cmd, macbuf);
2246                         hifnstats.hst_opackets++;
2247                         u--;
2248                 }
2249
2250                 if (++i == (HIFN_D_RES_RSIZE + 1))
2251                         i = 0;
2252         }
2253         sc->sc_resk = i; sc->sc_resu = u;
2254
2255         i = sc->sc_srck; u = sc->sc_srcu;
2256         while (u != 0) {
2257                 if (i == HIFN_D_SRC_RSIZE)
2258                         i = 0;
2259                 HIFN_SRCR_SYNC(sc, i,
2260                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2261                 if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
2262                         HIFN_SRCR_SYNC(sc, i,
2263                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2264                         break;
2265                 }
2266                 i++, u--;
2267         }
2268         sc->sc_srck = i; sc->sc_srcu = u;
2269
2270         i = sc->sc_cmdk; u = sc->sc_cmdu;
2271         while (u != 0) {
2272                 HIFN_CMDR_SYNC(sc, i,
2273                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2274                 if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
2275                         HIFN_CMDR_SYNC(sc, i,
2276                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2277                         break;
2278                 }
2279                 if (i != HIFN_D_CMD_RSIZE) {
2280                         u--;
2281                         HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
2282                 }
2283                 if (++i == (HIFN_D_CMD_RSIZE + 1))
2284                         i = 0;
2285         }
2286         sc->sc_cmdk = i; sc->sc_cmdu = u;
2287
2288         HIFN_UNLOCK(sc);
2289
2290         if (sc->sc_needwakeup) {                /* XXX check high watermark */
2291                 int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
2292 #ifdef HIFN_DEBUG
2293                 if (hifn_debug)
2294                         device_printf(sc->sc_dev,
2295                                 "wakeup crypto (%x) u %d/%d/%d/%d\n",
2296                                 sc->sc_needwakeup,
2297                                 sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
2298 #endif
2299                 sc->sc_needwakeup &= ~wakeup;
2300                 crypto_unblock(sc->sc_cid, wakeup);
2301         }
2302 }
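/*
 * The interrupt handler reaps the result, source and command rings in
 * that order, stopping each pass at the first descriptor still marked
 * VALID and skipping the JUMP descriptor at index *_RSIZE.  Completed
 * requests are handed to hifn_callback() before the crypto layer is
 * unblocked.
 */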
2303
2304 static bool
2305 hifn_auth_supported(struct hifn_softc *sc,
2306     const struct crypto_session_params *csp)
2307 {
2308
2309         switch (sc->sc_ena) {
2310         case HIFN_PUSTAT_ENA_2:
2311         case HIFN_PUSTAT_ENA_1:
2312                 break;
2313         default:
2314                 return (false);
2315         }
2316                 
2317         switch (csp->csp_auth_alg) {
2318         case CRYPTO_MD5:
2319         case CRYPTO_SHA1:
2320                 break;
2321         case CRYPTO_MD5_HMAC:
2322         case CRYPTO_SHA1_HMAC:
2323                 if (csp->csp_auth_klen > HIFN_MAC_KEY_LENGTH)
2324                         return (false);
2325                 break;
2326         default:
2327                 return (false);
2328         }
2329
2330         return (true);  
2331 }
2332
2333 static bool
2334 hifn_cipher_supported(struct hifn_softc *sc,
2335     const struct crypto_session_params *csp)
2336 {
2337
2338         if (csp->csp_cipher_klen == 0)
2339                 return (false);
2340         if (csp->csp_ivlen > HIFN_MAX_IV_LENGTH)
2341                 return (false);
2342         switch (sc->sc_ena) {
2343         case HIFN_PUSTAT_ENA_2:
2344                 switch (csp->csp_cipher_alg) {
2345                 case CRYPTO_3DES_CBC:
2346                 case CRYPTO_ARC4:
2347                         break;
2348                 case CRYPTO_AES_CBC:
2349                         if ((sc->sc_flags & HIFN_HAS_AES) == 0)
2350                                 return (false);
2351                         switch (csp->csp_cipher_klen) {
2352                         case 16:
2353                         case 24:
2354                         case 32:
2355                                 break;
2356                         default:
2357                                 return (false);
2358                         }
2359                         return (true);
2360                 }
2361                 /*FALLTHROUGH*/
2362         case HIFN_PUSTAT_ENA_1:
2363                 switch (csp->csp_cipher_alg) {
2364                 case CRYPTO_DES_CBC:
2365                         return (true);
2366                 }
2367                 break;
2368         }
2369         return (false);
2370 }
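/*
 * NB: csp_cipher_klen is a byte count, so the AES check above accepts
 * 16-, 24- and 32-byte (128/192/256-bit) keys, matching the key-size
 * selection done later in hifn_process().
 */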
2371
2372 static int
2373 hifn_probesession(device_t dev, const struct crypto_session_params *csp)
2374 {
2375         struct hifn_softc *sc;
2376
2377         sc = device_get_softc(dev);
2378         if (csp->csp_flags != 0)
2379                 return (EINVAL);
2380         switch (csp->csp_mode) {
2381         case CSP_MODE_DIGEST:
2382                 if (!hifn_auth_supported(sc, csp))
2383                         return (EINVAL);
2384                 break;
2385         case CSP_MODE_CIPHER:
2386                 if (!hifn_cipher_supported(sc, csp))
2387                         return (EINVAL);
2388                 break;
2389         case CSP_MODE_ETA:
2390                 if (!hifn_auth_supported(sc, csp) ||
2391                     !hifn_cipher_supported(sc, csp))
2392                         return (EINVAL);
2393                 break;
2394         default:
2395                 return (EINVAL);
2396         }
2397
2398         return (CRYPTODEV_PROBE_HARDWARE);
2399 }
2400
2401 /*
2402  * Allocate a new 'session'.
2403  */
2404 static int
2405 hifn_newsession(device_t dev, crypto_session_t cses,
2406     const struct crypto_session_params *csp)
2407 {
2408         struct hifn_session *ses;
2409
2410         ses = crypto_get_driver_session(cses);
2411
2412         if (csp->csp_auth_alg != 0) {
2413                 if (csp->csp_auth_mlen == 0)
2414                         ses->hs_mlen = crypto_auth_hash(csp)->hashsize;
2415                 else
2416                         ses->hs_mlen = csp->csp_auth_mlen;
2417         }
2418
2419         return (0);
2420 }
2421
2422 /*
2423  * XXX freesession routine should run a zero'd mac/encrypt key into context
2424  * ram to blow away any keys already stored there.
2425  */
2426
2427 static int
2428 hifn_process(device_t dev, struct cryptop *crp, int hint)
2429 {
2430         const struct crypto_session_params *csp;
2431         struct hifn_softc *sc = device_get_softc(dev);
2432         struct hifn_command *cmd = NULL;
2433         const void *mackey;
2434         int err, keylen;
2435         struct hifn_session *ses;
2436
2437         ses = crypto_get_driver_session(crp->crp_session);
2438
2439         cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO);
2440         if (cmd == NULL) {
2441                 hifnstats.hst_nomem++;
2442                 err = ENOMEM;
2443                 goto errout;
2444         }
2445
2446         csp = crypto_get_params(crp->crp_session);
2447
2448         /*
2449          * The driver only supports ETA requests where there is no
2450          * gap between the AAD and payload.
2451          */
2452         if (csp->csp_mode == CSP_MODE_ETA && crp->crp_aad_length != 0 &&
2453             crp->crp_aad_start + crp->crp_aad_length !=
2454             crp->crp_payload_start) {
2455                 err = EINVAL;
2456                 goto errout;
2457         }
2458
2459         switch (csp->csp_mode) {
2460         case CSP_MODE_CIPHER:
2461         case CSP_MODE_ETA:
2462                 if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2463                         cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2464                 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2465                 switch (csp->csp_cipher_alg) {
2466                 case CRYPTO_ARC4:
2467                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2468                         break;
2469                 case CRYPTO_DES_CBC:
2470                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2471                             HIFN_CRYPT_CMD_MODE_CBC |
2472                             HIFN_CRYPT_CMD_NEW_IV;
2473                         break;
2474                 case CRYPTO_3DES_CBC:
2475                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2476                             HIFN_CRYPT_CMD_MODE_CBC |
2477                             HIFN_CRYPT_CMD_NEW_IV;
2478                         break;
2479                 case CRYPTO_AES_CBC:
2480                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2481                             HIFN_CRYPT_CMD_MODE_CBC |
2482                             HIFN_CRYPT_CMD_NEW_IV;
2483                         break;
2484                 default:
2485                         err = EINVAL;
2486                         goto errout;
2487                 }
2488                 if (csp->csp_cipher_alg != CRYPTO_ARC4)
2489                         crypto_read_iv(crp, cmd->iv);
2490
2491                 if (crp->crp_cipher_key != NULL)
2492                         cmd->ck = crp->crp_cipher_key;
2493                 else
2494                         cmd->ck = csp->csp_cipher_key;
2495                 cmd->cklen = csp->csp_cipher_klen;
2496                 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2497
2498                 /* 
2499                  * Need to specify the size for the AES key in the masks.
2500                  */
2501                 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2502                     HIFN_CRYPT_CMD_ALG_AES) {
2503                         switch (cmd->cklen) {
2504                         case 16:
2505                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2506                                 break;
2507                         case 24:
2508                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2509                                 break;
2510                         case 32:
2511                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2512                                 break;
2513                         default:
2514                                 err = EINVAL;
2515                                 goto errout;
2516                         }
2517                 }
2518                 break;
2519         }
2520
2521         switch (csp->csp_mode) {
2522         case CSP_MODE_DIGEST:
2523         case CSP_MODE_ETA:
2524                 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2525
2526                 switch (csp->csp_auth_alg) {
2527                 case CRYPTO_MD5:
2528                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2529                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2530                             HIFN_MAC_CMD_POS_IPSEC;
2531                         break;
2532                 case CRYPTO_MD5_HMAC:
2533                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2534                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2535                             HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2536                         break;
2537                 case CRYPTO_SHA1:
2538                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2539                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2540                             HIFN_MAC_CMD_POS_IPSEC;
2541                         break;
2542                 case CRYPTO_SHA1_HMAC:
2543                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2544                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2545                             HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2546                         break;
2547                 }
2548
2549                 if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC ||
2550                     csp->csp_auth_alg == CRYPTO_MD5_HMAC) {
2551                         cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2552                         if (crp->crp_auth_key != NULL)
2553                                 mackey = crp->crp_auth_key;
2554                         else
2555                                 mackey = csp->csp_auth_key;
2556                         keylen = csp->csp_auth_klen;
2557                         bcopy(mackey, cmd->mac, keylen);
2558                         bzero(cmd->mac + keylen, HIFN_MAC_KEY_LENGTH - keylen);
2559                 }
2560         }
2561
2562         cmd->crp = crp;
2563         cmd->session = ses;
2564         cmd->softc = sc;
2565
2566         err = hifn_crypto(sc, cmd, crp, hint);
2567         if (!err) {
2568                 return (0);
2569         } else if (err == ERESTART) {
2570                 /*
2571                  * There weren't enough resources to dispatch the request
2572                  * to the part.  Notify the caller so they'll requeue this
2573                  * request and resubmit it again soon.
2574                  */
2575 #ifdef HIFN_DEBUG
2576                 if (hifn_debug)
2577                         device_printf(sc->sc_dev, "requeue request\n");
2578 #endif
2579                 free(cmd, M_DEVBUF);
2580                 sc->sc_needwakeup |= CRYPTO_SYMQ;
2581                 return (err);
2582         }
2583
2584 errout:
2585         if (cmd != NULL)
2586                 free(cmd, M_DEVBUF);
2587         if (err == EINVAL)
2588                 hifnstats.hst_invalid++;
2589         else
2590                 hifnstats.hst_nomem++;
2591         crp->crp_etype = err;
2592         crypto_done(crp);
2593         return (err);
2594 }
2595
2596 static void
2597 hifn_abort(struct hifn_softc *sc)
2598 {
2599         struct hifn_dma *dma = sc->sc_dma;
2600         struct hifn_command *cmd;
2601         struct cryptop *crp;
2602         int i, u;
2603
2604         i = sc->sc_resk; u = sc->sc_resu;
2605         while (u != 0) {
2606                 cmd = sc->sc_hifn_commands[i];
2607                 KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
2608                 sc->sc_hifn_commands[i] = NULL;
2609                 crp = cmd->crp;
2610
2611                 if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
2612                         /* Salvage what we can. */
2613                         u_int8_t *macbuf;
2614
2615                         if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2616                                 macbuf = dma->result_bufs[i];
2617                                 macbuf += 12;
2618                         } else
2619                                 macbuf = NULL;
2620                         hifnstats.hst_opackets++;
2621                         hifn_callback(sc, cmd, macbuf);
2622                 } else {
2623                         if (cmd->src_map == cmd->dst_map) {
2624                                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2625                                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2626                         } else {
2627                                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2628                                     BUS_DMASYNC_POSTWRITE);
2629                                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2630                                     BUS_DMASYNC_POSTREAD);
2631                         }
2632
2633                         if (cmd->dst_m != NULL) {
2634                                 m_freem(cmd->dst_m);
2635                         }
2636
2637                         /* non-shared buffers cannot be restarted */
2638                         if (cmd->src_map != cmd->dst_map) {
2639                                 /*
2640                                  * XXX should be EAGAIN, delayed until
2641                                  * after the reset.
2642                                  */
2643                                 crp->crp_etype = ENOMEM;
2644                                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2645                                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2646                         } else
2647                                 crp->crp_etype = ENOMEM;
2648
2649                         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2650                         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2651
2652                         free(cmd, M_DEVBUF);
2653                         if (crp->crp_etype != EAGAIN)
2654                                 crypto_done(crp);
2655                 }
2656
2657                 if (++i == HIFN_D_RES_RSIZE)
2658                         i = 0;
2659                 u--;
2660         }
2661         sc->sc_resk = i; sc->sc_resu = u;
2662
2663         hifn_reset_board(sc, 1);
2664         hifn_init_dma(sc);
2665         hifn_init_pci_registers(sc);
2666 }
2667
2668 static void
2669 hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
2670 {
2671         struct hifn_dma *dma = sc->sc_dma;
2672         struct cryptop *crp = cmd->crp;
2673         uint8_t macbuf2[SHA1_HASH_LEN];
2674         struct mbuf *m;
2675         int totlen, i, u;
2676
2677         if (cmd->src_map == cmd->dst_map) {
2678                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2679                     BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2680         } else {
2681                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2682                     BUS_DMASYNC_POSTWRITE);
2683                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2684                     BUS_DMASYNC_POSTREAD);
2685         }
2686
2687         if (crp->crp_buf_type == CRYPTO_BUF_MBUF) {
2688                 if (cmd->dst_m != NULL) {
2689                         totlen = cmd->src_mapsize;
2690                         for (m = cmd->dst_m; m != NULL; m = m->m_next) {
2691                                 if (totlen < m->m_len) {
2692                                         m->m_len = totlen;
2693                                         totlen = 0;
2694                                 } else
2695                                         totlen -= m->m_len;
2696                         }
2697                         cmd->dst_m->m_pkthdr.len = crp->crp_mbuf->m_pkthdr.len;
2698                         m_freem(crp->crp_mbuf);
2699                         crp->crp_mbuf = cmd->dst_m;
2700                 }
2701         }
2702
2703         if (cmd->sloplen != 0) {
2704                 crypto_copyback(crp, cmd->src_mapsize - cmd->sloplen,
2705                     cmd->sloplen, &dma->slop[cmd->slopidx]);
2706         }
2707
2708         i = sc->sc_dstk; u = sc->sc_dstu;
2709         while (u != 0) {
2710                 if (i == HIFN_D_DST_RSIZE)
2711                         i = 0;
2712                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2713                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2714                 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2715                         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2716                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2717                         break;
2718                 }
2719                 i++, u--;
2720         }
2721         sc->sc_dstk = i; sc->sc_dstu = u;
2722
2723         hifnstats.hst_obytes += cmd->dst_mapsize;
2724
2725         if (macbuf != NULL) {
2726                 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
2727                         crypto_copydata(crp, crp->crp_digest_start,
2728                             cmd->session->hs_mlen, macbuf2);
2729                         if (timingsafe_bcmp(macbuf, macbuf2,
2730                             cmd->session->hs_mlen) != 0)
2731                                 crp->crp_etype = EBADMSG;
2732                 } else
2733                         crypto_copyback(crp, crp->crp_digest_start,
2734                             cmd->session->hs_mlen, macbuf);
2735         }
2736
2737         if (cmd->src_map != cmd->dst_map) {
2738                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2739                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2740         }
2741         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2742         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2743         free(cmd, M_DEVBUF);
2744         crypto_done(crp);
2745 }
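/*
 * For requests that were bounced for alignment, the substitute mbuf
 * chain (cmd->dst_m) is trimmed to the original length and swapped in
 * for crp_mbuf above, and the original chain is freed.  The MAC, if
 * any, is either compared against the digest already in the buffer
 * (verify) or copied out at crp_digest_start.
 */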
2746
2747 /*
2748  * 7811 PB3 rev/2 parts lock up on burst writes to Group 0
2749  * and Group 1 registers; avoid conditions that could create
2750  * burst writes by doing a read in between the writes.
2751  *
2752  * NB: The read we interpose is always to the same register;
2753  *     we do this because reading from an arbitrary (e.g. last)
2754  *     register may not always work.
2755  */
2756 static void
2757 hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2758 {
2759         if (sc->sc_flags & HIFN_IS_7811) {
2760                 if (sc->sc_bar0_lastreg == reg - 4)
2761                         bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
2762                 sc->sc_bar0_lastreg = reg;
2763         }
2764         bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2765 }
2766
2767 static void
2768 hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2769 {
2770         if (sc->sc_flags & HIFN_IS_7811) {
2771                 if (sc->sc_bar1_lastreg == reg - 4)
2772                         bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2773                 sc->sc_bar1_lastreg = reg;
2774         }
2775         bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2776 }
2777
2778 #ifdef HIFN_VULCANDEV
2779 /*
2780  * this code provides support for mapping the PK engine's registers
2781  * into a userspace program's address space.
2782  *
2783  */
2784 static int
2785 vulcanpk_mmap(struct cdev *dev, vm_ooffset_t offset,
2786               vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr)
2787 {
2788         struct hifn_softc *sc;
2789         vm_paddr_t pd;
2790         void *b;
2791
2792         sc = dev->si_drv1;
2793
2794         pd = rman_get_start(sc->sc_bar1res);
2795         b = rman_get_virtual(sc->sc_bar1res);
2796
2797 #if 0
2798         printf("vpk mmap: %p(%016llx) offset=%lld\n", b,
2799             (unsigned long long)pd, offset);
2800         hexdump(b, HIFN_1_PUB_MEMEND, "vpk", 0);
2801 #endif
2802
2803         if (offset == 0) {
2804                 *paddr = pd;
2805                 return (0);
2806         }
2807         return (-1);
2808 }
2809
2810 static struct cdevsw vulcanpk_cdevsw = {
2811         .d_version =    D_VERSION,
2812         .d_mmap =       vulcanpk_mmap,
2813         .d_name =       "vulcanpk",
2814 };
2815 #endif /* HIFN_VULCANDEV */