1 /*      $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $  */
2
3 /*-
4  * SPDX-License-Identifier: BSD-3-Clause
5  *
6  * Invertex AEON / Hifn 7751 driver
7  * Copyright (c) 1999 Invertex Inc. All rights reserved.
8  * Copyright (c) 1999 Theo de Raadt
9  * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10  *                      http://www.netsec.net
11  * Copyright (c) 2003 Hifn Inc.
12  *
13  * This driver is based on a previous driver by Invertex, for which they
14  * requested:  Please send any comments, feedback, bug-fixes, or feature
15  * requests to software@invertex.com.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  *
21  * 1. Redistributions of source code must retain the above copyright
22  *   notice, this list of conditions and the following disclaimer.
23  * 2. Redistributions in binary form must reproduce the above copyright
24  *   notice, this list of conditions and the following disclaimer in the
25  *   documentation and/or other materials provided with the distribution.
26  * 3. The name of the author may not be used to endorse or promote products
27  *   derived from this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  *
40  * Effort sponsored in part by the Defense Advanced Research Projects
41  * Agency (DARPA) and Air Force Research Laboratory, Air Force
42  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43  */
44
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47
48 /*
49  * Driver for various Hifn encryption processors.
50  */
51 #include "opt_hifn.h"
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/proc.h>
56 #include <sys/errno.h>
57 #include <sys/malloc.h>
58 #include <sys/kernel.h>
59 #include <sys/module.h>
60 #include <sys/mbuf.h>
61 #include <sys/lock.h>
62 #include <sys/mutex.h>
63 #include <sys/sysctl.h>
64
65 #include <vm/vm.h>
66 #include <vm/pmap.h>
67
68 #include <machine/bus.h>
69 #include <machine/resource.h>
70 #include <sys/bus.h>
71 #include <sys/rman.h>
72
73 #include <opencrypto/cryptodev.h>
74 #include <sys/random.h>
75 #include <sys/kobj.h>
76
77 #include "cryptodev_if.h"
78
79 #include <dev/pci/pcivar.h>
80 #include <dev/pci/pcireg.h>
81
82 #ifdef HIFN_RNDTEST
83 #include <dev/rndtest/rndtest.h>
84 #endif
85 #include <dev/hifn/hifn7751reg.h>
86 #include <dev/hifn/hifn7751var.h>
87
88 #ifdef HIFN_VULCANDEV
89 #include <sys/conf.h>
90 #include <sys/uio.h>
91
92 static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
93 #endif
94
95 /*
96  * Prototypes for the PCI device interface and crypto methods
97  */
98 static  int hifn_probe(device_t);
99 static  int hifn_attach(device_t);
100 static  int hifn_detach(device_t);
101 static  int hifn_suspend(device_t);
102 static  int hifn_resume(device_t);
103 static  int hifn_shutdown(device_t);
104
105 static  int hifn_newsession(device_t, crypto_session_t, struct cryptoini *);
106 static  int hifn_process(device_t, struct cryptop *, int);
107
108 static device_method_t hifn_methods[] = {
109         /* Device interface */
110         DEVMETHOD(device_probe,         hifn_probe),
111         DEVMETHOD(device_attach,        hifn_attach),
112         DEVMETHOD(device_detach,        hifn_detach),
113         DEVMETHOD(device_suspend,       hifn_suspend),
114         DEVMETHOD(device_resume,        hifn_resume),
115         DEVMETHOD(device_shutdown,      hifn_shutdown),
116
117         /* crypto device methods */
118         DEVMETHOD(cryptodev_newsession, hifn_newsession),
119         DEVMETHOD(cryptodev_process,    hifn_process),
120
121         DEVMETHOD_END
122 };
123 static driver_t hifn_driver = {
124         "hifn",
125         hifn_methods,
126         sizeof (struct hifn_softc)
127 };
128 static devclass_t hifn_devclass;
129
130 DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
131 MODULE_DEPEND(hifn, crypto, 1, 1, 1);
132 #ifdef HIFN_RNDTEST
133 MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
134 #endif
135
136 static  void hifn_reset_board(struct hifn_softc *, int);
137 static  void hifn_reset_puc(struct hifn_softc *);
138 static  void hifn_puc_wait(struct hifn_softc *);
139 static  int hifn_enable_crypto(struct hifn_softc *);
140 static  void hifn_set_retry(struct hifn_softc *sc);
141 static  void hifn_init_dma(struct hifn_softc *);
142 static  void hifn_init_pci_registers(struct hifn_softc *);
143 static  int hifn_sramsize(struct hifn_softc *);
144 static  int hifn_dramsize(struct hifn_softc *);
145 static  int hifn_ramtype(struct hifn_softc *);
146 static  void hifn_sessions(struct hifn_softc *);
147 static  void hifn_intr(void *);
148 static  u_int hifn_write_command(struct hifn_command *, u_int8_t *);
149 static  u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
150 static  void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
151 static  int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
152 static  int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
153 static  int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
154 static  int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
155 static  int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
156 static  int hifn_init_pubrng(struct hifn_softc *);
157 static  void hifn_rng(void *);
158 static  void hifn_tick(void *);
159 static  void hifn_abort(struct hifn_softc *);
160 static  void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
161
162 static  void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
163 static  void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
164
165 static __inline u_int32_t
166 READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
167 {
168     u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
169     sc->sc_bar0_lastreg = (bus_size_t) -1;
170     return (v);
171 }
172 #define WRITE_REG_0(sc, reg, val)       hifn_write_reg_0(sc, reg, val)
173
174 static __inline u_int32_t
175 READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
176 {
177     u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
178     sc->sc_bar1_lastreg = (bus_size_t) -1;
179     return (v);
180 }
181 #define WRITE_REG_1(sc, reg, val)       hifn_write_reg_1(sc, reg, val)
182
183 static SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0,
184             "Hifn driver parameters");
185
186 #ifdef HIFN_DEBUG
187 static  int hifn_debug = 0;
188 SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
189             0, "control debugging msgs");
190 #endif
191
192 static  struct hifn_stats hifnstats;
193 SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
194             hifn_stats, "driver statistics");
195 static  int hifn_maxbatch = 1;
196 SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
197             0, "max ops to batch w/o interrupt");
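/*
 * The knobs above appear under the hw.hifn sysctl tree once the driver
 * is loaded.  As an illustration only, batching could be tuned at run
 * time with something like:
 *
 *	sysctl hw.hifn.maxbatch=4
 *
 * while the statistics structure is exported as the opaque node
 * hw.hifn.stats.
 */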
198
199 /*
200  * Probe for a supported device.  The PCI vendor and device
201  * IDs are used to detect devices we know how to handle.
202  */
203 static int
204 hifn_probe(device_t dev)
205 {
206         if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
207             pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
208                 return (BUS_PROBE_DEFAULT);
209         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
210             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
211              pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
212              pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
213              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
214              pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
215                 return (BUS_PROBE_DEFAULT);
216         if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
217             pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
218                 return (BUS_PROBE_DEFAULT);
219         return (ENXIO);
220 }
221
222 static void
223 hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
224 {
225         bus_addr_t *paddr = (bus_addr_t*) arg;
226         *paddr = segs->ds_addr;
227 }
228
229 static const char*
230 hifn_partname(struct hifn_softc *sc)
231 {
232         /* XXX sprintf numbers when not decoded */
233         switch (pci_get_vendor(sc->sc_dev)) {
234         case PCI_VENDOR_HIFN:
235                 switch (pci_get_device(sc->sc_dev)) {
236                 case PCI_PRODUCT_HIFN_6500:     return "Hifn 6500";
237                 case PCI_PRODUCT_HIFN_7751:     return "Hifn 7751";
238                 case PCI_PRODUCT_HIFN_7811:     return "Hifn 7811";
239                 case PCI_PRODUCT_HIFN_7951:     return "Hifn 7951";
240                 case PCI_PRODUCT_HIFN_7955:     return "Hifn 7955";
241                 case PCI_PRODUCT_HIFN_7956:     return "Hifn 7956";
242                 }
243                 return "Hifn unknown-part";
244         case PCI_VENDOR_INVERTEX:
245                 switch (pci_get_device(sc->sc_dev)) {
246                 case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
247                 }
248                 return "Invertex unknown-part";
249         case PCI_VENDOR_NETSEC:
250                 switch (pci_get_device(sc->sc_dev)) {
251                 case PCI_PRODUCT_NETSEC_7751:   return "NetSec 7751";
252                 }
253                 return "NetSec unknown-part";
254         }
255         return "Unknown-vendor unknown-part";
256 }
257
258 static void
259 default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
260 {
261         /* MarkM: FIX!! Check that this does not swamp the harvester! */
262         random_harvest_queue(buf, count, count*NBBY/2, RANDOM_PURE_HIFN);
263 }
264
265 static u_int
266 checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max)
267 {
268         if (v > max) {
269                 device_printf(dev, "Warning, %s %u out of range, "
270                         "using max %u\n", what, v, max);
271                 v = max;
272         } else if (v < min) {
273                 device_printf(dev, "Warning, %s %u out of range, "
274                         "using min %u\n", what, v, min);
275                 v = min;
276         }
277         return v;
278 }
279
280 /*
281  * Select PLL configuration for 795x parts.  This is complicated in
282  * that we cannot determine the optimal parameters without user input.
283  * The reference clock is derived from an external clock through a
284  * multiplier.  The external clock is either the host bus (i.e. PCI)
285  * or an external clock generator.  When using the PCI bus we assume
286  * the clock is either 33 or 66 MHz; for an external source we cannot
287  * tell the speed.
288  *
289  * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
290  * for an external source, followed by the frequency.  We calculate
291  * the appropriate multiplier and PLL register contents accordingly.
292  * When no configuration is given we default to "ext66".  If a card
293  * is configured to use the PCI bus clock but sits in a 33MHz slot,
294  * it will operate at half speed until the correct frequency is
295  * provided.
296  *
297  * The "ext66" default is used because, according to Mike Ham of HiFn,
298  * almost every board in existence has an external crystal populated
299  * at 66MHz.  Using PCI can be a problem on modern motherboards,
300  * because PCI33 can have clocks from 0 to 33MHz, and some have
301  * non-PCI-compliant spread-spectrum clocks, which can confuse the PLL.
302  */
303 static void
304 hifn_getpllconfig(device_t dev, u_int *pll)
305 {
306         const char *pllspec;
307         u_int freq, mul, fl, fh;
308         u_int32_t pllconfig;
309         char *nxt;
310
311         if (resource_string_value("hifn", device_get_unit(dev),
312             "pllconfig", &pllspec))
313                 pllspec = "ext66";
314         fl = 33, fh = 66;
315         pllconfig = 0;
316         if (strncmp(pllspec, "ext", 3) == 0) {
317                 pllspec += 3;
318                 pllconfig |= HIFN_PLL_REF_SEL;
319                 switch (pci_get_device(dev)) {
320                 case PCI_PRODUCT_HIFN_7955:
321                 case PCI_PRODUCT_HIFN_7956:
322                         fl = 20, fh = 100;
323                         break;
324 #ifdef notyet
325                 case PCI_PRODUCT_HIFN_7954:
326                         fl = 20, fh = 66;
327                         break;
328 #endif
329                 }
330         } else if (strncmp(pllspec, "pci", 3) == 0)
331                 pllspec += 3;
332         freq = strtoul(pllspec, &nxt, 10);
333         if (nxt == pllspec)
334                 freq = 66;
335         else
336                 freq = checkmaxmin(dev, "frequency", freq, fl, fh);
337         /*
338          * Calculate multiplier.  We target a Fck of 266 MHz,
339          * allowing only even values, possibly rounded down.
340          * Multipliers > 8 must set the charge pump current.
341          */
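	/*
	 * For example (values purely illustrative): a 66MHz reference
	 * gives 266 / 66 = 4, so mul = 4 (Fck ~= 264MHz) and an ND field
	 * of 4 / 2 - 1 = 1; a 33MHz PCI clock gives mul = 8 and ND = 3.
	 */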
342         mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
343         pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
344         if (mul > 8)
345                 pllconfig |= HIFN_PLL_IS;
346         *pll = pllconfig;
347 }
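
/*
 * The pllconfig string read above comes from the device hints mechanism,
 * so (as a sketch, assuming unit 0) a board that really is driven by a
 * 33MHz PCI clock could be configured in /boot/device.hints with a line
 * such as:
 *
 *	hint.hifn.0.pllconfig="pci33"
 *
 * The unit number and frequency are of course system dependent.
 */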
348
349 /*
350  * Attach an interface that successfully probed.
351  */
352 static int 
353 hifn_attach(device_t dev)
354 {
355         struct hifn_softc *sc = device_get_softc(dev);
356         caddr_t kva;
357         int rseg, rid;
358         char rbase;
359         u_int16_t ena, rev;
360
361         sc->sc_dev = dev;
362
363         mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF);
364
365         /* XXX handle power management */
366
367         /*
368          * The 7951 and 795x have a random number generator and
369          * public key support; note this.
370          */
371         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
372             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
373              pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
374              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
375                 sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
376         /*
377          * The 7811 has a random number generator; we also note its
378          * identity because of some quirks specific to that part.
379          */
380         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
381             pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
382                 sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
383
384         /*
385          * The 795x parts support AES.
386          */
387         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
388             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
389              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
390                 sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
391                 /*
392                  * Select PLL configuration.  This depends on the
393                  * bus and board design and must be manually configured
394                  * if the default setting is unacceptable.
395                  */
396                 hifn_getpllconfig(dev, &sc->sc_pllconfig);
397         }
398
399         /*
400          * Setup PCI resources. Note that we record the bus
401          * tag and handle for each register mapping, this is
402          * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
403          * and WRITE_REG_1 macros throughout the driver.
404          */
405         pci_enable_busmaster(dev);
406
407         rid = HIFN_BAR0;
408         sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
409                                                 RF_ACTIVE);
410         if (sc->sc_bar0res == NULL) {
411                 device_printf(dev, "cannot map bar%d register space\n", 0);
412                 goto fail_pci;
413         }
414         sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
415         sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
416         sc->sc_bar0_lastreg = (bus_size_t) -1;
417
418         rid = HIFN_BAR1;
419         sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
420                                                 RF_ACTIVE);
421         if (sc->sc_bar1res == NULL) {
422                 device_printf(dev, "cannot map bar%d register space\n", 1);
423                 goto fail_io0;
424         }
425         sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
426         sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
427         sc->sc_bar1_lastreg = (bus_size_t) -1;
428
429         hifn_set_retry(sc);
430
431         /*
432          * Set up the area into which the Hifn DMAs its descriptors
433          * and associated data structures.
434          */
435         if (bus_dma_tag_create(bus_get_dma_tag(dev),    /* PCI parent */
436                                1, 0,                    /* alignment,boundary */
437                                BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
438                                BUS_SPACE_MAXADDR,       /* highaddr */
439                                NULL, NULL,              /* filter, filterarg */
440                                HIFN_MAX_DMALEN,         /* maxsize */
441                                MAX_SCATTER,             /* nsegments */
442                                HIFN_MAX_SEGLEN,         /* maxsegsize */
443                                BUS_DMA_ALLOCNOW,        /* flags */
444                                NULL,                    /* lockfunc */
445                                NULL,                    /* lockarg */
446                                &sc->sc_dmat)) {
447                 device_printf(dev, "cannot allocate DMA tag\n");
448                 goto fail_io1;
449         }
450         if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
451                 device_printf(dev, "cannot create dma map\n");
452                 bus_dma_tag_destroy(sc->sc_dmat);
453                 goto fail_io1;
454         }
455         if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
456                 device_printf(dev, "cannot alloc dma buffer\n");
457                 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
458                 bus_dma_tag_destroy(sc->sc_dmat);
459                 goto fail_io1;
460         }
461         if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
462                              sizeof (*sc->sc_dma),
463                              hifn_dmamap_cb, &sc->sc_dma_physaddr,
464                              BUS_DMA_NOWAIT)) {
465                 device_printf(dev, "cannot load dma map\n");
466                 bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
467                 bus_dma_tag_destroy(sc->sc_dmat);
468                 goto fail_io1;
469         }
470         sc->sc_dma = (struct hifn_dma *)kva;
471         bzero(sc->sc_dma, sizeof(*sc->sc_dma));
472
473         KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
474         KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
475         KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
476         KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));
477
478         /*
479          * Reset the board and do the ``secret handshake''
480          * to enable the crypto support.  Then complete the
481          * initialization procedure by setting up the interrupt
482          * and hooking in to the system crypto support so we'll
483          * get used for system services like the crypto device,
484          * IPsec, RNG device, etc.
485          */
486         hifn_reset_board(sc, 0);
487
488         if (hifn_enable_crypto(sc) != 0) {
489                 device_printf(dev, "crypto enabling failed\n");
490                 goto fail_mem;
491         }
492         hifn_reset_puc(sc);
493
494         hifn_init_dma(sc);
495         hifn_init_pci_registers(sc);
496
497         /* XXX can't dynamically determine ram type for 795x; force dram */
498         if (sc->sc_flags & HIFN_IS_7956)
499                 sc->sc_drammodel = 1;
500         else if (hifn_ramtype(sc))
501                 goto fail_mem;
502
503         if (sc->sc_drammodel == 0)
504                 hifn_sramsize(sc);
505         else
506                 hifn_dramsize(sc);
507
508         /*
509          * Workaround for NetSec 7751 rev A: halve the RAM size because
510          * two of the address lines were left floating.
511          */
512         if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
513             pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
514             pci_get_revid(dev) == 0x61) /*XXX???*/
515                 sc->sc_ramsize >>= 1;
516
517         /*
518          * Arrange the interrupt line.
519          */
520         rid = 0;
521         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
522                                             RF_SHAREABLE|RF_ACTIVE);
523         if (sc->sc_irq == NULL) {
524                 device_printf(dev, "could not map interrupt\n");
525                 goto fail_mem;
526         }
527         /*
528          * NB: Network code assumes we are blocked with splimp()
529          *     so make sure the IRQ is marked appropriately.
530          */
531         if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
532                            NULL, hifn_intr, sc, &sc->sc_intrhand)) {
533                 device_printf(dev, "could not setup interrupt\n");
534                 goto fail_intr2;
535         }
536
537         hifn_sessions(sc);
538
539         /*
540          * NB: Keep only the low 16 bits; this masks the chip id
541          *     from the 7951.
542          */
543         rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
544
545         rseg = sc->sc_ramsize / 1024;
546         rbase = 'K';
547         if (sc->sc_ramsize >= (1024 * 1024)) {
548                 rbase = 'M';
549                 rseg /= 1024;
550         }
551         device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
552                 hifn_partname(sc), rev,
553                 rseg, rbase, sc->sc_drammodel ? 'd' : 's');
554         if (sc->sc_flags & HIFN_IS_7956)
555                 printf(", pll=0x%x<%s clk, %ux mult>",
556                         sc->sc_pllconfig,
557                         sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
558                         2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
559         printf("\n");
560
561         sc->sc_cid = crypto_get_driverid(dev, sizeof(struct hifn_session),
562             CRYPTOCAP_F_HARDWARE);
563         if (sc->sc_cid < 0) {
564                 device_printf(dev, "could not get crypto driver id\n");
565                 goto fail_intr;
566         }
567
568         WRITE_REG_0(sc, HIFN_0_PUCNFG,
569             READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
570         ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
571
572         switch (ena) {
573         case HIFN_PUSTAT_ENA_2:
574                 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
575                 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
576                 if (sc->sc_flags & HIFN_HAS_AES)
577                         crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
578                 /*FALLTHROUGH*/
579         case HIFN_PUSTAT_ENA_1:
580                 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
581                 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
582                 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
583                 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
584                 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
585                 break;
586         }
587
588         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
589             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
590
591         if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
592                 hifn_init_pubrng(sc);
593
594         callout_init(&sc->sc_tickto, 1);
595         callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
596
597         return (0);
598
599 fail_intr:
600         bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
601 fail_intr2:
602         /* XXX don't store rid */
603         bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
604 fail_mem:
605         bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
606         bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
607         bus_dma_tag_destroy(sc->sc_dmat);
608
609         /* Turn off DMA polling */
610         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
611             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
612 fail_io1:
613         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
614 fail_io0:
615         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
616 fail_pci:
617         mtx_destroy(&sc->sc_mtx);
618         return (ENXIO);
619 }
620
621 /*
622  * Detach an interface that successfully probed.
623  */
624 static int 
625 hifn_detach(device_t dev)
626 {
627         struct hifn_softc *sc = device_get_softc(dev);
628
629         KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
630
631         /* disable interrupts */
632         WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
633
634         /*XXX other resources */
635         callout_stop(&sc->sc_tickto);
636         callout_stop(&sc->sc_rngto);
637 #ifdef HIFN_RNDTEST
638         if (sc->sc_rndtest)
639                 rndtest_detach(sc->sc_rndtest);
640 #endif
641
642         /* Turn off DMA polling */
643         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
644             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
645
646         crypto_unregister_all(sc->sc_cid);
647
648         bus_generic_detach(dev);        /*XXX should be no children, right? */
649
650         bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
651         /* XXX don't store rid */
652         bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
653
654         bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
655         bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
656         bus_dma_tag_destroy(sc->sc_dmat);
657
658         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
659         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
660
661         mtx_destroy(&sc->sc_mtx);
662
663         return (0);
664 }
665
666 /*
667  * Stop all chip I/O so that the kernel's probe routines don't
668  * get confused by errant DMAs when rebooting.
669  */
670 static int
671 hifn_shutdown(device_t dev)
672 {
673 #ifdef notyet
674         hifn_stop(device_get_softc(dev));
675 #endif
676         return (0);
677 }
678
679 /*
680  * Device suspend routine.  Stop the interface and save some PCI
681  * settings in case the BIOS doesn't restore them properly on
682  * resume.
683  */
684 static int
685 hifn_suspend(device_t dev)
686 {
687         struct hifn_softc *sc = device_get_softc(dev);
688 #ifdef notyet
689         hifn_stop(sc);
690 #endif
691         sc->sc_suspended = 1;
692
693         return (0);
694 }
695
696 /*
697  * Device resume routine.  Restore some PCI settings in case the BIOS
698  * doesn't, re-enable busmastering, and restart the interface if
699  * appropriate.
700  */
701 static int
702 hifn_resume(device_t dev)
703 {
704         struct hifn_softc *sc = device_get_softc(dev);
705 #ifdef notyet
706         /* reinitialize interface if necessary */
707         if (ifp->if_flags & IFF_UP)
708                 rl_init(sc);
709 #endif
710         sc->sc_suspended = 0;
711
712         return (0);
713 }
714
715 static int
716 hifn_init_pubrng(struct hifn_softc *sc)
717 {
718         u_int32_t r;
719         int i;
720
721 #ifdef HIFN_RNDTEST
722         sc->sc_rndtest = rndtest_attach(sc->sc_dev);
723         if (sc->sc_rndtest)
724                 sc->sc_harvest = rndtest_harvest;
725         else
726                 sc->sc_harvest = default_harvest;
727 #else
728         sc->sc_harvest = default_harvest;
729 #endif
730         if ((sc->sc_flags & HIFN_IS_7811) == 0) {
731                 /* Reset 7951 public key/rng engine */
732                 WRITE_REG_1(sc, HIFN_1_PUB_RESET,
733                     READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
734
735                 for (i = 0; i < 100; i++) {
736                         DELAY(1000);
737                         if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
738                             HIFN_PUBRST_RESET) == 0)
739                                 break;
740                 }
741
742                 if (i == 100) {
743                         device_printf(sc->sc_dev, "public key init failed\n");
744                         return (1);
745                 }
746         }
747
748         /* Enable the rng, if available */
749         if (sc->sc_flags & HIFN_HAS_RNG) {
750                 if (sc->sc_flags & HIFN_IS_7811) {
751                         r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
752                         if (r & HIFN_7811_RNGENA_ENA) {
753                                 r &= ~HIFN_7811_RNGENA_ENA;
754                                 WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
755                         }
756                         WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
757                             HIFN_7811_RNGCFG_DEFL);
758                         r |= HIFN_7811_RNGENA_ENA;
759                         WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
760                 } else
761                         WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
762                             READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
763                             HIFN_RNGCFG_ENA);
764
765                 sc->sc_rngfirst = 1;
766                 if (hz >= 100)
767                         sc->sc_rnghz = hz / 100;
768                 else
769                         sc->sc_rnghz = 1;
770                 callout_init(&sc->sc_rngto, 1);
771                 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
772         }
773
774         /* Enable public key engine, if available */
775         if (sc->sc_flags & HIFN_HAS_PUBLIC) {
776                 WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
777                 sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
778                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
779 #ifdef HIFN_VULCANDEV
780                 sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0, 
781                                         UID_ROOT, GID_WHEEL, 0666,
782                                         "vulcanpk");
783                 sc->sc_pkdev->si_drv1 = sc;
784 #endif
785         }
786
787         return (0);
788 }
789
790 static void
791 hifn_rng(void *vsc)
792 {
793 #define RANDOM_BITS(n)  (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
794         struct hifn_softc *sc = vsc;
795         u_int32_t sts, num[2];
796         int i;
797
798         if (sc->sc_flags & HIFN_IS_7811) {
799                 /* ONLY VALID ON 7811!!!! */
800                 for (i = 0; i < 5; i++) {
801                         sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
802                         if (sts & HIFN_7811_RNGSTS_UFL) {
803                                 device_printf(sc->sc_dev,
804                                               "RNG underflow: disabling\n");
805                                 return;
806                         }
807                         if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
808                                 break;
809
810                         /*
811                          * There are at least two words in the RNG FIFO
812                          * at this point.
813                          */
814                         num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
815                         num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
816                         /* NB: discard first data read */
817                         if (sc->sc_rngfirst)
818                                 sc->sc_rngfirst = 0;
819                         else
820                                 (*sc->sc_harvest)(sc->sc_rndtest,
821                                         num, sizeof (num));
822                 }
823         } else {
824                 num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);
825
826                 /* NB: discard first data read */
827                 if (sc->sc_rngfirst)
828                         sc->sc_rngfirst = 0;
829                 else
830                         (*sc->sc_harvest)(sc->sc_rndtest,
831                                 num, sizeof (num[0]));
832         }
833
834         callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
835 #undef RANDOM_BITS
836 }
837
838 static void
839 hifn_puc_wait(struct hifn_softc *sc)
840 {
841         int i;
842         int reg = HIFN_0_PUCTRL;
843
844         if (sc->sc_flags & HIFN_IS_7956) {
845                 reg = HIFN_0_PUCTRL2;
846         }
847
848         for (i = 5000; i > 0; i--) {
849                 DELAY(1);
850                 if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
851                         break;
852         }
853         if (!i)
854                 device_printf(sc->sc_dev, "proc unit did not reset\n");
855 }
856
857 /*
858  * Reset the processing unit.
859  */
860 static void
861 hifn_reset_puc(struct hifn_softc *sc)
862 {
863         /* Reset processing unit */
864         int reg = HIFN_0_PUCTRL;
865
866         if (sc->sc_flags & HIFN_IS_7956) {
867                 reg = HIFN_0_PUCTRL2;
868         }
869         WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
870
871         hifn_puc_wait(sc);
872 }
873
874 /*
875  * Set the Retry and TRDY registers; note that we set them to
876  * zero because the 7811 locks up when forced to retry (section
877  * 3.6 of "Specification Update SU-0014-04").  It is not clear whether
878  * we should do this for all Hifn parts, but it doesn't seem to hurt.
879  */
880 static void
881 hifn_set_retry(struct hifn_softc *sc)
882 {
883         /* NB: RETRY only responds to 8-bit reads/writes */
884         pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
885         pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 1);
886 }
887
888 /*
889  * Reset the board.  Values in the registers are left as-is
890  * from the reset (i.e. initial values are assigned elsewhere).
891  */
892 static void
893 hifn_reset_board(struct hifn_softc *sc, int full)
894 {
895         u_int32_t reg;
896
897         /*
898          * Set polling in the DMA configuration register to zero.  0x7 avoids
899          * resetting the board and zeros out the other fields.
900          */
901         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
902             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
903
904         /*
905          * Now that polling has been disabled, we have to wait 1 ms
906          * before resetting the board.
907          */
908         DELAY(1000);
909
910         /* Reset the DMA unit */
911         if (full) {
912                 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
913                 DELAY(1000);
914         } else {
915                 WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
916                     HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
917                 hifn_reset_puc(sc);
918         }
919
920         KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
921         bzero(sc->sc_dma, sizeof(*sc->sc_dma));
922
923         /* Bring dma unit out of reset */
924         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
925             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
926
927         hifn_puc_wait(sc);
928         hifn_set_retry(sc);
929
930         if (sc->sc_flags & HIFN_IS_7811) {
931                 for (reg = 0; reg < 1000; reg++) {
932                         if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
933                             HIFN_MIPSRST_CRAMINIT)
934                                 break;
935                         DELAY(1000);
936                 }
937                 if (reg == 1000)
938                         printf(": cram init timeout\n");
939         } else {
940           /* set up DMA configuration register #2 */
941           /* turn off all PK and BAR0 swaps */
942           WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
943                       (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
944                       (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
945                       (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
946                       (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
947         }
948                       
949 }
950
951 static u_int32_t
952 hifn_next_signature(u_int32_t a, u_int cnt)
953 {
954         int i;
955         u_int32_t v;
956
957         for (i = 0; i < cnt; i++) {
958
959                 /* get the parity */
960                 v = a & 0x80080125;
961                 v ^= v >> 16;
962                 v ^= v >> 8;
963                 v ^= v >> 4;
964                 v ^= v >> 2;
965                 v ^= v >> 1;
966
967                 a = (v & 1) ^ (a << 1);
968         }
969
970         return a;
971 }
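
/*
 * A small worked example (illustrative only): starting from a = 0x1, the
 * masked value a & 0x80080125 is 0x1, whose parity is 1, so one step
 * yields (1 ^ (a << 1)) = 0x3.  Repeating this cnt times behaves like a
 * linear feedback shift register with 0x80080125 as the feedback mask.
 */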
972
973 struct pci2id {
974         u_short         pci_vendor;
975         u_short         pci_prod;
976         char            card_id[13];
977 };
978 static struct pci2id pci2id[] = {
979         {
980                 PCI_VENDOR_HIFN,
981                 PCI_PRODUCT_HIFN_7951,
982                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
983                   0x00, 0x00, 0x00, 0x00, 0x00 }
984         }, {
985                 PCI_VENDOR_HIFN,
986                 PCI_PRODUCT_HIFN_7955,
987                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
988                   0x00, 0x00, 0x00, 0x00, 0x00 }
989         }, {
990                 PCI_VENDOR_HIFN,
991                 PCI_PRODUCT_HIFN_7956,
992                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
993                   0x00, 0x00, 0x00, 0x00, 0x00 }
994         }, {
995                 PCI_VENDOR_NETSEC,
996                 PCI_PRODUCT_NETSEC_7751,
997                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
998                   0x00, 0x00, 0x00, 0x00, 0x00 }
999         }, {
1000                 PCI_VENDOR_INVERTEX,
1001                 PCI_PRODUCT_INVERTEX_AEON,
1002                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1003                   0x00, 0x00, 0x00, 0x00, 0x00 }
1004         }, {
1005                 PCI_VENDOR_HIFN,
1006                 PCI_PRODUCT_HIFN_7811,
1007                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1008                   0x00, 0x00, 0x00, 0x00, 0x00 }
1009         }, {
1010                 /*
1011                  * Other vendors share this PCI ID as well, such as
1012                  * http://www.powercrypt.com, and obviously they also
1013                  * use the same key.
1014                  */
1015                 PCI_VENDOR_HIFN,
1016                 PCI_PRODUCT_HIFN_7751,
1017                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1018                   0x00, 0x00, 0x00, 0x00, 0x00 }
1019         },
1020 };
1021
1022 /*
1023  * Check whether crypto is already enabled.  If it is not, run the
1024  * unlock sequence to enable it.  The check is important, as running
1025  * the unlock sequence twice will lock the board.
1026  */
1027 static int 
1028 hifn_enable_crypto(struct hifn_softc *sc)
1029 {
1030         u_int32_t dmacfg, ramcfg, encl, addr, i;
1031         char *offtbl = NULL;
1032
1033         for (i = 0; i < nitems(pci2id); i++) {
1034                 if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
1035                     pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
1036                         offtbl = pci2id[i].card_id;
1037                         break;
1038                 }
1039         }
1040         if (offtbl == NULL) {
1041                 device_printf(sc->sc_dev, "Unknown card!\n");
1042                 return (1);
1043         }
1044
1045         ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1046         dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
1047
1048         /*
1049          * The RAM config register's encrypt level bit needs to be set before
1050          * every read performed on the encryption level register.
1051          */
1052         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1053
1054         encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1055
1056         /*
1057          * Make sure we don't re-unlock.  Unlocking twice kills the chip
1058          * until the next reboot.
1059          */
1060         if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
1061 #ifdef HIFN_DEBUG
1062                 if (hifn_debug)
1063                         device_printf(sc->sc_dev,
1064                             "Strong crypto already enabled!\n");
1065 #endif
1066                 goto report;
1067         }
1068
1069         if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
1070 #ifdef HIFN_DEBUG
1071                 if (hifn_debug)
1072                         device_printf(sc->sc_dev,
1073                               "Unknown encryption level 0x%x\n", encl);
1074 #endif
1075                 return 1;
1076         }
1077
1078         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
1079             HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
1080         DELAY(1000);
1081         addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
1082         DELAY(1000);
1083         WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
1084         DELAY(1000);
1085
1086         for (i = 0; i <= 12; i++) {
1087                 addr = hifn_next_signature(addr, offtbl[i] + 0x101);
1088                 WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
1089
1090                 DELAY(1000);
1091         }
1092
1093         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1094         encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1095
1096 #ifdef HIFN_DEBUG
1097         if (hifn_debug) {
1098                 if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
1099                         device_printf(sc->sc_dev, "Engine is permanently "
1100                                 "locked until next system reset!\n");
1101                 else
1102                         device_printf(sc->sc_dev, "Engine enabled "
1103                                 "successfully!\n");
1104         }
1105 #endif
1106
1107 report:
1108         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
1109         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
1110
1111         switch (encl) {
1112         case HIFN_PUSTAT_ENA_1:
1113         case HIFN_PUSTAT_ENA_2:
1114                 break;
1115         case HIFN_PUSTAT_ENA_0:
1116         default:
1117                 device_printf(sc->sc_dev, "disabled");
1118                 break;
1119         }
1120
1121         return 0;
1122 }
1123
1124 /*
1125  * Give initial values to the registers listed in the "Register Space"
1126  * section of the HIFN Software Development reference manual.
1127  */
1128 static void 
1129 hifn_init_pci_registers(struct hifn_softc *sc)
1130 {
1131         /* write fixed values needed by the Initialization registers */
1132         WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
1133         WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
1134         WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
1135
1136         /* write all 4 ring address registers */
1137         WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
1138             offsetof(struct hifn_dma, cmdr[0]));
1139         WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
1140             offsetof(struct hifn_dma, srcr[0]));
1141         WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
1142             offsetof(struct hifn_dma, dstr[0]));
1143         WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
1144             offsetof(struct hifn_dma, resr[0]));
1145
1146         DELAY(2000);
1147
1148         /* write status register */
1149         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1150             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
1151             HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
1152             HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
1153             HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
1154             HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
1155             HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
1156             HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
1157             HIFN_DMACSR_S_WAIT |
1158             HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
1159             HIFN_DMACSR_C_WAIT |
1160             HIFN_DMACSR_ENGINE |
1161             ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
1162                 HIFN_DMACSR_PUBDONE : 0) |
1163             ((sc->sc_flags & HIFN_IS_7811) ?
1164                 HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
1165
1166         sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
1167         sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
1168             HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
1169             HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
1170             ((sc->sc_flags & HIFN_IS_7811) ?
1171                 HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
1172         sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
1173         WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1174
1175
1176         if (sc->sc_flags & HIFN_IS_7956) {
1177                 u_int32_t pll;
1178
1179                 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1180                     HIFN_PUCNFG_TCALLPHASES |
1181                     HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
1182
1183                 /* turn off the clocks and ensure bypass is set */
1184                 pll = READ_REG_1(sc, HIFN_1_PLL);
1185                 pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
1186                   | HIFN_PLL_BP | HIFN_PLL_MBSET;
1187                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1188                 DELAY(10*1000);         /* 10ms */
1189
1190                 /* change configuration */
1191                 pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
1192                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1193                 DELAY(10*1000);         /* 10ms */
1194
1195                 /* disable bypass */
1196                 pll &= ~HIFN_PLL_BP;
1197                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1198                 /* enable clocks with new configuration */
1199                 pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
1200                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1201         } else {
1202                 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1203                     HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
1204                     HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
1205                     (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
1206         }
1207
1208         WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
1209         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
1210             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
1211             ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
1212             ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
1213 }
1214
1215 /*
1216  * The maximum number of sessions supported by the card
1217  * is dependent on the amount of context ram, which
1218  * encryption algorithms are enabled, and how compression
1219  * is configured.  This should be configured before this
1220  * routine is called.
1221  */
1222 static void
1223 hifn_sessions(struct hifn_softc *sc)
1224 {
1225         u_int32_t pucnfg;
1226         int ctxsize;
1227
1228         pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1229
1230         if (pucnfg & HIFN_PUCNFG_COMPSING) {
1231                 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1232                         ctxsize = 128;
1233                 else
1234                         ctxsize = 512;
1235                 /*
1236                  * The 7955/7956 have 32K of internal context memory.
1237                  */
1238                 if (sc->sc_flags & HIFN_IS_7956)
1239                         sc->sc_maxses = 32768 / ctxsize;
1240                 else
1241                         sc->sc_maxses = 1 +
1242                             ((sc->sc_ramsize - 32768) / ctxsize);
1243         } else
1244                 sc->sc_maxses = sc->sc_ramsize / 16384;
1245
1246         if (sc->sc_maxses > 2048)
1247                 sc->sc_maxses = 2048;
1248 }
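
/*
 * As a rough illustration (numbers assumed, not measured): a part with
 * 512KB of external context RAM and a 128-byte context size would give
 * 1 + (524288 - 32768) / 128 = 3841 sessions, which the cap above then
 * limits to 2048.
 */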
1249
1250 /*
1251  * Determine ram type (sram or dram).  Board should be just out of a reset
1252  * state when this is called.
1253  */
1254 static int
1255 hifn_ramtype(struct hifn_softc *sc)
1256 {
1257         u_int8_t data[8], dataexpect[8];
1258         int i;
1259
1260         for (i = 0; i < sizeof(data); i++)
1261                 data[i] = dataexpect[i] = 0x55;
1262         if (hifn_writeramaddr(sc, 0, data))
1263                 return (-1);
1264         if (hifn_readramaddr(sc, 0, data))
1265                 return (-1);
1266         if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1267                 sc->sc_drammodel = 1;
1268                 return (0);
1269         }
1270
1271         for (i = 0; i < sizeof(data); i++)
1272                 data[i] = dataexpect[i] = 0xaa;
1273         if (hifn_writeramaddr(sc, 0, data))
1274                 return (-1);
1275         if (hifn_readramaddr(sc, 0, data))
1276                 return (-1);
1277         if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1278                 sc->sc_drammodel = 1;
1279                 return (0);
1280         }
1281
1282         return (0);
1283 }
1284
1285 #define HIFN_SRAM_MAX           (32 << 20)
1286 #define HIFN_SRAM_STEP_SIZE     16384
1287 #define HIFN_SRAM_GRANULARITY   (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1288
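/*
 * SRAM sizing works by writing a distinct pattern at every 16KB step
 * (HIFN_SRAM_STEP_SIZE) across the 32MB maximum, from the top down, and
 * then reading the steps back in ascending order; the first step whose
 * pattern does not read back (e.g. because of address aliasing) marks
 * the end of the usable RAM.
 */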
1289 static int
1290 hifn_sramsize(struct hifn_softc *sc)
1291 {
1292         u_int32_t a;
1293         u_int8_t data[8];
1294         u_int8_t dataexpect[sizeof(data)];
1295         int32_t i;
1296
1297         for (i = 0; i < sizeof(data); i++)
1298                 data[i] = dataexpect[i] = i ^ 0x5a;
1299
1300         for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1301                 a = i * HIFN_SRAM_STEP_SIZE;
1302                 bcopy(&i, data, sizeof(i));
1303                 hifn_writeramaddr(sc, a, data);
1304         }
1305
1306         for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1307                 a = i * HIFN_SRAM_STEP_SIZE;
1308                 bcopy(&i, dataexpect, sizeof(i));
1309                 if (hifn_readramaddr(sc, a, data) < 0)
1310                         return (0);
1311                 if (bcmp(data, dataexpect, sizeof(data)) != 0)
1312                         return (0);
1313                 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1314         }
1315
1316         return (0);
1317 }
1318
1319 /*
1320  * XXX For dram boards, one should really try all of the
1321  * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
1322  * is already set up correctly.
1323  */
1324 static int
1325 hifn_dramsize(struct hifn_softc *sc)
1326 {
1327         u_int32_t cnfg;
1328
1329         if (sc->sc_flags & HIFN_IS_7956) {
1330                 /*
1331                  * 7955/7956 have a fixed internal ram of only 32K.
1332                  */
1333                 sc->sc_ramsize = 32768;
1334         } else {
1335                 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1336                     HIFN_PUCNFG_DRAMMASK;
1337                 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1338         }
1339         return (0);
1340 }
1341
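/*
 * Grab the next free slot on each of the four descriptor rings (command,
 * source, destination and result).  When an index reaches the end of a
 * ring, the extra descriptor kept past the last slot is rewritten as a
 * JUMP descriptor so the chip wraps back to the start of that ring.
 */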
1342 static void
1343 hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
1344 {
1345         struct hifn_dma *dma = sc->sc_dma;
1346
1347         if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) {
1348                 sc->sc_cmdi = 0;
1349                 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
1350                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1351                 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1352                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1353         }
1354         *cmdp = sc->sc_cmdi++;
1355         sc->sc_cmdk = sc->sc_cmdi;
1356
1357         if (sc->sc_srci == HIFN_D_SRC_RSIZE) {
1358                 sc->sc_srci = 0;
1359                 dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
1360                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1361                 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1362                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1363         }
1364         *srcp = sc->sc_srci++;
1365         sc->sc_srck = sc->sc_srci;
1366
1367         if (sc->sc_dsti == HIFN_D_DST_RSIZE) {
1368                 sc->sc_dsti = 0;
1369                 dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
1370                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1371                 HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
1372                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1373         }
1374         *dstp = sc->sc_dsti++;
1375         sc->sc_dstk = sc->sc_dsti;
1376
1377         if (sc->sc_resi == HIFN_D_RES_RSIZE) {
1378                 sc->sc_resi = 0;
1379                 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
1380                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1381                 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
1382                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1383         }
1384         *resp = sc->sc_resi++;
1385         sc->sc_resk = sc->sc_resi;
1386 }
1387
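/*
 * hifn_writeramaddr() and hifn_readramaddr() move 8 bytes between host
 * memory and external RAM with a single command: the RAM offset is split
 * into a 14-bit byte offset (the low bits of "addr") and a session
 * number (addr >> 14), and the data travels through the test_src and
 * test_dst scratch words in the shared DMA area.
 */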
1388 static int
1389 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1390 {
1391         struct hifn_dma *dma = sc->sc_dma;
1392         hifn_base_command_t wc;
1393         const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1394         int r, cmdi, resi, srci, dsti;
1395
1396         wc.masks = htole16(3 << 13);
1397         wc.session_num = htole16(addr >> 14);
1398         wc.total_source_count = htole16(8);
1399         wc.total_dest_count = htole16(addr & 0x3fff);
1400
1401         hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1402
1403         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1404             HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1405             HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1406
1407         /* build write command */
1408         bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1409         *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1410         bcopy(data, &dma->test_src, sizeof(dma->test_src));
1411
1412         dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1413             + offsetof(struct hifn_dma, test_src));
1414         dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1415             + offsetof(struct hifn_dma, test_dst));
1416
1417         dma->cmdr[cmdi].l = htole32(16 | masks);
1418         dma->srcr[srci].l = htole32(8 | masks);
1419         dma->dstr[dsti].l = htole32(4 | masks);
1420         dma->resr[resi].l = htole32(4 | masks);
1421
1422         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1423             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1424
1425         for (r = 10000; r >= 0; r--) {
1426                 DELAY(10);
1427                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1428                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1429                 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1430                         break;
1431                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1432                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1433         }
1434         if (r < 0) {
1435                 device_printf(sc->sc_dev, "writeramaddr -- "
1436                     "result[%d](addr %d) still valid\n", resi, addr);
1437                 /* fall through so the DMA channels are disabled below */
1438                 r = -1;
1439         } else
1440                 r = 0;
1441
1442         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1443             HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1444             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1445
1446         return (r);
1447 }
1448
1449 static int
1450 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1451 {
1452         struct hifn_dma *dma = sc->sc_dma;
1453         hifn_base_command_t rc;
1454         const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1455         int r, cmdi, srci, dsti, resi;
1456
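        /*
         * As in hifn_writeramaddr(), but with the RAM-read operation:
         * the source address goes into session_num and the low 14 bits
         * of the source count, and 8 bytes come back through test_dst.
         */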
1457         rc.masks = htole16(2 << 13);
1458         rc.session_num = htole16(addr >> 14);
1459         rc.total_source_count = htole16(addr & 0x3fff);
1460         rc.total_dest_count = htole16(8);
1461
1462         hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1463
1464         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1465             HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1466             HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1467
1468         bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1469         *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1470
1471         dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1472             offsetof(struct hifn_dma, test_src));
1473         dma->test_src = 0;
1474         dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
1475             offsetof(struct hifn_dma, test_dst));
1476         dma->test_dst = 0;
1477         dma->cmdr[cmdi].l = htole32(8 | masks);
1478         dma->srcr[srci].l = htole32(8 | masks);
1479         dma->dstr[dsti].l = htole32(8 | masks);
1480         dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1481
1482         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1483             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1484
1485         for (r = 10000; r >= 0; r--) {
1486                 DELAY(10);
1487                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1488                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1489                 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1490                         break;
1491                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1492                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1493         }
1494         if (r < 0) {
1495                 device_printf(sc->sc_dev, "readramaddr -- "
1496                     "result[%d](addr %d) still valid\n", resi, addr);
1497                 r = -1;
1498         } else {
1499                 r = 0;
1500                 bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1501         }
1502
1503         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1504             HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1505             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1506
1507         return (r);
1508 }
1509
1510 /*
1511  * Initialize the descriptor rings.
1512  */
1513 static void 
1514 hifn_init_dma(struct hifn_softc *sc)
1515 {
1516         struct hifn_dma *dma = sc->sc_dma;
1517         int i;
1518
1519         hifn_set_retry(sc);
1520
1521         /* initialize static pointer values */
1522         for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1523                 dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
1524                     offsetof(struct hifn_dma, command_bufs[i][0]));
1525         for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1526                 dma->resr[i].p = htole32(sc->sc_dma_physaddr +
1527                     offsetof(struct hifn_dma, result_bufs[i][0]));
1528
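        /*
         * The extra descriptor at the end of each ring permanently points
         * back to that ring's first entry; hifn_alloc_slot() and the
         * *wrap() helpers mark it VALID|JUMP whenever an index wraps so
         * the chip follows it back to slot 0.
         */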
1529         dma->cmdr[HIFN_D_CMD_RSIZE].p =
1530             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
1531         dma->srcr[HIFN_D_SRC_RSIZE].p =
1532             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
1533         dma->dstr[HIFN_D_DST_RSIZE].p =
1534             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
1535         dma->resr[HIFN_D_RES_RSIZE].p =
1536             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
1537
1538         sc->sc_cmdu = sc->sc_srcu = sc->sc_dstu = sc->sc_resu = 0;
1539         sc->sc_cmdi = sc->sc_srci = sc->sc_dsti = sc->sc_resi = 0;
1540         sc->sc_cmdk = sc->sc_srck = sc->sc_dstk = sc->sc_resk = 0;
1541 }
1542
1543 /*
1544  * Writes out the raw command buffer space.  Returns the
1545  * command buffer size.
1546  */
1547 static u_int
1548 hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
1549 {
1550         u_int8_t *buf_pos;
1551         hifn_base_command_t *base_cmd;
1552         hifn_mac_command_t *mac_cmd;
1553         hifn_crypt_command_t *cry_cmd;
1554         int using_mac, using_crypt, len, ivlen;
1555         u_int32_t dlen, slen;
1556
1557         buf_pos = buf;
1558         using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
1559         using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
1560
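        /*
         * The base command carries the low 16 bits of the source and
         * destination sizes directly; any overflow bits are packed into
         * the session_num field below.
         */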
1561         base_cmd = (hifn_base_command_t *)buf_pos;
1562         base_cmd->masks = htole16(cmd->base_masks);
1563         slen = cmd->src_mapsize;
1564         if (cmd->sloplen)
1565                 dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
1566         else
1567                 dlen = cmd->dst_mapsize;
1568         base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
1569         base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1570         dlen >>= 16;
1571         slen >>= 16;
1572         base_cmd->session_num = htole16(
1573             ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1574             ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1575         buf_pos += sizeof(hifn_base_command_t);
1576
1577         if (using_mac) {
1578                 mac_cmd = (hifn_mac_command_t *)buf_pos;
1579                 dlen = cmd->maccrd->crd_len;
1580                 mac_cmd->source_count = htole16(dlen & 0xffff);
1581                 dlen >>= 16;
1582                 mac_cmd->masks = htole16(cmd->mac_masks |
1583                     ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
1584                 mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
1585                 mac_cmd->reserved = 0;
1586                 buf_pos += sizeof(hifn_mac_command_t);
1587         }
1588
1589         if (using_crypt) {
1590                 cry_cmd = (hifn_crypt_command_t *)buf_pos;
1591                 dlen = cmd->enccrd->crd_len;
1592                 cry_cmd->source_count = htole16(dlen & 0xffff);
1593                 dlen >>= 16;
1594                 cry_cmd->masks = htole16(cmd->cry_masks |
1595                     ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
1596                 cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
1597                 cry_cmd->reserved = 0;
1598                 buf_pos += sizeof(hifn_crypt_command_t);
1599         }
1600
1601         if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
1602                 bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
1603                 buf_pos += HIFN_MAC_KEY_LENGTH;
1604         }
1605
1606         if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
1607                 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1608                 case HIFN_CRYPT_CMD_ALG_3DES:
1609                         bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
1610                         buf_pos += HIFN_3DES_KEY_LENGTH;
1611                         break;
1612                 case HIFN_CRYPT_CMD_ALG_DES:
1613                         bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
1614                         buf_pos += HIFN_DES_KEY_LENGTH;
1615                         break;
1616                 case HIFN_CRYPT_CMD_ALG_RC4:
1617                         len = 256;
1618                         do {
1619                                 int clen;
1620
1621                                 clen = MIN(cmd->cklen, len);
1622                                 bcopy(cmd->ck, buf_pos, clen);
1623                                 len -= clen;
1624                                 buf_pos += clen;
1625                         } while (len > 0);
1626                         bzero(buf_pos, 4);
1627                         buf_pos += 4;
1628                         break;
1629                 case HIFN_CRYPT_CMD_ALG_AES:
1630                         /*
1631                          * AES keys are variable 128, 192 and
1632                          * 256 bits (16, 24 and 32 bytes).
1633                          */
1634                         bcopy(cmd->ck, buf_pos, cmd->cklen);
1635                         buf_pos += cmd->cklen;
1636                         break;
1637                 }
1638         }
1639
1640         if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
1641                 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1642                 case HIFN_CRYPT_CMD_ALG_AES:
1643                         ivlen = HIFN_AES_IV_LENGTH;
1644                         break;
1645                 default:
1646                         ivlen = HIFN_IV_LENGTH;
1647                         break;
1648                 }
1649                 bcopy(cmd->iv, buf_pos, ivlen);
1650                 buf_pos += ivlen;
1651         }
1652
1653         if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
1654                 bzero(buf_pos, 8);
1655                 buf_pos += 8;
1656         }
1657
1658         return (buf_pos - buf);
1659 }
1660
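/*
 * Check whether an operand can be DMA'd as-is: every segment must start
 * on a 32-bit boundary, and every segment but the last must also be a
 * multiple of 4 bytes long.  Unaligned mbuf sources are bounced through
 * a freshly allocated chain in hifn_crypto(); unaligned iovecs are
 * rejected.
 */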
1661 static int
1662 hifn_dmamap_aligned(struct hifn_operand *op)
1663 {
1664         int i;
1665
1666         for (i = 0; i < op->nsegs; i++) {
1667                 if (op->segs[i].ds_addr & 3)
1668                         return (0);
1669                 if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1670                         return (0);
1671         }
1672         return (1);
1673 }
1674
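/*
 * Advance a destination ring index; when it passes the last real slot,
 * turn the sentinel descriptor into a live jump back to entry 0 and
 * restart the index there.
 */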
1675 static __inline int
1676 hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
1677 {
1678         struct hifn_dma *dma = sc->sc_dma;
1679
1680         if (++idx == HIFN_D_DST_RSIZE) {
1681                 dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1682                     HIFN_D_MASKDONEIRQ);
1683                 HIFN_DSTR_SYNC(sc, idx,
1684                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1685                 idx = 0;
1686         }
1687         return (idx);
1688 }
1689
1690 static int
1691 hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
1692 {
1693         struct hifn_dma *dma = sc->sc_dma;
1694         struct hifn_operand *dst = &cmd->dst;
1695         u_int32_t p, l;
1696         int idx, used = 0, i;
1697
1698         idx = sc->sc_dsti;
1699         for (i = 0; i < dst->nsegs - 1; i++) {
1700                 dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1701                 dma->dstr[idx].l = htole32(HIFN_D_VALID |
1702                     HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
1703                 HIFN_DSTR_SYNC(sc, idx,
1704                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1705                 used++;
1706
1707                 idx = hifn_dmamap_dstwrap(sc, idx);
1708         }
1709
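        /*
         * If the destination length is not a multiple of 4, the final
         * descriptor is pointed at a 32-bit "slop" word in the shared
         * DMA block rather than at the caller's buffer; hifn_callback()
         * copies those trailing bytes back out on completion.
         */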
1710         if (cmd->sloplen == 0) {
1711                 p = dst->segs[i].ds_addr;
1712                 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1713                     dst->segs[i].ds_len;
1714         } else {
1715                 p = sc->sc_dma_physaddr +
1716                     offsetof(struct hifn_dma, slop[cmd->slopidx]);
1717                 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1718                     sizeof(u_int32_t);
1719
1720                 if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
1721                         dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1722                         dma->dstr[idx].l = htole32(HIFN_D_VALID |
1723                             HIFN_D_MASKDONEIRQ |
1724                             (dst->segs[i].ds_len - cmd->sloplen));
1725                         HIFN_DSTR_SYNC(sc, idx,
1726                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1727                         used++;
1728
1729                         idx = hifn_dmamap_dstwrap(sc, idx);
1730                 }
1731         }
1732         dma->dstr[idx].p = htole32(p);
1733         dma->dstr[idx].l = htole32(l);
1734         HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1735         used++;
1736
1737         idx = hifn_dmamap_dstwrap(sc, idx);
1738
1739         sc->sc_dsti = idx;
1740         sc->sc_dstu += used;
1741         return (idx);
1742 }
1743
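/* Like hifn_dmamap_dstwrap(), but for the source ring. */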
1744 static __inline int
1745 hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
1746 {
1747         struct hifn_dma *dma = sc->sc_dma;
1748
1749         if (++idx == HIFN_D_SRC_RSIZE) {
1750                 dma->srcr[idx].l = htole32(HIFN_D_VALID |
1751                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1752                 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1753                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1754                 idx = 0;
1755         }
1756         return (idx);
1757 }
1758
1759 static int
1760 hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1761 {
1762         struct hifn_dma *dma = sc->sc_dma;
1763         struct hifn_operand *src = &cmd->src;
1764         int idx, i;
1765         u_int32_t last = 0;
1766
1767         idx = sc->sc_srci;
1768         for (i = 0; i < src->nsegs; i++) {
1769                 if (i == src->nsegs - 1)
1770                         last = HIFN_D_LAST;
1771
1772                 dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1773                 dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1774                     HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1775                 HIFN_SRCR_SYNC(sc, idx,
1776                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1777
1778                 idx = hifn_dmamap_srcwrap(sc, idx);
1779         }
1780         sc->sc_srci = idx;
1781         sc->sc_srcu += src->nsegs;
1782         return (idx);
1783 }
1784
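/*
 * bus_dma callback: record the scatter/gather list and total mapped
 * size in the hifn_operand for the ring-loading routines above.
 */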
1785 static void
1786 hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
1787 {
1788         struct hifn_operand *op = arg;
1789
1790         KASSERT(nsegs <= MAX_SCATTER,
1791                 ("hifn_op_cb: too many DMA segments (%u > %u) "
1792                  "returned when mapping operand", nsegs, MAX_SCATTER));
1793         op->mapsize = mapsize;
1794         op->nsegs = nsegs;
1795         bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
1796 }
1797
1798 static int 
1799 hifn_crypto(
1800         struct hifn_softc *sc,
1801         struct hifn_command *cmd,
1802         struct cryptop *crp,
1803         int hint)
1804 {
1805         struct  hifn_dma *dma = sc->sc_dma;
1806         u_int32_t cmdlen, csr;
1807         int cmdi, resi, err = 0;
1808
1809         /*
1810          * need 1 cmd, and 1 res
1811          *
1812          * NB: check this first since it's easy.
1813          */
1814         HIFN_LOCK(sc);
1815         if ((sc->sc_cmdu + 1) > HIFN_D_CMD_RSIZE ||
1816             (sc->sc_resu + 1) > HIFN_D_RES_RSIZE) {
1817 #ifdef HIFN_DEBUG
1818                 if (hifn_debug) {
1819                         device_printf(sc->sc_dev,
1820                                 "cmd/result exhaustion, cmdu %u resu %u\n",
1821                                 sc->sc_cmdu, sc->sc_resu);
1822                 }
1823 #endif
1824                 hifnstats.hst_nomem_cr++;
1825                 HIFN_UNLOCK(sc);
1826                 return (ERESTART);
1827         }
1828
1829         if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
1830                 hifnstats.hst_nomem_map++;
1831                 HIFN_UNLOCK(sc);
1832                 return (ENOMEM);
1833         }
1834
1835         if (crp->crp_flags & CRYPTO_F_IMBUF) {
1836                 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
1837                     cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1838                         hifnstats.hst_nomem_load++;
1839                         err = ENOMEM;
1840                         goto err_srcmap1;
1841                 }
1842         } else if (crp->crp_flags & CRYPTO_F_IOV) {
1843                 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
1844                     cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1845                         hifnstats.hst_nomem_load++;
1846                         err = ENOMEM;
1847                         goto err_srcmap1;
1848                 }
1849         } else {
1850                 err = EINVAL;
1851                 goto err_srcmap1;
1852         }
1853
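        /*
         * An aligned source can be processed in place, so the destination
         * operand simply aliases the source map; the 1-3 bytes past the
         * last full word, if any, are routed through the slop word.
         */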
1854         if (hifn_dmamap_aligned(&cmd->src)) {
1855                 cmd->sloplen = cmd->src_mapsize & 3;
1856                 cmd->dst = cmd->src;
1857         } else {
1858                 if (crp->crp_flags & CRYPTO_F_IOV) {
1859                         err = EINVAL;
1860                         goto err_srcmap;
1861                 } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1862                         int totlen, len;
1863                         struct mbuf *m, *m0, *mlast;
1864
1865                         KASSERT(cmd->dst_m == cmd->src_m,
1866                                 ("hifn_crypto: dst_m initialized improperly"));
1867                         hifnstats.hst_unaligned++;
1868                         /*
1869                          * Source is not aligned on a longword boundary.
1870                          * Copy the data to ensure alignment.  If we fail
1871                          * to allocate mbufs or clusters while doing this
1872                          * we return ERESTART so the operation is requeued
1873                          * at the crypto layer, but only if there are
1874                          * ops already posted to the hardware; otherwise we
1875                          * have no guarantee that we'll be re-entered.
1876                          */
1877                         totlen = cmd->src_mapsize;
1878                         if (cmd->src_m->m_flags & M_PKTHDR) {
1879                                 len = MHLEN;
1880                                 MGETHDR(m0, M_NOWAIT, MT_DATA);
1881                                 if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_NOWAIT)) {
1882                                         m_free(m0);
1883                                         m0 = NULL;
1884                                 }
1885                         } else {
1886                                 len = MLEN;
1887                                 MGET(m0, M_NOWAIT, MT_DATA);
1888                         }
1889                         if (m0 == NULL) {
1890                                 hifnstats.hst_nomem_mbuf++;
1891                                 err = sc->sc_cmdu ? ERESTART : ENOMEM;
1892                                 goto err_srcmap;
1893                         }
1894                         if (totlen >= MINCLSIZE) {
1895                                 if (!(MCLGET(m0, M_NOWAIT))) {
1896                                         hifnstats.hst_nomem_mcl++;
1897                                         err = sc->sc_cmdu ? ERESTART : ENOMEM;
1898                                         m_freem(m0);
1899                                         goto err_srcmap;
1900                                 }
1901                                 len = MCLBYTES;
1902                         }
1903                         totlen -= len;
1904                         m0->m_pkthdr.len = m0->m_len = len;
1905                         mlast = m0;
1906
1907                         while (totlen > 0) {
1908                                 MGET(m, M_NOWAIT, MT_DATA);
1909                                 if (m == NULL) {
1910                                         hifnstats.hst_nomem_mbuf++;
1911                                         err = sc->sc_cmdu ? ERESTART : ENOMEM;
1912                                         m_freem(m0);
1913                                         goto err_srcmap;
1914                                 }
1915                                 len = MLEN;
1916                                 if (totlen >= MINCLSIZE) {
1917                                         if (!(MCLGET(m, M_NOWAIT))) {
1918                                                 hifnstats.hst_nomem_mcl++;
1919                                                 err = sc->sc_cmdu ? ERESTART : ENOMEM;
1920                                                 mlast->m_next = m;
1921                                                 m_freem(m0);
1922                                                 goto err_srcmap;
1923                                         }
1924                                         len = MCLBYTES;
1925                                 }
1926
1927                                 m->m_len = len;
1928                                 m0->m_pkthdr.len += len;
1929                                 totlen -= len;
1930
1931                                 mlast->m_next = m;
1932                                 mlast = m;
1933                         }
1934                         cmd->dst_m = m0;
1935                 }
1936         }
1937
1938         if (cmd->dst_map == NULL) {
1939                 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
1940                         hifnstats.hst_nomem_map++;
1941                         err = ENOMEM;
1942                         goto err_srcmap;
1943                 }
1944                 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1945                         if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
1946                             cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1947                                 hifnstats.hst_nomem_map++;
1948                                 err = ENOMEM;
1949                                 goto err_dstmap1;
1950                         }
1951                 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1952                         if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
1953                             cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1954                                 hifnstats.hst_nomem_load++;
1955                                 err = ENOMEM;
1956                                 goto err_dstmap1;
1957                         }
1958                 }
1959         }
1960
1961 #ifdef HIFN_DEBUG
1962         if (hifn_debug) {
1963                 device_printf(sc->sc_dev,
1964                     "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
1965                     READ_REG_1(sc, HIFN_1_DMA_CSR),
1966                     READ_REG_1(sc, HIFN_1_DMA_IER),
1967                     sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu,
1968                     cmd->src_nsegs, cmd->dst_nsegs);
1969         }
1970 #endif
1971
1972         if (cmd->src_map == cmd->dst_map) {
1973                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1974                     BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1975         } else {
1976                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1977                     BUS_DMASYNC_PREWRITE);
1978                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
1979                     BUS_DMASYNC_PREREAD);
1980         }
1981
1982         /*
1983          * need N src, and N dst
1984          */
1985         if ((sc->sc_srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
1986             (sc->sc_dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
1987 #ifdef HIFN_DEBUG
1988                 if (hifn_debug) {
1989                         device_printf(sc->sc_dev,
1990                                 "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
1991                                 sc->sc_srcu, cmd->src_nsegs,
1992                                 sc->sc_dstu, cmd->dst_nsegs);
1993                 }
1994 #endif
1995                 hifnstats.hst_nomem_sd++;
1996                 err = ERESTART;
1997                 goto err_dstmap;
1998         }
1999
2000         if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) {
2001                 sc->sc_cmdi = 0;
2002                 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2003                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2004                 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2005                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2006         }
2007         cmdi = sc->sc_cmdi++;
2008         cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2009         HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2010
2011         /* .p for command/result already set */
2012         dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2013             HIFN_D_MASKDONEIRQ);
2014         HIFN_CMDR_SYNC(sc, cmdi,
2015             BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2016         sc->sc_cmdu++;
2017
2018         /*
2019          * We don't worry about missing an interrupt (which a "command wait"
2020          * interrupt salvages us from), unless there is more than one command
2021          * in the queue.
2022          */
2023         if (sc->sc_cmdu > 1) {
2024                 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2025                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2026         }
2027
2028         hifnstats.hst_ipackets++;
2029         hifnstats.hst_ibytes += cmd->src_mapsize;
2030
2031         hifn_dmamap_load_src(sc, cmd);
2032
2033         /*
2034          * Unlike the other descriptors, we don't mask the done interrupt
2035          * on the result descriptor (except when batching requests below).
2036          */
2037 #ifdef HIFN_DEBUG
2038         if (hifn_debug)
2039                 printf("load res\n");
2040 #endif
2041         if (sc->sc_resi == HIFN_D_RES_RSIZE) {
2042                 sc->sc_resi = 0;
2043                 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2044                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2045                 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2046                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2047         }
2048         resi = sc->sc_resi++;
2049         KASSERT(sc->sc_hifn_commands[resi] == NULL,
2050                 ("hifn_crypto: command slot %u busy", resi));
2051         sc->sc_hifn_commands[resi] = cmd;
2052         HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2053         if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
2054                 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2055                     HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
2056                 sc->sc_curbatch++;
2057                 if (sc->sc_curbatch > hifnstats.hst_maxbatch)
2058                         hifnstats.hst_maxbatch = sc->sc_curbatch;
2059                 hifnstats.hst_totbatch++;
2060         } else {
2061                 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2062                     HIFN_D_VALID | HIFN_D_LAST);
2063                 sc->sc_curbatch = 0;
2064         }
2065         HIFN_RESR_SYNC(sc, resi,
2066             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2067         sc->sc_resu++;
2068
2069         if (cmd->sloplen)
2070                 cmd->slopidx = resi;
2071
2072         hifn_dmamap_load_dst(sc, cmd);
2073
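        /*
         * Kick any DMA channels that are not already running; hifn_tick()
         * disables them again once their rings drain.
         */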
2074         csr = 0;
2075         if (sc->sc_c_busy == 0) {
2076                 csr |= HIFN_DMACSR_C_CTRL_ENA;
2077                 sc->sc_c_busy = 1;
2078         }
2079         if (sc->sc_s_busy == 0) {
2080                 csr |= HIFN_DMACSR_S_CTRL_ENA;
2081                 sc->sc_s_busy = 1;
2082         }
2083         if (sc->sc_r_busy == 0) {
2084                 csr |= HIFN_DMACSR_R_CTRL_ENA;
2085                 sc->sc_r_busy = 1;
2086         }
2087         if (sc->sc_d_busy == 0) {
2088                 csr |= HIFN_DMACSR_D_CTRL_ENA;
2089                 sc->sc_d_busy = 1;
2090         }
2091         if (csr)
2092                 WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
2093
2094 #ifdef HIFN_DEBUG
2095         if (hifn_debug) {
2096                 device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
2097                     READ_REG_1(sc, HIFN_1_DMA_CSR),
2098                     READ_REG_1(sc, HIFN_1_DMA_IER));
2099         }
2100 #endif
2101
2102         sc->sc_active = 5;
2103         HIFN_UNLOCK(sc);
2104         KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
2105         return (err);           /* success */
2106
2107 err_dstmap:
2108         if (cmd->src_map != cmd->dst_map)
2109                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2110 err_dstmap1:
2111         if (cmd->src_map != cmd->dst_map)
2112                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2113 err_srcmap:
2114         if (crp->crp_flags & CRYPTO_F_IMBUF) {
2115                 if (cmd->src_m != cmd->dst_m)
2116                         m_freem(cmd->dst_m);
2117         }
2118         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2119 err_srcmap1:
2120         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2121         HIFN_UNLOCK(sc);
2122         return (err);
2123 }
2124
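/*
 * Once-a-second watchdog: sc_active is reset to 5 each time a command is
 * queued, so after ~5 idle seconds we shut down any DMA channels whose
 * rings have drained.
 */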
2125 static void
2126 hifn_tick(void* vsc)
2127 {
2128         struct hifn_softc *sc = vsc;
2129
2130         HIFN_LOCK(sc);
2131         if (sc->sc_active == 0) {
2132                 u_int32_t r = 0;
2133
2134                 if (sc->sc_cmdu == 0 && sc->sc_c_busy) {
2135                         sc->sc_c_busy = 0;
2136                         r |= HIFN_DMACSR_C_CTRL_DIS;
2137                 }
2138                 if (sc->sc_srcu == 0 && sc->sc_s_busy) {
2139                         sc->sc_s_busy = 0;
2140                         r |= HIFN_DMACSR_S_CTRL_DIS;
2141                 }
2142                 if (sc->sc_dstu == 0 && sc->sc_d_busy) {
2143                         sc->sc_d_busy = 0;
2144                         r |= HIFN_DMACSR_D_CTRL_DIS;
2145                 }
2146                 if (sc->sc_resu == 0 && sc->sc_r_busy) {
2147                         sc->sc_r_busy = 0;
2148                         r |= HIFN_DMACSR_R_CTRL_DIS;
2149                 }
2150                 if (r)
2151                         WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
2152         } else
2153                 sc->sc_active--;
2154         HIFN_UNLOCK(sc);
2155         callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
2156 }
2157
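/*
 * Interrupt handler: acknowledge the DMA status bits, deal with overruns
 * and aborts, then reap completed result/source/command descriptors and
 * finish their requests.
 */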
2158 static void 
2159 hifn_intr(void *arg)
2160 {
2161         struct hifn_softc *sc = arg;
2162         struct hifn_dma *dma;
2163         u_int32_t dmacsr, restart;
2164         int i, u;
2165
2166         dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
2167
2168         /* Nothing in the DMA unit interrupted */
2169         if ((dmacsr & sc->sc_dmaier) == 0)
2170                 return;
2171
2172         HIFN_LOCK(sc);
2173
2174         dma = sc->sc_dma;
2175
2176 #ifdef HIFN_DEBUG
2177         if (hifn_debug) {
2178                 device_printf(sc->sc_dev,
2179                     "irq: stat %08x ien %08x dmaier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
2180                     dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
2181                     sc->sc_cmdi, sc->sc_srci, sc->sc_dsti, sc->sc_resi,
2182                     sc->sc_cmdk, sc->sc_srck, sc->sc_dstk, sc->sc_resk,
2183                     sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
2184         }
2185 #endif
2186
2187         WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
2188
2189         if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
2190             (dmacsr & HIFN_DMACSR_PUBDONE))
2191                 WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
2192                     READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
2193
2194         restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
2195         if (restart)
2196                 device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
2197
2198         if (sc->sc_flags & HIFN_IS_7811) {
2199                 if (dmacsr & HIFN_DMACSR_ILLR)
2200                         device_printf(sc->sc_dev, "illegal read\n");
2201                 if (dmacsr & HIFN_DMACSR_ILLW)
2202                         device_printf(sc->sc_dev, "illegal write\n");
2203         }
2204
2205         restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
2206             HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
2207         if (restart) {
2208                 device_printf(sc->sc_dev, "abort, resetting.\n");
2209                 hifnstats.hst_abort++;
2210                 hifn_abort(sc);
2211                 HIFN_UNLOCK(sc);
2212                 return;
2213         }
2214
2215         if ((dmacsr & HIFN_DMACSR_C_WAIT) && (sc->sc_cmdu == 0)) {
2216                 /*
2217                  * If no slots to process and we receive a "waiting on
2218                  * command" interrupt, we disable the "waiting on command"
2219                  * interrupt (by clearing its enable bit in the DMA IER).
2220                  */
2221                 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
2222                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2223         }
2224
2225         /* clear the rings */
2226         i = sc->sc_resk; u = sc->sc_resu;
2227         while (u != 0) {
2228                 HIFN_RESR_SYNC(sc, i,
2229                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2230                 if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
2231                         HIFN_RESR_SYNC(sc, i,
2232                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2233                         break;
2234                 }
2235
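                /*
                 * Index HIFN_D_RES_RSIZE is the ring's jump descriptor,
                 * not a real result slot, so it carries no command to
                 * complete.
                 */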
2236                 if (i != HIFN_D_RES_RSIZE) {
2237                         struct hifn_command *cmd;
2238                         u_int8_t *macbuf = NULL;
2239
2240                         HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
2241                         cmd = sc->sc_hifn_commands[i];
2242                         KASSERT(cmd != NULL,
2243                                 ("hifn_intr: null command slot %u", i));
2244                         sc->sc_hifn_commands[i] = NULL;
2245
2246                         if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2247                                 macbuf = dma->result_bufs[i];
2248                                 macbuf += 12;
2249                         }
2250
2251                         hifn_callback(sc, cmd, macbuf);
2252                         hifnstats.hst_opackets++;
2253                         u--;
2254                 }
2255
2256                 if (++i == (HIFN_D_RES_RSIZE + 1))
2257                         i = 0;
2258         }
2259         sc->sc_resk = i; sc->sc_resu = u;
2260
2261         i = sc->sc_srck; u = sc->sc_srcu;
2262         while (u != 0) {
2263                 if (i == HIFN_D_SRC_RSIZE)
2264                         i = 0;
2265                 HIFN_SRCR_SYNC(sc, i,
2266                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2267                 if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
2268                         HIFN_SRCR_SYNC(sc, i,
2269                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2270                         break;
2271                 }
2272                 i++, u--;
2273         }
2274         sc->sc_srck = i; sc->sc_srcu = u;
2275
2276         i = sc->sc_cmdk; u = sc->sc_cmdu;
2277         while (u != 0) {
2278                 HIFN_CMDR_SYNC(sc, i,
2279                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2280                 if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
2281                         HIFN_CMDR_SYNC(sc, i,
2282                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2283                         break;
2284                 }
2285                 if (i != HIFN_D_CMD_RSIZE) {
2286                         u--;
2287                         HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
2288                 }
2289                 if (++i == (HIFN_D_CMD_RSIZE + 1))
2290                         i = 0;
2291         }
2292         sc->sc_cmdk = i; sc->sc_cmdu = u;
2293
2294         HIFN_UNLOCK(sc);
2295
2296         if (sc->sc_needwakeup) {                /* XXX check high watermark */
2297                 int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
2298 #ifdef HIFN_DEBUG
2299                 if (hifn_debug)
2300                         device_printf(sc->sc_dev,
2301                                 "wakeup crypto (%x) u %d/%d/%d/%d\n",
2302                                 sc->sc_needwakeup,
2303                                 sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
2304 #endif
2305                 sc->sc_needwakeup &= ~wakeup;
2306                 crypto_unblock(sc->sc_cid, wakeup);
2307         }
2308 }
2309
2310 /*
2311  * Allocate a new 'session'.  The per-session state lives in the
2312  * crypto_session_t supplied by the framework; here we only validate the
2313  * requested algorithm combination and set the MAC length and initial IV.
2314  */
2315 static int
2316 hifn_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
2317 {
2318         struct hifn_softc *sc = device_get_softc(dev);
2319         struct cryptoini *c;
2320         int mac = 0, cry = 0;
2321         struct hifn_session *ses;
2322
2323         KASSERT(sc != NULL, ("hifn_newsession: null softc"));
2324         if (cri == NULL || sc == NULL)
2325                 return (EINVAL);
2326
2327         ses = crypto_get_driver_session(cses);
2328
2329         for (c = cri; c != NULL; c = c->cri_next) {
2330                 switch (c->cri_alg) {
2331                 case CRYPTO_MD5:
2332                 case CRYPTO_SHA1:
2333                 case CRYPTO_MD5_HMAC:
2334                 case CRYPTO_SHA1_HMAC:
2335                         if (mac)
2336                                 return (EINVAL);
2337                         mac = 1;
2338                         ses->hs_mlen = c->cri_mlen;
2339                         if (ses->hs_mlen == 0) {
2340                                 switch (c->cri_alg) {
2341                                 case CRYPTO_MD5:
2342                                 case CRYPTO_MD5_HMAC:
2343                                         ses->hs_mlen = 16;
2344                                         break;
2345                                 case CRYPTO_SHA1:
2346                                 case CRYPTO_SHA1_HMAC:
2347                                         ses->hs_mlen = 20;
2348                                         break;
2349                                 }
2350                         }
2351                         break;
2352                 case CRYPTO_DES_CBC:
2353                 case CRYPTO_3DES_CBC:
2354                 case CRYPTO_AES_CBC:
2355                         /* XXX read_random() may return fewer bytes than asked for; does it matter? */
2356                         read_random(ses->hs_iv,
2357                                 c->cri_alg == CRYPTO_AES_CBC ?
2358                                         HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2359                         /*FALLTHROUGH*/
2360                 case CRYPTO_ARC4:
2361                         if (cry)
2362                                 return (EINVAL);
2363                         cry = 1;
2364                         break;
2365                 default:
2366                         return (EINVAL);
2367                 }
2368         }
2369         if (mac == 0 && cry == 0)
2370                 return (EINVAL);
2371         return (0);
2372 }
2373
2374 /*
2375  * XXX the freesession routine should run a zeroed mac/encrypt key into
2376  * context ram to blow away any keys already stored there.
2377  */
2378
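/*
 * Dispatch a symmetric crypto request: work out the MAC/cipher ordering,
 * fill in a hifn_command (masks, keys, IV) and hand it to hifn_crypto();
 * an ERESTART return asks the framework to requeue the request.
 */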
2379 static int
2380 hifn_process(device_t dev, struct cryptop *crp, int hint)
2381 {
2382         struct hifn_softc *sc = device_get_softc(dev);
2383         struct hifn_command *cmd = NULL;
2384         int err, ivlen;
2385         struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2386         struct hifn_session *ses;
2387
2388         if (crp == NULL || crp->crp_callback == NULL) {
2389                 hifnstats.hst_invalid++;
2390                 return (EINVAL);
2391         }
2392
2393         ses = crypto_get_driver_session(crp->crp_session);
2394         cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO);
2395         if (cmd == NULL) {
2396                 hifnstats.hst_nomem++;
2397                 err = ENOMEM;
2398                 goto errout;
2399         }
2400
2401         if (crp->crp_flags & CRYPTO_F_IMBUF) {
2402                 cmd->src_m = (struct mbuf *)crp->crp_buf;
2403                 cmd->dst_m = (struct mbuf *)crp->crp_buf;
2404         } else if (crp->crp_flags & CRYPTO_F_IOV) {
2405                 cmd->src_io = (struct uio *)crp->crp_buf;
2406                 cmd->dst_io = (struct uio *)crp->crp_buf;
2407         } else {
2408                 err = EINVAL;
2409                 goto errout;    /* XXX we don't handle contiguous buffers! */
2410         }
2411
2412         crd1 = crp->crp_desc;
2413         if (crd1 == NULL) {
2414                 err = EINVAL;
2415                 goto errout;
2416         }
2417         crd2 = crd1->crd_next;
2418
2419         if (crd2 == NULL) {
2420                 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2421                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2422                     crd1->crd_alg == CRYPTO_SHA1 ||
2423                     crd1->crd_alg == CRYPTO_MD5) {
2424                         maccrd = crd1;
2425                         enccrd = NULL;
2426                 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2427                     crd1->crd_alg == CRYPTO_3DES_CBC ||
2428                     crd1->crd_alg == CRYPTO_AES_CBC ||
2429                     crd1->crd_alg == CRYPTO_ARC4) {
2430                         if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2431                                 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2432                         maccrd = NULL;
2433                         enccrd = crd1;
2434                 } else {
2435                         err = EINVAL;
2436                         goto errout;
2437                 }
2438         } else {
2439                 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2440                      crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2441                      crd1->crd_alg == CRYPTO_MD5 ||
2442                      crd1->crd_alg == CRYPTO_SHA1) &&
2443                     (crd2->crd_alg == CRYPTO_DES_CBC ||
2444                      crd2->crd_alg == CRYPTO_3DES_CBC ||
2445                      crd2->crd_alg == CRYPTO_AES_CBC ||
2446                      crd2->crd_alg == CRYPTO_ARC4) &&
2447                     ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2448                         cmd->base_masks = HIFN_BASE_CMD_DECODE;
2449                         maccrd = crd1;
2450                         enccrd = crd2;
2451                 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2452                      crd1->crd_alg == CRYPTO_ARC4 ||
2453                      crd1->crd_alg == CRYPTO_3DES_CBC ||
2454                      crd1->crd_alg == CRYPTO_AES_CBC) &&
2455                     (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2456                      crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2457                      crd2->crd_alg == CRYPTO_MD5 ||
2458                      crd2->crd_alg == CRYPTO_SHA1) &&
2459                     (crd1->crd_flags & CRD_F_ENCRYPT)) {
2460                         enccrd = crd1;
2461                         maccrd = crd2;
2462                 } else {
2463                         /*
2464                          * The 7751 cannot perform the MAC and crypt operations in the order requested
2465                          */
2466                         err = EINVAL;
2467                         goto errout;
2468                 }
2469         }
2470
2471         if (enccrd) {
2472                 cmd->enccrd = enccrd;
2473                 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2474                 switch (enccrd->crd_alg) {
2475                 case CRYPTO_ARC4:
2476                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2477                         break;
2478                 case CRYPTO_DES_CBC:
2479                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2480                             HIFN_CRYPT_CMD_MODE_CBC |
2481                             HIFN_CRYPT_CMD_NEW_IV;
2482                         break;
2483                 case CRYPTO_3DES_CBC:
2484                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2485                             HIFN_CRYPT_CMD_MODE_CBC |
2486                             HIFN_CRYPT_CMD_NEW_IV;
2487                         break;
2488                 case CRYPTO_AES_CBC:
2489                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2490                             HIFN_CRYPT_CMD_MODE_CBC |
2491                             HIFN_CRYPT_CMD_NEW_IV;
2492                         break;
2493                 default:
2494                         err = EINVAL;
2495                         goto errout;
2496                 }
2497                 if (enccrd->crd_alg != CRYPTO_ARC4) {
2498                         ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2499                                 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2500                         if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2501                                 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2502                                         bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2503                                 else
2504                                         bcopy(ses->hs_iv, cmd->iv, ivlen);
2505
2506                                 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2507                                     == 0) {
2508                                         crypto_copyback(crp->crp_flags,
2509                                             crp->crp_buf, enccrd->crd_inject,
2510                                             ivlen, cmd->iv);
2511                                 }
2512                         } else {
2513                                 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2514                                         bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2515                                 else {
2516                                         crypto_copydata(crp->crp_flags,
2517                                             crp->crp_buf, enccrd->crd_inject,
2518                                             ivlen, cmd->iv);
2519                                 }
2520                         }
2521                 }
2522
2523                 if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
2524                         cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2525                 cmd->ck = enccrd->crd_key;
2526                 cmd->cklen = enccrd->crd_klen >> 3;
2527                 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2528
2529                 /* 
2530                  * Need to specify the size for the AES key in the masks.
2531                  */
2532                 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2533                     HIFN_CRYPT_CMD_ALG_AES) {
2534                         switch (cmd->cklen) {
2535                         case 16:
2536                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2537                                 break;
2538                         case 24:
2539                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2540                                 break;
2541                         case 32:
2542                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2543                                 break;
2544                         default:
2545                                 err = EINVAL;
2546                                 goto errout;
2547                         }
2548                 }
2549         }
2550
2551         if (maccrd) {
2552                 cmd->maccrd = maccrd;
2553                 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2554
2555                 switch (maccrd->crd_alg) {
2556                 case CRYPTO_MD5:
2557                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2558                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2559                             HIFN_MAC_CMD_POS_IPSEC;
2560                         break;
2561                 case CRYPTO_MD5_HMAC:
2562                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2563                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2564                             HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2565                         break;
2566                 case CRYPTO_SHA1:
2567                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2568                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2569                             HIFN_MAC_CMD_POS_IPSEC;
2570                         break;
2571                 case CRYPTO_SHA1_HMAC:
2572                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2573                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2574                             HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2575                         break;
2576                 }
2577
2578                 if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2579                      maccrd->crd_alg == CRYPTO_MD5_HMAC) {
2580                         cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2581                         bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2582                         bzero(cmd->mac + (maccrd->crd_klen >> 3),
2583                             HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2584                 }
2585         }
2586
2587         cmd->crp = crp;
2588         cmd->session = ses;
2589         cmd->softc = sc;
2590
2591         err = hifn_crypto(sc, cmd, crp, hint);
2592         if (!err) {
2593                 return 0;
2594         } else if (err == ERESTART) {
2595                 /*
2596                  * There weren't enough resources to dispatch the request
2597                  * to the part.  Notify the caller so they'll requeue this
2598                  * request and resubmit it again soon.
2599                  */
2600 #ifdef HIFN_DEBUG
2601                 if (hifn_debug)
2602                         device_printf(sc->sc_dev, "requeue request\n");
2603 #endif
2604                 free(cmd, M_DEVBUF);
2605                 sc->sc_needwakeup |= CRYPTO_SYMQ;
2606                 return (err);
2607         }
2608
2609 errout:
2610         if (cmd != NULL)
2611                 free(cmd, M_DEVBUF);
2612         if (err == EINVAL)
2613                 hifnstats.hst_invalid++;
2614         else
2615                 hifnstats.hst_nomem++;
2616         crp->crp_etype = err;
2617         crypto_done(crp);
2618         return (err);
2619 }
2620
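/*
 * Recover from a DMA abort: complete whatever the chip already finished,
 * fail the requests still pending, then reset and reinitialize the board.
 */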
2621 static void
2622 hifn_abort(struct hifn_softc *sc)
2623 {
2624         struct hifn_dma *dma = sc->sc_dma;
2625         struct hifn_command *cmd;
2626         struct cryptop *crp;
2627         int i, u;
2628
2629         i = sc->sc_resk; u = sc->sc_resu;
2630         while (u != 0) {
2631                 cmd = sc->sc_hifn_commands[i];
2632                 KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
2633                 sc->sc_hifn_commands[i] = NULL;
2634                 crp = cmd->crp;
2635
2636                 if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
2637                         /* Salvage what we can. */
2638                         u_int8_t *macbuf;
2639
2640                         if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2641                                 macbuf = dma->result_bufs[i];
2642                                 macbuf += 12;
2643                         } else
2644                                 macbuf = NULL;
2645                         hifnstats.hst_opackets++;
2646                         hifn_callback(sc, cmd, macbuf);
2647                 } else {
2648                         if (cmd->src_map == cmd->dst_map) {
2649                                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2650                                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2651                         } else {
2652                                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2653                                     BUS_DMASYNC_POSTWRITE);
2654                                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2655                                     BUS_DMASYNC_POSTREAD);
2656                         }
2657
2658                         if (cmd->src_m != cmd->dst_m) {
2659                                 m_freem(cmd->src_m);
2660                                 crp->crp_buf = (caddr_t)cmd->dst_m;
2661                         }
2662
2663                         /* non-shared buffers cannot be restarted */
2664                         if (cmd->src_map != cmd->dst_map) {
2665                                 /*
2666                                  * XXX should be EAGAIN, delayed until
2667                                  * after the reset.
2668                                  */
2669                                 crp->crp_etype = ENOMEM;
2670                                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2671                                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2672                         } else
2673                                 crp->crp_etype = ENOMEM;
2674
2675                         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2676                         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2677
2678                         free(cmd, M_DEVBUF);
2679                         if (crp->crp_etype != EAGAIN)
2680                                 crypto_done(crp);
2681                 }
2682
2683                 if (++i == HIFN_D_RES_RSIZE)
2684                         i = 0;
2685                 u--;
2686         }
2687         sc->sc_resk = i; sc->sc_resu = u;
2688
2689         hifn_reset_board(sc, 1);
2690         hifn_init_dma(sc);
2691         hifn_init_pci_registers(sc);
2692 }
2693
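/*
 * Finish a request: hand back the bounce mbuf chain if the source was
 * copied, restore the slop bytes, save the session IV, copy out the MAC
 * digest, release the DMA maps, and call crypto_done().
 */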
2694 static void
2695 hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
2696 {
2697         struct hifn_dma *dma = sc->sc_dma;
2698         struct cryptop *crp = cmd->crp;
2699         struct cryptodesc *crd;
2700         struct mbuf *m;
2701         int totlen, i, u, ivlen;
2702
2703         if (cmd->src_map == cmd->dst_map) {
2704                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2705                     BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2706         } else {
2707                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2708                     BUS_DMASYNC_POSTWRITE);
2709                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2710                     BUS_DMASYNC_POSTREAD);
2711         }
2712
2713         if (crp->crp_flags & CRYPTO_F_IMBUF) {
2714                 if (cmd->src_m != cmd->dst_m) {
2715                         crp->crp_buf = (caddr_t)cmd->dst_m;
2716                         totlen = cmd->src_mapsize;
2717                         for (m = cmd->dst_m; m != NULL; m = m->m_next) {
2718                                 if (totlen < m->m_len) {
2719                                         m->m_len = totlen;
2720                                         totlen = 0;
2721                                 } else
2722                                         totlen -= m->m_len;
2723                         }
2724                         cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
2725                         m_freem(cmd->src_m);
2726                 }
2727         }
2728
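        /*
         * Put the 1-3 trailing bytes that landed in the shared slop word
         * back at the end of the output buffer.
         */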
2729         if (cmd->sloplen != 0) {
2730                 crypto_copyback(crp->crp_flags, crp->crp_buf,
2731                     cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
2732                     (caddr_t)&dma->slop[cmd->slopidx]);
2733         }
2734
2735         i = sc->sc_dstk; u = sc->sc_dstu;
2736         while (u != 0) {
2737                 if (i == HIFN_D_DST_RSIZE)
2738                         i = 0;
2739                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2740                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2741                 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2742                         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2743                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2744                         break;
2745                 }
2746                 i++, u--;
2747         }
2748         sc->sc_dstk = i; sc->sc_dstu = u;
2749
2750         hifnstats.hst_obytes += cmd->dst_mapsize;
2751
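        /*
         * For encrypt operations, save the final cipher block of the
         * result as the session IV so a subsequent request in this
         * session can continue the CBC chain.
         */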
2752         if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
2753             HIFN_BASE_CMD_CRYPT) {
2754                 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2755                         if (crd->crd_alg != CRYPTO_DES_CBC &&
2756                             crd->crd_alg != CRYPTO_3DES_CBC &&
2757                             crd->crd_alg != CRYPTO_AES_CBC)
2758                                 continue;
2759                         ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
2760                                 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2761                         crypto_copydata(crp->crp_flags, crp->crp_buf,
2762                             crd->crd_skip + crd->crd_len - ivlen, ivlen,
2763                             cmd->session->hs_iv);
2764                         break;
2765                 }
2766         }
2767
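        /*
         * If the device computed a digest, copy it into the request
         * buffer at the offset the caller specified (crd_inject).
         */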
2768         if (macbuf != NULL) {
2769                 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2770                         int len;
2771
2772                         if (crd->crd_alg != CRYPTO_MD5 &&
2773                             crd->crd_alg != CRYPTO_SHA1 &&
2774                             crd->crd_alg != CRYPTO_MD5_HMAC &&
2775                             crd->crd_alg != CRYPTO_SHA1_HMAC) {
2776                                 continue;
2777                         }
2778                         len = cmd->session->hs_mlen;
2779                         crypto_copyback(crp->crp_flags, crp->crp_buf,
2780                             crd->crd_inject, len, macbuf);
2781                         break;
2782                 }
2783         }
2784
2785         if (cmd->src_map != cmd->dst_map) {
2786                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2787                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2788         }
2789         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2790         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2791         free(cmd, M_DEVBUF);
2792         crypto_done(crp);
2793 }
2794
2795 /*
2796  * 7811 PB3 rev/2 parts lock up on burst writes to Group 0
2797  * and Group 1 registers; avoid conditions that could create
2798  * burst writes by doing a read in between the writes.
2799  *
2800  * NB: The read we interpose is always to the same register;
2801  *     we do this because reading from an arbitrary (e.g. last)
2802  *     register may not always work.
2803  */
2804 static void
2805 hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2806 {
2807         if (sc->sc_flags & HIFN_IS_7811) {
2808                 if (sc->sc_bar0_lastreg == reg - 4)
2809                         bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
2810                 sc->sc_bar0_lastreg = reg;
2811         }
2812         bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2813 }
2814
2815 static void
2816 hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2817 {
2818         if (sc->sc_flags & HIFN_IS_7811) {
2819                 if (sc->sc_bar1_lastreg == reg - 4)
2820                         bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2821                 sc->sc_bar1_lastreg = reg;
2822         }
2823         bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2824 }
2825
2826 #ifdef HIFN_VULCANDEV
2827 /*
2828  * This code provides support for mapping the PK engine's registers
2829  * into a userspace program.
2830  *
2831  */
2832 static int
2833 vulcanpk_mmap(struct cdev *dev, vm_ooffset_t offset,
2834               vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr)
2835 {
2836         struct hifn_softc *sc;
2837         vm_paddr_t pd;
2838         void *b;
2839
2840         sc = dev->si_drv1;
2841
2842         pd = rman_get_start(sc->sc_bar1res);
2843         b = rman_get_virtual(sc->sc_bar1res);
2844
2845 #if 0
2846         printf("vpk mmap: %p(%016llx) offset=%lld\n", b,
2847             (unsigned long long)pd, (long long)offset);
2848         hexdump(b, HIFN_1_PUB_MEMEND, "vpk", 0);
2849 #endif
2850
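        /*
         * Only page offset 0 is mappable; it corresponds to the start
         * of BAR1, where the public-key engine registers live.
         */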
2851         if (offset == 0) {
2852                 *paddr = pd;
2853                 return (0);
2854         }
2855         return (-1);
2856 }
2857
2858 static struct cdevsw vulcanpk_cdevsw = {
2859         .d_version =    D_VERSION,
2860         .d_mmap =       vulcanpk_mmap,
2861         .d_name =       "vulcanpk",
2862 };
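/*
 * Illustrative sketch only, not part of the driver: assuming the attach
 * code creates a /dev/vulcanpk node backed by this cdevsw, a userspace
 * consumer could map the PK engine registers roughly as follows:
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <stdint.h>
 *
 *	int fd = open("/dev/vulcanpk", O_RDWR);
 *	volatile uint32_t *pk = mmap(NULL, getpagesize(),
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Only offset 0 maps successfully; see vulcanpk_mmap() above.
 */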
2863 #endif /* HIFN_VULCANDEV */