1 /*      $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $  */
2
3 /*-
4  * SPDX-License-Identifier: BSD-3-Clause
5  *
6  * Invertex AEON / Hifn 7751 driver
7  * Copyright (c) 1999 Invertex Inc. All rights reserved.
8  * Copyright (c) 1999 Theo de Raadt
9  * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10  *                      http://www.netsec.net
11  * Copyright (c) 2003 Hifn Inc.
12  *
13  * This driver is based on a previous driver by Invertex, for which they
14  * requested:  Please send any comments, feedback, bug-fixes, or feature
15  * requests to software@invertex.com.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  *
21  * 1. Redistributions of source code must retain the above copyright
22  *   notice, this list of conditions and the following disclaimer.
23  * 2. Redistributions in binary form must reproduce the above copyright
24  *   notice, this list of conditions and the following disclaimer in the
25  *   documentation and/or other materials provided with the distribution.
26  * 3. The name of the author may not be used to endorse or promote products
27  *   derived from this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  *
40  * Effort sponsored in part by the Defense Advanced Research Projects
41  * Agency (DARPA) and Air Force Research Laboratory, Air Force
42  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43  */
44
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47
48 /*
49  * Driver for various Hifn encryption processors.
50  */
51 #include "opt_hifn.h"
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/proc.h>
56 #include <sys/errno.h>
57 #include <sys/malloc.h>
58 #include <sys/kernel.h>
59 #include <sys/module.h>
60 #include <sys/mbuf.h>
61 #include <sys/lock.h>
62 #include <sys/mutex.h>
63 #include <sys/sysctl.h>
64 #include <sys/uio.h>
65
66 #include <vm/vm.h>
67 #include <vm/pmap.h>
68
69 #include <machine/bus.h>
70 #include <machine/resource.h>
71 #include <sys/bus.h>
72 #include <sys/rman.h>
73
74 #include <opencrypto/cryptodev.h>
75 #include <opencrypto/xform_auth.h>
76 #include <sys/random.h>
77 #include <sys/kobj.h>
78
79 #include "cryptodev_if.h"
80
81 #include <dev/pci/pcivar.h>
82 #include <dev/pci/pcireg.h>
83
84 #ifdef HIFN_RNDTEST
85 #include <dev/rndtest/rndtest.h>
86 #endif
87 #include <dev/hifn/hifn7751reg.h>
88 #include <dev/hifn/hifn7751var.h>
89
90 #ifdef HIFN_VULCANDEV
91 #include <sys/conf.h>
92 #include <sys/uio.h>
93
94 static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
95 #endif
96
97 /*
98  * Prototypes for the device interface and crypto driver methods
99  */
100 static  int hifn_probe(device_t);
101 static  int hifn_attach(device_t);
102 static  int hifn_detach(device_t);
103 static  int hifn_suspend(device_t);
104 static  int hifn_resume(device_t);
105 static  int hifn_shutdown(device_t);
106
107 static  int hifn_probesession(device_t, const struct crypto_session_params *);
108 static  int hifn_newsession(device_t, crypto_session_t,
109     const struct crypto_session_params *);
110 static  int hifn_process(device_t, struct cryptop *, int);
111
112 static device_method_t hifn_methods[] = {
113         /* Device interface */
114         DEVMETHOD(device_probe,         hifn_probe),
115         DEVMETHOD(device_attach,        hifn_attach),
116         DEVMETHOD(device_detach,        hifn_detach),
117         DEVMETHOD(device_suspend,       hifn_suspend),
118         DEVMETHOD(device_resume,        hifn_resume),
119         DEVMETHOD(device_shutdown,      hifn_shutdown),
120
121         /* crypto device methods */
122         DEVMETHOD(cryptodev_probesession, hifn_probesession),
123         DEVMETHOD(cryptodev_newsession, hifn_newsession),
124         DEVMETHOD(cryptodev_process,    hifn_process),
125
126         DEVMETHOD_END
127 };
128 static driver_t hifn_driver = {
129         "hifn",
130         hifn_methods,
131         sizeof (struct hifn_softc)
132 };
133 static devclass_t hifn_devclass;
134
135 DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
136 MODULE_DEPEND(hifn, crypto, 1, 1, 1);
137 #ifdef HIFN_RNDTEST
138 MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
139 #endif
140
141 static  void hifn_reset_board(struct hifn_softc *, int);
142 static  void hifn_reset_puc(struct hifn_softc *);
143 static  void hifn_puc_wait(struct hifn_softc *);
144 static  int hifn_enable_crypto(struct hifn_softc *);
145 static  void hifn_set_retry(struct hifn_softc *sc);
146 static  void hifn_init_dma(struct hifn_softc *);
147 static  void hifn_init_pci_registers(struct hifn_softc *);
148 static  int hifn_sramsize(struct hifn_softc *);
149 static  int hifn_dramsize(struct hifn_softc *);
150 static  int hifn_ramtype(struct hifn_softc *);
151 static  void hifn_sessions(struct hifn_softc *);
152 static  void hifn_intr(void *);
153 static  u_int hifn_write_command(struct hifn_command *, u_int8_t *);
154 static  u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
155 static  void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
156 static  int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
157 static  int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
158 static  int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
159 static  int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
160 static  int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
161 static  int hifn_init_pubrng(struct hifn_softc *);
162 static  void hifn_rng(void *);
163 static  void hifn_tick(void *);
164 static  void hifn_abort(struct hifn_softc *);
165 static  void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
166
167 static  void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
168 static  void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
169
170 static __inline u_int32_t
171 READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
172 {
173     u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
174     sc->sc_bar0_lastreg = (bus_size_t) -1;
175     return (v);
176 }
177 #define WRITE_REG_0(sc, reg, val)       hifn_write_reg_0(sc, reg, val)
178
179 static __inline u_int32_t
180 READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
181 {
182     u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
183     sc->sc_bar1_lastreg = (bus_size_t) -1;
184     return (v);
185 }
186 #define WRITE_REG_1(sc, reg, val)       hifn_write_reg_1(sc, reg, val)
187
188 static SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
189     "Hifn driver parameters");
190
191 #ifdef HIFN_DEBUG
192 static  int hifn_debug = 0;
193 SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
194             0, "control debugging msgs");
195 #endif
196
197 static  struct hifn_stats hifnstats;
198 SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
199             hifn_stats, "driver statistics");
200 static  int hifn_maxbatch = 1;
201 SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
202             0, "max ops to batch w/o interrupt");
203
204 /*
205  * Probe for a supported device.  The PCI vendor and device
206  * IDs are used to detect devices we know how to handle.
207  */
208 static int
209 hifn_probe(device_t dev)
210 {
211         if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
212             pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
213                 return (BUS_PROBE_DEFAULT);
214         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
215             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
216              pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
217              pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
218              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
219              pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
220                 return (BUS_PROBE_DEFAULT);
221         if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
222             pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
223                 return (BUS_PROBE_DEFAULT);
224         return (ENXIO);
225 }
226
227 static void
228 hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
229 {
230         bus_addr_t *paddr = (bus_addr_t*) arg;
231         *paddr = segs->ds_addr;
232 }
233
234 static const char*
235 hifn_partname(struct hifn_softc *sc)
236 {
237         /* XXX sprintf numbers when not decoded */
238         switch (pci_get_vendor(sc->sc_dev)) {
239         case PCI_VENDOR_HIFN:
240                 switch (pci_get_device(sc->sc_dev)) {
241                 case PCI_PRODUCT_HIFN_6500:     return "Hifn 6500";
242                 case PCI_PRODUCT_HIFN_7751:     return "Hifn 7751";
243                 case PCI_PRODUCT_HIFN_7811:     return "Hifn 7811";
244                 case PCI_PRODUCT_HIFN_7951:     return "Hifn 7951";
245                 case PCI_PRODUCT_HIFN_7955:     return "Hifn 7955";
246                 case PCI_PRODUCT_HIFN_7956:     return "Hifn 7956";
247                 }
248                 return "Hifn unknown-part";
249         case PCI_VENDOR_INVERTEX:
250                 switch (pci_get_device(sc->sc_dev)) {
251                 case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
252                 }
253                 return "Invertex unknown-part";
254         case PCI_VENDOR_NETSEC:
255                 switch (pci_get_device(sc->sc_dev)) {
256                 case PCI_PRODUCT_NETSEC_7751:   return "NetSec 7751";
257                 }
258                 return "NetSec unknown-part";
259         }
260         return "Unknown-vendor unknown-part";
261 }
262
263 static void
264 default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
265 {
266         /* MarkM: FIX!! Check that this does not swamp the harvester! */
267         random_harvest_queue(buf, count, RANDOM_PURE_HIFN);
268 }
269
270 static u_int
271 checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max)
272 {
273         if (v > max) {
274                 device_printf(dev, "Warning, %s %u out of range, "
275                         "using max %u\n", what, v, max);
276                 v = max;
277         } else if (v < min) {
278                 device_printf(dev, "Warning, %s %u out of range, "
279                         "using min %u\n", what, v, min);
280                 v = min;
281         }
282         return v;
283 }
284
285 /*
286  * Select PLL configuration for 795x parts.  This is complicated in
287  * that we cannot determine the optimal parameters without user input.
288  * The reference clock is derived from an external clock through a
289  * multiplier.  The external clock is either the host bus (i.e. PCI)
290  * or an external clock generator.  When using the PCI bus we assume
291  * the clock is either 33 or 66 MHz; for an external source we cannot
292  * tell the speed.
293  *
294  * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
295  * for an external source, followed by the frequency.  We calculate
296  * the appropriate multiplier and PLL register contents accordingly.
297  * When no configuration is given we default to "ext66" because,
298  * according to Mike Ham of HiFn, almost every board in existence
299  * has an external crystal populated at 66 MHz.  Using the PCI bus
300  * clock can be a problem on modern motherboards: PCI33 can have
301  * clocks anywhere from 0 to 33 MHz, and some boards have
302  * non-PCI-compliant spread-spectrum clocks, which can confuse the
303  * PLL.
304  *
305  * Note that a card configured for "pci66" but sitting in a 33 MHz
306  * slot will run at half speed until the correct frequency is provided.
307  */
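/*
 * Illustrative examples of the multiplier calculation done in
 * hifn_getpllconfig() below (derived from the code, not from Hifn
 * documentation): "pci33" gives mul = (266 / 33) & ~1 = 8, so the ND
 * field is 8 / 2 - 1 = 3 and HIFN_PLL_IS stays clear; "ext66" gives
 * mul = (266 / 66) & ~1 = 4 and ND = 1, with HIFN_PLL_REF_SEL set;
 * "ext20" on a 7955/7956 gives mul = 13 & ~1 = 12 (the maximum),
 * ND = 5, and HIFN_PLL_IS set because the multiplier exceeds 8.
 */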
308 static void
309 hifn_getpllconfig(device_t dev, u_int *pll)
310 {
311         const char *pllspec;
312         u_int freq, mul, fl, fh;
313         u_int32_t pllconfig;
314         char *nxt;
315
316         if (resource_string_value("hifn", device_get_unit(dev),
317             "pllconfig", &pllspec))
318                 pllspec = "ext66";
319         fl = 33, fh = 66;
320         pllconfig = 0;
321         if (strncmp(pllspec, "ext", 3) == 0) {
322                 pllspec += 3;
323                 pllconfig |= HIFN_PLL_REF_SEL;
324                 switch (pci_get_device(dev)) {
325                 case PCI_PRODUCT_HIFN_7955:
326                 case PCI_PRODUCT_HIFN_7956:
327                         fl = 20, fh = 100;
328                         break;
329 #ifdef notyet
330                 case PCI_PRODUCT_HIFN_7954:
331                         fl = 20, fh = 66;
332                         break;
333 #endif
334                 }
335         } else if (strncmp(pllspec, "pci", 3) == 0)
336                 pllspec += 3;
337         freq = strtoul(pllspec, &nxt, 10);
338         if (nxt == pllspec)
339                 freq = 66;
340         else
341                 freq = checkmaxmin(dev, "frequency", freq, fl, fh);
342         /*
343          * Calculate multiplier.  We target a Fck of 266 MHz,
344          * allowing only even values, possibly rounded down.
345          * Multipliers > 8 must set the charge pump current.
346          */
347         mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
348         pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
349         if (mul > 8)
350                 pllconfig |= HIFN_PLL_IS;
351         *pll = pllconfig;
352 }
353
354 /*
355  * Attach an interface that successfully probed.
356  */
357 static int 
358 hifn_attach(device_t dev)
359 {
360         struct hifn_softc *sc = device_get_softc(dev);
361         caddr_t kva;
362         int rseg, rid;
363         char rbase;
364         uint16_t rev;
365
366         sc->sc_dev = dev;
367
368         mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF);
369
370         /* XXX handle power management */
371
372         /*
373          * The 7951 and 795x have a random number generator and
374          * public key support; note this.
375          */
376         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
377             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
378              pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
379              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
380                 sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
381         /*
382          * The 7811 has a random number generator and
383          * we also note its identity because of some quirks.
384          */
385         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
386             pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
387                 sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
388
389         /*
390          * The 795x parts support AES.
391          */
392         if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
393             (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
394              pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
395                 sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
396                 /*
397                  * Select PLL configuration.  This depends on the
398                  * bus and board design and must be manually configured
399                  * if the default setting is unacceptable.
400                  */
401                 hifn_getpllconfig(dev, &sc->sc_pllconfig);
402         }
403
404         /*
405          * Set up PCI resources.  Note that we record the bus
406          * tag and handle for each register mapping; these are
407          * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
408          * and WRITE_REG_1 macros throughout the driver.
409          */
410         pci_enable_busmaster(dev);
411
412         rid = HIFN_BAR0;
413         sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
414                                                 RF_ACTIVE);
415         if (sc->sc_bar0res == NULL) {
416                 device_printf(dev, "cannot map bar%d register space\n", 0);
417                 goto fail_pci;
418         }
419         sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
420         sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
421         sc->sc_bar0_lastreg = (bus_size_t) -1;
422
423         rid = HIFN_BAR1;
424         sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
425                                                 RF_ACTIVE);
426         if (sc->sc_bar1res == NULL) {
427                 device_printf(dev, "cannot map bar%d register space\n", 1);
428                 goto fail_io0;
429         }
430         sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
431         sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
432         sc->sc_bar1_lastreg = (bus_size_t) -1;
433
434         hifn_set_retry(sc);
435
436         /*
437          * Set up the area where the Hifn DMA descriptors
438          * and associated data structures will live.
439          */
440         if (bus_dma_tag_create(bus_get_dma_tag(dev),    /* PCI parent */
441                                1, 0,                    /* alignment,boundary */
442                                BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
443                                BUS_SPACE_MAXADDR,       /* highaddr */
444                                NULL, NULL,              /* filter, filterarg */
445                                HIFN_MAX_DMALEN,         /* maxsize */
446                                MAX_SCATTER,             /* nsegments */
447                                HIFN_MAX_SEGLEN,         /* maxsegsize */
448                                BUS_DMA_ALLOCNOW,        /* flags */
449                                NULL,                    /* lockfunc */
450                                NULL,                    /* lockarg */
451                                &sc->sc_dmat)) {
452                 device_printf(dev, "cannot allocate DMA tag\n");
453                 goto fail_io1;
454         }
455         if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
456                 device_printf(dev, "cannot create dma map\n");
457                 bus_dma_tag_destroy(sc->sc_dmat);
458                 goto fail_io1;
459         }
460         if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
461                 device_printf(dev, "cannot alloc dma buffer\n");
462                 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
463                 bus_dma_tag_destroy(sc->sc_dmat);
464                 goto fail_io1;
465         }
466         if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
467                              sizeof (*sc->sc_dma),
468                              hifn_dmamap_cb, &sc->sc_dma_physaddr,
469                              BUS_DMA_NOWAIT)) {
470                 device_printf(dev, "cannot load dma map\n");
471                 bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
472                 bus_dma_tag_destroy(sc->sc_dmat);
473                 goto fail_io1;
474         }
475         sc->sc_dma = (struct hifn_dma *)kva;
476         bzero(sc->sc_dma, sizeof(*sc->sc_dma));
477
478         KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
479         KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
480         KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
481         KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));
482
483         /*
484          * Reset the board and do the ``secret handshake''
485          * to enable the crypto support.  Then complete the
486          * initialization procedure by setting up the interrupt
487          * and hooking in to the system crypto support so we'll
488          * get used for system services like the crypto device,
489          * IPsec, RNG device, etc.
490          */
491         hifn_reset_board(sc, 0);
492
493         if (hifn_enable_crypto(sc) != 0) {
494                 device_printf(dev, "crypto enabling failed\n");
495                 goto fail_mem;
496         }
497         hifn_reset_puc(sc);
498
499         hifn_init_dma(sc);
500         hifn_init_pci_registers(sc);
501
502         /* XXX can't dynamically determine ram type for 795x; force dram */
503         if (sc->sc_flags & HIFN_IS_7956)
504                 sc->sc_drammodel = 1;
505         else if (hifn_ramtype(sc))
506                 goto fail_mem;
507
508         if (sc->sc_drammodel == 0)
509                 hifn_sramsize(sc);
510         else
511                 hifn_dramsize(sc);
512
513         /*
514          * Workaround for NetSec 7751 rev A: halve the ram size because
515          * two of the address lines were left floating.
516          */
517         if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
518             pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
519             pci_get_revid(dev) == 0x61) /*XXX???*/
520                 sc->sc_ramsize >>= 1;
521
522         /*
523          * Arrange the interrupt line.
524          */
525         rid = 0;
526         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
527                                             RF_SHAREABLE|RF_ACTIVE);
528         if (sc->sc_irq == NULL) {
529                 device_printf(dev, "could not map interrupt\n");
530                 goto fail_mem;
531         }
532         /*
533          * NB: Network code assumes we are blocked with splimp()
534          *     so make sure the IRQ is marked appropriately.
535          */
536         if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
537                            NULL, hifn_intr, sc, &sc->sc_intrhand)) {
538                 device_printf(dev, "could not setup interrupt\n");
539                 goto fail_intr2;
540         }
541
542         hifn_sessions(sc);
543
544         /*
545          * NB: Keep only the low 16 bits; this masks the chip id
546          *     from the 7951.
547          */
548         rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
549
550         rseg = sc->sc_ramsize / 1024;
551         rbase = 'K';
552         if (sc->sc_ramsize >= (1024 * 1024)) {
553                 rbase = 'M';
554                 rseg /= 1024;
555         }
556         device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
557                 hifn_partname(sc), rev,
558                 rseg, rbase, sc->sc_drammodel ? 'd' : 's');
559         if (sc->sc_flags & HIFN_IS_7956)
560                 printf(", pll=0x%x<%s clk, %ux mult>",
561                         sc->sc_pllconfig,
562                         sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
563                         2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
564         printf("\n");
565
566         WRITE_REG_0(sc, HIFN_0_PUCNFG,
567             READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
568         sc->sc_ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
569
570         switch (sc->sc_ena) {
571         case HIFN_PUSTAT_ENA_2:
572         case HIFN_PUSTAT_ENA_1:
573                 sc->sc_cid = crypto_get_driverid(dev,
574                     sizeof(struct hifn_session), CRYPTOCAP_F_HARDWARE);
575                 if (sc->sc_cid < 0) {
576                         device_printf(dev, "could not get crypto driver id\n");
577                         goto fail_intr;
578                 }
579                 break;
580         }
581                 
582         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
583             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
584
585         if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
586                 hifn_init_pubrng(sc);
587
588         callout_init(&sc->sc_tickto, 1);
589         callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
590
591         return (0);
592
593 fail_intr:
594         bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
595 fail_intr2:
596         /* XXX don't store rid */
597         bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
598 fail_mem:
599         bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
600         bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
601         bus_dma_tag_destroy(sc->sc_dmat);
602
603         /* Turn off DMA polling */
604         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
605             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
606 fail_io1:
607         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
608 fail_io0:
609         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
610 fail_pci:
611         mtx_destroy(&sc->sc_mtx);
612         return (ENXIO);
613 }
614
615 /*
616  * Detach an interface that successfully probed.
617  */
618 static int 
619 hifn_detach(device_t dev)
620 {
621         struct hifn_softc *sc = device_get_softc(dev);
622
623         KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
624
625         /* disable interrupts */
626         WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
627
628         /*XXX other resources */
629         callout_stop(&sc->sc_tickto);
630         callout_stop(&sc->sc_rngto);
631 #ifdef HIFN_RNDTEST
632         if (sc->sc_rndtest)
633                 rndtest_detach(sc->sc_rndtest);
634 #endif
635
636         /* Turn off DMA polling */
637         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
638             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
639
640         crypto_unregister_all(sc->sc_cid);
641
642         bus_generic_detach(dev);        /*XXX should be no children, right? */
643
644         bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
645         /* XXX don't store rid */
646         bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
647
648         bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
649         bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
650         bus_dma_tag_destroy(sc->sc_dmat);
651
652         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
653         bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
654
655         mtx_destroy(&sc->sc_mtx);
656
657         return (0);
658 }
659
660 /*
661  * Stop all chip I/O so that the kernel's probe routines don't
662  * get confused by errant DMAs when rebooting.
663  */
664 static int
665 hifn_shutdown(device_t dev)
666 {
667 #ifdef notyet
668         hifn_stop(device_get_softc(dev));
669 #endif
670         return (0);
671 }
672
673 /*
674  * Device suspend routine.  Stop the interface and save some PCI
675  * settings in case the BIOS doesn't restore them properly on
676  * resume.
677  */
678 static int
679 hifn_suspend(device_t dev)
680 {
681         struct hifn_softc *sc = device_get_softc(dev);
682 #ifdef notyet
683         hifn_stop(sc);
684 #endif
685         sc->sc_suspended = 1;
686
687         return (0);
688 }
689
690 /*
691  * Device resume routine.  Restore some PCI settings in case the BIOS
692  * doesn't, re-enable busmastering, and restart the interface if
693  * appropriate.
694  */
695 static int
696 hifn_resume(device_t dev)
697 {
698         struct hifn_softc *sc = device_get_softc(dev);
699 #ifdef notyet
700         /* reinitialize interface if necessary */
701         if (ifp->if_flags & IFF_UP)
702                 rl_init(sc);
703 #endif
704         sc->sc_suspended = 0;
705
706         return (0);
707 }
708
709 static int
710 hifn_init_pubrng(struct hifn_softc *sc)
711 {
712         u_int32_t r;
713         int i;
714
715 #ifdef HIFN_RNDTEST
716         sc->sc_rndtest = rndtest_attach(sc->sc_dev);
717         if (sc->sc_rndtest)
718                 sc->sc_harvest = rndtest_harvest;
719         else
720                 sc->sc_harvest = default_harvest;
721 #else
722         sc->sc_harvest = default_harvest;
723 #endif
724         if ((sc->sc_flags & HIFN_IS_7811) == 0) {
725                 /* Reset 7951 public key/rng engine */
726                 WRITE_REG_1(sc, HIFN_1_PUB_RESET,
727                     READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
728
729                 for (i = 0; i < 100; i++) {
730                         DELAY(1000);
731                         if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
732                             HIFN_PUBRST_RESET) == 0)
733                                 break;
734                 }
735
736                 if (i == 100) {
737                         device_printf(sc->sc_dev, "public key init failed\n");
738                         return (1);
739                 }
740         }
741
742         /* Enable the rng, if available */
743         if (sc->sc_flags & HIFN_HAS_RNG) {
744                 if (sc->sc_flags & HIFN_IS_7811) {
745                         r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
746                         if (r & HIFN_7811_RNGENA_ENA) {
747                                 r &= ~HIFN_7811_RNGENA_ENA;
748                                 WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
749                         }
750                         WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
751                             HIFN_7811_RNGCFG_DEFL);
752                         r |= HIFN_7811_RNGENA_ENA;
753                         WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
754                 } else
755                         WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
756                             READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
757                             HIFN_RNGCFG_ENA);
758
759                 sc->sc_rngfirst = 1;
760                 if (hz >= 100)
761                         sc->sc_rnghz = hz / 100;
762                 else
763                         sc->sc_rnghz = 1;
764                 callout_init(&sc->sc_rngto, 1);
765                 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
766         }
767
768         /* Enable public key engine, if available */
769         if (sc->sc_flags & HIFN_HAS_PUBLIC) {
770                 WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
771                 sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
772                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
773 #ifdef HIFN_VULCANDEV
774                 sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0, 
775                                         UID_ROOT, GID_WHEEL, 0666,
776                                         "vulcanpk");
777                 sc->sc_pkdev->si_drv1 = sc;
778 #endif
779         }
780
781         return (0);
782 }
783
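/*
 * Periodic RNG poll, rescheduled via the sc_rngto callout.  On the 7811
 * this drains up to five pairs of 32-bit words from the RNG FIFO while
 * the ready bit is set, bailing out (and not rescheduling) on underflow;
 * on the other parts it reads a single word from HIFN_1_RNG_DATA.  In
 * both cases the first value read after enabling the RNG is discarded.
 */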
784 static void
785 hifn_rng(void *vsc)
786 {
787 #define RANDOM_BITS(n)  (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
788         struct hifn_softc *sc = vsc;
789         u_int32_t sts, num[2];
790         int i;
791
792         if (sc->sc_flags & HIFN_IS_7811) {
793                 /* ONLY VALID ON 7811!!!! */
794                 for (i = 0; i < 5; i++) {
795                         sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
796                         if (sts & HIFN_7811_RNGSTS_UFL) {
797                                 device_printf(sc->sc_dev,
798                                               "RNG underflow: disabling\n");
799                                 return;
800                         }
801                         if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
802                                 break;
803
804                         /*
805                          * There are at least two words in the RNG FIFO
806                          * at this point.
807                          */
808                         num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
809                         num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
810                         /* NB: discard first data read */
811                         if (sc->sc_rngfirst)
812                                 sc->sc_rngfirst = 0;
813                         else
814                                 (*sc->sc_harvest)(sc->sc_rndtest,
815                                         num, sizeof (num));
816                 }
817         } else {
818                 num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);
819
820                 /* NB: discard first data read */
821                 if (sc->sc_rngfirst)
822                         sc->sc_rngfirst = 0;
823                 else
824                         (*sc->sc_harvest)(sc->sc_rndtest,
825                                 num, sizeof (num[0]));
826         }
827
828         callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
829 #undef RANDOM_BITS
830 }
831
832 static void
833 hifn_puc_wait(struct hifn_softc *sc)
834 {
835         int i;
836         int reg = HIFN_0_PUCTRL;
837
838         if (sc->sc_flags & HIFN_IS_7956) {
839                 reg = HIFN_0_PUCTRL2;
840         }
841
842         for (i = 5000; i > 0; i--) {
843                 DELAY(1);
844                 if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
845                         break;
846         }
847         if (!i)
848                 device_printf(sc->sc_dev, "proc unit did not reset\n");
849 }
850
851 /*
852  * Reset the processing unit.
853  */
854 static void
855 hifn_reset_puc(struct hifn_softc *sc)
856 {
857         /* Reset processing unit */
858         int reg = HIFN_0_PUCTRL;
859
860         if (sc->sc_flags & HIFN_IS_7956) {
861                 reg = HIFN_0_PUCTRL2;
862         }
863         WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
864
865         hifn_puc_wait(sc);
866 }
867
868 /*
869  * Set the Retry and TRDY registers; note that we set them to
870  * zero because the 7811 locks up when forced to retry (section
871  * 3.6 of "Specification Update SU-0014-04").  Not clear if we
872  * should do this for all Hifn parts, but it doesn't seem to hurt.
873  */
874 static void
875 hifn_set_retry(struct hifn_softc *sc)
876 {
877         /* NB: RETRY only responds to 8-bit reads/writes */
878         pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
879         pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 1);
880 }
881
882 /*
883  * Resets the board.  Values in the registers are left as-is
884  * from the reset (i.e. initial values are assigned elsewhere).
885  */
886 static void
887 hifn_reset_board(struct hifn_softc *sc, int full)
888 {
889         u_int32_t reg;
890
891         /*
892          * Set polling in the DMA configuration register to zero.  0x7 avoids
893          * resetting the board and zeros out the other fields.
894          */
895         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
896             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
897
898         /*
899          * Now that polling has been disabled, we have to wait 1 ms
900          * before resetting the board.
901          */
902         DELAY(1000);
903
904         /* Reset the DMA unit */
905         if (full) {
906                 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
907                 DELAY(1000);
908         } else {
909                 WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
910                     HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
911                 hifn_reset_puc(sc);
912         }
913
914         KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA descriptor area!"));
915         bzero(sc->sc_dma, sizeof(*sc->sc_dma));
916
917         /* Bring dma unit out of reset */
918         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
919             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
920
921         hifn_puc_wait(sc);
922         hifn_set_retry(sc);
923
924         if (sc->sc_flags & HIFN_IS_7811) {
925                 for (reg = 0; reg < 1000; reg++) {
926                         if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
927                             HIFN_MIPSRST_CRAMINIT)
928                                 break;
929                         DELAY(1000);
930                 }
931                 if (reg == 1000)
932                         printf(": cram init timeout\n");
933         } else {
934                 /* set up DMA configuration register #2 */
935                 /* turn off all PK and BAR0 swaps */
936                 WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
937                     (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT) |
938                     (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT) |
939                     (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT) |
940                     (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
941         }
942
943 }
944
945 static u_int32_t
946 hifn_next_signature(u_int32_t a, u_int cnt)
947 {
948         int i;
949         u_int32_t v;
950
951         for (i = 0; i < cnt; i++) {
952
953                 /* get the parity */
954                 v = a & 0x80080125;
955                 v ^= v >> 16;
956                 v ^= v >> 8;
957                 v ^= v >> 4;
958                 v ^= v >> 2;
959                 v ^= v >> 1;
960
961                 a = (v & 1) ^ (a << 1);
962         }
963
964         return a;
965 }
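/*
 * Worked example (illustrative, derived by stepping through the code
 * above): starting from a = 0x1, one iteration computes the parity of
 * (0x1 & 0x80080125) = 0x1, which is 1, giving a = 1 ^ (0x1 << 1) = 0x3;
 * a second iteration gives 0x7.  In effect each step is one advance of a
 * 32-bit linear feedback shift register whose taps are 0x80080125.
 */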
966
967 struct pci2id {
968         u_short         pci_vendor;
969         u_short         pci_prod;
970         char            card_id[13];
971 };
972 static struct pci2id pci2id[] = {
973         {
974                 PCI_VENDOR_HIFN,
975                 PCI_PRODUCT_HIFN_7951,
976                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
977                   0x00, 0x00, 0x00, 0x00, 0x00 }
978         }, {
979                 PCI_VENDOR_HIFN,
980                 PCI_PRODUCT_HIFN_7955,
981                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
982                   0x00, 0x00, 0x00, 0x00, 0x00 }
983         }, {
984                 PCI_VENDOR_HIFN,
985                 PCI_PRODUCT_HIFN_7956,
986                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
987                   0x00, 0x00, 0x00, 0x00, 0x00 }
988         }, {
989                 PCI_VENDOR_NETSEC,
990                 PCI_PRODUCT_NETSEC_7751,
991                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
992                   0x00, 0x00, 0x00, 0x00, 0x00 }
993         }, {
994                 PCI_VENDOR_INVERTEX,
995                 PCI_PRODUCT_INVERTEX_AEON,
996                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
997                   0x00, 0x00, 0x00, 0x00, 0x00 }
998         }, {
999                 PCI_VENDOR_HIFN,
1000                 PCI_PRODUCT_HIFN_7811,
1001                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1002                   0x00, 0x00, 0x00, 0x00, 0x00 }
1003         }, {
1004                 /*
1005                  * Other vendors share this PCI ID as well, such as
1006                  * http://www.powercrypt.com, and obviously they also
1007                  * use the same key.
1008                  */
1009                 PCI_VENDOR_HIFN,
1010                 PCI_PRODUCT_HIFN_7751,
1011                 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1012                   0x00, 0x00, 0x00, 0x00, 0x00 }
1013         },
1014 };
1015
1016 /*
1017  * Checks to see if crypto is already enabled.  If crypto isn't enabled,
1018  * the unlock handshake is performed to enable it.  The check is important,
1019  * as enabling crypto twice will lock the board.
1020  */
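/*
 * The unlock handshake below reads HIFN_UNLOCK_SECRET1, writes a zero to
 * HIFN_UNLOCK_SECRET2, and then writes thirteen successive values to
 * SECRET2, each derived from the previous one by hifn_next_signature()
 * with the corresponding card_id byte plus 0x101 as the iteration count.
 */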
1021 static int 
1022 hifn_enable_crypto(struct hifn_softc *sc)
1023 {
1024         u_int32_t dmacfg, ramcfg, encl, addr, i;
1025         char *offtbl = NULL;
1026
1027         for (i = 0; i < nitems(pci2id); i++) {
1028                 if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
1029                     pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
1030                         offtbl = pci2id[i].card_id;
1031                         break;
1032                 }
1033         }
1034         if (offtbl == NULL) {
1035                 device_printf(sc->sc_dev, "Unknown card!\n");
1036                 return (1);
1037         }
1038
1039         ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1040         dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
1041
1042         /*
1043          * The RAM config register's encrypt level bit needs to be set before
1044          * every read performed on the encryption level register.
1045          */
1046         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1047
1048         encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1049
1050         /*
1051  * Make sure we don't re-unlock.  Two unlocks kill the chip until the
1052          * next reboot.
1053          */
1054         if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
1055 #ifdef HIFN_DEBUG
1056                 if (hifn_debug)
1057                         device_printf(sc->sc_dev,
1058                             "Strong crypto already enabled!\n");
1059 #endif
1060                 goto report;
1061         }
1062
1063         if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
1064 #ifdef HIFN_DEBUG
1065                 if (hifn_debug)
1066                         device_printf(sc->sc_dev,
1067                               "Unknown encryption level 0x%x\n", encl);
1068 #endif
1069                 return 1;
1070         }
1071
1072         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
1073             HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
1074         DELAY(1000);
1075         addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
1076         DELAY(1000);
1077         WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
1078         DELAY(1000);
1079
1080         for (i = 0; i <= 12; i++) {
1081                 addr = hifn_next_signature(addr, offtbl[i] + 0x101);
1082                 WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
1083
1084                 DELAY(1000);
1085         }
1086
1087         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1088         encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1089
1090 #ifdef HIFN_DEBUG
1091         if (hifn_debug) {
1092                 if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
1093                         device_printf(sc->sc_dev, "Engine is permanently "
1094                                 "locked until next system reset!\n");
1095                 else
1096                         device_printf(sc->sc_dev, "Engine enabled "
1097                                 "successfully!\n");
1098         }
1099 #endif
1100
1101 report:
1102         WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
1103         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
1104
1105         switch (encl) {
1106         case HIFN_PUSTAT_ENA_1:
1107         case HIFN_PUSTAT_ENA_2:
1108                 break;
1109         case HIFN_PUSTAT_ENA_0:
1110         default:
1111                 device_printf(sc->sc_dev, "disabled");
1112                 break;
1113         }
1114
1115         return 0;
1116 }
1117
1118 /*
1119  * Give initial values to the registers listed in the "Register Space"
1120  * section of the HIFN Software Development reference manual.
1121  */
1122 static void 
1123 hifn_init_pci_registers(struct hifn_softc *sc)
1124 {
1125         /* write fixed values needed by the Initialization registers */
1126         WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
1127         WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
1128         WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
1129
1130         /* write all 4 ring address registers */
1131         WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
1132             offsetof(struct hifn_dma, cmdr[0]));
1133         WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
1134             offsetof(struct hifn_dma, srcr[0]));
1135         WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
1136             offsetof(struct hifn_dma, dstr[0]));
1137         WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
1138             offsetof(struct hifn_dma, resr[0]));
1139
1140         DELAY(2000);
1141
1142         /* write status register */
1143         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1144             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
1145             HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
1146             HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
1147             HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
1148             HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
1149             HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
1150             HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
1151             HIFN_DMACSR_S_WAIT |
1152             HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
1153             HIFN_DMACSR_C_WAIT |
1154             HIFN_DMACSR_ENGINE |
1155             ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
1156                 HIFN_DMACSR_PUBDONE : 0) |
1157             ((sc->sc_flags & HIFN_IS_7811) ?
1158                 HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
1159
1160         sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
1161         sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
1162             HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
1163             HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
1164             ((sc->sc_flags & HIFN_IS_7811) ?
1165                 HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
1166         sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
1167         WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1168
1169
1170         if (sc->sc_flags & HIFN_IS_7956) {
1171                 u_int32_t pll;
1172
1173                 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1174                     HIFN_PUCNFG_TCALLPHASES |
1175                     HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
1176
1177                 /* turn off the clocks and ensure bypass is set */
1178                 pll = READ_REG_1(sc, HIFN_1_PLL);
1179                 pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
1180                   | HIFN_PLL_BP | HIFN_PLL_MBSET;
1181                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1182                 DELAY(10*1000);         /* 10ms */
1183
1184                 /* change configuration */
1185                 pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
1186                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1187                 DELAY(10*1000);         /* 10ms */
1188
1189                 /* disable bypass */
1190                 pll &= ~HIFN_PLL_BP;
1191                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1192                 /* enable clocks with new configuration */
1193                 pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
1194                 WRITE_REG_1(sc, HIFN_1_PLL, pll);
1195         } else {
1196                 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
1197                     HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
1198                     HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
1199                     (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
1200         }
1201
1202         WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
1203         WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
1204             HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
1205             ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
1206             ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
1207 }
1208
1209 /*
1210  * The maximum number of sessions supported by the card
1211  * is dependent on the amount of context ram, which
1212  * encryption algorithms are enabled, and how compression
1213  * is configured.  This should be configured before this
1214  * routine is called.
1215  */
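/*
 * Worked example (illustrative): with HIFN_PUCNFG_COMPSING set and
 * HIFN_PUCNFG_ENCCNFG clear, ctxsize is 512 bytes, so a 7955/7956 with
 * 32K of internal context memory gets 32768 / 512 = 64 sessions, while
 * a 7751 with 1MB of ram gets 1 + (1048576 - 32768) / 512 = 1985
 * sessions (the code below caps sc_maxses at 2048).
 */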
1216 static void
1217 hifn_sessions(struct hifn_softc *sc)
1218 {
1219         u_int32_t pucnfg;
1220         int ctxsize;
1221
1222         pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1223
1224         if (pucnfg & HIFN_PUCNFG_COMPSING) {
1225                 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1226                         ctxsize = 128;
1227                 else
1228                         ctxsize = 512;
1229                 /*
1230                  * The 7955/7956 have 32K of internal context memory
1231                  */
1232                 if (sc->sc_flags & HIFN_IS_7956)
1233                         sc->sc_maxses = 32768 / ctxsize;
1234                 else
1235                         sc->sc_maxses = 1 +
1236                             ((sc->sc_ramsize - 32768) / ctxsize);
1237         } else
1238                 sc->sc_maxses = sc->sc_ramsize / 16384;
1239
1240         if (sc->sc_maxses > 2048)
1241                 sc->sc_maxses = 2048;
1242 }
1243
1244 /*
1245  * Determine ram type (sram or dram).  Board should be just out of a reset
1246  * state when this is called.
1247  */
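/*
 * The probe below writes a 0x55 pattern and then an 0xaa pattern to
 * address 0 and reads each back; if either read-back differs, the part
 * is assumed to have DRAM attached (presumably because DRAM contents do
 * not survive this early after reset without refresh), otherwise the
 * default SRAM model is kept.
 */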
1248 static int
1249 hifn_ramtype(struct hifn_softc *sc)
1250 {
1251         u_int8_t data[8], dataexpect[8];
1252         int i;
1253
1254         for (i = 0; i < sizeof(data); i++)
1255                 data[i] = dataexpect[i] = 0x55;
1256         if (hifn_writeramaddr(sc, 0, data))
1257                 return (-1);
1258         if (hifn_readramaddr(sc, 0, data))
1259                 return (-1);
1260         if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1261                 sc->sc_drammodel = 1;
1262                 return (0);
1263         }
1264
1265         for (i = 0; i < sizeof(data); i++)
1266                 data[i] = dataexpect[i] = 0xaa;
1267         if (hifn_writeramaddr(sc, 0, data))
1268                 return (-1);
1269         if (hifn_readramaddr(sc, 0, data))
1270                 return (-1);
1271         if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1272                 sc->sc_drammodel = 1;
1273                 return (0);
1274         }
1275
1276         return (0);
1277 }
1278
1279 #define HIFN_SRAM_MAX           (32 << 20)
1280 #define HIFN_SRAM_STEP_SIZE     16384
1281 #define HIFN_SRAM_GRANULARITY   (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1282
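/*
 * Probe the amount of attached SRAM by writing the step index at every
 * 16KB boundary from the top of the 32MB range down to zero, then
 * reading the indices back from the bottom up; once address wrap-around
 * causes a read-back mismatch, sc_ramsize is left at the last step that
 * verified correctly.
 */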
1283 static int
1284 hifn_sramsize(struct hifn_softc *sc)
1285 {
1286         u_int32_t a;
1287         u_int8_t data[8];
1288         u_int8_t dataexpect[sizeof(data)];
1289         int32_t i;
1290
1291         for (i = 0; i < sizeof(data); i++)
1292                 data[i] = dataexpect[i] = i ^ 0x5a;
1293
1294         for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1295                 a = i * HIFN_SRAM_STEP_SIZE;
1296                 bcopy(&i, data, sizeof(i));
1297                 hifn_writeramaddr(sc, a, data);
1298         }
1299
1300         for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1301                 a = i * HIFN_SRAM_STEP_SIZE;
1302                 bcopy(&i, dataexpect, sizeof(i));
1303                 if (hifn_readramaddr(sc, a, data) < 0)
1304                         return (0);
1305                 if (bcmp(data, dataexpect, sizeof(data)) != 0)
1306                         return (0);
1307                 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1308         }
1309
1310         return (0);
1311 }
1312
1313 /*
1314  * XXX For dram boards, one should really try all of the
1315  * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
1316  * is already set up correctly.
1317  */
1318 static int
1319 hifn_dramsize(struct hifn_softc *sc)
1320 {
1321         u_int32_t cnfg;
1322
1323         if (sc->sc_flags & HIFN_IS_7956) {
1324                 /*
1325                  * 7955/7956 have a fixed internal ram of only 32K.
1326                  */
1327                 sc->sc_ramsize = 32768;
1328         } else {
1329                 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1330                     HIFN_PUCNFG_DRAMMASK;
1331                 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1332         }
1333         return (0);
1334 }
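/*
 * Illustrative note on the size formula above: each increment of the
 * (cnfg >> 13) field doubles the reported size starting from
 * 1 << 18 = 256KB at field value 0 (1 -> 512KB, 2 -> 1MB, and so on).
 */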
1335
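/*
 * Reserve the next slot in each of the four descriptor rings (command,
 * source, destination, result).  When an index reaches the end of its
 * ring, the extra descriptor at that position is re-armed as a JUMP so
 * the hardware wraps back to entry 0, and the software index wraps too.
 */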
1336 static void
1337 hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
1338 {
1339         struct hifn_dma *dma = sc->sc_dma;
1340
1341         if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) {
1342                 sc->sc_cmdi = 0;
1343                 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
1344                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1345                 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1346                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1347         }
1348         *cmdp = sc->sc_cmdi++;
1349         sc->sc_cmdk = sc->sc_cmdi;
1350
1351         if (sc->sc_srci == HIFN_D_SRC_RSIZE) {
1352                 sc->sc_srci = 0;
1353                 dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
1354                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1355                 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1356                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1357         }
1358         *srcp = sc->sc_srci++;
1359         sc->sc_srck = sc->sc_srci;
1360
1361         if (sc->sc_dsti == HIFN_D_DST_RSIZE) {
1362                 sc->sc_dsti = 0;
1363                 dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
1364                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1365                 HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
1366                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1367         }
1368         *dstp = sc->sc_dsti++;
1369         sc->sc_dstk = sc->sc_dsti;
1370
1371         if (sc->sc_resi == HIFN_D_RES_RSIZE) {
1372                 sc->sc_resi = 0;
1373                 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
1374                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1375                 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
1376                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1377         }
1378         *resp = sc->sc_resi++;
1379         sc->sc_resk = sc->sc_resi;
1380 }
1381
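/*
 * Write eight bytes to external ram at the given address using the
 * command/source/dest/result rings: the target address is split across
 * the base command, with the upper bits carried in session_num and the
 * low 14 bits in the destination count field.
 */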
1382 static int
1383 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1384 {
1385         struct hifn_dma *dma = sc->sc_dma;
1386         hifn_base_command_t wc;
1387         const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1388         int r, cmdi, resi, srci, dsti;
1389
1390         wc.masks = htole16(3 << 13);
1391         wc.session_num = htole16(addr >> 14);
1392         wc.total_source_count = htole16(8);
1393         wc.total_dest_count = htole16(addr & 0x3fff);
1394
1395         hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1396
1397         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1398             HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1399             HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1400
1401         /* build write command */
1402         bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1403         *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1404         bcopy(data, &dma->test_src, sizeof(dma->test_src));
1405
1406         dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1407             + offsetof(struct hifn_dma, test_src));
1408         dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1409             + offsetof(struct hifn_dma, test_dst));
1410
1411         dma->cmdr[cmdi].l = htole32(16 | masks);
1412         dma->srcr[srci].l = htole32(8 | masks);
1413         dma->dstr[dsti].l = htole32(4 | masks);
1414         dma->resr[resi].l = htole32(4 | masks);
1415
1416         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1417             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1418
1419         for (r = 10000; r >= 0; r--) {
1420                 DELAY(10);
1421                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1422                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1423                 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1424                         break;
1425                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1426                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1427         }
1428         if (r < 0) {
1429                 device_printf(sc->sc_dev, "writeramaddr -- "
1430                     "result[%d](addr %d) still valid\n", resi, addr);
1431                 r = -1;
1432                 return (-1);
1433         } else
1434                 r = 0;
1435
1436         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1437             HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1438             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1439
1440         return (r);
1441 }
1442
1443 static int
1444 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1445 {
1446         struct hifn_dma *dma = sc->sc_dma;
1447         hifn_base_command_t rc;
1448         const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1449         int r, cmdi, srci, dsti, resi;
1450
1451         rc.masks = htole16(2 << 13);
1452         rc.session_num = htole16(addr >> 14);
1453         rc.total_source_count = htole16(addr & 0x3fff);
1454         rc.total_dest_count = htole16(8);
1455
1456         hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1457
1458         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1459             HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1460             HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1461
1462         bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1463         *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1464
1465         dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1466             offsetof(struct hifn_dma, test_src));
1467         dma->test_src = 0;
1468         dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr +
1469             offsetof(struct hifn_dma, test_dst));
1470         dma->test_dst = 0;
1471         dma->cmdr[cmdi].l = htole32(8 | masks);
1472         dma->srcr[srci].l = htole32(8 | masks);
1473         dma->dstr[dsti].l = htole32(8 | masks);
1474         dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1475
1476         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1477             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1478
1479         for (r = 10000; r >= 0; r--) {
1480                 DELAY(10);
1481                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1482                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1483                 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1484                         break;
1485                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1486                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1487         }
1488         if (r == 0) {
1489                 device_printf(sc->sc_dev, "readramaddr -- "
1490                     "result[%d](addr %d) still valid\n", resi, addr);
1491                 r = -1;
1492         } else {
1493                 r = 0;
1494                 bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1495         }
1496
1497         WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1498             HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1499             HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1500
1501         return (r);
1502 }
1503
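/*
 * A minimal standalone sketch of the context-RAM address encoding used by
 * hifn_writeramaddr() and hifn_readramaddr() above: the upper address bits
 * travel in the base command's session_num field and the low 14 bits in the
 * source (read) or destination (write) count field.  Illustrative only.
 */
#if 0
static void
hifn_ram_addr_split(int addr, u_int16_t *session, u_int16_t *count)
{

	*session = (u_int16_t)(addr >> 14);
	*count = (u_int16_t)(addr & 0x3fff);
}
#endif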
1504 /*
1505  * Initialize the descriptor rings.
1506  */
1507 static void 
1508 hifn_init_dma(struct hifn_softc *sc)
1509 {
1510         struct hifn_dma *dma = sc->sc_dma;
1511         int i;
1512
1513         hifn_set_retry(sc);
1514
1515         /* initialize static pointer values */
1516         for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1517                 dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
1518                     offsetof(struct hifn_dma, command_bufs[i][0]));
1519         for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1520                 dma->resr[i].p = htole32(sc->sc_dma_physaddr +
1521                     offsetof(struct hifn_dma, result_bufs[i][0]));
1522
1523         dma->cmdr[HIFN_D_CMD_RSIZE].p =
1524             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
1525         dma->srcr[HIFN_D_SRC_RSIZE].p =
1526             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
1527         dma->dstr[HIFN_D_DST_RSIZE].p =
1528             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
1529         dma->resr[HIFN_D_RES_RSIZE].p =
1530             htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
1531
1532         sc->sc_cmdu = sc->sc_srcu = sc->sc_dstu = sc->sc_resu = 0;
1533         sc->sc_cmdi = sc->sc_srci = sc->sc_dsti = sc->sc_resi = 0;
1534         sc->sc_cmdk = sc->sc_srck = sc->sc_dstk = sc->sc_resk = 0;
1535 }
1536
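/*
 * Sketch of the ring layout hifn_init_dma() establishes above: each ring has
 * HIFN_D_*_RSIZE usable slots plus one trailing descriptor whose pointer is
 * aimed back at slot 0; the run-time code marks that trailing slot
 * HIFN_D_VALID | HIFN_D_JUMP when it wraps.  The struct below is a simplified
 * stand-in for the real descriptor and the function is illustrative only.
 */
#if 0
struct ring_desc_sketch {
	u_int32_t p;	/* bus address of a buffer, or of slot 0 for the jump */
	u_int32_t l;	/* flags and length */
};

static void
ring_init_sketch(struct ring_desc_sketch *ring, int nslots, u_int32_t ring_pa)
{

	/* The extra slot at index nslots only ever jumps back to slot 0. */
	ring[nslots].p = htole32(ring_pa);
	ring[nslots].l = 0;	/* VALID|JUMP|MASKDONEIRQ is set on wrap */
}
#endif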
1537 /*
1538  * Writes out the raw command buffer space.  Returns the
1539  * command buffer size.
1540  */
1541 static u_int
1542 hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
1543 {
1544         struct cryptop *crp;
1545         u_int8_t *buf_pos;
1546         hifn_base_command_t *base_cmd;
1547         hifn_mac_command_t *mac_cmd;
1548         hifn_crypt_command_t *cry_cmd;
1549         int using_mac, using_crypt, ivlen;
1550         u_int32_t dlen, slen;
1551
1552         crp = cmd->crp;
1553         buf_pos = buf;
1554         using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
1555         using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
1556
1557         base_cmd = (hifn_base_command_t *)buf_pos;
1558         base_cmd->masks = htole16(cmd->base_masks);
1559         slen = cmd->src_mapsize;
1560         if (cmd->sloplen)
1561                 dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
1562         else
1563                 dlen = cmd->dst_mapsize;
1564         base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
1565         base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1566         dlen >>= 16;
1567         slen >>= 16;
1568         base_cmd->session_num = htole16(
1569             ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1570             ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1571         buf_pos += sizeof(hifn_base_command_t);
1572
1573         if (using_mac) {
1574                 mac_cmd = (hifn_mac_command_t *)buf_pos;
1575                 dlen = crp->crp_aad_length + crp->crp_payload_length;
1576                 mac_cmd->source_count = htole16(dlen & 0xffff);
1577                 dlen >>= 16;
1578                 mac_cmd->masks = htole16(cmd->mac_masks |
1579                     ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
1580                 if (crp->crp_aad_length != 0)
1581                         mac_cmd->header_skip = htole16(crp->crp_aad_start);
1582                 else
1583                         mac_cmd->header_skip = htole16(crp->crp_payload_start);
1584                 mac_cmd->reserved = 0;
1585                 buf_pos += sizeof(hifn_mac_command_t);
1586         }
1587
1588         if (using_crypt) {
1589                 cry_cmd = (hifn_crypt_command_t *)buf_pos;
1590                 dlen = crp->crp_payload_length;
1591                 cry_cmd->source_count = htole16(dlen & 0xffff);
1592                 dlen >>= 16;
1593                 cry_cmd->masks = htole16(cmd->cry_masks |
1594                     ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
1595                 cry_cmd->header_skip = htole16(crp->crp_payload_start);
1596                 cry_cmd->reserved = 0;
1597                 buf_pos += sizeof(hifn_crypt_command_t);
1598         }
1599
1600         if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
1601                 bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
1602                 buf_pos += HIFN_MAC_KEY_LENGTH;
1603         }
1604
1605         if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
1606                 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1607                 case HIFN_CRYPT_CMD_ALG_AES:
1608                         /*
1609                          * AES keys are variable 128, 192 and
1610                          * 256 bits (16, 24 and 32 bytes).
1611                          */
1612                         bcopy(cmd->ck, buf_pos, cmd->cklen);
1613                         buf_pos += cmd->cklen;
1614                         break;
1615                 }
1616         }
1617
1618         if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
1619                 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1620                 case HIFN_CRYPT_CMD_ALG_AES:
1621                         ivlen = HIFN_AES_IV_LENGTH;
1622                         break;
1623                 default:
1624                         ivlen = HIFN_IV_LENGTH;
1625                         break;
1626                 }
1627                 bcopy(cmd->iv, buf_pos, ivlen);
1628                 buf_pos += ivlen;
1629         }
1630
1631         if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
1632                 bzero(buf_pos, 8);
1633                 buf_pos += 8;
1634         }
1635
1636         return (buf_pos - buf);
1637 }
1638
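/*
 * Standalone sketch of the length split hifn_write_command() performs above:
 * the low 16 bits of the source/destination sizes go into the count fields
 * and the remaining high bits are folded into session_num with the
 * SRCLEN/DSTLEN shift and mask macros.  Byte swapping (htole16) is omitted
 * here; illustrative only.
 */
#if 0
static u_int16_t
hifn_pack_lengths_sketch(u_int32_t slen, u_int32_t dlen,
    u_int16_t *src_count, u_int16_t *dst_count)
{

	*src_count = slen & HIFN_BASE_CMD_LENMASK_LO;
	*dst_count = dlen & HIFN_BASE_CMD_LENMASK_LO;
	slen >>= 16;
	dlen >>= 16;
	return (((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
}
#endif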
1639 static int
1640 hifn_dmamap_aligned(struct hifn_operand *op)
1641 {
1642         int i;
1643
1644         for (i = 0; i < op->nsegs; i++) {
1645                 if (op->segs[i].ds_addr & 3)
1646                         return (0);
1647                 if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1648                         return (0);
1649         }
1650         return (1);
1651 }
1652
1653 static __inline int
1654 hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
1655 {
1656         struct hifn_dma *dma = sc->sc_dma;
1657
1658         if (++idx == HIFN_D_DST_RSIZE) {
1659                 dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1660                     HIFN_D_MASKDONEIRQ);
1661                 HIFN_DSTR_SYNC(sc, idx,
1662                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1663                 idx = 0;
1664         }
1665         return (idx);
1666 }
1667
1668 static int
1669 hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
1670 {
1671         struct hifn_dma *dma = sc->sc_dma;
1672         struct hifn_operand *dst = &cmd->dst;
1673         u_int32_t p, l;
1674         int idx, used = 0, i;
1675
1676         idx = sc->sc_dsti;
1677         for (i = 0; i < dst->nsegs - 1; i++) {
1678                 dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1679                 dma->dstr[idx].l = htole32(HIFN_D_VALID |
1680                     HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
1681                 HIFN_DSTR_SYNC(sc, idx,
1682                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1683                 used++;
1684
1685                 idx = hifn_dmamap_dstwrap(sc, idx);
1686         }
1687
1688         if (cmd->sloplen == 0) {
1689                 p = dst->segs[i].ds_addr;
1690                 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1691                     dst->segs[i].ds_len;
1692         } else {
1693                 p = sc->sc_dma_physaddr +
1694                     offsetof(struct hifn_dma, slop[cmd->slopidx]);
1695                 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1696                     sizeof(u_int32_t);
1697
1698                 if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
1699                         dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1700                         dma->dstr[idx].l = htole32(HIFN_D_VALID |
1701                             HIFN_D_MASKDONEIRQ |
1702                             (dst->segs[i].ds_len - cmd->sloplen));
1703                         HIFN_DSTR_SYNC(sc, idx,
1704                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1705                         used++;
1706
1707                         idx = hifn_dmamap_dstwrap(sc, idx);
1708                 }
1709         }
1710         dma->dstr[idx].p = htole32(p);
1711         dma->dstr[idx].l = htole32(l);
1712         HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1713         used++;
1714
1715         idx = hifn_dmamap_dstwrap(sc, idx);
1716
1717         sc->sc_dsti = idx;
1718         sc->sc_dstu += used;
1719         return (idx);
1720 }
1721
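/*
 * Sketch of the "slop" handling used by hifn_dmamap_load_dst() above: when a
 * mapping is longword aligned but its size is not a multiple of four, the
 * final sloplen bytes are steered into dma->slop[] through an extra
 * sizeof(u_int32_t) destination descriptor, and hifn_callback() later copies
 * them back to offset (mapsize - sloplen) of the request.  Illustrative only.
 */
#if 0
static void
hifn_slop_sketch(u_int32_t mapsize, u_int32_t *sloplen, u_int32_t *slopoff)
{

	*sloplen = mapsize & 3;
	*slopoff = mapsize - *sloplen;
}
#endif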
1722 static __inline int
1723 hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
1724 {
1725         struct hifn_dma *dma = sc->sc_dma;
1726
1727         if (++idx == HIFN_D_SRC_RSIZE) {
1728                 dma->srcr[idx].l = htole32(HIFN_D_VALID |
1729                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1730                 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1731                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1732                 idx = 0;
1733         }
1734         return (idx);
1735 }
1736
1737 static int
1738 hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1739 {
1740         struct hifn_dma *dma = sc->sc_dma;
1741         struct hifn_operand *src = &cmd->src;
1742         int idx, i;
1743         u_int32_t last = 0;
1744
1745         idx = sc->sc_srci;
1746         for (i = 0; i < src->nsegs; i++) {
1747                 if (i == src->nsegs - 1)
1748                         last = HIFN_D_LAST;
1749
1750                 dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1751                 dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1752                     HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1753                 HIFN_SRCR_SYNC(sc, idx,
1754                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1755
1756                 idx = hifn_dmamap_srcwrap(sc, idx);
1757         }
1758         sc->sc_srci = idx;
1759         sc->sc_srcu += src->nsegs;
1760         return (idx);
1761 }
1762
1763 static void
1764 hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, int error)
1765 {
1766         struct hifn_operand *op = arg;
1767
1768         KASSERT(nsegs <= MAX_SCATTER,
1769                 ("hifn_op_cb: too many DMA segments (%u > %u) "
1770                  "returned when mapping operand", nsegs, MAX_SCATTER));
1771         op->nsegs = nsegs;
1772         bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
1773 }
1774
1775 static int 
1776 hifn_crypto(
1777         struct hifn_softc *sc,
1778         struct hifn_command *cmd,
1779         struct cryptop *crp,
1780         int hint)
1781 {
1782         struct  hifn_dma *dma = sc->sc_dma;
1783         u_int32_t cmdlen, csr;
1784         int cmdi, resi, err = 0;
1785
1786         /*
1787          * need 1 cmd, and 1 res
1788          *
1789          * NB: check this first since it's easy.
1790          */
1791         HIFN_LOCK(sc);
1792         if ((sc->sc_cmdu + 1) > HIFN_D_CMD_RSIZE ||
1793             (sc->sc_resu + 1) > HIFN_D_RES_RSIZE) {
1794 #ifdef HIFN_DEBUG
1795                 if (hifn_debug) {
1796                         device_printf(sc->sc_dev,
1797                                 "cmd/result exhaustion, cmdu %u resu %u\n",
1798                                 sc->sc_cmdu, sc->sc_resu);
1799                 }
1800 #endif
1801                 hifnstats.hst_nomem_cr++;
1802                 HIFN_UNLOCK(sc);
1803                 return (ERESTART);
1804         }
1805
1806         if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
1807                 hifnstats.hst_nomem_map++;
1808                 HIFN_UNLOCK(sc);
1809                 return (ENOMEM);
1810         }
1811
1812         if (bus_dmamap_load_crp(sc->sc_dmat, cmd->src_map, crp, hifn_op_cb,
1813             &cmd->src, BUS_DMA_NOWAIT)) {
1814                 hifnstats.hst_nomem_load++;
1815                 err = ENOMEM;
1816                 goto err_srcmap1;
1817         }
1818         cmd->src_mapsize = crypto_buffer_len(&crp->crp_buf);
1819
1820         if (hifn_dmamap_aligned(&cmd->src)) {
1821                 cmd->sloplen = cmd->src_mapsize & 3;
1822                 cmd->dst = cmd->src;
1823         } else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) {
1824                 int totlen, len;
1825                 struct mbuf *m, *m0, *mlast;
1826
1827                 KASSERT(cmd->dst_m == NULL,
1828                     ("hifn_crypto: dst_m initialized improperly"));
1829                 hifnstats.hst_unaligned++;
1830
1831                 /*
1832                  * Source is not aligned on a longword boundary.
1833                  * Copy the data to ensure alignment.  If we fail
1834                  * to allocate mbufs or clusters while doing this
1835                  * we return ERESTART so the operation is requeued
1836                  * at the crypto layer, but only if there are
1837                  * ops already posted to the hardware; otherwise we
1838                  * have no guarantee that we'll be re-entered.
1839                  */
1840                 totlen = cmd->src_mapsize;
1841                 if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) {
1842                         len = MHLEN;
1843                         MGETHDR(m0, M_NOWAIT, MT_DATA);
1844                         if (m0 && !m_dup_pkthdr(m0, crp->crp_buf.cb_mbuf,
1845                             M_NOWAIT)) {
1846                                 m_free(m0);
1847                                 m0 = NULL;
1848                         }
1849                 } else {
1850                         len = MLEN;
1851                         MGET(m0, M_NOWAIT, MT_DATA);
1852                 }
1853                 if (m0 == NULL) {
1854                         hifnstats.hst_nomem_mbuf++;
1855                         err = sc->sc_cmdu ? ERESTART : ENOMEM;
1856                         goto err_srcmap;
1857                 }
1858                 if (totlen >= MINCLSIZE) {
1859                         if (!(MCLGET(m0, M_NOWAIT))) {
1860                                 hifnstats.hst_nomem_mcl++;
1861                                 err = sc->sc_cmdu ? ERESTART : ENOMEM;
1862                                 m_freem(m0);
1863                                 goto err_srcmap;
1864                         }
1865                         len = MCLBYTES;
1866                 }
1867                 totlen -= len;
1868                 m0->m_pkthdr.len = m0->m_len = len;
1869                 mlast = m0;
1870
1871                 while (totlen > 0) {
1872                         MGET(m, M_NOWAIT, MT_DATA);
1873                         if (m == NULL) {
1874                                 hifnstats.hst_nomem_mbuf++;
1875                                 err = sc->sc_cmdu ? ERESTART : ENOMEM;
1876                                 m_freem(m0);
1877                                 goto err_srcmap;
1878                         }
1879                         len = MLEN;
1880                         if (totlen >= MINCLSIZE) {
1881                                 if (!(MCLGET(m, M_NOWAIT))) {
1882                                         hifnstats.hst_nomem_mcl++;
1883                                         err = sc->sc_cmdu ? ERESTART : ENOMEM;
1884                                         mlast->m_next = m;
1885                                         m_freem(m0);
1886                                         goto err_srcmap;
1887                                 }
1888                                 len = MCLBYTES;
1889                         }
1890
1891                         m->m_len = len;
1892                         m0->m_pkthdr.len += len;
1893                         totlen -= len;
1894
1895                         mlast->m_next = m;
1896                         mlast = m;
1897                 }
1898                 cmd->dst_m = m0;
1899
1900                 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
1901                     &cmd->dst_map)) {
1902                         hifnstats.hst_nomem_map++;
1903                         err = ENOMEM;
1904                         goto err_srcmap;
1905                 }
1906
1907                 if (bus_dmamap_load_mbuf_sg(sc->sc_dmat, cmd->dst_map, m0,
1908                     cmd->dst_segs, &cmd->dst_nsegs, 0)) {
1909                         hifnstats.hst_nomem_map++;
1910                         err = ENOMEM;
1911                         goto err_dstmap1;
1912                 }
1913                 cmd->dst_mapsize = m0->m_pkthdr.len;
1914         } else {
1915                 err = EINVAL;
1916                 goto err_srcmap;
1917         }
1918
1919 #ifdef HIFN_DEBUG
1920         if (hifn_debug) {
1921                 device_printf(sc->sc_dev,
1922                     "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
1923                     READ_REG_1(sc, HIFN_1_DMA_CSR),
1924                     READ_REG_1(sc, HIFN_1_DMA_IER),
1925                     sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu,
1926                     cmd->src_nsegs, cmd->dst_nsegs);
1927         }
1928 #endif
1929
1930         if (cmd->src_map == cmd->dst_map) {
1931                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1932                     BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1933         } else {
1934                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1935                     BUS_DMASYNC_PREWRITE);
1936                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
1937                     BUS_DMASYNC_PREREAD);
1938         }
1939
1940         /*
1941          * need N src, and N dst
1942          */
1943         if ((sc->sc_srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
1944             (sc->sc_dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
1945 #ifdef HIFN_DEBUG
1946                 if (hifn_debug) {
1947                         device_printf(sc->sc_dev,
1948                                 "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
1949                                 sc->sc_srcu, cmd->src_nsegs,
1950                                 sc->sc_dstu, cmd->dst_nsegs);
1951                 }
1952 #endif
1953                 hifnstats.hst_nomem_sd++;
1954                 err = ERESTART;
1955                 goto err_dstmap;
1956         }
1957
1958         if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) {
1959                 sc->sc_cmdi = 0;
1960                 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
1961                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1962                 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1963                     BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1964         }
1965         cmdi = sc->sc_cmdi++;
1966         cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
1967         HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
1968
1969         /* .p for command/result already set */
1970         dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
1971             HIFN_D_MASKDONEIRQ);
1972         HIFN_CMDR_SYNC(sc, cmdi,
1973             BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1974         sc->sc_cmdu++;
1975
1976         /*
1977          * We don't worry about missing an interrupt (which a "command wait"
1978          * interrupt salvages us from), unless there is more than one command
1979          * in the queue.
1980          */
1981         if (sc->sc_cmdu > 1) {
1982                 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
1983                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1984         }
1985
1986         hifnstats.hst_ipackets++;
1987         hifnstats.hst_ibytes += cmd->src_mapsize;
1988
1989         hifn_dmamap_load_src(sc, cmd);
1990
1991         /*
1992          * Unlike other descriptors, we don't mask done interrupt from
1993          * result descriptor.
1994          */
1995 #ifdef HIFN_DEBUG
1996         if (hifn_debug)
1997                 printf("load res\n");
1998 #endif
1999         if (sc->sc_resi == HIFN_D_RES_RSIZE) {
2000                 sc->sc_resi = 0;
2001                 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2002                     HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2003                 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2004                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2005         }
2006         resi = sc->sc_resi++;
2007         KASSERT(sc->sc_hifn_commands[resi] == NULL,
2008                 ("hifn_crypto: command slot %u busy", resi));
2009         sc->sc_hifn_commands[resi] = cmd;
2010         HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2011         if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
2012                 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2013                     HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
2014                 sc->sc_curbatch++;
2015                 if (sc->sc_curbatch > hifnstats.hst_maxbatch)
2016                         hifnstats.hst_maxbatch = sc->sc_curbatch;
2017                 hifnstats.hst_totbatch++;
2018         } else {
2019                 dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2020                     HIFN_D_VALID | HIFN_D_LAST);
2021                 sc->sc_curbatch = 0;
2022         }
2023         HIFN_RESR_SYNC(sc, resi,
2024             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2025         sc->sc_resu++;
2026
2027         if (cmd->sloplen)
2028                 cmd->slopidx = resi;
2029
2030         hifn_dmamap_load_dst(sc, cmd);
2031
2032         csr = 0;
2033         if (sc->sc_c_busy == 0) {
2034                 csr |= HIFN_DMACSR_C_CTRL_ENA;
2035                 sc->sc_c_busy = 1;
2036         }
2037         if (sc->sc_s_busy == 0) {
2038                 csr |= HIFN_DMACSR_S_CTRL_ENA;
2039                 sc->sc_s_busy = 1;
2040         }
2041         if (sc->sc_r_busy == 0) {
2042                 csr |= HIFN_DMACSR_R_CTRL_ENA;
2043                 sc->sc_r_busy = 1;
2044         }
2045         if (sc->sc_d_busy == 0) {
2046                 csr |= HIFN_DMACSR_D_CTRL_ENA;
2047                 sc->sc_d_busy = 1;
2048         }
2049         if (csr)
2050                 WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
2051
2052 #ifdef HIFN_DEBUG
2053         if (hifn_debug) {
2054                 device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
2055                     READ_REG_1(sc, HIFN_1_DMA_CSR),
2056                     READ_REG_1(sc, HIFN_1_DMA_IER));
2057         }
2058 #endif
2059
2060         sc->sc_active = 5;
2061         HIFN_UNLOCK(sc);
2062         KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
2063         return (err);           /* success */
2064
2065 err_dstmap:
2066         if (cmd->src_map != cmd->dst_map)
2067                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2068 err_dstmap1:
2069         if (cmd->src_map != cmd->dst_map)
2070                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2071 err_srcmap:
2072         if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) {
2073                 if (cmd->dst_m != NULL)
2074                         m_freem(cmd->dst_m);
2075         }
2076         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2077 err_srcmap1:
2078         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2079         HIFN_UNLOCK(sc);
2080         return (err);
2081 }
2082
2083 static void
2084 hifn_tick(void* vsc)
2085 {
2086         struct hifn_softc *sc = vsc;
2087
2088         HIFN_LOCK(sc);
2089         if (sc->sc_active == 0) {
2090                 u_int32_t r = 0;
2091
2092                 if (sc->sc_cmdu == 0 && sc->sc_c_busy) {
2093                         sc->sc_c_busy = 0;
2094                         r |= HIFN_DMACSR_C_CTRL_DIS;
2095                 }
2096                 if (sc->sc_srcu == 0 && sc->sc_s_busy) {
2097                         sc->sc_s_busy = 0;
2098                         r |= HIFN_DMACSR_S_CTRL_DIS;
2099                 }
2100                 if (sc->sc_dstu == 0 && sc->sc_d_busy) {
2101                         sc->sc_d_busy = 0;
2102                         r |= HIFN_DMACSR_D_CTRL_DIS;
2103                 }
2104                 if (sc->sc_resu == 0 && sc->sc_r_busy) {
2105                         sc->sc_r_busy = 0;
2106                         r |= HIFN_DMACSR_R_CTRL_DIS;
2107                 }
2108                 if (r)
2109                         WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
2110         } else
2111                 sc->sc_active--;
2112         HIFN_UNLOCK(sc);
2113         callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
2114 }
2115
2116 static void 
2117 hifn_intr(void *arg)
2118 {
2119         struct hifn_softc *sc = arg;
2120         struct hifn_dma *dma;
2121         u_int32_t dmacsr, restart;
2122         int i, u;
2123
2124         dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
2125
2126         /* Nothing in the DMA unit interrupted */
2127         if ((dmacsr & sc->sc_dmaier) == 0)
2128                 return;
2129
2130         HIFN_LOCK(sc);
2131
2132         dma = sc->sc_dma;
2133
2134 #ifdef HIFN_DEBUG
2135         if (hifn_debug) {
2136                 device_printf(sc->sc_dev,
2137                     "irq: stat %08x ien %08x dmaier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
2138                     dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
2139                     sc->sc_cmdi, sc->sc_srci, sc->sc_dsti, sc->sc_resi,
2140                     sc->sc_cmdk, sc->sc_srck, sc->sc_dstk, sc->sc_resk,
2141                     sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
2142         }
2143 #endif
2144
2145         WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
2146
2147         if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
2148             (dmacsr & HIFN_DMACSR_PUBDONE))
2149                 WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
2150                     READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
2151
2152         restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
2153         if (restart)
2154                 device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
2155
2156         if (sc->sc_flags & HIFN_IS_7811) {
2157                 if (dmacsr & HIFN_DMACSR_ILLR)
2158                         device_printf(sc->sc_dev, "illegal read\n");
2159                 if (dmacsr & HIFN_DMACSR_ILLW)
2160                         device_printf(sc->sc_dev, "illegal write\n");
2161         }
2162
2163         restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
2164             HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
2165         if (restart) {
2166                 device_printf(sc->sc_dev, "abort, resetting.\n");
2167                 hifnstats.hst_abort++;
2168                 hifn_abort(sc);
2169                 HIFN_UNLOCK(sc);
2170                 return;
2171         }
2172
2173         if ((dmacsr & HIFN_DMACSR_C_WAIT) && (sc->sc_cmdu == 0)) {
2174                 /*
2175                  * If there are no slots to process and we receive a
2176                  * "waiting on command" interrupt, disable that interrupt
2177                  * by clearing its enable bit in the IER.
2178                  */
2179                 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
2180                 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2181         }
2182
2183         /* clear the rings */
2184         i = sc->sc_resk; u = sc->sc_resu;
2185         while (u != 0) {
2186                 HIFN_RESR_SYNC(sc, i,
2187                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2188                 if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
2189                         HIFN_RESR_SYNC(sc, i,
2190                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2191                         break;
2192                 }
2193
2194                 if (i != HIFN_D_RES_RSIZE) {
2195                         struct hifn_command *cmd;
2196                         u_int8_t *macbuf = NULL;
2197
2198                         HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
2199                         cmd = sc->sc_hifn_commands[i];
2200                         KASSERT(cmd != NULL,
2201                                 ("hifn_intr: null command slot %u", i));
2202                         sc->sc_hifn_commands[i] = NULL;
2203
2204                         if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2205                                 macbuf = dma->result_bufs[i];
2206                                 macbuf += 12;
2207                         }
2208
2209                         hifn_callback(sc, cmd, macbuf);
2210                         hifnstats.hst_opackets++;
2211                         u--;
2212                 }
2213
2214                 if (++i == (HIFN_D_RES_RSIZE + 1))
2215                         i = 0;
2216         }
2217         sc->sc_resk = i; sc->sc_resu = u;
2218
2219         i = sc->sc_srck; u = sc->sc_srcu;
2220         while (u != 0) {
2221                 if (i == HIFN_D_SRC_RSIZE)
2222                         i = 0;
2223                 HIFN_SRCR_SYNC(sc, i,
2224                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2225                 if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
2226                         HIFN_SRCR_SYNC(sc, i,
2227                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2228                         break;
2229                 }
2230                 i++, u--;
2231         }
2232         sc->sc_srck = i; sc->sc_srcu = u;
2233
2234         i = sc->sc_cmdk; u = sc->sc_cmdu;
2235         while (u != 0) {
2236                 HIFN_CMDR_SYNC(sc, i,
2237                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2238                 if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
2239                         HIFN_CMDR_SYNC(sc, i,
2240                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2241                         break;
2242                 }
2243                 if (i != HIFN_D_CMD_RSIZE) {
2244                         u--;
2245                         HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
2246                 }
2247                 if (++i == (HIFN_D_CMD_RSIZE + 1))
2248                         i = 0;
2249         }
2250         sc->sc_cmdk = i; sc->sc_cmdu = u;
2251
2252         HIFN_UNLOCK(sc);
2253
2254         if (sc->sc_needwakeup) {                /* XXX check high watermark */
2255                 int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
2256 #ifdef HIFN_DEBUG
2257                 if (hifn_debug)
2258                         device_printf(sc->sc_dev,
2259                                 "wakeup crypto (%x) u %d/%d/%d/%d\n",
2260                                 sc->sc_needwakeup,
2261                                 sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
2262 #endif
2263                 sc->sc_needwakeup &= ~wakeup;
2264                 crypto_unblock(sc->sc_cid, wakeup);
2265         }
2266 }
2267
2268 static bool
2269 hifn_auth_supported(struct hifn_softc *sc,
2270     const struct crypto_session_params *csp)
2271 {
2272
2273         switch (sc->sc_ena) {
2274         case HIFN_PUSTAT_ENA_2:
2275         case HIFN_PUSTAT_ENA_1:
2276                 break;
2277         default:
2278                 return (false);
2279         }
2280                 
2281         switch (csp->csp_auth_alg) {
2282         case CRYPTO_SHA1:
2283                 break;
2284         case CRYPTO_SHA1_HMAC:
2285                 if (csp->csp_auth_klen > HIFN_MAC_KEY_LENGTH)
2286                         return (false);
2287                 break;
2288         default:
2289                 return (false);
2290         }
2291
2292         return (true);  
2293 }
2294
2295 static bool
2296 hifn_cipher_supported(struct hifn_softc *sc,
2297     const struct crypto_session_params *csp)
2298 {
2299
2300         if (csp->csp_cipher_klen == 0)
2301                 return (false);
2302         if (csp->csp_ivlen > HIFN_MAX_IV_LENGTH)
2303                 return (false);
2304         switch (sc->sc_ena) {
2305         case HIFN_PUSTAT_ENA_2:
2306                 switch (csp->csp_cipher_alg) {
2307                 case CRYPTO_AES_CBC:
2308                         if ((sc->sc_flags & HIFN_HAS_AES) == 0)
2309                                 return (false);
2310                         switch (csp->csp_cipher_klen) {
2311                         case 128 / 8:
2312                         case 192 / 8:
2313                         case 256 / 8:
2314                                 break;
2315                         default:
2316                                 return (false);
2317                         }
2318                         return (true);
2319                 }
2320         }
2321         return (false);
2322 }
2323
2324 static int
2325 hifn_probesession(device_t dev, const struct crypto_session_params *csp)
2326 {
2327         struct hifn_softc *sc;
2328
2329         sc = device_get_softc(dev);
2330         if (csp->csp_flags != 0)
2331                 return (EINVAL);
2332         switch (csp->csp_mode) {
2333         case CSP_MODE_DIGEST:
2334                 if (!hifn_auth_supported(sc, csp))
2335                         return (EINVAL);
2336                 break;
2337         case CSP_MODE_CIPHER:
2338                 if (!hifn_cipher_supported(sc, csp))
2339                         return (EINVAL);
2340                 break;
2341         case CSP_MODE_ETA:
2342                 if (!hifn_auth_supported(sc, csp) ||
2343                     !hifn_cipher_supported(sc, csp))
2344                         return (EINVAL);
2345                 break;
2346         default:
2347                 return (EINVAL);
2348         }
2349
2350         return (CRYPTODEV_PROBE_HARDWARE);
2351 }
2352
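/*
 * Hedged example of session parameters hifn_probesession() above would claim
 * as hardware-supported: an ETA combination of AES-CBC and SHA1-HMAC with no
 * csp_flags.  The specific key and IV lengths below are illustrative
 * assumptions, not values taken from this driver.
 */
#if 0
static void
hifn_example_eta_params(struct crypto_session_params *csp)
{

	bzero(csp, sizeof(*csp));
	csp->csp_mode = CSP_MODE_ETA;		/* cipher plus MAC */
	csp->csp_cipher_alg = CRYPTO_AES_CBC;
	csp->csp_cipher_klen = 16;		/* 128-bit key; 24/32 also pass */
	csp->csp_ivlen = 16;			/* AES-CBC block-sized IV */
	csp->csp_auth_alg = CRYPTO_SHA1_HMAC;
	csp->csp_auth_klen = 20;		/* must be <= HIFN_MAC_KEY_LENGTH */
}
#endif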
2353 /*
2354  * Allocate a new 'session'.
2355  */
2356 static int
2357 hifn_newsession(device_t dev, crypto_session_t cses,
2358     const struct crypto_session_params *csp)
2359 {
2360         struct hifn_session *ses;
2361
2362         ses = crypto_get_driver_session(cses);
2363
2364         if (csp->csp_auth_alg != 0) {
2365                 if (csp->csp_auth_mlen == 0)
2366                         ses->hs_mlen = crypto_auth_hash(csp)->hashsize;
2367                 else
2368                         ses->hs_mlen = csp->csp_auth_mlen;
2369         }
2370
2371         return (0);
2372 }
2373
2374 /*
2375  * XXX The freesession routine should run a zeroed mac/encrypt key into
2376  * context ram to blow away any keys already stored there.
2377  */
2378
2379 static int
2380 hifn_process(device_t dev, struct cryptop *crp, int hint)
2381 {
2382         const struct crypto_session_params *csp;
2383         struct hifn_softc *sc = device_get_softc(dev);
2384         struct hifn_command *cmd = NULL;
2385         const void *mackey;
2386         int err, keylen;
2387         struct hifn_session *ses;
2388
2389         ses = crypto_get_driver_session(crp->crp_session);
2390
2391         cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO);
2392         if (cmd == NULL) {
2393                 hifnstats.hst_nomem++;
2394                 err = ENOMEM;
2395                 goto errout;
2396         }
2397
2398         csp = crypto_get_params(crp->crp_session);
2399
2400         /*
2401          * The driver only supports ETA requests where there is no
2402          * gap between the AAD and payload.
2403          */
2404         if (csp->csp_mode == CSP_MODE_ETA && crp->crp_aad_length != 0 &&
2405             crp->crp_aad_start + crp->crp_aad_length !=
2406             crp->crp_payload_start) {
2407                 err = EINVAL;
2408                 goto errout;
2409         }
2410
2411         switch (csp->csp_mode) {
2412         case CSP_MODE_CIPHER:
2413         case CSP_MODE_ETA:
2414                 if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2415                         cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2416                 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2417                 switch (csp->csp_cipher_alg) {
2418                 case CRYPTO_AES_CBC:
2419                         cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2420                             HIFN_CRYPT_CMD_MODE_CBC |
2421                             HIFN_CRYPT_CMD_NEW_IV;
2422                         break;
2423                 default:
2424                         err = EINVAL;
2425                         goto errout;
2426                 }
2427                 crypto_read_iv(crp, cmd->iv);
2428
2429                 if (crp->crp_cipher_key != NULL)
2430                         cmd->ck = crp->crp_cipher_key;
2431                 else
2432                         cmd->ck = csp->csp_cipher_key;
2433                 cmd->cklen = csp->csp_cipher_klen;
2434                 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2435
2436                 /* 
2437                  * Need to specify the size for the AES key in the masks.
2438                  */
2439                 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2440                     HIFN_CRYPT_CMD_ALG_AES) {
2441                         switch (cmd->cklen) {
2442                         case 16:
2443                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2444                                 break;
2445                         case 24:
2446                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2447                                 break;
2448                         case 32:
2449                                 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2450                                 break;
2451                         default:
2452                                 err = EINVAL;
2453                                 goto errout;
2454                         }
2455                 }
2456                 break;
2457         }
2458
2459         switch (csp->csp_mode) {
2460         case CSP_MODE_DIGEST:
2461         case CSP_MODE_ETA:
2462                 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2463
2464                 switch (csp->csp_auth_alg) {
2465                 case CRYPTO_SHA1:
2466                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2467                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2468                             HIFN_MAC_CMD_POS_IPSEC;
2469                         break;
2470                 case CRYPTO_SHA1_HMAC:
2471                         cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2472                             HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2473                             HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2474                         break;
2475                 }
2476
2477                 if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) {
2478                         cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2479                         if (crp->crp_auth_key != NULL)
2480                                 mackey = crp->crp_auth_key;
2481                         else
2482                                 mackey = csp->csp_auth_key;
2483                         keylen = csp->csp_auth_klen;
2484                         bcopy(mackey, cmd->mac, keylen);
2485                         bzero(cmd->mac + keylen, HIFN_MAC_KEY_LENGTH - keylen);
2486                 }
2487         }
2488
2489         cmd->crp = crp;
2490         cmd->session = ses;
2491         cmd->softc = sc;
2492
2493         err = hifn_crypto(sc, cmd, crp, hint);
2494         if (!err) {
2495                 return 0;
2496         } else if (err == ERESTART) {
2497                 /*
2498                  * There weren't enough resources to dispatch the request
2499                  * to the part.  Notify the caller so they'll requeue this
2500                  * request and resubmit it again soon.
2501                  */
2502 #ifdef HIFN_DEBUG
2503                 if (hifn_debug)
2504                         device_printf(sc->sc_dev, "requeue request\n");
2505 #endif
2506                 free(cmd, M_DEVBUF);
2507                 sc->sc_needwakeup |= CRYPTO_SYMQ;
2508                 return (err);
2509         }
2510
2511 errout:
2512         if (cmd != NULL)
2513                 free(cmd, M_DEVBUF);
2514         if (err == EINVAL)
2515                 hifnstats.hst_invalid++;
2516         else
2517                 hifnstats.hst_nomem++;
2518         crp->crp_etype = err;
2519         crypto_done(crp);
2520         return (err);
2521 }
2522
2523 static void
2524 hifn_abort(struct hifn_softc *sc)
2525 {
2526         struct hifn_dma *dma = sc->sc_dma;
2527         struct hifn_command *cmd;
2528         struct cryptop *crp;
2529         int i, u;
2530
2531         i = sc->sc_resk; u = sc->sc_resu;
2532         while (u != 0) {
2533                 cmd = sc->sc_hifn_commands[i];
2534                 KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
2535                 sc->sc_hifn_commands[i] = NULL;
2536                 crp = cmd->crp;
2537
2538                 if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
2539                         /* Salvage what we can. */
2540                         u_int8_t *macbuf;
2541
2542                         if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2543                                 macbuf = dma->result_bufs[i];
2544                                 macbuf += 12;
2545                         } else
2546                                 macbuf = NULL;
2547                         hifnstats.hst_opackets++;
2548                         hifn_callback(sc, cmd, macbuf);
2549                 } else {
2550                         if (cmd->src_map == cmd->dst_map) {
2551                                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2552                                     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2553                         } else {
2554                                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2555                                     BUS_DMASYNC_POSTWRITE);
2556                                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2557                                     BUS_DMASYNC_POSTREAD);
2558                         }
2559
2560                         if (cmd->dst_m != NULL) {
2561                                 m_freem(cmd->dst_m);
2562                         }
2563
2564                         /* non-shared buffers cannot be restarted */
2565                         if (cmd->src_map != cmd->dst_map) {
2566                                 /*
2567                                  * XXX should be EAGAIN, delayed until
2568                                  * after the reset.
2569                                  */
2570                                 crp->crp_etype = ENOMEM;
2571                                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2572                                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2573                         } else
2574                                 crp->crp_etype = ENOMEM;
2575
2576                         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2577                         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2578
2579                         free(cmd, M_DEVBUF);
2580                         if (crp->crp_etype != EAGAIN)
2581                                 crypto_done(crp);
2582                 }
2583
2584                 if (++i == HIFN_D_RES_RSIZE)
2585                         i = 0;
2586                 u--;
2587         }
2588         sc->sc_resk = i; sc->sc_resu = u;
2589
2590         hifn_reset_board(sc, 1);
2591         hifn_init_dma(sc);
2592         hifn_init_pci_registers(sc);
2593 }
2594
2595 static void
2596 hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
2597 {
2598         struct hifn_dma *dma = sc->sc_dma;
2599         struct cryptop *crp = cmd->crp;
2600         uint8_t macbuf2[SHA1_HASH_LEN];
2601         struct mbuf *m;
2602         int totlen, i, u;
2603
2604         if (cmd->src_map == cmd->dst_map) {
2605                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2606                     BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2607         } else {
2608                 bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2609                     BUS_DMASYNC_POSTWRITE);
2610                 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2611                     BUS_DMASYNC_POSTREAD);
2612         }
2613
2614         if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) {
2615                 if (cmd->dst_m != NULL) {
2616                         totlen = cmd->src_mapsize;
2617                         for (m = cmd->dst_m; m != NULL; m = m->m_next) {
2618                                 if (totlen < m->m_len) {
2619                                         m->m_len = totlen;
2620                                         totlen = 0;
2621                                 } else
2622                                         totlen -= m->m_len;
2623                         }
2624                         cmd->dst_m->m_pkthdr.len =
2625                             crp->crp_buf.cb_mbuf->m_pkthdr.len;
2626                         m_freem(crp->crp_buf.cb_mbuf);
2627                         crp->crp_buf.cb_mbuf = cmd->dst_m;
2628                 }
2629         }
2630
2631         if (cmd->sloplen != 0) {
2632                 crypto_copyback(crp, cmd->src_mapsize - cmd->sloplen,
2633                     cmd->sloplen, &dma->slop[cmd->slopidx]);
2634         }
2635
2636         i = sc->sc_dstk; u = sc->sc_dstu;
2637         while (u != 0) {
2638                 if (i == HIFN_D_DST_RSIZE)
2639                         i = 0;
2640                 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2641                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2642                 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2643                         bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2644                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2645                         break;
2646                 }
2647                 i++, u--;
2648         }
2649         sc->sc_dstk = i; sc->sc_dstu = u;
2650
2651         hifnstats.hst_obytes += cmd->dst_mapsize;
2652
2653         if (macbuf != NULL) {
2654                 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
2655                         crypto_copydata(crp, crp->crp_digest_start,
2656                             cmd->session->hs_mlen, macbuf2);
2657                         if (timingsafe_bcmp(macbuf, macbuf2,
2658                             cmd->session->hs_mlen) != 0)
2659                                 crp->crp_etype = EBADMSG;
2660                 } else
2661                         crypto_copyback(crp, crp->crp_digest_start,
2662                             cmd->session->hs_mlen, macbuf);
2663         }
2664
2665         if (cmd->src_map != cmd->dst_map) {
2666                 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2667                 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2668         }
2669         bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2670         bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2671         free(cmd, M_DEVBUF);
2672         crypto_done(crp);
2673 }
2674
2675 /*
2676  * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2677  * and Group 1 registers; avoid conditions that could create
2678  * burst writes by doing a read in between the writes.
2679  *
2680  * NB: The read we interpose is always to the same register;
2681  *     we do this because reading from an arbitrary (e.g. last)
2682  *     register may not always work.
2683  */
2684 static void
2685 hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2686 {
2687         if (sc->sc_flags & HIFN_IS_7811) {
2688                 if (sc->sc_bar0_lastreg == reg - 4)
2689                         bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
2690                 sc->sc_bar0_lastreg = reg;
2691         }
2692         bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2693 }
2694
2695 static void
2696 hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2697 {
2698         if (sc->sc_flags & HIFN_IS_7811) {
2699                 if (sc->sc_bar1_lastreg == reg - 4)
2700                         bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2701                 sc->sc_bar1_lastreg = reg;
2702         }
2703         bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2704 }
2705
2706 #ifdef HIFN_VULCANDEV
2707 /*
2708  * This code provides support for mapping the PK engine's registers
2709  * into a userspace program.
2711  */
2712 static int
2713 vulcanpk_mmap(struct cdev *dev, vm_ooffset_t offset,
2714               vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr)
2715 {
2716         struct hifn_softc *sc;
2717         vm_paddr_t pd;
2718         void *b;
2719
2720         sc = dev->si_drv1;
2721
2722         pd = rman_get_start(sc->sc_bar1res);
2723         b = rman_get_virtual(sc->sc_bar1res);
2724
2725 #if 0
2726         printf("vpk mmap: %p(%016llx) offset=%lld\n", b,
2727             (unsigned long long)pd, offset);
2728         hexdump(b, HIFN_1_PUB_MEMEND, "vpk", 0);
2729 #endif
2730
2731         if (offset == 0) {
2732                 *paddr = pd;
2733                 return (0);
2734         }
2735         return (-1);
2736 }
2737
2738 static struct cdevsw vulcanpk_cdevsw = {
2739         .d_version =    D_VERSION,
2740         .d_mmap =       vulcanpk_mmap,
2741         .d_name =       "vulcanpk",
2742 };
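/*
 * Hedged userspace sketch of how the mapping above is consumed: the program
 * mmap()s the character device at offset 0 (the only offset vulcanpk_mmap()
 * accepts) and reads the PK engine registers directly.  The device node name
 * is an assumption here; the make_dev() call lives elsewhere in the driver.
 * This block is illustrative only and is not part of the kernel build.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	volatile uint32_t *pk;
	int fd;

	fd = open("/dev/vulcanpk", O_RDWR);	/* node name is an assumption */
	if (fd < 0) {
		perror("open");
		return (1);
	}
	pk = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
	if (pk == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return (1);
	}
	printf("first PK register word: 0x%08x\n", pk[0]);
	munmap((void *)(uintptr_t)pk, getpagesize());
	close(fd);
	return (0);
}
#endif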
2743 #endif /* HIFN_VULCANDEV */