 * Copyright (c) 2005 by David E. O'Brien <obrien@FreeBSD.org>.
 * Copyright (c) 2003,2004 by Quinton Dolan <q@onthenet.com.au>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *
 * $Id: if_nv.c,v 1.19 2004/08/12 14:00:05 q Exp $
 * NVIDIA nForce MCP Networking Adapter driver
 *
 * This is a port of the NVIDIA MCP Linux ethernet driver distributed by NVIDIA
 * through their web site.
 * All mainstream nForce and nForce2 motherboards are supported. This module
 * is as stable as, and sometimes more stable than, the Linux version. (Recent
 * Linux stability issues seem to be related to some issues with newer
 * distributions using GCC 3.x; however, this doesn't appear to affect FreeBSD
 * In accordance with the NVIDIA distribution license it is necessary to
 * link this module against the nvlibnet.o binary object included in the
 * Linux driver source distribution. The binary component is not modified in
 * any way and is simply linked against a FreeBSD equivalent of the nvnet.c
 * linux kernel module "wrapper".
 *
 * The Linux driver uses a common code API that is shared between Win32 and
 * i386 Linux. This abstracts the low level driver functions and uses
 * callbacks and hooks to access the underlying hardware device. By using
 * this same API in a FreeBSD kernel module it is possible to support the
 * hardware without breaching the Linux source distribution's licensing
 * requirements, or obtaining the hardware programming specifications.
 * Although not conventional, it works, and given the relatively small
 * amount of hardware-centric code, it's hopefully no more buggy than its
 * NVIDIA now supports the nForce3 AMD64 platform, but I have been
 * unable to access such a system to verify support. However, the code is
 * reported to work with little modification when compiled with the AMD64
 * version of the NVIDIA Linux library. All that should be necessary to make
 * the driver work is to link it directly into the kernel, instead of as a
 * module, and apply the docs/amd64.diff patch in this source distribution to
 * the NVIDIA Linux driver source.
 * This driver should work on all versions of FreeBSD since 4.9/5.1 as well
 * as recent versions of DragonFly.
 *
 * Written by Quinton Dolan <q@onthenet.com.au>
 * Portions based on existing FreeBSD network drivers.
 * NVIDIA API usage derived from distributed NVIDIA NVNET driver source files.
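 *
 * The control flow below follows that description: nve_attach() fills an
 * OS_API table with the nve_os*() callbacks defined at the end of this
 * file and hands it to ADAPTER_Open(), after which the NVIDIA common code
 * drives the hardware through those callbacks.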
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/module.h>

#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/clock.h>	/* for DELAY */

#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miibus_if.h"

/* Include NVIDIA Linux driver header files */
#include <contrib/dev/nve/basetype.h>
#include <contrib/dev/nve/phy.h>
#include "os+%DIKED-nve.h"
#include <contrib/dev/nve/drvinfo.h>
#include <contrib/dev/nve/adapter.h>

#include <dev/nve/if_nvereg.h>
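/*
 * Each MODULE_DEPEND() entry below declares a (minimum, preferred, maximum)
 * version dependency: this driver requires version 1 of the pci, ether and
 * miibus kernel modules.
 */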
MODULE_DEPEND(nve, pci, 1, 1, 1);
MODULE_DEPEND(nve, ether, 1, 1, 1);
MODULE_DEPEND(nve, miibus, 1, 1, 1);
static int nve_probe(device_t);
static int nve_attach(device_t);
static int nve_detach(device_t);
static void nve_init(void *);
static void nve_stop(struct nve_softc *);
static void nve_shutdown(device_t);
static int nve_init_rings(struct nve_softc *);
static void nve_free_rings(struct nve_softc *);

static void nve_ifstart(struct ifnet *);
static int nve_ioctl(struct ifnet *, u_long, caddr_t);
static void nve_intr(void *);
static void nve_tick(void *);
static void nve_setmulti(struct nve_softc *);
static void nve_watchdog(struct ifnet *);
static void nve_update_stats(struct nve_softc *);

static int nve_ifmedia_upd(struct ifnet *);
static void nve_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int nve_miibus_readreg(device_t, int, int);
static void nve_miibus_writereg(device_t, int, int, int);

static void nve_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void nve_dmamap_tx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int);

static NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32);
static NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *);
static NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID);
static NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32);
static NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *, NV_UINT8);
static NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *);
static NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID);
static NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID);
static NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32);
static NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID);

static NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *, NV_UINT8);
static PNV_VOID nve_ospreprocpktnopq(PNV_VOID, PNV_VOID);
static NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32);
static NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *);
static NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID);
static NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID);
static PNV_VOID nve_osreturnbufvirt(PNV_VOID, PNV_VOID);
static device_method_t nve_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, nve_probe),
	DEVMETHOD(device_attach, nve_attach),
	DEVMETHOD(device_detach, nve_detach),
	DEVMETHOD(device_shutdown, nve_shutdown),

	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	DEVMETHOD(miibus_readreg, nve_miibus_readreg),
	DEVMETHOD(miibus_writereg, nve_miibus_writereg),

static driver_t nve_driver = {
	sizeof(struct nve_softc)

static devclass_t nve_devclass;
static int nve_pollinterval = 0;
SYSCTL_INT(_hw, OID_AUTO, nve_pollinterval, CTLFLAG_RW,
    &nve_pollinterval, 0, "delay between interface polls");
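/*
 * The SYSCTL_INT() above exports this knob as hw.nve_pollinterval, so it
 * can be read or changed at runtime, e.g.:
 *
 *	sysctl hw.nve_pollinterval
 *	sysctl hw.nve_pollinterval=100
 *
 * (The value 100 is only an illustration; the units are whatever the
 * NVIDIA API expects for ulPollInterval.)
 */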
DRIVER_MODULE(nve, pci, nve_driver, nve_devclass, 0, 0);
DRIVER_MODULE(miibus, nve, miibus_driver, miibus_devclass, 0, 0);
static struct nve_type nve_devs[] = {
	{NVIDIA_VENDORID, NFORCE_MCPNET1_DEVICEID,
	    "NVIDIA nForce MCP Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET2_DEVICEID,
	    "NVIDIA nForce MCP2 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET3_DEVICEID,
	    "NVIDIA nForce MCP3 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET4_DEVICEID,
	    "NVIDIA nForce MCP4 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET5_DEVICEID,
	    "NVIDIA nForce MCP5 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET6_DEVICEID,
	    "NVIDIA nForce MCP6 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET7_DEVICEID,
	    "NVIDIA nForce MCP7 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET8_DEVICEID,
	    "NVIDIA nForce MCP8 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET9_DEVICEID,
	    "NVIDIA nForce MCP9 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET10_DEVICEID,
	    "NVIDIA nForce MCP10 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET11_DEVICEID,
	    "NVIDIA nForce MCP11 Networking Adapter"},
/* DMA MEM map callback function to get data segment physical address */
nve_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
	    ("Too many DMA segments returned when mapping DMA memory"));
	*(bus_addr_t *)arg = segs->ds_addr;
/* DMA RX map callback function to get data segment physical address */
nve_dmamap_rx_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsize, int error)
	*(bus_addr_t *)arg = segs->ds_addr;
 * DMA TX buffer callback function to allocate fragment data segment
nve_dmamap_tx_cb(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize, int error)
	struct nve_tx_desc *info;

	KASSERT(nsegs < NV_MAX_FRAGS,
	    ("Too many DMA segments returned when mapping mbuf"));
	info->numfrags = nsegs;
	bcopy(segs, info->frags, nsegs * sizeof(bus_dma_segment_t));
/* Probe for supported hardware IDs */
nve_probe(device_t dev)

	/* Check for matching PCI device IDs */
	while (t->name != NULL) {
		if ((pci_get_vendor(dev) == t->vid_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			device_set_desc(dev, t->name);
/* Attach driver and initialise hardware for use */
nve_attach(device_t dev)
	u_char eaddr[ETHER_ADDR_LEN];
	struct nve_softc *sc;
	ADAPTER_OPEN_PARAMS OpenParams;
	int error = 0, i, rid, unit;

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - entry\n");

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
	mtx_init(&sc->osmtx, device_get_nameunit(dev), NULL, MTX_SPIN);
	/* Preinitialize data structures */
	bzero(&OpenParams, sizeof(ADAPTER_OPEN_PARAMS));

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate memory mapped address space */
	sc->res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1,
	if (sc->res == NULL) {
		device_printf(dev, "couldn't map memory\n");
	sc->sc_st = rman_get_bustag(sc->res);
	sc->sc_sh = rman_get_bushandle(sc->res);

	/* Allocate interrupt */
	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");

	/* Allocate DMA tags */
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * NV_MAX_FRAGS,
	    NV_MAX_FRAGS, MCLBYTES, 0,
	    busdma_lock_mutex, &Giant,
		device_printf(dev, "couldn't allocate dma tag\n");
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, 1,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, 0,
	    busdma_lock_mutex, &Giant,
		device_printf(dev, "couldn't allocate dma tag\n");
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct nve_tx_desc) * TX_RING_SIZE, 1,
	    sizeof(struct nve_tx_desc) * TX_RING_SIZE, 0,
	    busdma_lock_mutex, &Giant,
		device_printf(dev, "couldn't allocate dma tag\n");

	/* Allocate DMA safe memory and get the DMA addresses. */
	error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc,
	    BUS_DMA_WAITOK, &sc->tmap);
		device_printf(dev, "couldn't allocate dma memory\n");
	bzero(sc->tx_desc, sizeof(struct nve_tx_desc) * TX_RING_SIZE);
	error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc,
	    sizeof(struct nve_tx_desc) * TX_RING_SIZE, nve_dmamap_cb,
		device_printf(dev, "couldn't map dma memory\n");
	error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc,
	    BUS_DMA_WAITOK, &sc->rmap);
		device_printf(dev, "couldn't allocate dma memory\n");
	bzero(sc->rx_desc, sizeof(struct nve_rx_desc) * RX_RING_SIZE);
	error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, nve_dmamap_cb,
		device_printf(dev, "couldn't map dma memory\n");

	/* Initialize rings. */
	if (nve_init_rings(sc)) {
		device_printf(dev, "failed to init rings\n");

	/* Setup NVIDIA API callback routines */
	osapi->pfnAllocMemory = nve_osalloc;
	osapi->pfnFreeMemory = nve_osfree;
	osapi->pfnAllocMemoryEx = nve_osallocex;
	osapi->pfnFreeMemoryEx = nve_osfreeex;
	osapi->pfnClearMemory = nve_osclear;
	osapi->pfnStallExecution = nve_osdelay;
	osapi->pfnAllocReceiveBuffer = nve_osallocrxbuf;
	osapi->pfnFreeReceiveBuffer = nve_osfreerxbuf;
	osapi->pfnPacketWasSent = nve_ospackettx;
	osapi->pfnPacketWasReceived = nve_ospacketrx;
	osapi->pfnLinkStateHasChanged = nve_oslinkchg;
	osapi->pfnAllocTimer = nve_osalloctimer;
	osapi->pfnFreeTimer = nve_osfreetimer;
	osapi->pfnInitializeTimer = nve_osinittimer;
	osapi->pfnSetTimer = nve_ossettimer;
	osapi->pfnCancelTimer = nve_oscanceltimer;
	osapi->pfnPreprocessPacket = nve_ospreprocpkt;
	osapi->pfnPreprocessPacketNopq = nve_ospreprocpktnopq;
	osapi->pfnIndicatePackets = nve_osindicatepkt;
	osapi->pfnLockAlloc = nve_oslockalloc;
	osapi->pfnLockAcquire = nve_oslockacquire;
	osapi->pfnLockRelease = nve_oslockrelease;
	osapi->pfnReturnBufferVirtual = nve_osreturnbufvirt;

	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + FCS_LEN;
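	/*
	 * With the standard MTU this works out to 1500 + 14 + 4 = 1518
	 * bytes (assuming FCS_LEN is the 4-byte Ethernet CRC), which is
	 * why nve_ioctl() checks new MTUs against MAX_PACKET_SIZE_1518.
	 */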
	/* TODO - We don't support hardware offload yet */

	/* Set NVIDIA API startup parameters */
	OpenParams.MaxDpcLoop = 2;
	OpenParams.MaxRxPkt = RX_RING_SIZE;
	OpenParams.MaxTxPkt = TX_RING_SIZE;
	OpenParams.SentPacketStatusSuccess = 1;
	OpenParams.SentPacketStatusFailure = 0;
	OpenParams.MaxRxPktToAccumulate = 6;
	OpenParams.ulPollInterval = nve_pollinterval;
	OpenParams.SetForcedModeEveryNthRxPacket = 0;
	OpenParams.SetForcedModeEveryNthTxPacket = 0;
	OpenParams.RxForcedInterrupt = 0;
	OpenParams.TxForcedInterrupt = 0;
	OpenParams.pOSApi = osapi;
	OpenParams.pvHardwareBaseAddress = rman_get_virtual(sc->res);
	OpenParams.bASFEnabled = 0;
	OpenParams.ulDescriptorVersion = sc->hwmode;
	OpenParams.ulMaxPacketSize = sc->max_frame_size;
	OpenParams.DeviceId = pci_get_device(dev);

	/* Open NVIDIA Hardware API */
	error = ADAPTER_Open(&OpenParams, (void **)&(sc->hwapi), &sc->phyaddr);
		    "failed to open NVIDIA Hardware API: 0x%x\n", error);

	/* TODO - Add support for MODE2 hardware offload */

	bzero(&sc->adapterdata, sizeof(sc->adapterdata));
	sc->adapterdata.ulMediaIF = sc->media;
	sc->adapterdata.ulModeRegTxReadCompleteEnable = 1;
	sc->hwapi->pfnSetCommonData(sc->hwapi->pADCX, &sc->adapterdata);
	/* MAC is loaded backwards into h/w reg */
	sc->hwapi->pfnGetNodeAddress(sc->hwapi->pADCX, sc->original_mac_addr);
	for (i = 0; i < 6; i++) {
		eaddr[i] = sc->original_mac_addr[5 - i];
	sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, eaddr);
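	/*
	 * For example, if the hardware registers report 05:04:03:02:01:00,
	 * the byte-reversal loop above yields the station address
	 * 00:01:02:03:04:05 that is handed back to the hardware and stack.
	 */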
	/* Display ethernet address */
	device_printf(dev, "Ethernet address %6D\n", eaddr, ":");

	/* Allocate interface structures */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
		device_printf(dev, "can not if_alloc()\n");

	/* Probe device for MII interface to PHY */
	DEBUGOUT(NVE_DEBUG_INIT, "nve: do mii_phy_probe\n");
	if (mii_phy_probe(dev, &sc->miibus, nve_ifmedia_upd, nve_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");

	/* Setup interface parameters */
	if_initname(ifp, "nve", unit);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nve_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = nve_ifstart;
	ifp->if_watchdog = nve_watchdog;
	ifp->if_init = nve_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Mbps(100);
	ifp->if_snd.ifq_maxlen = TX_RING_SIZE - 1;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;

	/* Attach to OS's managers. */
	ether_ifattach(ifp, eaddr);
	callout_handle_init(&sc->stat_ch);
	/* Activate our interrupt handler - attach last to avoid lock */
	error = bus_setup_intr(sc->dev, sc->irq, INTR_TYPE_NET, nve_intr,
		device_printf(sc->dev, "couldn't set up interrupt handler\n");

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - exit\n");
/* Detach interface for module unload */
nve_detach(device_t dev)
	struct nve_softc *sc = device_get_softc(dev);

	KASSERT(mtx_initialized(&sc->mtx), ("mutex not initialized"));

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - entry\n");

	if (device_is_attached(dev)) {
		/* XXX shouldn't hold lock over call to ether_ifdetach */
	device_delete_child(dev, sc->miibus);
	bus_generic_detach(dev);

	/* Reload unreversed address back into MAC in original state */
	if (sc->original_mac_addr)
		sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX,
		    sc->original_mac_addr);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnClose\n");
	/* Detach from NVIDIA hardware API */
	if (sc->hwapi->pfnClose)
		sc->hwapi->pfnClose(sc->hwapi->pADCX, FALSE);

	/* Release resources */
	bus_teardown_intr(sc->dev, sc->irq, sc->sc_ih);
	bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
	bus_release_resource(sc->dev, SYS_RES_MEMORY, NV_RID, sc->res);

	bus_dmamap_unload(sc->rtag, sc->rmap);
	bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap);
	bus_dmamap_destroy(sc->rtag, sc->rmap);
	bus_dma_tag_destroy(sc->mtag);
	bus_dma_tag_destroy(sc->ttag);
	bus_dma_tag_destroy(sc->rtag);

	mtx_destroy(&sc->mtx);
	mtx_destroy(&sc->osmtx);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - exit\n");
/* Initialise interface and start it "RUNNING" */
	struct nve_softc *sc = xsc;

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - entry (%d)\n", sc->linkup);

	/* Do nothing if already running */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)

	DEBUGOUT(NVE_DEBUG_INIT, "nve: do pfnInit\n");

	/* Setup Hardware interface and allocate memory structures */
	error = sc->hwapi->pfnInit(sc->hwapi->pADCX,
	    0,			/* force full duplex */
	    0,			/* force async mode */
		device_printf(sc->dev,
		    "failed to start NVIDIA Hardware interface\n");

	/* Set the MAC address */
	sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, IF_LLADDR(sc->ifp));
	sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
	sc->hwapi->pfnStart(sc->hwapi->pADCX);

	/* Setup multicast filter */
	nve_ifmedia_upd(ifp);

	/* Update interface parameters */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->stat_ch = timeout(nve_tick, sc, hz);

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - exit\n");
/* Stop interface activity, i.e. not "RUNNING" */
nve_stop(struct nve_softc *sc)

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - entry\n");

	/* Cancel tick timer */
	untimeout(nve_tick, sc, sc->stat_ch);

	/* Stop hardware activity */
	sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
	sc->hwapi->pfnStop(sc->hwapi->pADCX, 0);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnDeinit\n");
	/* Shutdown interface and deallocate memory buffers */
	if (sc->hwapi->pfnDeinit)
		sc->hwapi->pfnDeinit(sc->hwapi->pADCX, 0);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - exit\n");
/* Shutdown interface for unload/reboot */
nve_shutdown(device_t dev)
	struct nve_softc *sc;

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_shutdown\n");

	sc = device_get_softc(dev);

	/* Stop hardware activity */
/* Allocate RX and TX ring buffers */
nve_init_rings(struct nve_softc *sc)

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - entry\n");

	sc->cur_rx = sc->cur_tx = sc->pending_rxs = sc->pending_txs = 0;

	/* Initialise RX ring */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct nve_rx_desc *desc = sc->rx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			device_printf(sc->dev, "couldn't allocate mbuf\n");

		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		m_adj(buf->mbuf, ETHER_ALIGN);
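		/*
		 * ETHER_ALIGN (2 bytes) offsets the 14-byte Ethernet
		 * header so that the IP header that follows it lands on
		 * a 32-bit boundary.
		 */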
		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
			device_printf(sc->dev, "couldn't create dma map\n");
		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
		    nve_dmamap_rx_cb, &desc->paddr, 0);
			device_printf(sc->dev, "couldn't dma map mbuf\n");
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);

		desc->buflength = buf->mbuf->m_len;
		desc->vaddr = mtod(buf->mbuf, caddr_t);
	bus_dmamap_sync(sc->rtag, sc->rmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Initialize TX ring */
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct nve_tx_desc *desc = sc->tx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
			device_printf(sc->dev, "couldn't create dma map\n");
	bus_dmamap_sync(sc->ttag, sc->tmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - exit\n");
/* Free the RX and TX ring buffers */
nve_free_rings(struct nve_softc *sc)

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - entry\n");

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct nve_rx_desc *desc = sc->rx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		bus_dmamap_unload(sc->mtag, buf->map);
		bus_dmamap_destroy(sc->mtag, buf->map);

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct nve_tx_desc *desc = sc->tx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		bus_dmamap_unload(sc->mtag, buf->map);
		bus_dmamap_destroy(sc->mtag, buf->map);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - exit\n");
/* Main loop for sending packets from OS to interface */
nve_ifstart(struct ifnet *ifp)
	struct nve_softc *sc = ifp->if_softc;
	struct nve_map_buffer *buf;
	struct nve_tx_desc *desc;
	ADAPTER_WRITE_DATA txdata;

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - entry\n");

	/* If link is down/busy or queue is empty do nothing */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE || ifp->if_snd.ifq_head == NULL)

	/* Transmit queued packets until sent or TX ring is full */
	while (sc->pending_txs < TX_RING_SIZE) {
		desc = sc->tx_desc + sc->cur_tx;

		/* Get next packet to send. */
		IF_DEQUEUE(&ifp->if_snd, m0);

		/* If nothing to send, return. */

		/* Map MBUF for DMA access */
		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
		    nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT);
		if (error && error != EFBIG) {

		 * Packet has too many fragments - defrag into new mbuf
			m = m_defrag(m0, M_DONTWAIT);

			error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m,
			    nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT);

		/* Do sync on DMA bounce buffer */
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);

		txdata.ulNumberOfElements = desc->numfrags;
		txdata.pvID = (PVOID)desc;

		/* Put fragments into API element list */
		txdata.ulTotalLength = buf->mbuf->m_len;
		for (i = 0; i < desc->numfrags; i++) {
			txdata.sElement[i].ulLength =
			    (ulong)desc->frags[i].ds_len;
			txdata.sElement[i].pPhysical =
			    (PVOID)desc->frags[i].ds_addr;

		/* Send packet to Nvidia API for transmission */
		error = sc->hwapi->pfnWrite(sc->hwapi->pADCX, &txdata);

		case ADAPTERERR_NONE:
			/* Packet was queued in API TX queue successfully */
			sc->cur_tx = (sc->cur_tx + 1) % TX_RING_SIZE;

		case ADAPTERERR_TRANSMIT_QUEUE_FULL:
			/* The API TX queue is full - requeue the packet */
			device_printf(sc->dev,
			    "nve_ifstart: transmit queue is full\n");
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			bus_dmamap_unload(sc->mtag, buf->map);
			IF_PREPEND(&ifp->if_snd, buf->mbuf);

			/* The API failed to queue/send the packet so dump it */
			device_printf(sc->dev, "nve_ifstart: transmit error\n");
			bus_dmamap_unload(sc->mtag, buf->map);

		/* Set watchdog timer. */

		/* Copy packet to BPF tap */

	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - exit\n");
/* Handle IOCTL events */
nve_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
	struct nve_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;

	DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - entry\n");

		if (ifp->if_mtu == ifr->ifr_mtu)
		if (ifr->ifr_mtu + ifp->if_hdrlen <= MAX_PACKET_SIZE_1518) {
			ifp->if_mtu = ifr->ifr_mtu;

		/* Setup interface flags */
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {

		/* Handle IFF_PROMISC and IFF_ALLMULTI flags. */

		/* Setup multicast filter */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {

		/* Get/Set interface media parameters */
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);

		/* Everything else we forward to generic ether ioctl */
		error = ether_ioctl(ifp, (int)command, data);

	DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - exit\n");
/* Interrupt service routine */
	struct nve_softc *sc = arg;
	struct ifnet *ifp = sc->ifp;

	DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - entry\n");
	if ((ifp->if_flags & IFF_UP) == 0) {
	/* Handle interrupt event */
	if (sc->hwapi->pfnQueryInterrupt(sc->hwapi->pADCX)) {
		sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
		sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);

	if (ifp->if_snd.ifq_head != NULL)

	/* If no pending packets we don't need a timeout */
	if (sc->pending_txs == 0)
		sc->ifp->if_timer = 0;

	DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - exit\n");
/* Setup multicast filters */
nve_setmulti(struct nve_softc *sc)
	struct ifmultiaddr *ifma;
	PACKET_FILTER hwfilter;
	u_int8_t andaddr[6], oraddr[6];

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - entry\n");

	/* Initialize filter */
	hwfilter.ulFilterFlags = 0;
	for (i = 0; i < 6; i++) {
		hwfilter.acMulticastAddress[i] = 0;
		hwfilter.acMulticastMask[i] = 0;

	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		/* Accept all packets */
		hwfilter.ulFilterFlags |= ACCEPT_ALL_PACKETS;
		sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter);

	/* Setup multicast filter */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)

		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
		for (i = 0; i < 6; i++) {
			u_int8_t mcaddr = addrp[i];

			andaddr[i] &= mcaddr;
			oraddr[i] |= mcaddr;
	IF_ADDR_UNLOCK(ifp);
	for (i = 0; i < 6; i++) {
		hwfilter.acMulticastAddress[i] = andaddr[i] & oraddr[i];
		hwfilter.acMulticastMask[i] = andaddr[i] | (~oraddr[i]);
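	/*
	 * Worked example: for the two groups 01:00:5e:00:00:01 and
	 * 01:00:5e:00:00:02, andaddr ends in 0x00 and oraddr in 0x03, so
	 * the last filter byte is 0x00 with mask 0xfc. Only the bits on
	 * which every subscribed address agrees are matched exactly; the
	 * disagreeing bits are masked out.
	 */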
	/* Send filter to NVIDIA API */
	sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - exit\n");
/* Change the current media/mediaopts */
nve_ifmedia_upd(struct ifnet *ifp)
	struct nve_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_upd\n");

	mii = device_get_softc(sc->miibus);

	if (mii->mii_instance) {
		struct mii_softc *miisc;

		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list)) {
			mii_phy_reset(miisc);

/* Update current miibus PHY status of media */
nve_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
	struct nve_softc *sc;
	struct mii_data *mii;

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_sts\n");

	mii = device_get_softc(sc->miibus);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
/* miibus tick timer - maintain link status */
	struct nve_softc *sc = xsc;
	struct mii_data *mii;

	nve_update_stats(sc);

	mii = device_get_softc(sc->miibus);

	if (mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		if (ifp->if_snd.ifq_head != NULL)

	sc->stat_ch = timeout(nve_tick, sc, hz);
/* Update ifnet data structure with collected interface stats from API */
nve_update_stats(struct nve_softc *sc)
	struct ifnet *ifp = sc->ifp;
	ADAPTER_STATS stats;

	sc->hwapi->pfnGetStatistics(sc->hwapi->pADCX, &stats);

	ifp->if_ipackets = stats.ulSuccessfulReceptions;
	ifp->if_ierrors = stats.ulMissedFrames +
	    stats.ulFailedReceptions +
	    stats.ulFramingErrors +
	    stats.ulOverFlowErrors;

	ifp->if_opackets = stats.ulSuccessfulTransmissions;
	ifp->if_oerrors = sc->tx_errors +
	    stats.ulFailedTransmissions +
	    stats.ulRetryErrors +
	    stats.ulUnderflowErrors +
	    stats.ulLossOfCarrierErrors +
	    stats.ulLateCollisionErrors;

	ifp->if_collisions = stats.ulLateCollisionErrors;
/* miibus Read PHY register wrapper - calls Nvidia API entry point */
nve_miibus_readreg(device_t dev, int phy, int reg)
	struct nve_softc *sc = device_get_softc(dev);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - entry\n");

	ADAPTER_ReadPhy(sc->hwapi->pADCX, phy, reg, &data);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - exit\n");

/* miibus Write PHY register wrapper - calls Nvidia API entry point */
nve_miibus_writereg(device_t dev, int phy, int reg, int data)
	struct nve_softc *sc = device_get_softc(dev);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - entry\n");

	ADAPTER_WritePhy(sc->hwapi->pADCX, phy, reg, (ulong)data);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - exit\n");
/* Watchdog timer to prevent PHY lockups */
nve_watchdog(struct ifnet *ifp)
	struct nve_softc *sc = ifp->if_softc;

	device_printf(sc->dev, "device timeout (%d)\n", sc->pending_txs);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	if (ifp->if_snd.ifq_head != NULL)
/* --- Start of NVOSAPI interface --- */

/* Allocate DMA enabled general use memory for API */
nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem)
	struct nve_softc *sc;
	bus_addr_t mem_physical;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc - %d\n", mem->uiLength);

	sc = (struct nve_softc *)ctx;

	mem->pLogical = (PVOID)contigmalloc(mem->uiLength, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0);
	if (!mem->pLogical) {
		device_printf(sc->dev, "memory allocation failed\n");
	memset(mem->pLogical, 0, (ulong)mem->uiLength);
	mem_physical = vtophys(mem->pLogical);
	mem->pPhysical = (PVOID)mem_physical;
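	/*
	 * vtophys() is sufficient here because contigmalloc() returns
	 * physically contiguous, wired memory, so a single physical
	 * address describes the whole block.
	 */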
	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc 0x%x/0x%x - %d\n",
	    (uint)mem->pLogical, (uint)mem->pPhysical, (uint)mem->uiLength);

/* Free allocated memory */
nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem)
	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfree - 0x%x - %d\n",
	    (uint)mem->pLogical, (uint)mem->uiLength);

	contigfree(mem->pLogical, PAGE_SIZE, M_DEVBUF);
/* Copied directly from nvnet.c */
nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
	MEMORY_BLOCK mem_block;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocex\n");

	mem_block_ex->pLogical = NULL;
	mem_block_ex->uiLengthOrig = mem_block_ex->uiLength;

	if ((mem_block_ex->AllocFlags & ALLOC_MEMORY_ALIGNED) &&
	    (mem_block_ex->AlignmentSize > 1)) {
		DEBUGOUT(NVE_DEBUG_API, "  aligning on %d\n",
		    mem_block_ex->AlignmentSize);
		mem_block_ex->uiLengthOrig += mem_block_ex->AlignmentSize;

	mem_block.uiLength = mem_block_ex->uiLengthOrig;

	if (nve_osalloc(ctx, &mem_block) == 0) {

	mem_block_ex->pLogicalOrig = mem_block.pLogical;
	mem_block_ex->pPhysicalOrigLow = (unsigned long)mem_block.pPhysical;
	mem_block_ex->pPhysicalOrigHigh = 0;

	mem_block_ex->pPhysical = mem_block.pPhysical;
	mem_block_ex->pLogical = mem_block.pLogical;

	if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) {
		unsigned int offset;

		offset = mem_block_ex->pPhysicalOrigLow &
		    (mem_block_ex->AlignmentSize - 1);

			mem_block_ex->pPhysical =
			    (PVOID)((ulong)mem_block_ex->pPhysical +
			    mem_block_ex->AlignmentSize - offset);
			mem_block_ex->pLogical =
			    (PVOID)((ulong)mem_block_ex->pLogical +
			    mem_block_ex->AlignmentSize - offset);
	} /* if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) */
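	/*
	 * Worked example: with AlignmentSize 32 and an original physical
	 * address ending in 0x48, offset = 0x48 & 0x1f = 0x08, so both
	 * pointers are advanced by 32 - 8 = 24 bytes to land on the next
	 * 32-byte boundary (0x60).
	 */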
/* Copied directly from nvnet.c */
nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
	MEMORY_BLOCK mem_block;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreeex\n");

	mem_block.pLogical = mem_block_ex->pLogicalOrig;
	mem_block.pPhysical = (PVOID)((ulong)mem_block_ex->pPhysicalOrigLow);
	mem_block.uiLength = mem_block_ex->uiLengthOrig;

	return (nve_osfree(ctx, &mem_block));

/* Clear memory region */
nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length)
	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osclear\n");
	memset(mem, 0, length);
/* Stall execution for the given number of microseconds */
nve_osdelay(PNV_VOID ctx, NV_UINT32 usec)

/* Allocate memory for rx buffer */
nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id)
	struct nve_softc *sc = ctx;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocrxbuf\n");

	if (sc->pending_rxs == RX_RING_SIZE) {
		device_printf(sc->dev, "rx ring buffer is full\n");
	desc = sc->rx_desc + sc->cur_rx;

	if (buf->mbuf == NULL) {
		buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			device_printf(sc->dev, "failed to allocate memory\n");

		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		m_adj(buf->mbuf, ETHER_ALIGN);

		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
		    nve_dmamap_rx_cb, &desc->paddr, 0);
			device_printf(sc->dev, "failed to dmamap mbuf\n");

		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);
		desc->buflength = buf->mbuf->m_len;
		desc->vaddr = mtod(buf->mbuf, caddr_t);

	sc->cur_rx = (sc->cur_rx + 1) % RX_RING_SIZE;

	mem->pLogical = (void *)desc->vaddr;
	mem->pPhysical = (void *)desc->paddr;
	mem->uiLength = desc->buflength;
/* Free the rx buffer */
nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id)
	struct nve_softc *sc = ctx;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreerxbuf\n");

	desc = (struct nve_rx_desc *) id;

		bus_dmamap_unload(sc->mtag, buf->map);
		bus_dmamap_destroy(sc->mtag, buf->map);

/* This gets called by the Nvidia API after our TX packet has been sent */
nve_ospackettx(PNV_VOID ctx, PNV_VOID id, NV_UINT32 success)
	struct nve_softc *sc = ctx;
	struct nve_map_buffer *buf;
	struct nve_tx_desc *desc = (struct nve_tx_desc *) id;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospackettx\n");

	/* Unload and free mbuf cluster */
	if (buf->mbuf == NULL)

	bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mtag, buf->map);

	/* Send more packets if we have them */
	if (sc->pending_txs < TX_RING_SIZE)
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (ifp->if_snd.ifq_head != NULL && sc->pending_txs < TX_RING_SIZE)
/* This gets called by the Nvidia API when a new packet has been received */
/* XXX What is newbuf used for? XXX */
nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success, NV_UINT8 *newbuf,
	struct nve_softc *sc = ctx;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;
	ADAPTER_READ_DATA *readdata;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospacketrx\n");

	readdata = (ADAPTER_READ_DATA *) data;
	desc = readdata->pvID;

	bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);

		/* Sync DMA bounce buffer. */
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);

		/* First mbuf in packet holds the ethernet and packet headers */
		buf->mbuf->m_pkthdr.rcvif = ifp;
		buf->mbuf->m_pkthdr.len = buf->mbuf->m_len =
		    readdata->ulTotalLength;

		bus_dmamap_unload(sc->mtag, buf->map);

		/* Give mbuf to OS. */
		(*ifp->if_input) (ifp, buf->mbuf);
		if (readdata->ulFilterMatch & ADREADFL_MULTICAST_MATCH)

		/* Blat the mbuf pointer, kernel will free the mbuf cluster */

		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mtag, buf->map);

	sc->cur_rx = desc - sc->rx_desc;
/* This gets called by NVIDIA API when the PHY link state changes */
nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled)
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_oslinkchg\n");

		ifp->if_flags |= IFF_UP;
		ifp->if_flags &= ~IFF_UP;

/* Setup a watchdog timer */
nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer)
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osalloctimer\n");

	callout_handle_init(&sc->ostimer);
	*timer = &sc->ostimer;

/* Free the timer */
nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer)
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osfreetimer\n");

/* Setup timer parameters */
nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func, PNV_VOID parameters)
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osinittimer\n");

	sc->ostimer_func = func;
	sc->ostimer_params = parameters;

/* Set the timer to go off */
nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay)
	struct nve_softc *sc = ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ossettimer\n");

	*(struct callout_handle *)timer = timeout(sc->ostimer_func,
	    sc->ostimer_params, delay);

/* Cancel the timer */
nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer)
	struct nve_softc *sc = ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_oscanceltimer\n");

	untimeout(sc->ostimer_func, sc->ostimer_params,
	    *(struct callout_handle *)timer);
nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id,
    NV_UINT8 *newbuffer, NV_UINT8 priority)
	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n");

nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata)
	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpktnopq\n");
nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno)
	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osindicatepkt\n");

/* Allocate mutex context (already done in nve_attach) */
nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock)
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockalloc\n");

	*pLock = (void **)sc;

/* Obtain a spin lock */
nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockacquire\n");

	NVE_OSLOCK((struct nve_softc *)lock);
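/* Release a previously acquired spin lock */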
nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockrelease\n");

	NVE_OSUNLOCK((struct nve_softc *)lock);

/* I have no idea what this is for */
nve_osreturnbufvirt(PNV_VOID ctx, PNV_VOID readdata)
	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_osreturnbufvirt\n");
	panic("nve: nve_osreturnbufvirt not implemented");
/* --- End of NVOSAPI interface --- */