1 /* $OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $ */
5 * Jason L. Wright <jason@thought.net>, Theo de Raadt, and
6 * Aaron Campbell <aaron@monkey.org>. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Jason L. Wright,
19 * Theo de Raadt and Aaron Campbell.
20 * 4. Neither the name of the author nor the names of any co-contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
25 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
34 * THE POSSIBILITY OF SUCH DAMAGE.
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
41 * Driver for 3c990 (Typhoon) Ethernet ASIC
43 #include <sys/param.h>
44 #include <sys/systm.h>
46 #include <sys/endian.h>
47 #include <sys/kernel.h>
49 #include <sys/malloc.h>
51 #include <sys/module.h>
52 #include <sys/mutex.h>
53 #include <sys/queue.h>
55 #include <sys/socket.h>
56 #include <sys/sockio.h>
57 #include <sys/sysctl.h>
58 #include <sys/taskqueue.h>
62 #include <net/if_arp.h>
63 #include <net/ethernet.h>
64 #include <net/if_dl.h>
65 #include <net/if_media.h>
66 #include <net/if_types.h>
67 #include <net/if_vlan_var.h>
69 #include <netinet/in.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/ip.h>
73 #include <dev/mii/mii.h>
75 #include <dev/pci/pcireg.h>
76 #include <dev/pci/pcivar.h>
78 #include <machine/bus.h>
79 #include <machine/in_cksum.h>
81 #include <dev/txp/if_txpreg.h>
82 #include <dev/txp/3c990img.h>
84 MODULE_DEPEND(txp, pci, 1, 1, 1);
85 MODULE_DEPEND(txp, ether, 1, 1, 1);
88 * XXX Known Typhoon firmware issues.
90 * 1. The firmware appears to have a Tx TCP/UDP checksum offloading bug.
91 * The firmware hangs when it is told to compute a TCP/UDP checksum.
92 * It is unclear whether the firmware requires special alignment for
93 * checksum offloading; the datasheet says nothing about it.
94 * 2. The datasheet says nothing about the maximum number of fragment
95 * descriptors supported. Experimentation shows that the firmware
96 * supports up to 16 fragment descriptors. For TSO, the upper
97 * stack can send a 64KB IP datagram plus the link header
98 * (ethernet header + VLAN tag), but the controller can only handle
99 * up to a 64KB frame given that PAGE_SIZE is 4KB (i.e. 16 * PAGE_SIZE).
100 * Because frames that need hardware TSO can be larger than 64KB,
101 * the TSO capability is disabled. TSO for frames that fit in 16 or
102 * fewer fragment descriptors works without problems, though.
104 * 3. VLAN hardware tag stripping is always enabled in the firmware,
105 * even if it is explicitly told not to strip the tag. It would be
106 * possible to add the tag back in the Rx handler when VLAN hardware
107 * tagging is not active, but that has not been tried since it would
108 * be a layering violation.
109 * 4. TXP_CMD_RECV_BUFFER_CONTROL does not work as described in the
110 * datasheet, so the driver has to handle the alignment
111 * restriction itself by copying each received frame onto a 32-bit
112 * aligned boundary on strict-alignment architectures. This adds a
113 * lot of CPU burden and effectively reduces Rx performance on
114 * strict-alignment architectures (e.g. sparc64, arm, mips and ia64).
116 * Unfortunately it seems that 3Com is no longer interested in
117 * releasing fixed firmware, so we may have to live with these bugs.
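/*
 * Only IP checksum offload is advertised below; Tx TCP/UDP checksum
 * offload stays disabled because of firmware issue 1 above.
 */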
120 #define TXP_CSUM_FEATURES (CSUM_IP)
123 * Various supported device vendors/types and their names.
125 static struct txp_type txp_devs[] = {
126 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_95,
127 "3Com 3cR990-TX-95 Etherlink with 3XP Processor" },
128 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_97,
129 "3Com 3cR990-TX-97 Etherlink with 3XP Processor" },
130 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_TXM,
131 "3Com 3cR990B-TXM Etherlink with 3XP Processor" },
132 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_95,
133 "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" },
134 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_97,
135 "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" },
136 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_SRV,
137 "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" },
141 static int txp_probe(device_t);
142 static int txp_attach(device_t);
143 static int txp_detach(device_t);
144 static int txp_shutdown(device_t);
145 static int txp_suspend(device_t);
146 static int txp_resume(device_t);
147 static int txp_intr(void *);
148 static void txp_int_task(void *, int);
149 static void txp_tick(void *);
150 static int txp_ioctl(struct ifnet *, u_long, caddr_t);
151 static void txp_start(struct ifnet *);
152 static void txp_start_locked(struct ifnet *);
153 static int txp_encap(struct txp_softc *, struct txp_tx_ring *, struct mbuf **);
154 static void txp_stop(struct txp_softc *);
155 static void txp_init(void *);
156 static void txp_init_locked(struct txp_softc *);
157 static void txp_watchdog(struct txp_softc *);
159 static int txp_reset(struct txp_softc *);
160 static int txp_boot(struct txp_softc *, uint32_t);
161 static int txp_sleep(struct txp_softc *, int);
162 static int txp_wait(struct txp_softc *, uint32_t);
163 static int txp_download_fw(struct txp_softc *);
164 static int txp_download_fw_wait(struct txp_softc *);
165 static int txp_download_fw_section(struct txp_softc *,
166 struct txp_fw_section_header *, int);
167 static int txp_alloc_rings(struct txp_softc *);
168 static void txp_init_rings(struct txp_softc *);
169 static int txp_dma_alloc(struct txp_softc *, char *, bus_dma_tag_t *,
170 bus_size_t, bus_size_t, bus_dmamap_t *, void **, bus_size_t, bus_addr_t *);
171 static void txp_dma_free(struct txp_softc *, bus_dma_tag_t *, bus_dmamap_t *,
173 static void txp_free_rings(struct txp_softc *);
174 static int txp_rxring_fill(struct txp_softc *);
175 static void txp_rxring_empty(struct txp_softc *);
176 static void txp_set_filter(struct txp_softc *);
178 static int txp_cmd_desc_numfree(struct txp_softc *);
179 static int txp_command(struct txp_softc *, uint16_t, uint16_t, uint32_t,
180 uint32_t, uint16_t *, uint32_t *, uint32_t *, int);
181 static int txp_ext_command(struct txp_softc *, uint16_t, uint16_t,
182 uint32_t, uint32_t, struct txp_ext_desc *, uint8_t,
183 struct txp_rsp_desc **, int);
184 static int txp_response(struct txp_softc *, uint16_t, uint16_t,
185 struct txp_rsp_desc **);
186 static void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
187 struct txp_rsp_desc *);
188 static int txp_set_capabilities(struct txp_softc *);
190 static void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
191 static int txp_ifmedia_upd(struct ifnet *);
193 static void txp_show_descriptor(void *);
195 static void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *);
196 static void txp_rxbuf_reclaim(struct txp_softc *);
197 #ifndef __NO_STRICT_ALIGNMENT
198 static __inline void txp_fixup_rx(struct mbuf *);
200 static int txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *, int);
201 static void txp_stats_save(struct txp_softc *);
202 static void txp_stats_update(struct txp_softc *, struct txp_rsp_desc *);
203 static void txp_sysctl_node(struct txp_softc *);
204 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
205 static int sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS);
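/*
 * hw.txp.prefer_iomap: set to a non-zero value (e.g. in loader.conf)
 * to map the registers through I/O space instead of memory space.
 */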
207 static int prefer_iomap = 0;
208 TUNABLE_INT("hw.txp.prefer_iomap", &prefer_iomap);
210 static device_method_t txp_methods[] = {
211 /* Device interface */
212 DEVMETHOD(device_probe, txp_probe),
213 DEVMETHOD(device_attach, txp_attach),
214 DEVMETHOD(device_detach, txp_detach),
215 DEVMETHOD(device_shutdown, txp_shutdown),
216 DEVMETHOD(device_suspend, txp_suspend),
217 DEVMETHOD(device_resume, txp_resume),
222 static driver_t txp_driver = {
225 sizeof(struct txp_softc)
228 static devclass_t txp_devclass;
230 DRIVER_MODULE(txp, pci, txp_driver, txp_devclass, 0, 0);
233 txp_probe(device_t dev)
239 while (t->txp_name != NULL) {
240 if ((pci_get_vendor(dev) == t->txp_vid) &&
241 (pci_get_device(dev) == t->txp_did)) {
242 device_set_desc(dev, t->txp_name);
243 return (BUS_PROBE_DEFAULT);
252 txp_attach(device_t dev)
254 struct txp_softc *sc;
256 struct txp_rsp_desc *rsp;
259 int error = 0, pmc, rid;
260 uint8_t eaddr[ETHER_ADDR_LEN], *ver;
262 sc = device_get_softc(dev);
265 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
267 callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
268 TASK_INIT(&sc->sc_int_task, 0, txp_int_task, sc);
269 TAILQ_INIT(&sc->sc_busy_list);
270 TAILQ_INIT(&sc->sc_free_list);
272 ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
273 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
274 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL);
275 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
276 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
277 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL);
278 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
279 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
281 pci_enable_busmaster(dev);
282 /* Prefer memory space register mapping over IO space. */
283 if (prefer_iomap == 0) {
284 sc->sc_res_id = PCIR_BAR(1);
285 sc->sc_res_type = SYS_RES_MEMORY;
287 sc->sc_res_id = PCIR_BAR(0);
288 sc->sc_res_type = SYS_RES_IOPORT;
290 sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
291 &sc->sc_res_id, RF_ACTIVE);
292 if (sc->sc_res == NULL && prefer_iomap == 0) {
293 sc->sc_res_id = PCIR_BAR(0);
294 sc->sc_res_type = SYS_RES_IOPORT;
295 sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
296 &sc->sc_res_id, RF_ACTIVE);
298 if (sc->sc_res == NULL) {
299 device_printf(dev, "couldn't map ports/memory\n");
300 ifmedia_removeall(&sc->sc_ifmedia);
301 mtx_destroy(&sc->sc_mtx);
306 reg = pci_read_config(dev, PCIR_COMMAND, 2);
307 reg |= PCIM_CMD_MWRICEN;
308 pci_write_config(dev, PCIR_COMMAND, reg, 2);
309 /* Check cache line size. */
310 reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
312 if (reg == 0 || (reg % 16) != 0)
313 device_printf(sc->sc_dev,
314 "invalid cache line size : %u\n", reg);
316 /* Allocate interrupt */
318 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
319 RF_SHAREABLE | RF_ACTIVE);
321 if (sc->sc_irq == NULL) {
322 device_printf(dev, "couldn't map interrupt\n");
327 if ((error = txp_alloc_rings(sc)) != 0)
331 /* Reset controller and make it reload sleep image. */
332 if (txp_reset(sc) != 0) {
337 /* Let controller boot from sleep image. */
338 if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
339 device_printf(sc->sc_dev, "could not boot sleep image\n");
344 /* Get station address. */
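/*
 * The address comes back split across the two response words (p1/p2);
 * reassemble it byte by byte into eaddr[] below.
 */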
345 if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
346 &p1, &p2, NULL, TXP_CMD_WAIT)) {
352 eaddr[0] = ((uint8_t *)&p1)[1];
353 eaddr[1] = ((uint8_t *)&p1)[0];
355 eaddr[2] = ((uint8_t *)&p2)[3];
356 eaddr[3] = ((uint8_t *)&p2)[2];
357 eaddr[4] = ((uint8_t *)&p2)[1];
358 eaddr[5] = ((uint8_t *)&p2)[0];
360 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
362 device_printf(dev, "can not allocate ifnet structure\n");
368 * Show sleep image version information which may help to
369 * diagnose sleep image specific issues.
372 if (txp_ext_command(sc, TXP_CMD_READ_VERSION, 0, 0, 0, NULL, 0,
373 &rsp, TXP_CMD_WAIT)) {
374 device_printf(dev, "can not read sleep image version\n");
378 if (rsp->rsp_numdesc == 0) {
379 p2 = le32toh(rsp->rsp_par2) & 0xFFFF;
380 device_printf(dev, "Typhoon 1.0 sleep image (2000/%02u/%02u)\n",
382 } else if (rsp->rsp_numdesc == 2) {
383 p2 = le32toh(rsp->rsp_par2);
384 ver = (uint8_t *)(rsp + 1);
386 * Even though the datasheet says the command returns a
387 * NUL-terminated version string, terminate the string
388 * explicitly. Given the firmware's many bugs, even this
389 * simple claim cannot be trusted.
393 "Typhoon 1.1+ sleep image %02u.%03u.%03u %s\n",
394 p2 >> 24, (p2 >> 12) & 0xFFF, p2 & 0xFFF, ver);
396 p2 = le32toh(rsp->rsp_par2);
398 "Unknown Typhoon sleep image version: %u:0x%08x\n",
399 rsp->rsp_numdesc, p2);
404 sc->sc_xcvr = TXP_XCVR_AUTO;
405 txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
406 NULL, NULL, NULL, TXP_CMD_NOWAIT);
407 ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);
410 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
411 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
412 ifp->if_ioctl = txp_ioctl;
413 ifp->if_start = txp_start;
414 ifp->if_init = txp_init;
415 ifp->if_snd.ifq_drv_maxlen = TX_ENTRIES - 1;
416 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
417 IFQ_SET_READY(&ifp->if_snd);
419 * It's possible to read the firmware's offload capabilities, but
420 * the firmware has not been downloaded yet, so announce the
421 * known-working capabilities here. We're not interested in the IPsec
422 * capability, and because of the many firmware bugs we can't
423 * advertise the full capability set anyway.
425 ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM;
426 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
427 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
428 /* Enable all capabilities. */
429 ifp->if_capenable = ifp->if_capabilities;
431 ether_ifattach(ifp, eaddr);
433 /* VLAN capability setup. */
434 ifp->if_capabilities |= IFCAP_VLAN_MTU;
435 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
436 ifp->if_capenable = ifp->if_capabilities;
437 /* Tell the upper layer(s) we support long frames. */
438 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
440 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
441 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
443 /* Create local taskq. */
444 sc->sc_tq = taskqueue_create_fast("txp_taskq", M_WAITOK,
445 taskqueue_thread_enqueue, &sc->sc_tq);
446 if (sc->sc_tq == NULL) {
447 device_printf(dev, "could not create taskqueue.\n");
452 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
453 device_get_nameunit(sc->sc_dev));
455 /* Put controller into sleep. */
456 if (txp_sleep(sc, 0) != 0) {
462 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
463 txp_intr, NULL, sc, &sc->sc_intrhand);
467 device_printf(dev, "couldn't set up interrupt handler.\n");
480 txp_detach(device_t dev)
482 struct txp_softc *sc;
485 sc = device_get_softc(dev);
488 if (device_is_attached(dev)) {
490 sc->sc_flags |= TXP_FLAG_DETACH;
493 callout_drain(&sc->sc_tick);
494 taskqueue_drain(sc->sc_tq, &sc->sc_int_task);
497 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
499 ifmedia_removeall(&sc->sc_ifmedia);
500 if (sc->sc_intrhand != NULL)
501 bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
502 if (sc->sc_irq != NULL)
503 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
504 if (sc->sc_res != NULL)
505 bus_release_resource(dev, sc->sc_res_type, sc->sc_res_id,
507 if (sc->sc_ifp != NULL) {
512 mtx_destroy(&sc->sc_mtx);
518 txp_reset(struct txp_softc *sc)
523 /* Disable interrupts. */
524 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
525 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
526 /* Ack all pending interrupts. */
527 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
530 WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
532 WRITE_REG(sc, TXP_SRR, 0);
534 /* Should wait max 6 seconds. */
535 for (i = 0; i < 6000; i++) {
536 r = READ_REG(sc, TXP_A2H_0);
537 if (r == STAT_WAITING_FOR_HOST_REQUEST)
542 if (r != STAT_WAITING_FOR_HOST_REQUEST)
543 device_printf(sc->sc_dev, "reset hung\n");
545 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
546 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
547 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
550 * Give the controller more time to finish loading the sleep image
551 * before trying to boot from it.
559 txp_boot(struct txp_softc *sc, uint32_t state)
562 /* See if it's waiting for boot, and try to boot it. */
563 if (txp_wait(sc, state) != 0) {
564 device_printf(sc->sc_dev, "not waiting for boot\n");
568 WRITE_REG(sc, TXP_H2A_2, TXP_ADDR_HI(sc->sc_ldata.txp_boot_paddr));
569 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
570 WRITE_REG(sc, TXP_H2A_1, TXP_ADDR_LO(sc->sc_ldata.txp_boot_paddr));
571 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
572 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
573 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
575 /* See if it booted. */
576 if (txp_wait(sc, STAT_RUNNING) != 0) {
577 device_printf(sc->sc_dev, "firmware not running\n");
581 /* Clear TX and CMD ring write registers. */
582 WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
583 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
584 WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
585 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
586 WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
587 TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
588 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
589 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
595 txp_download_fw(struct txp_softc *sc)
597 struct txp_fw_file_header *fileheader;
598 struct txp_fw_section_header *secthead;
600 uint32_t error, ier, imr;
605 ier = READ_REG(sc, TXP_IER);
606 WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);
608 imr = READ_REG(sc, TXP_IMR);
609 WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);
611 if (txp_wait(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
612 device_printf(sc->sc_dev, "not waiting for host request\n");
617 /* Ack the status. */
618 WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
620 fileheader = (struct txp_fw_file_header *)tc990image;
621 if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
622 device_printf(sc->sc_dev, "firmware invalid magic\n");
626 /* Tell boot firmware to get ready for image. */
627 WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
628 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
629 WRITE_REG(sc, TXP_H2A_2, le32toh(fileheader->hmac[0]));
630 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
631 WRITE_REG(sc, TXP_H2A_3, le32toh(fileheader->hmac[1]));
632 TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
633 WRITE_REG(sc, TXP_H2A_4, le32toh(fileheader->hmac[2]));
634 TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
635 WRITE_REG(sc, TXP_H2A_5, le32toh(fileheader->hmac[3]));
636 TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
637 WRITE_REG(sc, TXP_H2A_6, le32toh(fileheader->hmac[4]));
638 TXP_BARRIER(sc, TXP_H2A_6, 4, BUS_SPACE_BARRIER_WRITE);
639 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);
640 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
642 if (txp_download_fw_wait(sc)) {
643 device_printf(sc->sc_dev, "firmware wait failed, initial\n");
648 secthead = (struct txp_fw_section_header *)(((uint8_t *)tc990image) +
649 sizeof(struct txp_fw_file_header));
651 for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
652 if ((error = txp_download_fw_section(sc, secthead, sect)) != 0)
654 secthead = (struct txp_fw_section_header *)
655 (((uint8_t *)secthead) + le32toh(secthead->nbytes) +
659 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);
660 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
662 if (txp_wait(sc, STAT_WAITING_FOR_BOOT) != 0) {
663 device_printf(sc->sc_dev, "not waiting for boot\n");
669 WRITE_REG(sc, TXP_IER, ier);
670 WRITE_REG(sc, TXP_IMR, imr);
676 txp_download_fw_wait(struct txp_softc *sc)
682 for (i = 0; i < TXP_TIMEOUT; i++) {
683 if ((READ_REG(sc, TXP_ISR) & TXP_INT_A2H_0) != 0)
688 if (i == TXP_TIMEOUT) {
689 device_printf(sc->sc_dev, "firmware wait failed comm0\n");
693 WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
695 if (READ_REG(sc, TXP_A2H_0) != STAT_WAITING_FOR_SEGMENT) {
696 device_printf(sc->sc_dev, "firmware not waiting for segment\n");
703 txp_download_fw_section(struct txp_softc *sc,
704 struct txp_fw_section_header *sect, int sectnum)
706 bus_dma_tag_t sec_tag;
707 bus_dmamap_t sec_map;
708 bus_addr_t sec_paddr;
716 /* Skip zero length sections. */
717 if (le32toh(sect->nbytes) == 0)
720 /* Make sure we aren't past the end of the image. */
721 rseg = ((uint8_t *)sect) - ((uint8_t *)tc990image);
722 if (rseg >= sizeof(tc990image)) {
723 device_printf(sc->sc_dev,
724 "firmware invalid section address, section %d\n", sectnum);
728 /* Make sure this section doesn't go past the end. */
729 rseg += le32toh(sect->nbytes);
730 if (rseg >= sizeof(tc990image)) {
731 device_printf(sc->sc_dev, "firmware truncated section %d\n",
741 err = txp_dma_alloc(sc, "firmware sections", &sec_tag, sizeof(uint32_t),
742 0, &sec_map, (void **)&sec_buf, le32toh(sect->nbytes), &sec_paddr);
746 bcopy(((uint8_t *)sect) + sizeof(*sect), sec_buf,
747 le32toh(sect->nbytes));
750 * dummy up mbuf and verify section checksum
753 m.m_next = m.m_nextpkt = NULL;
754 m.m_len = le32toh(sect->nbytes);
757 csum = in_cksum(&m, le32toh(sect->nbytes));
758 if (csum != sect->cksum) {
759 device_printf(sc->sc_dev,
760 "firmware section %d, bad cksum (expected 0x%x got 0x%x)\n",
761 sectnum, le16toh(sect->cksum), csum);
766 bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_PREWRITE);
768 WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
769 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
770 WRITE_REG(sc, TXP_H2A_2, le16toh(sect->cksum));
771 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
772 WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
773 TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
774 WRITE_REG(sc, TXP_H2A_4, TXP_ADDR_HI(sec_paddr));
775 TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
776 WRITE_REG(sc, TXP_H2A_5, TXP_ADDR_LO(sec_paddr));
777 TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
778 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);
779 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
781 if (txp_download_fw_wait(sc)) {
782 device_printf(sc->sc_dev,
783 "firmware wait failed, section %d\n", sectnum);
787 bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_POSTWRITE);
789 txp_dma_free(sc, &sec_tag, &sec_map, (void **)&sec_buf);
796 struct txp_softc *sc;
800 status = READ_REG(sc, TXP_ISR);
801 if ((status & TXP_INT_LATCH) == 0)
802 return (FILTER_STRAY);
803 WRITE_REG(sc, TXP_ISR, status);
804 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
805 taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
807 return (FILTER_HANDLED);
811 txp_int_task(void *arg, int pending)
813 struct txp_softc *sc;
815 struct txp_hostvar *hv;
819 sc = (struct txp_softc *)arg;
824 isr = READ_REG(sc, TXP_ISR);
825 if ((isr & TXP_INT_LATCH) != 0)
826 WRITE_REG(sc, TXP_ISR, isr);
828 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
829 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
830 sc->sc_cdata.txp_hostvar_map,
831 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
833 if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
834 more += txp_rx_reclaim(sc, &sc->sc_rxhir,
835 sc->sc_process_limit);
836 if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
837 more += txp_rx_reclaim(sc, &sc->sc_rxlor,
838 sc->sc_process_limit);
841 * It seems the controller is not smart enough to handle
842 * FIFO overflow conditions under heavy network load.
843 * No matter how often new Rx buffers are passed to the
844 * controller, the situation does not change. Flow control
845 * may be the only way to mitigate the issue, but the
846 * firmware has no command to control the threshold for
847 * emitting pause frames.
849 if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
850 txp_rxbuf_reclaim(sc);
851 if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
852 TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
853 txp_tx_reclaim(sc, &sc->sc_txhir);
854 if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
855 TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
856 txp_tx_reclaim(sc, &sc->sc_txlor);
857 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
858 sc->sc_cdata.txp_hostvar_map,
859 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
860 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
861 txp_start_locked(sc->sc_ifp);
862 if (more != 0 || (READ_REG(sc, TXP_ISR) & TXP_INT_LATCH) != 0) {
863 taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
869 /* Re-enable interrupts. */
870 WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
874 #ifndef __NO_STRICT_ALIGNMENT
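/*
 * txp_fixup_rx: the received frame starts at a TXP_RXBUF_ALIGN-aligned
 * address, which leaves the IP header misaligned; copy the frame back by
 * (TXP_RXBUF_ALIGN - ETHER_ALIGN) bytes so the payload ends up 32-bit
 * aligned on strict-alignment machines.
 */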
876 txp_fixup_rx(struct mbuf *m)
881 src = mtod(m, uint16_t *);
882 dst = src - (TXP_RXBUF_ALIGN - ETHER_ALIGN) / sizeof *src;
884 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
887 m->m_data -= TXP_RXBUF_ALIGN - ETHER_ALIGN;
892 txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r, int count)
895 struct txp_rx_desc *rxd;
897 struct txp_rx_swdesc *sd;
898 uint32_t roff, woff, rx_stat, prog;
904 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
905 BUS_DMASYNC_POSTWRITE);
907 roff = le32toh(*r->r_roff);
908 woff = le32toh(*r->r_woff);
909 rxd = r->r_desc + roff / sizeof(struct txp_rx_desc);
910 for (prog = 0; roff != woff; prog++, count--) {
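/* Recover the software descriptor pointer stashed in the descriptor's unused virtual address field. */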
913 bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));
914 KASSERT(sd != NULL, ("%s: Rx desc ring corrupted", __func__));
915 bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
916 BUS_DMASYNC_POSTREAD);
917 bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
919 KASSERT(m != NULL, ("%s: Rx buffer ring corrupted", __func__));
921 TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
922 TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
923 if ((rxd->rx_flags & RX_FLAGS_ERROR) != 0) {
925 device_printf(sc->sc_dev, "Rx error %u\n",
926 le32toh(rxd->rx_stat) & RX_ERROR_MASK);
931 m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);
932 m->m_pkthdr.rcvif = ifp;
933 #ifndef __NO_STRICT_ALIGNMENT
936 rx_stat = le32toh(rxd->rx_stat);
937 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
938 if ((rx_stat & RX_STAT_IPCKSUMBAD) != 0)
939 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
940 else if ((rx_stat & RX_STAT_IPCKSUMGOOD) != 0)
941 m->m_pkthdr.csum_flags |=
942 CSUM_IP_CHECKED|CSUM_IP_VALID;
944 if ((rx_stat & RX_STAT_TCPCKSUMGOOD) != 0 ||
945 (rx_stat & RX_STAT_UDPCKSUMGOOD) != 0) {
946 m->m_pkthdr.csum_flags |=
947 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
948 m->m_pkthdr.csum_data = 0xffff;
954 * The Typhoon firmware has a bug: the VLAN tag is always
955 * stripped out even if the firmware is told not to remove it.
956 * Therefore don't check if_capenable here.
958 if (/* (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && */
959 (rx_stat & RX_STAT_VLAN) != 0) {
960 m->m_pkthdr.ether_vtag =
961 bswap16((le32toh(rxd->rx_vlan) >> 16));
962 m->m_flags |= M_VLANTAG;
966 (*ifp->if_input)(ifp, m);
970 roff += sizeof(struct txp_rx_desc);
971 if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
982 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
983 BUS_DMASYNC_PREWRITE);
984 *r->r_roff = htole32(roff);
986 return (count > 0 ? 0 : EAGAIN);
990 txp_rxbuf_reclaim(struct txp_softc *sc)
992 struct txp_hostvar *hv;
993 struct txp_rxbuf_desc *rbd;
994 struct txp_rx_swdesc *sd;
995 bus_dma_segment_t segs[1];
996 int nsegs, prod, prog;
1001 hv = sc->sc_hostvar;
1002 cons = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
1003 prod = sc->sc_rxbufprod;
1004 TXP_DESC_INC(prod, RXBUF_ENTRIES);
1008 bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1009 sc->sc_cdata.txp_rxbufs_map,
1010 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1012 for (prog = 0; prod != cons; prog++) {
1013 sd = TAILQ_FIRST(&sc->sc_free_list);
1016 rbd = sc->sc_rxbufs + prod;
1017 bcopy((u_long *)&rbd->rb_vaddrlo, &sd, sizeof(sd));
1018 sd->sd_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1019 if (sd->sd_mbuf == NULL)
1021 sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
1022 #ifndef __NO_STRICT_ALIGNMENT
1023 m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
1025 if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
1026 sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0) != 0) {
1027 m_freem(sd->sd_mbuf);
1031 KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
1033 TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1034 TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
1035 bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1036 BUS_DMASYNC_PREREAD);
1037 rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
1038 rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
1039 TXP_DESC_INC(prod, RXBUF_ENTRIES);
1044 bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1045 sc->sc_cdata.txp_rxbufs_map,
1046 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
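/* prod now points one past the last filled slot; step it back so the write index names the last valid descriptor. */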
1047 prod = (prod + RXBUF_ENTRIES - 1) % RXBUF_ENTRIES;
1048 sc->sc_rxbufprod = prod;
1049 hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(prod));
1053 * Reclaim mbufs and entries from a transmit ring.
1056 txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r)
1061 struct txp_tx_desc *txd;
1062 struct txp_swdesc *sd;
1064 TXP_LOCK_ASSERT(sc);
1066 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
1067 BUS_DMASYNC_POSTWRITE);
1069 idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
1072 txd = r->r_desc + cons;
1073 sd = sc->sc_txd + cons;
1075 for (cnt = r->r_cnt; cons != idx && cnt > 0; cnt--) {
1076 if ((txd->tx_flags & TX_FLAGS_TYPE_M) == TX_FLAGS_TYPE_DATA) {
1077 if (sd->sd_mbuf != NULL) {
1078 bus_dmamap_sync(sc->sc_cdata.txp_tx_tag,
1079 sd->sd_map, BUS_DMASYNC_POSTWRITE);
1080 bus_dmamap_unload(sc->sc_cdata.txp_tx_tag,
1082 m_freem(sd->sd_mbuf);
1089 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1091 if (++cons == TX_ENTRIES) {
1101 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
1102 BUS_DMASYNC_PREWRITE);
1106 sc->sc_watchdog_timer = 0;
1110 txp_shutdown(device_t dev)
1113 return (txp_suspend(dev));
1117 txp_suspend(device_t dev)
1119 struct txp_softc *sc;
1127 sc = device_get_softc(dev);
1133 /* Reset controller and make it reload sleep image. */
1135 /* Let controller boot from sleep image. */
1136 if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
1137 device_printf(sc->sc_dev, "couldn't boot sleep image\n");
1139 /* Set station address. */
1140 eaddr = IF_LLADDR(sc->sc_ifp);
1142 ((uint8_t *)&p1)[1] = eaddr[0];
1143 ((uint8_t *)&p1)[0] = eaddr[1];
1145 ((uint8_t *)&p2)[3] = eaddr[2];
1146 ((uint8_t *)&p2)[2] = eaddr[3];
1147 ((uint8_t *)&p2)[1] = eaddr[4];
1148 ((uint8_t *)&p2)[0] = eaddr[5];
1150 txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0, NULL, NULL,
1151 NULL, TXP_CMD_WAIT);
1153 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
1154 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
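/* Put the controller back to sleep; the capenable bits presumably tell the sleep image which wake-up (WOL) events to keep armed. */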
1155 txp_sleep(sc, sc->sc_ifp->if_capenable);
1156 if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
1158 pmstat = pci_read_config(sc->sc_dev,
1159 pmc + PCIR_POWER_STATUS, 2);
1160 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1161 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1162 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1163 pci_write_config(sc->sc_dev,
1164 pmc + PCIR_POWER_STATUS, pmstat, 2);
1172 txp_resume(device_t dev)
1174 struct txp_softc *sc;
1178 sc = device_get_softc(dev);
1181 if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
1182 /* Disable PME and clear PME status. */
1183 pmstat = pci_read_config(sc->sc_dev,
1184 pmc + PCIR_POWER_STATUS, 2);
1185 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
1186 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1187 pci_write_config(sc->sc_dev,
1188 pmc + PCIR_POWER_STATUS, pmstat, 2);
1191 if ((sc->sc_ifp->if_flags & IFF_UP) != 0)
1192 txp_init_locked(sc);
1198 struct txp_dmamap_arg {
1199 bus_addr_t txp_busaddr;
1203 txp_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1205 struct txp_dmamap_arg *ctx;
1210 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1212 ctx = (struct txp_dmamap_arg *)arg;
1213 ctx->txp_busaddr = segs[0].ds_addr;
1217 txp_dma_alloc(struct txp_softc *sc, char *type, bus_dma_tag_t *tag,
1218 bus_size_t alignment, bus_size_t boundary, bus_dmamap_t *map, void **buf,
1219 bus_size_t size, bus_addr_t *paddr)
1221 struct txp_dmamap_arg ctx;
1224 /* Create DMA block tag. */
1225 error = bus_dma_tag_create(
1226 sc->sc_cdata.txp_parent_tag, /* parent */
1227 alignment, boundary, /* algnmnt, boundary */
1228 BUS_SPACE_MAXADDR, /* lowaddr */
1229 BUS_SPACE_MAXADDR, /* highaddr */
1230 NULL, NULL, /* filter, filterarg */
1233 size, /* maxsegsize */
1235 NULL, NULL, /* lockfunc, lockarg */
1238 device_printf(sc->sc_dev,
1239 "could not create DMA tag for %s.\n", type);
1244 /* Allocate DMA'able memory and load the DMA map. */
1245 error = bus_dmamem_alloc(*tag, buf, BUS_DMA_WAITOK | BUS_DMA_ZERO |
1246 BUS_DMA_COHERENT, map);
1248 device_printf(sc->sc_dev,
1249 "could not allocate DMA'able memory for %s.\n", type);
1253 ctx.txp_busaddr = 0;
1254 error = bus_dmamap_load(*tag, *map, *(uint8_t **)buf,
1255 size, txp_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1256 if (error != 0 || ctx.txp_busaddr == 0) {
1257 device_printf(sc->sc_dev,
1258 "could not load DMA'able memory for %s.\n", type);
1261 *paddr = ctx.txp_busaddr;
1267 txp_dma_free(struct txp_softc *sc, bus_dma_tag_t *tag, bus_dmamap_t *map,
1273 bus_dmamap_unload(*tag, *map);
1274 if (*map != NULL && buf != NULL)
1275 bus_dmamem_free(*tag, *(uint8_t **)buf, *map);
1276 *(uint8_t **)buf = NULL;
1278 bus_dma_tag_destroy(*tag);
1284 txp_alloc_rings(struct txp_softc *sc)
1286 struct txp_boot_record *boot;
1287 struct txp_ldata *ld;
1288 struct txp_swdesc *txd;
1289 struct txp_rxbuf_desc *rbd;
1290 struct txp_rx_swdesc *sd;
1294 boot = ld->txp_boot;
1300 * Create parent ring/DMA block tag.
1301 * The datasheet says that all ring addresses and descriptors
1302 * support 64-bit addressing. However, the controller is
1303 * known not to support DAC, so limit the DMA address space
1306 error = bus_dma_tag_create(
1307 bus_get_dma_tag(sc->sc_dev), /* parent */
1308 1, 0, /* algnmnt, boundary */
1309 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1310 BUS_SPACE_MAXADDR, /* highaddr */
1311 NULL, NULL, /* filter, filterarg */
1312 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1314 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1316 NULL, NULL, /* lockfunc, lockarg */
1317 &sc->sc_cdata.txp_parent_tag);
1319 device_printf(sc->sc_dev, "could not create parent DMA tag.\n");
1324 error = txp_dma_alloc(sc, "boot record",
1325 &sc->sc_cdata.txp_boot_tag, sizeof(uint32_t), 0,
1326 &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
1327 sizeof(struct txp_boot_record),
1328 &sc->sc_ldata.txp_boot_paddr);
1331 boot = sc->sc_ldata.txp_boot;
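/* The boot record collects the bus addresses of the rings allocated below; its own physical address is handed to the firmware in txp_boot(). */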
1334 /* Host variables. */
1335 error = txp_dma_alloc(sc, "host variables",
1336 &sc->sc_cdata.txp_hostvar_tag, sizeof(uint32_t), 0,
1337 &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
1338 sizeof(struct txp_hostvar),
1339 &sc->sc_ldata.txp_hostvar_paddr);
1342 boot->br_hostvar_lo =
1343 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_hostvar_paddr));
1344 boot->br_hostvar_hi =
1345 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_hostvar_paddr));
1346 sc->sc_hostvar = sc->sc_ldata.txp_hostvar;
1348 /* Hi priority tx ring. */
1349 error = txp_dma_alloc(sc, "hi priority tx ring",
1350 &sc->sc_cdata.txp_txhiring_tag, sizeof(struct txp_tx_desc), 0,
1351 &sc->sc_cdata.txp_txhiring_map, (void **)&sc->sc_ldata.txp_txhiring,
1352 sizeof(struct txp_tx_desc) * TX_ENTRIES,
1353 &sc->sc_ldata.txp_txhiring_paddr);
1356 boot->br_txhipri_lo =
1357 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txhiring_paddr));
1358 boot->br_txhipri_hi =
1359 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txhiring_paddr));
1360 boot->br_txhipri_siz =
1361 htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
1362 sc->sc_txhir.r_tag = sc->sc_cdata.txp_txhiring_tag;
1363 sc->sc_txhir.r_map = sc->sc_cdata.txp_txhiring_map;
1364 sc->sc_txhir.r_reg = TXP_H2A_1;
1365 sc->sc_txhir.r_desc = sc->sc_ldata.txp_txhiring;
1366 sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
1367 sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
1369 /* Low priority tx ring. */
1370 error = txp_dma_alloc(sc, "low priority tx ring",
1371 &sc->sc_cdata.txp_txloring_tag, sizeof(struct txp_tx_desc), 0,
1372 &sc->sc_cdata.txp_txloring_map, (void **)&sc->sc_ldata.txp_txloring,
1373 sizeof(struct txp_tx_desc) * TX_ENTRIES,
1374 &sc->sc_ldata.txp_txloring_paddr);
1377 boot->br_txlopri_lo =
1378 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txloring_paddr));
1379 boot->br_txlopri_hi =
1380 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txloring_paddr));
1381 boot->br_txlopri_siz =
1382 htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
1383 sc->sc_txlor.r_tag = sc->sc_cdata.txp_txloring_tag;
1384 sc->sc_txlor.r_map = sc->sc_cdata.txp_txloring_map;
1385 sc->sc_txlor.r_reg = TXP_H2A_3;
1386 sc->sc_txlor.r_desc = sc->sc_ldata.txp_txloring;
1387 sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
1388 sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;
1390 /* High priority rx ring. */
1391 error = txp_dma_alloc(sc, "hi priority rx ring",
1392 &sc->sc_cdata.txp_rxhiring_tag, sizeof(struct txp_rx_desc), 0,
1393 &sc->sc_cdata.txp_rxhiring_map, (void **)&sc->sc_ldata.txp_rxhiring,
1394 sizeof(struct txp_rx_desc) * RX_ENTRIES,
1395 &sc->sc_ldata.txp_rxhiring_paddr);
1398 boot->br_rxhipri_lo =
1399 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxhiring_paddr));
1400 boot->br_rxhipri_hi =
1401 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxhiring_paddr));
1402 boot->br_rxhipri_siz =
1403 htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
1404 sc->sc_rxhir.r_tag = sc->sc_cdata.txp_rxhiring_tag;
1405 sc->sc_rxhir.r_map = sc->sc_cdata.txp_rxhiring_map;
1406 sc->sc_rxhir.r_desc = sc->sc_ldata.txp_rxhiring;
1407 sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
1408 sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
1410 /* Low priority rx ring. */
1411 error = txp_dma_alloc(sc, "low priority rx ring",
1412 &sc->sc_cdata.txp_rxloring_tag, sizeof(struct txp_rx_desc), 0,
1413 &sc->sc_cdata.txp_rxloring_map, (void **)&sc->sc_ldata.txp_rxloring,
1414 sizeof(struct txp_rx_desc) * RX_ENTRIES,
1415 &sc->sc_ldata.txp_rxloring_paddr);
1418 boot->br_rxlopri_lo =
1419 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxloring_paddr));
1420 boot->br_rxlopri_hi =
1421 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxloring_paddr));
1422 boot->br_rxlopri_siz =
1423 htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
1424 sc->sc_rxlor.r_tag = sc->sc_cdata.txp_rxloring_tag;
1425 sc->sc_rxlor.r_map = sc->sc_cdata.txp_rxloring_map;
1426 sc->sc_rxlor.r_desc = sc->sc_ldata.txp_rxloring;
1427 sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
1428 sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
1431 error = txp_dma_alloc(sc, "command ring",
1432 &sc->sc_cdata.txp_cmdring_tag, sizeof(struct txp_cmd_desc), 0,
1433 &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
1434 sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
1435 &sc->sc_ldata.txp_cmdring_paddr);
1438 boot->br_cmd_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_cmdring_paddr));
1439 boot->br_cmd_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_cmdring_paddr));
1440 boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
1441 sc->sc_cmdring.base = sc->sc_ldata.txp_cmdring;
1442 sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
1443 sc->sc_cmdring.lastwrite = 0;
1445 /* Response ring. */
1446 error = txp_dma_alloc(sc, "response ring",
1447 &sc->sc_cdata.txp_rspring_tag, sizeof(struct txp_rsp_desc), 0,
1448 &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
1449 sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
1450 &sc->sc_ldata.txp_rspring_paddr);
1453 boot->br_resp_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rspring_paddr));
1454 boot->br_resp_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rspring_paddr));
1455 boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc));
1456 sc->sc_rspring.base = sc->sc_ldata.txp_rspring;
1457 sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
1458 sc->sc_rspring.lastwrite = 0;
1460 /* Receive buffer ring. */
1461 error = txp_dma_alloc(sc, "receive buffer ring",
1462 &sc->sc_cdata.txp_rxbufs_tag, sizeof(struct txp_rxbuf_desc), 0,
1463 &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
1464 sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
1465 &sc->sc_ldata.txp_rxbufs_paddr);
1469 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxbufs_paddr));
1471 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxbufs_paddr));
1472 boot->br_rxbuf_siz =
1473 htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
1474 sc->sc_rxbufs = sc->sc_ldata.txp_rxbufs;
1477 error = txp_dma_alloc(sc, "zero buffer",
1478 &sc->sc_cdata.txp_zero_tag, sizeof(uint32_t), 0,
1479 &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
1480 sizeof(uint32_t), &sc->sc_ldata.txp_zero_paddr);
1483 boot->br_zero_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_zero_paddr));
1484 boot->br_zero_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_zero_paddr));
1486 bus_dmamap_sync(sc->sc_cdata.txp_boot_tag, sc->sc_cdata.txp_boot_map,
1487 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1489 /* Create Tx buffers. */
1490 error = bus_dma_tag_create(
1491 sc->sc_cdata.txp_parent_tag, /* parent */
1492 1, 0, /* algnmnt, boundary */
1493 BUS_SPACE_MAXADDR, /* lowaddr */
1494 BUS_SPACE_MAXADDR, /* highaddr */
1495 NULL, NULL, /* filter, filterarg */
1496 MCLBYTES * TXP_MAXTXSEGS, /* maxsize */
1497 TXP_MAXTXSEGS, /* nsegments */
1498 MCLBYTES, /* maxsegsize */
1500 NULL, NULL, /* lockfunc, lockarg */
1501 &sc->sc_cdata.txp_tx_tag);
1503 device_printf(sc->sc_dev, "could not create Tx DMA tag.\n");
1507 /* Create tag for Rx buffers. */
1508 error = bus_dma_tag_create(
1509 sc->sc_cdata.txp_parent_tag, /* parent */
1510 TXP_RXBUF_ALIGN, 0, /* algnmnt, boundary */
1511 BUS_SPACE_MAXADDR, /* lowaddr */
1512 BUS_SPACE_MAXADDR, /* highaddr */
1513 NULL, NULL, /* filter, filterarg */
1514 MCLBYTES, /* maxsize */
1516 MCLBYTES, /* maxsegsize */
1518 NULL, NULL, /* lockfunc, lockarg */
1519 &sc->sc_cdata.txp_rx_tag);
1521 device_printf(sc->sc_dev, "could not create Rx DMA tag.\n");
1525 /* Create DMA maps for Tx buffers. */
1526 for (i = 0; i < TX_ENTRIES; i++) {
1527 txd = &sc->sc_txd[i];
1528 txd->sd_mbuf = NULL;
1530 error = bus_dmamap_create(sc->sc_cdata.txp_tx_tag, 0,
1533 device_printf(sc->sc_dev,
1534 "could not create Tx dmamap.\n");
1539 /* Create DMA maps for Rx buffers. */
1540 for (i = 0; i < RXBUF_ENTRIES; i++) {
1541 sd = malloc(sizeof(struct txp_rx_swdesc), M_DEVBUF,
1548 * The virtual address part of the descriptor is not used
1549 * by the hardware, so use it to stash the software descriptor
1550 * pointer. bcopy is needed here; otherwise the address wouldn't
1551 * be valid on big-endian architectures.
1553 rbd = sc->sc_rxbufs + i;
1554 bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
1557 error = bus_dmamap_create(sc->sc_cdata.txp_rx_tag, 0,
1560 device_printf(sc->sc_dev,
1561 "could not create Rx dmamap.\n");
1564 TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
1572 txp_init_rings(struct txp_softc *sc)
1575 bzero(sc->sc_ldata.txp_hostvar, sizeof(struct txp_hostvar));
1576 bzero(sc->sc_ldata.txp_zero, sizeof(uint32_t));
1577 sc->sc_txhir.r_cons = 0;
1578 sc->sc_txhir.r_prod = 0;
1579 sc->sc_txhir.r_cnt = 0;
1580 sc->sc_txlor.r_cons = 0;
1581 sc->sc_txlor.r_prod = 0;
1582 sc->sc_txlor.r_cnt = 0;
1583 sc->sc_cmdring.lastwrite = 0;
1584 sc->sc_rspring.lastwrite = 0;
1585 sc->sc_rxbufprod = 0;
1586 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1587 sc->sc_cdata.txp_hostvar_map,
1588 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1592 txp_wait(struct txp_softc *sc, uint32_t state)
1597 for (i = 0; i < TXP_TIMEOUT; i++) {
1598 reg = READ_REG(sc, TXP_A2H_0);
1604 return (i == TXP_TIMEOUT ? ETIMEDOUT : 0);
1608 txp_free_rings(struct txp_softc *sc)
1610 struct txp_swdesc *txd;
1611 struct txp_rx_swdesc *sd;
1615 if (sc->sc_cdata.txp_tx_tag != NULL) {
1616 for (i = 0; i < TX_ENTRIES; i++) {
1617 txd = &sc->sc_txd[i];
1618 if (txd->sd_map != NULL) {
1619 bus_dmamap_destroy(sc->sc_cdata.txp_tx_tag,
1624 bus_dma_tag_destroy(sc->sc_cdata.txp_tx_tag);
1625 sc->sc_cdata.txp_tx_tag = NULL;
1628 if (sc->sc_cdata.txp_rx_tag != NULL) {
1629 if (sc->sc_rxbufs != NULL) {
1630 KASSERT(TAILQ_FIRST(&sc->sc_busy_list) == NULL,
1631 ("%s : still have busy Rx buffers", __func__));
1632 while ((sd = TAILQ_FIRST(&sc->sc_free_list)) != NULL) {
1633 TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1634 if (sd->sd_map != NULL) {
1636 sc->sc_cdata.txp_rx_tag,
1643 bus_dma_tag_destroy(sc->sc_cdata.txp_rx_tag);
1644 sc->sc_cdata.txp_rx_tag = NULL;
1647 /* Hi priority Tx ring. */
1648 txp_dma_free(sc, &sc->sc_cdata.txp_txhiring_tag,
1649 &sc->sc_cdata.txp_txhiring_map,
1650 (void **)&sc->sc_ldata.txp_txhiring);
1651 /* Low priority Tx ring. */
1652 txp_dma_free(sc, &sc->sc_cdata.txp_txloring_tag,
1653 &sc->sc_cdata.txp_txloring_map,
1654 (void **)&sc->sc_ldata.txp_txloring);
1655 /* Hi priority Rx ring. */
1656 txp_dma_free(sc, &sc->sc_cdata.txp_rxhiring_tag,
1657 &sc->sc_cdata.txp_rxhiring_map,
1658 (void **)&sc->sc_ldata.txp_rxhiring);
1659 /* Low priority Rx ring. */
1660 txp_dma_free(sc, &sc->sc_cdata.txp_rxloring_tag,
1661 &sc->sc_cdata.txp_rxloring_map,
1662 (void **)&sc->sc_ldata.txp_rxloring);
1663 /* Receive buffer ring. */
1664 txp_dma_free(sc, &sc->sc_cdata.txp_rxbufs_tag,
1665 &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs);
1667 txp_dma_free(sc, &sc->sc_cdata.txp_cmdring_tag,
1668 &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring);
1669 /* Response ring. */
1670 txp_dma_free(sc, &sc->sc_cdata.txp_rspring_tag,
1671 &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring);
1673 txp_dma_free(sc, &sc->sc_cdata.txp_zero_tag,
1674 &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero);
1675 /* Host variables. */
1676 txp_dma_free(sc, &sc->sc_cdata.txp_hostvar_tag,
1677 &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar);
1679 txp_dma_free(sc, &sc->sc_cdata.txp_boot_tag,
1680 &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot);
1682 if (sc->sc_cdata.txp_parent_tag != NULL) {
1683 bus_dma_tag_destroy(sc->sc_cdata.txp_parent_tag);
1684 sc->sc_cdata.txp_parent_tag = NULL;
1690 txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1692 struct txp_softc *sc = ifp->if_softc;
1693 struct ifreq *ifr = (struct ifreq *)data;
1694 int capenable, error = 0, mask;
1699 if ((ifp->if_flags & IFF_UP) != 0) {
1700 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1701 if (((ifp->if_flags ^ sc->sc_if_flags)
1702 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1705 if ((sc->sc_flags & TXP_FLAG_DETACH) == 0)
1706 txp_init_locked(sc);
1709 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1712 sc->sc_if_flags = ifp->if_flags;
1718 * Multicast list has changed; set the hardware
1719 * filter accordingly.
1722 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1728 capenable = ifp->if_capenable;
1729 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1730 if ((mask & IFCAP_TXCSUM) != 0 &&
1731 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1732 ifp->if_capenable ^= IFCAP_TXCSUM;
1733 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1734 ifp->if_hwassist |= TXP_CSUM_FEATURES;
1736 ifp->if_hwassist &= ~TXP_CSUM_FEATURES;
1738 if ((mask & IFCAP_RXCSUM) != 0 &&
1739 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
1740 ifp->if_capenable ^= IFCAP_RXCSUM;
1741 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1742 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
1743 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1744 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1745 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0)
1746 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1747 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1748 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
1749 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1750 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
1751 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
1752 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
1753 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
1754 if (capenable != ifp->if_capenable)
1755 txp_set_capabilities(sc);
1757 VLAN_CAPABILITIES(ifp);
1761 error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
1764 error = ether_ioctl(ifp, command, data);
1772 txp_rxring_fill(struct txp_softc *sc)
1774 struct txp_rxbuf_desc *rbd;
1775 struct txp_rx_swdesc *sd;
1776 bus_dma_segment_t segs[1];
1777 int error, i, nsegs;
1779 TXP_LOCK_ASSERT(sc);
1781 bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1782 sc->sc_cdata.txp_rxbufs_map,
1783 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1785 for (i = 0; i < RXBUF_ENTRIES; i++) {
1786 sd = TAILQ_FIRST(&sc->sc_free_list);
1789 rbd = sc->sc_rxbufs + i;
1790 bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
1791 KASSERT(sd->sd_mbuf == NULL,
1792 ("%s : Rx buffer ring corrupted", __func__));
1793 sd->sd_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1794 if (sd->sd_mbuf == NULL)
1796 sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
1797 #ifndef __NO_STRICT_ALIGNMENT
1798 m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
1800 if ((error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
1801 sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0)) != 0) {
1802 m_freem(sd->sd_mbuf);
1806 KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
1808 TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1809 TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
1810 bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1811 BUS_DMASYNC_PREREAD);
1812 rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
1813 rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
1816 bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1817 sc->sc_cdata.txp_rxbufs_map,
1818 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1819 sc->sc_rxbufprod = RXBUF_ENTRIES - 1;
1820 sc->sc_hostvar->hv_rx_buf_write_idx =
1821 htole32(TXP_IDX2OFFSET(RXBUF_ENTRIES - 1));
1827 txp_rxring_empty(struct txp_softc *sc)
1829 struct txp_rx_swdesc *sd;
1832 TXP_LOCK_ASSERT(sc);
1834 if (sc->sc_rxbufs == NULL)
1836 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1837 sc->sc_cdata.txp_hostvar_map,
1838 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1840 /* Release allocated Rx buffers. */
1842 while ((sd = TAILQ_FIRST(&sc->sc_busy_list)) != NULL) {
1843 TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
1844 KASSERT(sd->sd_mbuf != NULL,
1845 ("%s : Rx buffer ring corrupted", __func__));
1846 bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1847 BUS_DMASYNC_POSTREAD);
1848 bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
1849 m_freem(sd->sd_mbuf);
1851 TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
1859 struct txp_softc *sc;
1863 txp_init_locked(sc);
1868 txp_init_locked(struct txp_softc *sc)
1876 TXP_LOCK_ASSERT(sc);
1879 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1882 /* Initialize ring structure. */
1884 /* Wakeup controller. */
1885 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_WAKEUP);
1886 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
1888 * It seems that earlier NV images can come back online from the
1889 * wakeup command, but newer ones require a controller reset.
1890 * So just reset the controller again.
1892 if (txp_reset(sc) != 0)
1894 /* Download firmware. */
1895 error = txp_download_fw(sc);
1897 device_printf(sc->sc_dev, "could not download firmware.\n");
1900 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1901 sc->sc_cdata.txp_hostvar_map,
1902 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1903 if ((error = txp_rxring_fill(sc)) != 0) {
1904 device_printf(sc->sc_dev, "no memory for Rx buffers.\n");
1907 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1908 sc->sc_cdata.txp_hostvar_map,
1909 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1910 if (txp_boot(sc, STAT_WAITING_FOR_BOOT) != 0) {
1911 device_printf(sc->sc_dev, "could not boot firmware.\n");
1916 * Contrary to the Typhoon T2 software functional specification,
1917 * the TXP_CMD_RECV_BUFFER_CONTROL command does not appear to be
1918 * implemented in the firmware. This means the driver has to
1919 * handle misaligned frames on strict-alignment architectures. AFAIK
1920 * this is the only controller manufactured by 3Com that has this
1921 * stupid bug. 3Com should fix this.
1923 if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
1924 NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1926 /* Undocumented command (interrupt coalescing disable?) - from Linux. */
1927 if (txp_command(sc, TXP_CMD_FILTER_DEFINE, 0, 0, 0, NULL, NULL, NULL,
1928 TXP_CMD_NOWAIT) != 0)
1931 /* Set station address. */
1932 eaddr = IF_LLADDR(sc->sc_ifp);
1934 ((uint8_t *)&p1)[1] = eaddr[0];
1935 ((uint8_t *)&p1)[0] = eaddr[1];
1937 ((uint8_t *)&p2)[3] = eaddr[2];
1938 ((uint8_t *)&p2)[2] = eaddr[3];
1939 ((uint8_t *)&p2)[1] = eaddr[4];
1940 ((uint8_t *)&p2)[0] = eaddr[5];
1942 if (txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0,
1943 NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1947 txp_set_capabilities(sc);
1949 if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
1950 NULL, NULL, NULL, TXP_CMD_NOWAIT))
1952 if (txp_command(sc, TXP_CMD_XCVR_SELECT, sc->sc_xcvr, 0, 0,
1953 NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1955 if (txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
1956 TXP_CMD_NOWAIT) != 0)
1958 if (txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
1959 TXP_CMD_NOWAIT) != 0)
1962 /* Ack all pending interrupts and enable interrupts. */
1963 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
1964 WRITE_REG(sc, TXP_IER, TXP_INTRS);
1965 WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
1967 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1968 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1970 callout_reset(&sc->sc_tick, hz, txp_tick, sc);
1974 txp_rxring_empty(sc);
1977 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
1983 struct txp_softc *sc;
1985 struct txp_rsp_desc *rsp;
1986 struct txp_ext_desc *ext;
1990 TXP_LOCK_ASSERT(sc);
1991 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1992 sc->sc_cdata.txp_hostvar_map,
1993 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1994 txp_rxbuf_reclaim(sc);
1995 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1996 sc->sc_cdata.txp_hostvar_map,
1997 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2002 link = sc->sc_flags & TXP_FLAG_LINK;
2003 if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
2004 &rsp, TXP_CMD_WAIT))
2006 if (rsp->rsp_numdesc != 6)
2008 txp_stats_update(sc, rsp);
2009 if (link == 0 && (sc->sc_flags & TXP_FLAG_LINK) != 0) {
2010 ext = (struct txp_ext_desc *)(rsp + 1);
2011 /* Update baudrate with resolved speed. */
2012 if ((ext[5].ext_2 & 0x02) != 0)
2013 ifp->if_baudrate = IF_Mbps(100);
2015 ifp->if_baudrate = IF_Mbps(10);
2020 free(rsp, M_DEVBUF);
2022 callout_reset(&sc->sc_tick, hz, txp_tick, sc);
2026 txp_start(struct ifnet *ifp)
2028 struct txp_softc *sc;
2032 txp_start_locked(ifp);
2037 txp_start_locked(struct ifnet *ifp)
2039 struct txp_softc *sc;
2040 struct mbuf *m_head;
2044 TXP_LOCK_ASSERT(sc);
2046 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2047 IFF_DRV_RUNNING || (sc->sc_flags & TXP_FLAG_LINK) == 0)
2050 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
2051 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2055 * Pack the data into the transmit ring. If we
2056 * don't have room, set the OACTIVE flag and wait
2057 * for the NIC to drain the ring.
2058 * At the moment only the Hi ring is used.
2060 if (txp_encap(sc, &sc->sc_txhir, &m_head)) {
2063 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2064 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2069 * If there's a BPF listener, bounce a copy of this frame
2072 ETHER_BPF_MTAP(ifp, m_head);
2074 /* Send queued frame. */
2075 WRITE_REG(sc, sc->sc_txhir.r_reg,
2076 TXP_IDX2OFFSET(sc->sc_txhir.r_prod));
2080 /* Set a timeout in case the chip goes out to lunch. */
2081 sc->sc_watchdog_timer = TXP_TX_TIMEOUT;
2086 txp_encap(struct txp_softc *sc, struct txp_tx_ring *r, struct mbuf **m_head)
2088 struct txp_tx_desc *first_txd;
2089 struct txp_frag_desc *fxd;
2090 struct txp_swdesc *sd;
2092 bus_dma_segment_t txsegs[TXP_MAXTXSEGS];
2093 int error, i, nsegs;
2095 TXP_LOCK_ASSERT(sc);
2097 M_ASSERTPKTHDR((*m_head));
2100 first_txd = r->r_desc + r->r_prod;
2101 sd = sc->sc_txd + r->r_prod;
2103 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2104 *m_head, txsegs, &nsegs, 0);
2105 if (error == EFBIG) {
2106 m = m_collapse(*m_head, M_DONTWAIT, TXP_MAXTXSEGS);
2113 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag,
2114 sd->sd_map, *m_head, txsegs, &nsegs, 0);
2120 } else if (error != 0)
2128 /* Check descriptor overrun. */
2129 if (r->r_cnt + nsegs >= TX_ENTRIES - TXP_TXD_RESERVED) {
2130 bus_dmamap_unload(sc->sc_cdata.txp_tx_tag, sd->sd_map);
2133 bus_dmamap_sync(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2134 BUS_DMASYNC_PREWRITE);
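/*
 * Mapping recap: the mbuf chain is loaded in one shot; on EFBIG it is
 * collapsed to at most TXP_MAXTXSEGS clusters and loaded once more,
 * and the mapping is dropped again if the Tx ring cannot hold the
 * resulting fragments plus the reserved descriptors.
 */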
2137 first_txd->tx_flags = TX_FLAGS_TYPE_DATA;
2138 first_txd->tx_numdesc = 0;
2139 first_txd->tx_addrlo = 0;
2140 first_txd->tx_addrhi = 0;
2141 first_txd->tx_totlen = 0;
2142 first_txd->tx_pflags = 0;
2144 TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2146 /* Configure Tx IP/TCP/UDP checksum offload. */
2147 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2148 first_txd->tx_pflags |= htole32(TX_PFLAGS_IPCKSUM);
2150 /* XXX firmware bug. */
2151 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2152 first_txd->tx_pflags |= htole32(TX_PFLAGS_TCPCKSUM);
2153 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2154 first_txd->tx_pflags |= htole32(TX_PFLAGS_UDPCKSUM);
2157 /* Configure VLAN hardware tag insertion. */
2158 if ((m->m_flags & M_VLANTAG) != 0)
2159 first_txd->tx_pflags |=
2160 htole32(TX_PFLAGS_VLAN | TX_PFLAGS_PRIO |
2161 (bswap16(m->m_pkthdr.ether_vtag) << TX_PFLAGS_VLANTAG_S));
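/*
 * The 802.1Q tag is byte swapped before being merged into the pflags
 * word; the firmware apparently expects the VLAN tag in wire (big
 * endian) order inside the otherwise little endian descriptor.
 */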
2163 for (i = 0; i < nsegs; i++) {
2164 fxd = (struct txp_frag_desc *)(r->r_desc + r->r_prod);
2165 fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG | TX_FLAGS_VALID;
2166 fxd->frag_rsvd1 = 0;
2167 fxd->frag_len = htole16(txsegs[i].ds_len);
2168 fxd->frag_addrhi = htole32(TXP_ADDR_HI(txsegs[i].ds_addr));
2169 fxd->frag_addrlo = htole32(TXP_ADDR_LO(txsegs[i].ds_addr));
2170 fxd->frag_rsvd2 = 0;
2171 first_txd->tx_numdesc++;
2173 TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2176 /* Lastly set valid flag. */
2177 first_txd->tx_flags |= TX_FLAGS_VALID;
2179 /* Sync descriptors. */
2180 bus_dmamap_sync(r->r_tag, r->r_map,
2181 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
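/*
 * Ordering note: TX_FLAGS_VALID is set only after every fragment
 * descriptor has been filled in, and the ring is synced here before
 * txp_start_locked() posts the producer offset, so the firmware never
 * sees a half-built descriptor chain.
 */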
2187 * Handle simple commands sent to the typhoon
2190 txp_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
2191 uint32_t in3, uint16_t *out1, uint32_t *out2, uint32_t *out3, int wait)
2193 struct txp_rsp_desc *rsp;
2196 if (txp_ext_command(sc, id, in1, in2, in3, NULL, 0, &rsp, wait) != 0) {
2197 device_printf(sc->sc_dev, "command 0x%02x failed\n", id);
2201 if (wait == TXP_CMD_NOWAIT)
2204 KASSERT(rsp != NULL, ("rsp is NULL!\n"));
2206 *out1 = le16toh(rsp->rsp_par1);
2208 *out2 = le32toh(rsp->rsp_par2);
2210 *out3 = le32toh(rsp->rsp_par3);
2211 free(rsp, M_DEVBUF);
2216 txp_ext_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
2217 uint32_t in3, struct txp_ext_desc *in_extp, uint8_t in_extn,
2218 struct txp_rsp_desc **rspp, int wait)
2220 struct txp_hostvar *hv;
2221 struct txp_cmd_desc *cmd;
2222 struct txp_ext_desc *ext;
2228 hv = sc->sc_hostvar;
2229 if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
2230 device_printf(sc->sc_dev,
2231 "%s : out of free cmd descriptors for command 0x%02x\n",
2236 bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
2237 sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_POSTWRITE);
2238 idx = sc->sc_cmdring.lastwrite;
2239 cmd = (struct txp_cmd_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
2240 bzero(cmd, sizeof(*cmd));
2242 cmd->cmd_numdesc = in_extn;
2244 cmd->cmd_seq = htole16(seq);
2245 cmd->cmd_id = htole16(id);
2246 cmd->cmd_par1 = htole16(in1);
2247 cmd->cmd_par2 = htole32(in2);
2248 cmd->cmd_par3 = htole32(in3);
2249 cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
2250 (wait == TXP_CMD_WAIT ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
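/*
 * CMD_FLAGS_RESP is requested only for TXP_CMD_WAIT callers, so the
 * firmware generates a response descriptor only when the driver is
 * actually going to consume one from the response ring.
 */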
2252 idx += sizeof(struct txp_cmd_desc);
2253 if (idx == sc->sc_cmdring.size)
2256 for (i = 0; i < in_extn; i++) {
2257 ext = (struct txp_ext_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
2258 bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
2260 idx += sizeof(struct txp_cmd_desc);
2261 if (idx == sc->sc_cmdring.size)
2265 sc->sc_cmdring.lastwrite = idx;
2266 bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
2267 sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_PREWRITE);
2268 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2269 sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_PREREAD |
2270 BUS_DMASYNC_PREWRITE);
2271 WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
2272 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
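/*
 * The TXP_H2A_2 write acts as the command ring doorbell; the bus
 * space write barrier makes sure the doorbell is issued before the
 * driver goes on to poll the response ring below.
 */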
2274 if (wait == TXP_CMD_NOWAIT)
2277 for (i = 0; i < TXP_TIMEOUT; i++) {
2278 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2279 sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_POSTREAD |
2280 BUS_DMASYNC_POSTWRITE);
2281 if (le32toh(hv->hv_resp_read_idx) !=
2282 le32toh(hv->hv_resp_write_idx)) {
2283 error = txp_response(sc, id, seq, rspp);
2284 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2285 sc->sc_cdata.txp_hostvar_map,
2286 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2294 if (i == TXP_TIMEOUT) {
2295 device_printf(sc->sc_dev, "command 0x%02x timed out\n", id);
2303 txp_response(struct txp_softc *sc, uint16_t id, uint16_t seq,
2304 struct txp_rsp_desc **rspp)
2306 struct txp_hostvar *hv;
2307 struct txp_rsp_desc *rsp;
2310 bus_dmamap_sync(sc->sc_cdata.txp_rspring_tag,
2311 sc->sc_cdata.txp_rspring_map, BUS_DMASYNC_POSTREAD);
2312 hv = sc->sc_hostvar;
2313 ridx = le32toh(hv->hv_resp_read_idx);
2314 while (ridx != le32toh(hv->hv_resp_write_idx)) {
2315 rsp = (struct txp_rsp_desc *)(((uint8_t *)sc->sc_rspring.base) + ridx);
2317 if (id == le16toh(rsp->rsp_id) &&
2318 le16toh(rsp->rsp_seq) == seq) {
2319 *rspp = (struct txp_rsp_desc *)malloc(
2320 sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
2321 M_DEVBUF, M_NOWAIT);
2322 if (*rspp == NULL) {
2323 device_printf(sc->sc_dev, "%s : command 0x%02x "
2324 "memory allocation failure\n",
2328 txp_rsp_fixup(sc, rsp, *rspp);
2332 if ((rsp->rsp_flags & RSP_FLAGS_ERROR) != 0) {
2333 device_printf(sc->sc_dev,
2334 "%s : command 0x%02x response error!\n", __func__,
2335 le16toh(rsp->rsp_id));
2336 txp_rsp_fixup(sc, rsp, NULL);
2337 ridx = le32toh(hv->hv_resp_read_idx);
2342 * The following unsolicited responses are handled during
2343 * processing of TXP_CMD_READ_STATISTICS, which requires a
2344 * response.  The driver abuses that command to detect media status.
2346 * TXP_CMD_FILTER_DEFINE is not an unsolicited response,
2347 * but we don't process the response ring in the interrupt
2348 * handler, so we have to ignore this command here; otherwise
2349 * an unknown command message would be printed.
2351 switch (le16toh(rsp->rsp_id)) {
2352 case TXP_CMD_CYCLE_STATISTICS:
2353 case TXP_CMD_FILTER_DEFINE:
2355 case TXP_CMD_MEDIA_STATUS_READ:
2356 if ((le16toh(rsp->rsp_par1) & 0x0800) == 0) {
2357 sc->sc_flags |= TXP_FLAG_LINK;
2358 if_link_state_change(sc->sc_ifp,
2361 sc->sc_flags &= ~TXP_FLAG_LINK;
2362 if_link_state_change(sc->sc_ifp,
2366 case TXP_CMD_HELLO_RESPONSE:
2368 * The driver should respond to the hello message, but
2369 * TXP_CMD_READ_STATISTICS is issued every hz ticks
2370 * (once a second), so there is no need to send an
2371 * explicit command here.
2373 device_printf(sc->sc_dev, "%s : hello\n", __func__);
2376 device_printf(sc->sc_dev,
2377 "%s : unknown command 0x%02x\n", __func__,
2378 le16toh(rsp->rsp_id));
2380 txp_rsp_fixup(sc, rsp, NULL);
2381 ridx = le32toh(hv->hv_resp_read_idx);
2388 txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
2389 struct txp_rsp_desc *dst)
2391 struct txp_rsp_desc *src;
2392 struct txp_hostvar *hv;
2396 hv = sc->sc_hostvar;
2397 ridx = le32toh(hv->hv_resp_read_idx);
2399 for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
2401 bcopy(src, dst++, sizeof(struct txp_rsp_desc));
2402 ridx += sizeof(struct txp_rsp_desc);
2403 if (ridx == sc->sc_rspring.size) {
2404 src = sc->sc_rspring.base;
2408 sc->sc_rspring.lastwrite = ridx;
2411 hv->hv_resp_read_idx = htole32(ridx);
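/*
 * Advancing hv_resp_read_idx hands the consumed response descriptors
 * back to the firmware, which compares it against hv_resp_write_idx
 * to see how much of the ring is still outstanding.
 */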
2415 txp_cmd_desc_numfree(struct txp_softc *sc)
2417 struct txp_hostvar *hv;
2418 struct txp_boot_record *br;
2419 uint32_t widx, ridx, nfree;
2421 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2422 sc->sc_cdata.txp_hostvar_map,
2423 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2424 hv = sc->sc_hostvar;
2426 widx = sc->sc_cmdring.lastwrite;
2427 ridx = le32toh(hv->hv_cmd_read_idx);
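/*
 * widx and ridx are byte offsets into the command ring; one
 * descriptor's worth of space is always left unused so a full ring
 * can be told apart from an empty one.  As a purely illustrative
 * example, with 16-byte descriptors and a 128-byte ring, widx = 64
 * and ridx = 32 gives 128 - (64 - 32 + 16) = 80 bytes, i.e. 5 free
 * descriptors.
 */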
2430 /* Ring is completely free */
2431 nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
2434 nfree = le32toh(br->br_cmd_siz) -
2435 (widx - ridx + sizeof(struct txp_cmd_desc));
2437 nfree = ridx - widx - sizeof(struct txp_cmd_desc);
2440 return (nfree / sizeof(struct txp_cmd_desc));
2444 txp_sleep(struct txp_softc *sc, int capenable)
2450 if ((capenable & IFCAP_WOL_MAGIC) != 0)
2452 error = txp_command(sc, TXP_CMD_ENABLE_WAKEUP_EVENTS, events, 0, 0,
2453 NULL, NULL, NULL, TXP_CMD_NOWAIT);
2456 error = txp_command(sc, TXP_CMD_GOTO_SLEEP, 0, 0, 0, NULL,
2457 NULL, NULL, TXP_CMD_NOWAIT);
2459 error = txp_wait(sc, STAT_SLEEPING);
2461 device_printf(sc->sc_dev,
2462 "unable to enter sleep state\n");
2470 txp_stop(struct txp_softc *sc)
2474 TXP_LOCK_ASSERT(sc);
2477 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2480 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
2481 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
2483 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2484 sc->sc_flags &= ~TXP_FLAG_LINK;
2486 callout_stop(&sc->sc_tick);
2488 txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
2490 txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
2492 /* Save statistics for later use. */
2494 /* Halt controller. */
2495 txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL,
2498 if (txp_wait(sc, STAT_HALTED) != 0)
2499 device_printf(sc->sc_dev, "controller halt timed out!\n");
2500 /* Reclaim Tx/Rx buffers. */
2501 if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
2502 TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
2503 txp_tx_reclaim(sc, &sc->sc_txhir);
2504 if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
2505 TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
2506 txp_tx_reclaim(sc, &sc->sc_txlor);
2507 txp_rxring_empty(sc);
2510 /* Reset controller and make it reload sleep image. */
2512 /* Let controller boot from sleep image. */
2513 if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
2514 device_printf(sc->sc_dev, "could not boot sleep image\n");
2519 txp_watchdog(struct txp_softc *sc)
2523 TXP_LOCK_ASSERT(sc);
2525 if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
2529 if_printf(ifp, "watchdog timeout -- resetting\n");
2532 txp_init_locked(sc);
2536 txp_ifmedia_upd(struct ifnet *ifp)
2538 struct txp_softc *sc = ifp->if_softc;
2539 struct ifmedia *ifm = &sc->sc_ifmedia;
2543 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
2548 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
2549 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2550 new_xcvr = TXP_XCVR_10_FDX;
2552 new_xcvr = TXP_XCVR_10_HDX;
2553 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
2554 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2555 new_xcvr = TXP_XCVR_100_FDX;
2557 new_xcvr = TXP_XCVR_100_HDX;
2558 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
2559 new_xcvr = TXP_XCVR_AUTO;
2566 if (sc->sc_xcvr == new_xcvr) {
2571 txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
2572 NULL, NULL, NULL, TXP_CMD_NOWAIT);
2573 sc->sc_xcvr = new_xcvr;
2580 txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2582 struct txp_softc *sc = ifp->if_softc;
2583 struct ifmedia *ifm = &sc->sc_ifmedia;
2584 uint16_t bmsr, bmcr, anar, anlpar;
2586 ifmr->ifm_status = IFM_AVALID;
2587 ifmr->ifm_active = IFM_ETHER;
2590 /* Check whether firmware is running. */
2591 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2593 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
2594 &bmsr, NULL, NULL, TXP_CMD_WAIT))
2596 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
2597 &bmsr, NULL, NULL, TXP_CMD_WAIT))
2600 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
2601 &bmcr, NULL, NULL, TXP_CMD_WAIT))
2604 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
2605 &anlpar, NULL, NULL, TXP_CMD_WAIT))
2608 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANAR, 0,
2609 &anar, NULL, NULL, TXP_CMD_WAIT))
2613 if (bmsr & BMSR_LINK)
2614 ifmr->ifm_status |= IFM_ACTIVE;
2616 if (bmcr & BMCR_ISO) {
2617 ifmr->ifm_active |= IFM_NONE;
2618 ifmr->ifm_status = 0;
2622 if (bmcr & BMCR_LOOP)
2623 ifmr->ifm_active |= IFM_LOOP;
2625 if (bmcr & BMCR_AUTOEN) {
2626 if ((bmsr & BMSR_ACOMP) == 0) {
2627 ifmr->ifm_active |= IFM_NONE;
2632 if (anlpar & ANLPAR_TX_FD)
2633 ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
2634 else if (anlpar & ANLPAR_T4)
2635 ifmr->ifm_active |= IFM_100_T4;
2636 else if (anlpar & ANLPAR_TX)
2637 ifmr->ifm_active |= IFM_100_TX;
2638 else if (anlpar & ANLPAR_10_FD)
2639 ifmr->ifm_active |= IFM_10_T|IFM_FDX;
2640 else if (anlpar & ANLPAR_10)
2641 ifmr->ifm_active |= IFM_10_T;
2643 ifmr->ifm_active |= IFM_NONE;
2645 ifmr->ifm_active = ifm->ifm_cur->ifm_media;
2650 ifmr->ifm_active |= IFM_NONE;
2651 ifmr->ifm_status &= ~IFM_AVALID;
2656 txp_show_descriptor(void *d)
2658 struct txp_cmd_desc *cmd = d;
2659 struct txp_rsp_desc *rsp = d;
2660 struct txp_tx_desc *txd = d;
2661 struct txp_frag_desc *frgd = d;
2663 switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
2664 case CMD_FLAGS_TYPE_CMD:
2665 /* command descriptor */
2666 printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2667 cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
2668 le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
2669 le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
2671 case CMD_FLAGS_TYPE_RESP:
2672 /* response descriptor */
2673 printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2674 rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
2675 le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
2676 le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
2678 case CMD_FLAGS_TYPE_DATA:
2679 /* data header (assuming tx for now) */
2680 printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
2681 txd->tx_flags, txd->tx_numdesc, le16toh(txd->tx_totlen),
2682 le32toh(txd->tx_addrlo), le32toh(txd->tx_addrhi),
2683 le32toh(txd->tx_pflags));
2685 case CMD_FLAGS_TYPE_FRAG:
2686 /* fragment descriptor */
2687 printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
2688 frgd->frag_flags, frgd->frag_rsvd1, le16toh(frgd->frag_len),
2689 le32toh(frgd->frag_addrlo), le32toh(frgd->frag_addrhi),
2690 le32toh(frgd->frag_rsvd2));
2693 printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2694 cmd->cmd_flags & CMD_FLAGS_TYPE_M,
2695 cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
2696 le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
2697 le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
2704 txp_set_filter(struct txp_softc *sc)
2707 uint32_t crc, mchash[2];
2709 struct ifmultiaddr *ifma;
2712 TXP_LOCK_ASSERT(sc);
2715 filter = TXP_RXFILT_DIRECT;
2716 if ((ifp->if_flags & IFF_BROADCAST) != 0)
2717 filter |= TXP_RXFILT_BROADCAST;
2718 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2719 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2720 filter |= TXP_RXFILT_ALLMULTI;
2721 if ((ifp->if_flags & IFF_PROMISC) != 0)
2722 filter = TXP_RXFILT_PROMISC;
2726 mchash[0] = mchash[1] = 0;
2729 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2730 if (ifma->ifma_addr->sa_family != AF_LINK)
2732 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2733 ifma->ifma_addr), ETHER_ADDR_LEN);
2735 mchash[crc >> 5] |= 1 << (crc & 0x1f);
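/*
 * The firmware keeps a 64-bit multicast hash filter; the low six
 * bits of the big-endian CRC are assumed to select the bit, with
 * bit 5 picking which of the two mchash words is set and bits 4:0
 * the position within that word.
 */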
2738 IF_ADDR_UNLOCK(ifp);
2741 filter |= TXP_RXFILT_HASHMULTI;
2742 txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, 2, mchash[0],
2743 mchash[1], NULL, NULL, NULL, TXP_CMD_NOWAIT);
2747 txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
2748 NULL, NULL, NULL, TXP_CMD_NOWAIT);
2752 txp_set_capabilities(struct txp_softc *sc)
2755 uint32_t rxcap, txcap;
2757 TXP_LOCK_ASSERT(sc);
2761 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) {
2762 if ((ifp->if_hwassist & CSUM_IP) != 0)
2763 txcap |= OFFLOAD_IPCKSUM;
2764 if ((ifp->if_hwassist & CSUM_TCP) != 0)
2765 txcap |= OFFLOAD_TCPCKSUM;
2766 if ((ifp->if_hwassist & CSUM_UDP) != 0)
2767 txcap |= OFFLOAD_UDPCKSUM;
2770 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
2771 rxcap &= ~(OFFLOAD_IPCKSUM | OFFLOAD_TCPCKSUM |
2773 if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2774 rxcap |= OFFLOAD_VLAN;
2775 txcap |= OFFLOAD_VLAN;
2778 /* Tell firmware new offload configuration. */
2779 return (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, txcap, rxcap, NULL,
2780 NULL, NULL, TXP_CMD_NOWAIT));
2784 txp_stats_save(struct txp_softc *sc)
2786 struct txp_rsp_desc *rsp;
2788 TXP_LOCK_ASSERT(sc);
2791 if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
2792 &rsp, TXP_CMD_WAIT))
2794 if (rsp->rsp_numdesc != 6)
2796 txp_stats_update(sc, rsp);
2799 free(rsp, M_DEVBUF);
2800 bcopy(&sc->sc_stats, &sc->sc_ostats, sizeof(struct txp_hw_stats));
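/*
 * Snapshotting the running totals into sc_ostats before the
 * controller is halted lets txp_stats_update() keep adding the
 * firmware's restarted counters on top of the old values, so the
 * statistics survive a stop/init cycle.
 */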
2804 txp_stats_update(struct txp_softc *sc, struct txp_rsp_desc *rsp)
2807 struct txp_hw_stats *ostats, *stats;
2808 struct txp_ext_desc *ext;
2810 TXP_LOCK_ASSERT(sc);
2813 ext = (struct txp_ext_desc *)(rsp + 1);
2814 ostats = &sc->sc_ostats;
2815 stats = &sc->sc_stats;
2816 stats->tx_frames = ostats->tx_frames + le32toh(rsp->rsp_par2);
2817 stats->tx_bytes = ostats->tx_bytes + (uint64_t)le32toh(rsp->rsp_par3) +
2818 ((uint64_t)le32toh(ext[0].ext_1) << 32);
2819 stats->tx_deferred = ostats->tx_deferred + le32toh(ext[0].ext_2);
2820 stats->tx_late_colls = ostats->tx_late_colls + le32toh(ext[0].ext_3);
2821 stats->tx_colls = ostats->tx_colls + le32toh(ext[0].ext_4);
2822 stats->tx_carrier_lost = ostats->tx_carrier_lost +
2823 le32toh(ext[1].ext_1);
2824 stats->tx_multi_colls = ostats->tx_multi_colls +
2825 le32toh(ext[1].ext_2);
2826 stats->tx_excess_colls = ostats->tx_excess_colls +
2827 le32toh(ext[1].ext_3);
2828 stats->tx_fifo_underruns = ostats->tx_fifo_underruns +
2829 le32toh(ext[1].ext_4);
2830 stats->tx_mcast_oflows = ostats->tx_mcast_oflows +
2831 le32toh(ext[2].ext_1);
2832 stats->tx_filtered = ostats->tx_filtered + le32toh(ext[2].ext_2);
2833 stats->rx_frames = ostats->rx_frames + le32toh(ext[2].ext_3);
2834 stats->rx_bytes = ostats->rx_bytes + (uint64_t)le32toh(ext[2].ext_4) +
2835 ((uint64_t)le32toh(ext[3].ext_1) << 32);
2836 stats->rx_fifo_oflows = ostats->rx_fifo_oflows + le32toh(ext[3].ext_2);
2837 stats->rx_badssd = ostats->rx_badssd + le32toh(ext[3].ext_3);
2838 stats->rx_crcerrs = ostats->rx_crcerrs + le32toh(ext[3].ext_4);
2839 stats->rx_lenerrs = ostats->rx_lenerrs + le32toh(ext[4].ext_1);
2840 stats->rx_bcast_frames = ostats->rx_bcast_frames +
2841 le32toh(ext[4].ext_2);
2842 stats->rx_mcast_frames = ostats->rx_mcast_frames +
2843 le32toh(ext[4].ext_3);
2844 stats->rx_oflows = ostats->rx_oflows + le32toh(ext[4].ext_4);
2845 stats->rx_filtered = ostats->rx_filtered + le32toh(ext[5].ext_1);
2847 ifp->if_ierrors = stats->rx_fifo_oflows + stats->rx_badssd +
2848 stats->rx_crcerrs + stats->rx_lenerrs + stats->rx_oflows;
2849 ifp->if_oerrors = stats->tx_deferred + stats->tx_carrier_lost +
2850 stats->tx_fifo_underruns + stats->tx_mcast_oflows;
2851 ifp->if_collisions = stats->tx_late_colls + stats->tx_multi_colls +
2852 stats->tx_excess_colls;
2853 ifp->if_opackets = stats->tx_frames;
2854 ifp->if_ipackets = stats->rx_frames;
2857 #define TXP_SYSCTL_STAT_ADD32(c, h, n, p, d) \
2858 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2860 #if __FreeBSD_version > 800000
2861 #define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \
2862 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2864 #define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \
2865 SYSCTL_ADD_ULONG(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
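/*
 * SYSCTL_ADD_QUAD exports a true 64-bit counter but is only available
 * on newer kernels, hence the __FreeBSD_version check; older kernels
 * fall back to publishing the byte counters via SYSCTL_ADD_ULONG.
 */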
2869 txp_sysctl_node(struct txp_softc *sc)
2871 struct sysctl_ctx_list *ctx;
2872 struct sysctl_oid_list *child, *parent;
2873 struct sysctl_oid *tree;
2874 struct txp_hw_stats *stats;
2877 stats = &sc->sc_stats;
2878 ctx = device_get_sysctl_ctx(sc->sc_dev);
2879 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
2880 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
2881 CTLTYPE_INT | CTLFLAG_RW, &sc->sc_process_limit, 0,
2882 sysctl_hw_txp_proc_limit, "I",
2883 "max number of Rx events to process");
2884 /* Pull in device tunables. */
2885 sc->sc_process_limit = TXP_PROC_DEFAULT;
2886 error = resource_int_value(device_get_name(sc->sc_dev),
2887 device_get_unit(sc->sc_dev), "process_limit",
2888 &sc->sc_process_limit);
2890 if (sc->sc_process_limit < TXP_PROC_MIN ||
2891 sc->sc_process_limit > TXP_PROC_MAX) {
2892 device_printf(sc->sc_dev,
2893 "process_limit value out of range; "
2894 "using default: %d\n", TXP_PROC_DEFAULT);
2895 sc->sc_process_limit = TXP_PROC_DEFAULT;
2898 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
2899 NULL, "TXP statistics");
2900 parent = SYSCTL_CHILDREN(tree);
2902 /* Tx statistics. */
2903 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2904 NULL, "Tx MAC statistics");
2905 child = SYSCTL_CHILDREN(tree);
2907 TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
2908 &stats->tx_frames, "Frames");
2909 TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
2910 &stats->tx_bytes, "Octets");
2911 TXP_SYSCTL_STAT_ADD32(ctx, child, "deferred",
2912 &stats->tx_deferred, "Deferred frames");
2913 TXP_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
2914 &stats->tx_late_colls, "Late collisions");
2915 TXP_SYSCTL_STAT_ADD32(ctx, child, "colls",
2916 &stats->tx_colls, "Collisions");
2917 TXP_SYSCTL_STAT_ADD32(ctx, child, "carrier_lost",
2918 &stats->tx_carrier_lost, "Carrier lost");
2919 TXP_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
2920 &stats->tx_multi_colls, "Multiple collisions");
2921 TXP_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
2922 &stats->tx_excess_colls, "Excessive collisions");
2923 TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_underruns",
2924 &stats->tx_fifo_underruns, "FIFO underruns");
2925 TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_oflows",
2926 &stats->tx_mcast_oflows, "Multicast overflows");
2927 TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
2928 &stats->tx_filtered, "Filtered frames");
2930 /* Rx statistics. */
2931 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2932 NULL, "Rx MAC statistics");
2933 child = SYSCTL_CHILDREN(tree);
2935 TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
2936 &stats->rx_frames, "Frames");
2937 TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
2938 &stats->rx_bytes, "Octets");
2939 TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2940 &stats->rx_fifo_oflows, "FIFO overflows");
2941 TXP_SYSCTL_STAT_ADD32(ctx, child, "badssd",
2942 &stats->rx_badssd, "Bad SSD");
2943 TXP_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2944 &stats->rx_crcerrs, "CRC errors");
2945 TXP_SYSCTL_STAT_ADD32(ctx, child, "lenerrs",
2946 &stats->rx_lenerrs, "Length errors");
2947 TXP_SYSCTL_STAT_ADD32(ctx, child, "bcast_frames",
2948 &stats->rx_bcast_frames, "Broadcast frames");
2949 TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_frames",
2950 &stats->rx_mcast_frames, "Multicast frames");
2951 TXP_SYSCTL_STAT_ADD32(ctx, child, "oflows",
2952 &stats->rx_oflows, "Overflows");
2953 TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
2954 &stats->rx_filtered, "Filtered frames");
2957 #undef TXP_SYSCTL_STAT_ADD32
2958 #undef TXP_SYSCTL_STAT_ADD64
2961 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2967 value = *(int *)arg1;
2968 error = sysctl_handle_int(oidp, &value, 0, req);
2969 if (error || req->newptr == NULL)
2971 if (value < low || value > high)
2973 *(int *)arg1 = value;
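/*
 * sysctl_int_range() is a small clamp-style handler: it lets
 * sysctl_handle_int() copy in the new value, rejects values outside
 * [low, high], and only then stores the result back through arg1.
 * The process_limit handler below just wraps it with TXP_PROC_MIN
 * and TXP_PROC_MAX.
 */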
2979 sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS)
2981 return (sysctl_int_range(oidp, arg1, arg2, req,
2982 TXP_PROC_MIN, TXP_PROC_MAX));