1 /* $OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $ */
5 * Jason L. Wright <jason@thought.net>, Theo de Raadt, and
6 * Aaron Campbell <aaron@monkey.org>. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Jason L. Wright,
19 * Theo de Raadt and Aaron Campbell.
20 * 4. Neither the name of the author nor the names of any co-contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
25 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
34 * THE POSSIBILITY OF SUCH DAMAGE.
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
41 * Driver for 3c990 (Typhoon) Ethernet ASIC
43 #include <sys/param.h>
44 #include <sys/systm.h>
46 #include <sys/endian.h>
47 #include <sys/kernel.h>
49 #include <sys/malloc.h>
51 #include <sys/module.h>
52 #include <sys/mutex.h>
53 #include <sys/queue.h>
55 #include <sys/socket.h>
56 #include <sys/sockio.h>
57 #include <sys/sysctl.h>
58 #include <sys/taskqueue.h>
62 #include <net/if_var.h>
63 #include <net/if_arp.h>
64 #include <net/ethernet.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
67 #include <net/if_types.h>
68 #include <net/if_vlan_var.h>
70 #include <netinet/in.h>
71 #include <netinet/in_systm.h>
72 #include <netinet/ip.h>
74 #include <dev/mii/mii.h>
76 #include <dev/pci/pcireg.h>
77 #include <dev/pci/pcivar.h>
79 #include <machine/bus.h>
80 #include <machine/in_cksum.h>
82 #include <dev/txp/if_txpreg.h>
83 #include <dev/txp/3c990img.h>
85 MODULE_DEPEND(txp, pci, 1, 1, 1);
86 MODULE_DEPEND(txp, ether, 1, 1, 1);
89 * XXX Known Typhoon firmware issues.
91 * 1. It seems that firmware has Tx TCP/UDP checksum offloading bug.
92 * The firmware hangs when it's told to compute TCP/UDP checksum.
93 * I'm not sure whether the firmware requires special alignment to
94 * do checksum offloading but datasheet says nothing about that.
95 * 2. Datasheet says nothing for maximum number of fragmented
96 * descriptors supported. Experimentation shows up to 16 fragment
97 * descriptors are supported in the firmware. For TSO case, upper
98 * stack can send 64KB sized IP datagram plus link header size(
99 * ethernet header + VLAN tag) frame but controller can handle up
100 * to 64KB frame given that PAGE_SIZE is 4KB(i.e. 16 * PAGE_SIZE).
101 * Because frames that need TSO operation of hardware can be
102 * larger than 64KB I disabled TSO capability. TSO operation for
103 * less than or equal to 16 fragment descriptors works without
105 * 3. VLAN hardware tag stripping is always enabled in the firmware
106 * even if it's explicitly told to not strip the tag. It's
107 * possible to add the tag back in Rx handler if VLAN hardware
108 * tag is not active but I didn't try that as it would be
109 * layering violation.
110 * 4. TXP_CMD_RECV_BUFFER_CONTROL does not work as expected in
111 * datasheet such that driver should handle the alignment
112 * restriction by copying received frame to align the frame on
113 * 32bit boundary on strict-alignment architectures. This adds a
114 * lot of CPU burden and it effectively reduce Rx performance on
115 * strict-alignment architectures(e.g. sparc64, arm, mips and ia64).
117 * Unfortunately it seems that 3Com have no longer interests in
118 * releasing fixed firmware so we may have to live with these bugs.
/*
 * Checksum features advertised for Tx offload: IP header checksum only,
 * because the firmware's TCP/UDP Tx checksum offload hangs (see the
 * "Known Typhoon firmware issues" note above).
 */
121 #define TXP_CSUM_FEATURES (CSUM_IP)
124 * Various supported device vendors/types and their names.
/*
 * Table of supported 3Com 3XP (Typhoon) PCI vendor/device IDs and their
 * human-readable descriptions; txp_probe() walks this table.
 * NOTE(review): the usual { 0, 0, NULL } terminator entry is not visible
 * in this chunk — confirm it is present, since txp_probe() loops until
 * txp_name == NULL.
 */
126 static struct txp_type txp_devs[] = {
127 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_95,
128 "3Com 3cR990-TX-95 Etherlink with 3XP Processor" },
129 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_97,
130 "3Com 3cR990-TX-97 Etherlink with 3XP Processor" },
131 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_TXM,
132 "3Com 3cR990B-TXM Etherlink with 3XP Processor" },
133 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_95,
134 "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" },
135 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_97,
136 "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" },
137 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_SRV,
138 "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" },
142 static int txp_probe(device_t);
143 static int txp_attach(device_t);
144 static int txp_detach(device_t);
145 static int txp_shutdown(device_t);
146 static int txp_suspend(device_t);
147 static int txp_resume(device_t);
148 static int txp_intr(void *);
149 static void txp_int_task(void *, int);
150 static void txp_tick(void *);
151 static int txp_ioctl(struct ifnet *, u_long, caddr_t);
152 static void txp_start(struct ifnet *);
153 static void txp_start_locked(struct ifnet *);
154 static int txp_encap(struct txp_softc *, struct txp_tx_ring *, struct mbuf **);
155 static void txp_stop(struct txp_softc *);
156 static void txp_init(void *);
157 static void txp_init_locked(struct txp_softc *);
158 static void txp_watchdog(struct txp_softc *);
160 static int txp_reset(struct txp_softc *);
161 static int txp_boot(struct txp_softc *, uint32_t);
162 static int txp_sleep(struct txp_softc *, int);
163 static int txp_wait(struct txp_softc *, uint32_t);
164 static int txp_download_fw(struct txp_softc *);
165 static int txp_download_fw_wait(struct txp_softc *);
166 static int txp_download_fw_section(struct txp_softc *,
167 struct txp_fw_section_header *, int);
168 static int txp_alloc_rings(struct txp_softc *);
169 static void txp_init_rings(struct txp_softc *);
170 static int txp_dma_alloc(struct txp_softc *, char *, bus_dma_tag_t *,
171 bus_size_t, bus_size_t, bus_dmamap_t *, void **, bus_size_t, bus_addr_t *);
172 static void txp_dma_free(struct txp_softc *, bus_dma_tag_t *, bus_dmamap_t *,
174 static void txp_free_rings(struct txp_softc *);
175 static int txp_rxring_fill(struct txp_softc *);
176 static void txp_rxring_empty(struct txp_softc *);
177 static void txp_set_filter(struct txp_softc *);
179 static int txp_cmd_desc_numfree(struct txp_softc *);
180 static int txp_command(struct txp_softc *, uint16_t, uint16_t, uint32_t,
181 uint32_t, uint16_t *, uint32_t *, uint32_t *, int);
182 static int txp_ext_command(struct txp_softc *, uint16_t, uint16_t,
183 uint32_t, uint32_t, struct txp_ext_desc *, uint8_t,
184 struct txp_rsp_desc **, int);
185 static int txp_response(struct txp_softc *, uint16_t, uint16_t,
186 struct txp_rsp_desc **);
187 static void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
188 struct txp_rsp_desc *);
189 static int txp_set_capabilities(struct txp_softc *);
191 static void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
192 static int txp_ifmedia_upd(struct ifnet *);
194 static void txp_show_descriptor(void *);
196 static void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *);
197 static void txp_rxbuf_reclaim(struct txp_softc *);
198 #ifndef __NO_STRICT_ALIGNMENT
199 static __inline void txp_fixup_rx(struct mbuf *);
201 static int txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *, int);
202 static void txp_stats_save(struct txp_softc *);
203 static void txp_stats_update(struct txp_softc *, struct txp_rsp_desc *);
204 static void txp_sysctl_node(struct txp_softc *);
205 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
206 static int sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS);
208 static int prefer_iomap = 0;
209 TUNABLE_INT("hw.txp.prefer_iomap", &prefer_iomap);
/*
 * newbus glue: device method table, driver description and module
 * registration for the PCI bus.
 */
211 static device_method_t txp_methods[] = {
212 /* Device interface */
213 DEVMETHOD(device_probe, txp_probe),
214 DEVMETHOD(device_attach, txp_attach),
215 DEVMETHOD(device_detach, txp_detach),
216 DEVMETHOD(device_shutdown, txp_shutdown),
217 DEVMETHOD(device_suspend, txp_suspend),
218 DEVMETHOD(device_resume, txp_resume),
223 static driver_t txp_driver = {
226 sizeof(struct txp_softc)
229 static devclass_t txp_devclass;
231 DRIVER_MODULE(txp, pci, txp_driver, txp_devclass, 0, 0);
/*
 * Device probe: walk txp_devs[] and claim the device on a matching
 * vendor/device ID pair.  Returns BUS_PROBE_DEFAULT on a hit; the
 * ENXIO fall-through for no match is outside this visible span.
 */
234 txp_probe(device_t dev)
240 while (t->txp_name != NULL) {
241 if ((pci_get_vendor(dev) == t->txp_vid) &&
242 (pci_get_device(dev) == t->txp_did)) {
243 device_set_desc(dev, t->txp_name);
244 return (BUS_PROBE_DEFAULT);
/*
 * Device attach: set up locks/taskqueue, map registers (memory BAR
 * preferred, I/O BAR fallback), reset and boot the controller's sleep
 * image, read the station address, create the ifnet, announce
 * capabilities and hook up the interrupt handler.
 */
253 txp_attach(device_t dev)
255 struct txp_softc *sc;
257 struct txp_rsp_desc *rsp;
260 int error = 0, pmc, rid;
261 uint8_t eaddr[ETHER_ADDR_LEN], *ver;
263 sc = device_get_softc(dev);
266 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
268 callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
269 TASK_INIT(&sc->sc_int_task, 0, txp_int_task, sc);
270 TAILQ_INIT(&sc->sc_busy_list);
271 TAILQ_INIT(&sc->sc_free_list);
/* Advertise the fixed set of 10/100 media types; no MII PHY access. */
273 ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
274 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
275 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL);
276 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
277 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
278 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL);
279 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
280 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
282 pci_enable_busmaster(dev);
283 /* Prefer memory space register mapping over IO space. */
284 if (prefer_iomap == 0) {
285 sc->sc_res_id = PCIR_BAR(1);
286 sc->sc_res_type = SYS_RES_MEMORY;
288 sc->sc_res_id = PCIR_BAR(0);
289 sc->sc_res_type = SYS_RES_IOPORT;
291 sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
292 &sc->sc_res_id, RF_ACTIVE);
/* Memory mapping failed; retry once with the I/O BAR. */
293 if (sc->sc_res == NULL && prefer_iomap == 0) {
294 sc->sc_res_id = PCIR_BAR(0);
295 sc->sc_res_type = SYS_RES_IOPORT;
296 sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
297 &sc->sc_res_id, RF_ACTIVE);
299 if (sc->sc_res == NULL) {
300 device_printf(dev, "couldn't map ports/memory\n");
301 ifmedia_removeall(&sc->sc_ifmedia);
302 mtx_destroy(&sc->sc_mtx);
/* Enable PCI Memory-Write-Invalidate for better bus utilization. */
307 reg = pci_read_config(dev, PCIR_COMMAND, 2);
308 reg |= PCIM_CMD_MWRICEN;
309 pci_write_config(dev, PCIR_COMMAND, reg, 2);
310 /* Check cache line size. */
311 reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
313 if (reg == 0 || (reg % 16) != 0)
314 device_printf(sc->sc_dev,
315 "invalid cache line size : %u\n", reg);
317 /* Allocate interrupt */
319 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
320 RF_SHAREABLE | RF_ACTIVE);
322 if (sc->sc_irq == NULL) {
323 device_printf(dev, "couldn't map interrupt\n");
328 if ((error = txp_alloc_rings(sc)) != 0)
332 /* Reset controller and make it reload sleep image. */
333 if (txp_reset(sc) != 0) {
338 /* Let controller boot from sleep image. */
339 if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
340 device_printf(sc->sc_dev, "could not boot sleep image\n");
345 /* Get station address. */
346 if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
347 &p1, &p2, NULL, TXP_CMD_WAIT)) {
/*
 * The firmware returns the MAC address packed into two response
 * words; the byte swizzling below unpacks it into wire order.
 */
353 eaddr[0] = ((uint8_t *)&p1)[1];
354 eaddr[1] = ((uint8_t *)&p1)[0];
356 eaddr[2] = ((uint8_t *)&p2)[3];
357 eaddr[3] = ((uint8_t *)&p2)[2];
358 eaddr[4] = ((uint8_t *)&p2)[1];
359 eaddr[5] = ((uint8_t *)&p2)[0];
361 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
363 device_printf(dev, "can not allocate ifnet structure\n");
369 * Show sleep image version information which may help to
370 * diagnose sleep image specific issues.
373 if (txp_ext_command(sc, TXP_CMD_READ_VERSION, 0, 0, 0, NULL, 0,
374 &rsp, TXP_CMD_WAIT)) {
375 device_printf(dev, "can not read sleep image version\n");
379 if (rsp->rsp_numdesc == 0) {
380 p2 = le32toh(rsp->rsp_par2) & 0xFFFF;
381 device_printf(dev, "Typhoon 1.0 sleep image (2000/%02u/%02u)\n",
383 } else if (rsp->rsp_numdesc == 2) {
384 p2 = le32toh(rsp->rsp_par2);
385 ver = (uint8_t *)(rsp + 1);
387 * Even if datasheet says the command returns a NULL
388 * terminated version string, explicitly terminate
389 * the string. Given that several bugs of firmware
390 * I can't trust this simple one.
394 "Typhoon 1.1+ sleep image %02u.%03u.%03u %s\n",
395 p2 >> 24, (p2 >> 12) & 0xFFF, p2 & 0xFFF, ver);
397 p2 = le32toh(rsp->rsp_par2);
399 "Unknown Typhoon sleep image version: %u:0x%08x\n",
400 rsp->rsp_numdesc, p2);
405 sc->sc_xcvr = TXP_XCVR_AUTO;
406 txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
407 NULL, NULL, NULL, TXP_CMD_NOWAIT);
408 ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);
411 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
412 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
413 ifp->if_ioctl = txp_ioctl;
414 ifp->if_start = txp_start;
415 ifp->if_init = txp_init;
416 ifp->if_snd.ifq_drv_maxlen = TX_ENTRIES - 1;
417 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
418 IFQ_SET_READY(&ifp->if_snd);
420 * It's possible to read firmware's offload capability but
421 * we have not downloaded the firmware yet so announce
422 * working capability here. We're not interested in IPSec
423 * capability and due to the lots of firmware bug we can't
424 * advertise the whole capability anyway.
426 ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM;
427 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
428 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
429 /* Enable all capabilities. */
430 ifp->if_capenable = ifp->if_capabilities;
432 ether_ifattach(ifp, eaddr);
434 /* VLAN capability setup. */
435 ifp->if_capabilities |= IFCAP_VLAN_MTU;
436 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
437 ifp->if_capenable = ifp->if_capabilities;
438 /* Tell the upper layer(s) we support long frames. */
439 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
/* Mask all interrupts until txp_init() runs. */
441 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
442 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
444 /* Create local taskq. */
445 sc->sc_tq = taskqueue_create_fast("txp_taskq", M_WAITOK,
446 taskqueue_thread_enqueue, &sc->sc_tq);
447 if (sc->sc_tq == NULL) {
448 device_printf(dev, "could not create taskqueue.\n");
453 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
454 device_get_nameunit(sc->sc_dev));
456 /* Put controller into sleep. */
457 if (txp_sleep(sc, 0) != 0) {
463 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
464 txp_intr, NULL, sc, &sc->sc_intrhand);
468 device_printf(dev, "couldn't set up interrupt handler.\n");
/*
 * Device detach: stop the interface, drain the tick callout and the
 * interrupt task, then tear down the interrupt, bus resources, ifnet,
 * ifmedia and mutex in reverse order of txp_attach().
 */
481 txp_detach(device_t dev)
483 struct txp_softc *sc;
486 sc = device_get_softc(dev);
489 if (device_is_attached(dev)) {
491 sc->sc_flags |= TXP_FLAG_DETACH;
494 callout_drain(&sc->sc_tick);
495 taskqueue_drain(sc->sc_tq, &sc->sc_int_task);
/* Mask all interrupts before releasing resources. */
498 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
500 ifmedia_removeall(&sc->sc_ifmedia);
501 if (sc->sc_intrhand != NULL)
502 bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
503 if (sc->sc_irq != NULL)
504 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
505 if (sc->sc_res != NULL)
506 bus_release_resource(dev, sc->sc_res_type, sc->sc_res_id,
508 if (sc->sc_ifp != NULL) {
513 mtx_destroy(&sc->sc_mtx);
/*
 * Hard-reset the 3XP processor and wait (up to ~6 seconds) for it to
 * reload its sleep image and report STAT_WAITING_FOR_HOST_REQUEST in
 * the A2H_0 mailbox register.  Interrupts are masked and acked around
 * the reset.
 */
519 txp_reset(struct txp_softc *sc)
524 /* Disable interrupts. */
525 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
526 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
527 /* Ack all pending interrupts. */
528 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
/* Pulse the soft-reset register: assert all reset bits, then release. */
531 WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
533 WRITE_REG(sc, TXP_SRR, 0);
535 /* Should wait max 6 seconds. */
536 for (i = 0; i < 6000; i++) {
537 r = READ_REG(sc, TXP_A2H_0);
538 if (r == STAT_WAITING_FOR_HOST_REQUEST)
543 if (r != STAT_WAITING_FOR_HOST_REQUEST)
544 device_printf(sc->sc_dev, "reset hung\n");
546 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
547 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
548 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
551 * Give more time to complete loading sleep image before
552 * trying to boot from sleep image.
/*
 * Boot the controller: wait for the requested mailbox state, hand the
 * firmware the physical address of the boot record via the H2A mailbox
 * registers, then wait for STAT_RUNNING and clear the ring write
 * registers.  Each mailbox write is followed by a bus-space write
 * barrier so the device sees them in order.
 */
560 txp_boot(struct txp_softc *sc, uint32_t state)
563 /* See if it's waiting for boot, and try to boot it. */
564 if (txp_wait(sc, state) != 0) {
565 device_printf(sc->sc_dev, "not waiting for boot\n");
569 WRITE_REG(sc, TXP_H2A_2, TXP_ADDR_HI(sc->sc_ldata.txp_boot_paddr));
570 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
571 WRITE_REG(sc, TXP_H2A_1, TXP_ADDR_LO(sc->sc_ldata.txp_boot_paddr));
572 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
573 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
574 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
576 /* See if it booted. */
577 if (txp_wait(sc, STAT_RUNNING) != 0) {
578 device_printf(sc->sc_dev, "firmware not running\n");
582 /* Clear TX and CMD ring write registers. */
583 WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
584 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
585 WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
586 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
587 WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
588 TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
589 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
590 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
/*
 * Download the runtime firmware image (tc990image, linked into the
 * kernel) to the controller: validate the "TYPHOON" magic, hand the
 * image header/HMAC words to the boot firmware, then push each section
 * via txp_download_fw_section().  The A2H_0 interrupt is temporarily
 * enabled so download progress can be polled; IER/IMR are restored on
 * exit.
 */
596 txp_download_fw(struct txp_softc *sc)
598 struct txp_fw_file_header *fileheader;
599 struct txp_fw_section_header *secthead;
601 uint32_t error, ier, imr;
606 ier = READ_REG(sc, TXP_IER);
607 WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);
609 imr = READ_REG(sc, TXP_IMR);
610 WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);
612 if (txp_wait(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
613 device_printf(sc->sc_dev, "not waiting for host request\n");
618 /* Ack the status. */
619 WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
621 fileheader = (struct txp_fw_file_header *)tc990image;
622 if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
623 device_printf(sc->sc_dev, "firmware invalid magic\n");
627 /* Tell boot firmware to get ready for image. */
628 WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
629 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
630 WRITE_REG(sc, TXP_H2A_2, le32toh(fileheader->hmac[0]));
631 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
632 WRITE_REG(sc, TXP_H2A_3, le32toh(fileheader->hmac[1]));
633 TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
634 WRITE_REG(sc, TXP_H2A_4, le32toh(fileheader->hmac[2]));
635 TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
636 WRITE_REG(sc, TXP_H2A_5, le32toh(fileheader->hmac[3]));
637 TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
638 WRITE_REG(sc, TXP_H2A_6, le32toh(fileheader->hmac[4]));
639 TXP_BARRIER(sc, TXP_H2A_6, 4, BUS_SPACE_BARRIER_WRITE);
640 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);
641 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
643 if (txp_download_fw_wait(sc)) {
644 device_printf(sc->sc_dev, "firmware wait failed, initial\n");
/* Walk the section headers; each section is variable-length. */
649 secthead = (struct txp_fw_section_header *)(((uint8_t *)tc990image) +
650 sizeof(struct txp_fw_file_header));
652 for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
653 if ((error = txp_download_fw_section(sc, secthead, sect)) != 0)
655 secthead = (struct txp_fw_section_header *)
656 (((uint8_t *)secthead) + le32toh(secthead->nbytes) +
660 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);
661 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
663 if (txp_wait(sc, STAT_WAITING_FOR_BOOT) != 0) {
664 device_printf(sc->sc_dev, "not waiting for boot\n");
/* Restore the interrupt enable/mask state saved on entry. */
670 WRITE_REG(sc, TXP_IER, ier);
671 WRITE_REG(sc, TXP_IMR, imr);
/*
 * Poll (up to TXP_TIMEOUT iterations) for the A2H_0 doorbell from the
 * boot firmware, ack it, and verify the firmware is asking for the
 * next segment.  Returns non-zero on timeout or unexpected state.
 */
677 txp_download_fw_wait(struct txp_softc *sc)
683 for (i = 0; i < TXP_TIMEOUT; i++) {
684 if ((READ_REG(sc, TXP_ISR) & TXP_INT_A2H_0) != 0)
689 if (i == TXP_TIMEOUT) {
690 device_printf(sc->sc_dev, "firmware wait failed comm0\n");
694 WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
696 if (READ_REG(sc, TXP_A2H_0) != STAT_WAITING_FOR_SEGMENT) {
697 device_printf(sc->sc_dev, "firmware not waiting for segment\n");
/*
 * Download one firmware section: bounds-check it against the in-kernel
 * image, copy it into a freshly allocated DMA buffer, verify its
 * Internet checksum (via a dummy mbuf and in_cksum()), then hand the
 * buffer's physical address to the boot firmware and wait for the
 * segment to be consumed.  The DMA buffer is freed before returning.
 */
704 txp_download_fw_section(struct txp_softc *sc,
705 struct txp_fw_section_header *sect, int sectnum)
707 bus_dma_tag_t sec_tag;
708 bus_dmamap_t sec_map;
709 bus_addr_t sec_paddr;
717 /* Skip zero length sections. */
718 if (le32toh(sect->nbytes) == 0)
721 /* Make sure we aren't past the end of the image. */
722 rseg = ((uint8_t *)sect) - ((uint8_t *)tc990image);
723 if (rseg >= sizeof(tc990image)) {
724 device_printf(sc->sc_dev,
725 "firmware invalid section address, section %d\n", sectnum);
729 /* Make sure this section doesn't go past the end. */
730 rseg += le32toh(sect->nbytes);
731 if (rseg >= sizeof(tc990image)) {
732 device_printf(sc->sc_dev, "firmware truncated section %d\n",
742 err = txp_dma_alloc(sc, "firmware sections", &sec_tag, sizeof(uint32_t),
743 0, &sec_map, (void **)&sec_buf, le32toh(sect->nbytes), &sec_paddr);
747 bcopy(((uint8_t *)sect) + sizeof(*sect), sec_buf,
748 le32toh(sect->nbytes));
751 * dummy up mbuf and verify section checksum
754 m.m_next = m.m_nextpkt = NULL;
755 m.m_len = le32toh(sect->nbytes);
758 csum = in_cksum(&m, le32toh(sect->nbytes));
759 if (csum != sect->cksum) {
760 device_printf(sc->sc_dev,
761 "firmware section %d, bad cksum (expected 0x%x got 0x%x)\n",
762 sectnum, le16toh(sect->cksum), csum);
/* Flush the section buffer to memory before telling the device. */
767 bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_PREWRITE);
769 WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
770 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
771 WRITE_REG(sc, TXP_H2A_2, le16toh(sect->cksum));
772 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
773 WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
774 TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
775 WRITE_REG(sc, TXP_H2A_4, TXP_ADDR_HI(sec_paddr));
776 TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
777 WRITE_REG(sc, TXP_H2A_5, TXP_ADDR_LO(sec_paddr));
778 TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
779 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);
780 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
782 if (txp_download_fw_wait(sc)) {
783 device_printf(sc->sc_dev,
784 "firmware wait failed, section %d\n", sectnum);
788 bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_POSTWRITE);
790 txp_dma_free(sc, &sec_tag, &sec_map, (void **)&sec_buf);
/*
 * NOTE(review): the txp_intr() signature line is not visible in this
 * chunk; these lines are its body.  Fast interrupt filter: bail out if
 * the latch bit isn't set (shared line), otherwise ack the status,
 * mask further interrupts and defer the real work to the taskqueue.
 */
797 struct txp_softc *sc;
801 status = READ_REG(sc, TXP_ISR);
802 if ((status & TXP_INT_LATCH) == 0)
803 return (FILTER_STRAY);
804 WRITE_REG(sc, TXP_ISR, status);
805 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
806 taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
808 return (FILTER_HANDLED);
/*
 * Deferred interrupt handler (runs in taskqueue context, IMR masked by
 * txp_intr).  Acks the latch, reclaims completed Rx/Tx descriptors,
 * refills Rx buffers when the firmware has drained them, kicks the
 * transmit path, and either re-queues itself (more work pending) or
 * re-enables interrupts.
 *
 * FIX: line 863 previously read READ_REG(sc, TXP_ISR & TXP_INT_LATCH),
 * i.e. it read the register at address (TXP_ISR & TXP_INT_LATCH)
 * instead of reading TXP_ISR and masking the returned status with
 * TXP_INT_LATCH.  The parentheses are corrected below.
 */
812 txp_int_task(void *arg, int pending)
814 struct txp_softc *sc;
816 struct txp_hostvar *hv;
820 sc = (struct txp_softc *)arg;
825 isr = READ_REG(sc, TXP_ISR);
826 if ((isr & TXP_INT_LATCH) != 0)
827 WRITE_REG(sc, TXP_ISR, isr);
829 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
830 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
831 sc->sc_cdata.txp_hostvar_map,
832 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/* Service both Rx rings, bounded by the process limit. */
834 if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
835 more += txp_rx_reclaim(sc, &sc->sc_rxhir,
836 sc->sc_process_limit);
837 if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
838 more += txp_rx_reclaim(sc, &sc->sc_rxlor,
839 sc->sc_process_limit);
842 * It seems controller is not smart enough to handle
843 * FIFO overflow conditions under heavy network load.
844 * No matter how often new Rx buffers are passed to
845 * controller the situation didn't change. Maybe
846 * flow-control would be the only way to mitigate the
847 * issue but firmware does not have commands that
848 * control the threshold of emitting pause frames.
850 if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
851 txp_rxbuf_reclaim(sc);
/* Reclaim transmitted frames from both Tx priority rings. */
852 if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
853 TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
854 txp_tx_reclaim(sc, &sc->sc_txhir);
855 if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
856 TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
857 txp_tx_reclaim(sc, &sc->sc_txlor);
858 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
859 sc->sc_cdata.txp_hostvar_map,
860 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
861 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
862 txp_start_locked(sc->sc_ifp);
/* More work, or the latch re-asserted: run again instead of unmasking. */
863 if (more != 0 || (READ_REG(sc, TXP_ISR) & TXP_INT_LATCH) != 0) {
864 taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
870 /* Re-enable interrupts. */
871 WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
875 #ifndef __NO_STRICT_ALIGNMENT
/*
 * On strict-alignment machines the firmware's Rx DMA leaves the
 * payload misaligned (see errata #4 above); shift the frame data back
 * by (TXP_RXBUF_ALIGN - ETHER_ALIGN) bytes, 16 bits at a time, so the
 * IP header ends up 32-bit aligned.  Costs a copy per received frame.
 */
877 txp_fixup_rx(struct mbuf *m)
882 src = mtod(m, uint16_t *);
883 dst = src - (TXP_RXBUF_ALIGN - ETHER_ALIGN) / sizeof *src;
885 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
888 m->m_data -= TXP_RXBUF_ALIGN - ETHER_ALIGN;
/*
 * Reclaim up to 'count' received frames from Rx ring 'r': recover the
 * software descriptor pointer stashed in the hardware descriptor,
 * unload its DMA map, set up the mbuf packet header, translate the
 * hardware status into csum_flags and a VLAN tag, and pass the frame
 * to the stack.  Returns 0 if the ring was drained, EAGAIN if the
 * process limit was hit (caller re-queues the task).
 *
 * FIX: the read-offset write-back at line 985 used le32toh() (a
 * from-little-endian conversion) on a value being STORED into the
 * little-endian shared host-variable area; the correct direction is
 * htole32().  (Byte-identical on real platforms since the 32-bit swap
 * is an involution, but semantically wrong and inconsistent with the
 * htole32() stores elsewhere in this driver.)
 */
893 txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r, int count)
896 struct txp_rx_desc *rxd;
898 struct txp_rx_swdesc *sd;
899 uint32_t roff, woff, rx_stat, prog;
905 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
906 BUS_DMASYNC_POSTWRITE);
908 roff = le32toh(*r->r_roff);
909 woff = le32toh(*r->r_woff);
910 rxd = r->r_desc + roff / sizeof(struct txp_rx_desc);
911 for (prog = 0; roff != woff; prog++, count--) {
/* The swdesc pointer was stored in the descriptor's vaddr fields. */
914 bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));
915 KASSERT(sd != NULL, ("%s: Rx desc ring corrupted", __func__));
916 bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
917 BUS_DMASYNC_POSTREAD);
918 bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
920 KASSERT(m != NULL, ("%s: Rx buffer ring corrupted", __func__));
922 TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
923 TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
924 if ((rxd->rx_flags & RX_FLAGS_ERROR) != 0) {
926 device_printf(sc->sc_dev, "Rx error %u\n",
927 le32toh(rxd->rx_stat) & RX_ERROR_MASK);
932 m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);
933 m->m_pkthdr.rcvif = ifp;
934 #ifndef __NO_STRICT_ALIGNMENT
/* Translate hardware checksum status into mbuf csum_flags. */
937 rx_stat = le32toh(rxd->rx_stat);
938 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
939 if ((rx_stat & RX_STAT_IPCKSUMBAD) != 0)
940 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
941 else if ((rx_stat & RX_STAT_IPCKSUMGOOD) != 0)
942 m->m_pkthdr.csum_flags |=
943 CSUM_IP_CHECKED|CSUM_IP_VALID;
945 if ((rx_stat & RX_STAT_TCPCKSUMGOOD) != 0 ||
946 (rx_stat & RX_STAT_UDPCKSUMGOOD) != 0) {
947 m->m_pkthdr.csum_flags |=
948 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
949 m->m_pkthdr.csum_data = 0xffff;
955 * Typhoon has a firmware bug that VLAN tag is always
956 * stripped out even if it is told to not remove the tag.
957 * Therefore don't check if_capenable here.
959 if (/* (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && */
960 (rx_stat & RX_STAT_VLAN) != 0) {
961 m->m_pkthdr.ether_vtag =
962 bswap16((le32toh(rxd->rx_vlan) >> 16));
963 m->m_flags |= M_VLANTAG;
967 (*ifp->if_input)(ifp, m);
/* Advance to the next descriptor, wrapping at the end of the ring. */
971 roff += sizeof(struct txp_rx_desc);
972 if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
983 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
984 BUS_DMASYNC_PREWRITE);
985 *r->r_roff = htole32(roff);
987 return (count > 0 ? 0 : EAGAIN);
/*
 * Refill the Rx buffer ring: for each free slot between the producer
 * and the firmware's consumer index, take a software descriptor from
 * the free list, attach a fresh mbuf cluster, load it for DMA and
 * publish its physical address in the rxbuf descriptor.  Finally
 * advance hv_rx_buf_write_idx so the firmware sees the new buffers.
 */
991 txp_rxbuf_reclaim(struct txp_softc *sc)
993 struct txp_hostvar *hv;
994 struct txp_rxbuf_desc *rbd;
995 struct txp_rx_swdesc *sd;
996 bus_dma_segment_t segs[1];
997 int nsegs, prod, prog;
1000 TXP_LOCK_ASSERT(sc);
1002 hv = sc->sc_hostvar;
1003 cons = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
1004 prod = sc->sc_rxbufprod;
1005 TXP_DESC_INC(prod, RXBUF_ENTRIES);
1009 bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1010 sc->sc_cdata.txp_rxbufs_map,
1011 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1013 for (prog = 0; prod != cons; prog++) {
1014 sd = TAILQ_FIRST(&sc->sc_free_list);
1017 rbd = sc->sc_rxbufs + prod;
1018 bcopy((u_long *)&rbd->rb_vaddrlo, &sd, sizeof(sd));
1019 sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1020 if (sd->sd_mbuf == NULL)
1022 sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
1023 #ifndef __NO_STRICT_ALIGNMENT
/* Reserve headroom for the txp_fixup_rx() alignment copy. */
1024 m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
1026 if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
1027 sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0) != 0) {
1028 m_freem(sd->sd_mbuf);
1032 KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
1034 TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1035 TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
1036 bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1037 BUS_DMASYNC_PREREAD);
1038 rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
1039 rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
1040 TXP_DESC_INC(prod, RXBUF_ENTRIES);
1045 bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1046 sc->sc_cdata.txp_rxbufs_map,
1047 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Step back one slot: the write index points at the last valid buffer. */
1048 prod = (prod + RXBUF_ENTRIES - 1) % RXBUF_ENTRIES;
1049 sc->sc_rxbufprod = prod;
1050 hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(prod));
1054 * Reclaim mbufs and entries from a transmit ring.
/*
 * Walk ring 'r' from the consumer index to the firmware's completion
 * index, unloading and freeing the mbuf attached to each completed
 * DATA descriptor.  Clears IFF_DRV_OACTIVE and the watchdog timer as
 * space is recovered.
 */
1057 txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r)
1062 struct txp_tx_desc *txd;
1063 struct txp_swdesc *sd;
1065 TXP_LOCK_ASSERT(sc);
1067 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
1068 BUS_DMASYNC_POSTWRITE);
1070 idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
1073 txd = r->r_desc + cons;
1074 sd = sc->sc_txd + cons;
1076 for (cnt = r->r_cnt; cons != idx && cnt > 0; cnt--) {
/* Only DATA descriptors own an mbuf; fragment descriptors don't. */
1077 if ((txd->tx_flags & TX_FLAGS_TYPE_M) == TX_FLAGS_TYPE_DATA) {
1078 if (sd->sd_mbuf != NULL) {
1079 bus_dmamap_sync(sc->sc_cdata.txp_tx_tag,
1080 sd->sd_map, BUS_DMASYNC_POSTWRITE);
1081 bus_dmamap_unload(sc->sc_cdata.txp_tx_tag,
1083 m_freem(sd->sd_mbuf);
1090 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1092 if (++cons == TX_ENTRIES) {
1102 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
1103 BUS_DMASYNC_PREWRITE);
1107 sc->sc_watchdog_timer = 0;
/* System shutdown: identical to suspend (stop NIC, arm WOL if enabled). */
1111 txp_shutdown(device_t dev)
1114 return (txp_suspend(dev));
/*
 * Suspend: reboot the controller into its sleep image, program the
 * station address back into it (packed into two 32-bit command
 * parameters, mirroring the unpacking in txp_attach()), put the chip
 * to sleep and, if WOL is enabled, arm PME via PCI power management.
 */
1118 txp_suspend(device_t dev)
1120 struct txp_softc *sc;
1128 sc = device_get_softc(dev);
1134 /* Reset controller and make it reload sleep image. */
1136 /* Let controller boot from sleep image. */
1137 if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
1138 device_printf(sc->sc_dev, "couldn't boot sleep image\n");
1140 /* Set station address. */
1141 eaddr = IF_LLADDR(sc->sc_ifp);
1143 ((uint8_t *)&p1)[1] = eaddr[0];
1144 ((uint8_t *)&p1)[0] = eaddr[1];
1146 ((uint8_t *)&p2)[3] = eaddr[2];
1147 ((uint8_t *)&p2)[2] = eaddr[3];
1148 ((uint8_t *)&p2)[1] = eaddr[4];
1149 ((uint8_t *)&p2)[0] = eaddr[5];
1151 txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0, NULL, NULL,
1152 NULL, TXP_CMD_WAIT);
1154 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
1155 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
1156 txp_sleep(sc, sc->sc_ifp->if_capenable);
1157 if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
/* Clear PME status, then re-enable it only when WOL is requested. */
1159 pmstat = pci_read_config(sc->sc_dev,
1160 pmc + PCIR_POWER_STATUS, 2);
1161 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1162 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1163 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1164 pci_write_config(sc->sc_dev,
1165 pmc + PCIR_POWER_STATUS, pmstat, 2);
/*
 * Resume: disable PME and clear its status in the PCI power-management
 * registers, then reinitialize the interface if it was up.
 */
1173 txp_resume(device_t dev)
1175 struct txp_softc *sc;
1179 sc = device_get_softc(dev);
1182 if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
1183 /* Disable PME and clear PME status. */
1184 pmstat = pci_read_config(sc->sc_dev,
1185 pmc + PCIR_POWER_STATUS, 2);
1186 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
1187 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1188 pci_write_config(sc->sc_dev,
1189 pmc + PCIR_POWER_STATUS, pmstat, 2);
1192 if ((sc->sc_ifp->if_flags & IFF_UP) != 0)
1193 txp_init_locked(sc);
/*
 * Callback argument for txp_dmamap_cb(): receives the single bus address
 * of a loaded DMA segment.
 */
1199 struct txp_dmamap_arg {
1200 	bus_addr_t txp_busaddr;
/*
 * bus_dmamap_load() callback: stash the (single) segment's bus address
 * into the caller-supplied txp_dmamap_arg.  Exactly one segment is
 * expected; anything else is a programming error (KASSERT).
 */
1204 txp_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1206 	struct txp_dmamap_arg *ctx;
1211 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1213 	ctx = (struct txp_dmamap_arg *)arg;
1214 	ctx->txp_busaddr = segs[0].ds_addr;
/*
 * Allocate one contiguous DMA'able memory block:
 * create a child tag under the parent tag, allocate zeroed coherent
 * memory, load it and return the bus address through *paddr.
 * 'type' is used only for diagnostics.  On success *tag/*map/*buf are
 * valid and must be released with txp_dma_free().
 */
1218 txp_dma_alloc(struct txp_softc *sc, char *type, bus_dma_tag_t *tag,
1219     bus_size_t alignment, bus_size_t boundary, bus_dmamap_t *map, void **buf,
1220     bus_size_t size, bus_addr_t *paddr)
1222 	struct txp_dmamap_arg ctx;
1225 	/* Create DMA block tag. */
1226 	error = bus_dma_tag_create(
1227 	    sc->sc_cdata.txp_parent_tag,	/* parent */
1228 	    alignment, boundary,	/* algnmnt, boundary */
1229 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1230 	    BUS_SPACE_MAXADDR,		/* highaddr */
1231 	    NULL, NULL,			/* filter, filterarg */
1234 	    size,			/* maxsegsize */
1236 	    NULL, NULL,			/* lockfunc, lockarg */
1239 		device_printf(sc->sc_dev,
1240 		    "could not create DMA tag for %s.\n", type);
1245 	/* Allocate DMA'able memory and load the DMA map. */
1246 	error = bus_dmamem_alloc(*tag, buf, BUS_DMA_WAITOK | BUS_DMA_ZERO |
1247 	    BUS_DMA_COHERENT, map);
1249 		device_printf(sc->sc_dev,
1250 		    "could not allocate DMA'able memory for %s.\n", type);
/* ctx.txp_busaddr == 0 after the load means the callback never ran. */
1254 	ctx.txp_busaddr = 0;
1255 	error = bus_dmamap_load(*tag, *map, *(uint8_t **)buf,
1256 	    size, txp_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1257 	if (error != 0 || ctx.txp_busaddr == 0) {
1258 		device_printf(sc->sc_dev,
1259 		    "could not load DMA'able memory for %s.\n", type);
1262 	*paddr = ctx.txp_busaddr;
/*
 * Release resources obtained from txp_dma_alloc(): unload the map, free
 * the memory, NULL the buffer pointer and destroy the tag.
 * NOTE(review): listing is gapped — the guard conditions around the
 * unload/destroy calls are not fully visible here.
 */
1268 txp_dma_free(struct txp_softc *sc, bus_dma_tag_t *tag, bus_dmamap_t *map,
1274 		bus_dmamap_unload(*tag, *map);
1275 	if (*map != NULL && buf != NULL)
1276 		bus_dmamem_free(*tag, *(uint8_t **)buf, *map);
1277 	*(uint8_t **)buf = NULL;
1279 		bus_dma_tag_destroy(*tag);
/*
 * Allocate all shared host/controller data structures and record their
 * bus addresses in the boot record that is handed to the firmware:
 * boot record, host variables, hi/lo priority Tx and Rx rings, command
 * ring, response ring, Rx buffer ring and the "zero" word.  Also creates
 * the Tx/Rx mbuf DMA tags and per-descriptor maps.  Paired with
 * txp_free_rings() for teardown.
 */
1285 txp_alloc_rings(struct txp_softc *sc)
1287 	struct txp_boot_record *boot;
1288 	struct txp_ldata *ld;
1289 	struct txp_swdesc *txd;
1290 	struct txp_rxbuf_desc *rbd;
1291 	struct txp_rx_swdesc *sd;
1295 	boot = ld->txp_boot;
1301 	 * Create parent ring/DMA block tag.
1302 	 * Datasheet says that all ring addresses and descriptors
1303 	 * support 64bits addressing. However the controller is
1304 	 * known to have no support DAC so limit DMA address space
1307 	error = bus_dma_tag_create(
1308 	    bus_get_dma_tag(sc->sc_dev), /* parent */
1309 	    1, 0,			/* algnmnt, boundary */
1310 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1311 	    BUS_SPACE_MAXADDR,		/* highaddr */
1312 	    NULL, NULL,			/* filter, filterarg */
1313 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1315 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1317 	    NULL, NULL,			/* lockfunc, lockarg */
1318 	    &sc->sc_cdata.txp_parent_tag);
1320 		device_printf(sc->sc_dev, "could not create parent DMA tag.\n");
/* Boot record: must be 32-bit aligned per the Typhoon spec. */
1325 	error = txp_dma_alloc(sc, "boot record",
1326 	    &sc->sc_cdata.txp_boot_tag, sizeof(uint32_t), 0,
1327 	    &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
1328 	    sizeof(struct txp_boot_record),
1329 	    &sc->sc_ldata.txp_boot_paddr);
1332 	boot = sc->sc_ldata.txp_boot;
1335 	/* Host variables. */
1336 	error = txp_dma_alloc(sc, "host variables",
1337 	    &sc->sc_cdata.txp_hostvar_tag, sizeof(uint32_t), 0,
1338 	    &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
1339 	    sizeof(struct txp_hostvar),
1340 	    &sc->sc_ldata.txp_hostvar_paddr);
1343 	boot->br_hostvar_lo =
1344 	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_hostvar_paddr));
1345 	boot->br_hostvar_hi =
1346 	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_hostvar_paddr));
1347 	sc->sc_hostvar = sc->sc_ldata.txp_hostvar;
1349 	/* Hi priority tx ring. */
1350 	error = txp_dma_alloc(sc, "hi priority tx ring",
1351 	    &sc->sc_cdata.txp_txhiring_tag, sizeof(struct txp_tx_desc), 0,
1352 	    &sc->sc_cdata.txp_txhiring_map, (void **)&sc->sc_ldata.txp_txhiring,
1353 	    sizeof(struct txp_tx_desc) * TX_ENTRIES,
1354 	    &sc->sc_ldata.txp_txhiring_paddr);
1357 	boot->br_txhipri_lo =
1358 	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txhiring_paddr));
1359 	boot->br_txhipri_hi =
1360 	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txhiring_paddr));
1361 	boot->br_txhipri_siz =
1362 	    htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
1363 	sc->sc_txhir.r_tag = sc->sc_cdata.txp_txhiring_tag;
1364 	sc->sc_txhir.r_map = sc->sc_cdata.txp_txhiring_map;
1365 	sc->sc_txhir.r_reg = TXP_H2A_1;
1366 	sc->sc_txhir.r_desc = sc->sc_ldata.txp_txhiring;
1367 	sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
1368 	sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
1370 	/* Low priority tx ring. */
1371 	error = txp_dma_alloc(sc, "low priority tx ring",
1372 	    &sc->sc_cdata.txp_txloring_tag, sizeof(struct txp_tx_desc), 0,
1373 	    &sc->sc_cdata.txp_txloring_map, (void **)&sc->sc_ldata.txp_txloring,
1374 	    sizeof(struct txp_tx_desc) * TX_ENTRIES,
1375 	    &sc->sc_ldata.txp_txloring_paddr);
1378 	boot->br_txlopri_lo =
1379 	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txloring_paddr));
1380 	boot->br_txlopri_hi =
1381 	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txloring_paddr));
1382 	boot->br_txlopri_siz =
1383 	    htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
1384 	sc->sc_txlor.r_tag = sc->sc_cdata.txp_txloring_tag;
1385 	sc->sc_txlor.r_map = sc->sc_cdata.txp_txloring_map;
1386 	sc->sc_txlor.r_reg = TXP_H2A_3;
1387 	sc->sc_txlor.r_desc = sc->sc_ldata.txp_txloring;
1388 	sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
1389 	sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;
1391 	/* High priority rx ring. */
1392 	error = txp_dma_alloc(sc, "hi priority rx ring",
1393 	    &sc->sc_cdata.txp_rxhiring_tag,
1394 	    roundup(sizeof(struct txp_rx_desc), 16), 0,
1395 	    &sc->sc_cdata.txp_rxhiring_map, (void **)&sc->sc_ldata.txp_rxhiring,
1396 	    sizeof(struct txp_rx_desc) * RX_ENTRIES,
1397 	    &sc->sc_ldata.txp_rxhiring_paddr);
1400 	boot->br_rxhipri_lo =
1401 	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxhiring_paddr));
1402 	boot->br_rxhipri_hi =
1403 	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxhiring_paddr));
1404 	boot->br_rxhipri_siz =
1405 	    htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
1406 	sc->sc_rxhir.r_tag = sc->sc_cdata.txp_rxhiring_tag;
1407 	sc->sc_rxhir.r_map = sc->sc_cdata.txp_rxhiring_map;
1408 	sc->sc_rxhir.r_desc = sc->sc_ldata.txp_rxhiring;
1409 	sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
1410 	sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
1412 	/* Low priority rx ring. */
1413 	error = txp_dma_alloc(sc, "low priority rx ring",
1414 	    &sc->sc_cdata.txp_rxloring_tag,
1415 	    roundup(sizeof(struct txp_rx_desc), 16), 0,
1416 	    &sc->sc_cdata.txp_rxloring_map, (void **)&sc->sc_ldata.txp_rxloring,
1417 	    sizeof(struct txp_rx_desc) * RX_ENTRIES,
1418 	    &sc->sc_ldata.txp_rxloring_paddr);
1421 	boot->br_rxlopri_lo =
1422 	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxloring_paddr));
1423 	boot->br_rxlopri_hi =
1424 	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxloring_paddr));
1425 	boot->br_rxlopri_siz =
1426 	    htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
1427 	sc->sc_rxlor.r_tag = sc->sc_cdata.txp_rxloring_tag;
1428 	sc->sc_rxlor.r_map = sc->sc_cdata.txp_rxloring_map;
1429 	sc->sc_rxlor.r_desc = sc->sc_ldata.txp_rxloring;
1430 	sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
1431 	sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
/* Command ring. */
1434 	error = txp_dma_alloc(sc, "command ring",
1435 	    &sc->sc_cdata.txp_cmdring_tag, sizeof(struct txp_cmd_desc), 0,
1436 	    &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
1437 	    sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
1438 	    &sc->sc_ldata.txp_cmdring_paddr);
1441 	boot->br_cmd_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_cmdring_paddr));
1442 	boot->br_cmd_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_cmdring_paddr));
1443 	boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
1444 	sc->sc_cmdring.base = sc->sc_ldata.txp_cmdring;
1445 	sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
1446 	sc->sc_cmdring.lastwrite = 0;
1448 	/* Response ring. */
1449 	error = txp_dma_alloc(sc, "response ring",
1450 	    &sc->sc_cdata.txp_rspring_tag, sizeof(struct txp_rsp_desc), 0,
1451 	    &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
1452 	    sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
1453 	    &sc->sc_ldata.txp_rspring_paddr);
1456 	boot->br_resp_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rspring_paddr));
1457 	boot->br_resp_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rspring_paddr));
1458 	boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc));
1459 	sc->sc_rspring.base = sc->sc_ldata.txp_rspring;
1460 	sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
1461 	sc->sc_rspring.lastwrite = 0;
1463 	/* Receive buffer ring. */
1464 	error = txp_dma_alloc(sc, "receive buffer ring",
1465 	    &sc->sc_cdata.txp_rxbufs_tag, sizeof(struct txp_rxbuf_desc), 0,
1466 	    &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
1467 	    sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
1468 	    &sc->sc_ldata.txp_rxbufs_paddr);
1472 	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxbufs_paddr));
1474 	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxbufs_paddr));
1475 	boot->br_rxbuf_siz =
1476 	    htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
1477 	sc->sc_rxbufs = sc->sc_ldata.txp_rxbufs;
/* Zero word: DMA target the firmware uses for its "zero" writes. */
1480 	error = txp_dma_alloc(sc, "zero buffer",
1481 	    &sc->sc_cdata.txp_zero_tag, sizeof(uint32_t), 0,
1482 	    &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
1483 	    sizeof(uint32_t), &sc->sc_ldata.txp_zero_paddr);
1486 	boot->br_zero_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_zero_paddr));
1487 	boot->br_zero_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_zero_paddr));
/* Push the fully populated boot record out to the device. */
1489 	bus_dmamap_sync(sc->sc_cdata.txp_boot_tag, sc->sc_cdata.txp_boot_map,
1490 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1492 	/* Create Tx buffers. */
1493 	error = bus_dma_tag_create(
1494 	    sc->sc_cdata.txp_parent_tag, /* parent */
1495 	    1, 0,			/* algnmnt, boundary */
1496 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1497 	    BUS_SPACE_MAXADDR,		/* highaddr */
1498 	    NULL, NULL,			/* filter, filterarg */
1499 	    MCLBYTES * TXP_MAXTXSEGS,	/* maxsize */
1500 	    TXP_MAXTXSEGS,		/* nsegments */
1501 	    MCLBYTES,			/* maxsegsize */
1503 	    NULL, NULL,			/* lockfunc, lockarg */
1504 	    &sc->sc_cdata.txp_tx_tag);
1506 		device_printf(sc->sc_dev, "could not create Tx DMA tag.\n");
1510 	/* Create tag for Rx buffers. */
1511 	error = bus_dma_tag_create(
1512 	    sc->sc_cdata.txp_parent_tag, /* parent */
1513 	    TXP_RXBUF_ALIGN, 0,		/* algnmnt, boundary */
1514 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1515 	    BUS_SPACE_MAXADDR,		/* highaddr */
1516 	    NULL, NULL,			/* filter, filterarg */
1517 	    MCLBYTES,			/* maxsize */
1519 	    MCLBYTES,			/* maxsegsize */
1521 	    NULL, NULL,			/* lockfunc, lockarg */
1522 	    &sc->sc_cdata.txp_rx_tag);
1524 		device_printf(sc->sc_dev, "could not create Rx DMA tag.\n");
1528 	/* Create DMA maps for Tx buffers. */
1529 	for (i = 0; i < TX_ENTRIES; i++) {
1530 		txd = &sc->sc_txd[i];
1531 		txd->sd_mbuf = NULL;
1533 		error = bus_dmamap_create(sc->sc_cdata.txp_tx_tag, 0,
1536 			device_printf(sc->sc_dev,
1537 			    "could not create Tx dmamap.\n");
1542 	/* Create DMA maps for Rx buffers. */
1543 	for (i = 0; i < RXBUF_ENTRIES; i++) {
1544 		sd = malloc(sizeof(struct txp_rx_swdesc), M_DEVBUF,
1551 		 * The virtual address part of descriptor is not used
1552 		 * by hardware so use that to save an ring entry. We
1553 		 * need bcopy here otherwise the address wouldn't be
1554 		 * valid on big-endian architectures.
1556 		rbd = sc->sc_rxbufs + i;
1557 		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
1560 		error = bus_dmamap_create(sc->sc_cdata.txp_rx_tag, 0,
1563 			device_printf(sc->sc_dev,
1564 			    "could not create Rx dmamap.\n");
1567 		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
/*
 * Reset all software ring state to a pristine condition: zero the host
 * variable block and zero word, reset producer/consumer indices on both
 * Tx rings and the command/response rings, then sync the host variables
 * so the device sees the cleared state.
 */
1575 txp_init_rings(struct txp_softc *sc)
1578 	bzero(sc->sc_ldata.txp_hostvar, sizeof(struct txp_hostvar));
1579 	bzero(sc->sc_ldata.txp_zero, sizeof(uint32_t));
1580 	sc->sc_txhir.r_cons = 0;
1581 	sc->sc_txhir.r_prod = 0;
1582 	sc->sc_txhir.r_cnt = 0;
1583 	sc->sc_txlor.r_cons = 0;
1584 	sc->sc_txlor.r_prod = 0;
1585 	sc->sc_txlor.r_cnt = 0;
1586 	sc->sc_cmdring.lastwrite = 0;
1587 	sc->sc_rspring.lastwrite = 0;
1588 	sc->sc_rxbufprod = 0;
1589 	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1590 	    sc->sc_cdata.txp_hostvar_map,
1591 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * Poll the A2H_0 status register until the controller reports the
 * requested state or TXP_TIMEOUT iterations elapse.  Returns 0 on
 * success, ETIMEDOUT otherwise.
 */
1595 txp_wait(struct txp_softc *sc, uint32_t state)
1600 	for (i = 0; i < TXP_TIMEOUT; i++) {
1601 		reg = READ_REG(sc, TXP_A2H_0);
1607 	return (i == TXP_TIMEOUT ? ETIMEDOUT : 0);
/*
 * Teardown counterpart of txp_alloc_rings(): destroy the per-buffer Tx
 * and Rx DMA maps and tags, free every software Rx descriptor on the
 * free list, release each ring via txp_dma_free() and finally destroy
 * the parent tag.  Safe to call on a partially initialized softc.
 */
1611 txp_free_rings(struct txp_softc *sc)
1613 	struct txp_swdesc *txd;
1614 	struct txp_rx_swdesc *sd;
1618 	if (sc->sc_cdata.txp_tx_tag != NULL) {
1619 		for (i = 0; i < TX_ENTRIES; i++) {
1620 			txd = &sc->sc_txd[i];
1621 			if (txd->sd_map != NULL) {
1622 				bus_dmamap_destroy(sc->sc_cdata.txp_tx_tag,
1627 		bus_dma_tag_destroy(sc->sc_cdata.txp_tx_tag);
1628 		sc->sc_cdata.txp_tx_tag = NULL;
1631 	if (sc->sc_cdata.txp_rx_tag != NULL) {
1632 		if (sc->sc_rxbufs != NULL) {
/* All Rx buffers must have been reclaimed to the free list by now. */
1633 			KASSERT(TAILQ_FIRST(&sc->sc_busy_list) == NULL,
1634 			    ("%s : still have busy Rx buffers", __func__));
1635 			while ((sd = TAILQ_FIRST(&sc->sc_free_list)) != NULL) {
1636 				TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1637 				if (sd->sd_map != NULL) {
1639 					    sc->sc_cdata.txp_rx_tag,
1646 		bus_dma_tag_destroy(sc->sc_cdata.txp_rx_tag);
1647 		sc->sc_cdata.txp_rx_tag = NULL;
1650 	/* Hi priority Tx ring. */
1651 	txp_dma_free(sc, &sc->sc_cdata.txp_txhiring_tag,
1652 	    &sc->sc_cdata.txp_txhiring_map,
1653 	    (void **)&sc->sc_ldata.txp_txhiring);
1654 	/* Low priority Tx ring. */
1655 	txp_dma_free(sc, &sc->sc_cdata.txp_txloring_tag,
1656 	    &sc->sc_cdata.txp_txloring_map,
1657 	    (void **)&sc->sc_ldata.txp_txloring);
1658 	/* Hi priority Rx ring. */
1659 	txp_dma_free(sc, &sc->sc_cdata.txp_rxhiring_tag,
1660 	    &sc->sc_cdata.txp_rxhiring_map,
1661 	    (void **)&sc->sc_ldata.txp_rxhiring);
1662 	/* Low priority Rx ring. */
1663 	txp_dma_free(sc, &sc->sc_cdata.txp_rxloring_tag,
1664 	    &sc->sc_cdata.txp_rxloring_map,
1665 	    (void **)&sc->sc_ldata.txp_rxloring);
1666 	/* Receive buffer ring. */
1667 	txp_dma_free(sc, &sc->sc_cdata.txp_rxbufs_tag,
1668 	    &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs);
/* Command ring. */
1670 	txp_dma_free(sc, &sc->sc_cdata.txp_cmdring_tag,
1671 	    &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring);
1672 	/* Response ring. */
1673 	txp_dma_free(sc, &sc->sc_cdata.txp_rspring_tag,
1674 	    &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring);
/* Zero word. */
1676 	txp_dma_free(sc, &sc->sc_cdata.txp_zero_tag,
1677 	    &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero);
1678 	/* Host variables. */
1679 	txp_dma_free(sc, &sc->sc_cdata.txp_hostvar_tag,
1680 	    &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar);
/* Boot record. */
1682 	txp_dma_free(sc, &sc->sc_cdata.txp_boot_tag,
1683 	    &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot);
1685 	if (sc->sc_cdata.txp_parent_tag != NULL) {
1686 		bus_dma_tag_destroy(sc->sc_cdata.txp_parent_tag);
1687 		sc->sc_cdata.txp_parent_tag = NULL;
/*
 * ifnet ioctl handler: interface up/down and promisc/allmulti toggling,
 * multicast filter updates, capability (checksum/VLAN/WOL) negotiation
 * and media ioctls.  Everything not handled locally falls through to
 * ether_ioctl().  NOTE(review): listing is gapped — the switch/case
 * labels and locking calls are not fully visible here.
 */
1693 txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1695 	struct txp_softc *sc = ifp->if_softc;
1696 	struct ifreq *ifr = (struct ifreq *)data;
1697 	int capenable, error = 0, mask;
1702 		if ((ifp->if_flags & IFF_UP) != 0) {
1703 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
/* Only promisc/allmulti changed: no full reinit needed. */
1704 				if (((ifp->if_flags ^ sc->sc_if_flags)
1705 				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1708 				if ((sc->sc_flags & TXP_FLAG_DETACH) == 0)
1709 					txp_init_locked(sc);
1712 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1715 		sc->sc_if_flags = ifp->if_flags;
1721 		 * Multicast list has changed; set the hardware
1722 		 * filter accordingly.
1725 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1731 		capenable = ifp->if_capenable;
1732 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1733 		if ((mask & IFCAP_TXCSUM) != 0 &&
1734 		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1735 			ifp->if_capenable ^= IFCAP_TXCSUM;
1736 			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1737 				ifp->if_hwassist |= TXP_CSUM_FEATURES;
1739 				ifp->if_hwassist &= ~TXP_CSUM_FEATURES;
1741 		if ((mask & IFCAP_RXCSUM) != 0 &&
1742 		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
1743 			ifp->if_capenable ^= IFCAP_RXCSUM;
1744 		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1745 		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
1746 			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1747 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1748 		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0)
1749 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1750 		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1751 		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
1752 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
/* VLAN hw checksum depends on both Tx csum and hw tagging. */
1753 		if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
1754 			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
1755 		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
1756 			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
/* Push the new capability set to the firmware only when it changed. */
1757 		if (capenable != ifp->if_capenable)
1758 			txp_set_capabilities(sc);
1760 		VLAN_CAPABILITIES(ifp);
1764 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
1767 		error = ether_ioctl(ifp, command, data);
/*
 * Populate the Rx buffer ring: for each entry take a software descriptor
 * from the free list, attach a fresh mbuf cluster, DMA-load it, move the
 * descriptor to the busy list and record the bus address in the ring.
 * The software-descriptor pointer is stashed in the ring entry's unused
 * virtual-address field (see txp_alloc_rings()).  Finally advertise the
 * write index to the firmware via host variables.
 */
1775 txp_rxring_fill(struct txp_softc *sc)
1777 	struct txp_rxbuf_desc *rbd;
1778 	struct txp_rx_swdesc *sd;
1779 	bus_dma_segment_t segs[1];
1780 	int error, i, nsegs;
1782 	TXP_LOCK_ASSERT(sc);
1784 	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1785 	    sc->sc_cdata.txp_rxbufs_map,
1786 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1788 	for (i = 0; i < RXBUF_ENTRIES; i++) {
1789 		sd = TAILQ_FIRST(&sc->sc_free_list);
1792 		rbd = sc->sc_rxbufs + i;
1793 		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
1794 		KASSERT(sd->sd_mbuf == NULL,
1795 		    ("%s : Rx buffer ring corrupted", __func__));
1796 		sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1797 		if (sd->sd_mbuf == NULL)
1799 		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
1800 #ifndef __NO_STRICT_ALIGNMENT
/* Reserve head room so the IP header ends up aligned after DMA. */
1801 		m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
1803 		if ((error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
1804 		    sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0)) != 0) {
1805 			m_freem(sd->sd_mbuf);
1809 		KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
1811 		TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1812 		TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
1813 		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1814 		    BUS_DMASYNC_PREREAD);
1815 		rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
1816 		rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
1819 	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1820 	    sc->sc_cdata.txp_rxbufs_map,
1821 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1822 	sc->sc_rxbufprod = RXBUF_ENTRIES - 1;
1823 	sc->sc_hostvar->hv_rx_buf_write_idx =
1824 	    htole32(TXP_IDX2OFFSET(RXBUF_ENTRIES - 1));
/*
 * Drain the Rx buffer ring: unload and free every mbuf on the busy list
 * and return its software descriptor to the free list.  Counterpart of
 * txp_rxring_fill(), called on stop/teardown.
 */
1830 txp_rxring_empty(struct txp_softc *sc)
1832 	struct txp_rx_swdesc *sd;
1835 	TXP_LOCK_ASSERT(sc);
1837 	if (sc->sc_rxbufs == NULL)
1839 	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1840 	    sc->sc_cdata.txp_hostvar_map,
1841 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1843 	/* Release allocated Rx buffers. */
1845 	while ((sd = TAILQ_FIRST(&sc->sc_busy_list)) != NULL) {
1846 		TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
1847 		KASSERT(sd->sd_mbuf != NULL,
1848 		    ("%s : Rx buffer ring corrupted", __func__));
1849 		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1850 		    BUS_DMASYNC_POSTREAD);
1851 		bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
1852 		m_freem(sd->sd_mbuf);
1854 		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
/*
 * ifnet init entry point (presumably txp_init(void *) — the definition
 * line is lost in this gapped listing; verify against the full source).
 * Acquires the softc and defers to txp_init_locked().
 */
1862 	struct txp_softc *sc;
1866 	txp_init_locked(sc);
/*
 * Bring the interface up (TXP lock held): wake and reset the controller,
 * download firmware, fill the Rx ring, boot the firmware, program
 * station address and capabilities, enable Tx/Rx and interrupts, and
 * start the stats/watchdog tick.  On failure the Rx ring is emptied and
 * interrupts stay masked.
 */
1871 txp_init_locked(struct txp_softc *sc)
1879 	TXP_LOCK_ASSERT(sc);
1882 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1885 	/* Initialize ring structure. */
1887 	/* Wakeup controller. */
1888 	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_WAKEUP);
1889 	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
1891 	 * It seems that earlier NV image can go back to online from
1892 	 * wakeup command but newer ones require controller reset.
1893 	 * So jut reset controller again.
1895 	if (txp_reset(sc) != 0)
1897 	/* Download firmware. */
1898 	error = txp_download_fw(sc);
1900 		device_printf(sc->sc_dev, "could not download firmware.\n");
1903 	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1904 	    sc->sc_cdata.txp_hostvar_map,
1905 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1906 	if ((error = txp_rxring_fill(sc)) != 0) {
1907 		device_printf(sc->sc_dev, "no memory for Rx buffers.\n");
1910 	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1911 	    sc->sc_cdata.txp_hostvar_map,
1912 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1913 	if (txp_boot(sc, STAT_WAITING_FOR_BOOT) != 0) {
1914 		device_printf(sc->sc_dev, "could not boot firmware.\n");
1919 	 * Quite contrary to Typhoon T2 software functional specification,
1920 	 * it seems that TXP_CMD_RECV_BUFFER_CONTROL command is not
1921 	 * implemented in the firmware. This means driver should have to
1922 	 * handle misaligned frames on alignment architectures. AFAIK this
1923 	 * is the only controller manufactured by 3Com that has this stupid
1924 	 * bug. 3Com should fix this.
1926 	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
1927 	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1929 	/* Undocumented command(interrupt coalescing disable?) - From Linux. */
1930 	if (txp_command(sc, TXP_CMD_FILTER_DEFINE, 0, 0, 0, NULL, NULL, NULL,
1931 	    TXP_CMD_NOWAIT) != 0)
1934 	/* Set station address. */
1935 	eaddr = IF_LLADDR(sc->sc_ifp);
/* Little-endian command parameters: MAC bytes are byte-swapped into p1/p2. */
1937 	((uint8_t *)&p1)[1] = eaddr[0];
1938 	((uint8_t *)&p1)[0] = eaddr[1];
1940 	((uint8_t *)&p2)[3] = eaddr[2];
1941 	((uint8_t *)&p2)[2] = eaddr[3];
1942 	((uint8_t *)&p2)[1] = eaddr[4];
1943 	((uint8_t *)&p2)[0] = eaddr[5];
1945 	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0,
1946 	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1950 	txp_set_capabilities(sc);
1952 	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
1953 	    NULL, NULL, NULL, TXP_CMD_NOWAIT))
1955 	if (txp_command(sc, TXP_CMD_XCVR_SELECT, sc->sc_xcvr, 0, 0,
1956 	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1958 	if (txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
1959 	    TXP_CMD_NOWAIT) != 0)
1961 	if (txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
1962 	    TXP_CMD_NOWAIT) != 0)
1965 	/* Ack all pending interrupts and enable interrupts. */
1966 	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
1967 	WRITE_REG(sc, TXP_IER, TXP_INTRS);
1968 	WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
1970 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1971 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1973 	callout_reset(&sc->sc_tick, hz, txp_tick, sc);
/* Error path: release Rx buffers and keep all interrupts masked. */
1977 	txp_rxring_empty(sc);
1980 	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
/*
 * Periodic (per-second) tick (presumably txp_tick(void *) — the
 * definition line is lost in this gapped listing; verify against the
 * full source).  Reclaims Rx buffers, issues TXP_CMD_READ_STATISTICS
 * (which doubles as link-state polling), updates baudrate from the
 * extended response, frees the response and reschedules itself.
 */
1986 	struct txp_softc *sc;
1988 	struct txp_rsp_desc *rsp;
1989 	struct txp_ext_desc *ext;
1993 	TXP_LOCK_ASSERT(sc);
1994 	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1995 	    sc->sc_cdata.txp_hostvar_map,
1996 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1997 	txp_rxbuf_reclaim(sc);
1998 	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1999 	    sc->sc_cdata.txp_hostvar_map,
2000 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2005 	link = sc->sc_flags & TXP_FLAG_LINK;
2006 	if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
2007 	    &rsp, TXP_CMD_WAIT))
/* The statistics response is expected to carry 6 extension descriptors. */
2009 	if (rsp->rsp_numdesc != 6)
2011 	txp_stats_update(sc, rsp);
2012 	if (link == 0 && (sc->sc_flags & TXP_FLAG_LINK) != 0) {
2013 		ext = (struct txp_ext_desc *)(rsp + 1);
2014 		/* Update baudrate with resolved speed. */
2015 		if ((ext[5].ext_2 & 0x02) != 0)
2016 			ifp->if_baudrate = IF_Mbps(100);
2018 			ifp->if_baudrate = IF_Mbps(10);
2023 	free(rsp, M_DEVBUF);
2025 	callout_reset(&sc->sc_tick, hz, txp_tick, sc);
/*
 * ifnet if_start entry point: take the TXP lock (in the gapped lines)
 * and hand off to txp_start_locked().
 */
2029 txp_start(struct ifnet *ifp)
2031 	struct txp_softc *sc;
2035 	txp_start_locked(ifp);
/*
 * Dequeue frames from the interface send queue and hand them to
 * txp_encap() on the hi-priority Tx ring (the lo ring is unused).
 * On ring exhaustion the frame is prepended back and OACTIVE is set.
 * Each queued frame is announced to the chip by writing the new
 * producer offset, and a watchdog timeout is armed if anything was sent.
 */
2040 txp_start_locked(struct ifnet *ifp)
2042 	struct txp_softc *sc;
2043 	struct mbuf *m_head;
2047 	TXP_LOCK_ASSERT(sc);
2049 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2050 	    IFF_DRV_RUNNING || (sc->sc_flags & TXP_FLAG_LINK) == 0)
2053 	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
2054 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2058 		 * Pack the data into the transmit ring. If we
2059 		 * don't have room, set the OACTIVE flag and wait
2060 		 * for the NIC to drain the ring.
2061 		 * ATM only Hi-ring is used.
2063 		if (txp_encap(sc, &sc->sc_txhir, &m_head)) {
2066 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2067 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2072 		 * If there's a BPF listener, bounce a copy of this frame
2075 		ETHER_BPF_MTAP(ifp, m_head);
2077 		/* Send queued frame. */
2078 		WRITE_REG(sc, sc->sc_txhir.r_reg,
2079 		    TXP_IDX2OFFSET(sc->sc_txhir.r_prod));
2083 		/* Set a timeout in case the chip goes out to lunch. */
2084 		sc->sc_watchdog_timer = TXP_TX_TIMEOUT;
/*
 * Encapsulate *m_head onto Tx ring 'r': DMA-load the mbuf chain (with a
 * one-shot m_collapse() retry on EFBIG), check for descriptor overrun,
 * build the leading data descriptor (checksum-offload and VLAN flags)
 * followed by one fragment descriptor per DMA segment, and finally mark
 * the first descriptor valid so the firmware may consume the chain.
 * Returns non-zero on failure; *m_head may be replaced or freed.
 */
2089 txp_encap(struct txp_softc *sc, struct txp_tx_ring *r, struct mbuf **m_head)
2091 	struct txp_tx_desc *first_txd;
2092 	struct txp_frag_desc *fxd;
2093 	struct txp_swdesc *sd;
2095 	bus_dma_segment_t txsegs[TXP_MAXTXSEGS];
2096 	int error, i, nsegs;
2098 	TXP_LOCK_ASSERT(sc);
2100 	M_ASSERTPKTHDR((*m_head));
2103 	first_txd = r->r_desc + r->r_prod;
2104 	sd = sc->sc_txd + r->r_prod;
2106 	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2107 	    *m_head, txsegs, &nsegs, 0);
2108 	if (error == EFBIG) {
/* Too many segments: defragment the chain and retry once. */
2109 		m = m_collapse(*m_head, M_NOWAIT, TXP_MAXTXSEGS);
2116 		error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag,
2117 		    sd->sd_map, *m_head, txsegs, &nsegs, 0);
2123 	} else if (error != 0)
2131 	/* Check descriptor overrun. */
2132 	if (r->r_cnt + nsegs >= TX_ENTRIES - TXP_TXD_RESERVED) {
2133 		bus_dmamap_unload(sc->sc_cdata.txp_tx_tag, sd->sd_map);
2136 	bus_dmamap_sync(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2137 	    BUS_DMASYNC_PREWRITE);
/* Leading descriptor: VALID bit is deliberately set last (see below). */
2140 	first_txd->tx_flags = TX_FLAGS_TYPE_DATA;
2141 	first_txd->tx_numdesc = 0;
2142 	first_txd->tx_addrlo = 0;
2143 	first_txd->tx_addrhi = 0;
2144 	first_txd->tx_totlen = 0;
2145 	first_txd->tx_pflags = 0;
2147 	TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2149 	/* Configure Tx IP/TCP/UDP checksum offload. */
2150 	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2151 		first_txd->tx_pflags |= htole32(TX_PFLAGS_IPCKSUM);
2153 	/* XXX firmware bug. */
2154 	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2155 		first_txd->tx_pflags |= htole32(TX_PFLAGS_TCPCKSUM);
2156 	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2157 		first_txd->tx_pflags |= htole32(TX_PFLAGS_UDPCKSUM);
2160 	/* Configure VLAN hardware tag insertion. */
2161 	if ((m->m_flags & M_VLANTAG) != 0)
2162 		first_txd->tx_pflags |=
2163 		    htole32(TX_PFLAGS_VLAN | TX_PFLAGS_PRIO |
2164 		    (bswap16(m->m_pkthdr.ether_vtag) << TX_PFLAGS_VLANTAG_S));
2166 	for (i = 0; i < nsegs; i++) {
2167 		fxd = (struct txp_frag_desc *)(r->r_desc + r->r_prod);
2168 		fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG | TX_FLAGS_VALID;
2169 		fxd->frag_rsvd1 = 0;
2170 		fxd->frag_len = htole16(txsegs[i].ds_len);
2171 		fxd->frag_addrhi = htole32(TXP_ADDR_HI(txsegs[i].ds_addr));
2172 		fxd->frag_addrlo = htole32(TXP_ADDR_LO(txsegs[i].ds_addr));
2173 		fxd->frag_rsvd2 = 0;
2174 		first_txd->tx_numdesc++;
2176 		TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2179 	/* Lastly set valid flag. */
2180 	first_txd->tx_flags |= TX_FLAGS_VALID;
2182 	/* Sync descriptors. */
2183 	bus_dmamap_sync(r->r_tag, r->r_map,
2184 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2190  * Handle simple commands sent to the typhoon
/*
 * Convenience wrapper over txp_ext_command() for commands without
 * extension descriptors.  When 'wait' is TXP_CMD_WAIT the response's
 * three parameters are copied into out1/out2/out3 (each may be NULL)
 * and the response buffer is freed here.
 */
2193 txp_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
2194     uint32_t in3, uint16_t *out1, uint32_t *out2, uint32_t *out3, int wait)
2196 	struct txp_rsp_desc *rsp;
2199 	if (txp_ext_command(sc, id, in1, in2, in3, NULL, 0, &rsp, wait) != 0) {
2200 		device_printf(sc->sc_dev, "command 0x%02x failed\n", id);
2204 	if (wait == TXP_CMD_NOWAIT)
2207 	KASSERT(rsp != NULL, ("rsp is NULL!\n"));
2209 		*out1 = le16toh(rsp->rsp_par1);
2211 		*out2 = le32toh(rsp->rsp_par2);
2213 		*out3 = le32toh(rsp->rsp_par3);
2214 	free(rsp, M_DEVBUF);
/*
 * Issue a command (optionally with 'in_extn' extension descriptors from
 * 'in_extp') on the command ring and ring the H2A_2 doorbell.  With
 * TXP_CMD_WAIT, poll the host-variable response indices up to
 * TXP_TIMEOUT iterations and collect the matching response into *rspp
 * via txp_response(); caller frees *rspp.  Returns 0 on success,
 * non-zero on ring exhaustion or timeout.
 */
2219 txp_ext_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
2220     uint32_t in3, struct txp_ext_desc *in_extp, uint8_t in_extn,
2221     struct txp_rsp_desc **rspp, int wait)
2223 	struct txp_hostvar *hv;
2224 	struct txp_cmd_desc *cmd;
2225 	struct txp_ext_desc *ext;
2231 	hv = sc->sc_hostvar;
/* Need one slot for the command itself plus one per extension. */
2232 	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
2233 		device_printf(sc->sc_dev,
2234 		    "%s : out of free cmd descriptors for command 0x%02x\n",
2239 	bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
2240 	    sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_POSTWRITE);
2241 	idx = sc->sc_cmdring.lastwrite;
2242 	cmd = (struct txp_cmd_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
2243 	bzero(cmd, sizeof(*cmd));
2245 	cmd->cmd_numdesc = in_extn;
2247 	cmd->cmd_seq = htole16(seq);
2248 	cmd->cmd_id = htole16(id);
2249 	cmd->cmd_par1 = htole16(in1);
2250 	cmd->cmd_par2 = htole32(in2);
2251 	cmd->cmd_par3 = htole32(in3);
2252 	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
2253 	    (wait == TXP_CMD_WAIT ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
/* Advance with wrap; 'idx' is a byte offset into the ring. */
2255 	idx += sizeof(struct txp_cmd_desc);
2256 	if (idx == sc->sc_cmdring.size)
2259 	for (i = 0; i < in_extn; i++) {
2260 		ext = (struct txp_ext_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
2261 		bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
2263 		idx += sizeof(struct txp_cmd_desc);
2264 		if (idx == sc->sc_cmdring.size)
2268 	sc->sc_cmdring.lastwrite = idx;
2269 	bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
2270 	    sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_PREWRITE);
2271 	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2272 	    sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_PREREAD |
2273 	    BUS_DMASYNC_PREWRITE);
/* Doorbell: tell the firmware where the command ring now ends. */
2274 	WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
2275 	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
2277 	if (wait == TXP_CMD_NOWAIT)
2280 	for (i = 0; i < TXP_TIMEOUT; i++) {
2281 		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2282 		    sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_POSTREAD |
2283 		    BUS_DMASYNC_POSTWRITE);
/* A response is pending whenever read and write indices differ. */
2284 		if (le32toh(hv->hv_resp_read_idx) !=
2285 		    le32toh(hv->hv_resp_write_idx)) {
2286 			error = txp_response(sc, id, seq, rspp);
2287 			bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2288 			    sc->sc_cdata.txp_hostvar_map,
2289 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2297 	if (i == TXP_TIMEOUT) {
2298 		device_printf(sc->sc_dev, "command 0x%02x timedout\n", id);
/*
 * Scan the response ring for the response matching (id, seq).  A match
 * is copied into a freshly malloc'd buffer via txp_rsp_fixup() and
 * returned through *rspp (caller frees).  Non-matching entries are
 * consumed in place: error responses are logged, known unsolicited
 * responses (media status, hello, ...) are handled or ignored, unknown
 * ones are reported.
 */
2306 txp_response(struct txp_softc *sc, uint16_t id, uint16_t seq,
2307     struct txp_rsp_desc **rspp)
2309 	struct txp_hostvar *hv;
2310 	struct txp_rsp_desc *rsp;
2313 	bus_dmamap_sync(sc->sc_cdata.txp_rspring_tag,
2314 	    sc->sc_cdata.txp_rspring_map, BUS_DMASYNC_POSTREAD);
2315 	hv = sc->sc_hostvar;
2316 	ridx = le32toh(hv->hv_resp_read_idx);
2317 	while (ridx != le32toh(hv->hv_resp_write_idx)) {
2318 		rsp = (struct txp_rsp_desc *)(((uint8_t *)sc->sc_rspring.base) + ridx);
2320 		if (id == le16toh(rsp->rsp_id) &&
2321 		    le16toh(rsp->rsp_seq) == seq) {
/* Allocate room for the response plus its extension descriptors. */
2322 			*rspp = (struct txp_rsp_desc *)malloc(
2323 			    sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
2324 			    M_DEVBUF, M_NOWAIT);
2325 			if (*rspp == NULL) {
2326 				device_printf(sc->sc_dev,"%s : command 0x%02x "
2327 				    "memory allocation failure\n",
2331 			txp_rsp_fixup(sc, rsp, *rspp);
2335 		if ((rsp->rsp_flags & RSP_FLAGS_ERROR) != 0) {
2336 			device_printf(sc->sc_dev,
2337 			    "%s : command 0x%02x response error!\n", __func__,
2338 			    le16toh(rsp->rsp_id));
2339 			txp_rsp_fixup(sc, rsp, NULL);
2340 			ridx = le32toh(hv->hv_resp_read_idx);
2345 		 * The following unsolicited responses are handled during
2346 		 * processing of TXP_CMD_READ_STATISTICS which requires
2347 		 * response. Driver abuses the command to detect media
2349 		 * TXP_CMD_FILTER_DEFINE is not an unsolicited response
2350 		 * but we don't process response ring in interrupt handler
2351 		 * so we have to ignore this command here, otherwise
2352 		 * unknown command message would be printed.
2354 		switch (le16toh(rsp->rsp_id)) {
2355 		case TXP_CMD_CYCLE_STATISTICS:
2356 		case TXP_CMD_FILTER_DEFINE:
2358 		case TXP_CMD_MEDIA_STATUS_READ:
/* Bit 0x0800 clear in par1 means link is up (per driver usage). */
2359 			if ((le16toh(rsp->rsp_par1) & 0x0800) == 0) {
2360 				sc->sc_flags |= TXP_FLAG_LINK;
2361 				if_link_state_change(sc->sc_ifp,
2364 				sc->sc_flags &= ~TXP_FLAG_LINK;
2365 				if_link_state_change(sc->sc_ifp,
2369 		case TXP_CMD_HELLO_RESPONSE:
2371 			 * Driver should repsond to hello message but
2372 			 * TXP_CMD_READ_STATISTICS is issued for every
2373 			 * hz, therefore there is no need to send an
2374 			 * explicit command here.
2376 			device_printf(sc->sc_dev, "%s : hello\n", __func__);
2379 			device_printf(sc->sc_dev,
2380 			    "%s : unknown command 0x%02x\n", __func__,
2381 			    le16toh(rsp->rsp_id));
2383 		txp_rsp_fixup(sc, rsp, NULL);
2384 		ridx = le32toh(hv->hv_resp_read_idx);
/*
 * Consume a response (and its extension descriptors) from the response
 * ring: copy each descriptor into 'dst' when dst != NULL (discard
 * otherwise), advance the read index with wrap-around, and publish the
 * new read index back to the firmware through host variables.
 */
2391 txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
2392     struct txp_rsp_desc *dst)
2394 	struct txp_rsp_desc *src;
2395 	struct txp_hostvar *hv;
2399 	hv = sc->sc_hostvar;
2400 	ridx = le32toh(hv->hv_resp_read_idx);
2402 	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
2404 			bcopy(src, dst++, sizeof(struct txp_rsp_desc));
2405 		ridx += sizeof(struct txp_rsp_desc);
2406 		if (ridx == sc->sc_rspring.size) {
2407 			src = sc->sc_rspring.base;
2411 	sc->sc_rspring.lastwrite = ridx;
/* Tell the firmware how far we have consumed the response ring. */
2414 	hv->hv_resp_read_idx = htole32(ridx);
/*
 * Return the number of free descriptor slots in the command ring,
 * computed from the driver's last write offset and the firmware's
 * current read offset (both in bytes).
 * NOTE(review): this excerpt is missing some original lines (the 'br'
 * assignment and the if/else heads selecting between the three 'nfree'
 * cases); comments describe only the visible statements.
 */
2418 txp_cmd_desc_numfree(struct txp_softc *sc)
2420 struct txp_hostvar *hv;
2421 struct txp_boot_record *br;
2422 uint32_t widx, ridx, nfree;
/* Sync host-shared memory so we see the firmware's latest read index. */
2424 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2425 sc->sc_cdata.txp_hostvar_map,
2426 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2427 hv = sc->sc_hostvar;
2429 widx = sc->sc_cmdring.lastwrite;
2430 ridx = le32toh(hv->hv_cmd_read_idx);
2433 /* Ring is completely free */
2434 nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
/* Writer ahead of reader: free space wraps around the ring end. */
2437 nfree = le32toh(br->br_cmd_siz) -
2438 (widx - ridx + sizeof(struct txp_cmd_desc));
/* Reader ahead of writer: free space is the gap between the two. */
2440 nfree = ridx - widx - sizeof(struct txp_cmd_desc);
/* Convert the byte count into a descriptor count. */
2443 return (nfree / sizeof(struct txp_cmd_desc));
/*
 * Arm wakeup events (magic packet when IFCAP_WOL_MAGIC is enabled) and
 * ask the firmware to enter its sleep state, then wait until the
 * controller reports STAT_SLEEPING.
 * NOTE(review): the initialization of 'events', the statement guarded
 * by the WOL test and the intermediate error checks are in lines
 * missing from this excerpt.
 */
2447 txp_sleep(struct txp_softc *sc, int capenable)
2453 if ((capenable & IFCAP_WOL_MAGIC) != 0)
/* Tell the firmware which events may wake the controller. */
2455 error = txp_command(sc, TXP_CMD_ENABLE_WAKEUP_EVENTS, events, 0, 0,
2456 NULL, NULL, NULL, TXP_CMD_NOWAIT);
/* Request sleep; the command is posted without waiting for a response. */
2459 error = txp_command(sc, TXP_CMD_GOTO_SLEEP, 0, 0, 0, NULL,
2460 NULL, NULL, TXP_CMD_NOWAIT);
/* Poll the status register until the controller is actually asleep. */
2462 error = txp_wait(sc, STAT_SLEEPING);
2464 device_printf(sc->sc_dev,
2465 "unable to enter into sleep\n");
/*
 * Stop the interface: mask interrupts, disable Tx/Rx in firmware, halt
 * the controller, reclaim outstanding Tx/Rx buffers and reboot the
 * sleep image so a later init starts from a known firmware state.
 * Must be called with the softc lock held (asserted below).
 * NOTE(review): some original lines are missing from this excerpt
 * (the 'ifp' assignment, a 'return', command-wait arguments and the
 * statistics-save / reset calls referenced by the surviving comments).
 */
2473 txp_stop(struct txp_softc *sc)
2477 TXP_LOCK_ASSERT(sc);
/* Already stopped: nothing to do. */
2480 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
/* Mask all interrupt sources and ack anything pending. */
2483 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
2484 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
2486 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2487 sc->sc_flags &= ~TXP_FLAG_LINK;
/* Stop the periodic tick callout. */
2489 callout_stop(&sc->sc_tick);
2491 txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
2493 txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
2495 /* Save statistics for later use. */
2497 /* Halt controller. */
2498 txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL,
2501 if (txp_wait(sc, STAT_HALTED) != 0)
2502 device_printf(sc->sc_dev, "controller halt timedout!\n");
2503 /* Reclaim Tx/Rx buffers. */
2504 if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
2505 TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
2506 txp_tx_reclaim(sc, &sc->sc_txhir);
2507 if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
2508 TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
2509 txp_tx_reclaim(sc, &sc->sc_txlor);
2510 txp_rxring_empty(sc);
2513 /* Reset controller and make it reload sleep image. */
2515 /* Let controller boot from sleep image. */
2516 if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
2517 device_printf(sc->sc_dev, "could not boot sleep image\n");
/*
 * Per-tick Tx watchdog.  Counts sc_watchdog_timer down; when it
 * expires, logs a timeout and reinitializes the controller.
 * Called with the softc lock held (asserted below).
 */
2522 txp_watchdog(struct txp_softc *sc)
2526 TXP_LOCK_ASSERT(sc);
/* Not armed (0) or still counting down (nonzero after decrement). */
2528 if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
2532 if_printf(ifp, "watchdog timeout -- resetting\n");
2535 txp_init_locked(sc);
/*
 * ifmedia "set media" handler: translate the requested media word into
 * a Typhoon transceiver selector and program it with
 * TXP_CMD_XCVR_SELECT.  Supports 10/100 half/full duplex and
 * autoselect; the command is fire-and-forget (TXP_CMD_NOWAIT).
 * NOTE(review): error returns and the final 'return' are in lines
 * missing from this excerpt.
 */
2539 txp_ifmedia_upd(struct ifnet *ifp)
2541 struct txp_softc *sc = ifp->if_softc;
2542 struct ifmedia *ifm = &sc->sc_ifmedia;
/* Only Ethernet media are meaningful for this device. */
2546 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
2551 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
2552 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2553 new_xcvr = TXP_XCVR_10_FDX;
2555 new_xcvr = TXP_XCVR_10_HDX;
2556 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
2557 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2558 new_xcvr = TXP_XCVR_100_FDX;
2560 new_xcvr = TXP_XCVR_100_HDX;
2561 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
2562 new_xcvr = TXP_XCVR_AUTO;
/* No change requested: skip the firmware round-trip. */
2569 if (sc->sc_xcvr == new_xcvr) {
2574 txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
2575 NULL, NULL, NULL, TXP_CMD_NOWAIT);
2576 sc->sc_xcvr = new_xcvr;
/*
 * ifmedia "status" handler: read the PHY registers (BMSR, BMCR, ANAR,
 * ANLPAR) through firmware PHY-management commands and translate them
 * into ifmedia status/active words.  BMSR is read twice —
 * NOTE(review): presumably because the link bit is latched-low and the
 * first read clears a stale value; confirm against the PHY/MII spec.
 * On any command failure the fall-through marks media as IFM_NONE and
 * clears IFM_AVALID (tail of the function).
 * NOTE(review): several 'goto'/'return' lines and the forced-media
 * duplex handling are missing from this excerpt.
 */
2583 txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2585 struct txp_softc *sc = ifp->if_softc;
2586 struct ifmedia *ifm = &sc->sc_ifmedia;
2587 uint16_t bmsr, bmcr, anar, anlpar;
2589 ifmr->ifm_status = IFM_AVALID;
2590 ifmr->ifm_active = IFM_ETHER;
2593 /* Check whether firmware is running. */
2594 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2596 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
2597 &bmsr, NULL, NULL, TXP_CMD_WAIT))
2599 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
2600 &bmsr, NULL, NULL, TXP_CMD_WAIT))
2603 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
2604 &bmcr, NULL, NULL, TXP_CMD_WAIT))
2607 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
2608 &anlpar, NULL, NULL, TXP_CMD_WAIT))
2611 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANAR, 0,
2612 &anar, NULL, NULL, TXP_CMD_WAIT))
2616 if (bmsr & BMSR_LINK)
2617 ifmr->ifm_status |= IFM_ACTIVE;
/* PHY isolated: report no media and no valid status. */
2619 if (bmcr & BMCR_ISO) {
2620 ifmr->ifm_active |= IFM_NONE;
2621 ifmr->ifm_status = 0;
2625 if (bmcr & BMCR_LOOP)
2626 ifmr->ifm_active |= IFM_LOOP;
2628 if (bmcr & BMCR_AUTOEN) {
/* Autonegotiation enabled but not complete yet. */
2629 if ((bmsr & BMSR_ACOMP) == 0) {
2630 ifmr->ifm_active |= IFM_NONE;
/* Pick the best mode the link partner advertised, highest first. */
2635 if (anlpar & ANLPAR_TX_FD)
2636 ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
2637 else if (anlpar & ANLPAR_T4)
2638 ifmr->ifm_active |= IFM_100_T4;
2639 else if (anlpar & ANLPAR_TX)
2640 ifmr->ifm_active |= IFM_100_TX;
2641 else if (anlpar & ANLPAR_10_FD)
2642 ifmr->ifm_active |= IFM_10_T|IFM_FDX;
2643 else if (anlpar & ANLPAR_10)
2644 ifmr->ifm_active |= IFM_10_T;
2646 ifmr->ifm_active |= IFM_NONE;
/* Autonegotiation disabled: report the configured media as-is. */
2648 ifmr->ifm_active = ifm->ifm_cur->ifm_media;
/* Error path: no usable media information. */
2653 ifmr->ifm_active |= IFM_NONE;
2654 ifmr->ifm_status &= ~IFM_AVALID;
/*
 * Debug helper: decode a ring descriptor by its type bits and print its
 * fields.  The same memory is viewed through all four descriptor
 * layouts; only the one matching CMD_FLAGS_TYPE_M is used.
 * NOTE(review): the per-case 'break' statements are in lines missing
 * from this excerpt.
 */
2659 txp_show_descriptor(void *d)
2661 struct txp_cmd_desc *cmd = d;
2662 struct txp_rsp_desc *rsp = d;
2663 struct txp_tx_desc *txd = d;
2664 struct txp_frag_desc *frgd = d;
2666 switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
2667 case CMD_FLAGS_TYPE_CMD:
2668 /* command descriptor */
2669 printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2670 cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
2671 le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
2672 le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
2674 case CMD_FLAGS_TYPE_RESP:
2675 /* response descriptor */
2676 printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2677 rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
2678 le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
2679 le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
2681 case CMD_FLAGS_TYPE_DATA:
2682 /* data header (assuming tx for now) */
2683 printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
2684 txd->tx_flags, txd->tx_numdesc, le16toh(txd->tx_totlen),
2685 le32toh(txd->tx_addrlo), le32toh(txd->tx_addrhi),
2686 le32toh(txd->tx_pflags));
2688 case CMD_FLAGS_TYPE_FRAG:
2689 /* fragment descriptor */
2690 printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
2691 frgd->frag_flags, frgd->frag_rsvd1, le16toh(frgd->frag_len),
2692 le32toh(frgd->frag_addrlo), le32toh(frgd->frag_addrhi),
2693 le32toh(frgd->frag_rsvd2));
/* Unknown type: dump it using the command-descriptor layout. */
2696 printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2697 cmd->cmd_flags & CMD_FLAGS_TYPE_M,
2698 cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
2699 le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
2700 le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
/*
 * Program the receive filter.  Base policy is directed + broadcast;
 * IFF_ALLMULTI adds the all-multicast bit, and IFF_PROMISC *replaces*
 * the whole filter word (note '=' rather than '|=' below — visible and
 * apparently intentional: promiscuous mode subsumes the rest).
 * Otherwise a 64-bit multicast hash is built from the big-endian CRC
 * of each group address and written to the chip.
 * NOTE(review): a masking step for 'crc' (to 6 bits) appears to be in
 * a missing line before the mchash update — verify; also missing are
 * the 'ifp' assignment and the hash-empty check before HASHMULTI.
 */
2707 txp_set_filter(struct txp_softc *sc)
2710 uint32_t crc, mchash[2];
2712 struct ifmultiaddr *ifma;
2715 TXP_LOCK_ASSERT(sc);
2718 filter = TXP_RXFILT_DIRECT;
2719 if ((ifp->if_flags & IFF_BROADCAST) != 0)
2720 filter |= TXP_RXFILT_BROADCAST;
2721 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2722 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2723 filter |= TXP_RXFILT_ALLMULTI;
2724 if ((ifp->if_flags & IFF_PROMISC) != 0)
2725 filter = TXP_RXFILT_PROMISC;
/* Build the 2x32-bit multicast hash table. */
2729 mchash[0] = mchash[1] = 0;
2731 if_maddr_rlock(ifp);
2732 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2733 if (ifma->ifma_addr->sa_family != AF_LINK)
2735 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2736 ifma->ifma_addr), ETHER_ADDR_LEN);
2738 mchash[crc >> 5] |= 1 << (crc & 0x1f);
2741 if_maddr_runlock(ifp);
2744 filter |= TXP_RXFILT_HASHMULTI;
/* Upload the hash words, then the filter word itself. */
2745 txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, 2, mchash[0],
2746 mchash[1], NULL, NULL, NULL, TXP_CMD_NOWAIT);
2750 txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
2751 NULL, NULL, NULL, TXP_CMD_NOWAIT);
/*
 * Translate the interface's checksum/VLAN capability settings into the
 * firmware's Tx/Rx offload words and push them with
 * TXP_CMD_OFFLOAD_WRITE.  Returns the txp_command() result.
 * NOTE(review): the 'ifp' assignment and the initial values of
 * rxcap/txcap are in lines missing from this excerpt.
 */
2755 txp_set_capabilities(struct txp_softc *sc)
2758 uint32_t rxcap, txcap;
2760 TXP_LOCK_ASSERT(sc);
/* Tx checksum offload bits follow the enabled hwassist flags. */
2764 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) {
2765 if ((ifp->if_hwassist & CSUM_IP) != 0)
2766 txcap |= OFFLOAD_IPCKSUM;
2767 if ((ifp->if_hwassist & CSUM_TCP) != 0)
2768 txcap |= OFFLOAD_TCPCKSUM;
2769 if ((ifp->if_hwassist & CSUM_UDP) != 0)
2770 txcap |= OFFLOAD_UDPCKSUM;
2773 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
2774 rxcap &= ~(OFFLOAD_IPCKSUM | OFFLOAD_TCPCKSUM |
/*
 * NOTE(review): this tests if_capabilities, not if_capenable — VLAN
 * offload is turned on whenever the hardware supports it, regardless
 * of the administratively enabled setting.  Verify this is intended.
 */
2776 if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2777 rxcap |= OFFLOAD_VLAN;
2778 txcap |= OFFLOAD_VLAN;
2781 /* Tell firmware new offload configuration. */
2782 return (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, txcap, rxcap, NULL,
2783 NULL, NULL, TXP_CMD_NOWAIT));
/*
 * Fetch the hardware statistics via the extended READ_STATISTICS
 * command, fold them into the running software counters, then snapshot
 * the result into sc_ostats as the new baseline for the next delta.
 * The response is expected to carry exactly 6 extension descriptors.
 * NOTE(review): the error-exit labels/returns between these statements
 * are in lines missing from this excerpt.
 */
2787 txp_stats_save(struct txp_softc *sc)
2789 struct txp_rsp_desc *rsp;
2791 TXP_LOCK_ASSERT(sc);
2794 if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
2795 &rsp, TXP_CMD_WAIT))
2797 if (rsp->rsp_numdesc != 6)
2799 txp_stats_update(sc, rsp);
/* rsp was allocated by the command path; release it here. */
2802 free(rsp, M_DEVBUF);
2803 bcopy(&sc->sc_stats, &sc->sc_ostats, sizeof(struct txp_hw_stats));
/*
 * Merge one READ_STATISTICS response into the software counters.
 * The hardware reports 32-bit deltas (with 64-bit byte counters split
 * across two 32-bit halves) in rsp_par2/par3 plus six extension
 * descriptors laid out immediately after the response descriptor;
 * each delta is added to the saved baseline in sc_ostats.  The ifnet
 * error/collision/packet counters are then recomputed from the totals.
 * NOTE(review): the 'ifp' assignment is in a missing line.
 */
2807 txp_stats_update(struct txp_softc *sc, struct txp_rsp_desc *rsp)
2810 struct txp_hw_stats *ostats, *stats;
2811 struct txp_ext_desc *ext;
2813 TXP_LOCK_ASSERT(sc);
/* Extension descriptors follow the response descriptor in memory. */
2816 ext = (struct txp_ext_desc *)(rsp + 1);
2817 ostats = &sc->sc_ostats;
2818 stats = &sc->sc_stats;
/* Tx counters: frames/bytes come from the response itself. */
2819 stats->tx_frames = ostats->tx_frames + le32toh(rsp->rsp_par2);
2820 stats->tx_bytes = ostats->tx_bytes + (uint64_t)le32toh(rsp->rsp_par3) +
2821 ((uint64_t)le32toh(ext[0].ext_1) << 32);
2822 stats->tx_deferred = ostats->tx_deferred + le32toh(ext[0].ext_2);
2823 stats->tx_late_colls = ostats->tx_late_colls + le32toh(ext[0].ext_3);
2824 stats->tx_colls = ostats->tx_colls + le32toh(ext[0].ext_4);
2825 stats->tx_carrier_lost = ostats->tx_carrier_lost +
2826 le32toh(ext[1].ext_1);
2827 stats->tx_multi_colls = ostats->tx_multi_colls +
2828 le32toh(ext[1].ext_2);
2829 stats->tx_excess_colls = ostats->tx_excess_colls +
2830 le32toh(ext[1].ext_3);
2831 stats->tx_fifo_underruns = ostats->tx_fifo_underruns +
2832 le32toh(ext[1].ext_4);
2833 stats->tx_mcast_oflows = ostats->tx_mcast_oflows +
2834 le32toh(ext[2].ext_1);
2835 stats->tx_filtered = ostats->tx_filtered + le32toh(ext[2].ext_2);
/* Rx counters start in the third extension descriptor. */
2836 stats->rx_frames = ostats->rx_frames + le32toh(ext[2].ext_3);
2837 stats->rx_bytes = ostats->rx_bytes + (uint64_t)le32toh(ext[2].ext_4) +
2838 ((uint64_t)le32toh(ext[3].ext_1) << 32);
2839 stats->rx_fifo_oflows = ostats->rx_fifo_oflows + le32toh(ext[3].ext_2);
2840 stats->rx_badssd = ostats->rx_badssd + le32toh(ext[3].ext_3);
2841 stats->rx_crcerrs = ostats->rx_crcerrs + le32toh(ext[3].ext_4);
2842 stats->rx_lenerrs = ostats->rx_lenerrs + le32toh(ext[4].ext_1);
2843 stats->rx_bcast_frames = ostats->rx_bcast_frames +
2844 le32toh(ext[4].ext_2);
2845 stats->rx_mcast_frames = ostats->rx_mcast_frames +
2846 le32toh(ext[4].ext_3);
2847 stats->rx_oflows = ostats->rx_oflows + le32toh(ext[4].ext_4);
2848 stats->rx_filtered = ostats->rx_filtered + le32toh(ext[5].ext_1);
/* Recompute the ifnet aggregate counters from the running totals. */
2850 ifp->if_ierrors = stats->rx_fifo_oflows + stats->rx_badssd +
2851 stats->rx_crcerrs + stats->rx_lenerrs + stats->rx_oflows;
2852 ifp->if_oerrors = stats->tx_deferred + stats->tx_carrier_lost +
2853 stats->tx_fifo_underruns + stats->tx_mcast_oflows;
2854 ifp->if_collisions = stats->tx_late_colls + stats->tx_multi_colls +
2855 stats->tx_excess_colls;
2856 ifp->if_opackets = stats->tx_frames;
2857 ifp->if_ipackets = stats->rx_frames;
/*
 * Helpers for registering read-only statistics sysctls: ADD32 wraps
 * SYSCTL_ADD_UINT, and ADD64 picks the widest SYSCTL_ADD_* variant
 * available for the running __FreeBSD_version (UQUAD >= 900030,
 * QUAD > 800000, otherwise ULONG).
 * NOTE(review): the '#else' line of this conditional appears to be
 * missing from this excerpt.
 */
2860 #define TXP_SYSCTL_STAT_ADD32(c, h, n, p, d) \
2861 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2863 #if __FreeBSD_version >= 900030
2864 #define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \
2865 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2866 #elif __FreeBSD_version > 800000
2867 #define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \
2868 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2870 #define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \
2871 SYSCTL_ADD_ULONG(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
/*
 * Build the device's sysctl tree: a tunable 'process_limit' knob
 * (validated against TXP_PROC_MIN/MAX, falling back to the default
 * when out of range) and a 'stats' subtree with one node per Tx and
 * Rx MAC counter, backed directly by sc_stats.
 * NOTE(review): the declaration of 'error' and the check of the
 * resource_int_value() result are in lines missing from this excerpt.
 */
2875 txp_sysctl_node(struct txp_softc *sc)
2877 struct sysctl_ctx_list *ctx;
2878 struct sysctl_oid_list *child, *parent;
2879 struct sysctl_oid *tree;
2880 struct txp_hw_stats *stats;
2883 stats = &sc->sc_stats;
2884 ctx = device_get_sysctl_ctx(sc->sc_dev);
2885 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
/* Rx interrupt processing limit, validated by sysctl_hw_txp_proc_limit. */
2886 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
2887 CTLTYPE_INT | CTLFLAG_RW, &sc->sc_process_limit, 0,
2888 sysctl_hw_txp_proc_limit, "I",
2889 "max number of Rx events to process");
2890 /* Pull in device tunables. */
2891 sc->sc_process_limit = TXP_PROC_DEFAULT;
2892 error = resource_int_value(device_get_name(sc->sc_dev),
2893 device_get_unit(sc->sc_dev), "process_limit",
2894 &sc->sc_process_limit);
/* Reject out-of-range tunable values and restore the default. */
2896 if (sc->sc_process_limit < TXP_PROC_MIN ||
2897 sc->sc_process_limit > TXP_PROC_MAX) {
2898 device_printf(sc->sc_dev,
2899 "process_limit value out of range; "
2900 "using default: %d\n", TXP_PROC_DEFAULT);
2901 sc->sc_process_limit = TXP_PROC_DEFAULT;
2904 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
2905 NULL, "TXP statistics");
2906 parent = SYSCTL_CHILDREN(tree);
2908 /* Tx statistics. */
2909 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2910 NULL, "Tx MAC statistics");
2911 child = SYSCTL_CHILDREN(tree);
2913 TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
2914 &stats->tx_frames, "Frames");
2915 TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
2916 &stats->tx_bytes, "Octets");
2917 TXP_SYSCTL_STAT_ADD32(ctx, child, "deferred",
2918 &stats->tx_deferred, "Deferred frames");
2919 TXP_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
2920 &stats->tx_late_colls, "Late collisions");
2921 TXP_SYSCTL_STAT_ADD32(ctx, child, "colls",
2922 &stats->tx_colls, "Collisions");
2923 TXP_SYSCTL_STAT_ADD32(ctx, child, "carrier_lost",
2924 &stats->tx_carrier_lost, "Carrier lost");
2925 TXP_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
2926 &stats->tx_multi_colls, "Multiple collisions");
2927 TXP_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
2928 &stats->tx_excess_colls, "Excessive collisions");
2929 TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_underruns",
2930 &stats->tx_fifo_underruns, "FIFO underruns");
2931 TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_oflows",
2932 &stats->tx_mcast_oflows, "Multicast overflows");
2933 TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
2934 &stats->tx_filtered, "Filtered frames");
2936 /* Rx statistics. */
2937 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2938 NULL, "Rx MAC statistics");
2939 child = SYSCTL_CHILDREN(tree);
2941 TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
2942 &stats->rx_frames, "Frames");
2943 TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
2944 &stats->rx_bytes, "Octets");
2945 TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2946 &stats->rx_fifo_oflows, "FIFO overflows");
2947 TXP_SYSCTL_STAT_ADD32(ctx, child, "badssd",
2948 &stats->rx_badssd, "Bad SSD");
2949 TXP_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2950 &stats->rx_crcerrs, "CRC errors");
2951 TXP_SYSCTL_STAT_ADD32(ctx, child, "lenerrs",
2952 &stats->rx_lenerrs, "Length errors");
2953 TXP_SYSCTL_STAT_ADD32(ctx, child, "bcast_frames",
2954 &stats->rx_bcast_frames, "Broadcast frames");
2955 TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_frames",
2956 &stats->rx_mcast_frames, "Multicast frames");
2957 TXP_SYSCTL_STAT_ADD32(ctx, child, "oflows",
2958 &stats->rx_oflows, "Overflows");
2959 TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
2960 &stats->rx_filtered, "Filtered frames");
2963 #undef TXP_SYSCTL_STAT_ADD32
2964 #undef TXP_SYSCTL_STAT_ADD64
/*
 * Generic bounded-integer sysctl helper: read the new value via
 * sysctl_handle_int() and commit it to *arg1 only when it lies within
 * [low, high].  Used by sysctl_hw_txp_proc_limit().
 * NOTE(review): the enclosing braces and the error/EINVAL returns are
 * in lines missing from this excerpt.
 */
2967 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2973 value = *(int *)arg1;
2974 error = sysctl_handle_int(oidp, &value, 0, req);
/* Read-only access or handler failure: leave the stored value alone. */
2975 if (error || req->newptr == NULL)
2977 if (value < low || value > high)
2979 *(int *)arg1 = value;
2985 sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS)
2987 return (sysctl_int_range(oidp, arg1, arg2, req,
2988 TXP_PROC_MIN, TXP_PROC_MAX));