/*	$OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $	*/

/*
 * Copyright (c) 2001
 *	Jason L. Wright <jason@thought.net>, Theo de Raadt, and
 *	Aaron Campbell <aaron@monkey.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Jason L. Wright,
 *	Theo de Raadt and Aaron Campbell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR THE VOICES IN THEIR
 * HEADS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for 3c990 (Typhoon) Ethernet ASIC
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/txp/if_txpreg.h>
#include <dev/txp/3c990img.h>

MODULE_DEPEND(txp, pci, 1, 1, 1);
MODULE_DEPEND(txp, ether, 1, 1, 1);
/*
 * XXX Known Typhoon firmware issues.
 *
 * 1. The firmware seems to have a Tx TCP/UDP checksum offloading bug:
 *    it hangs when it is told to compute a TCP/UDP checksum.  Whether
 *    the firmware requires special alignment to do checksum offloading
 *    is unknown; the datasheet says nothing about it.
 * 2. The datasheet says nothing about the maximum number of fragment
 *    descriptors supported.  Experimentation shows the firmware supports
 *    up to 16 fragment descriptors.  For TSO the upper stack can send a
 *    64KB IP datagram plus link header (ethernet header + VLAN tag), but
 *    the controller can handle at most a 64KB frame given that PAGE_SIZE
 *    is 4KB (i.e. 16 * PAGE_SIZE).  Because frames that need hardware
 *    TSO can be larger than 64KB, the TSO capability is disabled.  TSO
 *    for frames that fit in 16 or fewer fragment descriptors works
 *    without problems, though.
 * 3. VLAN hardware tag stripping is always enabled in the firmware,
 *    even if it is explicitly told not to strip the tag.  It would be
 *    possible to re-add the tag in the Rx handler when hardware tagging
 *    is not active, but that was not tried as it would be a layering
 *    violation.
 * 4. TXP_CMD_RECV_BUFFER_CONTROL does not work as described in the
 *    datasheet, so the driver has to handle the alignment restriction
 *    itself by copying each received frame onto a 32-bit boundary on
 *    strict-alignment architectures.  This adds a lot of CPU burden and
 *    effectively reduces Rx performance on strict-alignment
 *    architectures (e.g. sparc64, arm and mips).
 *
 * Unfortunately 3Com no longer seems interested in releasing fixed
 * firmware, so we may have to live with these bugs.
 */
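/*
 * Only IP header checksum offloading is advertised below; Tx TCP/UDP
 * checksum offloading stays off because of firmware issue 1 above.
 */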
#define	TXP_CSUM_FEATURES	(CSUM_IP)
/*
 * Various supported device vendors/types and their names.
 */
static struct txp_type txp_devs[] = {
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_95,
	    "3Com 3cR990-TX-95 Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_97,
	    "3Com 3cR990-TX-97 Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_TXM,
	    "3Com 3cR990B-TXM Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_95,
	    "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_97,
	    "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_SRV,
	    "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" },
	{ 0, 0, NULL }
};
static int txp_probe(device_t);
static int txp_attach(device_t);
static int txp_detach(device_t);
static int txp_shutdown(device_t);
static int txp_suspend(device_t);
static int txp_resume(device_t);
static int txp_intr(void *);
static void txp_int_task(void *, int);
static void txp_tick(void *);
static int txp_ioctl(struct ifnet *, u_long, caddr_t);
static void txp_start(struct ifnet *);
static void txp_start_locked(struct ifnet *);
static int txp_encap(struct txp_softc *, struct txp_tx_ring *, struct mbuf **);
static void txp_stop(struct txp_softc *);
static void txp_init(void *);
static void txp_init_locked(struct txp_softc *);
static void txp_watchdog(struct txp_softc *);

static int txp_reset(struct txp_softc *);
static int txp_boot(struct txp_softc *, uint32_t);
static int txp_sleep(struct txp_softc *, int);
static int txp_wait(struct txp_softc *, uint32_t);
static int txp_download_fw(struct txp_softc *);
static int txp_download_fw_wait(struct txp_softc *);
static int txp_download_fw_section(struct txp_softc *,
    struct txp_fw_section_header *, int);
static int txp_alloc_rings(struct txp_softc *);
static void txp_init_rings(struct txp_softc *);
static int txp_dma_alloc(struct txp_softc *, char *, bus_dma_tag_t *,
    bus_size_t, bus_size_t, bus_dmamap_t *, void **, bus_size_t, bus_addr_t *);
static void txp_dma_free(struct txp_softc *, bus_dma_tag_t *, bus_dmamap_t,
    void **, bus_addr_t *);
static void txp_free_rings(struct txp_softc *);
static int txp_rxring_fill(struct txp_softc *);
static void txp_rxring_empty(struct txp_softc *);
static void txp_set_filter(struct txp_softc *);

static int txp_cmd_desc_numfree(struct txp_softc *);
static int txp_command(struct txp_softc *, uint16_t, uint16_t, uint32_t,
    uint32_t, uint16_t *, uint32_t *, uint32_t *, int);
static int txp_ext_command(struct txp_softc *, uint16_t, uint16_t,
    uint32_t, uint32_t, struct txp_ext_desc *, uint8_t,
    struct txp_rsp_desc **, int);
static int txp_response(struct txp_softc *, uint16_t, uint16_t,
    struct txp_rsp_desc **);
static void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
    struct txp_rsp_desc *);
static int txp_set_capabilities(struct txp_softc *);

static void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int txp_ifmedia_upd(struct ifnet *);

static void txp_show_descriptor(void *);
static void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *);
static void txp_rxbuf_reclaim(struct txp_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void txp_fixup_rx(struct mbuf *);
#endif
static int txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *, int);
static void txp_stats_save(struct txp_softc *);
static void txp_stats_update(struct txp_softc *, struct txp_rsp_desc *);
static void txp_sysctl_node(struct txp_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS);
static int prefer_iomap = 0;
TUNABLE_INT("hw.txp.prefer_iomap", &prefer_iomap);

static device_method_t txp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		txp_probe),
	DEVMETHOD(device_attach,	txp_attach),
	DEVMETHOD(device_detach,	txp_detach),
	DEVMETHOD(device_shutdown,	txp_shutdown),
	DEVMETHOD(device_suspend,	txp_suspend),
	DEVMETHOD(device_resume,	txp_resume),

	DEVMETHOD_END
};

static driver_t txp_driver = {
	"txp",
	txp_methods,
	sizeof(struct txp_softc)
};

static devclass_t txp_devclass;

DRIVER_MODULE(txp, pci, txp_driver, txp_devclass, 0, 0);
static int
txp_probe(device_t dev)
{
	struct txp_type *t;

	t = txp_devs;
	while (t->txp_name != NULL) {
		if ((pci_get_vendor(dev) == t->txp_vid) &&
		    (pci_get_device(dev) == t->txp_did)) {
			device_set_desc(dev, t->txp_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

static int
txp_attach(device_t dev)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	struct txp_rsp_desc *rsp;
	uint16_t p1;
	uint32_t p2, reg;
	int error = 0, pmc, rid;
	uint8_t eaddr[ETHER_ADDR_LEN], *ver;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_int_task, 0, txp_int_task, sc);
	TAILQ_INIT(&sc->sc_busy_list);
	TAILQ_INIT(&sc->sc_free_list);

	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
	pci_enable_busmaster(dev);
	/* Prefer memory space register mapping over IO space. */
	if (prefer_iomap == 0) {
		sc->sc_res_id = PCIR_BAR(1);
		sc->sc_res_type = SYS_RES_MEMORY;
	} else {
		sc->sc_res_id = PCIR_BAR(0);
		sc->sc_res_type = SYS_RES_IOPORT;
	}
	sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
	    &sc->sc_res_id, RF_ACTIVE);
	if (sc->sc_res == NULL && prefer_iomap == 0) {
		sc->sc_res_id = PCIR_BAR(0);
		sc->sc_res_type = SYS_RES_IOPORT;
		sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
		    &sc->sc_res_id, RF_ACTIVE);
	}
	if (sc->sc_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		ifmedia_removeall(&sc->sc_ifmedia);
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}
	reg = pci_read_config(dev, PCIR_COMMAND, 2);
	reg |= PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, reg, 2);
	/* Check cache line size. */
	reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (reg == 0 || (reg % 16) != 0)
		device_printf(sc->sc_dev,
		    "invalid cache line size : %u\n", reg);

	/* Allocate interrupt */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	if ((error = txp_alloc_rings(sc)) != 0)
		goto fail;
	/* Reset controller and make it reload sleep image. */
	if (txp_reset(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Let controller boot from sleep image. */
	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
		device_printf(sc->sc_dev, "could not boot sleep image\n");
		error = ENXIO;
		goto fail;
	}

	/* Get station address. */
	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
	    &p1, &p2, NULL, TXP_CMD_WAIT)) {
		error = ENXIO;
		goto fail;
	}
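	/*
	 * The firmware returns the station address in the low bytes of
	 * two little-endian parameter words, hence the byte shuffling
	 * below.
	 */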
	eaddr[0] = ((uint8_t *)&p1)[1];
	eaddr[1] = ((uint8_t *)&p1)[0];
	eaddr[2] = ((uint8_t *)&p2)[3];
	eaddr[3] = ((uint8_t *)&p2)[2];
	eaddr[4] = ((uint8_t *)&p2)[1];
	eaddr[5] = ((uint8_t *)&p2)[0];
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	/*
	 * Show sleep image version information, which may help to
	 * diagnose sleep image specific issues.
	 */
	if (txp_ext_command(sc, TXP_CMD_READ_VERSION, 0, 0, 0, NULL, 0,
	    &rsp, TXP_CMD_WAIT)) {
		device_printf(dev, "can not read sleep image version\n");
		error = ENXIO;
		goto fail;
	}
	if (rsp->rsp_numdesc == 0) {
		p2 = le32toh(rsp->rsp_par2) & 0xFFFF;
		device_printf(dev, "Typhoon 1.0 sleep image (2000/%02u/%02u)\n",
		    p2 >> 8, p2 & 0xFF);
	} else if (rsp->rsp_numdesc == 2) {
		p2 = le32toh(rsp->rsp_par2);
		ver = (uint8_t *)(rsp + 1);
		/*
		 * Although the datasheet says the command returns a
		 * NUL-terminated version string, terminate the string
		 * explicitly; given the firmware's many bugs, this
		 * simple claim cannot be trusted.
		 */
		device_printf(dev,
		    "Typhoon 1.1+ sleep image %02u.%03u.%03u %s\n",
		    p2 >> 24, (p2 >> 12) & 0xFFF, p2 & 0xFFF, ver);
	} else {
		p2 = le32toh(rsp->rsp_par2);
		device_printf(dev,
		    "Unknown Typhoon sleep image version: %u:0x%08x\n",
		    rsp->rsp_numdesc, p2);
	}
	free(rsp, M_DEVBUF);
	sc->sc_xcvr = TXP_XCVR_AUTO;
	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = txp_ioctl;
	ifp->if_start = txp_start;
	ifp->if_init = txp_init;
	ifp->if_snd.ifq_drv_maxlen = TX_ENTRIES - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	/*
	 * It's possible to read the firmware's offload capabilities,
	 * but the firmware has not been downloaded yet, so announce
	 * the known-working capabilities here.  We're not interested
	 * in the IPsec capability, and due to the many firmware bugs
	 * the whole capability set couldn't be advertised anyway.
	 */
	ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM;
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	/* Enable all capabilities. */
	ifp->if_capenable = ifp->if_capabilities;

	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);

	/* Create local taskq. */
	sc->sc_tq = taskqueue_create_fast("txp_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	if (sc->sc_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->sc_dev));

	/* Put controller into sleep. */
	if (txp_sleep(sc, 0) != 0) {
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    txp_intr, NULL, sc, &sc->sc_intrhand);
	if (error != 0) {
		device_printf(dev, "couldn't set up interrupt handler.\n");
		goto fail;
	}

	return (0);

fail:
	if (error != 0)
		txp_detach(dev);

	return (error);
}
static int
txp_detach(device_t dev)
{
	struct txp_softc *sc;

	sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		TXP_LOCK(sc);
		sc->sc_flags |= TXP_FLAG_DETACH;
		txp_stop(sc);
		TXP_UNLOCK(sc);
		callout_drain(&sc->sc_tick);
		taskqueue_drain(sc->sc_tq, &sc->sc_int_task);
		ether_ifdetach(sc->sc_ifp);
	}
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);

	ifmedia_removeall(&sc->sc_ifmedia);
	if (sc->sc_intrhand != NULL)
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
	if (sc->sc_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
	if (sc->sc_res != NULL)
		bus_release_resource(dev, sc->sc_res_type, sc->sc_res_id,
		    sc->sc_res);
	if (sc->sc_ifp != NULL) {
		if_free(sc->sc_ifp);
		sc->sc_ifp = NULL;
	}

	txp_free_rings(sc);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}
static int
txp_reset(struct txp_softc *sc)
{
	uint32_t r;
	int i;

	/* Disable interrupts. */
	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	/* Ack all pending interrupts. */
	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);

	r = 0;
	WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
	WRITE_REG(sc, TXP_SRR, 0);

	/* Should wait max 6 seconds. */
	for (i = 0; i < 6000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(1000);
	}

	if (r != STAT_WAITING_FOR_HOST_REQUEST)
		device_printf(sc->sc_dev, "reset hung\n");

	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);

	/*
	 * Give the controller more time to finish loading the sleep
	 * image before trying to boot from it.
	 */

	return (0);
}
static int
txp_boot(struct txp_softc *sc, uint32_t state)
{

	/* See if it's waiting for boot, and try to boot it. */
	if (txp_wait(sc, state) != 0) {
		device_printf(sc->sc_dev, "not waiting for boot\n");
		return (ENXIO);
	}

	WRITE_REG(sc, TXP_H2A_2, TXP_ADDR_HI(sc->sc_ldata.txp_boot_paddr));
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_1, TXP_ADDR_LO(sc->sc_ldata.txp_boot_paddr));
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	/* See if it booted. */
	if (txp_wait(sc, STAT_RUNNING) != 0) {
		device_printf(sc->sc_dev, "firmware not running\n");
		return (ENXIO);
	}

	/* Clear TX and CMD ring write registers. */
	WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	return (0);
}
static int
txp_download_fw(struct txp_softc *sc)
{
	struct txp_fw_file_header *fileheader;
	struct txp_fw_section_header *secthead;
	int sect;
	uint32_t error, ier, imr;

	error = 0;
	ier = READ_REG(sc, TXP_IER);
	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

	imr = READ_REG(sc, TXP_IMR);
	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

	if (txp_wait(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
		device_printf(sc->sc_dev, "not waiting for host request\n");
		error = ETIMEDOUT;
		goto fail;
	}

	/* Ack the status. */
	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	fileheader = (struct txp_fw_file_header *)tc990image;
	if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
		device_printf(sc->sc_dev, "firmware invalid magic\n");
		error = EINVAL;
		goto fail;
	}

	/* Tell boot firmware to get ready for image. */
	WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_2, le32toh(fileheader->hmac[0]));
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_3, le32toh(fileheader->hmac[1]));
	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_4, le32toh(fileheader->hmac[2]));
	TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_5, le32toh(fileheader->hmac[3]));
	TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_6, le32toh(fileheader->hmac[4]));
	TXP_BARRIER(sc, TXP_H2A_6, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	if (txp_download_fw_wait(sc)) {
		device_printf(sc->sc_dev, "firmware wait failed, initial\n");
		error = ETIMEDOUT;
		goto fail;
	}

	secthead = (struct txp_fw_section_header *)(((uint8_t *)tc990image) +
	    sizeof(struct txp_fw_file_header));

	for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
		if ((error = txp_download_fw_section(sc, secthead, sect)) != 0)
			goto fail;
		secthead = (struct txp_fw_section_header *)
		    (((uint8_t *)secthead) + le32toh(secthead->nbytes) +
		    sizeof(*secthead));
	}

	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	if (txp_wait(sc, STAT_WAITING_FOR_BOOT) != 0) {
		device_printf(sc->sc_dev, "not waiting for boot\n");
		error = ETIMEDOUT;
	}

fail:
	WRITE_REG(sc, TXP_IER, ier);
	WRITE_REG(sc, TXP_IMR, imr);

	return (error);
}
static int
txp_download_fw_wait(struct txp_softc *sc)
{
	uint32_t i;

	for (i = 0; i < TXP_TIMEOUT; i++) {
		if ((READ_REG(sc, TXP_ISR) & TXP_INT_A2H_0) != 0)
			break;
		DELAY(50);
	}

	if (i == TXP_TIMEOUT) {
		device_printf(sc->sc_dev, "firmware wait failed comm0\n");
		return (ETIMEDOUT);
	}

	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	if (READ_REG(sc, TXP_A2H_0) != STAT_WAITING_FOR_SEGMENT) {
		device_printf(sc->sc_dev, "firmware not waiting for segment\n");
		return (EINVAL);
	}

	return (0);
}
static int
txp_download_fw_section(struct txp_softc *sc,
    struct txp_fw_section_header *sect, int sectnum)
{
	bus_dma_tag_t sec_tag;
	bus_dmamap_t sec_map;
	bus_addr_t sec_paddr;
	uint8_t *sec_buf;
	struct mbuf m;
	uint16_t csum;
	int rseg, err;

	/* Skip zero length sections. */
	if (le32toh(sect->nbytes) == 0)
		return (0);

	/* Make sure we aren't past the end of the image. */
	rseg = ((uint8_t *)sect) - ((uint8_t *)tc990image);
	if (rseg >= sizeof(tc990image)) {
		device_printf(sc->sc_dev,
		    "firmware invalid section address, section %d\n", sectnum);
		return (EIO);
	}

	/* Make sure this section doesn't go past the end. */
	rseg += le32toh(sect->nbytes);
	if (rseg >= sizeof(tc990image)) {
		device_printf(sc->sc_dev, "firmware truncated section %d\n",
		    sectnum);
		return (EIO);
	}

	sec_tag = NULL;
	sec_map = NULL;
	sec_buf = NULL;
	err = txp_dma_alloc(sc, "firmware sections", &sec_tag, sizeof(uint32_t),
	    0, &sec_map, (void **)&sec_buf, le32toh(sect->nbytes), &sec_paddr);
	if (err != 0)
		return (err);
	bcopy(((uint8_t *)sect) + sizeof(*sect), sec_buf,
	    le32toh(sect->nbytes));

	/*
	 * Dummy up an mbuf and verify the section checksum.
	 */
	m.m_type = MT_DATA;
	m.m_next = m.m_nextpkt = NULL;
	m.m_len = le32toh(sect->nbytes);
	m.m_data = (caddr_t)sec_buf;
	m.m_flags = 0;
	csum = in_cksum(&m, le32toh(sect->nbytes));
	if (csum != sect->cksum) {
		device_printf(sc->sc_dev,
		    "firmware section %d, bad cksum (expected 0x%x got 0x%x)\n",
		    sectnum, le16toh(sect->cksum), csum);
		err = EIO;
		goto bail;
	}

	bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_PREWRITE);

	WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
	TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_2, le16toh(sect->cksum));
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
	TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_4, TXP_ADDR_HI(sec_paddr));
	TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_5, TXP_ADDR_LO(sec_paddr));
	TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);

	if (txp_download_fw_wait(sc)) {
		device_printf(sc->sc_dev,
		    "firmware wait failed, section %d\n", sectnum);
		err = ETIMEDOUT;
	}

	bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_POSTWRITE);

bail:
	txp_dma_free(sc, &sec_tag, sec_map, (void **)&sec_buf, &sec_paddr);

	return (err);
}
static int
txp_intr(void *vsc)
{
	struct txp_softc *sc;
	uint32_t status;

	sc = vsc;
	status = READ_REG(sc, TXP_ISR);
	if ((status & TXP_INT_LATCH) == 0)
		return (FILTER_STRAY);
	WRITE_REG(sc, TXP_ISR, status);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);

	return (FILTER_HANDLED);
}
static void
txp_int_task(void *arg, int pending)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	struct txp_hostvar *hv;
	uint32_t isr;
	int more;

	sc = (struct txp_softc *)arg;

	TXP_LOCK(sc);
	ifp = sc->sc_ifp;
	hv = sc->sc_hostvar;
	more = 0;
	isr = READ_REG(sc, TXP_ISR);
	if ((isr & TXP_INT_LATCH) != 0)
		WRITE_REG(sc, TXP_ISR, isr);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
		    sc->sc_cdata.txp_hostvar_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
			more += txp_rx_reclaim(sc, &sc->sc_rxhir,
			    sc->sc_process_limit);
		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
			more += txp_rx_reclaim(sc, &sc->sc_rxlor,
			    sc->sc_process_limit);
		/*
		 * The controller does not seem smart enough to handle
		 * FIFO overflow under heavy network load; no matter how
		 * often new Rx buffers are passed to the controller the
		 * situation does not change.  Flow control might be the
		 * only way to mitigate the issue, but the firmware has
		 * no command to control the threshold for emitting pause
		 * frames.
		 */
		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
			txp_rxbuf_reclaim(sc);
		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txhir);
		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txlor);
		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
		    sc->sc_cdata.txp_hostvar_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			txp_start_locked(sc->sc_ifp);
		if (more != 0 || (READ_REG(sc, TXP_ISR) & TXP_INT_LATCH) != 0) {
			taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
			TXP_UNLOCK(sc);
			return;
		}
	}

	/* Re-enable interrupts. */
	WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
	TXP_UNLOCK(sc);
}
#ifndef __NO_STRICT_ALIGNMENT
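/*
 * Rx buffers are loaded at a TXP_RXBUF_ALIGN byte offset, so copy the
 * received frame back by TXP_RXBUF_ALIGN - ETHER_ALIGN bytes to get the
 * IP header 32-bit aligned for the stack (see firmware issue 4 above).
 */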
static __inline void
txp_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - (TXP_RXBUF_ALIGN - ETHER_ALIGN) / sizeof *src;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= TXP_RXBUF_ALIGN - ETHER_ALIGN;
}
#endif
static int
txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r, int count)
{
	struct ifnet *ifp;
	struct txp_rx_desc *rxd;
	struct mbuf *m;
	struct txp_rx_swdesc *sd;
	uint32_t roff, woff, rx_stat, prog;

	TXP_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	roff = le32toh(*r->r_roff);
	woff = le32toh(*r->r_woff);
	rxd = r->r_desc + roff / sizeof(struct txp_rx_desc);
	for (prog = 0; roff != woff; prog++, count--) {
		if (count <= 0)
			break;
		bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));
		KASSERT(sd != NULL, ("%s: Rx desc ring corrupted", __func__));
		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
		m = sd->sd_mbuf;
		KASSERT(m != NULL, ("%s: Rx buffer ring corrupted", __func__));
		sd->sd_mbuf = NULL;
		TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
		if ((rxd->rx_flags & RX_FLAGS_ERROR) != 0) {
			if (bootverbose)
				device_printf(sc->sc_dev, "Rx error %u\n",
				    le32toh(rxd->rx_stat) & RX_ERROR_MASK);
			m_freem(m);
			goto next_descr;
		}

		m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);
		m->m_pkthdr.rcvif = ifp;
#ifndef __NO_STRICT_ALIGNMENT
		txp_fixup_rx(m);
#endif
		rx_stat = le32toh(rxd->rx_stat);
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((rx_stat & RX_STAT_IPCKSUMBAD) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			else if ((rx_stat & RX_STAT_IPCKSUMGOOD) != 0)
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID;
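			/*
			 * The controller only reports a "good" bit for
			 * TCP/UDP; csum_data = 0xffff tells the stack the
			 * pseudo-header checksum was verified as well.
			 */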
			if ((rx_stat & RX_STAT_TCPCKSUMGOOD) != 0 ||
			    (rx_stat & RX_STAT_UDPCKSUMGOOD) != 0) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/*
		 * The Typhoon firmware has a bug whereby the VLAN tag is
		 * always stripped out, even if it is told not to remove
		 * the tag.  Therefore don't check if_capenable here.
		 */
		if (/* (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && */
		    (rx_stat & RX_STAT_VLAN) != 0) {
			m->m_pkthdr.ether_vtag =
			    bswap16((le32toh(rxd->rx_vlan) >> 16));
			m->m_flags |= M_VLANTAG;
		}

		TXP_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		TXP_LOCK(sc);

next_descr:
		roff += sizeof(struct txp_rx_desc);
		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
			roff = 0;
			rxd = r->r_desc;
		} else
			rxd++;
	}

	if (prog == 0)
		return (count > 0 ? 0 : EAGAIN);

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	*r->r_roff = htole32(roff);

	return (count > 0 ? 0 : EAGAIN);
}
static void
txp_rxbuf_reclaim(struct txp_softc *sc)
{
	struct txp_hostvar *hv;
	struct txp_rxbuf_desc *rbd;
	struct txp_rx_swdesc *sd;
	bus_dma_segment_t segs[1];
	int nsegs, prod, prog;
	uint32_t cons;

	TXP_LOCK_ASSERT(sc);

	hv = sc->sc_hostvar;
	cons = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
	prod = sc->sc_rxbufprod;
	TXP_DESC_INC(prod, RXBUF_ENTRIES);
	if (prod == cons)
		return;

	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
	    sc->sc_cdata.txp_rxbufs_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prod != cons; prog++) {
		sd = TAILQ_FIRST(&sc->sc_free_list);
		if (sd == NULL)
			break;
		rbd = sc->sc_rxbufs + prod;
		bcopy((u_long *)&rbd->rb_vaddrlo, &sd, sizeof(sd));
		sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (sd->sd_mbuf == NULL)
			break;
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
#ifndef __NO_STRICT_ALIGNMENT
		m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
#endif
		if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
		    sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0) != 0) {
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
			break;
		}
		KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
		    nsegs));
		TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
		TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
		    BUS_DMASYNC_PREREAD);
		rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
		rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
		TXP_DESC_INC(prod, RXBUF_ENTRIES);
	}

	if (prog == 0)
		return;
	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
	    sc->sc_cdata.txp_rxbufs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
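	/*
	 * The loop advanced prod one past the last filled descriptor;
	 * step it back so the write index points at a valid entry.
	 */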
	prod = (prod + RXBUF_ENTRIES - 1) % RXBUF_ENTRIES;
	sc->sc_rxbufprod = prod;
	hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(prod));
}
/*
 * Reclaim mbufs and entries from a transmit ring.
 */
static void
txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r)
{
	struct ifnet *ifp;
	uint32_t idx, cons, cnt;
	struct txp_tx_desc *txd;
	struct txp_swdesc *sd;

	TXP_LOCK_ASSERT(sc);

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	ifp = sc->sc_ifp;
	idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
	cons = r->r_cons;
	txd = r->r_desc + cons;
	sd = sc->sc_txd + cons;

	for (cnt = r->r_cnt; cons != idx && cnt > 0; cnt--) {
		if ((txd->tx_flags & TX_FLAGS_TYPE_M) == TX_FLAGS_TYPE_DATA) {
			if (sd->sd_mbuf != NULL) {
				bus_dmamap_sync(sc->sc_cdata.txp_tx_tag,
				    sd->sd_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->sc_cdata.txp_tx_tag,
				    sd->sd_map);
				m_freem(sd->sd_mbuf);
				sd->sd_mbuf = NULL;
			}
		}
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		if (++cons == TX_ENTRIES) {
			txd = r->r_desc;
			cons = 0;
			sd = sc->sc_txd;
		} else {
			txd++;
			sd++;
		}
	}

	bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	r->r_cons = cons;
	r->r_cnt = cnt;
	if (cnt == 0)
		sc->sc_watchdog_timer = 0;
}
static int
txp_shutdown(device_t dev)
{

	return (txp_suspend(dev));
}

static int
txp_suspend(device_t dev)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	uint8_t *eaddr;
	uint16_t p1;
	uint32_t p2;
	int pmc;
	uint16_t pmstat;

	sc = device_get_softc(dev);

	TXP_LOCK(sc);
	ifp = sc->sc_ifp;
	txp_stop(sc);

	/* Reset controller and make it reload sleep image. */
	txp_reset(sc);
	/* Let controller boot from sleep image. */
	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
		device_printf(sc->sc_dev, "couldn't boot sleep image\n");

	/* Set station address. */
	eaddr = IF_LLADDR(sc->sc_ifp);
	p1 = 0;
	((uint8_t *)&p1)[1] = eaddr[0];
	((uint8_t *)&p1)[0] = eaddr[1];
	p1 = le16toh(p1);
	((uint8_t *)&p2)[3] = eaddr[2];
	((uint8_t *)&p2)[2] = eaddr[3];
	((uint8_t *)&p2)[1] = eaddr[4];
	((uint8_t *)&p2)[0] = eaddr[5];
	p2 = le32toh(p2);
	txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0, NULL, NULL,
	    NULL, TXP_CMD_WAIT);

	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
	txp_sleep(sc, sc->sc_ifp->if_capenable);
	if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
		/* Request PME. */
		pmstat = pci_read_config(sc->sc_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((ifp->if_capenable & IFCAP_WOL) != 0)
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->sc_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
	TXP_UNLOCK(sc);

	return (0);
}
static int
txp_resume(device_t dev)
{
	struct txp_softc *sc;
	int pmc;
	uint16_t pmstat;

	sc = device_get_softc(dev);

	TXP_LOCK(sc);
	if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->sc_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->sc_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	if ((sc->sc_ifp->if_flags & IFF_UP) != 0)
		txp_init_locked(sc);
	TXP_UNLOCK(sc);

	return (0);
}
struct txp_dmamap_arg {
	bus_addr_t txp_busaddr;
};

static void
txp_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct txp_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct txp_dmamap_arg *)arg;
	ctx->txp_busaddr = segs[0].ds_addr;
}
static int
txp_dma_alloc(struct txp_softc *sc, char *type, bus_dma_tag_t *tag,
    bus_size_t alignment, bus_size_t boundary, bus_dmamap_t *map, void **buf,
    bus_size_t size, bus_addr_t *paddr)
{
	struct txp_dmamap_arg ctx;
	int error;

	/* Create DMA block tag. */
	error = bus_dma_tag_create(
	    sc->sc_cdata.txp_parent_tag,	/* parent */
	    alignment, boundary,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not create DMA tag for %s.\n", type);
		return (error);
	}

	/* Allocate DMA'able memory and load the DMA map. */
	error = bus_dmamem_alloc(*tag, buf, BUS_DMA_WAITOK | BUS_DMA_ZERO |
	    BUS_DMA_COHERENT, map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate DMA'able memory for %s.\n", type);
		return (error);
	}

	ctx.txp_busaddr = 0;
	error = bus_dmamap_load(*tag, *map, *(uint8_t **)buf,
	    size, txp_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.txp_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "could not load DMA'able memory for %s.\n", type);
		return (error);
	}
	*paddr = ctx.txp_busaddr;

	return (0);
}

static void
txp_dma_free(struct txp_softc *sc, bus_dma_tag_t *tag, bus_dmamap_t map,
    void **buf, bus_addr_t *paddr)
{

	if (*paddr != 0)
		bus_dmamap_unload(*tag, map);
	if (*buf != NULL)
		bus_dmamem_free(*tag, *(uint8_t **)buf, map);
	*(uint8_t **)buf = NULL;
	if (*tag != NULL)
		bus_dma_tag_destroy(*tag);
	*tag = NULL;
	*paddr = 0;
}
static int
txp_alloc_rings(struct txp_softc *sc)
{
	struct txp_boot_record *boot;
	struct txp_ldata *ld;
	struct txp_swdesc *txd;
	struct txp_rxbuf_desc *rbd;
	struct txp_rx_swdesc *sd;
	int error, i;

	ld = &sc->sc_ldata;
	boot = ld->txp_boot;

	/*
	 * Create parent ring/DMA block tag.
	 * The datasheet says that all ring addresses and descriptors
	 * support 64-bit addressing.  However, the controller is known
	 * to have no DAC support, so limit the DMA address space to
	 * 32 bits.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->sc_dev),	/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.txp_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create parent DMA tag.\n");
		return (error);
	}

	/* Boot record. */
	error = txp_dma_alloc(sc, "boot record",
	    &sc->sc_cdata.txp_boot_tag, sizeof(uint32_t), 0,
	    &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
	    sizeof(struct txp_boot_record),
	    &sc->sc_ldata.txp_boot_paddr);
	if (error != 0)
		return (error);
	boot = sc->sc_ldata.txp_boot;

	/* Host variables. */
	error = txp_dma_alloc(sc, "host variables",
	    &sc->sc_cdata.txp_hostvar_tag, sizeof(uint32_t), 0,
	    &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
	    sizeof(struct txp_hostvar),
	    &sc->sc_ldata.txp_hostvar_paddr);
	if (error != 0)
		return (error);
	boot->br_hostvar_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_hostvar_paddr));
	boot->br_hostvar_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_hostvar_paddr));
	sc->sc_hostvar = sc->sc_ldata.txp_hostvar;

	/* Hi priority tx ring. */
	error = txp_dma_alloc(sc, "hi priority tx ring",
	    &sc->sc_cdata.txp_txhiring_tag, sizeof(struct txp_tx_desc), 0,
	    &sc->sc_cdata.txp_txhiring_map, (void **)&sc->sc_ldata.txp_txhiring,
	    sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_ldata.txp_txhiring_paddr);
	if (error != 0)
		return (error);
	boot->br_txhipri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txhiring_paddr));
	boot->br_txhipri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txhiring_paddr));
	boot->br_txhipri_siz =
	    htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
	sc->sc_txhir.r_tag = sc->sc_cdata.txp_txhiring_tag;
	sc->sc_txhir.r_map = sc->sc_cdata.txp_txhiring_map;
	sc->sc_txhir.r_reg = TXP_H2A_1;
	sc->sc_txhir.r_desc = sc->sc_ldata.txp_txhiring;
	sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
	sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;

	/* Low priority tx ring. */
	error = txp_dma_alloc(sc, "low priority tx ring",
	    &sc->sc_cdata.txp_txloring_tag, sizeof(struct txp_tx_desc), 0,
	    &sc->sc_cdata.txp_txloring_map, (void **)&sc->sc_ldata.txp_txloring,
	    sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_ldata.txp_txloring_paddr);
	if (error != 0)
		return (error);
	boot->br_txlopri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txloring_paddr));
	boot->br_txlopri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txloring_paddr));
	boot->br_txlopri_siz =
	    htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
	sc->sc_txlor.r_tag = sc->sc_cdata.txp_txloring_tag;
	sc->sc_txlor.r_map = sc->sc_cdata.txp_txloring_map;
	sc->sc_txlor.r_reg = TXP_H2A_3;
	sc->sc_txlor.r_desc = sc->sc_ldata.txp_txloring;
	sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
	sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;

	/* High priority rx ring. */
	error = txp_dma_alloc(sc, "hi priority rx ring",
	    &sc->sc_cdata.txp_rxhiring_tag,
	    roundup(sizeof(struct txp_rx_desc), 16), 0,
	    &sc->sc_cdata.txp_rxhiring_map, (void **)&sc->sc_ldata.txp_rxhiring,
	    sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_ldata.txp_rxhiring_paddr);
	if (error != 0)
		return (error);
	boot->br_rxhipri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxhiring_paddr));
	boot->br_rxhipri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxhiring_paddr));
	boot->br_rxhipri_siz =
	    htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
	sc->sc_rxhir.r_tag = sc->sc_cdata.txp_rxhiring_tag;
	sc->sc_rxhir.r_map = sc->sc_cdata.txp_rxhiring_map;
	sc->sc_rxhir.r_desc = sc->sc_ldata.txp_rxhiring;
	sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
	sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;

	/* Low priority rx ring. */
	error = txp_dma_alloc(sc, "low priority rx ring",
	    &sc->sc_cdata.txp_rxloring_tag,
	    roundup(sizeof(struct txp_rx_desc), 16), 0,
	    &sc->sc_cdata.txp_rxloring_map, (void **)&sc->sc_ldata.txp_rxloring,
	    sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_ldata.txp_rxloring_paddr);
	if (error != 0)
		return (error);
	boot->br_rxlopri_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxloring_paddr));
	boot->br_rxlopri_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxloring_paddr));
	boot->br_rxlopri_siz =
	    htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
	sc->sc_rxlor.r_tag = sc->sc_cdata.txp_rxloring_tag;
	sc->sc_rxlor.r_map = sc->sc_cdata.txp_rxloring_map;
	sc->sc_rxlor.r_desc = sc->sc_ldata.txp_rxloring;
	sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
	sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
	/* Command ring. */
	error = txp_dma_alloc(sc, "command ring",
	    &sc->sc_cdata.txp_cmdring_tag, sizeof(struct txp_cmd_desc), 0,
	    &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
	    sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
	    &sc->sc_ldata.txp_cmdring_paddr);
	if (error != 0)
		return (error);
	boot->br_cmd_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_cmdring_paddr));
	boot->br_cmd_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_cmdring_paddr));
	boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
	sc->sc_cmdring.base = sc->sc_ldata.txp_cmdring;
	sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
	sc->sc_cmdring.lastwrite = 0;

	/* Response ring. */
	error = txp_dma_alloc(sc, "response ring",
	    &sc->sc_cdata.txp_rspring_tag, sizeof(struct txp_rsp_desc), 0,
	    &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
	    sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
	    &sc->sc_ldata.txp_rspring_paddr);
	if (error != 0)
		return (error);
	boot->br_resp_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rspring_paddr));
	boot->br_resp_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rspring_paddr));
	boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc));
	sc->sc_rspring.base = sc->sc_ldata.txp_rspring;
	sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
	sc->sc_rspring.lastwrite = 0;

	/* Receive buffer ring. */
	error = txp_dma_alloc(sc, "receive buffer ring",
	    &sc->sc_cdata.txp_rxbufs_tag, sizeof(struct txp_rxbuf_desc), 0,
	    &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
	    sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
	    &sc->sc_ldata.txp_rxbufs_paddr);
	if (error != 0)
		return (error);
	boot->br_rxbuf_lo =
	    htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxbufs_paddr));
	boot->br_rxbuf_hi =
	    htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxbufs_paddr));
	boot->br_rxbuf_siz =
	    htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
	sc->sc_rxbufs = sc->sc_ldata.txp_rxbufs;

	/* Zero buffer. */
	error = txp_dma_alloc(sc, "zero buffer",
	    &sc->sc_cdata.txp_zero_tag, sizeof(uint32_t), 0,
	    &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
	    sizeof(uint32_t), &sc->sc_ldata.txp_zero_paddr);
	if (error != 0)
		return (error);
	boot->br_zero_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_zero_paddr));
	boot->br_zero_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_zero_paddr));

	bus_dmamap_sync(sc->sc_cdata.txp_boot_tag, sc->sc_cdata.txp_boot_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* Create Tx buffers. */
	error = bus_dma_tag_create(
	    sc->sc_cdata.txp_parent_tag,	/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * TXP_MAXTXSEGS,	/* maxsize */
	    TXP_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.txp_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create Tx DMA tag.\n");
		return (error);
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->sc_cdata.txp_parent_tag,	/* parent */
	    TXP_RXBUF_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.txp_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create Rx DMA tag.\n");
		return (error);
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < TX_ENTRIES; i++) {
		txd = &sc->sc_txd[i];
		txd->sd_mbuf = NULL;
		txd->sd_map = NULL;
		error = bus_dmamap_create(sc->sc_cdata.txp_tx_tag, 0,
		    &txd->sd_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create Tx dmamap.\n");
			return (error);
		}
	}

	/* Create DMA maps for Rx buffers. */
	for (i = 0; i < RXBUF_ENTRIES; i++) {
		sd = malloc(sizeof(struct txp_rx_swdesc), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (sd == NULL)
			return (ENOMEM);
		/*
		 * The virtual address part of the descriptor is not used
		 * by the hardware, so use it to save a ring entry.  The
		 * bcopy is needed because the stored address would not
		 * otherwise be valid on big-endian architectures.
		 */
		rbd = sc->sc_rxbufs + i;
		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
		sd->sd_mbuf = NULL;
		sd->sd_map = NULL;
		error = bus_dmamap_create(sc->sc_cdata.txp_rx_tag, 0,
		    &sd->sd_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create Rx dmamap.\n");
			return (error);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
	}

	return (0);
}
static void
txp_init_rings(struct txp_softc *sc)
{

	bzero(sc->sc_ldata.txp_hostvar, sizeof(struct txp_hostvar));
	bzero(sc->sc_ldata.txp_zero, sizeof(uint32_t));
	sc->sc_txhir.r_cons = 0;
	sc->sc_txhir.r_prod = 0;
	sc->sc_txhir.r_cnt = 0;
	sc->sc_txlor.r_cons = 0;
	sc->sc_txlor.r_prod = 0;
	sc->sc_txlor.r_cnt = 0;
	sc->sc_cmdring.lastwrite = 0;
	sc->sc_rspring.lastwrite = 0;
	sc->sc_rxbufprod = 0;
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static int
txp_wait(struct txp_softc *sc, uint32_t state)
{
	uint32_t reg;
	int i;

	for (i = 0; i < TXP_TIMEOUT; i++) {
		reg = READ_REG(sc, TXP_A2H_0);
		if (reg == state)
			break;
		DELAY(50);
	}

	return (i == TXP_TIMEOUT ? ETIMEDOUT : 0);
}
static void
txp_free_rings(struct txp_softc *sc)
{
	struct txp_swdesc *txd;
	struct txp_rx_swdesc *sd;
	int i;

	/* Tx buffers. */
	if (sc->sc_cdata.txp_tx_tag != NULL) {
		for (i = 0; i < TX_ENTRIES; i++) {
			txd = &sc->sc_txd[i];
			if (txd->sd_map != NULL) {
				bus_dmamap_destroy(sc->sc_cdata.txp_tx_tag,
				    txd->sd_map);
				txd->sd_map = NULL;
			}
		}
		bus_dma_tag_destroy(sc->sc_cdata.txp_tx_tag);
		sc->sc_cdata.txp_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->sc_cdata.txp_rx_tag != NULL) {
		if (sc->sc_rxbufs != NULL) {
			KASSERT(TAILQ_FIRST(&sc->sc_busy_list) == NULL,
			    ("%s : still have busy Rx buffers", __func__));
			while ((sd = TAILQ_FIRST(&sc->sc_free_list)) != NULL) {
				TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
				if (sd->sd_map != NULL) {
					bus_dmamap_destroy(
					    sc->sc_cdata.txp_rx_tag,
					    sd->sd_map);
					sd->sd_map = NULL;
				}
				free(sd, M_DEVBUF);
			}
		}
		bus_dma_tag_destroy(sc->sc_cdata.txp_rx_tag);
		sc->sc_cdata.txp_rx_tag = NULL;
	}

	/* Hi priority Tx ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_txhiring_tag,
	    sc->sc_cdata.txp_txhiring_map,
	    (void **)&sc->sc_ldata.txp_txhiring,
	    &sc->sc_ldata.txp_txhiring_paddr);
	/* Low priority Tx ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_txloring_tag,
	    sc->sc_cdata.txp_txloring_map,
	    (void **)&sc->sc_ldata.txp_txloring,
	    &sc->sc_ldata.txp_txloring_paddr);
	/* Hi priority Rx ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_rxhiring_tag,
	    sc->sc_cdata.txp_rxhiring_map,
	    (void **)&sc->sc_ldata.txp_rxhiring,
	    &sc->sc_ldata.txp_rxhiring_paddr);
	/* Low priority Rx ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_rxloring_tag,
	    sc->sc_cdata.txp_rxloring_map,
	    (void **)&sc->sc_ldata.txp_rxloring,
	    &sc->sc_ldata.txp_rxloring_paddr);
	/* Receive buffer ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_rxbufs_tag,
	    sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
	    &sc->sc_ldata.txp_rxbufs_paddr);
	/* Command ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_cmdring_tag,
	    sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
	    &sc->sc_ldata.txp_cmdring_paddr);
	/* Response ring. */
	txp_dma_free(sc, &sc->sc_cdata.txp_rspring_tag,
	    sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
	    &sc->sc_ldata.txp_rspring_paddr);
	/* Zero buffer. */
	txp_dma_free(sc, &sc->sc_cdata.txp_zero_tag,
	    sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
	    &sc->sc_ldata.txp_zero_paddr);
	/* Host variables. */
	txp_dma_free(sc, &sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
	    &sc->sc_ldata.txp_hostvar_paddr);
	/* Boot record. */
	txp_dma_free(sc, &sc->sc_cdata.txp_boot_tag,
	    sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
	    &sc->sc_ldata.txp_boot_paddr);

	if (sc->sc_cdata.txp_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->sc_cdata.txp_parent_tag);
		sc->sc_cdata.txp_parent_tag = NULL;
	}
}
static int
txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int capenable, error = 0, mask;

	switch (command) {
	case SIOCSIFFLAGS:
		TXP_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->sc_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					txp_set_filter(sc);
			} else {
				if ((sc->sc_flags & TXP_FLAG_DETACH) == 0)
					txp_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				txp_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		TXP_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * The multicast list has changed; set the hardware
		 * filter accordingly.
		 */
		TXP_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			txp_set_filter(sc);
		TXP_UNLOCK(sc);
		break;
	case SIOCSIFCAP:
		TXP_LOCK(sc);
		capenable = ifp->if_capenable;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= TXP_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~TXP_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
		if (capenable != ifp->if_capenable)
			txp_set_capabilities(sc);
		TXP_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
static int
txp_rxring_fill(struct txp_softc *sc)
{
	struct txp_rxbuf_desc *rbd;
	struct txp_rx_swdesc *sd;
	bus_dma_segment_t segs[1];
	int error, i, nsegs;

	TXP_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
	    sc->sc_cdata.txp_rxbufs_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < RXBUF_ENTRIES; i++) {
		sd = TAILQ_FIRST(&sc->sc_free_list);
		if (sd == NULL)
			return (ENOMEM);
		rbd = sc->sc_rxbufs + i;
		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
		KASSERT(sd->sd_mbuf == NULL,
		    ("%s : Rx buffer ring corrupted", __func__));
		sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (sd->sd_mbuf == NULL)
			return (ENOMEM);
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
#ifndef __NO_STRICT_ALIGNMENT
		m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
#endif
		if ((error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
		    sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0)) != 0) {
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
			return (error);
		}
		KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
		    nsegs));
		TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
		TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
		    BUS_DMASYNC_PREREAD);
		rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
		rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
	}

	bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
	    sc->sc_cdata.txp_rxbufs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
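	/*
	 * All RXBUF_ENTRIES descriptors are now valid; point the write
	 * index at the last filled entry.
	 */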
	sc->sc_rxbufprod = RXBUF_ENTRIES - 1;
	sc->sc_hostvar->hv_rx_buf_write_idx =
	    htole32(TXP_IDX2OFFSET(RXBUF_ENTRIES - 1));

	return (0);
}
static void
txp_rxring_empty(struct txp_softc *sc)
{
	struct txp_rx_swdesc *sd;

	TXP_LOCK_ASSERT(sc);

	if (sc->sc_rxbufs == NULL)
		return;
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Release allocated Rx buffers. */
	while ((sd = TAILQ_FIRST(&sc->sc_busy_list)) != NULL) {
		TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
		KASSERT(sd->sd_mbuf != NULL,
		    ("%s : Rx buffer ring corrupted", __func__));
		bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
		m_freem(sd->sd_mbuf);
		sd->sd_mbuf = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
	}
}
static void
txp_init(void *xsc)
{
	struct txp_softc *sc;

	sc = xsc;
	TXP_LOCK(sc);
	txp_init_locked(sc);
	TXP_UNLOCK(sc);
}

static void
txp_init_locked(struct txp_softc *sc)
{
	struct ifnet *ifp;
	uint8_t *eaddr;
	uint16_t p1;
	uint32_t p2;
	int error;

	TXP_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/* Initialize ring structure. */
	txp_init_rings(sc);
	/* Wakeup controller. */
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_WAKEUP);
	TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
	/*
	 * It seems that earlier NV images can come back online from the
	 * wakeup command, but newer ones require a controller reset.
	 * So just reset the controller again.
	 */
	if (txp_reset(sc) != 0)
		goto init_fail;
	/* Download firmware. */
	error = txp_download_fw(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not download firmware.\n");
		goto init_fail;
	}
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	if ((error = txp_rxring_fill(sc)) != 0) {
		device_printf(sc->sc_dev, "no memory for Rx buffers.\n");
		goto init_fail;
	}
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (txp_boot(sc, STAT_WAITING_FOR_BOOT) != 0) {
		device_printf(sc->sc_dev, "could not boot firmware.\n");
		goto init_fail;
	}

	/*
	 * Quite contrary to the Typhoon T2 software functional
	 * specification, the TXP_CMD_RECV_BUFFER_CONTROL command seems
	 * not to be implemented in the firmware.  This means the driver
	 * has to handle misaligned frames on strict-alignment
	 * architectures.  AFAIK this is the only controller manufactured
	 * by 3Com that has this bug.  3Com should fix this.
	 */
	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
		goto init_fail;
	/* Undocumented command (interrupt coalescing disable?), from Linux. */
	if (txp_command(sc, TXP_CMD_FILTER_DEFINE, 0, 0, 0, NULL, NULL, NULL,
	    TXP_CMD_NOWAIT) != 0)
		goto init_fail;

	/* Set station address. */
	eaddr = IF_LLADDR(sc->sc_ifp);
	p1 = 0;
	((uint8_t *)&p1)[1] = eaddr[0];
	((uint8_t *)&p1)[0] = eaddr[1];
	p1 = le16toh(p1);
	((uint8_t *)&p2)[3] = eaddr[2];
	((uint8_t *)&p2)[2] = eaddr[3];
	((uint8_t *)&p2)[1] = eaddr[4];
	((uint8_t *)&p2)[0] = eaddr[5];
	p2 = le32toh(p2);
	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
		goto init_fail;

	txp_set_filter(sc);
	txp_set_capabilities(sc);

	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT))
		goto init_fail;
	if (txp_command(sc, TXP_CMD_XCVR_SELECT, sc->sc_xcvr, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
		goto init_fail;
	if (txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
	    TXP_CMD_NOWAIT) != 0)
		goto init_fail;
	if (txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
	    TXP_CMD_NOWAIT) != 0)
		goto init_fail;

	/* Ack all pending interrupts and enable interrupts. */
	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
	WRITE_REG(sc, TXP_IER, TXP_INTRS);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->sc_tick, hz, txp_tick, sc);
	return;

init_fail:
	txp_rxring_empty(sc);
	txp_init_rings(sc);
	txp_reset(sc);
	WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
}
static void
txp_tick(void *vsc)
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	struct txp_rsp_desc *rsp;
	struct txp_ext_desc *ext;
	int link;

	sc = vsc;
	TXP_LOCK_ASSERT(sc);
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	txp_rxbuf_reclaim(sc);
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ifp = sc->sc_ifp;
	rsp = NULL;

	link = sc->sc_flags & TXP_FLAG_LINK;
	if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
	    &rsp, TXP_CMD_WAIT))
		goto out;
	if (rsp->rsp_numdesc != 6)
		goto out;
	txp_stats_update(sc, rsp);
	if (link == 0 && (sc->sc_flags & TXP_FLAG_LINK) != 0) {
		ext = (struct txp_ext_desc *)(rsp + 1);
		/* Update baudrate with resolved speed. */
		if ((ext[5].ext_2 & 0x02) != 0)
			ifp->if_baudrate = IF_Mbps(100);
		else
			ifp->if_baudrate = IF_Mbps(10);
	}

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);
	txp_watchdog(sc);
	callout_reset(&sc->sc_tick, hz, txp_tick, sc);
}
static void
txp_start(struct ifnet *ifp)
{
	struct txp_softc *sc;

	sc = ifp->if_softc;
	TXP_LOCK(sc);
	txp_start_locked(ifp);
	TXP_UNLOCK(sc);
}

static void
txp_start_locked(struct ifnet *ifp)
{
	struct txp_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;
	TXP_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->sc_flags & TXP_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 * At the moment only the high priority ring is used.
		 */
		if (txp_encap(sc, &sc->sc_txhir, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to it.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Send queued frame. */
		WRITE_REG(sc, sc->sc_txhir.r_reg,
		    TXP_IDX2OFFSET(sc->sc_txhir.r_prod));
		/* Set a timeout in case the chip goes out to lunch. */
		sc->sc_watchdog_timer = TXP_TX_TIMEOUT;
	}
}
static int
txp_encap(struct txp_softc *sc, struct txp_tx_ring *r, struct mbuf **m_head)
{
	struct txp_tx_desc *first_txd;
	struct txp_frag_desc *fxd;
	struct txp_swdesc *sd;
	struct mbuf *m;
	bus_dma_segment_t txsegs[TXP_MAXTXSEGS];
	int error, i, nsegs;

	TXP_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	m = *m_head;
	first_txd = r->r_desc + r->r_prod;
	sd = sc->sc_txd + r->r_prod;

	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag, sd->sd_map,
	    *m_head, txsegs, &nsegs, 0);
2118 if (error == EFBIG) {
2119 m = m_collapse(*m_head, M_NOWAIT, TXP_MAXTXSEGS);
2126 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag,
2127 sd->sd_map, *m_head, txsegs, &nsegs, 0);
2133 } else if (error != 0)
2141 /* Check descriptor overrun. */
2142 if (r->r_cnt + nsegs >= TX_ENTRIES - TXP_TXD_RESERVED) {
2143 bus_dmamap_unload(sc->sc_cdata.txp_tx_tag, sd->sd_map);
2146 bus_dmamap_sync(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2147 BUS_DMASYNC_PREWRITE);
2150 first_txd->tx_flags = TX_FLAGS_TYPE_DATA;
2151 first_txd->tx_numdesc = 0;
2152 first_txd->tx_addrlo = 0;
2153 first_txd->tx_addrhi = 0;
2154 first_txd->tx_totlen = 0;
2155 first_txd->tx_pflags = 0;
2157 TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2159 /* Configure Tx IP/TCP/UDP checksum offload. */
2160 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2161 first_txd->tx_pflags |= htole32(TX_PFLAGS_IPCKSUM);
2163 /* XXX firmware bug. */
2164 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2165 first_txd->tx_pflags |= htole32(TX_PFLAGS_TCPCKSUM);
2166 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2167 first_txd->tx_pflags |= htole32(TX_PFLAGS_UDPCKSUM);
2170 /* Configure VLAN hardware tag insertion. */
2171 if ((m->m_flags & M_VLANTAG) != 0)
2172 first_txd->tx_pflags |=
2173 htole32(TX_PFLAGS_VLAN | TX_PFLAGS_PRIO |
2174 (bswap16(m->m_pkthdr.ether_vtag) << TX_PFLAGS_VLANTAG_S));
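	/*
	 * Illustrative encoding: for VLAN id 5, priority 0, ether_vtag is
	 * 0x0005 and bswap16() yields 0x0500, so the tag is stored byte
	 * swapped before being shifted into place by TX_PFLAGS_VLANTAG_S;
	 * the firmware apparently wants the TCI in network byte order.
	 */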

	for (i = 0; i < nsegs; i++) {
		fxd = (struct txp_frag_desc *)(r->r_desc + r->r_prod);
		fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG | TX_FLAGS_VALID;
		fxd->frag_rsvd1 = 0;
		fxd->frag_len = htole16(txsegs[i].ds_len);
		fxd->frag_addrhi = htole32(TXP_ADDR_HI(txsegs[i].ds_addr));
		fxd->frag_addrlo = htole32(TXP_ADDR_LO(txsegs[i].ds_addr));
		fxd->frag_rsvd2 = 0;
		first_txd->tx_numdesc++;
		TXP_DESC_INC(r->r_prod, TX_ENTRIES);
	}

	/* Lastly set valid flag. */
	first_txd->tx_flags |= TX_FLAGS_VALID;

	/* Sync descriptors. */
	bus_dmamap_sync(r->r_tag, r->r_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
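	/*
	 * Sketch of the resulting ring layout for a packet that mapped to
	 * two DMA segments:
	 *
	 *	[TYPE_DATA header, tx_numdesc = 2]	<- VALID set last
	 *	[TYPE_FRAG descriptor for segment 0]
	 *	[TYPE_FRAG descriptor for segment 1]
	 *
	 * Setting TX_FLAGS_VALID on the header only after all fragment
	 * descriptors are filled in keeps the firmware from chasing a
	 * half built chain.
	 */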

	return (0);
}

/*
 * Handle simple commands sent to the Typhoon.
 */
static int
txp_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
    uint32_t in3, uint16_t *out1, uint32_t *out2, uint32_t *out3, int wait)
{
	struct txp_rsp_desc *rsp;

	rsp = NULL;
	if (txp_ext_command(sc, id, in1, in2, in3, NULL, 0, &rsp, wait) != 0) {
		device_printf(sc->sc_dev, "command 0x%02x failed\n", id);
		return (-1);
	}

	if (wait == TXP_CMD_NOWAIT)
		return (0);

	KASSERT(rsp != NULL, ("rsp is NULL!\n"));
	if (out1 != NULL)
		*out1 = le16toh(rsp->rsp_par1);
	if (out2 != NULL)
		*out2 = le32toh(rsp->rsp_par2);
	if (out3 != NULL)
		*out3 = le32toh(rsp->rsp_par3);
	free(rsp, M_DEVBUF);

	return (0);
}

static int
txp_ext_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
    uint32_t in3, struct txp_ext_desc *in_extp, uint8_t in_extn,
    struct txp_rsp_desc **rspp, int wait)
{
	struct txp_hostvar *hv;
	struct txp_cmd_desc *cmd;
	struct txp_ext_desc *ext;
	uint32_t idx;
	uint16_t seq;
	int error, i;

	error = 0;
	hv = sc->sc_hostvar;
	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
		device_printf(sc->sc_dev,
		    "%s : out of free cmd descriptors for command 0x%02x\n",
		    __func__, id);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
	    sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_POSTWRITE);
	idx = sc->sc_cmdring.lastwrite;
	cmd = (struct txp_cmd_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
	bzero(cmd, sizeof(*cmd));

	cmd->cmd_numdesc = in_extn;
	seq = sc->sc_seq++;
	cmd->cmd_seq = htole16(seq);
	cmd->cmd_id = htole16(id);
	cmd->cmd_par1 = htole16(in1);
	cmd->cmd_par2 = htole32(in2);
	cmd->cmd_par3 = htole32(in3);
	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
	    (wait == TXP_CMD_WAIT ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
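
	/*
	 * CMD_FLAGS_RESP is set only for TXP_CMD_WAIT callers, so the
	 * firmware presumably posts a response descriptor only when
	 * somebody is going to consume it; TXP_CMD_NOWAIT commands
	 * complete without touching the response ring.
	 */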

	idx += sizeof(struct txp_cmd_desc);
	if (idx == sc->sc_cmdring.size)
		idx = 0;

	for (i = 0; i < in_extn; i++) {
		ext = (struct txp_ext_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
		bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
		in_extp++;
		idx += sizeof(struct txp_cmd_desc);
		if (idx == sc->sc_cmdring.size)
			idx = 0;
	}

	sc->sc_cmdring.lastwrite = idx;
	bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
	    sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
	TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
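
	/*
	 * The write to TXP_H2A_2 acts as the doorbell: the 3XP watches
	 * its host-to-ARM mailbox registers, picks up the new write
	 * offset and consumes command descriptors up to it.  The barrier
	 * keeps the doorbell from being posted before the descriptor
	 * stores above are visible.  (Mailbox semantics inferred from
	 * the register usage here.)
	 */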

	if (wait == TXP_CMD_NOWAIT)
		return (0);

	for (i = 0; i < TXP_TIMEOUT; i++) {
		bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
		    sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);
		if (le32toh(hv->hv_resp_read_idx) !=
		    le32toh(hv->hv_resp_write_idx)) {
			error = txp_response(sc, id, seq, rspp);
			bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
			    sc->sc_cdata.txp_hostvar_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			if (error != 0)
				return (error);
			if (*rspp != NULL)
				break;
		}
		DELAY(50);
	}
	if (i == TXP_TIMEOUT) {
		device_printf(sc->sc_dev, "command 0x%02x timed out\n", id);
		error = ETIMEDOUT;
	}

	return (error);
}

static int
txp_response(struct txp_softc *sc, uint16_t id, uint16_t seq,
    struct txp_rsp_desc **rspp)
{
	struct txp_hostvar *hv;
	struct txp_rsp_desc *rsp;
	uint32_t ridx;

	bus_dmamap_sync(sc->sc_cdata.txp_rspring_tag,
	    sc->sc_cdata.txp_rspring_map, BUS_DMASYNC_POSTREAD);
	hv = sc->sc_hostvar;
	ridx = le32toh(hv->hv_resp_read_idx);
	while (ridx != le32toh(hv->hv_resp_write_idx)) {
		rsp = (struct txp_rsp_desc *)(((uint8_t *)sc->sc_rspring.base) + ridx);

		if (id == le16toh(rsp->rsp_id) &&
		    le16toh(rsp->rsp_seq) == seq) {
			*rspp = (struct txp_rsp_desc *)malloc(
			    sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
			    M_DEVBUF, M_NOWAIT);
			if (*rspp == NULL) {
				device_printf(sc->sc_dev, "%s : command 0x%02x "
				    "memory allocation failure\n",
				    __func__, le16toh(rsp->rsp_id));
				return (ENOMEM);
			}
			txp_rsp_fixup(sc, rsp, *rspp);
			return (0);
		}

		if ((rsp->rsp_flags & RSP_FLAGS_ERROR) != 0) {
			device_printf(sc->sc_dev,
			    "%s : command 0x%02x response error!\n", __func__,
			    le16toh(rsp->rsp_id));
			txp_rsp_fixup(sc, rsp, NULL);
			ridx = le32toh(hv->hv_resp_read_idx);
			continue;
		}

		/*
		 * The following unsolicited responses are handled during
		 * processing of TXP_CMD_READ_STATISTICS, which requires a
		 * response.  The driver abuses that command to detect media
		 * and link state changes.
		 * TXP_CMD_FILTER_DEFINE is not an unsolicited response, but
		 * we don't process the response ring in the interrupt
		 * handler, so we have to ignore the command here; otherwise
		 * an unknown command message would be printed.
		 */
		switch (le16toh(rsp->rsp_id)) {
		case TXP_CMD_CYCLE_STATISTICS:
		case TXP_CMD_FILTER_DEFINE:
			break;
		case TXP_CMD_MEDIA_STATUS_READ:
			if ((le16toh(rsp->rsp_par1) & 0x0800) == 0) {
				sc->sc_flags |= TXP_FLAG_LINK;
				if_link_state_change(sc->sc_ifp,
				    LINK_STATE_UP);
			} else {
				sc->sc_flags &= ~TXP_FLAG_LINK;
				if_link_state_change(sc->sc_ifp,
				    LINK_STATE_DOWN);
			}
			break;
		case TXP_CMD_HELLO_RESPONSE:
			/*
			 * The driver should respond to the hello message,
			 * but TXP_CMD_READ_STATISTICS is issued every hz,
			 * so there is no need to send an explicit command
			 * here.
			 */
			device_printf(sc->sc_dev, "%s : hello\n", __func__);
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s : unknown command 0x%02x\n", __func__,
			    le16toh(rsp->rsp_id));
			break;
		}

		txp_rsp_fixup(sc, rsp, NULL);
		ridx = le32toh(hv->hv_resp_read_idx);
	}

	return (0);
}

static void
txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
    struct txp_rsp_desc *dst)
{
	struct txp_rsp_desc *src;
	struct txp_hostvar *hv;
	uint32_t i, ridx;

	src = rsp;
	hv = sc->sc_hostvar;
	ridx = le32toh(hv->hv_resp_read_idx);

	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
		if (dst != NULL)
			bcopy(src, dst++, sizeof(struct txp_rsp_desc));
		ridx += sizeof(struct txp_rsp_desc);
		if (ridx == sc->sc_rspring.size) {
			src = sc->sc_rspring.base;
			ridx = 0;
		} else
			src++;
		sc->sc_rspring.lastwrite = ridx;
	}

	hv->hv_resp_read_idx = htole32(ridx);
}

static int
txp_cmd_desc_numfree(struct txp_softc *sc)
{
	struct txp_hostvar *hv;
	struct txp_boot_record *br;
	uint32_t widx, ridx, nfree;

	bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
	    sc->sc_cdata.txp_hostvar_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	hv = sc->sc_hostvar;
	br = sc->sc_boot;
	widx = sc->sc_cmdring.lastwrite;
	ridx = le32toh(hv->hv_cmd_read_idx);
	if (widx == ridx) {
		/* Ring is completely free */
		nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
	} else {
		if (widx > ridx)
			nfree = le32toh(br->br_cmd_siz) -
			    (widx - ridx + sizeof(struct txp_cmd_desc));
		else
			nfree = ridx - widx - sizeof(struct txp_cmd_desc);
	}

	return (nfree / sizeof(struct txp_cmd_desc));
}
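
/*
 * Worked example for txp_cmd_desc_numfree() (illustrative numbers only,
 * assuming a 16 byte txp_cmd_desc and a 512 byte command ring): with
 * widx == ridx the ring reports 512/16 - 1 = 31 free slots.  One
 * descriptor is always held back so the write offset can never catch up
 * with the firmware's read offset and make a full ring look empty.
 */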

static int
txp_sleep(struct txp_softc *sc, int capenable)
{
	uint32_t events;
	int error;

	events = 0;
	if ((capenable & IFCAP_WOL_MAGIC) != 0)
		events |= 0x01;	/* wake on magic packet (event bit assumed) */
	error = txp_command(sc, TXP_CMD_ENABLE_WAKEUP_EVENTS, events, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
	if (error == 0) {
		/* Goto sleep. */
		error = txp_command(sc, TXP_CMD_GOTO_SLEEP, 0, 0, 0, NULL,
		    NULL, NULL, TXP_CMD_NOWAIT);
		if (error == 0) {
			error = txp_wait(sc, STAT_SLEEPING);
			if (error != 0)
				device_printf(sc->sc_dev,
				    "unable to enter sleep state\n");
		}
	}

	return (error);
}

static void
txp_stop(struct txp_softc *sc)
{
	struct ifnet *ifp;

	TXP_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
	WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~TXP_FLAG_LINK;

	callout_stop(&sc->sc_tick);

	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
	    TXP_CMD_NOWAIT);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
	    TXP_CMD_NOWAIT);
	/* Save statistics for later use. */
	txp_stats_save(sc);
	/* Halt controller. */
	txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL,
	    TXP_CMD_NOWAIT);

	if (txp_wait(sc, STAT_HALTED) != 0)
		device_printf(sc->sc_dev, "controller halt timed out!\n");
	/* Reclaim Tx/Rx buffers. */
	if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
	    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
		txp_tx_reclaim(sc, &sc->sc_txhir);
	if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
	    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
		txp_tx_reclaim(sc, &sc->sc_txlor);
	txp_rxring_empty(sc);

	/* Reset controller and make it reload sleep image. */
	txp_reset(sc);
	/* Let controller boot from sleep image. */
	if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
		device_printf(sc->sc_dev, "could not boot sleep image\n");
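
	/*
	 * Rebooting the sleep image here, instead of leaving the chip
	 * halted, appears intended to park the 3XP in the same
	 * host-bootable low power state it is in after attach, so a
	 * subsequent txp_init() only has to download the runtime image
	 * again.
	 */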
}

static void
txp_watchdog(struct txp_softc *sc)
{
	struct ifnet *ifp;

	TXP_LOCK_ASSERT(sc);

	if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
		return;

	ifp = sc->sc_ifp;
	if_printf(ifp, "watchdog timeout -- resetting\n");
	ifp->if_oerrors++;
	txp_stop(sc);
	txp_init_locked(sc);
}

static int
txp_ifmedia_upd(struct ifnet *ifp)
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	uint16_t new_xcvr;

	TXP_LOCK(sc);
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
		TXP_UNLOCK(sc);
		return (EINVAL);
	}

	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			new_xcvr = TXP_XCVR_10_FDX;
		else
			new_xcvr = TXP_XCVR_10_HDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			new_xcvr = TXP_XCVR_100_FDX;
		else
			new_xcvr = TXP_XCVR_100_HDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		new_xcvr = TXP_XCVR_AUTO;
	} else {
		TXP_UNLOCK(sc);
		return (EINVAL);
	}

	if (sc->sc_xcvr == new_xcvr) {
		TXP_UNLOCK(sc);
		return (0);
	}

	txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
	sc->sc_xcvr = new_xcvr;
	TXP_UNLOCK(sc);

	return (0);
}

static void
txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	uint16_t bmsr, bmcr, anar, anlpar;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	TXP_LOCK(sc);
	/* Check whether firmware is running. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto bail;
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, TXP_CMD_WAIT))
		goto bail;
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, TXP_CMD_WAIT))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
	    &bmcr, NULL, NULL, TXP_CMD_WAIT))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
	    &anlpar, NULL, NULL, TXP_CMD_WAIT))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANAR, 0,
	    &anar, NULL, NULL, TXP_CMD_WAIT))
		goto bail;
	TXP_UNLOCK(sc);

	if (bmsr & BMSR_LINK)
		ifmr->ifm_status |= IFM_ACTIVE;

	if (bmcr & BMCR_ISO) {
		ifmr->ifm_active |= IFM_NONE;
		ifmr->ifm_status = 0;
		return;
	}

	if (bmcr & BMCR_LOOP)
		ifmr->ifm_active |= IFM_LOOP;

	if (bmcr & BMCR_AUTOEN) {
		if ((bmsr & BMSR_ACOMP) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		anlpar &= anar;
		if (anlpar & ANLPAR_TX_FD)
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		else if (anlpar & ANLPAR_T4)
			ifmr->ifm_active |= IFM_100_T4;
		else if (anlpar & ANLPAR_TX)
			ifmr->ifm_active |= IFM_100_TX;
		else if (anlpar & ANLPAR_10_FD)
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		else if (anlpar & ANLPAR_10)
			ifmr->ifm_active |= IFM_10_T;
		else
			ifmr->ifm_active |= IFM_NONE;
	} else
		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
	return;

bail:
	TXP_UNLOCK(sc);
	ifmr->ifm_active |= IFM_NONE;
	ifmr->ifm_status &= ~IFM_AVALID;
}

static void
txp_show_descriptor(void *d)
{
	struct txp_cmd_desc *cmd = d;
	struct txp_rsp_desc *rsp = d;
	struct txp_tx_desc *txd = d;
	struct txp_frag_desc *frgd = d;

	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
	case CMD_FLAGS_TYPE_CMD:
		/* command descriptor */
		printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
		break;
	case CMD_FLAGS_TYPE_RESP:
		/* response descriptor */
		printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
		    le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
		    le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
		break;
	case CMD_FLAGS_TYPE_DATA:
		/* data header (assuming tx for now) */
		printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]\n",
		    txd->tx_flags, txd->tx_numdesc, le16toh(txd->tx_totlen),
		    le32toh(txd->tx_addrlo), le32toh(txd->tx_addrhi),
		    le32toh(txd->tx_pflags));
		break;
	case CMD_FLAGS_TYPE_FRAG:
		/* fragment descriptor */
		printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]\n",
		    frgd->frag_flags, frgd->frag_rsvd1, le16toh(frgd->frag_len),
		    le32toh(frgd->frag_addrlo), le32toh(frgd->frag_addrhi),
		    le32toh(frgd->frag_rsvd2));
		break;
	default:
		printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
		break;
	}
}

static void
txp_set_filter(struct txp_softc *sc)
{
	struct ifnet *ifp;
	uint32_t crc, mchash[2];
	uint16_t filter;
	struct ifmultiaddr *ifma;
	int mcnt;

	TXP_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	filter = TXP_RXFILT_DIRECT;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		filter |= TXP_RXFILT_BROADCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			filter |= TXP_RXFILT_ALLMULTI;
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			filter = TXP_RXFILT_PROMISC;
		goto setit;
	}

	mcnt = 0;
	mchash[0] = mchash[1] = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		crc &= 0x3f;
		mchash[crc >> 5] |= 1 << (crc & 0x1f);
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0) {
		filter |= TXP_RXFILT_HASHMULTI;
		txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, 2, mchash[0],
		    mchash[1], NULL, NULL, NULL, TXP_CMD_NOWAIT);
	}
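
	/*
	 * Example: a multicast address whose big-endian CRC ends in 0x2b
	 * reduces to crc & 0x3f = 0x2b, i.e. word mchash[1] (0x2b >> 5),
	 * bit 11 (0x2b & 0x1f).  The firmware presumably applies the same
	 * reduction on receive and accepts frames whose hash bit is set.
	 */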

setit:
	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
	    NULL, NULL, NULL, TXP_CMD_NOWAIT);
}

static int
txp_set_capabilities(struct txp_softc *sc)
{
	struct ifnet *ifp;
	uint32_t rxcap, txcap;

	TXP_LOCK_ASSERT(sc);

	rxcap = txcap = 0;
	ifp = sc->sc_ifp;
	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) {
		if ((ifp->if_hwassist & CSUM_IP) != 0)
			txcap |= OFFLOAD_IPCKSUM;
		if ((ifp->if_hwassist & CSUM_TCP) != 0)
			txcap |= OFFLOAD_TCPCKSUM;
		if ((ifp->if_hwassist & CSUM_UDP) != 0)
			txcap |= OFFLOAD_UDPCKSUM;
		rxcap = txcap;
	}
	if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
		rxcap &= ~(OFFLOAD_IPCKSUM | OFFLOAD_TCPCKSUM |
		    OFFLOAD_UDPCKSUM);
	if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
		rxcap |= OFFLOAD_VLAN;
		txcap |= OFFLOAD_VLAN;
	}

	/* Tell firmware new offload configuration. */
	return (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, txcap, rxcap, NULL,
	    NULL, NULL, TXP_CMD_NOWAIT));
}

static void
txp_stats_save(struct txp_softc *sc)
{
	struct txp_rsp_desc *rsp;

	TXP_LOCK_ASSERT(sc);

	rsp = NULL;
	if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
	    &rsp, TXP_CMD_WAIT))
		goto out;
	if (rsp->rsp_numdesc != 6)
		goto out;
	txp_stats_update(sc, rsp);
out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);
	bcopy(&sc->sc_stats, &sc->sc_ostats, sizeof(struct txp_hw_stats));
}
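
/*
 * Why the snapshot: the statistics counters live in firmware and restart
 * from zero when the controller is halted and rebooted (as txp_stop()
 * does), so the last readings are folded into sc_ostats here and
 * txp_stats_update() keeps reporting monotonically increasing totals
 * across resets.
 */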

static void
txp_stats_update(struct txp_softc *sc, struct txp_rsp_desc *rsp)
{
	struct ifnet *ifp;
	struct txp_hw_stats *ostats, *stats;
	struct txp_ext_desc *ext;

	TXP_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	ext = (struct txp_ext_desc *)(rsp + 1);
	ostats = &sc->sc_ostats;
	stats = &sc->sc_stats;
	stats->tx_frames = ostats->tx_frames + le32toh(rsp->rsp_par2);
	stats->tx_bytes = ostats->tx_bytes + (uint64_t)le32toh(rsp->rsp_par3) +
	    ((uint64_t)le32toh(ext[0].ext_1) << 32);
	stats->tx_deferred = ostats->tx_deferred + le32toh(ext[0].ext_2);
	stats->tx_late_colls = ostats->tx_late_colls + le32toh(ext[0].ext_3);
	stats->tx_colls = ostats->tx_colls + le32toh(ext[0].ext_4);
	stats->tx_carrier_lost = ostats->tx_carrier_lost +
	    le32toh(ext[1].ext_1);
	stats->tx_multi_colls = ostats->tx_multi_colls +
	    le32toh(ext[1].ext_2);
	stats->tx_excess_colls = ostats->tx_excess_colls +
	    le32toh(ext[1].ext_3);
	stats->tx_fifo_underruns = ostats->tx_fifo_underruns +
	    le32toh(ext[1].ext_4);
	stats->tx_mcast_oflows = ostats->tx_mcast_oflows +
	    le32toh(ext[2].ext_1);
	stats->tx_filtered = ostats->tx_filtered + le32toh(ext[2].ext_2);
	stats->rx_frames = ostats->rx_frames + le32toh(ext[2].ext_3);
	stats->rx_bytes = ostats->rx_bytes + (uint64_t)le32toh(ext[2].ext_4) +
	    ((uint64_t)le32toh(ext[3].ext_1) << 32);
	stats->rx_fifo_oflows = ostats->rx_fifo_oflows + le32toh(ext[3].ext_2);
	stats->rx_badssd = ostats->rx_badssd + le32toh(ext[3].ext_3);
	stats->rx_crcerrs = ostats->rx_crcerrs + le32toh(ext[3].ext_4);
	stats->rx_lenerrs = ostats->rx_lenerrs + le32toh(ext[4].ext_1);
	stats->rx_bcast_frames = ostats->rx_bcast_frames +
	    le32toh(ext[4].ext_2);
	stats->rx_mcast_frames = ostats->rx_mcast_frames +
	    le32toh(ext[4].ext_3);
	stats->rx_oflows = ostats->rx_oflows + le32toh(ext[4].ext_4);
	stats->rx_filtered = ostats->rx_filtered + le32toh(ext[5].ext_1);

	ifp->if_ierrors = stats->rx_fifo_oflows + stats->rx_badssd +
	    stats->rx_crcerrs + stats->rx_lenerrs + stats->rx_oflows;
	ifp->if_oerrors = stats->tx_deferred + stats->tx_carrier_lost +
	    stats->tx_fifo_underruns + stats->tx_mcast_oflows;
	ifp->if_collisions = stats->tx_late_colls + stats->tx_multi_colls +
	    stats->tx_excess_colls;
	ifp->if_opackets = stats->tx_frames;
	ifp->if_ipackets = stats->rx_frames;
}

#define TXP_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

#if __FreeBSD_version >= 900030
#define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
#elif __FreeBSD_version > 800000
#define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
#else
#define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_ULONG(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
#endif

static void
txp_sysctl_node(struct txp_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct txp_hw_stats *stats;
	int error;

	stats = &sc->sc_stats;
	ctx = device_get_sysctl_ctx(sc->sc_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->sc_process_limit, 0,
	    sysctl_hw_txp_proc_limit, "I",
	    "max number of Rx events to process");
	/* Pull in device tunables. */
	sc->sc_process_limit = TXP_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "process_limit",
	    &sc->sc_process_limit);
	if (error == 0) {
		if (sc->sc_process_limit < TXP_PROC_MIN ||
		    sc->sc_process_limit > TXP_PROC_MAX) {
			device_printf(sc->sc_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", TXP_PROC_DEFAULT);
			sc->sc_process_limit = TXP_PROC_DEFAULT;
		}
	}

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "TXP statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);

	TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
	    &stats->tx_frames, "Frames");
	TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
	    &stats->tx_bytes, "Octets");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "deferred",
	    &stats->tx_deferred, "Deferred frames");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "colls",
	    &stats->tx_colls, "Collisions");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "carrier_lost",
	    &stats->tx_carrier_lost, "Carrier lost");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
	    &stats->tx_excess_colls, "Excessive collisions");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_underruns",
	    &stats->tx_fifo_underruns, "FIFO underruns");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_oflows",
	    &stats->tx_mcast_oflows, "Multicast overflows");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
	    &stats->tx_filtered, "Filtered frames");

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);

	TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
	    &stats->rx_frames, "Frames");
	TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
	    &stats->rx_bytes, "Octets");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "badssd",
	    &stats->rx_badssd, "Bad SSD");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
	    &stats->rx_crcerrs, "CRC errors");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "lenerrs",
	    &stats->rx_lenerrs, "Length errors");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "bcast_frames",
	    &stats->rx_bcast_frames, "Broadcast frames");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_frames",
	    &stats->rx_mcast_frames, "Multicast frames");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "oflows",
	    &stats->rx_oflows, "Overflows");
	TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
	    &stats->rx_filtered, "Filtered frames");
}

#undef TXP_SYSCTL_STAT_ADD32
#undef TXP_SYSCTL_STAT_ADD64

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    TXP_PROC_MIN, TXP_PROC_MAX));
}