1 /* $OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $ */
5 * Jason L. Wright <jason@thought.net>, Theo de Raadt, and
6 * Aaron Campbell <aaron@monkey.org>. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Jason L. Wright,
19 * Theo de Raadt and Aaron Campbell.
20 * 4. Neither the name of the author nor the names of any co-contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
25 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
34 * THE POSSIBILITY OF SUCH DAMAGE.
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
41 * Driver for 3c990 (Typhoon) Ethernet ASIC
43 #include <sys/param.h>
44 #include <sys/systm.h>
46 #include <sys/endian.h>
47 #include <sys/kernel.h>
49 #include <sys/malloc.h>
51 #include <sys/module.h>
52 #include <sys/mutex.h>
53 #include <sys/queue.h>
55 #include <sys/socket.h>
56 #include <sys/sockio.h>
57 #include <sys/sysctl.h>
58 #include <sys/taskqueue.h>
62 #include <net/if_var.h>
63 #include <net/if_arp.h>
64 #include <net/ethernet.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
67 #include <net/if_types.h>
68 #include <net/if_vlan_var.h>
70 #include <netinet/in.h>
71 #include <netinet/in_systm.h>
72 #include <netinet/ip.h>
74 #include <dev/mii/mii.h>
76 #include <dev/pci/pcireg.h>
77 #include <dev/pci/pcivar.h>
79 #include <machine/bus.h>
80 #include <machine/in_cksum.h>
82 #include <dev/txp/if_txpreg.h>
83 #include <dev/txp/3c990img.h>
85 MODULE_DEPEND(txp, pci, 1, 1, 1);
86 MODULE_DEPEND(txp, ether, 1, 1, 1);
89 * XXX Known Typhoon firmware issues.
91 * 1. The firmware seems to have a Tx TCP/UDP checksum offloading bug.
92 * The firmware hangs when it's told to compute a TCP/UDP checksum.
93 * It's not clear whether the firmware requires special alignment to
94 * do checksum offloading, but the datasheet says nothing about that.
95 * 2. The datasheet says nothing about the maximum number of fragment
96 * descriptors supported. Experimentation shows that up to 16 fragment
97 * descriptors are supported by the firmware. For TSO, the upper
98 * stack can send a 64KB IP datagram plus the link header
99 * (ethernet header + VLAN tag), but the controller can only handle
100 * up to a 64KB frame given that PAGE_SIZE is 4KB (i.e. 16 * PAGE_SIZE).
101 * Because frames that need hardware TSO can be larger than 64KB,
102 * the TSO capability is disabled (see the arithmetic sketch at the
103 * end of this comment). TSO with 16 or fewer fragment descriptors
105 * 3. VLAN hardware tag stripping is always enabled in the firmware,
106 * even if it's explicitly told not to strip the tag. It would be
107 * possible to add the tag back in the Rx handler when VLAN hardware
108 * tagging is not active, but that wasn't tried as it would be a
109 * layering violation.
110 * 4. TXP_CMD_RECV_BUFFER_CONTROL does not work as described in the
111 * datasheet, so the driver has to handle the alignment
112 * restriction by copying each received frame to align it on a
113 * 32bit boundary on strict-alignment architectures. This adds a
114 * lot of CPU burden and effectively reduces Rx performance on
115 * strict-alignment architectures (e.g. sparc64, arm and mips).
117 * Unfortunately it seems that 3Com no longer has any interest in
118 * releasing fixed firmware, so we may have to live with these bugs.
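 *
 * A rough sketch of the arithmetic behind issue 2 above, assuming 4KB
 * pages:
 *   16 fragment descriptors x 4KB per fragment = 64KB of mappable data,
 *   while a maximal TSO request is a 64KB IP datagram plus the Ethernet
 *   header and an optional VLAN tag, i.e. slightly more than 64KB.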
121 #define TXP_CSUM_FEATURES (CSUM_IP)
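/*
 * Only IP header checksum offload is advertised for Tx; TCP/UDP
 * checksum offload is left out because of firmware issue 1 above.
 */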
124 * Various supported device vendors/types and their names.
126 static struct txp_type txp_devs[] = {
127 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_95,
128 "3Com 3cR990-TX-95 Etherlink with 3XP Processor" },
129 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_97,
130 "3Com 3cR990-TX-97 Etherlink with 3XP Processor" },
131 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_TXM,
132 "3Com 3cR990B-TXM Etherlink with 3XP Processor" },
133 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_95,
134 "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" },
135 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_97,
136 "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" },
137 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_SRV,
138 "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" },
142 static int txp_probe(device_t);
143 static int txp_attach(device_t);
144 static int txp_detach(device_t);
145 static int txp_shutdown(device_t);
146 static int txp_suspend(device_t);
147 static int txp_resume(device_t);
148 static int txp_intr(void *);
149 static void txp_int_task(void *, int);
150 static void txp_tick(void *);
151 static int txp_ioctl(struct ifnet *, u_long, caddr_t);
152 static uint64_t txp_get_counter(struct ifnet *, ift_counter);
153 static void txp_start(struct ifnet *);
154 static void txp_start_locked(struct ifnet *);
155 static int txp_encap(struct txp_softc *, struct txp_tx_ring *, struct mbuf **);
156 static void txp_stop(struct txp_softc *);
157 static void txp_init(void *);
158 static void txp_init_locked(struct txp_softc *);
159 static void txp_watchdog(struct txp_softc *);
161 static int txp_reset(struct txp_softc *);
162 static int txp_boot(struct txp_softc *, uint32_t);
163 static int txp_sleep(struct txp_softc *, int);
164 static int txp_wait(struct txp_softc *, uint32_t);
165 static int txp_download_fw(struct txp_softc *);
166 static int txp_download_fw_wait(struct txp_softc *);
167 static int txp_download_fw_section(struct txp_softc *,
168 struct txp_fw_section_header *, int);
169 static int txp_alloc_rings(struct txp_softc *);
170 static void txp_init_rings(struct txp_softc *);
171 static int txp_dma_alloc(struct txp_softc *, char *, bus_dma_tag_t *,
172 bus_size_t, bus_size_t, bus_dmamap_t *, void **, bus_size_t, bus_addr_t *);
173 static void txp_dma_free(struct txp_softc *, bus_dma_tag_t *, bus_dmamap_t,
174 void **, bus_addr_t *);
175 static void txp_free_rings(struct txp_softc *);
176 static int txp_rxring_fill(struct txp_softc *);
177 static void txp_rxring_empty(struct txp_softc *);
178 static void txp_set_filter(struct txp_softc *);
180 static int txp_cmd_desc_numfree(struct txp_softc *);
181 static int txp_command(struct txp_softc *, uint16_t, uint16_t, uint32_t,
182 uint32_t, uint16_t *, uint32_t *, uint32_t *, int);
183 static int txp_ext_command(struct txp_softc *, uint16_t, uint16_t,
184 uint32_t, uint32_t, struct txp_ext_desc *, uint8_t,
185 struct txp_rsp_desc **, int);
186 static int txp_response(struct txp_softc *, uint16_t, uint16_t,
187 struct txp_rsp_desc **);
188 static void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
189 struct txp_rsp_desc *);
190 static int txp_set_capabilities(struct txp_softc *);
192 static void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
193 static int txp_ifmedia_upd(struct ifnet *);
195 static void txp_show_descriptor(void *);
197 static void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *);
198 static void txp_rxbuf_reclaim(struct txp_softc *);
199 #ifndef __NO_STRICT_ALIGNMENT
200 static __inline void txp_fixup_rx(struct mbuf *);
202 static int txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *, int);
203 static void txp_stats_save(struct txp_softc *);
204 static void txp_stats_update(struct txp_softc *, struct txp_rsp_desc *);
205 static void txp_sysctl_node(struct txp_softc *);
206 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
207 static int sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS);
209 static int prefer_iomap = 0;
210 TUNABLE_INT("hw.txp.prefer_iomap", &prefer_iomap);
212 static device_method_t txp_methods[] = {
213 /* Device interface */
214 DEVMETHOD(device_probe, txp_probe),
215 DEVMETHOD(device_attach, txp_attach),
216 DEVMETHOD(device_detach, txp_detach),
217 DEVMETHOD(device_shutdown, txp_shutdown),
218 DEVMETHOD(device_suspend, txp_suspend),
219 DEVMETHOD(device_resume, txp_resume),
224 static driver_t txp_driver = {
227 sizeof(struct txp_softc)
230 static devclass_t txp_devclass;
232 DRIVER_MODULE(txp, pci, txp_driver, txp_devclass, 0, 0);
235 txp_probe(device_t dev)
241 while (t->txp_name != NULL) {
242 if ((pci_get_vendor(dev) == t->txp_vid) &&
243 (pci_get_device(dev) == t->txp_did)) {
244 device_set_desc(dev, t->txp_name);
245 return (BUS_PROBE_DEFAULT);
254 txp_attach(device_t dev)
256 struct txp_softc *sc;
258 struct txp_rsp_desc *rsp;
261 int error = 0, pmc, rid;
262 uint8_t eaddr[ETHER_ADDR_LEN], *ver;
264 sc = device_get_softc(dev);
267 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
269 callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
270 TASK_INIT(&sc->sc_int_task, 0, txp_int_task, sc);
271 TAILQ_INIT(&sc->sc_busy_list);
272 TAILQ_INIT(&sc->sc_free_list);
274 ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
275 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
276 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL);
277 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
278 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
279 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL);
280 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
281 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
283 pci_enable_busmaster(dev);
284 /* Prefer memory space register mapping over IO space. */
285 if (prefer_iomap == 0) {
286 sc->sc_res_id = PCIR_BAR(1);
287 sc->sc_res_type = SYS_RES_MEMORY;
289 sc->sc_res_id = PCIR_BAR(0);
290 sc->sc_res_type = SYS_RES_IOPORT;
292 sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
293 &sc->sc_res_id, RF_ACTIVE);
294 if (sc->sc_res == NULL && prefer_iomap == 0) {
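/* Memory space mapping failed; fall back to I/O space. */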
295 sc->sc_res_id = PCIR_BAR(0);
296 sc->sc_res_type = SYS_RES_IOPORT;
297 sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
298 &sc->sc_res_id, RF_ACTIVE);
300 if (sc->sc_res == NULL) {
301 device_printf(dev, "couldn't map ports/memory\n");
302 ifmedia_removeall(&sc->sc_ifmedia);
303 mtx_destroy(&sc->sc_mtx);
308 reg = pci_read_config(dev, PCIR_COMMAND, 2);
309 reg |= PCIM_CMD_MWRICEN;
310 pci_write_config(dev, PCIR_COMMAND, reg, 2);
311 /* Check cache line size. */
312 reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
314 if (reg == 0 || (reg % 16) != 0)
315 device_printf(sc->sc_dev,
316 "invalid cache line size : %u\n", reg);
318 /* Allocate interrupt */
320 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
321 RF_SHAREABLE | RF_ACTIVE);
323 if (sc->sc_irq == NULL) {
324 device_printf(dev, "couldn't map interrupt\n");
329 if ((error = txp_alloc_rings(sc)) != 0)
333 /* Reset controller and make it reload sleep image. */
334 if (txp_reset(sc) != 0) {
339 /* Let controller boot from sleep image. */
340 if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
341 device_printf(sc->sc_dev, "could not boot sleep image\n");
346 /* Get station address. */
347 if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
348 &p1, &p2, NULL, TXP_CMD_WAIT)) {
354 eaddr[0] = ((uint8_t *)&p1)[1];
355 eaddr[1] = ((uint8_t *)&p1)[0];
357 eaddr[2] = ((uint8_t *)&p2)[3];
358 eaddr[3] = ((uint8_t *)&p2)[2];
359 eaddr[4] = ((uint8_t *)&p2)[1];
360 eaddr[5] = ((uint8_t *)&p2)[0];
362 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
364 device_printf(dev, "can not allocate ifnet structure\n");
370 * Show sleep image version information which may help to
371 * diagnose sleep image specific issues.
374 if (txp_ext_command(sc, TXP_CMD_READ_VERSION, 0, 0, 0, NULL, 0,
375 &rsp, TXP_CMD_WAIT)) {
376 device_printf(dev, "can not read sleep image version\n");
380 if (rsp->rsp_numdesc == 0) {
381 p2 = le32toh(rsp->rsp_par2) & 0xFFFF;
382 device_printf(dev, "Typhoon 1.0 sleep image (2000/%02u/%02u)\n",
384 } else if (rsp->rsp_numdesc == 2) {
385 p2 = le32toh(rsp->rsp_par2);
386 ver = (uint8_t *)(rsp + 1);
388 * Even though the datasheet says the command returns a
389 * NULL-terminated version string, explicitly terminate the
390 * string. Given the number of firmware bugs seen so far,
391 * even this simple claim can't be trusted.
395 "Typhoon 1.1+ sleep image %02u.%03u.%03u %s\n",
396 p2 >> 24, (p2 >> 12) & 0xFFF, p2 & 0xFFF, ver);
398 p2 = le32toh(rsp->rsp_par2);
400 "Unknown Typhoon sleep image version: %u:0x%08x\n",
401 rsp->rsp_numdesc, p2);
406 sc->sc_xcvr = TXP_XCVR_AUTO;
407 txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
408 NULL, NULL, NULL, TXP_CMD_NOWAIT);
409 ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);
412 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
413 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
414 ifp->if_ioctl = txp_ioctl;
415 ifp->if_start = txp_start;
416 ifp->if_init = txp_init;
417 ifp->if_get_counter = txp_get_counter;
418 ifp->if_snd.ifq_drv_maxlen = TX_ENTRIES - 1;
419 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
420 IFQ_SET_READY(&ifp->if_snd);
422 * It's possible to read the firmware's offload capabilities, but
423 * the firmware has not been downloaded yet, so announce the
424 * known-working capabilities here. We're not interested in IPSec
425 * capability, and due to the many firmware bugs we can't
426 * advertise the whole capability set anyway.
428 ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM;
429 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
430 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
431 /* Enable all capabilities. */
432 ifp->if_capenable = ifp->if_capabilities;
434 ether_ifattach(ifp, eaddr);
436 /* VLAN capability setup. */
437 ifp->if_capabilities |= IFCAP_VLAN_MTU;
438 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
439 ifp->if_capenable = ifp->if_capabilities;
440 /* Tell the upper layer(s) we support long frames. */
441 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
443 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
444 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
446 /* Create local taskq. */
447 sc->sc_tq = taskqueue_create_fast("txp_taskq", M_WAITOK,
448 taskqueue_thread_enqueue, &sc->sc_tq);
449 if (sc->sc_tq == NULL) {
450 device_printf(dev, "could not create taskqueue.\n");
455 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
456 device_get_nameunit(sc->sc_dev));
458 /* Put controller into sleep. */
459 if (txp_sleep(sc, 0) != 0) {
465 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
466 txp_intr, NULL, sc, &sc->sc_intrhand);
470 device_printf(dev, "couldn't set up interrupt handler.\n");
483 txp_detach(device_t dev)
485 struct txp_softc *sc;
488 sc = device_get_softc(dev);
491 if (device_is_attached(dev)) {
493 sc->sc_flags |= TXP_FLAG_DETACH;
496 callout_drain(&sc->sc_tick);
497 taskqueue_drain(sc->sc_tq, &sc->sc_int_task);
500 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
502 ifmedia_removeall(&sc->sc_ifmedia);
503 if (sc->sc_intrhand != NULL)
504 bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
505 if (sc->sc_irq != NULL)
506 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
507 if (sc->sc_res != NULL)
508 bus_release_resource(dev, sc->sc_res_type, sc->sc_res_id,
510 if (sc->sc_ifp != NULL) {
515 mtx_destroy(&sc->sc_mtx);
521 txp_reset(struct txp_softc *sc)
526 /* Disable interrupts. */
527 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
528 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
529 /* Ack all pending interrupts. */
530 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
533 WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
535 WRITE_REG(sc, TXP_SRR, 0);
537 /* Should wait max 6 seconds. */
538 for (i = 0; i < 6000; i++) {
539 r = READ_REG(sc, TXP_A2H_0);
540 if (r == STAT_WAITING_FOR_HOST_REQUEST)
545 if (r != STAT_WAITING_FOR_HOST_REQUEST)
546 device_printf(sc->sc_dev, "reset hung\n");
548 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
549 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
550 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
553 * Give the controller more time to finish loading the sleep
554 * image before trying to boot from it.
562 txp_boot(struct txp_softc *sc, uint32_t state)
565 /* See if it's waiting for boot, and try to boot it. */
566 if (txp_wait(sc, state) != 0) {
567 device_printf(sc->sc_dev, "not waiting for boot\n");
571 WRITE_REG(sc, TXP_H2A_2, TXP_ADDR_HI(sc->sc_ldata.txp_boot_paddr));
572 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
573 WRITE_REG(sc, TXP_H2A_1, TXP_ADDR_LO(sc->sc_ldata.txp_boot_paddr));
574 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
575 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
576 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
578 /* See if it booted. */
579 if (txp_wait(sc, STAT_RUNNING) != 0) {
580 device_printf(sc->sc_dev, "firmware not running\n");
584 /* Clear TX and CMD ring write registers. */
585 WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
586 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
587 WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
588 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
589 WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
590 TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
591 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
592 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
598 txp_download_fw(struct txp_softc *sc)
600 struct txp_fw_file_header *fileheader;
601 struct txp_fw_section_header *secthead;
603 uint32_t error, ier, imr;
608 ier = READ_REG(sc, TXP_IER);
609 WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);
611 imr = READ_REG(sc, TXP_IMR);
612 WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);
614 if (txp_wait(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
615 device_printf(sc->sc_dev, "not waiting for host request\n");
620 /* Ack the status. */
621 WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
623 fileheader = (struct txp_fw_file_header *)tc990image;
624 if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
625 device_printf(sc->sc_dev, "firmware invalid magic\n");
629 /* Tell boot firmware to get ready for image. */
630 WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
631 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
632 WRITE_REG(sc, TXP_H2A_2, le32toh(fileheader->hmac[0]));
633 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
634 WRITE_REG(sc, TXP_H2A_3, le32toh(fileheader->hmac[1]));
635 TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
636 WRITE_REG(sc, TXP_H2A_4, le32toh(fileheader->hmac[2]));
637 TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
638 WRITE_REG(sc, TXP_H2A_5, le32toh(fileheader->hmac[3]));
639 TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
640 WRITE_REG(sc, TXP_H2A_6, le32toh(fileheader->hmac[4]));
641 TXP_BARRIER(sc, TXP_H2A_6, 4, BUS_SPACE_BARRIER_WRITE);
642 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);
643 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
645 if (txp_download_fw_wait(sc)) {
646 device_printf(sc->sc_dev, "firmware wait failed, initial\n");
651 secthead = (struct txp_fw_section_header *)(((uint8_t *)tc990image) +
652 sizeof(struct txp_fw_file_header));
654 for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
655 if ((error = txp_download_fw_section(sc, secthead, sect)) != 0)
657 secthead = (struct txp_fw_section_header *)
658 (((uint8_t *)secthead) + le32toh(secthead->nbytes) +
662 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);
663 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
665 if (txp_wait(sc, STAT_WAITING_FOR_BOOT) != 0) {
666 device_printf(sc->sc_dev, "not waiting for boot\n");
672 WRITE_REG(sc, TXP_IER, ier);
673 WRITE_REG(sc, TXP_IMR, imr);
679 txp_download_fw_wait(struct txp_softc *sc)
685 for (i = 0; i < TXP_TIMEOUT; i++) {
686 if ((READ_REG(sc, TXP_ISR) & TXP_INT_A2H_0) != 0)
691 if (i == TXP_TIMEOUT) {
692 device_printf(sc->sc_dev, "firmware wait failed comm0\n");
696 WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
698 if (READ_REG(sc, TXP_A2H_0) != STAT_WAITING_FOR_SEGMENT) {
699 device_printf(sc->sc_dev, "firmware not waiting for segment\n");
706 txp_download_fw_section(struct txp_softc *sc,
707 struct txp_fw_section_header *sect, int sectnum)
709 bus_dma_tag_t sec_tag;
710 bus_dmamap_t sec_map;
711 bus_addr_t sec_paddr;
719 /* Skip zero length sections. */
720 if (le32toh(sect->nbytes) == 0)
723 /* Make sure we aren't past the end of the image. */
724 rseg = ((uint8_t *)sect) - ((uint8_t *)tc990image);
725 if (rseg >= sizeof(tc990image)) {
726 device_printf(sc->sc_dev,
727 "firmware invalid section address, section %d\n", sectnum);
731 /* Make sure this section doesn't go past the end. */
732 rseg += le32toh(sect->nbytes);
733 if (rseg >= sizeof(tc990image)) {
734 device_printf(sc->sc_dev, "firmware truncated section %d\n",
744 err = txp_dma_alloc(sc, "firmware sections", &sec_tag, sizeof(uint32_t),
745 0, &sec_map, (void **)&sec_buf, le32toh(sect->nbytes), &sec_paddr);
749 bcopy(((uint8_t *)sect) + sizeof(*sect), sec_buf,
750 le32toh(sect->nbytes));
753 * dummy up mbuf and verify section checksum
756 m.m_next = m.m_nextpkt = NULL;
757 m.m_len = le32toh(sect->nbytes);
760 csum = in_cksum(&m, le32toh(sect->nbytes));
761 if (csum != sect->cksum) {
762 device_printf(sc->sc_dev,
763 "firmware section %d, bad cksum (expected 0x%x got 0x%x)\n",
764 sectnum, le16toh(sect->cksum), csum);
769 bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_PREWRITE);
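/*
 * Hand the section length, checksum, target address and host DMA
 * address to the boot firmware, then tell it the segment is available.
 */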
771 WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
772 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
773 WRITE_REG(sc, TXP_H2A_2, le16toh(sect->cksum));
774 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
775 WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
776 TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
777 WRITE_REG(sc, TXP_H2A_4, TXP_ADDR_HI(sec_paddr));
778 TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
779 WRITE_REG(sc, TXP_H2A_5, TXP_ADDR_LO(sec_paddr));
780 TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
781 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);
782 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
784 if (txp_download_fw_wait(sc)) {
785 device_printf(sc->sc_dev,
786 "firmware wait failed, section %d\n", sectnum);
790 bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_POSTWRITE);
792 txp_dma_free(sc, &sec_tag, sec_map, (void **)&sec_buf, &sec_paddr);
799 struct txp_softc *sc;
803 status = READ_REG(sc, TXP_ISR);
804 if ((status & TXP_INT_LATCH) == 0)
805 return (FILTER_STRAY);
806 WRITE_REG(sc, TXP_ISR, status);
807 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
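/*
 * Interrupts stay masked from here on; txp_int_task() re-enables
 * them once it has finished processing.
 */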
808 taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
810 return (FILTER_HANDLED);
814 txp_int_task(void *arg, int pending)
816 struct txp_softc *sc;
818 struct txp_hostvar *hv;
822 sc = (struct txp_softc *)arg;
827 isr = READ_REG(sc, TXP_ISR);
828 if ((isr & TXP_INT_LATCH) != 0)
829 WRITE_REG(sc, TXP_ISR, isr);
831 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
832 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
833 sc->sc_cdata.txp_hostvar_map,
834 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
836 if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
837 more += txp_rx_reclaim(sc, &sc->sc_rxhir,
838 sc->sc_process_limit);
839 if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
840 more += txp_rx_reclaim(sc, &sc->sc_rxlor,
841 sc->sc_process_limit);
844 * It seems the controller is not smart enough to handle
845 * FIFO overflow conditions under heavy network load. No
846 * matter how often new Rx buffers are passed to the
847 * controller, the situation doesn't change. Flow control
848 * might be the only way to mitigate the issue, but the
849 * firmware provides no command to control the threshold
850 * for emitting pause frames.
852 if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
853 txp_rxbuf_reclaim(sc);
854 if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
855 TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
856 txp_tx_reclaim(sc, &sc->sc_txhir);
857 if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
858 TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
859 txp_tx_reclaim(sc, &sc->sc_txlor);
860 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
861 sc->sc_cdata.txp_hostvar_map,
862 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
863 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
864 txp_start_locked(sc->sc_ifp);
865 if (more != 0 || (READ_REG(sc, TXP_ISR) & TXP_INT_LATCH) != 0) {
866 taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
872 /* Re-enable interrupts. */
873 WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
877 #ifndef __NO_STRICT_ALIGNMENT
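/*
 * The controller wants Rx buffers aligned to TXP_RXBUF_ALIGN, which
 * leaves the received frame misaligned for the host.  Copy the frame
 * back by (TXP_RXBUF_ALIGN - ETHER_ALIGN) bytes, one 16-bit word at a
 * time, so the payload ends up on a 32-bit boundary (see firmware
 * issue 4 above).
 */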
879 txp_fixup_rx(struct mbuf *m)
884 src = mtod(m, uint16_t *);
885 dst = src - (TXP_RXBUF_ALIGN - ETHER_ALIGN) / sizeof *src;
887 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
890 m->m_data -= TXP_RXBUF_ALIGN - ETHER_ALIGN;
895 txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r, int count)
898 struct txp_rx_desc *rxd;
900 struct txp_rx_swdesc *sd;
901 uint32_t roff, woff, rx_stat, prog;
907 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
908 BUS_DMASYNC_POSTWRITE);
910 roff = le32toh(*r->r_roff);
911 woff = le32toh(*r->r_woff);
912 rxd = r->r_desc + roff / sizeof(struct txp_rx_desc);
913 for (prog = 0; roff != woff; prog++, count--) {
916 bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));
917 KASSERT(sd != NULL, ("%s: Rx desc ring corrupted", __func__));
918 bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
919 BUS_DMASYNC_POSTREAD);
920 bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
922 KASSERT(m != NULL, ("%s: Rx buffer ring corrupted", __func__));
924 TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
925 TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
926 if ((rxd->rx_flags & RX_FLAGS_ERROR) != 0) {
928 device_printf(sc->sc_dev, "Rx error %u\n",
929 le32toh(rxd->rx_stat) & RX_ERROR_MASK);
934 m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);
935 m->m_pkthdr.rcvif = ifp;
936 #ifndef __NO_STRICT_ALIGNMENT
939 rx_stat = le32toh(rxd->rx_stat);
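/* Translate the controller's Rx checksum status into mbuf csum_flags. */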
940 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
941 if ((rx_stat & RX_STAT_IPCKSUMBAD) != 0)
942 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
943 else if ((rx_stat & RX_STAT_IPCKSUMGOOD) != 0)
944 m->m_pkthdr.csum_flags |=
945 CSUM_IP_CHECKED|CSUM_IP_VALID;
947 if ((rx_stat & RX_STAT_TCPCKSUMGOOD) != 0 ||
948 (rx_stat & RX_STAT_UDPCKSUMGOOD) != 0) {
949 m->m_pkthdr.csum_flags |=
950 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
951 m->m_pkthdr.csum_data = 0xffff;
957 * The Typhoon firmware has a bug whereby the VLAN tag is always
958 * stripped, even if the firmware is told not to remove the tag.
959 * Therefore don't check if_capenable here.
961 if (/* (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && */
962 (rx_stat & RX_STAT_VLAN) != 0) {
963 m->m_pkthdr.ether_vtag =
964 bswap16((le32toh(rxd->rx_vlan) >> 16));
965 m->m_flags |= M_VLANTAG;
969 (*ifp->if_input)(ifp, m);
973 roff += sizeof(struct txp_rx_desc);
974 if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
985 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
986 BUS_DMASYNC_PREWRITE);
987 *r->r_roff = htole32(roff);
989 return (count > 0 ? 0 : EAGAIN);
993 txp_rxbuf_reclaim(struct txp_softc *sc)
995 struct txp_hostvar *hv;
996 struct txp_rxbuf_desc *rbd;
997 struct txp_rx_swdesc *sd;
998 bus_dma_segment_t segs[1];
999 int nsegs, prod, prog;
1002 TXP_LOCK_ASSERT(sc);
1004 hv = sc->sc_hostvar;
1005 cons = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
1006 prod = sc->sc_rxbufprod;
1007 TXP_DESC_INC(prod, RXBUF_ENTRIES);
1011 bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1012 sc->sc_cdata.txp_rxbufs_map,
1013 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1015 for (prog = 0; prod != cons; prog++) {
1016 sd = TAILQ_FIRST(&sc->sc_free_list);
1019 rbd = sc->sc_rxbufs + prod;
1020 bcopy((u_long *)&rbd->rb_vaddrlo, &sd, sizeof(sd));
1021 sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1022 if (sd->sd_mbuf == NULL)
1024 sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
1025 #ifndef __NO_STRICT_ALIGNMENT
1026 m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
1028 if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
1029 sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0) != 0) {
1030 m_freem(sd->sd_mbuf);
1034 KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
1036 TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1037 TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
1038 bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1039 BUS_DMASYNC_PREREAD);
1040 rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
1041 rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
1042 TXP_DESC_INC(prod, RXBUF_ENTRIES);
1047 bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1048 sc->sc_cdata.txp_rxbufs_map,
1049 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
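/*
 * prod now points one slot past the last descriptor that was filled;
 * step back so the write index handed to the firmware refers to the
 * last valid Rx buffer descriptor.
 */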
1050 prod = (prod + RXBUF_ENTRIES - 1) % RXBUF_ENTRIES;
1051 sc->sc_rxbufprod = prod;
1052 hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(prod));
1056 * Reclaim mbufs and entries from a transmit ring.
1059 txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r)
1064 struct txp_tx_desc *txd;
1065 struct txp_swdesc *sd;
1067 TXP_LOCK_ASSERT(sc);
1069 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
1070 BUS_DMASYNC_POSTWRITE);
1072 idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
1075 txd = r->r_desc + cons;
1076 sd = sc->sc_txd + cons;
1078 for (cnt = r->r_cnt; cons != idx && cnt > 0; cnt--) {
1079 if ((txd->tx_flags & TX_FLAGS_TYPE_M) == TX_FLAGS_TYPE_DATA) {
1080 if (sd->sd_mbuf != NULL) {
1081 bus_dmamap_sync(sc->sc_cdata.txp_tx_tag,
1082 sd->sd_map, BUS_DMASYNC_POSTWRITE);
1083 bus_dmamap_unload(sc->sc_cdata.txp_tx_tag,
1085 m_freem(sd->sd_mbuf);
1092 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1094 if (++cons == TX_ENTRIES) {
1104 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
1105 BUS_DMASYNC_PREWRITE);
1109 sc->sc_watchdog_timer = 0;
1113 txp_shutdown(device_t dev)
1116 return (txp_suspend(dev));
1120 txp_suspend(device_t dev)
1122 struct txp_softc *sc;
1130 sc = device_get_softc(dev);
1136 /* Reset controller and make it reload sleep image. */
1138 /* Let controller boot from sleep image. */
1139 if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
1140 device_printf(sc->sc_dev, "couldn't boot sleep image\n");
1142 /* Set station address. */
1143 eaddr = IF_LLADDR(sc->sc_ifp);
1145 ((uint8_t *)&p1)[1] = eaddr[0];
1146 ((uint8_t *)&p1)[0] = eaddr[1];
1148 ((uint8_t *)&p2)[3] = eaddr[2];
1149 ((uint8_t *)&p2)[2] = eaddr[3];
1150 ((uint8_t *)&p2)[1] = eaddr[4];
1151 ((uint8_t *)&p2)[0] = eaddr[5];
1153 txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0, NULL, NULL,
1154 NULL, TXP_CMD_WAIT);
1156 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
1157 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
1158 txp_sleep(sc, sc->sc_ifp->if_capenable);
1159 if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
1161 pmstat = pci_read_config(sc->sc_dev,
1162 pmc + PCIR_POWER_STATUS, 2);
1163 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1164 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1165 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1166 pci_write_config(sc->sc_dev,
1167 pmc + PCIR_POWER_STATUS, pmstat, 2);
1175 txp_resume(device_t dev)
1177 struct txp_softc *sc;
1181 sc = device_get_softc(dev);
1184 if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
1185 /* Disable PME and clear PME status. */
1186 pmstat = pci_read_config(sc->sc_dev,
1187 pmc + PCIR_POWER_STATUS, 2);
1188 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
1189 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1190 pci_write_config(sc->sc_dev,
1191 pmc + PCIR_POWER_STATUS, pmstat, 2);
1194 if ((sc->sc_ifp->if_flags & IFF_UP) != 0)
1195 txp_init_locked(sc);
1201 struct txp_dmamap_arg {
1202 bus_addr_t txp_busaddr;
1206 txp_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1208 struct txp_dmamap_arg *ctx;
1213 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1215 ctx = (struct txp_dmamap_arg *)arg;
1216 ctx->txp_busaddr = segs[0].ds_addr;
1220 txp_dma_alloc(struct txp_softc *sc, char *type, bus_dma_tag_t *tag,
1221 bus_size_t alignment, bus_size_t boundary, bus_dmamap_t *map, void **buf,
1222 bus_size_t size, bus_addr_t *paddr)
1224 struct txp_dmamap_arg ctx;
1227 /* Create DMA block tag. */
1228 error = bus_dma_tag_create(
1229 sc->sc_cdata.txp_parent_tag, /* parent */
1230 alignment, boundary, /* algnmnt, boundary */
1231 BUS_SPACE_MAXADDR, /* lowaddr */
1232 BUS_SPACE_MAXADDR, /* highaddr */
1233 NULL, NULL, /* filter, filterarg */
1236 size, /* maxsegsize */
1238 NULL, NULL, /* lockfunc, lockarg */
1241 device_printf(sc->sc_dev,
1242 "could not create DMA tag for %s.\n", type);
1247 /* Allocate DMA'able memory and load the DMA map. */
1248 error = bus_dmamem_alloc(*tag, buf, BUS_DMA_WAITOK | BUS_DMA_ZERO |
1249 BUS_DMA_COHERENT, map);
1251 device_printf(sc->sc_dev,
1252 "could not allocate DMA'able memory for %s.\n", type);
1256 ctx.txp_busaddr = 0;
1257 error = bus_dmamap_load(*tag, *map, *(uint8_t **)buf,
1258 size, txp_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1259 if (error != 0 || ctx.txp_busaddr == 0) {
1260 device_printf(sc->sc_dev,
1261 "could not load DMA'able memory for %s.\n", type);
1264 *paddr = ctx.txp_busaddr;
1270 txp_dma_free(struct txp_softc *sc, bus_dma_tag_t *tag, bus_dmamap_t map,
1271 void **buf, bus_addr_t *paddr)
1276 bus_dmamap_unload(*tag, map);
1278 bus_dmamem_free(*tag, *(uint8_t **)buf, map);
1279 *(uint8_t **)buf = NULL;
1281 bus_dma_tag_destroy(*tag);
1287 txp_alloc_rings(struct txp_softc *sc)
1289 struct txp_boot_record *boot;
1290 struct txp_ldata *ld;
1291 struct txp_swdesc *txd;
1292 struct txp_rxbuf_desc *rbd;
1293 struct txp_rx_swdesc *sd;
1297 boot = ld->txp_boot;
1303 * Create parent ring/DMA block tag.
1304 * The datasheet says that all ring addresses and descriptors
1305 * support 64-bit addressing. However, the controller is
1306 * known to have no DAC support, so limit the DMA address space
1309 error = bus_dma_tag_create(
1310 bus_get_dma_tag(sc->sc_dev), /* parent */
1311 1, 0, /* algnmnt, boundary */
1312 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1313 BUS_SPACE_MAXADDR, /* highaddr */
1314 NULL, NULL, /* filter, filterarg */
1315 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1317 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1319 NULL, NULL, /* lockfunc, lockarg */
1320 &sc->sc_cdata.txp_parent_tag);
1322 device_printf(sc->sc_dev, "could not create parent DMA tag.\n");
1327 error = txp_dma_alloc(sc, "boot record",
1328 &sc->sc_cdata.txp_boot_tag, sizeof(uint32_t), 0,
1329 &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
1330 sizeof(struct txp_boot_record),
1331 &sc->sc_ldata.txp_boot_paddr);
1334 boot = sc->sc_ldata.txp_boot;
1337 /* Host variables. */
1338 error = txp_dma_alloc(sc, "host variables",
1339 &sc->sc_cdata.txp_hostvar_tag, sizeof(uint32_t), 0,
1340 &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
1341 sizeof(struct txp_hostvar),
1342 &sc->sc_ldata.txp_hostvar_paddr);
1345 boot->br_hostvar_lo =
1346 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_hostvar_paddr));
1347 boot->br_hostvar_hi =
1348 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_hostvar_paddr));
1349 sc->sc_hostvar = sc->sc_ldata.txp_hostvar;
1351 /* Hi priority tx ring. */
1352 error = txp_dma_alloc(sc, "hi priority tx ring",
1353 &sc->sc_cdata.txp_txhiring_tag, sizeof(struct txp_tx_desc), 0,
1354 &sc->sc_cdata.txp_txhiring_map, (void **)&sc->sc_ldata.txp_txhiring,
1355 sizeof(struct txp_tx_desc) * TX_ENTRIES,
1356 &sc->sc_ldata.txp_txhiring_paddr);
1359 boot->br_txhipri_lo =
1360 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txhiring_paddr));
1361 boot->br_txhipri_hi =
1362 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txhiring_paddr));
1363 boot->br_txhipri_siz =
1364 htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
1365 sc->sc_txhir.r_tag = sc->sc_cdata.txp_txhiring_tag;
1366 sc->sc_txhir.r_map = sc->sc_cdata.txp_txhiring_map;
1367 sc->sc_txhir.r_reg = TXP_H2A_1;
1368 sc->sc_txhir.r_desc = sc->sc_ldata.txp_txhiring;
1369 sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
1370 sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
1372 /* Low priority tx ring. */
1373 error = txp_dma_alloc(sc, "low priority tx ring",
1374 &sc->sc_cdata.txp_txloring_tag, sizeof(struct txp_tx_desc), 0,
1375 &sc->sc_cdata.txp_txloring_map, (void **)&sc->sc_ldata.txp_txloring,
1376 sizeof(struct txp_tx_desc) * TX_ENTRIES,
1377 &sc->sc_ldata.txp_txloring_paddr);
1380 boot->br_txlopri_lo =
1381 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txloring_paddr));
1382 boot->br_txlopri_hi =
1383 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txloring_paddr));
1384 boot->br_txlopri_siz =
1385 htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
1386 sc->sc_txlor.r_tag = sc->sc_cdata.txp_txloring_tag;
1387 sc->sc_txlor.r_map = sc->sc_cdata.txp_txloring_map;
1388 sc->sc_txlor.r_reg = TXP_H2A_3;
1389 sc->sc_txlor.r_desc = sc->sc_ldata.txp_txloring;
1390 sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
1391 sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;
1393 /* High priority rx ring. */
1394 error = txp_dma_alloc(sc, "hi priority rx ring",
1395 &sc->sc_cdata.txp_rxhiring_tag,
1396 roundup(sizeof(struct txp_rx_desc), 16), 0,
1397 &sc->sc_cdata.txp_rxhiring_map, (void **)&sc->sc_ldata.txp_rxhiring,
1398 sizeof(struct txp_rx_desc) * RX_ENTRIES,
1399 &sc->sc_ldata.txp_rxhiring_paddr);
1402 boot->br_rxhipri_lo =
1403 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxhiring_paddr));
1404 boot->br_rxhipri_hi =
1405 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxhiring_paddr));
1406 boot->br_rxhipri_siz =
1407 htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
1408 sc->sc_rxhir.r_tag = sc->sc_cdata.txp_rxhiring_tag;
1409 sc->sc_rxhir.r_map = sc->sc_cdata.txp_rxhiring_map;
1410 sc->sc_rxhir.r_desc = sc->sc_ldata.txp_rxhiring;
1411 sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
1412 sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
1414 /* Low priority rx ring. */
1415 error = txp_dma_alloc(sc, "low priority rx ring",
1416 &sc->sc_cdata.txp_rxloring_tag,
1417 roundup(sizeof(struct txp_rx_desc), 16), 0,
1418 &sc->sc_cdata.txp_rxloring_map, (void **)&sc->sc_ldata.txp_rxloring,
1419 sizeof(struct txp_rx_desc) * RX_ENTRIES,
1420 &sc->sc_ldata.txp_rxloring_paddr);
1423 boot->br_rxlopri_lo =
1424 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxloring_paddr));
1425 boot->br_rxlopri_hi =
1426 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxloring_paddr));
1427 boot->br_rxlopri_siz =
1428 htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
1429 sc->sc_rxlor.r_tag = sc->sc_cdata.txp_rxloring_tag;
1430 sc->sc_rxlor.r_map = sc->sc_cdata.txp_rxloring_map;
1431 sc->sc_rxlor.r_desc = sc->sc_ldata.txp_rxloring;
1432 sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
1433 sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
1436 error = txp_dma_alloc(sc, "command ring",
1437 &sc->sc_cdata.txp_cmdring_tag, sizeof(struct txp_cmd_desc), 0,
1438 &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
1439 sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
1440 &sc->sc_ldata.txp_cmdring_paddr);
1443 boot->br_cmd_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_cmdring_paddr));
1444 boot->br_cmd_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_cmdring_paddr));
1445 boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
1446 sc->sc_cmdring.base = sc->sc_ldata.txp_cmdring;
1447 sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
1448 sc->sc_cmdring.lastwrite = 0;
1450 /* Response ring. */
1451 error = txp_dma_alloc(sc, "response ring",
1452 &sc->sc_cdata.txp_rspring_tag, sizeof(struct txp_rsp_desc), 0,
1453 &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
1454 sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
1455 &sc->sc_ldata.txp_rspring_paddr);
1458 boot->br_resp_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rspring_paddr));
1459 boot->br_resp_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rspring_paddr));
1460 boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc));
1461 sc->sc_rspring.base = sc->sc_ldata.txp_rspring;
1462 sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
1463 sc->sc_rspring.lastwrite = 0;
1465 /* Receive buffer ring. */
1466 error = txp_dma_alloc(sc, "receive buffer ring",
1467 &sc->sc_cdata.txp_rxbufs_tag, sizeof(struct txp_rxbuf_desc), 0,
1468 &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
1469 sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
1470 &sc->sc_ldata.txp_rxbufs_paddr);
1474 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxbufs_paddr));
1476 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxbufs_paddr));
1477 boot->br_rxbuf_siz =
1478 htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
1479 sc->sc_rxbufs = sc->sc_ldata.txp_rxbufs;
1482 error = txp_dma_alloc(sc, "zero buffer",
1483 &sc->sc_cdata.txp_zero_tag, sizeof(uint32_t), 0,
1484 &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
1485 sizeof(uint32_t), &sc->sc_ldata.txp_zero_paddr);
1488 boot->br_zero_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_zero_paddr));
1489 boot->br_zero_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_zero_paddr));
1491 bus_dmamap_sync(sc->sc_cdata.txp_boot_tag, sc->sc_cdata.txp_boot_map,
1492 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1494 /* Create Tx buffers. */
1495 error = bus_dma_tag_create(
1496 sc->sc_cdata.txp_parent_tag, /* parent */
1497 1, 0, /* algnmnt, boundary */
1498 BUS_SPACE_MAXADDR, /* lowaddr */
1499 BUS_SPACE_MAXADDR, /* highaddr */
1500 NULL, NULL, /* filter, filterarg */
1501 MCLBYTES * TXP_MAXTXSEGS, /* maxsize */
1502 TXP_MAXTXSEGS, /* nsegments */
1503 MCLBYTES, /* maxsegsize */
1505 NULL, NULL, /* lockfunc, lockarg */
1506 &sc->sc_cdata.txp_tx_tag);
1508 device_printf(sc->sc_dev, "could not create Tx DMA tag.\n");
1512 /* Create tag for Rx buffers. */
1513 error = bus_dma_tag_create(
1514 sc->sc_cdata.txp_parent_tag, /* parent */
1515 TXP_RXBUF_ALIGN, 0, /* algnmnt, boundary */
1516 BUS_SPACE_MAXADDR, /* lowaddr */
1517 BUS_SPACE_MAXADDR, /* highaddr */
1518 NULL, NULL, /* filter, filterarg */
1519 MCLBYTES, /* maxsize */
1521 MCLBYTES, /* maxsegsize */
1523 NULL, NULL, /* lockfunc, lockarg */
1524 &sc->sc_cdata.txp_rx_tag);
1526 device_printf(sc->sc_dev, "could not create Rx DMA tag.\n");
1530 /* Create DMA maps for Tx buffers. */
1531 for (i = 0; i < TX_ENTRIES; i++) {
1532 txd = &sc->sc_txd[i];
1533 txd->sd_mbuf = NULL;
1535 error = bus_dmamap_create(sc->sc_cdata.txp_tx_tag, 0,
1538 device_printf(sc->sc_dev,
1539 "could not create Tx dmamap.\n");
1544 /* Create DMA maps for Rx buffers. */
1545 for (i = 0; i < RXBUF_ENTRIES; i++) {
1546 sd = malloc(sizeof(struct txp_rx_swdesc), M_DEVBUF,
1553 * The virtual address part of the descriptor is not used
1554 * by the hardware, so use it to remember the associated
1555 * software descriptor. bcopy is needed here; otherwise the
1556 * stored pointer wouldn't be valid on big-endian architectures.
1558 rbd = sc->sc_rxbufs + i;
1559 bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
1562 error = bus_dmamap_create(sc->sc_cdata.txp_rx_tag, 0,
1565 device_printf(sc->sc_dev,
1566 "could not create Rx dmamap.\n");
1569 TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
1577 txp_init_rings(struct txp_softc *sc)
1580 bzero(sc->sc_ldata.txp_hostvar, sizeof(struct txp_hostvar));
1581 bzero(sc->sc_ldata.txp_zero, sizeof(uint32_t));
1582 sc->sc_txhir.r_cons = 0;
1583 sc->sc_txhir.r_prod = 0;
1584 sc->sc_txhir.r_cnt = 0;
1585 sc->sc_txlor.r_cons = 0;
1586 sc->sc_txlor.r_prod = 0;
1587 sc->sc_txlor.r_cnt = 0;
1588 sc->sc_cmdring.lastwrite = 0;
1589 sc->sc_rspring.lastwrite = 0;
1590 sc->sc_rxbufprod = 0;
1591 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1592 sc->sc_cdata.txp_hostvar_map,
1593 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1597 txp_wait(struct txp_softc *sc, uint32_t state)
1602 for (i = 0; i < TXP_TIMEOUT; i++) {
1603 reg = READ_REG(sc, TXP_A2H_0);
1609 return (i == TXP_TIMEOUT ? ETIMEDOUT : 0);
1613 txp_free_rings(struct txp_softc *sc)
1615 struct txp_swdesc *txd;
1616 struct txp_rx_swdesc *sd;
1620 if (sc->sc_cdata.txp_tx_tag != NULL) {
1621 for (i = 0; i < TX_ENTRIES; i++) {
1622 txd = &sc->sc_txd[i];
1623 if (txd->sd_map != NULL) {
1624 bus_dmamap_destroy(sc->sc_cdata.txp_tx_tag,
1629 bus_dma_tag_destroy(sc->sc_cdata.txp_tx_tag);
1630 sc->sc_cdata.txp_tx_tag = NULL;
1633 if (sc->sc_cdata.txp_rx_tag != NULL) {
1634 if (sc->sc_rxbufs != NULL) {
1635 KASSERT(TAILQ_FIRST(&sc->sc_busy_list) == NULL,
1636 ("%s : still have busy Rx buffers", __func__));
1637 while ((sd = TAILQ_FIRST(&sc->sc_free_list)) != NULL) {
1638 TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1639 if (sd->sd_map != NULL) {
1641 sc->sc_cdata.txp_rx_tag,
1648 bus_dma_tag_destroy(sc->sc_cdata.txp_rx_tag);
1649 sc->sc_cdata.txp_rx_tag = NULL;
1652 /* Hi priority Tx ring. */
1653 txp_dma_free(sc, &sc->sc_cdata.txp_txhiring_tag,
1654 sc->sc_cdata.txp_txhiring_map,
1655 (void **)&sc->sc_ldata.txp_txhiring,
1656 &sc->sc_ldata.txp_txhiring_paddr);
1657 /* Low priority Tx ring. */
1658 txp_dma_free(sc, &sc->sc_cdata.txp_txloring_tag,
1659 sc->sc_cdata.txp_txloring_map,
1660 (void **)&sc->sc_ldata.txp_txloring,
1661 &sc->sc_ldata.txp_txloring_paddr);
1662 /* Hi priority Rx ring. */
1663 txp_dma_free(sc, &sc->sc_cdata.txp_rxhiring_tag,
1664 sc->sc_cdata.txp_rxhiring_map,
1665 (void **)&sc->sc_ldata.txp_rxhiring,
1666 &sc->sc_ldata.txp_rxhiring_paddr);
1667 /* Low priority Rx ring. */
1668 txp_dma_free(sc, &sc->sc_cdata.txp_rxloring_tag,
1669 sc->sc_cdata.txp_rxloring_map,
1670 (void **)&sc->sc_ldata.txp_rxloring,
1671 &sc->sc_ldata.txp_rxloring_paddr);
1672 /* Receive buffer ring. */
1673 txp_dma_free(sc, &sc->sc_cdata.txp_rxbufs_tag,
1674 sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
1675 &sc->sc_ldata.txp_rxbufs_paddr);
1677 txp_dma_free(sc, &sc->sc_cdata.txp_cmdring_tag,
1678 sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
1679 &sc->sc_ldata.txp_cmdring_paddr);
1680 /* Response ring. */
1681 txp_dma_free(sc, &sc->sc_cdata.txp_rspring_tag,
1682 sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
1683 &sc->sc_ldata.txp_rspring_paddr);
1685 txp_dma_free(sc, &sc->sc_cdata.txp_zero_tag,
1686 sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
1687 &sc->sc_ldata.txp_zero_paddr);
1688 /* Host variables. */
1689 txp_dma_free(sc, &sc->sc_cdata.txp_hostvar_tag,
1690 sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
1691 &sc->sc_ldata.txp_hostvar_paddr);
1693 txp_dma_free(sc, &sc->sc_cdata.txp_boot_tag,
1694 sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
1695 &sc->sc_ldata.txp_boot_paddr);
1697 if (sc->sc_cdata.txp_parent_tag != NULL) {
1698 bus_dma_tag_destroy(sc->sc_cdata.txp_parent_tag);
1699 sc->sc_cdata.txp_parent_tag = NULL;
1705 txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1707 struct txp_softc *sc = ifp->if_softc;
1708 struct ifreq *ifr = (struct ifreq *)data;
1709 int capenable, error = 0, mask;
1714 if ((ifp->if_flags & IFF_UP) != 0) {
1715 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1716 if (((ifp->if_flags ^ sc->sc_if_flags)
1717 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1720 if ((sc->sc_flags & TXP_FLAG_DETACH) == 0)
1721 txp_init_locked(sc);
1724 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1727 sc->sc_if_flags = ifp->if_flags;
1733 * Multicast list has changed; set the hardware
1734 * filter accordingly.
1737 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1743 capenable = ifp->if_capenable;
1744 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1745 if ((mask & IFCAP_TXCSUM) != 0 &&
1746 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1747 ifp->if_capenable ^= IFCAP_TXCSUM;
1748 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1749 ifp->if_hwassist |= TXP_CSUM_FEATURES;
1751 ifp->if_hwassist &= ~TXP_CSUM_FEATURES;
1753 if ((mask & IFCAP_RXCSUM) != 0 &&
1754 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
1755 ifp->if_capenable ^= IFCAP_RXCSUM;
1756 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1757 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
1758 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1759 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1760 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0)
1761 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1762 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1763 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
1764 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
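/*
 * VLAN hardware checksum offload is only meaningful when both Tx
 * checksum offload and VLAN hardware tagging are enabled, so drop
 * it if either one is turned off.
 */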
1765 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
1766 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
1767 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
1768 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
1769 if (capenable != ifp->if_capenable)
1770 txp_set_capabilities(sc);
1772 VLAN_CAPABILITIES(ifp);
1776 error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
1779 error = ether_ioctl(ifp, command, data);
1787 txp_rxring_fill(struct txp_softc *sc)
1789 struct txp_rxbuf_desc *rbd;
1790 struct txp_rx_swdesc *sd;
1791 bus_dma_segment_t segs[1];
1792 int error, i, nsegs;
1794 TXP_LOCK_ASSERT(sc);
1796 bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1797 sc->sc_cdata.txp_rxbufs_map,
1798 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1800 for (i = 0; i < RXBUF_ENTRIES; i++) {
1801 sd = TAILQ_FIRST(&sc->sc_free_list);
1804 rbd = sc->sc_rxbufs + i;
1805 bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
1806 KASSERT(sd->sd_mbuf == NULL,
1807 ("%s : Rx buffer ring corrupted", __func__));
1808 sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1809 if (sd->sd_mbuf == NULL)
1811 sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
1812 #ifndef __NO_STRICT_ALIGNMENT
1813 m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
1815 if ((error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
1816 sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0)) != 0) {
1817 m_freem(sd->sd_mbuf);
1821 KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
1823 TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1824 TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
1825 bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1826 BUS_DMASYNC_PREREAD);
1827 rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
1828 rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
1831 bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1832 sc->sc_cdata.txp_rxbufs_map,
1833 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1834 sc->sc_rxbufprod = RXBUF_ENTRIES - 1;
1835 sc->sc_hostvar->hv_rx_buf_write_idx =
1836 htole32(TXP_IDX2OFFSET(RXBUF_ENTRIES - 1));
1842 txp_rxring_empty(struct txp_softc *sc)
1844 struct txp_rx_swdesc *sd;
1847 TXP_LOCK_ASSERT(sc);
1849 if (sc->sc_rxbufs == NULL)
1851 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1852 sc->sc_cdata.txp_hostvar_map,
1853 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1855 /* Release allocated Rx buffers. */
1857 while ((sd = TAILQ_FIRST(&sc->sc_busy_list)) != NULL) {
1858 TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
1859 KASSERT(sd->sd_mbuf != NULL,
1860 ("%s : Rx buffer ring corrupted", __func__));
1861 bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1862 BUS_DMASYNC_POSTREAD);
1863 bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
1864 m_freem(sd->sd_mbuf);
1866 TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
1874 struct txp_softc *sc;
1878 txp_init_locked(sc);
1883 txp_init_locked(struct txp_softc *sc)
1891 TXP_LOCK_ASSERT(sc);
1894 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1897 /* Initialize ring structure. */
1899 /* Wakeup controller. */
1900 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_WAKEUP);
1901 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
1903 * It seems that earlier NV images can come back online from the
1904 * wakeup command, but newer ones require a controller reset.
1905 * So just reset the controller again.
1907 if (txp_reset(sc) != 0)
1909 /* Download firmware. */
1910 error = txp_download_fw(sc);
1912 device_printf(sc->sc_dev, "could not download firmware.\n");
1915 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1916 sc->sc_cdata.txp_hostvar_map,
1917 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1918 if ((error = txp_rxring_fill(sc)) != 0) {
1919 device_printf(sc->sc_dev, "no memory for Rx buffers.\n");
1922 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1923 sc->sc_cdata.txp_hostvar_map,
1924 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1925 if (txp_boot(sc, STAT_WAITING_FOR_BOOT) != 0) {
1926 device_printf(sc->sc_dev, "could not boot firmware.\n");
1931 * Quite contrary to the Typhoon T2 software functional specification,
1932 * it seems the TXP_CMD_RECV_BUFFER_CONTROL command is not
1933 * implemented in the firmware. This means the driver has to
1934 * handle misaligned frames on strict-alignment architectures. AFAIK
1935 * this is the only controller manufactured by 3Com with this
1936 * bug. 3Com should fix it.
1938 if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
1939 NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1941 /* Undocumented command (interrupt coalescing disable?) - from Linux. */
1942 if (txp_command(sc, TXP_CMD_FILTER_DEFINE, 0, 0, 0, NULL, NULL, NULL,
1943 TXP_CMD_NOWAIT) != 0)
1946 /* Set station address. */
1947 eaddr = IF_LLADDR(sc->sc_ifp);
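/*
 * The firmware takes the station address split across the two command
 * parameters with the bytes within each parameter reversed, matching
 * the order returned by TXP_CMD_STATION_ADDRESS_READ in txp_attach().
 */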
1949 ((uint8_t *)&p1)[1] = eaddr[0];
1950 ((uint8_t *)&p1)[0] = eaddr[1];
1952 ((uint8_t *)&p2)[3] = eaddr[2];
1953 ((uint8_t *)&p2)[2] = eaddr[3];
1954 ((uint8_t *)&p2)[1] = eaddr[4];
1955 ((uint8_t *)&p2)[0] = eaddr[5];
1957 if (txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0,
1958 NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1962 txp_set_capabilities(sc);
1964 if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
1965 NULL, NULL, NULL, TXP_CMD_NOWAIT))
1967 if (txp_command(sc, TXP_CMD_XCVR_SELECT, sc->sc_xcvr, 0, 0,
1968 NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1970 if (txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
1971 TXP_CMD_NOWAIT) != 0)
1973 if (txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
1974 TXP_CMD_NOWAIT) != 0)
1977 /* Ack all pending interrupts and enable interrupts. */
1978 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
1979 WRITE_REG(sc, TXP_IER, TXP_INTRS);
1980 WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
1982 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1983 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1985 callout_reset(&sc->sc_tick, hz, txp_tick, sc);
1989 txp_rxring_empty(sc);
1992 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
1998 struct txp_softc *sc;
2000 struct txp_rsp_desc *rsp;
2001 struct txp_ext_desc *ext;
2005 TXP_LOCK_ASSERT(sc);
2006 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2007 sc->sc_cdata.txp_hostvar_map,
2008 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2009 txp_rxbuf_reclaim(sc);
2010 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2011 sc->sc_cdata.txp_hostvar_map,
2012 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2017 link = sc->sc_flags & TXP_FLAG_LINK;
2018 if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
2019 &rsp, TXP_CMD_WAIT))
2021 if (rsp->rsp_numdesc != 6)
2023 txp_stats_update(sc, rsp);
2024 if (link == 0 && (sc->sc_flags & TXP_FLAG_LINK) != 0) {
2025 ext = (struct txp_ext_desc *)(rsp + 1);
2026 /* Update baudrate with resolved speed. */
2027 if ((ext[5].ext_2 & 0x02) != 0)
2028 ifp->if_baudrate = IF_Mbps(100);
2030 ifp->if_baudrate = IF_Mbps(10);
2035 free(rsp, M_DEVBUF);
2037 callout_reset(&sc->sc_tick, hz, txp_tick, sc);
2041 txp_start(struct ifnet *ifp)
2043 struct txp_softc *sc;
2047 txp_start_locked(ifp);
2052 txp_start_locked(struct ifnet *ifp)
2054 struct txp_softc *sc;
2055 struct mbuf *m_head;
2059 TXP_LOCK_ASSERT(sc);
2061 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2062 IFF_DRV_RUNNING || (sc->sc_flags & TXP_FLAG_LINK) == 0)
2065 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
2066 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2070 * Pack the data into the transmit ring. If we
2071 * don't have room, set the OACTIVE flag and wait
2072 * for the NIC to drain the ring.
2073 * At the moment only the high priority ring is used.
2075 if (txp_encap(sc, &sc->sc_txhir, &m_head)) {
2078 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2079 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2084 * If there's a BPF listener, bounce a copy of this frame
2087 ETHER_BPF_MTAP(ifp, m_head);
2089 /* Send queued frame. */
2090 WRITE_REG(sc, sc->sc_txhir.r_reg,
2091 TXP_IDX2OFFSET(sc->sc_txhir.r_prod));
2095 /* Set a timeout in case the chip goes out to lunch. */
2096 sc->sc_watchdog_timer = TXP_TX_TIMEOUT;
2101 txp_encap(struct txp_softc *sc, struct txp_tx_ring *r, struct mbuf **m_head)
2103 struct txp_tx_desc *first_txd;
2104 struct txp_frag_desc *fxd;
2105 struct txp_swdesc *sd;
2107 bus_dma_segment_t txsegs[TXP_MAXTXSEGS];
2108 int error, i, nsegs;
2110 TXP_LOCK_ASSERT(sc);
2112 M_ASSERTPKTHDR((*m_head));
2115 first_txd = r->r_desc + r->r_prod;
2116 sd = sc->sc_txd + r->r_prod;
2118 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2119 *m_head, txsegs, &nsegs, 0);
2120 if (error == EFBIG) {
2121 m = m_collapse(*m_head, M_NOWAIT, TXP_MAXTXSEGS);
2128 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag,
2129 sd->sd_map, *m_head, txsegs, &nsegs, 0);
2135 } else if (error != 0)
2143 /* Check for Tx descriptor ring overrun. */
2144 if (r->r_cnt + nsegs >= TX_ENTRIES - TXP_TXD_RESERVED) {
2145 bus_dmamap_unload(sc->sc_cdata.txp_tx_tag, sd->sd_map);
2148 bus_dmamap_sync(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2149 BUS_DMASYNC_PREWRITE);
2152 first_txd->tx_flags = TX_FLAGS_TYPE_DATA;
2153 first_txd->tx_numdesc = 0;
2154 first_txd->tx_addrlo = 0;
2155 first_txd->tx_addrhi = 0;
2156 first_txd->tx_totlen = 0;
2157 first_txd->tx_pflags = 0;
2159 TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2161 /* Configure Tx IP/TCP/UDP checksum offload. */
2162 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2163 first_txd->tx_pflags |= htole32(TX_PFLAGS_IPCKSUM);
2165 /* XXX firmware bug. */
2166 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2167 first_txd->tx_pflags |= htole32(TX_PFLAGS_TCPCKSUM);
2168 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2169 first_txd->tx_pflags |= htole32(TX_PFLAGS_UDPCKSUM);
2172 /* Configure VLAN hardware tag insertion. */
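/*
 * Note: the 802.1Q tag from the mbuf header is byte-swapped before it
 * is merged into tx_pflags; since the whole field is then converted
 * with htole32(), this apparently leaves the tag itself in the network
 * byte order the firmware expects.
 */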
2173 if ((m->m_flags & M_VLANTAG) != 0)
2174 first_txd->tx_pflags |=
2175 htole32(TX_PFLAGS_VLAN | TX_PFLAGS_PRIO |
2176 (bswap16(m->m_pkthdr.ether_vtag) << TX_PFLAGS_VLANTAG_S));
2178 for (i = 0; i < nsegs; i++) {
2179 fxd = (struct txp_frag_desc *)(r->r_desc + r->r_prod);
2180 fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG | TX_FLAGS_VALID;
2181 fxd->frag_rsvd1 = 0;
2182 fxd->frag_len = htole16(txsegs[i].ds_len);
2183 fxd->frag_addrhi = htole32(TXP_ADDR_HI(txsegs[i].ds_addr));
2184 fxd->frag_addrlo = htole32(TXP_ADDR_LO(txsegs[i].ds_addr));
2185 fxd->frag_rsvd2 = 0;
2186 first_txd->tx_numdesc++;
2188 TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2191 /* Lastly set valid flag. */
2192 first_txd->tx_flags |= TX_FLAGS_VALID;
2194 /* Sync descriptors. */
2195 bus_dmamap_sync(r->r_tag, r->r_map,
2196 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2202 * Handle simple commands sent to the Typhoon
2205 txp_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
2206 uint32_t in3, uint16_t *out1, uint32_t *out2, uint32_t *out3, int wait)
2208 struct txp_rsp_desc *rsp;
2211 if (txp_ext_command(sc, id, in1, in2, in3, NULL, 0, &rsp, wait) != 0) {
2212 device_printf(sc->sc_dev, "command 0x%02x failed\n", id);
2216 if (wait == TXP_CMD_NOWAIT)
2219 KASSERT(rsp != NULL, ("rsp is NULL!\n"));
2221 *out1 = le16toh(rsp->rsp_par1);
2223 *out2 = le32toh(rsp->rsp_par2);
2225 *out3 = le32toh(rsp->rsp_par3);
2226 free(rsp, M_DEVBUF);
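/*
 * txp_ext_command: the full command-ring protocol.  A command
 * descriptor (optionally followed by in_extn extension descriptors) is
 * written at sc_cmdring.lastwrite, the new write offset is posted to
 * the TXP_H2A_2 register to notify the firmware, and, when TXP_CMD_WAIT
 * is requested, the host variable response indices are polled until
 * txp_response() delivers the response matching this command's id and
 * sequence number.
 */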
2231 txp_ext_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
2232 uint32_t in3, struct txp_ext_desc *in_extp, uint8_t in_extn,
2233 struct txp_rsp_desc **rspp, int wait)
2235 struct txp_hostvar *hv;
2236 struct txp_cmd_desc *cmd;
2237 struct txp_ext_desc *ext;
2243 hv = sc->sc_hostvar;
2244 if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
2245 device_printf(sc->sc_dev,
2246 "%s : out of free cmd descriptors for command 0x%02x\n",
2251 bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
2252 sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_POSTWRITE);
2253 idx = sc->sc_cmdring.lastwrite;
2254 cmd = (struct txp_cmd_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
2255 bzero(cmd, sizeof(*cmd));
2257 cmd->cmd_numdesc = in_extn;
2259 cmd->cmd_seq = htole16(seq);
2260 cmd->cmd_id = htole16(id);
2261 cmd->cmd_par1 = htole16(in1);
2262 cmd->cmd_par2 = htole32(in2);
2263 cmd->cmd_par3 = htole32(in3);
2264 cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
2265 (wait == TXP_CMD_WAIT ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
2267 idx += sizeof(struct txp_cmd_desc);
2268 if (idx == sc->sc_cmdring.size)
2271 for (i = 0; i < in_extn; i++) {
2272 ext = (struct txp_ext_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
2273 bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
2275 idx += sizeof(struct txp_cmd_desc);
2276 if (idx == sc->sc_cmdring.size)
2280 sc->sc_cmdring.lastwrite = idx;
2281 bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
2282 sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_PREWRITE);
2283 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2284 sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_PREREAD |
2285 BUS_DMASYNC_PREWRITE);
2286 WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
2287 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
2289 if (wait == TXP_CMD_NOWAIT)
2292 for (i = 0; i < TXP_TIMEOUT; i++) {
2293 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2294 sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_POSTREAD |
2295 BUS_DMASYNC_POSTWRITE);
2296 if (le32toh(hv->hv_resp_read_idx) !=
2297 le32toh(hv->hv_resp_write_idx)) {
2298 error = txp_response(sc, id, seq, rspp);
2299 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2300 sc->sc_cdata.txp_hostvar_map,
2301 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2309 if (i == TXP_TIMEOUT) {
2310 device_printf(sc->sc_dev, "command 0x%02x timed out\n", id);
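/*
 * txp_response: walk the response ring looking for the response that
 * matches the given command id and sequence number.  The matching
 * response (plus its extension descriptors) is copied into freshly
 * allocated memory for the caller; error responses and unsolicited
 * responses such as TXP_CMD_MEDIA_STATUS_READ, which drives the link
 * state, are consumed in place via txp_rsp_fixup().
 */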
2318 txp_response(struct txp_softc *sc, uint16_t id, uint16_t seq,
2319 struct txp_rsp_desc **rspp)
2321 struct txp_hostvar *hv;
2322 struct txp_rsp_desc *rsp;
2325 bus_dmamap_sync(sc->sc_cdata.txp_rspring_tag,
2326 sc->sc_cdata.txp_rspring_map, BUS_DMASYNC_POSTREAD);
2327 hv = sc->sc_hostvar;
2328 ridx = le32toh(hv->hv_resp_read_idx);
2329 while (ridx != le32toh(hv->hv_resp_write_idx)) {
2330 rsp = (struct txp_rsp_desc *)(((uint8_t *)sc->sc_rspring.base) + ridx);
2332 if (id == le16toh(rsp->rsp_id) &&
2333 le16toh(rsp->rsp_seq) == seq) {
2334 *rspp = (struct txp_rsp_desc *)malloc(
2335 sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
2336 M_DEVBUF, M_NOWAIT);
2337 if (*rspp == NULL) {
2338 device_printf(sc->sc_dev,"%s : command 0x%02x "
2339 "memory allocation failure\n",
2343 txp_rsp_fixup(sc, rsp, *rspp);
2347 if ((rsp->rsp_flags & RSP_FLAGS_ERROR) != 0) {
2348 device_printf(sc->sc_dev,
2349 "%s : command 0x%02x response error!\n", __func__,
2350 le16toh(rsp->rsp_id));
2351 txp_rsp_fixup(sc, rsp, NULL);
2352 ridx = le32toh(hv->hv_resp_read_idx);
2357 * The following unsolicited responses are handled during
2358 * processing of TXP_CMD_READ_STATISTICS, which requires a
2359 * response. The driver abuses that command to detect media
2360 * status changes.
2361 * TXP_CMD_FILTER_DEFINE is not an unsolicited response,
2362 * but we don't process the response ring in the interrupt
2363 * handler, so we have to ignore this command here; otherwise
2364 * an unknown command message would be printed.
2366 switch (le16toh(rsp->rsp_id)) {
2367 case TXP_CMD_CYCLE_STATISTICS:
2368 case TXP_CMD_FILTER_DEFINE:
2370 case TXP_CMD_MEDIA_STATUS_READ:
2371 if ((le16toh(rsp->rsp_par1) & 0x0800) == 0) {
2372 sc->sc_flags |= TXP_FLAG_LINK;
2373 if_link_state_change(sc->sc_ifp,
2376 sc->sc_flags &= ~TXP_FLAG_LINK;
2377 if_link_state_change(sc->sc_ifp,
2381 case TXP_CMD_HELLO_RESPONSE:
2383 * The driver should respond to the hello message, but
2384 * TXP_CMD_READ_STATISTICS is already issued every hz
2385 * tick, therefore there is no need to send an
2386 * explicit command here.
2388 device_printf(sc->sc_dev, "%s : hello\n", __func__);
2391 device_printf(sc->sc_dev,
2392 "%s : unknown command 0x%02x\n", __func__,
2393 le16toh(rsp->rsp_id));
2395 txp_rsp_fixup(sc, rsp, NULL);
2396 ridx = le32toh(hv->hv_resp_read_idx);
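/*
 * txp_rsp_fixup: copy rsp_numdesc + 1 descriptors out of the response
 * ring (when dst is not NULL), handling ring wrap-around, and advance
 * hv_resp_read_idx so the firmware knows the entries have been
 * consumed.
 */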
2403 txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
2404 struct txp_rsp_desc *dst)
2406 struct txp_rsp_desc *src;
2407 struct txp_hostvar *hv;
2411 hv = sc->sc_hostvar;
2412 ridx = le32toh(hv->hv_resp_read_idx);
2414 for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
2416 bcopy(src, dst++, sizeof(struct txp_rsp_desc));
2417 ridx += sizeof(struct txp_rsp_desc);
2418 if (ridx == sc->sc_rspring.size) {
2419 src = sc->sc_rspring.base;
2423 sc->sc_rspring.lastwrite = ridx;
2426 hv->hv_resp_read_idx = htole32(ridx);
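/*
 * txp_cmd_desc_numfree: number of free slots in the command ring,
 * computed from the host write offset and the firmware's read offset
 * kept in the host variables.  One descriptor is always held in
 * reserve so the write offset can never catch up with the read offset.
 * As a purely illustrative example with hypothetical numbers: a
 * 1024-byte ring with 16-byte descriptors, widx = 64 and ridx = 32
 * leaves (1024 - (64 - 32 + 16)) / 16 = 61 free descriptors.
 */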
2430 txp_cmd_desc_numfree(struct txp_softc *sc)
2432 struct txp_hostvar *hv;
2433 struct txp_boot_record *br;
2434 uint32_t widx, ridx, nfree;
2436 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2437 sc->sc_cdata.txp_hostvar_map,
2438 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2439 hv = sc->sc_hostvar;
2441 widx = sc->sc_cmdring.lastwrite;
2442 ridx = le32toh(hv->hv_cmd_read_idx);
2445 /* Ring is completely free */
2446 nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
2449 nfree = le32toh(br->br_cmd_siz) -
2450 (widx - ridx + sizeof(struct txp_cmd_desc));
2452 nfree = ridx - widx - sizeof(struct txp_cmd_desc);
2455 return (nfree / sizeof(struct txp_cmd_desc));
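/*
 * txp_sleep: program the wakeup events (magic packet when WOL is
 * enabled), ask the firmware to enter its sleep state and wait until
 * the controller reports STAT_SLEEPING.
 */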
2459 txp_sleep(struct txp_softc *sc, int capenable)
2465 if ((capenable & IFCAP_WOL_MAGIC) != 0)
2467 error = txp_command(sc, TXP_CMD_ENABLE_WAKEUP_EVENTS, events, 0, 0,
2468 NULL, NULL, NULL, TXP_CMD_NOWAIT);
2471 error = txp_command(sc, TXP_CMD_GOTO_SLEEP, 0, 0, 0, NULL,
2472 NULL, NULL, TXP_CMD_NOWAIT);
2474 error = txp_wait(sc, STAT_SLEEPING);
2476 device_printf(sc->sc_dev,
2477 "unable to enter into sleep\n");
2485 txp_stop(struct txp_softc *sc)
2489 TXP_LOCK_ASSERT(sc);
2492 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2495 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
2496 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
2498 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2499 sc->sc_flags &= ~TXP_FLAG_LINK;
2501 callout_stop(&sc->sc_tick);
2503 txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
2505 txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
2507 /* Save statistics for later use. */
2509 /* Halt controller. */
2510 txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL,
2513 if (txp_wait(sc, STAT_HALTED) != 0)
2514 device_printf(sc->sc_dev, "controller halt timed out!\n");
2515 /* Reclaim Tx/Rx buffers. */
2516 if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
2517 TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
2518 txp_tx_reclaim(sc, &sc->sc_txhir);
2519 if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
2520 TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
2521 txp_tx_reclaim(sc, &sc->sc_txlor);
2522 txp_rxring_empty(sc);
2525 /* Reset controller and make it reload sleep image. */
2527 /* Let controller boot from sleep image. */
2528 if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
2529 device_printf(sc->sc_dev, "could not boot sleep image\n");
2534 txp_watchdog(struct txp_softc *sc)
2538 TXP_LOCK_ASSERT(sc);
2540 if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
2544 if_printf(ifp, "watchdog timeout -- resetting\n");
2545 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2547 txp_init_locked(sc);
2551 txp_ifmedia_upd(struct ifnet *ifp)
2553 struct txp_softc *sc = ifp->if_softc;
2554 struct ifmedia *ifm = &sc->sc_ifmedia;
2558 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
2563 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
2564 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2565 new_xcvr = TXP_XCVR_10_FDX;
2567 new_xcvr = TXP_XCVR_10_HDX;
2568 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
2569 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2570 new_xcvr = TXP_XCVR_100_FDX;
2572 new_xcvr = TXP_XCVR_100_HDX;
2573 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
2574 new_xcvr = TXP_XCVR_AUTO;
2581 if (sc->sc_xcvr == new_xcvr) {
2586 txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
2587 NULL, NULL, NULL, TXP_CMD_NOWAIT);
2588 sc->sc_xcvr = new_xcvr;
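/*
 * txp_ifmedia_sts: report the current media status.  The PHY registers
 * (BMSR, BMCR, ANAR, ANLPAR) are read through the firmware's
 * TXP_CMD_PHY_MGMT_READ command; BMSR is read twice because its link
 * status bit is latched, and the link partner's advertised abilities
 * (ANLPAR) are decoded into the ifmedia word.
 */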
2595 txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2597 struct txp_softc *sc = ifp->if_softc;
2598 struct ifmedia *ifm = &sc->sc_ifmedia;
2599 uint16_t bmsr, bmcr, anar, anlpar;
2601 ifmr->ifm_status = IFM_AVALID;
2602 ifmr->ifm_active = IFM_ETHER;
2605 /* Check whether firmware is running. */
2606 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2608 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
2609 &bmsr, NULL, NULL, TXP_CMD_WAIT))
2611 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
2612 &bmsr, NULL, NULL, TXP_CMD_WAIT))
2615 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
2616 &bmcr, NULL, NULL, TXP_CMD_WAIT))
2619 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
2620 &anlpar, NULL, NULL, TXP_CMD_WAIT))
2623 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANAR, 0,
2624 &anar, NULL, NULL, TXP_CMD_WAIT))
2628 if (bmsr & BMSR_LINK)
2629 ifmr->ifm_status |= IFM_ACTIVE;
2631 if (bmcr & BMCR_ISO) {
2632 ifmr->ifm_active |= IFM_NONE;
2633 ifmr->ifm_status = 0;
2637 if (bmcr & BMCR_LOOP)
2638 ifmr->ifm_active |= IFM_LOOP;
2640 if (bmcr & BMCR_AUTOEN) {
2641 if ((bmsr & BMSR_ACOMP) == 0) {
2642 ifmr->ifm_active |= IFM_NONE;
2647 if (anlpar & ANLPAR_TX_FD)
2648 ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
2649 else if (anlpar & ANLPAR_T4)
2650 ifmr->ifm_active |= IFM_100_T4;
2651 else if (anlpar & ANLPAR_TX)
2652 ifmr->ifm_active |= IFM_100_TX;
2653 else if (anlpar & ANLPAR_10_FD)
2654 ifmr->ifm_active |= IFM_10_T|IFM_FDX;
2655 else if (anlpar & ANLPAR_10)
2656 ifmr->ifm_active |= IFM_10_T;
2658 ifmr->ifm_active |= IFM_NONE;
2660 ifmr->ifm_active = ifm->ifm_cur->ifm_media;
2665 ifmr->ifm_active |= IFM_NONE;
2666 ifmr->ifm_status &= ~IFM_AVALID;
2671 txp_show_descriptor(void *d)
2673 struct txp_cmd_desc *cmd = d;
2674 struct txp_rsp_desc *rsp = d;
2675 struct txp_tx_desc *txd = d;
2676 struct txp_frag_desc *frgd = d;
2678 switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
2679 case CMD_FLAGS_TYPE_CMD:
2680 /* command descriptor */
2681 printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2682 cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
2683 le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
2684 le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
2686 case CMD_FLAGS_TYPE_RESP:
2687 /* response descriptor */
2688 printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2689 rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
2690 le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
2691 le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
2693 case CMD_FLAGS_TYPE_DATA:
2694 /* data header (assuming tx for now) */
2695 printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
2696 txd->tx_flags, txd->tx_numdesc, le16toh(txd->tx_totlen),
2697 le32toh(txd->tx_addrlo), le32toh(txd->tx_addrhi),
2698 le32toh(txd->tx_pflags));
2700 case CMD_FLAGS_TYPE_FRAG:
2701 /* fragment descriptor */
2702 printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
2703 frgd->frag_flags, frgd->frag_rsvd1, le16toh(frgd->frag_len),
2704 le32toh(frgd->frag_addrlo), le32toh(frgd->frag_addrhi),
2705 le32toh(frgd->frag_rsvd2));
2708 printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2709 cmd->cmd_flags & CMD_FLAGS_TYPE_M,
2710 cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
2711 le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
2712 le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
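/*
 * txp_set_filter: build the Rx filter word from the interface flags
 * (direct, broadcast, all-multicast, promiscuous) and, for ordinary
 * multicast operation, program a 64-bit hash table in which each group
 * address sets one bit selected from the big-endian CRC of the
 * address.
 */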
2719 txp_set_filter(struct txp_softc *sc)
2722 uint32_t crc, mchash[2];
2724 struct ifmultiaddr *ifma;
2727 TXP_LOCK_ASSERT(sc);
2730 filter = TXP_RXFILT_DIRECT;
2731 if ((ifp->if_flags & IFF_BROADCAST) != 0)
2732 filter |= TXP_RXFILT_BROADCAST;
2733 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2734 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2735 filter |= TXP_RXFILT_ALLMULTI;
2736 if ((ifp->if_flags & IFF_PROMISC) != 0)
2737 filter = TXP_RXFILT_PROMISC;
2741 mchash[0] = mchash[1] = 0;
2743 if_maddr_rlock(ifp);
2744 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2745 if (ifma->ifma_addr->sa_family != AF_LINK)
2747 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2748 ifma->ifma_addr), ETHER_ADDR_LEN);
2750 mchash[crc >> 5] |= 1 << (crc & 0x1f);
2753 if_maddr_runlock(ifp);
2756 filter |= TXP_RXFILT_HASHMULTI;
2757 txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, 2, mchash[0],
2758 mchash[1], NULL, NULL, NULL, TXP_CMD_NOWAIT);
2762 txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
2763 NULL, NULL, NULL, TXP_CMD_NOWAIT);
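/*
 * txp_set_capabilities: translate the interface capabilities
 * (IFCAP_TXCSUM/IFCAP_RXCSUM and VLAN hardware tagging) into the
 * firmware's OFFLOAD_* bits and push them with TXP_CMD_OFFLOAD_WRITE.
 */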
2767 txp_set_capabilities(struct txp_softc *sc)
2770 uint32_t rxcap, txcap;
2772 TXP_LOCK_ASSERT(sc);
2776 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) {
2777 if ((ifp->if_hwassist & CSUM_IP) != 0)
2778 txcap |= OFFLOAD_IPCKSUM;
2779 if ((ifp->if_hwassist & CSUM_TCP) != 0)
2780 txcap |= OFFLOAD_TCPCKSUM;
2781 if ((ifp->if_hwassist & CSUM_UDP) != 0)
2782 txcap |= OFFLOAD_UDPCKSUM;
2785 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
2786 rxcap &= ~(OFFLOAD_IPCKSUM | OFFLOAD_TCPCKSUM |
2788 if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2789 rxcap |= OFFLOAD_VLAN;
2790 txcap |= OFFLOAD_VLAN;
2793 /* Tell the firmware about the new offload configuration. */
2794 return (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, txcap, rxcap, NULL,
2795 NULL, NULL, TXP_CMD_NOWAIT));
2799 txp_stats_save(struct txp_softc *sc)
2801 struct txp_rsp_desc *rsp;
2803 TXP_LOCK_ASSERT(sc);
2806 if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
2807 &rsp, TXP_CMD_WAIT))
2809 if (rsp->rsp_numdesc != 6)
2811 txp_stats_update(sc, rsp);
2814 free(rsp, M_DEVBUF);
2815 bcopy(&sc->sc_stats, &sc->sc_ostats, sizeof(struct txp_hw_stats));
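/*
 * txp_stats_update: fold a TXP_CMD_READ_STATISTICS response into the
 * driver's running totals.  The firmware returns its counters in six
 * extension descriptors following the response; each value is added to
 * the snapshot taken by txp_stats_save(), which keeps the totals
 * monotonic across firmware halts and restarts.
 */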
2819 txp_stats_update(struct txp_softc *sc, struct txp_rsp_desc *rsp)
2821 struct txp_hw_stats *ostats, *stats;
2822 struct txp_ext_desc *ext;
2824 TXP_LOCK_ASSERT(sc);
2826 ext = (struct txp_ext_desc *)(rsp + 1);
2827 ostats = &sc->sc_ostats;
2828 stats = &sc->sc_stats;
2829 stats->tx_frames = ostats->tx_frames + le32toh(rsp->rsp_par2);
2830 stats->tx_bytes = ostats->tx_bytes + (uint64_t)le32toh(rsp->rsp_par3) +
2831 ((uint64_t)le32toh(ext[0].ext_1) << 32);
2832 stats->tx_deferred = ostats->tx_deferred + le32toh(ext[0].ext_2);
2833 stats->tx_late_colls = ostats->tx_late_colls + le32toh(ext[0].ext_3);
2834 stats->tx_colls = ostats->tx_colls + le32toh(ext[0].ext_4);
2835 stats->tx_carrier_lost = ostats->tx_carrier_lost +
2836 le32toh(ext[1].ext_1);
2837 stats->tx_multi_colls = ostats->tx_multi_colls +
2838 le32toh(ext[1].ext_2);
2839 stats->tx_excess_colls = ostats->tx_excess_colls +
2840 le32toh(ext[1].ext_3);
2841 stats->tx_fifo_underruns = ostats->tx_fifo_underruns +
2842 le32toh(ext[1].ext_4);
2843 stats->tx_mcast_oflows = ostats->tx_mcast_oflows +
2844 le32toh(ext[2].ext_1);
2845 stats->tx_filtered = ostats->tx_filtered + le32toh(ext[2].ext_2);
2846 stats->rx_frames = ostats->rx_frames + le32toh(ext[2].ext_3);
2847 stats->rx_bytes = ostats->rx_bytes + (uint64_t)le32toh(ext[2].ext_4) +
2848 ((uint64_t)le32toh(ext[3].ext_1) << 32);
2849 stats->rx_fifo_oflows = ostats->rx_fifo_oflows + le32toh(ext[3].ext_2);
2850 stats->rx_badssd = ostats->rx_badssd + le32toh(ext[3].ext_3);
2851 stats->rx_crcerrs = ostats->rx_crcerrs + le32toh(ext[3].ext_4);
2852 stats->rx_lenerrs = ostats->rx_lenerrs + le32toh(ext[4].ext_1);
2853 stats->rx_bcast_frames = ostats->rx_bcast_frames +
2854 le32toh(ext[4].ext_2);
2855 stats->rx_mcast_frames = ostats->rx_mcast_frames +
2856 le32toh(ext[4].ext_3);
2857 stats->rx_oflows = ostats->rx_oflows + le32toh(ext[4].ext_4);
2858 stats->rx_filtered = ostats->rx_filtered + le32toh(ext[5].ext_1);
2862 txp_get_counter(struct ifnet *ifp, ift_counter cnt)
2864 struct txp_softc *sc;
2865 struct txp_hw_stats *stats;
2867 sc = if_getsoftc(ifp);
2868 stats = &sc->sc_stats;
2871 case IFCOUNTER_IERRORS:
2872 return (stats->rx_fifo_oflows + stats->rx_badssd +
2873 stats->rx_crcerrs + stats->rx_lenerrs + stats->rx_oflows);
2874 case IFCOUNTER_OERRORS:
2875 return (stats->tx_deferred + stats->tx_carrier_lost +
2876 stats->tx_fifo_underruns + stats->tx_mcast_oflows);
2877 case IFCOUNTER_COLLISIONS:
2878 return (stats->tx_late_colls + stats->tx_multi_colls +
2879 stats->tx_excess_colls);
2880 case IFCOUNTER_OPACKETS:
2881 return (stats->tx_frames);
2882 case IFCOUNTER_IPACKETS:
2883 return (stats->rx_frames);
2885 return (if_get_counter_default(ifp, cnt));
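/*
 * Helper macros for exporting the hardware statistics through sysctl.
 * The 64-bit variant picks whichever quad-width SYSCTL_ADD_* flavour
 * the running __FreeBSD_version provides.
 */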
2889 #define TXP_SYSCTL_STAT_ADD32(c, h, n, p, d) \
2890 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2892 #if __FreeBSD_version >= 900030
2893 #define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \
2894 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2895 #elif __FreeBSD_version > 800000
2896 #define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \
2897 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2899 #define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \
2900 SYSCTL_ADD_ULONG(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2904 txp_sysctl_node(struct txp_softc *sc)
2906 struct sysctl_ctx_list *ctx;
2907 struct sysctl_oid_list *child, *parent;
2908 struct sysctl_oid *tree;
2909 struct txp_hw_stats *stats;
2912 stats = &sc->sc_stats;
2913 ctx = device_get_sysctl_ctx(sc->sc_dev);
2914 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
2915 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
2916 CTLTYPE_INT | CTLFLAG_RW, &sc->sc_process_limit, 0,
2917 sysctl_hw_txp_proc_limit, "I",
2918 "max number of Rx events to process");
2919 /* Pull in device tunables. */
2920 sc->sc_process_limit = TXP_PROC_DEFAULT;
2921 error = resource_int_value(device_get_name(sc->sc_dev),
2922 device_get_unit(sc->sc_dev), "process_limit",
2923 &sc->sc_process_limit);
2925 if (sc->sc_process_limit < TXP_PROC_MIN ||
2926 sc->sc_process_limit > TXP_PROC_MAX) {
2927 device_printf(sc->sc_dev,
2928 "process_limit value out of range; "
2929 "using default: %d\n", TXP_PROC_DEFAULT);
2930 sc->sc_process_limit = TXP_PROC_DEFAULT;
2933 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
2934 NULL, "TXP statistics");
2935 parent = SYSCTL_CHILDREN(tree);
2937 /* Tx statistics. */
2938 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2939 NULL, "Tx MAC statistics");
2940 child = SYSCTL_CHILDREN(tree);
2942 TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
2943 &stats->tx_frames, "Frames");
2944 TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
2945 &stats->tx_bytes, "Octets");
2946 TXP_SYSCTL_STAT_ADD32(ctx, child, "deferred",
2947 &stats->tx_deferred, "Deferred frames");
2948 TXP_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
2949 &stats->tx_late_colls, "Late collisions");
2950 TXP_SYSCTL_STAT_ADD32(ctx, child, "colls",
2951 &stats->tx_colls, "Collisions");
2952 TXP_SYSCTL_STAT_ADD32(ctx, child, "carrier_lost",
2953 &stats->tx_carrier_lost, "Carrier lost");
2954 TXP_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
2955 &stats->tx_multi_colls, "Multiple collisions");
2956 TXP_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
2957 &stats->tx_excess_colls, "Excessive collisions");
2958 TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_underruns",
2959 &stats->tx_fifo_underruns, "FIFO underruns");
2960 TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_oflows",
2961 &stats->tx_mcast_oflows, "Multicast overflows");
2962 TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
2963 &stats->tx_filtered, "Filtered frames");
2965 /* Rx statistics. */
2966 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2967 NULL, "Rx MAC statistics");
2968 child = SYSCTL_CHILDREN(tree);
2970 TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
2971 &stats->rx_frames, "Frames");
2972 TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
2973 &stats->rx_bytes, "Octets");
2974 TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2975 &stats->rx_fifo_oflows, "FIFO overflows");
2976 TXP_SYSCTL_STAT_ADD32(ctx, child, "badssd",
2977 &stats->rx_badssd, "Bad SSD");
2978 TXP_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2979 &stats->rx_crcerrs, "CRC errors");
2980 TXP_SYSCTL_STAT_ADD32(ctx, child, "lenerrs",
2981 &stats->rx_lenerrs, "Length errors");
2982 TXP_SYSCTL_STAT_ADD32(ctx, child, "bcast_frames",
2983 &stats->rx_bcast_frames, "Broadcast frames");
2984 TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_frames",
2985 &stats->rx_mcast_frames, "Multicast frames");
2986 TXP_SYSCTL_STAT_ADD32(ctx, child, "oflows",
2987 &stats->rx_oflows, "Overflows");
2988 TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
2989 &stats->rx_filtered, "Filtered frames");
2992 #undef TXP_SYSCTL_STAT_ADD32
2993 #undef TXP_SYSCTL_STAT_ADD64
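/*
 * sysctl_int_range: generic handler used by the process_limit sysctl.
 * A new value is accepted only when it lies within [low, high];
 * sysctl_hw_txp_proc_limit() below simply binds it to the
 * TXP_PROC_MIN..TXP_PROC_MAX range.
 */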
2996 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3002 value = *(int *)arg1;
3003 error = sysctl_handle_int(oidp, &value, 0, req);
3004 if (error || req->newptr == NULL)
3006 if (value < low || value > high)
3008 *(int *)arg1 = value;
3014 sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS)
3016 return (sysctl_int_range(oidp, arg1, arg2, req,
3017 TXP_PROC_MIN, TXP_PROC_MAX));