2 * SPDX-License-Identifier: BSD-4-Clause
4 * Copyright (c) 1997, 1998-2003
5 * Bill Paul <wpaul@windriver.com>. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
39 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
41 * Written by Bill Paul <wpaul@windriver.com>
42 * Senior Networking Software Engineer
47 * This driver is designed to support RealTek's next generation of
48 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
49 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
50 * the RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
52 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
53 * with the older 8139 family; however, it also supports a special
54 * C+ mode of operation that provides several new performance enhancing
55 * features. These include:
57 * o Descriptor based DMA mechanism. Each descriptor represents
58 * a single packet fragment. Data buffers may be aligned on
63 * o TCP/IP checksum offload for both RX and TX
65 * o High and normal priority transmit DMA rings
67 * o VLAN tag insertion and extraction
69 * o TCP large send (segmentation offload)
71 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
72 * programming API is fairly straightforward. The RX filtering, EEPROM
73 * access and PHY access are the same as they are on the older 8139 series
76 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
77 * same programming API and feature set as the 8139C+ with the following
78 * differences and additions:
84 * o GMII and TBI ports/registers for interfacing with copper
87 * o RX and TX DMA rings can have up to 1024 descriptors
88 * (the 8139C+ allows a maximum of 64)
90 * o Slight differences in register layout from the 8139C+
92 * The TX start and timer interrupt registers are at different locations
93 * on the 8169 than they are on the 8139C+. Also, the status word in the
94 * RX descriptor has a slightly different bit layout. The 8169 does not
95 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
98 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
99 * (the 'S' stands for 'single-chip'). These devices have the same
100 * programming API as the older 8169, but also have some vendor-specific
101 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
102 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
104 * This driver takes advantage of the RX and TX checksum offload and
105 * VLAN tag insertion/extraction features. It also implements TX
106 * interrupt moderation using the timer interrupt registers, which
107 * significantly reduces TX interrupt load. There is also support
108 * for jumbo frames; however, the 8169/8169S/8110S cannot transmit
109 * jumbo frames larger than 7440 bytes, so the maximum MTU possible
110 * with this driver is 7422 bytes.
113 #ifdef HAVE_KERNEL_OPTION_HEADERS
114 #include "opt_device_polling.h"
117 #include <sys/param.h>
118 #include <sys/endian.h>
119 #include <sys/systm.h>
120 #include <sys/sockio.h>
121 #include <sys/mbuf.h>
122 #include <sys/malloc.h>
123 #include <sys/module.h>
124 #include <sys/kernel.h>
125 #include <sys/socket.h>
126 #include <sys/lock.h>
127 #include <sys/mutex.h>
128 #include <sys/sysctl.h>
129 #include <sys/taskqueue.h>
131 #include <net/debugnet.h>
133 #include <net/if_var.h>
134 #include <net/if_arp.h>
135 #include <net/ethernet.h>
136 #include <net/if_dl.h>
137 #include <net/if_media.h>
138 #include <net/if_types.h>
139 #include <net/if_vlan_var.h>
143 #include <machine/bus.h>
144 #include <machine/resource.h>
146 #include <sys/rman.h>
148 #include <dev/mii/mii.h>
149 #include <dev/mii/miivar.h>
151 #include <dev/pci/pcireg.h>
152 #include <dev/pci/pcivar.h>
154 #include <dev/rl/if_rlreg.h>
156 MODULE_DEPEND(re, pci, 1, 1, 1);
157 MODULE_DEPEND(re, ether, 1, 1, 1);
158 MODULE_DEPEND(re, miibus, 1, 1, 1);
160 /* "device miibus" required. See GENERIC if you get errors here. */
161 #include "miibus_if.h"
164 static int intr_filter = 0;
165 TUNABLE_INT("hw.re.intr_filter", &intr_filter);
166 static int msi_disable = 0;
167 TUNABLE_INT("hw.re.msi_disable", &msi_disable);
168 static int msix_disable = 0;
169 TUNABLE_INT("hw.re.msix_disable", &msix_disable);
170 static int prefer_iomap = 0;
171 TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);
173 #define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
176 * Various supported device vendors/types and their names.
178 static const struct rl_type re_devs[] = {
179 { DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
180 "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
181 { DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0,
182 "D-Link DGE-530(T) Gigabit Ethernet Adapter" },
183 { RT_VENDORID, RT_DEVICEID_8139, 0,
184 "RealTek 8139C+ 10/100BaseTX" },
185 { RT_VENDORID, RT_DEVICEID_8101E, 0,
186 "RealTek 810xE PCIe 10/100baseTX" },
187 { RT_VENDORID, RT_DEVICEID_8168, 0,
188 "RealTek 8168/8111 B/C/CP/D/DP/E/F/G PCIe Gigabit Ethernet" },
189 { RT_VENDORID, RT_DEVICEID_8161, 0,
190 "RealTek 8168 Gigabit Ethernet" },
191 { NCUBE_VENDORID, RT_DEVICEID_8168, 0,
192 "TP-Link TG-3468 v2 (RTL8168) Gigabit Ethernet" },
193 { RT_VENDORID, RT_DEVICEID_8169, 0,
194 "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
195 { RT_VENDORID, RT_DEVICEID_8169SC, 0,
196 "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
197 { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
198 "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
199 { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
200 "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
201 { USR_VENDORID, USR_DEVICEID_997902, 0,
202 "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
205 static const struct rl_hwrev re_hwrevs[] = {
206 { RL_HWREV_8139, RL_8139, "", RL_MTU },
207 { RL_HWREV_8139A, RL_8139, "A", RL_MTU },
208 { RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
209 { RL_HWREV_8139B, RL_8139, "B", RL_MTU },
210 { RL_HWREV_8130, RL_8139, "8130", RL_MTU },
211 { RL_HWREV_8139C, RL_8139, "C", RL_MTU },
212 { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU },
213 { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU },
214 { RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU },
215 { RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU },
216 { RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU },
217 { RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU },
218 { RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU },
219 { RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
220 { RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU },
221 { RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
222 { RL_HWREV_8100, RL_8139, "8100", RL_MTU },
223 { RL_HWREV_8101, RL_8139, "8101", RL_MTU },
224 { RL_HWREV_8100E, RL_8169, "8100E", RL_MTU },
225 { RL_HWREV_8101E, RL_8169, "8101E", RL_MTU },
226 { RL_HWREV_8102E, RL_8169, "8102E", RL_MTU },
227 { RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU },
228 { RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU },
229 { RL_HWREV_8103E, RL_8169, "8103E", RL_MTU },
230 { RL_HWREV_8401E, RL_8169, "8401E", RL_MTU },
231 { RL_HWREV_8402, RL_8169, "8402", RL_MTU },
232 { RL_HWREV_8105E, RL_8169, "8105E", RL_MTU },
233 { RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU },
234 { RL_HWREV_8106E, RL_8169, "8106E", RL_MTU },
235 { RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU },
236 { RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU },
237 { RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
238 { RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
239 { RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K },
240 { RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K },
241 { RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K },
242 { RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K},
243 { RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K},
244 { RL_HWREV_8168EP, RL_8169, "8168EP/8111EP", RL_JUMBO_MTU_9K},
245 { RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K},
246 { RL_HWREV_8168G, RL_8169, "8168G/8111G", RL_JUMBO_MTU_9K},
247 { RL_HWREV_8168GU, RL_8169, "8168GU/8111GU", RL_JUMBO_MTU_9K},
248 { RL_HWREV_8168H, RL_8169, "8168H/8111H", RL_JUMBO_MTU_9K},
249 { RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K},
250 { RL_HWREV_8411B, RL_8169, "8411B", RL_JUMBO_MTU_9K},
254 static int re_probe (device_t);
255 static int re_attach (device_t);
256 static int re_detach (device_t);
258 static int re_encap (struct rl_softc *, struct mbuf **);
260 static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int);
261 static int re_allocmem (device_t, struct rl_softc *);
262 static __inline void re_discard_rxbuf
263 (struct rl_softc *, int);
264 static int re_newbuf (struct rl_softc *, int);
265 static int re_jumbo_newbuf (struct rl_softc *, int);
266 static int re_rx_list_init (struct rl_softc *);
267 static int re_jrx_list_init (struct rl_softc *);
268 static int re_tx_list_init (struct rl_softc *);
270 static __inline void re_fixup_rx
273 static int re_rxeof (struct rl_softc *, int *);
274 static void re_txeof (struct rl_softc *);
275 #ifdef DEVICE_POLLING
276 static int re_poll (struct ifnet *, enum poll_cmd, int);
277 static int re_poll_locked (struct ifnet *, enum poll_cmd, int);
279 static int re_intr (void *);
280 static void re_intr_msi (void *);
281 static void re_tick (void *);
282 static void re_int_task (void *, int);
283 static void re_start (struct ifnet *);
284 static void re_start_locked (struct ifnet *);
285 static void re_start_tx (struct rl_softc *);
286 static int re_ioctl (struct ifnet *, u_long, caddr_t);
287 static void re_init (void *);
288 static void re_init_locked (struct rl_softc *);
289 static void re_stop (struct rl_softc *);
290 static void re_watchdog (struct rl_softc *);
291 static int re_suspend (device_t);
292 static int re_resume (device_t);
293 static int re_shutdown (device_t);
294 static int re_ifmedia_upd (struct ifnet *);
295 static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *);
297 static void re_eeprom_putbyte (struct rl_softc *, int);
298 static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *);
299 static void re_read_eeprom (struct rl_softc *, caddr_t, int, int);
300 static int re_gmii_readreg (device_t, int, int);
301 static int re_gmii_writereg (device_t, int, int, int);
303 static int re_miibus_readreg (device_t, int, int);
304 static int re_miibus_writereg (device_t, int, int, int);
305 static void re_miibus_statchg (device_t);
307 static void re_set_jumbo (struct rl_softc *, int);
308 static void re_set_rxmode (struct rl_softc *);
309 static void re_reset (struct rl_softc *);
310 static void re_setwol (struct rl_softc *);
311 static void re_clrwol (struct rl_softc *);
312 static void re_set_linkspeed (struct rl_softc *);
316 #ifdef DEV_NETMAP /* see ixgbe.c for details */
317 #include <dev/netmap/if_re_netmap.h>
318 MODULE_DEPEND(re, netmap, 1, 1, 1);
319 #endif /* !DEV_NETMAP */
322 static int re_diag (struct rl_softc *);
325 static void re_add_sysctls (struct rl_softc *);
326 static int re_sysctl_stats (SYSCTL_HANDLER_ARGS);
327 static int sysctl_int_range (SYSCTL_HANDLER_ARGS, int, int);
328 static int sysctl_hw_re_int_mod (SYSCTL_HANDLER_ARGS);
330 static device_method_t re_methods[] = {
331 /* Device interface */
332 DEVMETHOD(device_probe, re_probe),
333 DEVMETHOD(device_attach, re_attach),
334 DEVMETHOD(device_detach, re_detach),
335 DEVMETHOD(device_suspend, re_suspend),
336 DEVMETHOD(device_resume, re_resume),
337 DEVMETHOD(device_shutdown, re_shutdown),
340 DEVMETHOD(miibus_readreg, re_miibus_readreg),
341 DEVMETHOD(miibus_writereg, re_miibus_writereg),
342 DEVMETHOD(miibus_statchg, re_miibus_statchg),
347 static driver_t re_driver = {
350 sizeof(struct rl_softc)
353 static devclass_t re_devclass;
355 DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
356 DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);
359 CSR_WRITE_1(sc, RL_EECMD, \
360 CSR_READ_1(sc, RL_EECMD) | x)
363 CSR_WRITE_1(sc, RL_EECMD, \
364 CSR_READ_1(sc, RL_EECMD) & ~x)
367 * Send a read command and address to the EEPROM, check for ACK.
370 re_eeprom_putbyte(struct rl_softc *sc, int addr)
374 d = addr | (RL_9346_READ << sc->rl_eewidth);
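	/*
	 * 'd' combines the serial read opcode with the word address; the
	 * loop below shifts it out to the EEPROM one bit at a time,
	 * most significant bit first.
	 */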
377 * Feed in each bit and strobe the clock.
380 for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
382 EE_SET(RL_EE_DATAIN);
384 EE_CLR(RL_EE_DATAIN);
395 * Read a word of data stored in the EEPROM at address 'addr.'
398 re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
404 * Send address of word we want to read.
406 re_eeprom_putbyte(sc, addr);
409 * Start reading bits from EEPROM.
411 for (i = 0x8000; i; i >>= 1) {
414 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
424 * Read a sequence of words from the EEPROM.
427 re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
430 u_int16_t word = 0, *ptr;
432 CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
436 for (i = 0; i < cnt; i++) {
437 CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
438 re_eeprom_getword(sc, off + i, &word);
439 CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
440 ptr = (u_int16_t *)(dest + (i * 2));
444 CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
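/*
 * Read a PHY register on the GMII-attached parts through the PHYAR
 * mailbox: write the register number, then poll until the controller
 * raises RL_PHYAR_BUSY, which for reads signals that the data in the
 * low 16 bits is valid.
 */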
448 re_gmii_readreg(device_t dev, int phy, int reg)
454 sc = device_get_softc(dev);
456 /* Let the rgephy driver read the GMEDIASTAT register */
458 if (reg == RL_GMEDIASTAT) {
459 rval = CSR_READ_1(sc, RL_GMEDIASTAT);
463 CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
465 for (i = 0; i < RL_PHY_TIMEOUT; i++) {
466 rval = CSR_READ_4(sc, RL_PHYAR);
467 if (rval & RL_PHYAR_BUSY)
472 if (i == RL_PHY_TIMEOUT) {
473 device_printf(sc->rl_dev, "PHY read failed\n");
478 * The controller requires a 20us delay to process the next MDIO request.
482 return (rval & RL_PHYAR_PHYDATA);
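/*
 * Write a PHY register through the PHYAR mailbox: load the register
 * number and data with the busy bit set, then poll until the
 * controller clears it to signal completion.
 */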
486 re_gmii_writereg(device_t dev, int phy, int reg, int data)
492 sc = device_get_softc(dev);
494 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
495 (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
497 for (i = 0; i < RL_PHY_TIMEOUT; i++) {
498 rval = CSR_READ_4(sc, RL_PHYAR);
499 if (!(rval & RL_PHYAR_BUSY))
504 if (i == RL_PHY_TIMEOUT) {
505 device_printf(sc->rl_dev, "PHY write failed\n");
510 * The controller requires a 20us delay to process the next MDIO request.
518 re_miibus_readreg(device_t dev, int phy, int reg)
522 u_int16_t re8139_reg = 0;
524 sc = device_get_softc(dev);
526 if (sc->rl_type == RL_8169) {
527 rval = re_gmii_readreg(dev, phy, reg);
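	/*
	 * On the 8139C+ the internal PHY registers are exposed directly in
	 * the MAC register space, so map each MII register number to the
	 * matching MAC register below and read it with a plain CSR access.
	 */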
533 re8139_reg = RL_BMCR;
536 re8139_reg = RL_BMSR;
539 re8139_reg = RL_ANAR;
542 re8139_reg = RL_ANER;
545 re8139_reg = RL_LPAR;
551 * Allow the rlphy driver to read the media status
552 * register. If we have a link partner which does not
553 * support NWAY, this is the register which will tell
554 * us the results of parallel detection.
557 rval = CSR_READ_1(sc, RL_MEDIASTAT);
560 device_printf(sc->rl_dev, "bad phy register\n");
563 rval = CSR_READ_2(sc, re8139_reg);
564 if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
565 /* 8139C+ has different bit layout. */
566 rval &= ~(BMCR_LOOP | BMCR_ISO);
572 re_miibus_writereg(device_t dev, int phy, int reg, int data)
575 u_int16_t re8139_reg = 0;
578 sc = device_get_softc(dev);
580 if (sc->rl_type == RL_8169) {
581 rval = re_gmii_writereg(dev, phy, reg, data);
587 re8139_reg = RL_BMCR;
588 if (sc->rl_type == RL_8139CPLUS) {
589 /* 8139C+ has different bit layout. */
590 data &= ~(BMCR_LOOP | BMCR_ISO);
594 re8139_reg = RL_BMSR;
597 re8139_reg = RL_ANAR;
600 re8139_reg = RL_ANER;
603 re8139_reg = RL_LPAR;
610 device_printf(sc->rl_dev, "bad phy register\n");
613 CSR_WRITE_2(sc, re8139_reg, data);
618 re_miibus_statchg(device_t dev)
622 struct mii_data *mii;
624 sc = device_get_softc(dev);
625 mii = device_get_softc(sc->rl_miibus);
627 if (mii == NULL || ifp == NULL ||
628 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
631 sc->rl_flags &= ~RL_FLAG_LINK;
632 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
633 (IFM_ACTIVE | IFM_AVALID)) {
634 switch (IFM_SUBTYPE(mii->mii_media_active)) {
637 sc->rl_flags |= RL_FLAG_LINK;
640 if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
642 sc->rl_flags |= RL_FLAG_LINK;
649 * RealTek controllers do not provide any interface to the RX/TX
650 * MACs for resolved speed, duplex and flow-control parameters.
655 re_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
657 uint32_t h, *hashes = arg;
659 h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
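	/*
	 * The top 6 bits of the big-endian CRC select one of 64 hash filter
	 * bits, split across two 32-bit registers (MAR0 and MAR4).
	 */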
661 hashes[0] |= (1 << h);
663 hashes[1] |= (1 << (h - 32));
669 * Set the RX configuration and 64-bit multicast hash filter.
672 re_set_rxmode(struct rl_softc *sc)
675 uint32_t h, hashes[2] = { 0, 0 };
682 rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;
683 if ((sc->rl_flags & RL_FLAG_EARLYOFF) != 0)
684 rxfilt |= RL_RXCFG_EARLYOFF;
685 else if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0)
686 rxfilt |= RL_RXCFG_EARLYOFFV2;
688 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
689 if (ifp->if_flags & IFF_PROMISC)
690 rxfilt |= RL_RXCFG_RX_ALLPHYS;
692 * Unlike other hardware, we have to explicitly set
693 * RL_RXCFG_RX_MULTI to receive multicast frames in promiscuous mode.
696 rxfilt |= RL_RXCFG_RX_MULTI;
697 hashes[0] = hashes[1] = 0xffffffff;
701 if_foreach_llmaddr(ifp, re_hash_maddr, hashes);
703 if (hashes[0] != 0 || hashes[1] != 0) {
705 * For some unfathomable reason, RealTek decided to
706 * reverse the order of the multicast hash registers
707 * in the PCI Express parts. This means we have to
708 * write the hash pattern in reverse order for those devices.
711 if ((sc->rl_flags & RL_FLAG_PCIE) != 0) {
712 h = bswap32(hashes[0]);
713 hashes[0] = bswap32(hashes[1]);
716 rxfilt |= RL_RXCFG_RX_MULTI;
719 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168F) {
720 /* Disable multicast filtering due to silicon bug. */
721 hashes[0] = 0xffffffff;
722 hashes[1] = 0xffffffff;
726 CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
727 CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
728 CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
732 re_reset(struct rl_softc *sc)
738 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
740 for (i = 0; i < RL_TIMEOUT; i++) {
742 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
746 device_printf(sc->rl_dev, "reset never completed!\n");
748 if ((sc->rl_flags & RL_FLAG_MACRESET) != 0)
749 CSR_WRITE_1(sc, 0x82, 1);
750 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S)
751 re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0);
757 * The following routine is designed to test for a defect on some
758 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
759 * lines connected to the bus; however, for a 32-bit-only card, they
760 * should be pulled high. The result of this defect is that the
761 * NIC will not work right if you plug it into a 64-bit slot: DMA
762 * operations will be done with 64-bit transfers, which will fail
763 * because the 64-bit data lines aren't connected.
765 * There's no way to work around this (short of taking a soldering
766 * iron to the board); however, we can detect it. The method we use
767 * here is to put the NIC into digital loopback mode, set the receiver
768 * to promiscuous mode, and then try to send a frame. We then compare
769 * the frame data we sent to what was received. If the data matches,
770 * then the NIC is working correctly, otherwise we know the user has
771 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
772 * slot. In the latter case, there's no way the NIC can work correctly,
773 * so we print out a message on the console and abort the device attach.
777 re_diag(struct rl_softc *sc)
779 struct ifnet *ifp = sc->rl_ifp;
781 struct ether_header *eh;
782 struct rl_desc *cur_rx;
785 int total_len, i, error = 0, phyaddr;
786 u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
787 u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
789 /* Allocate a single mbuf */
790 MGETHDR(m0, M_NOWAIT, MT_DATA);
797 * Initialize the NIC in test mode. This sets the chip up
798 * so that it can send and receive frames, but performs the
799 * following special functions:
800 * - Puts receiver in promiscuous mode
801 * - Enables digital loopback mode
802 * - Leaves interrupts turned off
805 ifp->if_flags |= IFF_PROMISC;
807 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
809 sc->rl_flags |= RL_FLAG_LINK;
810 if (sc->rl_type == RL_8169)
815 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
816 for (i = 0; i < RL_TIMEOUT; i++) {
817 status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
818 if (!(status & BMCR_RESET))
822 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
823 CSR_WRITE_2(sc, RL_ISR, RL_INTRS);
827 /* Put some data in the mbuf */
829 eh = mtod(m0, struct ether_header *);
830 bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
831 bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
832 eh->ether_type = htons(ETHERTYPE_IP);
833 m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;
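	/* 60 bytes: a minimum-size frame; the MAC appends the CRC itself. */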
836 * Queue the packet, start transmission.
837 * Note: IF_HANDOFF() ultimately calls re_start() for us.
840 CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
842 /* XXX: re_diag must not be called when in ALTQ mode */
843 IF_HANDOFF(&ifp->if_snd, m0, ifp);
847 /* Wait for it to propagate through the chip */
850 for (i = 0; i < RL_TIMEOUT; i++) {
851 status = CSR_READ_2(sc, RL_ISR);
852 CSR_WRITE_2(sc, RL_ISR, status);
853 if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
854 (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
859 if (i == RL_TIMEOUT) {
860 device_printf(sc->rl_dev,
861 "diagnostic failed, failed to receive packet in"
868 * The packet should have been dumped into the first
869 * entry in the RX DMA ring. Grab it from there.
872 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
873 sc->rl_ldata.rl_rx_list_map,
874 BUS_DMASYNC_POSTREAD);
875 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
876 sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
877 BUS_DMASYNC_POSTREAD);
878 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
879 sc->rl_ldata.rl_rx_desc[0].rx_dmamap);
881 m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
882 sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
883 eh = mtod(m0, struct ether_header *);
885 cur_rx = &sc->rl_ldata.rl_rx_list[0];
886 total_len = RL_RXBYTES(cur_rx);
887 rxstat = le32toh(cur_rx->rl_cmdstat);
889 if (total_len != ETHER_MIN_LEN) {
890 device_printf(sc->rl_dev,
891 "diagnostic failed, received short packet\n");
896 /* Test that the received packet data matches what we sent. */
898 if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
899 bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
900 ntohs(eh->ether_type) != ETHERTYPE_IP) {
901 device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
902 device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
903 dst, ":", src, ":", ETHERTYPE_IP);
904 device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
905 eh->ether_dhost, ":", eh->ether_shost, ":",
906 ntohs(eh->ether_type));
907 device_printf(sc->rl_dev, "You may have a defective 32-bit "
908 "NIC plugged into a 64-bit PCI slot.\n");
909 device_printf(sc->rl_dev, "Please re-install the NIC in a "
910 "32-bit slot for proper operation.\n");
911 device_printf(sc->rl_dev, "Read the re(4) man page for more "
917 /* Turn interface off, release resources */
920 sc->rl_flags &= ~RL_FLAG_LINK;
921 ifp->if_flags &= ~IFF_PROMISC;
934 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
935 * IDs against our list and return a device name if we find a match.
938 re_probe(device_t dev)
940 const struct rl_type *t;
941 uint16_t devid, vendor;
942 uint16_t revid, sdevid;
945 vendor = pci_get_vendor(dev);
946 devid = pci_get_device(dev);
947 revid = pci_get_revid(dev);
948 sdevid = pci_get_subdevice(dev);
950 if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) {
951 if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) {
953 * Only attach to rev. 3 of the Linksys EG1032 adapter.
954 * Rev. 2 is supported by sk(4).
960 if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
962 /* 8139, let rl(4) take care of this device. */
968 for (i = 0; i < nitems(re_devs); i++, t++) {
969 if (vendor == t->rl_vid && devid == t->rl_did) {
970 device_set_desc(dev, t->rl_name);
971 return (BUS_PROBE_DEFAULT);
979 * Map a single buffer address.
983 re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
990 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
992 *addr = segs->ds_addr;
996 re_allocmem(device_t dev, struct rl_softc *sc)
999 bus_size_t rx_list_size, tx_list_size;
1003 rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
1004 tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);
1007 * Allocate the parent bus DMA tag appropriate for PCI.
1008 * In order to use DAC, the RL_CPLUSCMD_PCI_DAC bit of the RL_CPLUS_CMD
1009 * register should be set. However, some RealTek chips are known
1010 * to be buggy in their DAC handling, so disable DAC by limiting the
1011 * DMA address space to 32 bits. PCIe variants of RealTek chips
1012 * may not have this limitation.
1014 lowaddr = BUS_SPACE_MAXADDR;
1015 if ((sc->rl_flags & RL_FLAG_PCIE) == 0)
1016 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1017 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
1018 lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
1019 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
1020 NULL, NULL, &sc->rl_parent_tag);
1022 device_printf(dev, "could not allocate parent DMA tag\n");
1027 * Allocate map for TX mbufs.
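 * Each packet may be split into up to RL_NTXSEGS DMA segments, with
 * each segment no larger than 4KB (the maxsegsize passed below).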
1029 error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
1030 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1031 NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
1032 NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
1034 device_printf(dev, "could not allocate TX DMA tag\n");
1039 * Allocate map for RX mbufs.
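 * The chip wants RX buffers aligned on 64-bit boundaries, hence the
 * sizeof(uint64_t) alignment on the RX and jumbo RX tags below.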
1042 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
1043 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t),
1044 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1045 MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL,
1046 &sc->rl_ldata.rl_jrx_mtag);
1049 "could not allocate jumbo RX DMA tag\n");
1053 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
1054 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1055 MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
1057 device_printf(dev, "could not allocate RX DMA tag\n");
1062 * Allocate map for TX descriptor list.
1064 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1065 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1066 NULL, tx_list_size, 1, tx_list_size, 0,
1067 NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
1069 device_printf(dev, "could not allocate TX DMA ring tag\n");
1073 /* Allocate DMA'able memory for the TX ring */
1075 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
1076 (void **)&sc->rl_ldata.rl_tx_list,
1077 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1078 &sc->rl_ldata.rl_tx_list_map);
1080 device_printf(dev, "could not allocate TX DMA ring\n");
1084 /* Load the map for the TX ring. */
1086 sc->rl_ldata.rl_tx_list_addr = 0;
1087 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
1088 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
1089 tx_list_size, re_dma_map_addr,
1090 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
1091 if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
1092 device_printf(dev, "could not load TX DMA ring\n");
1096 /* Create DMA maps for TX buffers */
1098 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1099 error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
1100 &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1102 device_printf(dev, "could not create DMA map for TX\n");
1108 * Allocate map for RX descriptor list.
1110 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1111 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1112 NULL, rx_list_size, 1, rx_list_size, 0,
1113 NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
1115 device_printf(dev, "could not create RX DMA ring tag\n");
1119 /* Allocate DMA'able memory for the RX ring */
1121 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
1122 (void **)&sc->rl_ldata.rl_rx_list,
1123 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1124 &sc->rl_ldata.rl_rx_list_map);
1126 device_printf(dev, "could not allocate RX DMA ring\n");
1130 /* Load the map for the RX ring. */
1132 sc->rl_ldata.rl_rx_list_addr = 0;
1133 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
1134 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
1135 rx_list_size, re_dma_map_addr,
1136 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
1137 if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
1138 device_printf(dev, "could not load RX DMA ring\n");
1142 /* Create DMA maps for RX buffers */
1144 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
1145 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
1146 &sc->rl_ldata.rl_jrx_sparemap);
1149 "could not create spare DMA map for jumbo RX\n");
1152 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1153 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
1154 &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1157 "could not create DMA map for jumbo RX\n");
1162 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
1163 &sc->rl_ldata.rl_rx_sparemap);
1165 device_printf(dev, "could not create spare DMA map for RX\n");
1168 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1169 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
1170 &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1172 device_printf(dev, "could not create DMA map for RX\n");
1177 /* Create DMA map for statistics. */
1178 error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0,
1179 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1180 sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL,
1181 &sc->rl_ldata.rl_stag);
1183 device_printf(dev, "could not create statistics DMA tag\n");
1186 /* Allocate DMA'able memory for statistics. */
1187 error = bus_dmamem_alloc(sc->rl_ldata.rl_stag,
1188 (void **)&sc->rl_ldata.rl_stats,
1189 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1190 &sc->rl_ldata.rl_smap);
1193 "could not allocate statistics DMA memory\n");
1196 /* Load the map for statistics. */
1197 sc->rl_ldata.rl_stats_addr = 0;
1198 error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap,
1199 sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr,
1200 &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT);
1201 if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) {
1202 device_printf(dev, "could not load statistics DMA memory\n");
1210 * Attach the interface. Allocate softc structures, do ifmedia
1211 * setup and ethernet/BPF attach.
1214 re_attach(device_t dev)
1216 u_char eaddr[ETHER_ADDR_LEN];
1217 u_int16_t as[ETHER_ADDR_LEN / 2];
1218 struct rl_softc *sc;
1220 const struct rl_hwrev *hw_rev;
1221 int capmask, error = 0, hwrev, i, msic, msixc,
1224 u_int16_t devid, re_did = 0;
1227 sc = device_get_softc(dev);
1230 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1232 callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
1235 * Map control/status registers.
1237 pci_enable_busmaster(dev);
1239 devid = pci_get_device(dev);
1241 * Prefer memory space register mapping over I/O space.
1242 * Because the RTL8169SC does not seem to work when memory mapping
1243 * is used, always activate I/O mapping for that chip.
1245 if (devid == RT_DEVICEID_8169SC)
1247 if (prefer_iomap == 0) {
1248 sc->rl_res_id = PCIR_BAR(1);
1249 sc->rl_res_type = SYS_RES_MEMORY;
1250 /* RTL8168/8101E seems to use different BARs. */
1251 if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
1252 sc->rl_res_id = PCIR_BAR(2);
1254 sc->rl_res_id = PCIR_BAR(0);
1255 sc->rl_res_type = SYS_RES_IOPORT;
1257 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
1258 &sc->rl_res_id, RF_ACTIVE);
1259 if (sc->rl_res == NULL && prefer_iomap == 0) {
1260 sc->rl_res_id = PCIR_BAR(0);
1261 sc->rl_res_type = SYS_RES_IOPORT;
1262 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
1263 &sc->rl_res_id, RF_ACTIVE);
1265 if (sc->rl_res == NULL) {
1266 device_printf(dev, "couldn't map ports/memory\n");
1271 sc->rl_btag = rman_get_bustag(sc->rl_res);
1272 sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
1274 msic = pci_msi_count(dev);
1275 msixc = pci_msix_count(dev);
1276 if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) {
1277 sc->rl_flags |= RL_FLAG_PCIE;
1278 sc->rl_expcap = reg;
1281 device_printf(dev, "MSI count : %d\n", msic);
1282 device_printf(dev, "MSI-X count : %d\n", msixc);
1284 if (msix_disable > 0)
1286 if (msi_disable > 0)
1288 /* Prefer MSI-X to MSI. */
1290 msixc = RL_MSI_MESSAGES;
1292 sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1294 if (sc->rl_res_pba == NULL) {
1295 device_printf(sc->rl_dev,
1296 "could not allocate MSI-X PBA resource\n");
1298 if (sc->rl_res_pba != NULL &&
1299 pci_alloc_msix(dev, &msixc) == 0) {
1300 if (msixc == RL_MSI_MESSAGES) {
1301 device_printf(dev, "Using %d MSI-X message\n",
1303 sc->rl_flags |= RL_FLAG_MSIX;
1305 pci_release_msi(dev);
1307 if ((sc->rl_flags & RL_FLAG_MSIX) == 0) {
1308 if (sc->rl_res_pba != NULL)
1309 bus_release_resource(dev, SYS_RES_MEMORY, rid,
1311 sc->rl_res_pba = NULL;
1315 /* Prefer MSI to INTx. */
1316 if (msixc == 0 && msic > 0) {
1317 msic = RL_MSI_MESSAGES;
1318 if (pci_alloc_msi(dev, &msic) == 0) {
1319 if (msic == RL_MSI_MESSAGES) {
1320 device_printf(dev, "Using %d MSI message\n",
1322 sc->rl_flags |= RL_FLAG_MSI;
1323 /* Explicitly set MSI enable bit. */
1324 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1325 cfg = CSR_READ_1(sc, RL_CFG2);
1327 CSR_WRITE_1(sc, RL_CFG2, cfg);
1328 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1330 pci_release_msi(dev);
1332 if ((sc->rl_flags & RL_FLAG_MSI) == 0)
1336 /* Allocate interrupt */
1337 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) {
1339 sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1340 RF_SHAREABLE | RF_ACTIVE);
1341 if (sc->rl_irq[0] == NULL) {
1342 device_printf(dev, "couldn't allocate IRQ resources\n");
1347 for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
1348 sc->rl_irq[i] = bus_alloc_resource_any(dev,
1349 SYS_RES_IRQ, &rid, RF_ACTIVE);
1350 if (sc->rl_irq[i] == NULL) {
1352 "couldn't allocate IRQ resources for "
1353 "message %d\n", rid);
1360 if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
1361 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1362 cfg = CSR_READ_1(sc, RL_CFG2);
1363 if ((cfg & RL_CFG2_MSI) != 0) {
1364 device_printf(dev, "turning off MSI enable bit.\n");
1365 cfg &= ~RL_CFG2_MSI;
1366 CSR_WRITE_1(sc, RL_CFG2, cfg);
1368 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1371 /* Disable ASPM L0S/L1 and CLKREQ. */
1372 if (sc->rl_expcap != 0) {
1373 cap = pci_read_config(dev, sc->rl_expcap +
1375 if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
1376 ctl = pci_read_config(dev, sc->rl_expcap +
1378 if ((ctl & (PCIEM_LINK_CTL_ECPM |
1379 PCIEM_LINK_CTL_ASPMC))!= 0) {
1380 ctl &= ~(PCIEM_LINK_CTL_ECPM |
1381 PCIEM_LINK_CTL_ASPMC);
1382 pci_write_config(dev, sc->rl_expcap +
1383 PCIER_LINK_CTL, ctl, 2);
1384 device_printf(dev, "ASPM disabled\n");
1387 device_printf(dev, "no ASPM capability\n");
1391 hwrev = CSR_READ_4(sc, RL_TXCFG);
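	/*
	 * The hardware revision is encoded in the TX config register;
	 * bits 30-28 distinguish the old-style encoding from the newer
	 * one, which also carries a separate MAC revision field.
	 */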
1392 switch (hwrev & 0x70000000) {
1395 device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
1396 hwrev &= (RL_TXCFG_HWREV | 0x80000000);
1399 device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
1400 sc->rl_macrev = hwrev & 0x00700000;
1401 hwrev &= RL_TXCFG_HWREV;
1404 device_printf(dev, "MAC rev. 0x%08x\n", sc->rl_macrev);
1405 while (hw_rev->rl_desc != NULL) {
1406 if (hw_rev->rl_rev == hwrev) {
1407 sc->rl_type = hw_rev->rl_type;
1408 sc->rl_hwrev = hw_rev;
1413 if (hw_rev->rl_desc == NULL) {
1414 device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
1419 switch (hw_rev->rl_rev) {
1420 case RL_HWREV_8139CPLUS:
1421 sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
1423 case RL_HWREV_8100E:
1424 case RL_HWREV_8101E:
1425 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
1427 case RL_HWREV_8102E:
1428 case RL_HWREV_8102EL:
1429 case RL_HWREV_8102EL_SPIN1:
1430 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
1431 RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
1434 case RL_HWREV_8103E:
1435 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
1436 RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
1437 RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP;
1439 case RL_HWREV_8401E:
1440 case RL_HWREV_8105E:
1441 case RL_HWREV_8105E_SPIN1:
1442 case RL_HWREV_8106E:
1443 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1444 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1445 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
1448 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1449 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1450 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
1451 RL_FLAG_CMDSTOP_WAIT_TXQ;
1453 case RL_HWREV_8168B_SPIN1:
1454 case RL_HWREV_8168B_SPIN2:
1455 sc->rl_flags |= RL_FLAG_WOLRXENB;
1457 case RL_HWREV_8168B_SPIN3:
1458 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
1460 case RL_HWREV_8168C_SPIN2:
1461 sc->rl_flags |= RL_FLAG_MACSLEEP;
1463 case RL_HWREV_8168C:
1464 if (sc->rl_macrev == 0x00200000)
1465 sc->rl_flags |= RL_FLAG_MACSLEEP;
1467 case RL_HWREV_8168CP:
1468 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1469 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1470 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
1472 case RL_HWREV_8168D:
1473 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1474 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1475 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1476 RL_FLAG_WOL_MANLINK;
1478 case RL_HWREV_8168DP:
1479 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1480 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
1481 RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
1483 case RL_HWREV_8168E:
1484 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1485 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1486 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1487 RL_FLAG_WOL_MANLINK;
1489 case RL_HWREV_8168E_VL:
1490 case RL_HWREV_8168F:
1491 sc->rl_flags |= RL_FLAG_EARLYOFF;
1494 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1495 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1496 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1497 RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK;
1499 case RL_HWREV_8168EP:
1500 case RL_HWREV_8168G:
1501 case RL_HWREV_8411B:
1502 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1503 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1504 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1505 RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK |
1508 case RL_HWREV_8168GU:
1509 case RL_HWREV_8168H:
1510 if (pci_get_device(dev) == RT_DEVICEID_8101E) {
1511 /* RTL8106E(US), RTL8107E */
1512 sc->rl_flags |= RL_FLAG_FASTETHER;
1514 sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
1516 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1517 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1518 RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ |
1521 case RL_HWREV_8169_8110SB:
1522 case RL_HWREV_8169_8110SBL:
1523 case RL_HWREV_8169_8110SC:
1524 case RL_HWREV_8169_8110SCE:
1525 sc->rl_flags |= RL_FLAG_PHYWAKE;
1528 case RL_HWREV_8169S:
1529 case RL_HWREV_8110S:
1530 sc->rl_flags |= RL_FLAG_MACRESET;
1536 if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) {
1537 sc->rl_cfg0 = RL_8139_CFG0;
1538 sc->rl_cfg1 = RL_8139_CFG1;
1540 sc->rl_cfg3 = RL_8139_CFG3;
1541 sc->rl_cfg4 = RL_8139_CFG4;
1542 sc->rl_cfg5 = RL_8139_CFG5;
1544 sc->rl_cfg0 = RL_CFG0;
1545 sc->rl_cfg1 = RL_CFG1;
1546 sc->rl_cfg2 = RL_CFG2;
1547 sc->rl_cfg3 = RL_CFG3;
1548 sc->rl_cfg4 = RL_CFG4;
1549 sc->rl_cfg5 = RL_CFG5;
1552 /* Reset the adapter. */
1558 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1559 cfg = CSR_READ_1(sc, sc->rl_cfg1);
1561 CSR_WRITE_1(sc, sc->rl_cfg1, cfg);
1562 cfg = CSR_READ_1(sc, sc->rl_cfg5);
1563 cfg &= RL_CFG5_PME_STS;
1564 CSR_WRITE_1(sc, sc->rl_cfg5, cfg);
1565 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1567 if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
1569 * XXX Should have a better way to extract station
1570 * address from EEPROM.
1572 for (i = 0; i < ETHER_ADDR_LEN; i++)
1573 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
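		/*
		 * Otherwise probe the EEPROM width: word 0 reads back as the
		 * ID 0x8129 only with the correct (93C56) address length; if
		 * that check fails, fall back to 93C46 addressing.
		 */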
1575 sc->rl_eewidth = RL_9356_ADDR_LEN;
1576 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
1577 if (re_did != 0x8129)
1578 sc->rl_eewidth = RL_9346_ADDR_LEN;
1581 * Get station address from the EEPROM.
1583 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
1584 for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
1585 as[i] = le16toh(as[i]);
1586 bcopy(as, eaddr, ETHER_ADDR_LEN);
1589 if (sc->rl_type == RL_8169) {
1590 /* Set RX length mask and number of descriptors. */
1591 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
1592 sc->rl_txstart = RL_GTXSTART;
1593 sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
1594 sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
1596 /* Set RX length mask and number of descriptors. */
1597 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
1598 sc->rl_txstart = RL_TXSTART;
1599 sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
1600 sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
1603 error = re_allocmem(dev, sc);
1608 ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
1610 device_printf(dev, "can not if_alloc()\n");
1615 /* Take controller out of deep sleep mode. */
1616 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
1617 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
1618 CSR_WRITE_1(sc, RL_GPIO,
1619 CSR_READ_1(sc, RL_GPIO) | 0x01);
1621 CSR_WRITE_1(sc, RL_GPIO,
1622 CSR_READ_1(sc, RL_GPIO) & ~0x01);
1625 /* Take PHY out of power down mode. */
1626 if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) {
1627 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
1628 if (hw_rev->rl_rev == RL_HWREV_8401E)
1629 CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
1631 if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
1632 re_gmii_writereg(dev, 1, 0x1f, 0);
1633 re_gmii_writereg(dev, 1, 0x0e, 0);
1637 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1638 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1639 ifp->if_ioctl = re_ioctl;
1640 ifp->if_start = re_start;
1642 * The RTL8168/8111C generates frames with a bad IP checksum if the
1643 * packet has IP options, so disable TX checksum offloading.
1645 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C ||
1646 sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 ||
1647 sc->rl_hwrev->rl_rev == RL_HWREV_8168CP) {
1648 ifp->if_hwassist = 0;
1649 ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TSO4;
1651 ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
1652 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
1654 ifp->if_hwassist |= CSUM_TSO;
1655 ifp->if_capenable = ifp->if_capabilities;
1656 ifp->if_init = re_init;
1657 IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN);
1658 ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
1659 IFQ_SET_READY(&ifp->if_snd);
1661 NET_TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
1663 #define RE_PHYAD_INTERNAL 0
1666 phy = RE_PHYAD_INTERNAL;
1667 if (sc->rl_type == RL_8169)
1669 capmask = BMSR_DEFCAPMASK;
1670 if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
1671 capmask &= ~BMSR_EXTSTAT;
1672 error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd,
1673 re_ifmedia_sts, capmask, phy, MII_OFFSET_ANY, MIIF_DOPAUSE);
1675 device_printf(dev, "attaching PHYs failed\n");
1680 * Call MI attach routine.
1682 ether_ifattach(ifp, eaddr);
1684 /* VLAN capability setup */
1685 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1686 if (ifp->if_capabilities & IFCAP_HWCSUM)
1687 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1688 /* Enable WOL if PM is supported. */
1689 if (pci_find_cap(sc->rl_dev, PCIY_PMG, ®) == 0)
1690 ifp->if_capabilities |= IFCAP_WOL;
1691 ifp->if_capenable = ifp->if_capabilities;
1692 ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST);
1694 * Don't enable TSO by default. It is known to generate
1695 * corrupted TCP segments (bad TCP options) under certain circumstances.
1698 ifp->if_hwassist &= ~CSUM_TSO;
1699 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO);
1700 #ifdef DEVICE_POLLING
1701 ifp->if_capabilities |= IFCAP_POLLING;
1704 * Tell the upper layer(s) we support long frames.
1705 * Must appear after the call to ether_ifattach() because
1706 * ether_ifattach() sets ifi_hdrlen to the default value.
1708 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1711 re_netmap_attach(sc);
1712 #endif /* DEV_NETMAP */
1716 * Perform hardware diagnostic on the original RTL8169.
1717 * Some 32-bit cards were incorrectly wired and would
1718 * malfunction if plugged into a 64-bit slot.
1720 if (hwrev == RL_HWREV_8169) {
1721 error = re_diag(sc);
1724 "attach aborted due to hardware diag failure\n");
1725 ether_ifdetach(ifp);
1731 #ifdef RE_TX_MODERATION
1734 /* Hook interrupt last to avoid having to lock softc */
1735 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
1737 error = bus_setup_intr(dev, sc->rl_irq[0],
1738 INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc,
1739 &sc->rl_intrhand[0]);
1741 error = bus_setup_intr(dev, sc->rl_irq[0],
1742 INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
1743 &sc->rl_intrhand[0]);
1746 device_printf(dev, "couldn't set up irq\n");
1747 ether_ifdetach(ifp);
1751 DEBUGNET_SET(ifp, re);
1761 * Shut down the hardware and free up resources. This can be called any
1762 * time after the mutex has been initialized. It is called in both
1763 * the error case in attach and the normal detach case, so it needs
1764 * to be careful about only freeing resources that have actually been allocated.
1768 re_detach(device_t dev)
1770 struct rl_softc *sc;
1774 sc = device_get_softc(dev);
1776 KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));
1778 /* These should only be active if attach succeeded */
1779 if (device_is_attached(dev)) {
1780 #ifdef DEVICE_POLLING
1781 if (ifp->if_capenable & IFCAP_POLLING)
1782 ether_poll_deregister(ifp);
1790 callout_drain(&sc->rl_stat_callout);
1791 taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
1793 * Force off the IFF_UP flag here, in case someone
1794 * still had a BPF descriptor attached to this
1795 * interface. If they do, ether_ifdetach() will cause
1796 * the BPF code to try to clear the promisc mode
1797 * flag, which will bubble down to re_ioctl(),
1798 * which will try to call re_init() again. This will
1799 * turn the NIC back on and restart the MII ticker,
1800 * which will panic the system when the kernel tries
1801 * to invoke the re_tick() function that isn't there
1804 ifp->if_flags &= ~IFF_UP;
1805 ether_ifdetach(ifp);
1808 device_delete_child(dev, sc->rl_miibus);
1809 bus_generic_detach(dev);
1812 * The rest is resource deallocation, so we should already be stopped.
1816 if (sc->rl_intrhand[0] != NULL) {
1817 bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
1818 sc->rl_intrhand[0] = NULL;
1823 #endif /* DEV_NETMAP */
1826 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
1830 if (sc->rl_irq[0] != NULL) {
1831 bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]);
1832 sc->rl_irq[0] = NULL;
1834 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0)
1835 pci_release_msi(dev);
1836 if (sc->rl_res_pba) {
1838 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba);
1841 bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
1844 /* Unload and free the RX DMA ring memory and map */
1846 if (sc->rl_ldata.rl_rx_list_tag) {
1847 if (sc->rl_ldata.rl_rx_list_addr)
1848 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
1849 sc->rl_ldata.rl_rx_list_map);
1850 if (sc->rl_ldata.rl_rx_list)
1851 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
1852 sc->rl_ldata.rl_rx_list,
1853 sc->rl_ldata.rl_rx_list_map);
1854 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
1857 /* Unload and free the TX DMA ring memory and map */
1859 if (sc->rl_ldata.rl_tx_list_tag) {
1860 if (sc->rl_ldata.rl_tx_list_addr)
1861 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
1862 sc->rl_ldata.rl_tx_list_map);
1863 if (sc->rl_ldata.rl_tx_list)
1864 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
1865 sc->rl_ldata.rl_tx_list,
1866 sc->rl_ldata.rl_tx_list_map);
1867 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
1870 /* Destroy all the RX and TX buffer maps */
1872 if (sc->rl_ldata.rl_tx_mtag) {
1873 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1874 if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap)
1875 bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
1876 sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1878 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
1880 if (sc->rl_ldata.rl_rx_mtag) {
1881 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1882 if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap)
1883 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1884 sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1886 if (sc->rl_ldata.rl_rx_sparemap)
1887 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1888 sc->rl_ldata.rl_rx_sparemap);
1889 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
1891 if (sc->rl_ldata.rl_jrx_mtag) {
1892 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1893 if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap)
1894 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1895 sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1897 if (sc->rl_ldata.rl_jrx_sparemap)
1898 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1899 sc->rl_ldata.rl_jrx_sparemap);
1900 bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag);
1902 /* Unload and free the stats buffer and map */
1904 if (sc->rl_ldata.rl_stag) {
1905 if (sc->rl_ldata.rl_stats_addr)
1906 bus_dmamap_unload(sc->rl_ldata.rl_stag,
1907 sc->rl_ldata.rl_smap);
1908 if (sc->rl_ldata.rl_stats)
1909 bus_dmamem_free(sc->rl_ldata.rl_stag,
1910 sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap);
1911 bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
1914 if (sc->rl_parent_tag)
1915 bus_dma_tag_destroy(sc->rl_parent_tag);
1917 mtx_destroy(&sc->rl_mtx);
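/*
 * Give a receive descriptor back to the chip without replacing its mbuf:
 * restore the original buffer length, keep the end-of-ring mark on the
 * last descriptor and set OWN so the hardware can reuse the buffer.
 */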
1922 static __inline void
1923 re_discard_rxbuf(struct rl_softc *sc, int idx)
1925 struct rl_desc *desc;
1926 struct rl_rxdesc *rxd;
1929 if (sc->rl_ifp->if_mtu > RL_MTU &&
1930 (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
1931 rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1933 rxd = &sc->rl_ldata.rl_rx_desc[idx];
1934 desc = &sc->rl_ldata.rl_rx_list[idx];
1935 desc->rl_vlanctl = 0;
1936 cmdstat = rxd->rx_size;
1937 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1938 cmdstat |= RL_RDESC_CMD_EOR;
1939 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1943 re_newbuf(struct rl_softc *sc, int idx)
1946 struct rl_rxdesc *rxd;
1947 bus_dma_segment_t segs[1];
1949 struct rl_desc *desc;
1953 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1957 m->m_len = m->m_pkthdr.len = MCLBYTES;
1960 * This is part of an evil trick to deal with non-x86 platforms.
1961 * The RealTek chip requires RX buffers to be aligned on 64-bit
1962 * boundaries, but that will hose non-x86 machines. To get around
1963 * this, we leave some empty space at the start of each buffer
1964 * and for non-x86 hosts, we copy the buffer back six bytes
1965 * to achieve word alignment. This is slightly more efficient
1966 * than allocating a new buffer, copying the contents, and
1967 * discarding the old buffer.
1969 m_adj(m, RE_ETHER_ALIGN);
1971 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
1972 sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1977 KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs));
1979 rxd = &sc->rl_ldata.rl_rx_desc[idx];
1980 if (rxd->rx_m != NULL) {
1981 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1982 BUS_DMASYNC_POSTREAD);
1983 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap);
1987 map = rxd->rx_dmamap;
1988 rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
1989 rxd->rx_size = segs[0].ds_len;
1990 sc->rl_ldata.rl_rx_sparemap = map;
1991 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1992 BUS_DMASYNC_PREREAD);
1994 desc = &sc->rl_ldata.rl_rx_list[idx];
1995 desc->rl_vlanctl = 0;
1996 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
1997 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
1998 cmdstat = segs[0].ds_len;
1999 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
2000 cmdstat |= RL_RDESC_CMD_EOR;
2001 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
2007 re_jumbo_newbuf(struct rl_softc *sc, int idx)
2010 struct rl_rxdesc *rxd;
2011 bus_dma_segment_t segs[1];
2013 struct rl_desc *desc;
2017 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
2020 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
2022 m_adj(m, RE_ETHER_ALIGN);
2024 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag,
2025 sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
2030 KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs));
2032 rxd = &sc->rl_ldata.rl_jrx_desc[idx];
2033 if (rxd->rx_m != NULL) {
2034 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
2035 BUS_DMASYNC_POSTREAD);
2036 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap);
2040 map = rxd->rx_dmamap;
2041 rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap;
2042 rxd->rx_size = segs[0].ds_len;
2043 sc->rl_ldata.rl_jrx_sparemap = map;
2044 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
2045 BUS_DMASYNC_PREREAD);
2047 desc = &sc->rl_ldata.rl_rx_list[idx];
2048 desc->rl_vlanctl = 0;
2049 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
2050 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
2051 cmdstat = segs[0].ds_len;
2052 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
2053 cmdstat |= RL_RDESC_CMD_EOR;
2054 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
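/*
 * Shift a received frame back by RE_ETHER_ALIGN - ETHER_ALIGN bytes,
 * one 16-bit word at a time, so the IP header ends up 32-bit aligned
 * on strict-alignment platforms (see the comment in re_newbuf()).
 */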
2060 static __inline void
2061 re_fixup_rx(struct mbuf *m)
2064 uint16_t *src, *dst;
2066 src = mtod(m, uint16_t *);
2067 dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;
2069 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2072 m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
2077 re_tx_list_init(struct rl_softc *sc)
2079 struct rl_desc *desc;
2084 bzero(sc->rl_ldata.rl_tx_list,
2085 sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
2086 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
2087 sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
2089 re_netmap_tx_init(sc);
2090 #endif /* DEV_NETMAP */
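	/* Mark the final descriptor so the chip wraps back to the start of the ring. */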
2092 desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
2093 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);
2095 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2096 sc->rl_ldata.rl_tx_list_map,
2097 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2099 sc->rl_ldata.rl_tx_prodidx = 0;
2100 sc->rl_ldata.rl_tx_considx = 0;
2101 sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
2107 re_rx_list_init(struct rl_softc *sc)
2111 bzero(sc->rl_ldata.rl_rx_list,
2112 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2113 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2114 sc->rl_ldata.rl_rx_desc[i].rx_m = NULL;
2115 if ((error = re_newbuf(sc, i)) != 0)
2119 re_netmap_rx_init(sc);
2120 #endif /* DEV_NETMAP */
2122 /* Flush the RX descriptors */
2124 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2125 sc->rl_ldata.rl_rx_list_map,
2126 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2128 sc->rl_ldata.rl_rx_prodidx = 0;
2129 sc->rl_head = sc->rl_tail = NULL;
2130 sc->rl_int_rx_act = 0;
2136 re_jrx_list_init(struct rl_softc *sc)
2140 bzero(sc->rl_ldata.rl_rx_list,
2141 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2142 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2143 sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL;
2144 if ((error = re_jumbo_newbuf(sc, i)) != 0)
2148 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2149 sc->rl_ldata.rl_rx_list_map,
2150 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2152 sc->rl_ldata.rl_rx_prodidx = 0;
2153 sc->rl_head = sc->rl_tail = NULL;
2154 sc->rl_int_rx_act = 0;
2160 * RX handler for C+ and 8169. For the gigE chips, we support
2161 * the reception of jumbo frames that have been fragmented
2162 * across multiple 2K mbuf cluster buffers.
2165 re_rxeof(struct rl_softc *sc, int *rx_npktsp)
2169 int i, rxerr, total_len;
2170 struct rl_desc *cur_rx;
2171 	uint32_t rxstat, rxvlan;
2172 int jumbo, maxpkt = 16, rx_npkts = 0;
2178 if (netmap_rx_irq(ifp, 0, &rx_npkts))
2180 #endif /* DEV_NETMAP */
2181 if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
2186 /* Invalidate the descriptor memory */
2188 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2189 sc->rl_ldata.rl_rx_list_map,
2190 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2192 for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
2193 i = RL_RX_DESC_NXT(sc, i)) {
2194 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2196 cur_rx = &sc->rl_ldata.rl_rx_list[i];
2197 rxstat = le32toh(cur_rx->rl_cmdstat);
2198 if ((rxstat & RL_RDESC_STAT_OWN) != 0)
2200 total_len = rxstat & sc->rl_rxlenmask;
2201 rxvlan = le32toh(cur_rx->rl_vlanctl);
2203 m = sc->rl_ldata.rl_jrx_desc[i].rx_m;
2205 m = sc->rl_ldata.rl_rx_desc[i].rx_m;
2207 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
2208 (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
2209 (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
2211 * RTL8168C and later controllers do not
2212 * support multi-fragment packets.
2214 re_discard_rxbuf(sc, i);
2216 } else if ((rxstat & RL_RDESC_STAT_EOF) == 0) {
2217 if (re_newbuf(sc, i) != 0) {
2219 * If this is part of a multi-fragment packet,
2220 * discard all the pieces.
2222 if (sc->rl_head != NULL) {
2223 m_freem(sc->rl_head);
2224 sc->rl_head = sc->rl_tail = NULL;
2226 re_discard_rxbuf(sc, i);
2229 m->m_len = RE_RX_DESC_BUFLEN;
2230 if (sc->rl_head == NULL)
2231 sc->rl_head = sc->rl_tail = m;
2233 m->m_flags &= ~M_PKTHDR;
2234 sc->rl_tail->m_next = m;
2241 * NOTE: for the 8139C+, the frame length field
2242 * is always 12 bits in size, but for the gigE chips,
2243 * it is 13 bits (since the max RX frame length is 16K).
2244 * Unfortunately, all 32 bits in the status word
2245 * were already used, so to make room for the extra
2246 * length bit, RealTek took out the 'frame alignment
2247 * error' bit and shifted the other status bits
2248 * over one slot. The OWN, EOR, FS and LS bits are
2249 * still in the same places. We have already extracted
2250 * the frame length and checked the OWN bit, so rather
2251 * than using an alternate bit mapping, we shift the
2252 * status bits one space to the right so we can evaluate
2253 * them using the 8169 status as though it was in the
2254 * same format as that of the 8139C+.
2256 if (sc->rl_type == RL_8169)
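			rxstat >>= 1;	/* shift 8169 status bits into 8139C+ positions */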
2260 * If total_len > 2^13-1, both _RXERRSUM and _GIANT will be
2261 * set, but if the CRC error bit is clear, it is still a valid frame.
2263 if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) {
2265 if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 &&
2267 (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)
2270 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2272 * If this is part of a multi-fragment packet,
2273 * discard all the pieces.
2275 if (sc->rl_head != NULL) {
2276 m_freem(sc->rl_head);
2277 sc->rl_head = sc->rl_tail = NULL;
2279 re_discard_rxbuf(sc, i);
2285 * If allocating a replacement mbuf fails,
2286 * reload the current one.
2289 rxerr = re_jumbo_newbuf(sc, i);
2291 rxerr = re_newbuf(sc, i);
2293 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2294 if (sc->rl_head != NULL) {
2295 m_freem(sc->rl_head);
2296 sc->rl_head = sc->rl_tail = NULL;
2298 re_discard_rxbuf(sc, i);
2302 if (sc->rl_head != NULL) {
2304 m->m_len = total_len;
2306 m->m_len = total_len % RE_RX_DESC_BUFLEN;
2308 m->m_len = RE_RX_DESC_BUFLEN;
2311 * Special case: if this buffer holds 4 bytes
2312 * or fewer, the mbuf can be discarded: the
2313 * last 4 bytes are the CRC, which we don't
2314 * care about anyway.
2316 if (m->m_len <= ETHER_CRC_LEN) {
2317 sc->rl_tail->m_len -=
2318 (ETHER_CRC_LEN - m->m_len);
2321 m->m_len -= ETHER_CRC_LEN;
2322 m->m_flags &= ~M_PKTHDR;
2323 sc->rl_tail->m_next = m;
2326 sc->rl_head = sc->rl_tail = NULL;
2327 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
2329 m->m_pkthdr.len = m->m_len =
2330 (total_len - ETHER_CRC_LEN);
2335 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2336 m->m_pkthdr.rcvif = ifp;
2338 /* Do RX checksumming if enabled */
2340 if (ifp->if_capenable & IFCAP_RXCSUM) {
2341 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2342 /* Check IP header checksum */
2343 if (rxstat & RL_RDESC_STAT_PROTOID)
2344 m->m_pkthdr.csum_flags |=
2346 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
2347 m->m_pkthdr.csum_flags |=
2350 /* Check TCP/UDP checksum */
2351 if ((RL_TCPPKT(rxstat) &&
2352 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2353 (RL_UDPPKT(rxstat) &&
2354 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2355 m->m_pkthdr.csum_flags |=
2356 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2357 m->m_pkthdr.csum_data = 0xffff;
2361 * RTL8168C/RTL8168CP/RTL8111C/RTL8111CP
2363 if ((rxstat & RL_RDESC_STAT_PROTOID) &&
2364 (rxvlan & RL_RDESC_IPV4))
2365 m->m_pkthdr.csum_flags |=
2367 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) &&
2368 (rxvlan & RL_RDESC_IPV4))
2369 m->m_pkthdr.csum_flags |=
2371 if (((rxstat & RL_RDESC_STAT_TCP) &&
2372 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2373 ((rxstat & RL_RDESC_STAT_UDP) &&
2374 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2375 m->m_pkthdr.csum_flags |=
2376 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2377 m->m_pkthdr.csum_data = 0xffff;
2382 if (rxvlan & RL_RDESC_VLANCTL_TAG) {
2383 m->m_pkthdr.ether_vtag =
2384 bswap16((rxvlan & RL_RDESC_VLANCTL_DATA));
2385 m->m_flags |= M_VLANTAG;
2388 (*ifp->if_input)(ifp, m);
2393 /* Flush the RX DMA ring */
2395 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2396 sc->rl_ldata.rl_rx_list_map,
2397 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2399 sc->rl_ldata.rl_rx_prodidx = i;
2401 if (rx_npktsp != NULL)
2402 *rx_npktsp = rx_npkts;
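/*
 * re_txeof: reclaim descriptors for frames the chip has finished sending.
 * Walk the ring from the consumer index until a descriptor still owned by
 * the hardware is found; for the last descriptor of each chain, unload the
 * DMA map, free the stashed mbuf and update the interface counters.
 */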
2410 re_txeof(struct rl_softc *sc)
2413 struct rl_txdesc *txd;
2417 cons = sc->rl_ldata.rl_tx_considx;
2418 if (cons == sc->rl_ldata.rl_tx_prodidx)
2423 if (netmap_tx_irq(ifp, 0))
2425 #endif /* DEV_NETMAP */
2426 /* Invalidate the TX descriptor list */
2427 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2428 sc->rl_ldata.rl_tx_list_map,
2429 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2431 for (; cons != sc->rl_ldata.rl_tx_prodidx;
2432 cons = RL_TX_DESC_NXT(sc, cons)) {
2433 txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
2434 if (txstat & RL_TDESC_STAT_OWN)
2437 * We only stash mbufs in the last descriptor
2438 * in a fragment chain, which also happens to
2439 * be the only place where the TX status bits are reported.
2442 if (txstat & RL_TDESC_CMD_EOF) {
2443 txd = &sc->rl_ldata.rl_tx_desc[cons];
2444 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
2445 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2446 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
2448 KASSERT(txd->tx_m != NULL,
2449 ("%s: freeing NULL mbufs!", __func__));
2452 if (txstat & (RL_TDESC_STAT_EXCESSCOL|
2453 RL_TDESC_STAT_COLCNT))
2454 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
2455 if (txstat & RL_TDESC_STAT_TXERRSUM)
2456 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2458 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2460 sc->rl_ldata.rl_tx_free++;
2461 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2463 sc->rl_ldata.rl_tx_considx = cons;
2465 /* No changes made to the TX ring, so no flush needed */
2467 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
2468 #ifdef RE_TX_MODERATION
2470 * If not all descriptors have been reaped yet, reload
2471 * the timer so that we will eventually get another
2472 * interrupt that will cause us to re-enter this routine.
2473 * This is done in case the transmitter has gone idle.
2475 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2478 sc->rl_watchdog_timer = 0;
2484 struct rl_softc *sc;
2485 struct mii_data *mii;
2491 mii = device_get_softc(sc->rl_miibus);
2493 if ((sc->rl_flags & RL_FLAG_LINK) == 0)
2494 re_miibus_statchg(sc->rl_dev);
2496 * Reclaim transmitted frames here. Strictly speaking this is
2497 * not necessary, but it guarantees periodic reclamation even
2498 * when a Tx completion interrupt is lost, which seems to happen
2499 * on PCIe based controllers under certain conditions.
2503 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
2506 #ifdef DEVICE_POLLING
2508 re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2510 struct rl_softc *sc = ifp->if_softc;
2514 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2515 rx_npkts = re_poll_locked(ifp, cmd, count);
2521 re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2523 struct rl_softc *sc = ifp->if_softc;
2528 sc->rxcycles = count;
2529 re_rxeof(sc, &rx_npkts);
2532 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2533 re_start_locked(ifp);
2535 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
2538 status = CSR_READ_2(sc, RL_ISR);
2539 if (status == 0xffff)
2542 CSR_WRITE_2(sc, RL_ISR, status);
2543 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2544 (sc->rl_flags & RL_FLAG_PCIE))
2545 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2548 * XXX check behaviour on receiver stalls.
2551 if (status & RL_ISR_SYSTEM_ERR) {
2552 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2558 #endif /* DEVICE_POLLING */
2563 struct rl_softc *sc;
2568 status = CSR_READ_2(sc, RL_ISR);
2569 if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
2570 return (FILTER_STRAY);
2571 CSR_WRITE_2(sc, RL_IMR, 0);
2573 taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask);
2575 return (FILTER_HANDLED);
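/*
 * re_int_task: deferred interrupt work, run from a fast taskqueue. The
 * filter handler above masks interrupts and schedules this task, which
 * performs the RX/TX processing, re-queues itself while events are still
 * pending and finally unmasks the interrupts.
 */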
2579 re_int_task(void *arg, int npending)
2581 struct rl_softc *sc;
2591 status = CSR_READ_2(sc, RL_ISR);
2592 CSR_WRITE_2(sc, RL_ISR, status);
2594 if (sc->suspended ||
2595 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2600 #ifdef DEVICE_POLLING
2601 if (ifp->if_capenable & IFCAP_POLLING) {
2607 if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
2608 rval = re_rxeof(sc, NULL);
2611 * Some chips will ignore a second TX request issued
2612 * while an existing transmission is in progress. If
2613 * the transmitter goes idle but there are still
2614 * packets waiting to be sent, we need to restart the
2615 * channel here to flush them out. This only seems to
2616 * be required with the PCIe devices.
2618 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2619 (sc->rl_flags & RL_FLAG_PCIE))
2620 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2622 #ifdef RE_TX_MODERATION
2623 RL_ISR_TIMEOUT_EXPIRED|
2627 RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
2630 if (status & RL_ISR_SYSTEM_ERR) {
2631 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2635 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2636 re_start_locked(ifp);
2640 if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
2641 taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask);
2645 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
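/*
 * re_intr_msi: interrupt handler used with MSI/MSI-X. Unlike the filter +
 * taskqueue path above, it does the RX/TX processing directly and moderates
 * RX interrupts by masking them and re-arming the one-shot countdown timer
 * while receive traffic is active.
 */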
2649 re_intr_msi(void *xsc)
2651 struct rl_softc *sc;
2653 uint16_t intrs, status;
2659 #ifdef DEVICE_POLLING
2660 if (ifp->if_capenable & IFCAP_POLLING) {
2665 /* Disable interrupts. */
2666 CSR_WRITE_2(sc, RL_IMR, 0);
2667 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2672 intrs = RL_INTRS_CPLUS;
2673 status = CSR_READ_2(sc, RL_ISR);
2674 CSR_WRITE_2(sc, RL_ISR, status);
2675 if (sc->rl_int_rx_act > 0) {
2676 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2678 status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2682 if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR |
2683 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) {
2685 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2686 if (sc->rl_int_rx_mod != 0 &&
2687 (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR |
2688 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) {
2689 /* Rearm one-shot timer. */
2690 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2691 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR |
2692 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN);
2693 sc->rl_int_rx_act = 1;
2695 intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR |
2696 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN;
2697 sc->rl_int_rx_act = 0;
2703 * Some chips will ignore a second TX request issued
2704 * while an existing transmission is in progress. If
2705 * the transmitter goes idle but there are still
2706 * packets waiting to be sent, we need to restart the
2707 * channel here to flush them out. This only seems to
2708 * be required with the PCIe devices.
2710 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2711 (sc->rl_flags & RL_FLAG_PCIE))
2712 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2713 if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL))
2716 if (status & RL_ISR_SYSTEM_ERR) {
2717 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2721 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2722 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2723 re_start_locked(ifp);
2724 CSR_WRITE_2(sc, RL_IMR, intrs);
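/*
 * re_encap: encapsulate one mbuf chain into the TX ring. Short frames may
 * be padded manually when IP checksum offload is in use, the chain is
 * collapsed if it needs more than RL_NTXSEGS DMA segments, one descriptor
 * is filled per segment, and ownership is handed to the chip by setting
 * the SOF/OWN bits on the first descriptor last.
 */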
2730 re_encap(struct rl_softc *sc, struct mbuf **m_head)
2732 struct rl_txdesc *txd, *txd_last;
2733 bus_dma_segment_t segs[RL_NTXSEGS];
2736 struct rl_desc *desc;
2738 int i, error, ei, si;
2740 uint32_t cmdstat, csum_flags, vlanctl;
2743 M_ASSERTPKTHDR((*m_head));
2746 * With some of the RealTek chips, using the checksum offload
2747 * support in conjunction with the autopadding feature results
2748 * in the transmission of corrupt frames. For example, if we
2749 * need to send a really small IP fragment that's less than 60
2750 * bytes in size, and IP header checksumming is enabled, the
2751 * resulting ethernet frame that appears on the wire will
2752 * have a garbled payload. To work around this, if TX IP checksum
2753 * offload is enabled, we always manually pad short frames out
2754 * to the minimum ethernet frame size.
2756 if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
2757 (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
2758 ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) {
2759 padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
2760 if (M_WRITABLE(*m_head) == 0) {
2761 /* Get a writable copy. */
2762 m_new = m_dup(*m_head, M_NOWAIT);
2764 if (m_new == NULL) {
2770 if ((*m_head)->m_next != NULL ||
2771 M_TRAILINGSPACE(*m_head) < padlen) {
2772 m_new = m_defrag(*m_head, M_NOWAIT);
2773 if (m_new == NULL) {
2782 * Manually pad short frames, and zero the pad space
2783 * to avoid leaking data.
2785 bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
2786 m_new->m_pkthdr.len += padlen;
2787 m_new->m_len = m_new->m_pkthdr.len;
2791 prod = sc->rl_ldata.rl_tx_prodidx;
2792 txd = &sc->rl_ldata.rl_tx_desc[prod];
2793 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2794 *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2795 if (error == EFBIG) {
2796 m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS);
2797 if (m_new == NULL) {
2803 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
2804 txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2810 } else if (error != 0)
2818 /* Check for number of available descriptors. */
2819 if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
2820 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
2824 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2825 BUS_DMASYNC_PREWRITE);
2828 * Set up checksum offload. Note: checksum offload bits must
2829 * appear in all descriptors of a multi-descriptor transmit
2830 * attempt; testing with an 8169 chip showed this to be a
2831 * hard requirement.
2835 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2836 if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) {
2837 csum_flags |= RL_TDESC_CMD_LGSEND;
2838 vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2839 RL_TDESC_CMD_MSSVALV2_SHIFT);
2841 csum_flags |= RL_TDESC_CMD_LGSEND |
2842 ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2843 RL_TDESC_CMD_MSSVAL_SHIFT);
2847 * Unconditionally enable IP checksumming if a TCP or UDP
2848 * checksum is requested; otherwise, the TCP/UDP checksum
2849 * bits have no effect.
2851 if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
2852 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2853 csum_flags |= RL_TDESC_CMD_IPCSUM;
2854 if (((*m_head)->m_pkthdr.csum_flags &
2856 csum_flags |= RL_TDESC_CMD_TCPCSUM;
2857 if (((*m_head)->m_pkthdr.csum_flags &
2859 csum_flags |= RL_TDESC_CMD_UDPCSUM;
2861 vlanctl |= RL_TDESC_CMD_IPCSUMV2;
2862 if (((*m_head)->m_pkthdr.csum_flags &
2864 vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
2865 if (((*m_head)->m_pkthdr.csum_flags &
2867 vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
2873 * Set up hardware VLAN tagging. Note: the VLAN tag info must
2874 * appear in all descriptors of a multi-descriptor
2875 * transmission attempt.
2877 if ((*m_head)->m_flags & M_VLANTAG)
2878 vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) |
2879 RL_TDESC_VLANCTL_TAG;
2882 for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
2883 desc = &sc->rl_ldata.rl_tx_list[prod];
2884 desc->rl_vlanctl = htole32(vlanctl);
2885 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
2886 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
2887 cmdstat = segs[i].ds_len;
2889 cmdstat |= RL_TDESC_CMD_OWN;
2890 if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
2891 cmdstat |= RL_TDESC_CMD_EOR;
2892 desc->rl_cmdstat = htole32(cmdstat | csum_flags);
2893 sc->rl_ldata.rl_tx_free--;
2895 /* Update producer index. */
2896 sc->rl_ldata.rl_tx_prodidx = prod;
2898 /* Set EOF on the last descriptor. */
2899 ei = RL_TX_DESC_PRV(sc, prod);
2900 desc = &sc->rl_ldata.rl_tx_list[ei];
2901 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
2903 desc = &sc->rl_ldata.rl_tx_list[si];
2904 /* Set SOF and transfer ownership of packet to the chip. */
2905 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);
2908 * Ensure that the map for this transmission
2909 * is placed at the array index of the last descriptor
2910 * in this chain. (Swap last and first dmamaps.)
2912 txd_last = &sc->rl_ldata.rl_tx_desc[ei];
2913 map = txd->tx_dmamap;
2914 txd->tx_dmamap = txd_last->tx_dmamap;
2915 txd_last->tx_dmamap = map;
2916 txd_last->tx_m = *m_head;
2922 re_start(struct ifnet *ifp)
2924 struct rl_softc *sc;
2928 re_start_locked(ifp);
2933 * Main transmit routine for C+ and gigE NICs.
2936 re_start_locked(struct ifnet *ifp)
2938 struct rl_softc *sc;
2939 struct mbuf *m_head;
2945 /* XXX is this necessary ? */
2946 if (ifp->if_capenable & IFCAP_NETMAP) {
2947 struct netmap_kring *kring = NA(ifp)->tx_rings[0];
2948 if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) {
2949 /* kick the tx unit */
2950 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2951 #ifdef RE_TX_MODERATION
2952 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2954 sc->rl_watchdog_timer = 5;
2958 #endif /* DEV_NETMAP */
2960 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2961 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
2964 for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2965 sc->rl_ldata.rl_tx_free > 1;) {
2966 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2970 if (re_encap(sc, &m_head) != 0) {
2973 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2974 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2979 * If there's a BPF listener, bounce a copy of this frame to it.
2982 ETHER_BPF_MTAP(ifp, m_head);
2988 #ifdef RE_TX_MODERATION
2989 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt)
2990 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2999 re_start_tx(struct rl_softc *sc)
3002 /* Flush the TX descriptors */
3003 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
3004 sc->rl_ldata.rl_tx_list_map,
3005 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
3007 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
3009 #ifdef RE_TX_MODERATION
3011 * Use the countdown timer for interrupt moderation.
3012 * 'TX done' interrupts are disabled. Instead, we reset the
3013 * countdown timer, which counts up until it reaches the
3014 * value in the TIMERINT register and then triggers an
3015 * interrupt. Each time we write to the TIMERCNT register,
3016 * the timer count is reset to 0.
3018 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
3022 * Set a timeout in case the chip goes out to lunch.
3024 sc->rl_watchdog_timer = 5;
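/*
 * re_set_jumbo: switch the controller in or out of jumbo frame mode by
 * toggling the jumbo enable bits in the CONFIG3/CONFIG4 registers (with
 * config writes unlocked via the EEPROM command register) and adjusting
 * the PCIe maximum read request size to match.
 */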
3028 re_set_jumbo(struct rl_softc *sc, int jumbo)
3031 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) {
3032 pci_set_max_read_req(sc->rl_dev, 4096);
3036 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3038 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) |
3040 switch (sc->rl_hwrev->rl_rev) {
3041 case RL_HWREV_8168DP:
3043 case RL_HWREV_8168E:
3044 CSR_WRITE_1(sc, sc->rl_cfg4,
3045 CSR_READ_1(sc, sc->rl_cfg4) | 0x01);
3048 CSR_WRITE_1(sc, sc->rl_cfg4,
3049 CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1);
3052 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) &
3053 ~RL_CFG3_JUMBO_EN0);
3054 switch (sc->rl_hwrev->rl_rev) {
3055 case RL_HWREV_8168DP:
3057 case RL_HWREV_8168E:
3058 CSR_WRITE_1(sc, sc->rl_cfg4,
3059 CSR_READ_1(sc, sc->rl_cfg4) & ~0x01);
3062 CSR_WRITE_1(sc, sc->rl_cfg4,
3063 CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1);
3066 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3068 switch (sc->rl_hwrev->rl_rev) {
3069 case RL_HWREV_8168DP:
3070 pci_set_max_read_req(sc->rl_dev, 4096);
3074 pci_set_max_read_req(sc->rl_dev, 512);
3076 pci_set_max_read_req(sc->rl_dev, 4096);
3083 struct rl_softc *sc = xsc;
3091 re_init_locked(struct rl_softc *sc)
3093 struct ifnet *ifp = sc->rl_ifp;
3094 struct mii_data *mii;
3098 	union { uint32_t align_dummy;
3099 		u_char eaddr[ETHER_ADDR_LEN]; } eaddr;
3104 mii = device_get_softc(sc->rl_miibus);
3106 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3110 * Cancel pending I/O and free all RX/TX buffers.
3114 /* Put controller into known state. */
3118 * For C+ mode, initialize the RX descriptors and mbufs.
3120 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3121 if (ifp->if_mtu > RL_MTU) {
3122 if (re_jrx_list_init(sc) != 0) {
3123 device_printf(sc->rl_dev,
3124 "no memory for jumbo RX buffers\n");
3128 /* Disable checksum offloading for jumbo frames. */
3129 ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4);
3130 ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO);
3132 if (re_rx_list_init(sc) != 0) {
3133 device_printf(sc->rl_dev,
3134 "no memory for RX buffers\n");
3139 re_set_jumbo(sc, ifp->if_mtu > RL_MTU);
3141 if (re_rx_list_init(sc) != 0) {
3142 device_printf(sc->rl_dev, "no memory for RX buffers\n");
3146 if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3147 pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) {
3148 if (ifp->if_mtu > RL_MTU)
3149 pci_set_max_read_req(sc->rl_dev, 512);
3151 pci_set_max_read_req(sc->rl_dev, 4096);
3154 re_tx_list_init(sc);
3157 * Enable C+ RX and TX mode, as well as VLAN stripping and
3158 * RX checksum offload. We must configure the C+ register
3159 * before all others.
3161 cfg = RL_CPLUSCMD_PCI_MRW;
3162 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3163 cfg |= RL_CPLUSCMD_RXCSUM_ENB;
3164 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3165 cfg |= RL_CPLUSCMD_VLANSTRIP;
3166 if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) {
3167 cfg |= RL_CPLUSCMD_MACSTAT_DIS;
3171 cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB;
3172 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
3173 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC ||
3174 sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) {
3176 if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0)
3178 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE)
3180 CSR_WRITE_4(sc, 0x7c, reg);
3181 /* Disable interrupt mitigation. */
3182 CSR_WRITE_2(sc, 0xe2, 0);
3185 * Disable TSO if the interface MTU is greater than the
3186 * maximum MSS the controller allows.
3188 if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) {
3189 ifp->if_capenable &= ~IFCAP_TSO4;
3190 ifp->if_hwassist &= ~CSUM_TSO;
3194 * Init our MAC address. Even though the chipset
3195 * documentation doesn't mention it, we need to enter "Config
3196 * register write enable" mode to modify the ID registers.
3198 /* Copy MAC address on stack to align. */
3199 bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN);
3200 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3201 CSR_WRITE_4(sc, RL_IDR0,
3202 htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
3203 CSR_WRITE_4(sc, RL_IDR4,
3204 htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
3205 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3208 * Load the addresses of the RX and TX lists into the chip.
3211 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
3212 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
3213 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
3214 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));
3216 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
3217 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
3218 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
3219 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));
3221 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3222 /* Disable RXDV gate. */
3223 CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
3228 * Enable transmit and receive for pre-RTL8168G controllers.
3229 * RX/TX MACs should be enabled before RX/TX configuration.
3231 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) == 0)
3232 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);
3235 * Set the initial TX configuration.
3237 if (sc->rl_testmode) {
3238 if (sc->rl_type == RL_8169)
3239 CSR_WRITE_4(sc, RL_TXCFG,
3240 RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
3242 CSR_WRITE_4(sc, RL_TXCFG,
3243 RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
3245 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
3247 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
3250 * Set the initial RX configuration.
3254 /* Configure interrupt moderation. */
3255 if (sc->rl_type == RL_8169) {
3256 /* Magic from vendor. */
3257 CSR_WRITE_2(sc, RL_INTRMOD, 0x5100);
3261 * Enable transmit and receive for RTL8168G and later controllers.
3262 * RX/TX MACs should be enabled after RX/TX configuration.
3264 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0)
3265 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);
3267 #ifdef DEVICE_POLLING
3269 * Disable interrupts if we are polling.
3271 if (ifp->if_capenable & IFCAP_POLLING)
3272 CSR_WRITE_2(sc, RL_IMR, 0);
3273 else /* otherwise ... */
3277 * Enable interrupts.
3279 if (sc->rl_testmode)
3280 CSR_WRITE_2(sc, RL_IMR, 0);
3282 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3283 CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS);
3285 /* Set initial TX threshold */
3286 sc->rl_txthresh = RL_TX_THRESH_INIT;
3288 /* Start RX/TX process. */
3289 CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
3292 * Initialize the timer interrupt register so that
3293 * a timer interrupt will be generated once the timer
3294 * reaches a certain number of ticks. The timer is
3295 * reloaded on each transmit.
3297 #ifdef RE_TX_MODERATION
3299 * Use the timer interrupt register for TX interrupt
3300 * moderation, which dramatically improves the TX frame rate.
3302 if (sc->rl_type == RL_8169)
3303 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
3305 CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
3308 * Use the timer interrupt register for RX interrupt moderation.
3311 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
3313 if (sc->rl_type == RL_8169)
3314 CSR_WRITE_4(sc, RL_TIMERINT_8169,
3315 RL_USECS(sc->rl_int_rx_mod));
3317 if (sc->rl_type == RL_8169)
3318 CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0));
3323 * For 8169 gigE NICs, set the max allowed RX packet
3324 * size so we can receive jumbo frames.
3326 if (sc->rl_type == RL_8169) {
3327 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3329 * For controllers that use the new jumbo frame scheme,
3330 * set the maximum jumbo frame size according to the
3331 * controller revision.
3333 if (ifp->if_mtu > RL_MTU)
3334 CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3335 sc->rl_hwrev->rl_max_mtu +
3336 ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN +
3339 CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3341 } else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3342 sc->rl_hwrev->rl_max_mtu == RL_MTU) {
3343 /* RTL810x has no jumbo frame support. */
3344 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
3346 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
3349 if (sc->rl_testmode)
3352 CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |
3355 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3356 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3358 sc->rl_flags &= ~RL_FLAG_LINK;
3361 sc->rl_watchdog_timer = 0;
3362 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
3365 netmap_enable_all_rings(ifp);
3366 #endif /* DEV_NETMAP */
3370 * Set media options.
3373 re_ifmedia_upd(struct ifnet *ifp)
3375 struct rl_softc *sc;
3376 struct mii_data *mii;
3380 mii = device_get_softc(sc->rl_miibus);
3382 error = mii_mediachg(mii);
3389 * Report current media status.
3392 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3394 struct rl_softc *sc;
3395 struct mii_data *mii;
3398 mii = device_get_softc(sc->rl_miibus);
3402 ifmr->ifm_active = mii->mii_media_active;
3403 ifmr->ifm_status = mii->mii_media_status;
3408 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3410 struct rl_softc *sc = ifp->if_softc;
3411 struct ifreq *ifr = (struct ifreq *) data;
3412 struct mii_data *mii;
3417 if (ifr->ifr_mtu < ETHERMIN ||
3418 ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu ||
3419 ((sc->rl_flags & RL_FLAG_FASTETHER) != 0 &&
3420 ifr->ifr_mtu > RL_MTU)) {
3425 if (ifp->if_mtu != ifr->ifr_mtu) {
3426 ifp->if_mtu = ifr->ifr_mtu;
3427 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3428 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3429 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3432 if (ifp->if_mtu > RL_TSO_MTU &&
3433 (ifp->if_capenable & IFCAP_TSO4) != 0) {
3434 ifp->if_capenable &= ~(IFCAP_TSO4 |
3436 ifp->if_hwassist &= ~CSUM_TSO;
3438 VLAN_CAPABILITIES(ifp);
3444 if ((ifp->if_flags & IFF_UP) != 0) {
3445 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3446 if (((ifp->if_flags ^ sc->rl_if_flags)
3447 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3452 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3455 sc->rl_if_flags = ifp->if_flags;
3461 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3467 mii = device_get_softc(sc->rl_miibus);
3468 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
3474 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3476 #ifdef DEVICE_POLLING
3477 if (mask & IFCAP_POLLING) {
3478 if (ifr->ifr_reqcap & IFCAP_POLLING) {
3479 error = ether_poll_register(re_poll, ifp);
3483 /* Disable interrupts */
3484 CSR_WRITE_2(sc, RL_IMR, 0x0000);
3485 ifp->if_capenable |= IFCAP_POLLING;
3488 error = ether_poll_deregister(ifp);
3489 /* Enable interrupts. */
3491 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3492 ifp->if_capenable &= ~IFCAP_POLLING;
3496 #endif /* DEVICE_POLLING */
3498 if ((mask & IFCAP_TXCSUM) != 0 &&
3499 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
3500 ifp->if_capenable ^= IFCAP_TXCSUM;
3501 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
3502 ifp->if_hwassist |= RE_CSUM_FEATURES;
3504 ifp->if_hwassist &= ~RE_CSUM_FEATURES;
3507 if ((mask & IFCAP_RXCSUM) != 0 &&
3508 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
3509 ifp->if_capenable ^= IFCAP_RXCSUM;
3512 if ((mask & IFCAP_TSO4) != 0 &&
3513 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
3514 ifp->if_capenable ^= IFCAP_TSO4;
3515 if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
3516 ifp->if_hwassist |= CSUM_TSO;
3518 ifp->if_hwassist &= ~CSUM_TSO;
3519 if (ifp->if_mtu > RL_TSO_MTU &&
3520 (ifp->if_capenable & IFCAP_TSO4) != 0) {
3521 ifp->if_capenable &= ~IFCAP_TSO4;
3522 ifp->if_hwassist &= ~CSUM_TSO;
3525 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
3526 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
3527 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3528 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
3529 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
3530 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3531 /* TSO over VLAN requires VLAN hardware tagging. */
3532 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
3533 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
3536 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3537 (mask & (IFCAP_HWCSUM | IFCAP_TSO4 |
3538 IFCAP_VLAN_HWTSO)) != 0)
3540 if ((mask & IFCAP_WOL) != 0 &&
3541 (ifp->if_capabilities & IFCAP_WOL) != 0) {
3542 if ((mask & IFCAP_WOL_UCAST) != 0)
3543 ifp->if_capenable ^= IFCAP_WOL_UCAST;
3544 if ((mask & IFCAP_WOL_MCAST) != 0)
3545 ifp->if_capenable ^= IFCAP_WOL_MCAST;
3546 if ((mask & IFCAP_WOL_MAGIC) != 0)
3547 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
3549 if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3550 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3554 VLAN_CAPABILITIES(ifp);
3558 error = ether_ioctl(ifp, command, data);
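/*
 * re_watchdog: per-second TX watchdog. If the timer expires with the ring
 * already empty, assume a missed TX completion interrupt and simply restart
 * transmission; otherwise treat it as a real timeout, count an output error
 * and reinitialize the interface.
 */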
3566 re_watchdog(struct rl_softc *sc)
3572 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
3577 if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) {
3578 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
3580 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3581 re_start_locked(ifp);
3585 if_printf(ifp, "watchdog timeout\n");
3586 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
3589 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3591 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3592 re_start_locked(ifp);
3596 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
3600 re_stop(struct rl_softc *sc)
3604 struct rl_txdesc *txd;
3605 struct rl_rxdesc *rxd;
3611 sc->rl_watchdog_timer = 0;
3612 callout_stop(&sc->rl_stat_callout);
3613 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3616 netmap_disable_all_rings(ifp);
3617 #endif /* DEV_NETMAP */
3620 * Disable frame acceptance to put the RX MAC into an idle state.
3621 * Otherwise it is possible to receive frames while the stop
3622 * command is still executing, and the controller could DMA a
3623 * frame into an RX buffer that has already been freed.
3625 CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
3626 ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI |
3627 RL_RXCFG_RX_BROAD));
3629 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3630 /* Enable RXDV gate. */
3631 CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) |
3635 if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) {
3636 for (i = RL_TIMEOUT; i > 0; i--) {
3637 if ((CSR_READ_1(sc, sc->rl_txstart) &
3638 RL_TXSTART_START) == 0)
3643 device_printf(sc->rl_dev,
3644 "stopping TX poll timed out!\n");
3645 CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3646 } else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) {
3647 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
3649 if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) {
3650 for (i = RL_TIMEOUT; i > 0; i--) {
3651 if ((CSR_READ_4(sc, RL_TXCFG) &
3652 RL_TXCFG_QUEUE_EMPTY) != 0)
3657 device_printf(sc->rl_dev,
3658 "stopping TXQ timed out!\n");
3661 CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3663 CSR_WRITE_2(sc, RL_IMR, 0x0000);
3664 CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
3666 if (sc->rl_head != NULL) {
3667 m_freem(sc->rl_head);
3668 sc->rl_head = sc->rl_tail = NULL;
3671 /* Free the TX list buffers. */
3672 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
3673 txd = &sc->rl_ldata.rl_tx_desc[i];
3674 if (txd->tx_m != NULL) {
3675 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
3676 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3677 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
3684 /* Free the RX list buffers. */
3685 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3686 rxd = &sc->rl_ldata.rl_rx_desc[i];
3687 if (rxd->rx_m != NULL) {
3688 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
3689 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3690 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
3697 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3698 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3699 rxd = &sc->rl_ldata.rl_jrx_desc[i];
3700 if (rxd->rx_m != NULL) {
3701 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag,
3702 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3703 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag,
3713 * Device suspend routine. Stop the interface and save some PCI
3714 * settings in case the BIOS doesn't restore them properly on resume.
3718 re_suspend(device_t dev)
3720 struct rl_softc *sc;
3722 sc = device_get_softc(dev);
3734 * Device resume routine. Restore some PCI settings in case the BIOS
3735 * doesn't, re-enable busmastering, and restart the interface if appropriate.
3739 re_resume(device_t dev)
3741 struct rl_softc *sc;
3744 sc = device_get_softc(dev);
3749 /* Take controller out of sleep mode. */
3750 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3751 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3752 CSR_WRITE_1(sc, RL_GPIO,
3753 CSR_READ_1(sc, RL_GPIO) | 0x01);
3757 * Clear WOL matching so that normal Rx filtering
3758 * does not interfere with the WOL patterns.
3762 /* reinitialize interface if necessary */
3763 if (ifp->if_flags & IFF_UP)
3773 * Stop all chip I/O so that the kernel's probe routines don't
3774 * get confused by errant DMAs when rebooting.
3777 re_shutdown(device_t dev)
3779 struct rl_softc *sc;
3781 sc = device_get_softc(dev);
3786 * Mark the interface as down, since otherwise we will panic if
3787 * an interrupt comes in later on, which can happen in some cases.
3790 sc->rl_ifp->if_flags &= ~IFF_UP;
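/*
 * re_set_linkspeed: force the PHY down to a 10/100 link before suspend.
 * Autonegotiation is restarted with only 10/100 modes advertised and the
 * link state is polled until it resolves; used for WOL on controllers
 * flagged RL_FLAG_WOL_MANLINK, which apparently cannot wake from a
 * gigabit link.
 */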
3798 re_set_linkspeed(struct rl_softc *sc)
3800 struct mii_softc *miisc;
3801 struct mii_data *mii;
3806 mii = device_get_softc(sc->rl_miibus);
3809 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3810 (IFM_ACTIVE | IFM_AVALID)) {
3811 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
3822 miisc = LIST_FIRST(&mii->mii_phys);
3823 phyno = miisc->mii_phy;
3824 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3826 re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0);
3827 re_miibus_writereg(sc->rl_dev, phyno,
3828 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3829 re_miibus_writereg(sc->rl_dev, phyno,
3830 MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
3834 * Poll the link state until re(4) gets a 10/100Mbps link.
3836 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3838 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3839 == (IFM_ACTIVE | IFM_AVALID)) {
3840 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3852 if (i == MII_ANEGTICKS_GIGE)
3853 device_printf(sc->rl_dev,
3854 "establishing a link failed, WOL may not work!");
3857 * No link; force the MAC to a 100Mbps, full-duplex link.
3858 * The MAC does not require reprogramming for the resolved
3859 * speed/duplex, so this is just for completeness.
3861 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3862 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3866 re_setwol(struct rl_softc *sc)
3875 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3879 /* Put controller into sleep mode. */
3880 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3881 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3882 CSR_WRITE_1(sc, RL_GPIO,
3883 CSR_READ_1(sc, RL_GPIO) & ~0x01);
3885 if ((ifp->if_capenable & IFCAP_WOL) != 0) {
3886 if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3887 /* Disable RXDV gate. */
3888 CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
3892 if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0)
3893 re_set_linkspeed(sc);
3894 if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0)
3895 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB);
3897 /* Enable config register write. */
3898 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3901 v = CSR_READ_1(sc, sc->rl_cfg1);
3903 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3905 CSR_WRITE_1(sc, sc->rl_cfg1, v);
3907 v = CSR_READ_1(sc, sc->rl_cfg3);
3908 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3909 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
3910 v |= RL_CFG3_WOL_MAGIC;
3911 CSR_WRITE_1(sc, sc->rl_cfg3, v);
3913 v = CSR_READ_1(sc, sc->rl_cfg5);
3914 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST |
3915 RL_CFG5_WOL_LANWAKE);
3916 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
3917 v |= RL_CFG5_WOL_UCAST;
3918 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
3919 v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
3920 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3921 v |= RL_CFG5_WOL_LANWAKE;
3922 CSR_WRITE_1(sc, sc->rl_cfg5, v);
3924 /* Config register write done. */
3925 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3927 if ((ifp->if_capenable & IFCAP_WOL) == 0 &&
3928 (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0)
3929 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80);
3931 * It seems that hardware resets its link speed to 100Mbps in
3932 * power down mode, so switching to 100Mbps in the driver is not needed.
3936 /* Request PME if WOL is requested. */
3937 pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
3938 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3939 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3940 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3941 pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3945 re_clrwol(struct rl_softc *sc)
3952 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3955 /* Enable config register write. */
3956 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3958 v = CSR_READ_1(sc, sc->rl_cfg3);
3959 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3960 CSR_WRITE_1(sc, sc->rl_cfg3, v);
3962 /* Config register write done. */
3963 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3965 v = CSR_READ_1(sc, sc->rl_cfg5);
3966 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
3967 v &= ~RL_CFG5_WOL_LANWAKE;
3968 CSR_WRITE_1(sc, sc->rl_cfg5, v);
3972 re_add_sysctls(struct rl_softc *sc)
3974 struct sysctl_ctx_list *ctx;
3975 struct sysctl_oid_list *children;
3978 ctx = device_get_sysctl_ctx(sc->rl_dev);
3979 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
3981 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats",
3982 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
3983 re_sysctl_stats, "I", "Statistics Information");
3984 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
3987 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod",
3988 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3989 &sc->rl_int_rx_mod, 0, sysctl_hw_re_int_mod, "I",
3990 "re RX interrupt moderation");
3991 /* Pull in device tunables. */
3992 sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3993 error = resource_int_value(device_get_name(sc->rl_dev),
3994 device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod);
3996 if (sc->rl_int_rx_mod < RL_TIMER_MIN ||
3997 sc->rl_int_rx_mod > RL_TIMER_MAX) {
3998 device_printf(sc->rl_dev, "int_rx_mod value out of "
3999 "range; using default: %d\n",
4001 sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
4007 re_sysctl_stats(SYSCTL_HANDLER_ARGS)
4009 struct rl_softc *sc;
4010 struct rl_stats *stats;
4011 int error, i, result;
4014 error = sysctl_handle_int(oidp, &result, 0, req);
4015 if (error || req->newptr == NULL)
4019 sc = (struct rl_softc *)arg1;
4021 if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4025 bus_dmamap_sync(sc->rl_ldata.rl_stag,
4026 sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD);
4027 CSR_WRITE_4(sc, RL_DUMPSTATS_HI,
4028 RL_ADDR_HI(sc->rl_ldata.rl_stats_addr));
4029 CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
4030 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr));
4031 CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
4032 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr |
4033 RL_DUMPSTATS_START));
4034 for (i = RL_TIMEOUT; i > 0; i--) {
4035 if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) &
4036 RL_DUMPSTATS_START) == 0)
4040 bus_dmamap_sync(sc->rl_ldata.rl_stag,
4041 sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD);
4044 device_printf(sc->rl_dev,
4045 "DUMP statistics request timed out\n");
4049 stats = sc->rl_ldata.rl_stats;
4050 printf("%s statistics:\n", device_get_nameunit(sc->rl_dev));
4051 printf("Tx frames : %ju\n",
4052 (uintmax_t)le64toh(stats->rl_tx_pkts));
4053 printf("Rx frames : %ju\n",
4054 (uintmax_t)le64toh(stats->rl_rx_pkts));
4055 printf("Tx errors : %ju\n",
4056 (uintmax_t)le64toh(stats->rl_tx_errs));
4057 printf("Rx errors : %u\n",
4058 le32toh(stats->rl_rx_errs));
4059 printf("Rx missed frames : %u\n",
4060 (uint32_t)le16toh(stats->rl_missed_pkts));
4061 printf("Rx frame alignment errs : %u\n",
4062 (uint32_t)le16toh(stats->rl_rx_framealign_errs));
4063 printf("Tx single collisions : %u\n",
4064 le32toh(stats->rl_tx_onecoll));
4065 printf("Tx multiple collisions : %u\n",
4066 le32toh(stats->rl_tx_multicolls));
4067 printf("Rx unicast frames : %ju\n",
4068 (uintmax_t)le64toh(stats->rl_rx_ucasts));
4069 printf("Rx broadcast frames : %ju\n",
4070 (uintmax_t)le64toh(stats->rl_rx_bcasts));
4071 printf("Rx multicast frames : %u\n",
4072 le32toh(stats->rl_rx_mcasts));
4073 printf("Tx aborts : %u\n",
4074 (uint32_t)le16toh(stats->rl_tx_aborts));
4075 printf("Tx underruns : %u\n",
4076 (uint32_t)le16toh(stats->rl_rx_underruns));
4083 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4089 value = *(int *)arg1;
4090 error = sysctl_handle_int(oidp, &value, 0, req);
4091 if (error || req->newptr == NULL)
4093 if (value < low || value > high)
4095 *(int *)arg1 = value;
4101 sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS)
4104 return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN,
4110 re_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
4112 struct rl_softc *sc;
4114 sc = if_getsoftc(ifp);
4116 *nrxr = sc->rl_ldata.rl_rx_desc_cnt;
4117 *ncl = DEBUGNET_MAX_IN_FLIGHT;
4118 *clsize = (ifp->if_mtu > RL_MTU &&
4119 (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) ? MJUM9BYTES : MCLBYTES;
4124 re_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused)
4129 re_debugnet_transmit(struct ifnet *ifp, struct mbuf *m)
4131 struct rl_softc *sc;
4134 sc = if_getsoftc(ifp);
4135 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4136 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
4139 error = re_encap(sc, &m);
4146 re_debugnet_poll(struct ifnet *ifp, int count)
4148 struct rl_softc *sc;
4151 sc = if_getsoftc(ifp);
4152 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
4153 (sc->rl_flags & RL_FLAG_LINK) == 0)
4157 error = re_rxeof(sc, NULL);
4158 if (error != 0 && error != EAGAIN)
4162 #endif /* DEBUGNET */