2 * Copyright (c) 1997, 1998-2003
3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
37 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
39 * Written by Bill Paul <wpaul@windriver.com>
40 * Senior Networking Software Engineer
45 * This driver is designed to support RealTek's next generation of
46 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
47 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
48 * the RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
50 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
51 * with the older 8139 family; however, it also supports a special
52 * C+ mode of operation that provides several new performance enhancing
53 * features. These include:
55 * o Descriptor based DMA mechanism. Each descriptor represents
56 * a single packet fragment. Data buffers may be aligned on
61 * o TCP/IP checksum offload for both RX and TX
63 * o High and normal priority transmit DMA rings
65 * o VLAN tag insertion and extraction
67 * o TCP large send (segmentation offload)
69 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
70 * programming API is fairly straightforward. The RX filtering, EEPROM
71 * access and PHY access are the same as on the older 8139 series
74 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
75 * same programming API and feature set as the 8139C+ with the following
76 * differences and additions:
82 * o GMII and TBI ports/registers for interfacing with copper
85 * o RX and TX DMA rings can have up to 1024 descriptors
86 * (the 8139C+ allows a maximum of 64)
88 * o Slight differences in register layout from the 8139C+
90 * The TX start and timer interrupt registers are at different locations
91 * on the 8169 than they are on the 8139C+. Also, the status word in the
92 * RX descriptor has a slightly different bit layout. The 8169 does not
93 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
96 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
97 * (the 'S' stands for 'single-chip'). These devices have the same
98 * programming API as the older 8169, but also have some vendor-specific
99 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
100 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
102 * This driver takes advantage of the RX and TX checksum offload and
103 * VLAN tag insertion/extraction features. It also implements TX
104 * interrupt moderation using the timer interrupt registers, which
105 * significantly reduces TX interrupt load. There is also support
106 * for jumbo frames; however, the 8169/8169S/8110S cannot transmit
107 * jumbo frames larger than 7440 bytes, so the max MTU possible with
108 * this driver is 7422 bytes.
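 *
 * (For reference, 7422 follows directly from that limit: a 7440-byte
 * frame less the 14-byte Ethernet header and 4-byte CRC leaves
 * 7422 bytes of payload for the MTU.)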
111 #ifdef HAVE_KERNEL_OPTION_HEADERS
112 #include "opt_device_polling.h"
115 #include <sys/param.h>
116 #include <sys/endian.h>
117 #include <sys/systm.h>
118 #include <sys/sockio.h>
119 #include <sys/mbuf.h>
120 #include <sys/malloc.h>
121 #include <sys/module.h>
122 #include <sys/kernel.h>
123 #include <sys/socket.h>
124 #include <sys/lock.h>
125 #include <sys/mutex.h>
126 #include <sys/sysctl.h>
127 #include <sys/taskqueue.h>
130 #include <net/if_arp.h>
131 #include <net/ethernet.h>
132 #include <net/if_dl.h>
133 #include <net/if_media.h>
134 #include <net/if_types.h>
135 #include <net/if_vlan_var.h>
139 #include <machine/bus.h>
140 #include <machine/resource.h>
142 #include <sys/rman.h>
144 #include <dev/mii/mii.h>
145 #include <dev/mii/miivar.h>
147 #include <dev/pci/pcireg.h>
148 #include <dev/pci/pcivar.h>
150 #include <pci/if_rlreg.h>
152 MODULE_DEPEND(re, pci, 1, 1, 1);
153 MODULE_DEPEND(re, ether, 1, 1, 1);
154 MODULE_DEPEND(re, miibus, 1, 1, 1);
156 /* "device miibus" required. See GENERIC if you get errors here. */
157 #include "miibus_if.h"
160 static int intr_filter = 0;
161 TUNABLE_INT("hw.re.intr_filter", &intr_filter);
162 static int msi_disable = 0;
163 TUNABLE_INT("hw.re.msi_disable", &msi_disable);
164 static int msix_disable = 0;
165 TUNABLE_INT("hw.re.msix_disable", &msix_disable);
166 static int prefer_iomap = 0;
167 TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);
169 #define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
172 * Various supported device vendors/types and their names.
174 static const struct rl_type re_devs[] = {
175 { DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
176 "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
177 { DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0,
178 "D-Link DGE-530(T) Gigabit Ethernet Adapter" },
179 { RT_VENDORID, RT_DEVICEID_8139, 0,
180 "RealTek 8139C+ 10/100BaseTX" },
181 { RT_VENDORID, RT_DEVICEID_8101E, 0,
182 "RealTek 810xE PCIe 10/100baseTX" },
183 { RT_VENDORID, RT_DEVICEID_8168, 0,
184 "RealTek 8168/8111 B/C/CP/D/DP/E/F/G PCIe Gigabit Ethernet" },
185 { RT_VENDORID, RT_DEVICEID_8169, 0,
186 "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
187 { RT_VENDORID, RT_DEVICEID_8169SC, 0,
188 "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
189 { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
190 "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
191 { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
192 "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
193 { USR_VENDORID, USR_DEVICEID_997902, 0,
194 "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
197 static const struct rl_hwrev re_hwrevs[] = {
198 { RL_HWREV_8139, RL_8139, "", RL_MTU },
199 { RL_HWREV_8139A, RL_8139, "A", RL_MTU },
200 { RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
201 { RL_HWREV_8139B, RL_8139, "B", RL_MTU },
202 { RL_HWREV_8130, RL_8139, "8130", RL_MTU },
203 { RL_HWREV_8139C, RL_8139, "C", RL_MTU },
204 { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU },
205 { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU },
206 { RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU },
207 { RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU },
208 { RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU },
209 { RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU },
210 { RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU },
211 { RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
212 { RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU },
213 { RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
214 { RL_HWREV_8100, RL_8139, "8100", RL_MTU },
215 { RL_HWREV_8101, RL_8139, "8101", RL_MTU },
216 { RL_HWREV_8100E, RL_8169, "8100E", RL_MTU },
217 { RL_HWREV_8101E, RL_8169, "8101E", RL_MTU },
218 { RL_HWREV_8102E, RL_8169, "8102E", RL_MTU },
219 { RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU },
220 { RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU },
221 { RL_HWREV_8103E, RL_8169, "8103E", RL_MTU },
222 { RL_HWREV_8401E, RL_8169, "8401E", RL_MTU },
223 { RL_HWREV_8402, RL_8169, "8402", RL_MTU },
224 { RL_HWREV_8105E, RL_8169, "8105E", RL_MTU },
225 { RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU },
226 { RL_HWREV_8106E, RL_8169, "8106E", RL_MTU },
227 { RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU },
228 { RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU },
229 { RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
230 { RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
231 { RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K },
232 { RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K },
233 { RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K },
234 { RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K},
235 { RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K},
236 { RL_HWREV_8168EP, RL_8169, "8168EP/8111EP", RL_JUMBO_MTU_9K},
237 { RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K},
238 { RL_HWREV_8168G, RL_8169, "8168G/8111G", RL_JUMBO_MTU_9K},
239 { RL_HWREV_8168GU, RL_8169, "8168GU/8111GU", RL_JUMBO_MTU_9K},
240 { RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K},
241 { RL_HWREV_8411B, RL_8169, "8411B", RL_JUMBO_MTU_9K},
245 static int re_probe (device_t);
246 static int re_attach (device_t);
247 static int re_detach (device_t);
249 static int re_encap (struct rl_softc *, struct mbuf **);
251 static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int);
252 static int re_allocmem (device_t, struct rl_softc *);
253 static __inline void re_discard_rxbuf
254 (struct rl_softc *, int);
255 static int re_newbuf (struct rl_softc *, int);
256 static int re_jumbo_newbuf (struct rl_softc *, int);
257 static int re_rx_list_init (struct rl_softc *);
258 static int re_jrx_list_init (struct rl_softc *);
259 static int re_tx_list_init (struct rl_softc *);
261 static __inline void re_fixup_rx
264 static int re_rxeof (struct rl_softc *, int *);
265 static void re_txeof (struct rl_softc *);
266 #ifdef DEVICE_POLLING
267 static int re_poll (struct ifnet *, enum poll_cmd, int);
268 static int re_poll_locked (struct ifnet *, enum poll_cmd, int);
270 static int re_intr (void *);
271 static void re_intr_msi (void *);
272 static void re_tick (void *);
273 static void re_int_task (void *, int);
274 static void re_start (struct ifnet *);
275 static void re_start_locked (struct ifnet *);
276 static int re_ioctl (struct ifnet *, u_long, caddr_t);
277 static void re_init (void *);
278 static void re_init_locked (struct rl_softc *);
279 static void re_stop (struct rl_softc *);
280 static void re_watchdog (struct rl_softc *);
281 static int re_suspend (device_t);
282 static int re_resume (device_t);
283 static int re_shutdown (device_t);
284 static int re_ifmedia_upd (struct ifnet *);
285 static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *);
287 static void re_eeprom_putbyte (struct rl_softc *, int);
288 static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *);
289 static void re_read_eeprom (struct rl_softc *, caddr_t, int, int);
290 static int re_gmii_readreg (device_t, int, int);
291 static int re_gmii_writereg (device_t, int, int, int);
293 static int re_miibus_readreg (device_t, int, int);
294 static int re_miibus_writereg (device_t, int, int, int);
295 static void re_miibus_statchg (device_t);
297 static void re_set_jumbo (struct rl_softc *, int);
298 static void re_set_rxmode (struct rl_softc *);
299 static void re_reset (struct rl_softc *);
300 static void re_setwol (struct rl_softc *);
301 static void re_clrwol (struct rl_softc *);
302 static void re_set_linkspeed (struct rl_softc *);
304 #ifdef DEV_NETMAP /* see ixgbe.c for details */
305 #include <dev/netmap/if_re_netmap.h>
306 #endif /* !DEV_NETMAP */
309 static int re_diag (struct rl_softc *);
312 static void re_add_sysctls (struct rl_softc *);
313 static int re_sysctl_stats (SYSCTL_HANDLER_ARGS);
314 static int sysctl_int_range (SYSCTL_HANDLER_ARGS, int, int);
315 static int sysctl_hw_re_int_mod (SYSCTL_HANDLER_ARGS);
317 static device_method_t re_methods[] = {
318 /* Device interface */
319 DEVMETHOD(device_probe, re_probe),
320 DEVMETHOD(device_attach, re_attach),
321 DEVMETHOD(device_detach, re_detach),
322 DEVMETHOD(device_suspend, re_suspend),
323 DEVMETHOD(device_resume, re_resume),
324 DEVMETHOD(device_shutdown, re_shutdown),
327 DEVMETHOD(miibus_readreg, re_miibus_readreg),
328 DEVMETHOD(miibus_writereg, re_miibus_writereg),
329 DEVMETHOD(miibus_statchg, re_miibus_statchg),
334 static driver_t re_driver = {
337 sizeof(struct rl_softc)
340 static devclass_t re_devclass;
342 DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
343 DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);
346 CSR_WRITE_1(sc, RL_EECMD, \
347 CSR_READ_1(sc, RL_EECMD) | x)
350 CSR_WRITE_1(sc, RL_EECMD, \
351 CSR_READ_1(sc, RL_EECMD) & ~x)
354 * Send a read command and address to the EEPROM, check for ACK.
357 re_eeprom_putbyte(struct rl_softc *sc, int addr)
361 d = addr | (RL_9346_READ << sc->rl_eewidth);
364 * Feed in each bit and strobe the clock.
367 for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
369 EE_SET(RL_EE_DATAIN);
371 EE_CLR(RL_EE_DATAIN);
382 * Read a word of data stored in the EEPROM at address 'addr.'
385 re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
391 * Send address of word we want to read.
393 re_eeprom_putbyte(sc, addr);
396 * Start reading bits from EEPROM.
398 for (i = 0x8000; i; i >>= 1) {
401 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
411 * Read a sequence of words from the EEPROM.
414 re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
417 u_int16_t word = 0, *ptr;
419 CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
423 for (i = 0; i < cnt; i++) {
424 CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
425 re_eeprom_getword(sc, off + i, &word);
426 CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
427 ptr = (u_int16_t *)(dest + (i * 2));
431 CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
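
/*
 * Typical use, as in re_attach() below: the station address is read as
 * three 16-bit words starting at RL_EE_EADDR:
 *
 *	re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
 */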
435 re_gmii_readreg(device_t dev, int phy, int reg)
441 sc = device_get_softc(dev);
443 /* Let the rgephy driver read the GMEDIASTAT register */
445 if (reg == RL_GMEDIASTAT) {
446 rval = CSR_READ_1(sc, RL_GMEDIASTAT);
450 CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
452 for (i = 0; i < RL_PHY_TIMEOUT; i++) {
453 rval = CSR_READ_4(sc, RL_PHYAR);
454 if (rval & RL_PHYAR_BUSY)
459 if (i == RL_PHY_TIMEOUT) {
460 device_printf(sc->rl_dev, "PHY read failed\n");
465 * The controller requires a 20us delay to process the next MDIO request.
469 return (rval & RL_PHYAR_PHYDATA);
473 re_gmii_writereg(device_t dev, int phy, int reg, int data)
479 sc = device_get_softc(dev);
481 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
482 (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
484 for (i = 0; i < RL_PHY_TIMEOUT; i++) {
485 rval = CSR_READ_4(sc, RL_PHYAR);
486 if (!(rval & RL_PHYAR_BUSY))
491 if (i == RL_PHY_TIMEOUT) {
492 device_printf(sc->rl_dev, "PHY write failed\n");
497 * The controller requires a 20us delay to process the next MDIO request.
505 re_miibus_readreg(device_t dev, int phy, int reg)
509 u_int16_t re8139_reg = 0;
511 sc = device_get_softc(dev);
513 if (sc->rl_type == RL_8169) {
514 rval = re_gmii_readreg(dev, phy, reg);
520 re8139_reg = RL_BMCR;
523 re8139_reg = RL_BMSR;
526 re8139_reg = RL_ANAR;
529 re8139_reg = RL_ANER;
532 re8139_reg = RL_LPAR;
538 * Allow the rlphy driver to read the media status
539 * register. If we have a link partner which does not
540 * support NWAY, this is the register which will tell
541 * us the results of parallel detection.
544 rval = CSR_READ_1(sc, RL_MEDIASTAT);
547 device_printf(sc->rl_dev, "bad phy register\n");
550 rval = CSR_READ_2(sc, re8139_reg);
551 if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
552 /* 8139C+ has different bit layout. */
553 rval &= ~(BMCR_LOOP | BMCR_ISO);
559 re_miibus_writereg(device_t dev, int phy, int reg, int data)
562 u_int16_t re8139_reg = 0;
565 sc = device_get_softc(dev);
567 if (sc->rl_type == RL_8169) {
568 rval = re_gmii_writereg(dev, phy, reg, data);
574 re8139_reg = RL_BMCR;
575 if (sc->rl_type == RL_8139CPLUS) {
576 /* 8139C+ has different bit layout. */
577 data &= ~(BMCR_LOOP | BMCR_ISO);
581 re8139_reg = RL_BMSR;
584 re8139_reg = RL_ANAR;
587 re8139_reg = RL_ANER;
590 re8139_reg = RL_LPAR;
597 device_printf(sc->rl_dev, "bad phy register\n");
600 CSR_WRITE_2(sc, re8139_reg, data);
605 re_miibus_statchg(device_t dev)
609 struct mii_data *mii;
611 sc = device_get_softc(dev);
612 mii = device_get_softc(sc->rl_miibus);
614 if (mii == NULL || ifp == NULL ||
615 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
618 sc->rl_flags &= ~RL_FLAG_LINK;
619 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
620 (IFM_ACTIVE | IFM_AVALID)) {
621 switch (IFM_SUBTYPE(mii->mii_media_active)) {
624 sc->rl_flags |= RL_FLAG_LINK;
627 if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
629 sc->rl_flags |= RL_FLAG_LINK;
636 * RealTek controllers do not provide any interface to
637 * Tx/Rx MACs for resolved speed, duplex and flow-control
643 * Set the RX configuration and 64-bit multicast hash filter.
646 re_set_rxmode(struct rl_softc *sc)
649 struct ifmultiaddr *ifma;
650 uint32_t hashes[2] = { 0, 0 };
657 rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;
659 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
660 if (ifp->if_flags & IFF_PROMISC)
661 rxfilt |= RL_RXCFG_RX_ALLPHYS;
663 * Unlike other hardware, we have to explicitly set
664 * RL_RXCFG_RX_MULTI to receive multicast frames in
667 rxfilt |= RL_RXCFG_RX_MULTI;
668 hashes[0] = hashes[1] = 0xffffffff;
673 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
674 if (ifma->ifma_addr->sa_family != AF_LINK)
676 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
677 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
679 hashes[0] |= (1 << h);
681 hashes[1] |= (1 << (h - 32));
683 if_maddr_runlock(ifp);
685 if (hashes[0] != 0 || hashes[1] != 0) {
687 * For some unfathomable reason, RealTek decided to
688 * reverse the order of the multicast hash registers
689 * in the PCI Express parts. This means we have to
690 * write the hash pattern in reverse order for those
693 if ((sc->rl_flags & RL_FLAG_PCIE) != 0) {
694 h = bswap32(hashes[0]);
695 hashes[0] = bswap32(hashes[1]);
698 rxfilt |= RL_RXCFG_RX_MULTI;
702 CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
703 CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
704 CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
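
/*
 * To summarize the filter programmed above: the top 6 bits of the
 * big-endian CRC-32 of each multicast address select one of 64 hash
 * bits, with bits 0-31 written to RL_MAR0 and bits 32-63 to RL_MAR4;
 * on PCIe parts the two words are byte-swapped and exchanged first.
 */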
708 re_reset(struct rl_softc *sc)
714 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
716 for (i = 0; i < RL_TIMEOUT; i++) {
718 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
722 device_printf(sc->rl_dev, "reset never completed!\n");
724 if ((sc->rl_flags & RL_FLAG_MACRESET) != 0)
725 CSR_WRITE_1(sc, 0x82, 1);
726 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S)
727 re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0);
733 * The following routine is designed to test for a defect on some
734 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
735 * lines connected to the bus; however, for a 32-bit only card, they
736 * should be pulled high. The result of this defect is that the
737 * NIC will not work right if you plug it into a 64-bit slot: DMA
738 * operations will be done with 64-bit transfers, which will fail
739 * because the 64-bit data lines aren't connected.
741 * There's no way to work around this (short of taking a soldering
742 * iron to the board); however, we can detect it. The method we use
743 * here is to put the NIC into digital loopback mode, set the receiver
744 * to promiscuous mode, and then try to send a frame. We then compare
745 * the frame data we sent to what was received. If the data matches,
746 * then the NIC is working correctly; otherwise we know the user has
747 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
748 * slot. In the latter case, there's no way the NIC can work correctly,
749 * so we print out a message on the console and abort the device attach.
753 re_diag(struct rl_softc *sc)
755 struct ifnet *ifp = sc->rl_ifp;
757 struct ether_header *eh;
758 struct rl_desc *cur_rx;
761 int total_len, i, error = 0, phyaddr;
762 u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
763 u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
765 /* Allocate a single mbuf */
766 MGETHDR(m0, M_NOWAIT, MT_DATA);
773 * Initialize the NIC in test mode. This sets the chip up
774 * so that it can send and receive frames, but performs the
775 * following special functions:
776 * - Puts receiver in promiscuous mode
777 * - Enables digital loopback mode
778 * - Leaves interrupts turned off
781 ifp->if_flags |= IFF_PROMISC;
783 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
785 sc->rl_flags |= RL_FLAG_LINK;
786 if (sc->rl_type == RL_8169)
791 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
792 for (i = 0; i < RL_TIMEOUT; i++) {
793 status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
794 if (!(status & BMCR_RESET))
798 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
799 CSR_WRITE_2(sc, RL_ISR, RL_INTRS);
803 /* Put some data in the mbuf */
805 eh = mtod(m0, struct ether_header *);
806 bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
807 bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
808 eh->ether_type = htons(ETHERTYPE_IP);
809 m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;
812 * Queue the packet, start transmission.
813 * Note: IF_HANDOFF() ultimately calls re_start() for us.
816 CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
818 /* XXX: re_diag must not be called when in ALTQ mode */
819 IF_HANDOFF(&ifp->if_snd, m0, ifp);
823 /* Wait for it to propagate through the chip */
826 for (i = 0; i < RL_TIMEOUT; i++) {
827 status = CSR_READ_2(sc, RL_ISR);
828 CSR_WRITE_2(sc, RL_ISR, status);
829 if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
830 (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
835 if (i == RL_TIMEOUT) {
836 device_printf(sc->rl_dev,
837 "diagnostic failed, failed to receive packet in"
844 * The packet should have been dumped into the first
845 * entry in the RX DMA ring. Grab it from there.
848 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
849 sc->rl_ldata.rl_rx_list_map,
850 BUS_DMASYNC_POSTREAD);
851 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
852 sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
853 BUS_DMASYNC_POSTREAD);
854 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
855 sc->rl_ldata.rl_rx_desc[0].rx_dmamap);
857 m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
858 sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
859 eh = mtod(m0, struct ether_header *);
861 cur_rx = &sc->rl_ldata.rl_rx_list[0];
862 total_len = RL_RXBYTES(cur_rx);
863 rxstat = le32toh(cur_rx->rl_cmdstat);
865 if (total_len != ETHER_MIN_LEN) {
866 device_printf(sc->rl_dev,
867 "diagnostic failed, received short packet\n");
872 /* Test that the received packet data matches what we sent. */
874 if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
875 bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
876 ntohs(eh->ether_type) != ETHERTYPE_IP) {
877 device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
878 device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
879 dst, ":", src, ":", ETHERTYPE_IP);
880 device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
881 eh->ether_dhost, ":", eh->ether_shost, ":",
882 ntohs(eh->ether_type));
883 device_printf(sc->rl_dev, "You may have a defective 32-bit "
884 "NIC plugged into a 64-bit PCI slot.\n");
885 device_printf(sc->rl_dev, "Please re-install the NIC in a "
886 "32-bit slot for proper operation.\n");
887 device_printf(sc->rl_dev, "Read the re(4) man page for more "
893 /* Turn interface off, release resources */
896 sc->rl_flags &= ~RL_FLAG_LINK;
897 ifp->if_flags &= ~IFF_PROMISC;
910 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
911 * IDs against our list and return a device name if we find a match.
914 re_probe(device_t dev)
916 const struct rl_type *t;
917 uint16_t devid, vendor;
918 uint16_t revid, sdevid;
921 vendor = pci_get_vendor(dev);
922 devid = pci_get_device(dev);
923 revid = pci_get_revid(dev);
924 sdevid = pci_get_subdevice(dev);
926 if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) {
927 if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) {
929 * Only attach to rev. 3 of the Linksys EG1032 adapter.
930 * Rev. 2 is supported by sk(4).
936 if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
938 /* 8139, let rl(4) take care of this device. */
944 for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) {
945 if (vendor == t->rl_vid && devid == t->rl_did) {
946 device_set_desc(dev, t->rl_name);
947 return (BUS_PROBE_DEFAULT);
955 * Map a single buffer address.
959 re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
966 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
968 *addr = segs->ds_addr;
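
/*
 * re_dma_map_addr() is used below as the bus_dmamap_load() callback for
 * the TX/RX descriptor rings and the statistics block; it simply
 * records the single bus address of the loaded region.
 */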
972 re_allocmem(device_t dev, struct rl_softc *sc)
975 bus_size_t rx_list_size, tx_list_size;
979 rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
980 tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);
983 * Allocate the parent bus DMA tag appropriate for PCI.
984 * In order to use DAC, the RL_CPLUSCMD_PCI_DAC bit of the RL_CPLUS_CMD
985 * register should be set. However, some RealTek chips are known to
986 * have buggy DAC handling, so disable DAC by limiting the DMA
987 * address space to 32 bits. PCIe variants of RealTek chips may not
988 * have this limitation.
990 lowaddr = BUS_SPACE_MAXADDR;
991 if ((sc->rl_flags & RL_FLAG_PCIE) == 0)
992 lowaddr = BUS_SPACE_MAXADDR_32BIT;
993 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
994 lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
995 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
996 NULL, NULL, &sc->rl_parent_tag);
998 device_printf(dev, "could not allocate parent DMA tag\n");
1003 * Allocate map for TX mbufs.
1005 error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
1006 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1007 NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
1008 NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
1010 device_printf(dev, "could not allocate TX DMA tag\n");
1015 * Allocate map for RX mbufs.
1018 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
1019 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t),
1020 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1021 MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL,
1022 &sc->rl_ldata.rl_jrx_mtag);
1025 "could not allocate jumbo RX DMA tag\n");
1029 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
1030 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1031 MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
1033 device_printf(dev, "could not allocate RX DMA tag\n");
1038 * Allocate map for TX descriptor list.
1040 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1041 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1042 NULL, tx_list_size, 1, tx_list_size, 0,
1043 NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
1045 device_printf(dev, "could not allocate TX DMA ring tag\n");
1049 /* Allocate DMA'able memory for the TX ring */
1051 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
1052 (void **)&sc->rl_ldata.rl_tx_list,
1053 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1054 &sc->rl_ldata.rl_tx_list_map);
1056 device_printf(dev, "could not allocate TX DMA ring\n");
1060 /* Load the map for the TX ring. */
1062 sc->rl_ldata.rl_tx_list_addr = 0;
1063 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
1064 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
1065 tx_list_size, re_dma_map_addr,
1066 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
1067 if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
1068 device_printf(dev, "could not load TX DMA ring\n");
1072 /* Create DMA maps for TX buffers */
1074 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1075 error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
1076 &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1078 device_printf(dev, "could not create DMA map for TX\n");
1084 * Allocate map for RX descriptor list.
1086 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1087 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1088 NULL, rx_list_size, 1, rx_list_size, 0,
1089 NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
1091 device_printf(dev, "could not create RX DMA ring tag\n");
1095 /* Allocate DMA'able memory for the RX ring */
1097 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
1098 (void **)&sc->rl_ldata.rl_rx_list,
1099 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1100 &sc->rl_ldata.rl_rx_list_map);
1102 device_printf(dev, "could not allocate RX DMA ring\n");
1106 /* Load the map for the RX ring. */
1108 sc->rl_ldata.rl_rx_list_addr = 0;
1109 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
1110 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
1111 rx_list_size, re_dma_map_addr,
1112 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
1113 if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
1114 device_printf(dev, "could not load RX DMA ring\n");
1118 /* Create DMA maps for RX buffers */
1120 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
1121 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
1122 &sc->rl_ldata.rl_jrx_sparemap);
1125 "could not create spare DMA map for jumbo RX\n");
1128 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1129 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
1130 &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1133 "could not create DMA map for jumbo RX\n");
1138 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
1139 &sc->rl_ldata.rl_rx_sparemap);
1141 device_printf(dev, "could not create spare DMA map for RX\n");
1144 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1145 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
1146 &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1148 device_printf(dev, "could not create DMA map for RX\n");
1153 /* Create DMA map for statistics. */
1154 error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0,
1155 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1156 sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL,
1157 &sc->rl_ldata.rl_stag);
1159 device_printf(dev, "could not create statistics DMA tag\n");
1162 /* Allocate DMA'able memory for statistics. */
1163 error = bus_dmamem_alloc(sc->rl_ldata.rl_stag,
1164 (void **)&sc->rl_ldata.rl_stats,
1165 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1166 &sc->rl_ldata.rl_smap);
1169 "could not allocate statistics DMA memory\n");
1172 /* Load the map for statistics. */
1173 sc->rl_ldata.rl_stats_addr = 0;
1174 error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap,
1175 sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr,
1176 &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT);
1177 if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) {
1178 device_printf(dev, "could not load statistics DMA memory\n");
1186 * Attach the interface. Allocate softc structures, do ifmedia
1187 * setup and ethernet/BPF attach.
1190 re_attach(device_t dev)
1192 u_char eaddr[ETHER_ADDR_LEN];
1193 u_int16_t as[ETHER_ADDR_LEN / 2];
1194 struct rl_softc *sc;
1196 const struct rl_hwrev *hw_rev;
1199 u_int16_t devid, re_did = 0;
1200 int error = 0, i, phy, rid;
1201 int msic, msixc, reg;
1204 sc = device_get_softc(dev);
1207 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1209 callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
1212 * Map control/status registers.
1214 pci_enable_busmaster(dev);
1216 devid = pci_get_device(dev);
1218 * Prefer memory space register mapping over I/O space.
1219 * However, because the RTL8169SC does not seem to work when memory
1220 * mapping is used, always activate I/O mapping for that chip.
1222 if (devid == RT_DEVICEID_8169SC)
1224 if (prefer_iomap == 0) {
1225 sc->rl_res_id = PCIR_BAR(1);
1226 sc->rl_res_type = SYS_RES_MEMORY;
1227 /* RTL8168/8101E seems to use different BARs. */
1228 if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
1229 sc->rl_res_id = PCIR_BAR(2);
1231 sc->rl_res_id = PCIR_BAR(0);
1232 sc->rl_res_type = SYS_RES_IOPORT;
1234 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
1235 &sc->rl_res_id, RF_ACTIVE);
1236 if (sc->rl_res == NULL && prefer_iomap == 0) {
1237 sc->rl_res_id = PCIR_BAR(0);
1238 sc->rl_res_type = SYS_RES_IOPORT;
1239 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
1240 &sc->rl_res_id, RF_ACTIVE);
1242 if (sc->rl_res == NULL) {
1243 device_printf(dev, "couldn't map ports/memory\n");
1248 sc->rl_btag = rman_get_bustag(sc->rl_res);
1249 sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
1251 msic = pci_msi_count(dev);
1252 msixc = pci_msix_count(dev);
1253 if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) {
1254 sc->rl_flags |= RL_FLAG_PCIE;
1255 sc->rl_expcap = reg;
1258 device_printf(dev, "MSI count : %d\n", msic);
1259 device_printf(dev, "MSI-X count : %d\n", msixc);
1261 if (msix_disable > 0)
1263 if (msi_disable > 0)
1265 /* Prefer MSI-X to MSI. */
1269 sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1271 if (sc->rl_res_pba == NULL) {
1272 device_printf(sc->rl_dev,
1273 "could not allocate MSI-X PBA resource\n");
1275 if (sc->rl_res_pba != NULL &&
1276 pci_alloc_msix(dev, &msixc) == 0) {
1278 device_printf(dev, "Using %d MSI-X message\n",
1280 sc->rl_flags |= RL_FLAG_MSIX;
1282 pci_release_msi(dev);
1284 if ((sc->rl_flags & RL_FLAG_MSIX) == 0) {
1285 if (sc->rl_res_pba != NULL)
1286 bus_release_resource(dev, SYS_RES_MEMORY, rid,
1288 sc->rl_res_pba = NULL;
1292 /* Prefer MSI to INTx. */
1293 if (msixc == 0 && msic > 0) {
1295 if (pci_alloc_msi(dev, &msic) == 0) {
1296 if (msic == RL_MSI_MESSAGES) {
1297 device_printf(dev, "Using %d MSI message\n",
1299 sc->rl_flags |= RL_FLAG_MSI;
1300 /* Explicitly set MSI enable bit. */
1301 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1302 cfg = CSR_READ_1(sc, RL_CFG2);
1304 CSR_WRITE_1(sc, RL_CFG2, cfg);
1305 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1307 pci_release_msi(dev);
1309 if ((sc->rl_flags & RL_FLAG_MSI) == 0)
1313 /* Allocate interrupt */
1314 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) {
1316 sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1317 RF_SHAREABLE | RF_ACTIVE);
1318 if (sc->rl_irq[0] == NULL) {
1319 device_printf(dev, "couldn't allocate IRQ resources\n");
1324 for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
1325 sc->rl_irq[i] = bus_alloc_resource_any(dev,
1326 SYS_RES_IRQ, &rid, RF_ACTIVE);
1327 if (sc->rl_irq[i] == NULL) {
1329 "couldn't allocate IRQ resources for "
1330 "message %d\n", rid);
1337 if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
1338 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1339 cfg = CSR_READ_1(sc, RL_CFG2);
1340 if ((cfg & RL_CFG2_MSI) != 0) {
1341 device_printf(dev, "turning off MSI enable bit.\n");
1342 cfg &= ~RL_CFG2_MSI;
1343 CSR_WRITE_1(sc, RL_CFG2, cfg);
1345 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1348 /* Disable ASPM L0S/L1. */
1349 if (sc->rl_expcap != 0) {
1350 cap = pci_read_config(dev, sc->rl_expcap +
1352 if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
1353 ctl = pci_read_config(dev, sc->rl_expcap +
1355 if ((ctl & PCIEM_LINK_CTL_ASPMC) != 0) {
1356 ctl &= ~PCIEM_LINK_CTL_ASPMC;
1357 pci_write_config(dev, sc->rl_expcap +
1358 PCIER_LINK_CTL, ctl, 2);
1359 device_printf(dev, "ASPM disabled\n");
1362 device_printf(dev, "no ASPM capability\n");
1366 hwrev = CSR_READ_4(sc, RL_TXCFG);
1367 switch (hwrev & 0x70000000) {
1370 device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
1371 hwrev &= (RL_TXCFG_HWREV | 0x80000000);
1374 device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
1375 sc->rl_macrev = hwrev & 0x00700000;
1376 hwrev &= RL_TXCFG_HWREV;
1379 device_printf(dev, "MAC rev. 0x%08x\n", sc->rl_macrev);
1380 while (hw_rev->rl_desc != NULL) {
1381 if (hw_rev->rl_rev == hwrev) {
1382 sc->rl_type = hw_rev->rl_type;
1383 sc->rl_hwrev = hw_rev;
1388 if (hw_rev->rl_desc == NULL) {
1389 device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
1394 switch (hw_rev->rl_rev) {
1395 case RL_HWREV_8139CPLUS:
1396 sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
1398 case RL_HWREV_8100E:
1399 case RL_HWREV_8101E:
1400 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
1402 case RL_HWREV_8102E:
1403 case RL_HWREV_8102EL:
1404 case RL_HWREV_8102EL_SPIN1:
1405 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
1406 RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
1409 case RL_HWREV_8103E:
1410 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
1411 RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
1412 RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP;
1414 case RL_HWREV_8401E:
1415 case RL_HWREV_8105E:
1416 case RL_HWREV_8105E_SPIN1:
1417 case RL_HWREV_8106E:
1418 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1419 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1420 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
1423 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1424 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1425 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
1426 RL_FLAG_CMDSTOP_WAIT_TXQ;
1428 case RL_HWREV_8168B_SPIN1:
1429 case RL_HWREV_8168B_SPIN2:
1430 sc->rl_flags |= RL_FLAG_WOLRXENB;
1432 case RL_HWREV_8168B_SPIN3:
1433 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
1435 case RL_HWREV_8168C_SPIN2:
1436 sc->rl_flags |= RL_FLAG_MACSLEEP;
1438 case RL_HWREV_8168C:
1439 if (sc->rl_macrev == 0x00200000)
1440 sc->rl_flags |= RL_FLAG_MACSLEEP;
1442 case RL_HWREV_8168CP:
1443 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1444 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1445 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
1447 case RL_HWREV_8168D:
1448 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1449 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1450 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1451 RL_FLAG_WOL_MANLINK;
1453 case RL_HWREV_8168DP:
1454 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1455 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
1456 RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
1458 case RL_HWREV_8168E:
1459 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1460 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1461 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1462 RL_FLAG_WOL_MANLINK;
1464 case RL_HWREV_8168E_VL:
1465 case RL_HWREV_8168EP:
1466 case RL_HWREV_8168F:
1467 case RL_HWREV_8168G:
1469 case RL_HWREV_8411B:
1470 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1471 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1472 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1473 RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK;
1475 case RL_HWREV_8168GU:
1476 if (pci_get_device(dev) == RT_DEVICEID_8101E) {
1478 sc->rl_flags |= RL_FLAG_FASTETHER;
1480 sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
1482 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1483 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1484 RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ;
1486 case RL_HWREV_8169_8110SB:
1487 case RL_HWREV_8169_8110SBL:
1488 case RL_HWREV_8169_8110SC:
1489 case RL_HWREV_8169_8110SCE:
1490 sc->rl_flags |= RL_FLAG_PHYWAKE;
1493 case RL_HWREV_8169S:
1494 case RL_HWREV_8110S:
1495 sc->rl_flags |= RL_FLAG_MACRESET;
1501 if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) {
1502 sc->rl_cfg0 = RL_8139_CFG0;
1503 sc->rl_cfg1 = RL_8139_CFG1;
1505 sc->rl_cfg3 = RL_8139_CFG3;
1506 sc->rl_cfg4 = RL_8139_CFG4;
1507 sc->rl_cfg5 = RL_8139_CFG5;
1509 sc->rl_cfg0 = RL_CFG0;
1510 sc->rl_cfg1 = RL_CFG1;
1511 sc->rl_cfg2 = RL_CFG2;
1512 sc->rl_cfg3 = RL_CFG3;
1513 sc->rl_cfg4 = RL_CFG4;
1514 sc->rl_cfg5 = RL_CFG5;
1517 /* Reset the adapter. */
1523 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1524 cfg = CSR_READ_1(sc, sc->rl_cfg1);
1526 CSR_WRITE_1(sc, sc->rl_cfg1, cfg);
1527 cfg = CSR_READ_1(sc, sc->rl_cfg5);
1528 cfg &= RL_CFG5_PME_STS;
1529 CSR_WRITE_1(sc, sc->rl_cfg5, cfg);
1530 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1532 if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
1534 * XXX Should have a better way to extract station
1535 * address from EEPROM.
1537 for (i = 0; i < ETHER_ADDR_LEN; i++)
1538 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
1540 sc->rl_eewidth = RL_9356_ADDR_LEN;
1541 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
1542 if (re_did != 0x8129)
1543 sc->rl_eewidth = RL_9346_ADDR_LEN;
1546 * Get station address from the EEPROM.
1548 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
1549 for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
1550 as[i] = le16toh(as[i]);
1551 bcopy(as, eaddr, ETHER_ADDR_LEN);
1554 if (sc->rl_type == RL_8169) {
1555 /* Set RX length mask and number of descriptors. */
1556 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
1557 sc->rl_txstart = RL_GTXSTART;
1558 sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
1559 sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
1561 /* Set RX length mask and number of descriptors. */
1562 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
1563 sc->rl_txstart = RL_TXSTART;
1564 sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
1565 sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
1568 error = re_allocmem(dev, sc);
1573 ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
1575 device_printf(dev, "can not if_alloc()\n");
1580 /* Take controller out of deep sleep mode. */
1581 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
1582 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
1583 CSR_WRITE_1(sc, RL_GPIO,
1584 CSR_READ_1(sc, RL_GPIO) | 0x01);
1586 CSR_WRITE_1(sc, RL_GPIO,
1587 CSR_READ_1(sc, RL_GPIO) & ~0x01);
1590 /* Take PHY out of power down mode. */
1591 if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) {
1592 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
1593 if (hw_rev->rl_rev == RL_HWREV_8401E)
1594 CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
1596 if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
1597 re_gmii_writereg(dev, 1, 0x1f, 0);
1598 re_gmii_writereg(dev, 1, 0x0e, 0);
1602 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1603 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1604 ifp->if_ioctl = re_ioctl;
1605 ifp->if_start = re_start;
1607 * The RTL8168/8111C generates frames with a wrong IP checksum if the
1608 * packet has IP options, so disable TX IP checksum offloading.
1610 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C ||
1611 sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 ||
1612 sc->rl_hwrev->rl_rev == RL_HWREV_8168CP)
1613 ifp->if_hwassist = CSUM_TCP | CSUM_UDP;
1615 ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
1616 ifp->if_hwassist |= CSUM_TSO;
1617 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
1618 ifp->if_capenable = ifp->if_capabilities;
1619 ifp->if_init = re_init;
1620 IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN);
1621 ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
1622 IFQ_SET_READY(&ifp->if_snd);
1624 TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
1626 #define RE_PHYAD_INTERNAL 0
1629 phy = RE_PHYAD_INTERNAL;
1630 if (sc->rl_type == RL_8169)
1632 error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd,
1633 re_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, MIIF_DOPAUSE);
1635 device_printf(dev, "attaching PHYs failed\n");
1640 * Call MI attach routine.
1642 ether_ifattach(ifp, eaddr);
1644 /* VLAN capability setup */
1645 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1646 if (ifp->if_capabilities & IFCAP_HWCSUM)
1647 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1648 /* Enable WOL if PM is supported. */
1649 if (pci_find_cap(sc->rl_dev, PCIY_PMG, ®) == 0)
1650 ifp->if_capabilities |= IFCAP_WOL;
1651 ifp->if_capenable = ifp->if_capabilities;
1652 ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST);
1654 * Don't enable TSO by default. It is known to generate
1655 * corrupted TCP segments (bad TCP options) under certain
1658 ifp->if_hwassist &= ~CSUM_TSO;
1659 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO);
1660 #ifdef DEVICE_POLLING
1661 ifp->if_capabilities |= IFCAP_POLLING;
1664 * Tell the upper layer(s) we support long frames.
1665 * Must appear after the call to ether_ifattach() because
1666 * ether_ifattach() sets ifi_hdrlen to the default value.
1668 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1671 re_netmap_attach(sc);
1672 #endif /* DEV_NETMAP */
1675 * Perform hardware diagnostic on the original RTL8169.
1676 * Some 32-bit cards were incorrectly wired and would
1677 * malfunction if plugged into a 64-bit slot.
1680 if (hwrev == RL_HWREV_8169) {
1681 error = re_diag(sc);
1684 "attach aborted due to hardware diag failure\n");
1685 ether_ifdetach(ifp);
1691 #ifdef RE_TX_MODERATION
1694 /* Hook interrupt last to avoid having to lock softc */
1695 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
1697 error = bus_setup_intr(dev, sc->rl_irq[0],
1698 INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc,
1699 &sc->rl_intrhand[0]);
1701 error = bus_setup_intr(dev, sc->rl_irq[0],
1702 INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
1703 &sc->rl_intrhand[0]);
1706 device_printf(dev, "couldn't set up irq\n");
1707 ether_ifdetach(ifp);
1719 * Shut down hardware and free up resources. This can be called any
1720 * time after the mutex has been initialized. It is called in both
1721 * the error case in attach and the normal detach case, so it needs
1722 * to be careful about only freeing resources that have actually been allocated.
1726 re_detach(device_t dev)
1728 struct rl_softc *sc;
1732 sc = device_get_softc(dev);
1734 KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));
1736 /* These should only be active if attach succeeded */
1737 if (device_is_attached(dev)) {
1738 #ifdef DEVICE_POLLING
1739 if (ifp->if_capenable & IFCAP_POLLING)
1740 ether_poll_deregister(ifp);
1748 callout_drain(&sc->rl_stat_callout);
1749 taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
1751 * Force off the IFF_UP flag here, in case someone
1752 * still had a BPF descriptor attached to this
1753 * interface. If they do, ether_ifdetach() will cause
1754 * the BPF code to try to clear the promisc mode
1755 * flag, which will bubble down to re_ioctl(),
1756 * which will try to call re_init() again. This will
1757 * turn the NIC back on and restart the MII ticker,
1758 * which will panic the system when the kernel tries
1759 * to invoke the re_tick() function that isn't there anymore.
1762 ifp->if_flags &= ~IFF_UP;
1763 ether_ifdetach(ifp);
1766 device_delete_child(dev, sc->rl_miibus);
1767 bus_generic_detach(dev);
1770 * The rest is resource deallocation, so we should already be
1774 if (sc->rl_intrhand[0] != NULL) {
1775 bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
1776 sc->rl_intrhand[0] = NULL;
1781 #endif /* DEV_NETMAP */
1784 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
1788 if (sc->rl_irq[0] != NULL) {
1789 bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]);
1790 sc->rl_irq[0] = NULL;
1792 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0)
1793 pci_release_msi(dev);
1794 if (sc->rl_res_pba) {
1796 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba);
1799 bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
1802 /* Unload and free the RX DMA ring memory and map */
1804 if (sc->rl_ldata.rl_rx_list_tag) {
1805 if (sc->rl_ldata.rl_rx_list_map)
1806 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
1807 sc->rl_ldata.rl_rx_list_map);
1808 if (sc->rl_ldata.rl_rx_list_map && sc->rl_ldata.rl_rx_list)
1809 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
1810 sc->rl_ldata.rl_rx_list,
1811 sc->rl_ldata.rl_rx_list_map);
1812 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
1815 /* Unload and free the TX DMA ring memory and map */
1817 if (sc->rl_ldata.rl_tx_list_tag) {
1818 if (sc->rl_ldata.rl_tx_list_map)
1819 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
1820 sc->rl_ldata.rl_tx_list_map);
1821 if (sc->rl_ldata.rl_tx_list_map && sc->rl_ldata.rl_tx_list)
1822 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
1823 sc->rl_ldata.rl_tx_list,
1824 sc->rl_ldata.rl_tx_list_map);
1825 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
1828 /* Destroy all the RX and TX buffer maps */
1830 if (sc->rl_ldata.rl_tx_mtag) {
1831 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1832 if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap)
1833 bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
1834 sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1836 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
1838 if (sc->rl_ldata.rl_rx_mtag) {
1839 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1840 if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap)
1841 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1842 sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1844 if (sc->rl_ldata.rl_rx_sparemap)
1845 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1846 sc->rl_ldata.rl_rx_sparemap);
1847 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
1849 if (sc->rl_ldata.rl_jrx_mtag) {
1850 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1851 if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap)
1852 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1853 sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1855 if (sc->rl_ldata.rl_jrx_sparemap)
1856 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1857 sc->rl_ldata.rl_jrx_sparemap);
1858 bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag);
1860 /* Unload and free the stats buffer and map */
1862 if (sc->rl_ldata.rl_stag) {
1863 if (sc->rl_ldata.rl_smap)
1864 bus_dmamap_unload(sc->rl_ldata.rl_stag,
1865 sc->rl_ldata.rl_smap);
1866 if (sc->rl_ldata.rl_smap && sc->rl_ldata.rl_stats)
1867 bus_dmamem_free(sc->rl_ldata.rl_stag,
1868 sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap);
1869 bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
1872 if (sc->rl_parent_tag)
1873 bus_dma_tag_destroy(sc->rl_parent_tag);
1875 mtx_destroy(&sc->rl_mtx);
1880 static __inline void
1881 re_discard_rxbuf(struct rl_softc *sc, int idx)
1883 struct rl_desc *desc;
1884 struct rl_rxdesc *rxd;
1887 if (sc->rl_ifp->if_mtu > RL_MTU &&
1888 (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
1889 rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1891 rxd = &sc->rl_ldata.rl_rx_desc[idx];
1892 desc = &sc->rl_ldata.rl_rx_list[idx];
1893 desc->rl_vlanctl = 0;
1894 cmdstat = rxd->rx_size;
1895 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1896 cmdstat |= RL_RDESC_CMD_EOR;
1897 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1901 re_newbuf(struct rl_softc *sc, int idx)
1904 struct rl_rxdesc *rxd;
1905 bus_dma_segment_t segs[1];
1907 struct rl_desc *desc;
1911 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1915 m->m_len = m->m_pkthdr.len = MCLBYTES;
1918 * This is part of an evil trick to deal with non-x86 platforms.
1919 * The RealTek chip requires RX buffers to be aligned on 64-bit
1920 * boundaries, but that will hose non-x86 machines. To get around
1921 * this, we leave some empty space at the start of each buffer
1922 * and for non-x86 hosts, we copy the buffer back six bytes
1923 * to achieve word alignment. This is slightly more efficient
1924 * than allocating a new buffer, copying the contents, and
1925 * discarding the old buffer.
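 * (On hosts that need the fixup, the copy-back is performed by
 * re_fixup_rx() below, which moves the received data back by
 * RE_ETHER_ALIGN - ETHER_ALIGN bytes.)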
1927 m_adj(m, RE_ETHER_ALIGN);
1929 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
1930 sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1935 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1937 rxd = &sc->rl_ldata.rl_rx_desc[idx];
1938 if (rxd->rx_m != NULL) {
1939 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1940 BUS_DMASYNC_POSTREAD);
1941 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap);
1945 map = rxd->rx_dmamap;
1946 rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
1947 rxd->rx_size = segs[0].ds_len;
1948 sc->rl_ldata.rl_rx_sparemap = map;
1949 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1950 BUS_DMASYNC_PREREAD);
1952 desc = &sc->rl_ldata.rl_rx_list[idx];
1953 desc->rl_vlanctl = 0;
1954 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
1955 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
1956 cmdstat = segs[0].ds_len;
1957 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1958 cmdstat |= RL_RDESC_CMD_EOR;
1959 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1965 re_jumbo_newbuf(struct rl_softc *sc, int idx)
1968 struct rl_rxdesc *rxd;
1969 bus_dma_segment_t segs[1];
1971 struct rl_desc *desc;
1975 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1978 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1980 m_adj(m, RE_ETHER_ALIGN);
1982 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag,
1983 sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1988 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1990 rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1991 if (rxd->rx_m != NULL) {
1992 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
1993 BUS_DMASYNC_POSTREAD);
1994 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap);
1998 map = rxd->rx_dmamap;
1999 rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap;
2000 rxd->rx_size = segs[0].ds_len;
2001 sc->rl_ldata.rl_jrx_sparemap = map;
2002 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
2003 BUS_DMASYNC_PREREAD);
2005 desc = &sc->rl_ldata.rl_rx_list[idx];
2006 desc->rl_vlanctl = 0;
2007 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
2008 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
2009 cmdstat = segs[0].ds_len;
2010 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
2011 cmdstat |= RL_RDESC_CMD_EOR;
2012 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
2018 static __inline void
2019 re_fixup_rx(struct mbuf *m)
2022 uint16_t *src, *dst;
2024 src = mtod(m, uint16_t *);
2025 dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;
2027 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2028 *dst++ = *src++;
2030 m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
2035 re_tx_list_init(struct rl_softc *sc)
2037 struct rl_desc *desc;
2042 bzero(sc->rl_ldata.rl_tx_list,
2043 sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
2044 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
2045 sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
2047 re_netmap_tx_init(sc);
2048 #endif /* DEV_NETMAP */
2050 desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
2051 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);
2053 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2054 sc->rl_ldata.rl_tx_list_map,
2055 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2057 sc->rl_ldata.rl_tx_prodidx = 0;
2058 sc->rl_ldata.rl_tx_considx = 0;
2059 sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
2065 re_rx_list_init(struct rl_softc *sc)
2069 bzero(sc->rl_ldata.rl_rx_list,
2070 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2071 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2072 sc->rl_ldata.rl_rx_desc[i].rx_m = NULL;
2073 if ((error = re_newbuf(sc, i)) != 0)
2077 re_netmap_rx_init(sc);
2078 #endif /* DEV_NETMAP */
2080 /* Flush the RX descriptors */
2082 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2083 sc->rl_ldata.rl_rx_list_map,
2084 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2086 sc->rl_ldata.rl_rx_prodidx = 0;
2087 sc->rl_head = sc->rl_tail = NULL;
2088 sc->rl_int_rx_act = 0;
2094 re_jrx_list_init(struct rl_softc *sc)
2098 bzero(sc->rl_ldata.rl_rx_list,
2099 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2100 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2101 sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL;
2102 if ((error = re_jumbo_newbuf(sc, i)) != 0)
2106 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2107 sc->rl_ldata.rl_rx_list_map,
2108 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2110 sc->rl_ldata.rl_rx_prodidx = 0;
2111 sc->rl_head = sc->rl_tail = NULL;
2112 sc->rl_int_rx_act = 0;
2118 * RX handler for C+ and 8169. For the gigE chips, we support
2119 * the reception of jumbo frames that have been fragmented
2120 * across multiple 2K mbuf cluster buffers.
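 * Fragments of such a frame are collected on the sc->rl_head/sc->rl_tail
 * mbuf chain as they arrive and are only passed up the stack once the
 * descriptor carrying the EOF (end of frame) bit has been processed.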
2123 re_rxeof(struct rl_softc *sc, int *rx_npktsp)
2127 int i, rxerr, total_len;
2128 struct rl_desc *cur_rx;
2129 u_int32_t rxstat, rxvlan;
2130 int jumbo, maxpkt = 16, rx_npkts = 0;
2136 if (netmap_rx_irq(ifp, 0 | (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT),
2139 #endif /* DEV_NETMAP */
2140 if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
2145 /* Invalidate the descriptor memory */
2147 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2148 sc->rl_ldata.rl_rx_list_map,
2149 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2151 for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
2152 i = RL_RX_DESC_NXT(sc, i)) {
2153 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2155 cur_rx = &sc->rl_ldata.rl_rx_list[i];
2156 rxstat = le32toh(cur_rx->rl_cmdstat);
2157 if ((rxstat & RL_RDESC_STAT_OWN) != 0)
2159 total_len = rxstat & sc->rl_rxlenmask;
2160 rxvlan = le32toh(cur_rx->rl_vlanctl);
2162 m = sc->rl_ldata.rl_jrx_desc[i].rx_m;
2164 m = sc->rl_ldata.rl_rx_desc[i].rx_m;
2166 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
2167 (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
2168 (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
2170 * RTL8168C or later controllers do not
2171 * support multi-fragment packets.
2173 re_discard_rxbuf(sc, i);
2175 } else if ((rxstat & RL_RDESC_STAT_EOF) == 0) {
2176 if (re_newbuf(sc, i) != 0) {
2178 * If this is part of a multi-fragment packet,
2179 * discard all the pieces.
2181 if (sc->rl_head != NULL) {
2182 m_freem(sc->rl_head);
2183 sc->rl_head = sc->rl_tail = NULL;
2185 re_discard_rxbuf(sc, i);
2188 m->m_len = RE_RX_DESC_BUFLEN;
2189 if (sc->rl_head == NULL)
2190 sc->rl_head = sc->rl_tail = m;
2192 m->m_flags &= ~M_PKTHDR;
2193 sc->rl_tail->m_next = m;
2200 * NOTE: for the 8139C+, the frame length field
2201 * is always 12 bits in size, but for the gigE chips,
2202 * it is 13 bits (since the max RX frame length is 16K).
2203 * Unfortunately, all 32 bits in the status word
2204 * were already used, so to make room for the extra
2205 * length bit, RealTek took out the 'frame alignment
2206 * error' bit and shifted the other status bits
2207 * over one slot. The OWN, EOR, FS and LS bits are
2208 * still in the same places. We have already extracted
2209 * the frame length and checked the OWN bit, so rather
2210 * than using an alternate bit mapping, we shift the
2211 * status bits one space to the right so that we can
2212 * evaluate the 8169 status as though it were in the
2213 * same format as that of the 8139C+.
2215 if (sc->rl_type == RL_8169)
2219 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
2220 * set, but if CRC is clear, it will still be a valid frame.
2222 if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) {
2224 if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 &&
2226 (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)
2231 * If this is part of a multi-fragment packet,
2232 * discard all the pieces.
2234 if (sc->rl_head != NULL) {
2235 m_freem(sc->rl_head);
2236 sc->rl_head = sc->rl_tail = NULL;
2238 re_discard_rxbuf(sc, i);
2244 * If allocating a replacement mbuf fails,
2245 * reload the current one.
2248 rxerr = re_jumbo_newbuf(sc, i);
2250 rxerr = re_newbuf(sc, i);
2253 if (sc->rl_head != NULL) {
2254 m_freem(sc->rl_head);
2255 sc->rl_head = sc->rl_tail = NULL;
2257 re_discard_rxbuf(sc, i);
2261 if (sc->rl_head != NULL) {
2263 m->m_len = total_len;
2265 m->m_len = total_len % RE_RX_DESC_BUFLEN;
2267 m->m_len = RE_RX_DESC_BUFLEN;
2270 * Special case: if there's 4 bytes or less
2271 * in this buffer, the mbuf can be discarded:
2272 * the last 4 bytes is the CRC, which we don't
2273 * care about anyway.
2275 if (m->m_len <= ETHER_CRC_LEN) {
2276 sc->rl_tail->m_len -=
2277 (ETHER_CRC_LEN - m->m_len);
2280 m->m_len -= ETHER_CRC_LEN;
2281 m->m_flags &= ~M_PKTHDR;
2282 sc->rl_tail->m_next = m;
2285 sc->rl_head = sc->rl_tail = NULL;
2286 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
2288 m->m_pkthdr.len = m->m_len =
2289 (total_len - ETHER_CRC_LEN);
2295 m->m_pkthdr.rcvif = ifp;
2297 /* Do RX checksumming if enabled */
2299 if (ifp->if_capenable & IFCAP_RXCSUM) {
2300 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2301 /* Check IP header checksum */
2302 if (rxstat & RL_RDESC_STAT_PROTOID)
2303 m->m_pkthdr.csum_flags |=
2305 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
2306 m->m_pkthdr.csum_flags |=
2309 /* Check TCP/UDP checksum */
2310 if ((RL_TCPPKT(rxstat) &&
2311 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2312 (RL_UDPPKT(rxstat) &&
2313 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2314 m->m_pkthdr.csum_flags |=
2315 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2316 m->m_pkthdr.csum_data = 0xffff;
2320 * RTL8168C/RTL8168CP/RTL8111C/RTL8111CP
2322 if ((rxstat & RL_RDESC_STAT_PROTOID) &&
2323 (rxvlan & RL_RDESC_IPV4))
2324 m->m_pkthdr.csum_flags |=
2326 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) &&
2327 (rxvlan & RL_RDESC_IPV4))
2328 m->m_pkthdr.csum_flags |=
2330 if (((rxstat & RL_RDESC_STAT_TCP) &&
2331 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2332 ((rxstat & RL_RDESC_STAT_UDP) &&
2333 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2334 m->m_pkthdr.csum_flags |=
2335 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2336 m->m_pkthdr.csum_data = 0xffff;
2341 if (rxvlan & RL_RDESC_VLANCTL_TAG) {
2342 m->m_pkthdr.ether_vtag =
2343 bswap16((rxvlan & RL_RDESC_VLANCTL_DATA));
2344 m->m_flags |= M_VLANTAG;
2347 (*ifp->if_input)(ifp, m);
2352 /* Flush the RX DMA ring */
2354 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2355 sc->rl_ldata.rl_rx_list_map,
2356 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2358 sc->rl_ldata.rl_rx_prodidx = i;
2360 if (rx_npktsp != NULL)
2361 *rx_npktsp = rx_npkts;
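/*
 * Editorial sketch (not driver code): for a frame split across fixed-size
 * RX buffers, the code above sizes the final fragment as the remainder of
 * the total length and strips the 4-byte CRC from the tail. The helper
 * below captures that arithmetic with hypothetical constants; a return of
 * zero corresponds to the special case where the last buffer holds nothing
 * but CRC bytes, so the remainder of the CRC must be trimmed from the
 * previous buffer instead.
 */
#include <stddef.h>

#define	TOY_RX_BUFLEN	2048	/* stand-in for the per-descriptor buffer size */
#define	TOY_CRC_LEN	4	/* trailing Ethernet FCS */

static size_t
toy_last_frag_len(size_t total_len)
{
	size_t last = total_len % TOY_RX_BUFLEN;

	if (last == 0)
		last = TOY_RX_BUFLEN;	/* frame ends exactly on a buffer boundary */
	if (last <= TOY_CRC_LEN)
		return (0);		/* fragment is pure CRC; drop it */
	return (last - TOY_CRC_LEN);
}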
2369 re_txeof(struct rl_softc *sc)
2372 struct rl_txdesc *txd;
2376 cons = sc->rl_ldata.rl_tx_considx;
2377 if (cons == sc->rl_ldata.rl_tx_prodidx)
2382 if (netmap_tx_irq(ifp, 0 | (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT)))
2384 #endif /* DEV_NETMAP */
2385 /* Invalidate the TX descriptor list */
2386 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2387 sc->rl_ldata.rl_tx_list_map,
2388 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2390 for (; cons != sc->rl_ldata.rl_tx_prodidx;
2391 cons = RL_TX_DESC_NXT(sc, cons)) {
2392 txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
2393 if (txstat & RL_TDESC_STAT_OWN)
2396 * We only stash mbufs in the last descriptor
2397 * in a fragment chain, which also happens to
2398 * be the only place where the TX status bits
2401 if (txstat & RL_TDESC_CMD_EOF) {
2402 txd = &sc->rl_ldata.rl_tx_desc[cons];
2403 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
2404 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2405 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
2407 KASSERT(txd->tx_m != NULL,
2408 ("%s: freeing NULL mbufs!", __func__));
2411 if (txstat & (RL_TDESC_STAT_EXCESSCOL|
2412 RL_TDESC_STAT_COLCNT))
2413 ifp->if_collisions++;
2414 if (txstat & RL_TDESC_STAT_TXERRSUM)
2419 sc->rl_ldata.rl_tx_free++;
2420 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2422 sc->rl_ldata.rl_tx_considx = cons;
2424 /* No changes made to the TX ring, so no flush needed */
2426 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
2427 #ifdef RE_TX_MODERATION
2429 * If not all descriptors have been reaped yet, reload
2430 * the timer so that we will eventually get another
2431 * interrupt that will cause us to re-enter this routine.
2432 * This is done in case the transmitter has gone idle.
2434 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2437 sc->rl_watchdog_timer = 0;
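/*
 * Editorial sketch (not driver code): re_txeof() above walks from the
 * consumer index toward the producer index, stops at the first descriptor
 * the NIC still owns, and does mbuf/error accounting only on end-of-frame
 * descriptors, where the driver stashed the mbuf. Bit values below are
 * hypothetical placeholders.
 */
#include <stdint.h>

#define	TOY_TX_OWN	0x80000000u	/* hypothetical: NIC still owns this slot */
#define	TOY_TX_EOF	0x10000000u	/* hypothetical: last descriptor of a frame */

struct toy_txslot {
	uint32_t cmdstat;
};

static int
toy_tx_reclaim(struct toy_txslot *ring, int ring_cnt, int *cons, int prod)
{
	int reclaimed = 0;

	while (*cons != prod) {
		if (ring[*cons].cmdstat & TOY_TX_OWN)
			break;			/* chip has not finished this one yet */
		if (ring[*cons].cmdstat & TOY_TX_EOF) {
			/* frame fully sent: free its mbuf/dmamap here */
		}
		*cons = (*cons + 1) % ring_cnt;
		reclaimed++;
	}
	return (reclaimed);
}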
2443 struct rl_softc *sc;
2444 struct mii_data *mii;
2450 mii = device_get_softc(sc->rl_miibus);
2452 if ((sc->rl_flags & RL_FLAG_LINK) == 0)
2453 re_miibus_statchg(sc->rl_dev);
2455 * Reclaim transmitted frames here. Technically it is not
2456 * necessary to do it here, but it ensures periodic reclamation
2457 * even when the Tx completion interrupt is lost, which seems
2458 * to happen on PCIe based controllers under certain situations.
2462 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
2465 #ifdef DEVICE_POLLING
2467 re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2469 struct rl_softc *sc = ifp->if_softc;
2473 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2474 rx_npkts = re_poll_locked(ifp, cmd, count);
2480 re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2482 struct rl_softc *sc = ifp->if_softc;
2487 sc->rxcycles = count;
2488 re_rxeof(sc, &rx_npkts);
2491 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2492 re_start_locked(ifp);
2494 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
2497 status = CSR_READ_2(sc, RL_ISR);
2498 if (status == 0xffff)
2501 CSR_WRITE_2(sc, RL_ISR, status);
2502 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2503 (sc->rl_flags & RL_FLAG_PCIE))
2504 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2507 * XXX check behaviour on receiver stalls.
2510 if (status & RL_ISR_SYSTEM_ERR) {
2511 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2517 #endif /* DEVICE_POLLING */
2522 struct rl_softc *sc;
2527 status = CSR_READ_2(sc, RL_ISR);
2528 if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
2529 return (FILTER_STRAY);
2530 CSR_WRITE_2(sc, RL_IMR, 0);
2532 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
2534 return (FILTER_HANDLED);
2538 re_int_task(void *arg, int npending)
2540 struct rl_softc *sc;
2550 status = CSR_READ_2(sc, RL_ISR);
2551 CSR_WRITE_2(sc, RL_ISR, status);
2553 if (sc->suspended ||
2554 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2559 #ifdef DEVICE_POLLING
2560 if (ifp->if_capenable & IFCAP_POLLING) {
2566 if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
2567 rval = re_rxeof(sc, NULL);
2570 * Some chips will ignore a second TX request issued
2571 * while an existing transmission is in progress. If
2572 * the transmitter goes idle but there are still
2573 * packets waiting to be sent, we need to restart the
2574 * channel here to flush them out. This only seems to
2575 * be required with the PCIe devices.
2577 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2578 (sc->rl_flags & RL_FLAG_PCIE))
2579 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2581 #ifdef RE_TX_MODERATION
2582 RL_ISR_TIMEOUT_EXPIRED|
2586 RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
2589 if (status & RL_ISR_SYSTEM_ERR) {
2590 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2594 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2595 re_start_locked(ifp);
2599 if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
2600 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
2604 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
2608 re_intr_msi(void *xsc)
2610 struct rl_softc *sc;
2612 uint16_t intrs, status;
2618 #ifdef DEVICE_POLLING
2619 if (ifp->if_capenable & IFCAP_POLLING) {
2624 /* Disable interrupts. */
2625 CSR_WRITE_2(sc, RL_IMR, 0);
2626 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2631 intrs = RL_INTRS_CPLUS;
2632 status = CSR_READ_2(sc, RL_ISR);
2633 CSR_WRITE_2(sc, RL_ISR, status);
2634 if (sc->rl_int_rx_act > 0) {
2635 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2637 status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2641 if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR |
2642 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) {
2644 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2645 if (sc->rl_int_rx_mod != 0 &&
2646 (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR |
2647 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) {
2648 /* Rearm one-shot timer. */
2649 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2650 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR |
2651 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN);
2652 sc->rl_int_rx_act = 1;
2654 intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR |
2655 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN;
2656 sc->rl_int_rx_act = 0;
2662 * Some chips will ignore a second TX request issued
2663 * while an existing transmission is in progress. If
2664 * the transmitter goes idle but there are still
2665 * packets waiting to be sent, we need to restart the
2666 * channel here to flush them out. This only seems to
2667 * be required with the PCIe devices.
2669 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2670 (sc->rl_flags & RL_FLAG_PCIE))
2671 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2672 if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL))
2675 if (status & RL_ISR_SYSTEM_ERR) {
2676 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2680 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2681 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2682 re_start_locked(ifp);
2683 CSR_WRITE_2(sc, RL_IMR, intrs);
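/*
 * Editorial sketch (not driver code): the MSI handler above implements
 * one-shot RX interrupt moderation. On an RX event it masks the RX
 * interrupt sources and relies on a one-shot hardware timer; further
 * completions are batched until the timer interrupt fires, at which point
 * the RX sources are unmasked again. The bit values below are hypothetical
 * placeholders for the RL_ISR_* sources.
 */
#define	TOY_SRC_RX	0x01u	/* hypothetical RX interrupt sources */
#define	TOY_SRC_TIMER	0x02u	/* hypothetical one-shot timer interrupt */
#define	TOY_SRC_OTHER	0x04u	/* everything else (TX, errors, ...) */

static unsigned int
toy_rx_moderate(int moderation_enabled, int *rx_timer_armed)
{
	if (moderation_enabled) {
		*rx_timer_armed = 1;	/* caller also rearms the one-shot timer */
		return (TOY_SRC_TIMER | TOY_SRC_OTHER);	/* RX masked until it fires */
	}
	*rx_timer_armed = 0;
	return (TOY_SRC_RX | TOY_SRC_TIMER | TOY_SRC_OTHER);	/* immediate RX interrupts */
}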
2689 re_encap(struct rl_softc *sc, struct mbuf **m_head)
2691 struct rl_txdesc *txd, *txd_last;
2692 bus_dma_segment_t segs[RL_NTXSEGS];
2695 struct rl_desc *desc;
2697 int i, error, ei, si;
2699 uint32_t cmdstat, csum_flags, vlanctl;
2702 M_ASSERTPKTHDR((*m_head));
2705 * With some of the RealTek chips, using the checksum offload
2706 * support in conjunction with the autopadding feature results
2707 * in the transmission of corrupt frames. For example, if we
2708 * need to send a really small IP fragment that's less than 60
2709 * bytes in size, and IP header checksumming is enabled, the
2710 * resulting ethernet frame that appears on the wire will
2711 * have a garbled payload. To work around this, if TX IP checksum
2712 * offload is enabled, we always manually pad short frames out
2713 * to the minimum ethernet frame size.
2715 if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
2716 (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
2717 ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) {
2718 padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
2719 if (M_WRITABLE(*m_head) == 0) {
2720 /* Get a writable copy. */
2721 m_new = m_dup(*m_head, M_NOWAIT);
2723 if (m_new == NULL) {
2729 if ((*m_head)->m_next != NULL ||
2730 M_TRAILINGSPACE(*m_head) < padlen) {
2731 m_new = m_defrag(*m_head, M_NOWAIT);
2732 if (m_new == NULL) {
2741 * Manually pad short frames, and zero the pad space
2742 * to avoid leaking data.
2744 bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
2745 m_new->m_pkthdr.len += padlen;
2746 m_new->m_len = m_new->m_pkthdr.len;
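/*
 * Editorial sketch (not driver code): the workaround above pads short
 * frames to the Ethernet minimum in software before using IP checksum
 * offload, zeroing the pad bytes so no stale memory leaks onto the wire.
 * The helper below assumes a flat buffer with at least 60 bytes of room.
 */
#include <stdint.h>
#include <string.h>

#define	TOY_MIN_FRAMELEN	60	/* minimum Ethernet frame length, excluding CRC */

static size_t
toy_pad_short_frame(uint8_t *buf, size_t len)
{
	if (len < TOY_MIN_FRAMELEN) {
		memset(buf + len, 0, TOY_MIN_FRAMELEN - len);	/* zero the pad */
		len = TOY_MIN_FRAMELEN;
	}
	return (len);
}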
2750 prod = sc->rl_ldata.rl_tx_prodidx;
2751 txd = &sc->rl_ldata.rl_tx_desc[prod];
2752 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2753 *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2754 if (error == EFBIG) {
2755 m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS);
2756 if (m_new == NULL) {
2762 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
2763 txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2769 } else if (error != 0)
2777 /* Check for number of available descriptors. */
2778 if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
2779 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
2783 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2784 BUS_DMASYNC_PREWRITE);
2787 * Set up checksum offload. Note: checksum offload bits must
2788 * appear in all descriptors of a multi-descriptor transmit
2789 * attempt. This is a hard requirement, according to testing
2790 * done with an 8169 chip.
2794 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2795 if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) {
2796 csum_flags |= RL_TDESC_CMD_LGSEND;
2797 vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2798 RL_TDESC_CMD_MSSVALV2_SHIFT);
2800 csum_flags |= RL_TDESC_CMD_LGSEND |
2801 ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2802 RL_TDESC_CMD_MSSVAL_SHIFT);
2806 * Unconditionally enable IP checksumming if TCP or UDP
2807 * checksumming is required. Otherwise, the TCP/UDP checksum
2808 * request has no effect.
2810 if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
2811 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2812 csum_flags |= RL_TDESC_CMD_IPCSUM;
2813 if (((*m_head)->m_pkthdr.csum_flags &
2815 csum_flags |= RL_TDESC_CMD_TCPCSUM;
2816 if (((*m_head)->m_pkthdr.csum_flags &
2818 csum_flags |= RL_TDESC_CMD_UDPCSUM;
2820 vlanctl |= RL_TDESC_CMD_IPCSUMV2;
2821 if (((*m_head)->m_pkthdr.csum_flags &
2823 vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
2824 if (((*m_head)->m_pkthdr.csum_flags &
2826 vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
2832 * Set up hardware VLAN tagging. Note: vlan tag info must
2833 * appear in all descriptors of a multi-descriptor
2834 * transmission attempt.
2836 if ((*m_head)->m_flags & M_VLANTAG)
2837 vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) |
2838 RL_TDESC_VLANCTL_TAG;
2841 for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
2842 desc = &sc->rl_ldata.rl_tx_list[prod];
2843 desc->rl_vlanctl = htole32(vlanctl);
2844 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
2845 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
2846 cmdstat = segs[i].ds_len;
2848 cmdstat |= RL_TDESC_CMD_OWN;
2849 if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
2850 cmdstat |= RL_TDESC_CMD_EOR;
2851 desc->rl_cmdstat = htole32(cmdstat | csum_flags);
2852 sc->rl_ldata.rl_tx_free--;
2854 /* Update producer index. */
2855 sc->rl_ldata.rl_tx_prodidx = prod;
2857 /* Set EOF on the last descriptor. */
2858 ei = RL_TX_DESC_PRV(sc, prod);
2859 desc = &sc->rl_ldata.rl_tx_list[ei];
2860 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
2862 desc = &sc->rl_ldata.rl_tx_list[si];
2863 /* Set SOF and transfer ownership of packet to the chip. */
2864 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);
2867 * Ensure that the map for this transmission
2868 * is placed at the array index of the last descriptor
2869 * in this chain. (Swap the last and first dmamaps.)
2871 txd_last = &sc->rl_ldata.rl_tx_desc[ei];
2872 map = txd->tx_dmamap;
2873 txd->tx_dmamap = txd_last->tx_dmamap;
2874 txd_last->tx_dmamap = map;
2875 txd_last->tx_m = *m_head;
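/*
 * Editorial sketch (not driver code): the descriptor chain built above is
 * published in a specific order -- every fragment except the first is
 * handed to the NIC as it is written, the last fragment gets the EOF bit,
 * and only then does the first descriptor receive SOF plus OWN, so the chip
 * can never start transmitting a partially built chain. The struct and bit
 * values below are hypothetical placeholders.
 */
#include <stdint.h>

#define	TOY_CMD_OWN	0x80000000u
#define	TOY_CMD_EOR	0x40000000u	/* wrap marker on the last ring slot */
#define	TOY_CMD_SOF	0x20000000u
#define	TOY_CMD_EOF	0x10000000u

struct toy_txdesc {
	uint32_t cmdstat;
};

static void
toy_queue_chain(struct toy_txdesc *ring, int ring_cnt, int first,
    const uint32_t *seg_len, int nsegs)
{
	int i, idx;

	for (i = 0; i < nsegs; i++) {
		idx = (first + i) % ring_cnt;
		ring[idx].cmdstat = seg_len[i];
		if (i > 0)
			ring[idx].cmdstat |= TOY_CMD_OWN;	/* safe: chip won't start here */
		if (idx == ring_cnt - 1)
			ring[idx].cmdstat |= TOY_CMD_EOR;
		if (i == nsegs - 1)
			ring[idx].cmdstat |= TOY_CMD_EOF;
	}
	/* Publish the frame: set SOF and hand the head descriptor over last. */
	ring[first].cmdstat |= TOY_CMD_SOF | TOY_CMD_OWN;
}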
2881 re_start(struct ifnet *ifp)
2883 struct rl_softc *sc;
2887 re_start_locked(ifp);
2892 * Main transmit routine for C+ and gigE NICs.
2895 re_start_locked(struct ifnet *ifp)
2897 struct rl_softc *sc;
2898 struct mbuf *m_head;
2904 /* XXX is this necessary ? */
2905 if (ifp->if_capenable & IFCAP_NETMAP) {
2906 struct netmap_kring *kring = &NA(ifp)->tx_rings[0];
2907 if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) {
2908 /* kick the tx unit */
2909 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2910 #ifdef RE_TX_MODERATION
2911 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2913 sc->rl_watchdog_timer = 5;
2917 #endif /* DEV_NETMAP */
2918 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2919 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
2922 for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2923 sc->rl_ldata.rl_tx_free > 1;) {
2924 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2928 if (re_encap(sc, &m_head) != 0) {
2931 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2932 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2937 * If there's a BPF listener, bounce a copy of this frame
2940 ETHER_BPF_MTAP(ifp, m_head);
2946 #ifdef RE_TX_MODERATION
2947 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt)
2948 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2953 /* Flush the TX descriptors */
2955 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2956 sc->rl_ldata.rl_tx_list_map,
2957 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2959 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2961 #ifdef RE_TX_MODERATION
2963 * Use the countdown timer for interrupt moderation.
2964 * 'TX done' interrupts are disabled. Instead, we reset the
2965 * countdown timer, which counts up until it reaches the
2966 * value in the TIMERINT register and then triggers an
2967 * interrupt. Each time we write to the TIMERCNT register,
2968 * the timer count is reset to 0.
2970 CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2974 * Set a timeout in case the chip goes out to lunch.
2976 sc->rl_watchdog_timer = 5;
2980 re_set_jumbo(struct rl_softc *sc, int jumbo)
2983 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) {
2984 pci_set_max_read_req(sc->rl_dev, 4096);
2988 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
2990 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) |
2992 switch (sc->rl_hwrev->rl_rev) {
2993 case RL_HWREV_8168DP:
2995 case RL_HWREV_8168E:
2996 CSR_WRITE_1(sc, sc->rl_cfg4,
2997 CSR_READ_1(sc, sc->rl_cfg4) | 0x01);
3000 CSR_WRITE_1(sc, sc->rl_cfg4,
3001 CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1);
3004 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) &
3005 ~RL_CFG3_JUMBO_EN0);
3006 switch (sc->rl_hwrev->rl_rev) {
3007 case RL_HWREV_8168DP:
3009 case RL_HWREV_8168E:
3010 CSR_WRITE_1(sc, sc->rl_cfg4,
3011 CSR_READ_1(sc, sc->rl_cfg4) & ~0x01);
3014 CSR_WRITE_1(sc, sc->rl_cfg4,
3015 CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1);
3018 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3020 switch (sc->rl_hwrev->rl_rev) {
3021 case RL_HWREV_8168DP:
3022 pci_set_max_read_req(sc->rl_dev, 4096);
3026 pci_set_max_read_req(sc->rl_dev, 512);
3028 pci_set_max_read_req(sc->rl_dev, 4096);
3035 struct rl_softc *sc = xsc;
3043 re_init_locked(struct rl_softc *sc)
3045 struct ifnet *ifp = sc->rl_ifp;
3046 struct mii_data *mii;
3050 uint32_t align_dummy;
3051 u_char eaddr[ETHER_ADDR_LEN];
3056 mii = device_get_softc(sc->rl_miibus);
3058 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3062 * Cancel pending I/O and free all RX/TX buffers.
3066 /* Put controller into known state. */
3070 * For C+ mode, initialize the RX descriptors and mbufs.
3072 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3073 if (ifp->if_mtu > RL_MTU) {
3074 if (re_jrx_list_init(sc) != 0) {
3075 device_printf(sc->rl_dev,
3076 "no memory for jumbo RX buffers\n");
3080 /* Disable checksum offloading for jumbo frames. */
3081 ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4);
3082 ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO);
3084 if (re_rx_list_init(sc) != 0) {
3085 device_printf(sc->rl_dev,
3086 "no memory for RX buffers\n");
3091 re_set_jumbo(sc, ifp->if_mtu > RL_MTU);
3093 if (re_rx_list_init(sc) != 0) {
3094 device_printf(sc->rl_dev, "no memory for RX buffers\n");
3098 if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3099 pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) {
3100 if (ifp->if_mtu > RL_MTU)
3101 pci_set_max_read_req(sc->rl_dev, 512);
3103 pci_set_max_read_req(sc->rl_dev, 4096);
3106 re_tx_list_init(sc);
3109 * Enable C+ RX and TX mode, as well as VLAN stripping and
3110 * RX checksum offload. We must configure the C+ register
3111 * before all others.
3113 cfg = RL_CPLUSCMD_PCI_MRW;
3114 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3115 cfg |= RL_CPLUSCMD_RXCSUM_ENB;
3116 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3117 cfg |= RL_CPLUSCMD_VLANSTRIP;
3118 if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) {
3119 cfg |= RL_CPLUSCMD_MACSTAT_DIS;
3123 cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB;
3124 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
3125 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC ||
3126 sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) {
3128 if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0)
3130 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE)
3132 CSR_WRITE_4(sc, 0x7c, reg);
3133 /* Disable interrupt mitigation. */
3134 CSR_WRITE_2(sc, 0xe2, 0);
3137 * Disable TSO if the interface MTU is greater than the MSS
3138 * allowed by the controller.
3140 if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) {
3141 ifp->if_capenable &= ~IFCAP_TSO4;
3142 ifp->if_hwassist &= ~CSUM_TSO;
3146 * Init our MAC address. Even though the chipset
3147 * documentation doesn't mention it, we need to enter "Config
3148 * register write enable" mode to modify the ID registers.
3150 /* Copy MAC address on stack to align. */
3151 bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN);
3152 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3153 CSR_WRITE_4(sc, RL_IDR0,
3154 htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
3155 CSR_WRITE_4(sc, RL_IDR4,
3156 htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
3157 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
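/*
 * Editorial sketch (not driver code): programming the station address above
 * writes the 6-byte MAC as two little-endian 32-bit words while the chip is
 * in config-write-enable mode. The helper below builds those two words
 * portably from a byte array (the driver instead copies the address to an
 * aligned stack buffer and converts with htole32()).
 */
#include <stdint.h>

static void
toy_pack_eaddr(const uint8_t mac[6], uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)mac[0] | (uint32_t)mac[1] << 8 |
	    (uint32_t)mac[2] << 16 | (uint32_t)mac[3] << 24;
	*hi = (uint32_t)mac[4] | (uint32_t)mac[5] << 8;
}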
3160 * Load the addresses of the RX and TX lists into the chip.
3163 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
3164 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
3165 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
3166 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));
3168 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
3169 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
3170 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
3171 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));
3174 * Enable transmit and receive.
3176 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
3179 * Set the initial TX configuration.
3181 if (sc->rl_testmode) {
3182 if (sc->rl_type == RL_8169)
3183 CSR_WRITE_4(sc, RL_TXCFG,
3184 RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
3186 CSR_WRITE_4(sc, RL_TXCFG,
3187 RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
3189 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
3191 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
3194 * Set the initial RX configuration.
3198 /* Configure interrupt moderation. */
3199 if (sc->rl_type == RL_8169) {
3200 /* Magic from vendor. */
3201 CSR_WRITE_2(sc, RL_INTRMOD, 0x5100);
3204 #ifdef DEVICE_POLLING
3206 * Disable interrupts if we are polling.
3208 if (ifp->if_capenable & IFCAP_POLLING)
3209 CSR_WRITE_2(sc, RL_IMR, 0);
3210 else /* otherwise ... */
3214 * Enable interrupts.
3216 if (sc->rl_testmode)
3217 CSR_WRITE_2(sc, RL_IMR, 0);
3219 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3220 CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS);
3222 /* Set initial TX threshold */
3223 sc->rl_txthresh = RL_TX_THRESH_INIT;
3225 /* Start RX/TX process. */
3226 CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
3228 /* Enable receiver and transmitter. */
3229 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
3233 * Initialize the timer interrupt register so that
3234 * a timer interrupt will be generated once the timer
3235 * reaches a certain number of ticks. The timer is
3236 * reloaded on each transmit.
3238 #ifdef RE_TX_MODERATION
3240 * Use the timer interrupt register to moderate TX completion
3241 * interrupts, which dramatically improves the TX frame rate.
3243 if (sc->rl_type == RL_8169)
3244 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
3246 CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
3249 * Use the timer interrupt register to moderate RX interrupt
3252 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
3254 if (sc->rl_type == RL_8169)
3255 CSR_WRITE_4(sc, RL_TIMERINT_8169,
3256 RL_USECS(sc->rl_int_rx_mod));
3258 if (sc->rl_type == RL_8169)
3259 CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0));
3264 * For 8169 gigE NICs, set the max allowed RX packet
3265 * size so we can receive jumbo frames.
3267 if (sc->rl_type == RL_8169) {
3268 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3270 * For controllers that use the new jumbo frame scheme,
3271 * set the maximum jumbo frame size according to the
3272 * controller revision.
3274 if (ifp->if_mtu > RL_MTU)
3275 CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3276 sc->rl_hwrev->rl_max_mtu +
3277 ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN +
3280 CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3282 } else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3283 sc->rl_hwrev->rl_max_mtu == RL_MTU) {
3284 /* RTL810x has no jumbo frame support. */
3285 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
3287 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
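/*
 * Editorial sketch (not driver code): the maximum RX packet length written
 * above is the on-wire frame size that corresponds to the MTU, i.e. the MTU
 * plus the Ethernet header, a possible VLAN tag, and the trailing CRC. The
 * constants below spell out the standard Ethernet overheads.
 */
#define	TOY_ETHER_HDR_LEN		14	/* dst + src + ethertype */
#define	TOY_ETHER_VLAN_ENCAP_LEN	4	/* 802.1Q tag */
#define	TOY_ETHER_CRC_LEN		4	/* frame check sequence */

static int
toy_max_rx_pkt_len(int mtu)
{
	return (mtu + TOY_ETHER_HDR_LEN + TOY_ETHER_VLAN_ENCAP_LEN +
	    TOY_ETHER_CRC_LEN);
}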
3290 if (sc->rl_testmode)
3293 CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |
3296 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3297 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3299 sc->rl_flags &= ~RL_FLAG_LINK;
3302 sc->rl_watchdog_timer = 0;
3303 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
3307 * Set media options.
3310 re_ifmedia_upd(struct ifnet *ifp)
3312 struct rl_softc *sc;
3313 struct mii_data *mii;
3317 mii = device_get_softc(sc->rl_miibus);
3319 error = mii_mediachg(mii);
3326 * Report current media status.
3329 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3331 struct rl_softc *sc;
3332 struct mii_data *mii;
3335 mii = device_get_softc(sc->rl_miibus);
3339 ifmr->ifm_active = mii->mii_media_active;
3340 ifmr->ifm_status = mii->mii_media_status;
3345 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3347 struct rl_softc *sc = ifp->if_softc;
3348 struct ifreq *ifr = (struct ifreq *) data;
3349 struct mii_data *mii;
3355 if (ifr->ifr_mtu < ETHERMIN ||
3356 ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu ||
3357 ((sc->rl_flags & RL_FLAG_FASTETHER) != 0 &&
3358 ifr->ifr_mtu > RL_MTU)) {
3363 if (ifp->if_mtu != ifr->ifr_mtu) {
3364 ifp->if_mtu = ifr->ifr_mtu;
3365 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3366 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3367 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3370 if (ifp->if_mtu > RL_TSO_MTU &&
3371 (ifp->if_capenable & IFCAP_TSO4) != 0) {
3372 ifp->if_capenable &= ~(IFCAP_TSO4 |
3374 ifp->if_hwassist &= ~CSUM_TSO;
3376 VLAN_CAPABILITIES(ifp);
3382 if ((ifp->if_flags & IFF_UP) != 0) {
3383 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3384 if (((ifp->if_flags ^ sc->rl_if_flags)
3385 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3390 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3393 sc->rl_if_flags = ifp->if_flags;
3399 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3405 mii = device_get_softc(sc->rl_miibus);
3406 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
3412 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3414 #ifdef DEVICE_POLLING
3415 if (mask & IFCAP_POLLING) {
3416 if (ifr->ifr_reqcap & IFCAP_POLLING) {
3417 error = ether_poll_register(re_poll, ifp);
3421 /* Disable interrupts */
3422 CSR_WRITE_2(sc, RL_IMR, 0x0000);
3423 ifp->if_capenable |= IFCAP_POLLING;
3426 error = ether_poll_deregister(ifp);
3427 /* Enable interrupts. */
3429 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3430 ifp->if_capenable &= ~IFCAP_POLLING;
3434 #endif /* DEVICE_POLLING */
3436 if ((mask & IFCAP_TXCSUM) != 0 &&
3437 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
3438 ifp->if_capenable ^= IFCAP_TXCSUM;
3439 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) {
3440 rev = sc->rl_hwrev->rl_rev;
3441 if (rev == RL_HWREV_8168C ||
3442 rev == RL_HWREV_8168C_SPIN2 ||
3443 rev == RL_HWREV_8168CP)
3444 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
3446 ifp->if_hwassist |= RE_CSUM_FEATURES;
3448 ifp->if_hwassist &= ~RE_CSUM_FEATURES;
3451 if ((mask & IFCAP_RXCSUM) != 0 &&
3452 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
3453 ifp->if_capenable ^= IFCAP_RXCSUM;
3456 if ((mask & IFCAP_TSO4) != 0 &&
3457 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
3458 ifp->if_capenable ^= IFCAP_TSO4;
3459 if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
3460 ifp->if_hwassist |= CSUM_TSO;
3462 ifp->if_hwassist &= ~CSUM_TSO;
3463 if (ifp->if_mtu > RL_TSO_MTU &&
3464 (ifp->if_capenable & IFCAP_TSO4) != 0) {
3465 ifp->if_capenable &= ~IFCAP_TSO4;
3466 ifp->if_hwassist &= ~CSUM_TSO;
3469 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
3470 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
3471 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3472 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
3473 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
3474 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3475 /* TSO over VLAN requires VLAN hardware tagging. */
3476 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
3477 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
3480 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3481 (mask & (IFCAP_HWCSUM | IFCAP_TSO4 |
3482 IFCAP_VLAN_HWTSO)) != 0)
3484 if ((mask & IFCAP_WOL) != 0 &&
3485 (ifp->if_capabilities & IFCAP_WOL) != 0) {
3486 if ((mask & IFCAP_WOL_UCAST) != 0)
3487 ifp->if_capenable ^= IFCAP_WOL_UCAST;
3488 if ((mask & IFCAP_WOL_MCAST) != 0)
3489 ifp->if_capenable ^= IFCAP_WOL_MCAST;
3490 if ((mask & IFCAP_WOL_MAGIC) != 0)
3491 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
3493 if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3494 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3498 VLAN_CAPABILITIES(ifp);
3502 error = ether_ioctl(ifp, command, data);
3510 re_watchdog(struct rl_softc *sc)
3516 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
3521 if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) {
3522 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
3524 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3525 re_start_locked(ifp);
3529 if_printf(ifp, "watchdog timeout\n");
3533 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3535 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3536 re_start_locked(ifp);
3540 * Stop the adapter and free any mbufs allocated to the
3544 re_stop(struct rl_softc *sc)
3548 struct rl_txdesc *txd;
3549 struct rl_rxdesc *rxd;
3555 sc->rl_watchdog_timer = 0;
3556 callout_stop(&sc->rl_stat_callout);
3557 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3560 * Stop accepting frames to put the RX MAC into an idle state.
3561 * Otherwise it is possible to receive frames while the stop
3562 * command is executing, and the controller could DMA a frame
3563 * into an already freed RX buffer during that period.
3565 CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
3566 ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI |
3567 RL_RXCFG_RX_BROAD));
3569 if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) {
3570 for (i = RL_TIMEOUT; i > 0; i--) {
3571 if ((CSR_READ_1(sc, sc->rl_txstart) &
3572 RL_TXSTART_START) == 0)
3577 device_printf(sc->rl_dev,
3578 "stopping TX poll timed out!\n");
3579 CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3580 } else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) {
3581 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
3583 if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) {
3584 for (i = RL_TIMEOUT; i > 0; i--) {
3585 if ((CSR_READ_4(sc, RL_TXCFG) &
3586 RL_TXCFG_QUEUE_EMPTY) != 0)
3591 device_printf(sc->rl_dev,
3592 "stopping TXQ timed out!\n");
3595 CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3597 CSR_WRITE_2(sc, RL_IMR, 0x0000);
3598 CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
3600 if (sc->rl_head != NULL) {
3601 m_freem(sc->rl_head);
3602 sc->rl_head = sc->rl_tail = NULL;
3605 /* Free the TX list buffers. */
3606 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
3607 txd = &sc->rl_ldata.rl_tx_desc[i];
3608 if (txd->tx_m != NULL) {
3609 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
3610 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3611 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
3618 /* Free the RX list buffers. */
3619 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3620 rxd = &sc->rl_ldata.rl_rx_desc[i];
3621 if (rxd->rx_m != NULL) {
3622 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
3623 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3624 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
3631 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3632 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3633 rxd = &sc->rl_ldata.rl_jrx_desc[i];
3634 if (rxd->rx_m != NULL) {
3635 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag,
3636 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3637 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag,
3647 * Device suspend routine. Stop the interface and save some PCI
3648 * settings in case the BIOS doesn't restore them properly on
3652 re_suspend(device_t dev)
3654 struct rl_softc *sc;
3656 sc = device_get_softc(dev);
3668 * Device resume routine. Restore some PCI settings in case the BIOS
3669 * doesn't, re-enable busmastering, and restart the interface if
3673 re_resume(device_t dev)
3675 struct rl_softc *sc;
3678 sc = device_get_softc(dev);
3683 /* Take controller out of sleep mode. */
3684 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3685 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3686 CSR_WRITE_1(sc, RL_GPIO,
3687 CSR_READ_1(sc, RL_GPIO) | 0x01);
3691 * Clear WOL matching so that normal Rx filtering
3692 * does not interfere with WOL patterns.
3696 /* reinitialize interface if necessary */
3697 if (ifp->if_flags & IFF_UP)
3707 * Stop all chip I/O so that the kernel's probe routines don't
3708 * get confused by errant DMAs when rebooting.
3711 re_shutdown(device_t dev)
3713 struct rl_softc *sc;
3715 sc = device_get_softc(dev);
3720 * Mark interface as down since otherwise we will panic if
3721 * interrupt comes in later on, which can happen in some
3724 sc->rl_ifp->if_flags &= ~IFF_UP;
3732 re_set_linkspeed(struct rl_softc *sc)
3734 struct mii_softc *miisc;
3735 struct mii_data *mii;
3740 mii = device_get_softc(sc->rl_miibus);
3743 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3744 (IFM_ACTIVE | IFM_AVALID)) {
3745 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3756 miisc = LIST_FIRST(&mii->mii_phys);
3757 phyno = miisc->mii_phy;
3758 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3760 re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0);
3761 re_miibus_writereg(sc->rl_dev, phyno,
3762 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3763 re_miibus_writereg(sc->rl_dev, phyno,
3764 MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
3768 * Poll link state until re(4) gets a 10/100Mbps link.
3770 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3772 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3773 == (IFM_ACTIVE | IFM_AVALID)) {
3774 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3786 if (i == MII_ANEGTICKS_GIGE)
3787 device_printf(sc->rl_dev,
3788 "establishing a link failed, WOL may not work!");
3791 * No link; force the MAC to a 100Mbps, full-duplex link.
3792 * The MAC does not require reprogramming on resolved speed/duplex,
3793 * so this is just for completeness.
3795 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3796 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3800 re_setwol(struct rl_softc *sc)
3809 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3813 /* Put controller into sleep mode. */
3814 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3815 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3816 CSR_WRITE_1(sc, RL_GPIO,
3817 CSR_READ_1(sc, RL_GPIO) & ~0x01);
3819 if ((ifp->if_capenable & IFCAP_WOL) != 0) {
3821 if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0)
3822 re_set_linkspeed(sc);
3823 if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0)
3824 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB);
3826 /* Enable config register write. */
3827 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3830 v = CSR_READ_1(sc, sc->rl_cfg1);
3832 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3834 CSR_WRITE_1(sc, sc->rl_cfg1, v);
3836 v = CSR_READ_1(sc, sc->rl_cfg3);
3837 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3838 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
3839 v |= RL_CFG3_WOL_MAGIC;
3840 CSR_WRITE_1(sc, sc->rl_cfg3, v);
3842 v = CSR_READ_1(sc, sc->rl_cfg5);
3843 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST |
3844 RL_CFG5_WOL_LANWAKE);
3845 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
3846 v |= RL_CFG5_WOL_UCAST;
3847 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
3848 v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
3849 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3850 v |= RL_CFG5_WOL_LANWAKE;
3851 CSR_WRITE_1(sc, sc->rl_cfg5, v);
3853 /* Config register write done. */
3854 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3856 if ((ifp->if_capenable & IFCAP_WOL) == 0 &&
3857 (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0)
3858 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80);
3860 * It seems that the hardware resets its link speed to 100Mbps in
3861 * power-down mode, so switching to 100Mbps in the driver is not needed.
3865 /* Request PME if WOL is requested. */
3866 pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
3867 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3868 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3869 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3870 pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3874 re_clrwol(struct rl_softc *sc)
3881 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3884 /* Enable config register write. */
3885 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3887 v = CSR_READ_1(sc, sc->rl_cfg3);
3888 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3889 CSR_WRITE_1(sc, sc->rl_cfg3, v);
3891 /* Config register write done. */
3892 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3894 v = CSR_READ_1(sc, sc->rl_cfg5);
3895 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
3896 v &= ~RL_CFG5_WOL_LANWAKE;
3897 CSR_WRITE_1(sc, sc->rl_cfg5, v);
3901 re_add_sysctls(struct rl_softc *sc)
3903 struct sysctl_ctx_list *ctx;
3904 struct sysctl_oid_list *children;
3907 ctx = device_get_sysctl_ctx(sc->rl_dev);
3908 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
3910 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats",
3911 CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I",
3912 "Statistics Information");
3913 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
3916 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod",
3917 CTLTYPE_INT | CTLFLAG_RW, &sc->rl_int_rx_mod, 0,
3918 sysctl_hw_re_int_mod, "I", "re RX interrupt moderation");
3919 /* Pull in device tunables. */
3920 sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3921 error = resource_int_value(device_get_name(sc->rl_dev),
3922 device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod);
3924 if (sc->rl_int_rx_mod < RL_TIMER_MIN ||
3925 sc->rl_int_rx_mod > RL_TIMER_MAX) {
3926 device_printf(sc->rl_dev, "int_rx_mod value out of "
3927 "range; using default: %d\n",
3929 sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3936 re_sysctl_stats(SYSCTL_HANDLER_ARGS)
3938 struct rl_softc *sc;
3939 struct rl_stats *stats;
3940 int error, i, result;
3943 error = sysctl_handle_int(oidp, &result, 0, req);
3944 if (error || req->newptr == NULL)
3948 sc = (struct rl_softc *)arg1;
3950 if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3954 bus_dmamap_sync(sc->rl_ldata.rl_stag,
3955 sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD);
3956 CSR_WRITE_4(sc, RL_DUMPSTATS_HI,
3957 RL_ADDR_HI(sc->rl_ldata.rl_stats_addr));
3958 CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
3959 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr));
3960 CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
3961 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr |
3962 RL_DUMPSTATS_START));
3963 for (i = RL_TIMEOUT; i > 0; i--) {
3964 if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) &
3965 RL_DUMPSTATS_START) == 0)
3969 bus_dmamap_sync(sc->rl_ldata.rl_stag,
3970 sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD);
3973 device_printf(sc->rl_dev,
3974 "DUMP statistics request timed out\n");
3978 stats = sc->rl_ldata.rl_stats;
3979 printf("%s statistics:\n", device_get_nameunit(sc->rl_dev));
3980 printf("Tx frames : %ju\n",
3981 (uintmax_t)le64toh(stats->rl_tx_pkts));
3982 printf("Rx frames : %ju\n",
3983 (uintmax_t)le64toh(stats->rl_rx_pkts));
3984 printf("Tx errors : %ju\n",
3985 (uintmax_t)le64toh(stats->rl_tx_errs));
3986 printf("Rx errors : %u\n",
3987 le32toh(stats->rl_rx_errs));
3988 printf("Rx missed frames : %u\n",
3989 (uint32_t)le16toh(stats->rl_missed_pkts));
3990 printf("Rx frame alignment errs : %u\n",
3991 (uint32_t)le16toh(stats->rl_rx_framealign_errs));
3992 printf("Tx single collisions : %u\n",
3993 le32toh(stats->rl_tx_onecoll));
3994 printf("Tx multiple collisions : %u\n",
3995 le32toh(stats->rl_tx_multicolls));
3996 printf("Rx unicast frames : %ju\n",
3997 (uintmax_t)le64toh(stats->rl_rx_ucasts));
3998 printf("Rx broadcast frames : %ju\n",
3999 (uintmax_t)le64toh(stats->rl_rx_bcasts));
4000 printf("Rx multicast frames : %u\n",
4001 le32toh(stats->rl_rx_mcasts));
4002 printf("Tx aborts : %u\n",
4003 (uint32_t)le16toh(stats->rl_tx_aborts));
4004 printf("Tx underruns : %u\n",
4005 (uint32_t)le16toh(stats->rl_rx_underruns));
4012 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4018 value = *(int *)arg1;
4019 error = sysctl_handle_int(oidp, &value, 0, req);
4020 if (error || req->newptr == NULL)
4022 if (value < low || value > high)
4024 *(int *)arg1 = value;
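/*
 * Editorial sketch (not driver code): re_add_sysctls() above falls back to
 * the default when the int_rx_mod tunable is out of range, while
 * sysctl_int_range() rejects out-of-range writes; both apply the same
 * [lo, hi] bound. The hypothetical helper below shows the fallback variant.
 */
static int
toy_clamp_tunable(int value, int lo, int hi, int fallback)
{
	if (value < lo || value > hi)
		return (fallback);
	return (value);
}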
4030 sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS)
4033 return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN,