1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
43 #include <sys/endian.h>
44 #include <sys/kernel.h>
45 #include <sys/kthread.h>
46 #include <sys/malloc.h>
48 #include <sys/module.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <sys/taskqueue.h>
54 #include <sys/eventhandler.h>
55 #include <machine/bus.h>
56 #include <machine/resource.h>
59 #include <net/ethernet.h>
61 #include <net/if_arp.h>
62 #include <net/if_dl.h>
63 #include <net/if_media.h>
65 #include <net/if_types.h>
66 #include <net/if_vlan_var.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/in.h>
70 #include <netinet/if_ether.h>
71 #include <netinet/ip.h>
72 #include <netinet/ip6.h>
73 #include <netinet/tcp.h>
74 #include <netinet/udp.h>
76 #include <machine/in_cksum.h>
77 #include <dev/led/led.h>
78 #include <dev/pci/pcivar.h>
79 #include <dev/pci/pcireg.h>
81 #include "e1000_api.h"
84 /*********************************************************************
85 * Legacy Em Driver version:
86 *********************************************************************/
/* Driver version string; reported to userland (e.g. via sysctl/dmesg). */
87 char lem_driver_version[] = "1.0.3";
89 /*********************************************************************
92 * Used by probe to select devices to load on
93 * Last field stores an index into e1000_strings
94 * Last entry must be all 0s
96 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
97 *********************************************************************/
/*
 * PCI match table consumed by lem_probe().  Each row is
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, index into lem_strings };
 * PCI_ANY_ID in the subsystem fields acts as a wildcard.  The table is
 * terminated by an all-zero sentinel row (not visible in this listing).
 */
99 static em_vendor_info_t lem_vendor_info_array[] =
101 /* Intel(R) PRO/1000 Network Connection */
102 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
103 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
104 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
105 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
106 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
108 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
109 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
110 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
111 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
112 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
113 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
118 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
126 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
128 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
132 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
141 PCI_ANY_ID, PCI_ANY_ID, 0},
143 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
146 /* required last entry */
150 /*********************************************************************
151 * Table of branding strings for all supported NICs.
152 *********************************************************************/
/* Branding strings, selected by the index field of lem_vendor_info_array. */
154 static char *lem_strings[] = {
155 "Intel(R) PRO/1000 Legacy Network Connection"
158 /*********************************************************************
159 * Function prototypes
160 *********************************************************************/
/* Newbus device interface entry points. */
161 static int lem_probe(device_t);
162 static int lem_attach(device_t);
163 static int lem_detach(device_t);
164 static int lem_shutdown(device_t);
165 static int lem_suspend(device_t);
166 static int lem_resume(device_t);
/* ifnet (network stack) entry points. */
167 static void lem_start(struct ifnet *);
168 static void lem_start_locked(struct ifnet *ifp);
169 static int lem_ioctl(struct ifnet *, u_long, caddr_t);
170 static void lem_init(void *);
171 static void lem_init_locked(struct adapter *);
172 static void lem_stop(void *);
173 static void lem_media_status(struct ifnet *, struct ifmediareq *);
174 static int lem_media_change(struct ifnet *);
/* Hardware bring-up and resource management helpers. */
175 static void lem_identify_hardware(struct adapter *);
176 static int lem_allocate_pci_resources(struct adapter *);
177 static int lem_allocate_irq(struct adapter *adapter);
178 static void lem_free_pci_resources(struct adapter *);
179 static void lem_local_timer(void *);
180 static int lem_hardware_init(struct adapter *);
181 static int lem_setup_interface(device_t, struct adapter *);
182 static void lem_setup_transmit_structures(struct adapter *);
183 static void lem_initialize_transmit_unit(struct adapter *);
184 static int lem_setup_receive_structures(struct adapter *);
185 static void lem_initialize_receive_unit(struct adapter *);
186 static void lem_enable_intr(struct adapter *);
187 static void lem_disable_intr(struct adapter *);
188 static void lem_free_transmit_structures(struct adapter *);
189 static void lem_free_receive_structures(struct adapter *);
190 static void lem_update_stats_counters(struct adapter *);
191 static void lem_add_hw_stats(struct adapter *adapter);
/* TX/RX datapath. */
192 static void lem_txeof(struct adapter *);
193 static void lem_tx_purge(struct adapter *);
194 static int lem_allocate_receive_structures(struct adapter *);
195 static int lem_allocate_transmit_structures(struct adapter *);
196 static bool lem_rxeof(struct adapter *, int, int *);
197 #ifndef __NO_STRICT_ALIGNMENT
198 static int lem_fixup_rx(struct adapter *);
200 static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
202 static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
204 static void lem_set_promisc(struct adapter *);
205 static void lem_disable_promisc(struct adapter *);
206 static void lem_set_multi(struct adapter *);
207 static void lem_update_link_status(struct adapter *);
208 static int lem_get_buf(struct adapter *, int);
/* VLAN event handlers and hardware VLAN support. */
209 static void lem_register_vlan(void *, struct ifnet *, u16);
210 static void lem_unregister_vlan(void *, struct ifnet *, u16);
211 static void lem_setup_vlan_hw_support(struct adapter *);
212 static int lem_xmit(struct adapter *, struct mbuf **);
213 static void lem_smartspeed(struct adapter *);
/* 82547 TX FIFO workaround helpers. */
214 static int lem_82547_fifo_workaround(struct adapter *, int);
215 static void lem_82547_update_fifo_head(struct adapter *, int);
216 static int lem_82547_tx_fifo_reset(struct adapter *);
217 static void lem_82547_move_tail(void *);
218 static int lem_dma_malloc(struct adapter *, bus_size_t,
219 struct em_dma_alloc *, int);
220 static void lem_dma_free(struct adapter *, struct em_dma_alloc *);
221 static int lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
222 static void lem_print_nvm_info(struct adapter *);
223 static int lem_is_valid_ether_addr(u8 *);
224 static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
225 PDESC_ARRAY desc_array);
226 static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
227 static void lem_add_int_delay_sysctl(struct adapter *, const char *,
228 const char *, struct em_int_delay_info *, int, int);
229 static void lem_set_flow_cntrl(struct adapter *, const char *,
230 const char *, int *, int);
231 /* Management and WOL Support */
232 static void lem_init_manageability(struct adapter *);
233 static void lem_release_manageability(struct adapter *);
234 static void lem_get_hw_control(struct adapter *);
235 static void lem_release_hw_control(struct adapter *);
236 static void lem_get_wakeup(device_t);
237 static void lem_enable_wakeup(device_t);
238 static int lem_enable_phy_wakeup(struct adapter *);
239 static void lem_led_func(void *, int);
/* Legacy vs. fast-interrupt/taskqueue paths; the matching #if for the
 * #endif below is not visible in this listing — presumably EM_LEGACY_IRQ. */
242 static void lem_intr(void *);
244 static int lem_irq_fast(void *);
245 static void lem_handle_rxtx(void *context, int pending);
246 static void lem_handle_link(void *context, int pending);
247 static void lem_add_rx_process_limit(struct adapter *, const char *,
248 const char *, int *, int);
249 #endif /* ~EM_LEGACY_IRQ */
251 #ifdef DEVICE_POLLING
252 static poll_handler_t lem_poll;
255 /*********************************************************************
256 * FreeBSD Device Interface Entry Points
257 *********************************************************************/
/*
 * Newbus glue: method table, driver descriptor and module registration.
 * The driver registers under the "em" name and shares em_devclass with the
 * main em(4) driver so both use a common unit-number space.
 */
259 static device_method_t lem_methods[] = {
260 /* Device interface */
261 DEVMETHOD(device_probe, lem_probe),
262 DEVMETHOD(device_attach, lem_attach),
263 DEVMETHOD(device_detach, lem_detach),
264 DEVMETHOD(device_shutdown, lem_shutdown),
265 DEVMETHOD(device_suspend, lem_suspend),
266 DEVMETHOD(device_resume, lem_resume),
270 static driver_t lem_driver = {
271 "em", lem_methods, sizeof(struct adapter),
274 extern devclass_t em_devclass;
275 DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
276 MODULE_DEPEND(lem, pci, 1, 1, 1);
277 MODULE_DEPEND(lem, ether, 1, 1, 1);
279 /*********************************************************************
280 * Tunable default values.
281 *********************************************************************/
/*
 * Convert between hardware interrupt-delay timer ticks and microseconds.
 * One tick is 1024 ns; the +500/+512 terms round to nearest.
 */
283 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
284 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
/* Loader-tunable defaults for interrupt moderation and ring sizes. */
286 static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
287 static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
288 static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
289 static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
290 static int lem_rxd = EM_DEFAULT_RXD;
291 static int lem_txd = EM_DEFAULT_TXD;
292 static int lem_smart_pwr_down = FALSE;
294 /* Controls whether promiscuous also shows bad packets */
295 static int lem_debug_sbp = FALSE;
/* Expose the above as hw.em.* loader tunables (shared with em(4)). */
297 TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
298 TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
299 TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
300 TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
301 TUNABLE_INT("hw.em.rxd", &lem_rxd);
302 TUNABLE_INT("hw.em.txd", &lem_txd);
303 TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
304 TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
306 #ifndef EM_LEGACY_IRQ
307 /* How many packets rxeof tries to clean at a time */
308 static int lem_rx_process_limit = 100;
309 TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
312 /* Flow control setting - default to FULL */
313 static int lem_fc_setting = e1000_fc_full;
314 TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
316 /* Global used in WOL setup with multiport cards */
317 static int global_quad_port_a = 0;
319 /*********************************************************************
320 * Device identification routine
322 * em_probe determines if the driver should be loaded on
323 * adapter based on PCI vendor/device id of the adapter.
325 * return BUS_PROBE_DEFAULT on success, positive on failure
326 *********************************************************************/
/*
 * lem_probe - decide whether this driver should claim the PCI device.
 * Rejects non-Intel vendors early, then walks lem_vendor_info_array;
 * PCI_ANY_ID subsystem fields match anything.  On a hit the device
 * description is built from lem_strings and BUS_PROBE_DEFAULT is
 * returned (failure-path return not visible in this listing).
 */
329 lem_probe(device_t dev)
331 char adapter_name[60];
332 u16 pci_vendor_id = 0;
333 u16 pci_device_id = 0;
334 u16 pci_subvendor_id = 0;
335 u16 pci_subdevice_id = 0;
336 em_vendor_info_t *ent;
338 INIT_DEBUGOUT("em_probe: begin");
/* Quick reject: anything that is not Intel cannot match the table. */
340 pci_vendor_id = pci_get_vendor(dev);
341 if (pci_vendor_id != EM_VENDOR_ID)
344 pci_device_id = pci_get_device(dev);
345 pci_subvendor_id = pci_get_subvendor(dev);
346 pci_subdevice_id = pci_get_subdevice(dev);
/* Linear scan; the table is terminated by a zero vendor_id sentinel. */
348 ent = lem_vendor_info_array;
349 while (ent->vendor_id != 0) {
350 if ((pci_vendor_id == ent->vendor_id) &&
351 (pci_device_id == ent->device_id) &&
353 ((pci_subvendor_id == ent->subvendor_id) ||
354 (ent->subvendor_id == PCI_ANY_ID)) &&
356 ((pci_subdevice_id == ent->subdevice_id) ||
357 (ent->subdevice_id == PCI_ANY_ID))) {
358 sprintf(adapter_name, "%s %s",
359 lem_strings[ent->index],
361 device_set_desc_copy(dev, adapter_name);
362 return (BUS_PROBE_DEFAULT);
370 /*********************************************************************
371 * Device initialization routine
373 * The attach entry point is called when the driver is being loaded.
374 * This routine identifies the type of hardware, allocates all resources
375 * and initializes the hardware.
377 * return 0 on success, positive on failure
378 *********************************************************************/
/*
 * lem_attach - device initialization entry point.
 * Identifies the MAC, allocates PCI/DMA/IRQ resources, validates the
 * EEPROM and MAC address, initializes the hardware and creates the
 * ifnet.  Returns 0 on success; on failure it unwinds through the
 * error labels at the bottom (goto targets are not visible in this
 * listing) releasing everything acquired so far.
 */
381 lem_attach(device_t dev)
383 struct adapter *adapter;
387 INIT_DEBUGOUT("lem_attach: begin");
389 adapter = device_get_softc(dev);
390 adapter->dev = adapter->osdep.dev = dev;
/* Core/TX/RX mutexes must exist before the callouts below reference them. */
391 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
392 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
393 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
/* dev.em.N.nvm sysctl: dumps NVM/EEPROM contents for debugging. */
396 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
397 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
398 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
399 lem_sysctl_nvm_info, "I", "NVM Information");
401 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
402 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
404 /* Determine hardware and mac info */
405 lem_identify_hardware(adapter);
407 /* Setup PCI resources */
408 if (lem_allocate_pci_resources(adapter)) {
409 device_printf(dev, "Allocation of PCI resources failed\n");
414 /* Do Shared Code initialization */
415 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
416 device_printf(dev, "Setup of Shared code failed\n");
421 e1000_get_bus_info(&adapter->hw);
423 /* Set up some sysctls for the tunable interrupt delays */
424 lem_add_int_delay_sysctl(adapter, "rx_int_delay",
425 "receive interrupt delay in usecs", &adapter->rx_int_delay,
426 E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
427 lem_add_int_delay_sysctl(adapter, "tx_int_delay",
428 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
429 E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
/* Absolute-delay registers (RADV/TADV) only exist on 82540 and later. */
430 if (adapter->hw.mac.type >= e1000_82540) {
431 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
432 "receive interrupt delay limit in usecs",
433 &adapter->rx_abs_int_delay,
434 E1000_REGISTER(&adapter->hw, E1000_RADV),
435 lem_rx_abs_int_delay_dflt);
436 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
437 "transmit interrupt delay limit in usecs",
438 &adapter->tx_abs_int_delay,
439 E1000_REGISTER(&adapter->hw, E1000_TADV),
440 lem_tx_abs_int_delay_dflt);
443 #ifndef EM_LEGACY_IRQ
444 /* Sysctls for limiting the amount of work done in the taskqueue */
445 lem_add_rx_process_limit(adapter, "rx_processing_limit",
446 "max number of rx packets to process", &adapter->rx_process_limit,
447 lem_rx_process_limit);
450 /* Sysctl for setting the interface flow control */
/* NOTE(review): description string looks copy-pasted from the rx limit
 * sysctl above; it presumably should describe flow control instead. */
451 lem_set_flow_cntrl(adapter, "flow_control",
452 "max number of rx packets to process",
453 &adapter->fc_setting, lem_fc_setting);
456 * Validate number of transmit and receive descriptors. It
457 * must not exceed hardware maximum, and must be multiple
458 * of E1000_DBA_ALIGN.
460 if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
461 (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
462 (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
463 (lem_txd < EM_MIN_TXD)) {
464 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
465 EM_DEFAULT_TXD, lem_txd);
466 adapter->num_tx_desc = EM_DEFAULT_TXD;
468 adapter->num_tx_desc = lem_txd;
469 if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
470 (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
471 (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
472 (lem_rxd < EM_MIN_RXD)) {
473 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
474 EM_DEFAULT_RXD, lem_rxd);
475 adapter->num_rx_desc = EM_DEFAULT_RXD;
477 adapter->num_rx_desc = lem_rxd;
/* Default PHY/link policy: autonegotiate, don't block waiting for it. */
479 adapter->hw.mac.autoneg = DO_AUTO_NEG;
480 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
481 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
482 adapter->rx_buffer_len = 2048;
484 e1000_init_script_state_82541(&adapter->hw, TRUE);
485 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
488 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
489 adapter->hw.phy.mdix = AUTO_ALL_MODES;
490 adapter->hw.phy.disable_polarity_correction = FALSE;
491 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
495 * Set the frame limits assuming
496 * standard ethernet sized frames.
498 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
499 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
502 * This controls when hardware reports transmit completion
505 adapter->hw.mac.report_tx_early = 1;
507 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
510 /* Allocate Transmit Descriptor ring */
511 if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
512 device_printf(dev, "Unable to allocate tx_desc memory\n");
516 adapter->tx_desc_base =
517 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
519 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
522 /* Allocate Receive Descriptor ring */
523 if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
524 device_printf(dev, "Unable to allocate rx_desc memory\n");
528 adapter->rx_desc_base =
529 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
531 /* Allocate multicast array memory. */
532 adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
533 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
534 if (adapter->mta == NULL) {
535 device_printf(dev, "Can not allocate multicast setup array\n");
541 ** Start from a known state, this is
542 ** important in reading the nvm and
545 e1000_reset_hw(&adapter->hw);
547 /* Make sure we have a good EEPROM before we read from it */
548 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
550 ** Some PCI-E parts fail the first check due to
551 ** the link being in sleep state, call it again,
552 ** if it fails a second time its a real issue.
554 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
556 "The EEPROM Checksum Is Not Valid\n");
562 /* Copy the permanent MAC address out of the EEPROM */
563 if (e1000_read_mac_addr(&adapter->hw) < 0) {
564 device_printf(dev, "EEPROM read error while reading MAC"
570 if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
571 device_printf(dev, "Invalid MAC address\n");
576 /* Initialize the hardware */
577 if (lem_hardware_init(adapter)) {
578 device_printf(dev, "Unable to initialize the hardware\n");
583 /* Allocate transmit descriptors and buffers */
584 if (lem_allocate_transmit_structures(adapter)) {
585 device_printf(dev, "Could not setup transmit structures\n");
590 /* Allocate receive descriptors and buffers */
591 if (lem_allocate_receive_structures(adapter)) {
592 device_printf(dev, "Could not setup receive structures\n");
598 ** Do interrupt configuration
600 error = lem_allocate_irq(adapter);
605 * Get Wake-on-Lan and Management info for later use
609 /* Setup OS specific network interface */
610 if (lem_setup_interface(dev, adapter) != 0)
613 /* Initialize statistics */
614 lem_update_stats_counters(adapter);
616 adapter->hw.mac.get_link_status = 1;
617 lem_update_link_status(adapter);
619 /* Indicate SOL/IDER usage */
620 if (e1000_check_reset_block(&adapter->hw))
622 "PHY reset is blocked due to SOL/IDER session.\n");
624 /* Do we need workaround for 82544 PCI-X adapter? */
625 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
626 adapter->hw.mac.type == e1000_82544)
627 adapter->pcix_82544 = TRUE;
629 adapter->pcix_82544 = FALSE;
631 /* Register for VLAN events */
632 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
633 lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
634 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
635 lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
637 lem_add_hw_stats(adapter);
639 /* Non-AMT based hardware can now take control from firmware */
640 if (adapter->has_manage && !adapter->has_amt)
641 lem_get_hw_control(adapter);
643 /* Tell the stack that the interface is not active */
644 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
646 adapter->led_dev = led_create(lem_led_func, adapter,
647 device_get_nameunit(dev));
649 INIT_DEBUGOUT("lem_attach: end");
/* Error unwind: release resources in reverse order of acquisition
 * (labels between these statements are not visible in this listing). */
654 lem_free_transmit_structures(adapter);
657 lem_release_hw_control(adapter);
658 lem_dma_free(adapter, &adapter->rxdma);
660 lem_dma_free(adapter, &adapter->txdma);
663 if (adapter->ifp != NULL)
664 if_free(adapter->ifp);
665 lem_free_pci_resources(adapter);
666 free(adapter->mta, M_DEVBUF);
667 EM_TX_LOCK_DESTROY(adapter);
668 EM_RX_LOCK_DESTROY(adapter);
669 EM_CORE_LOCK_DESTROY(adapter);
674 /*********************************************************************
675 * Device removal routine
677 * The detach entry point is called when the driver is being removed.
678 * This routine stops the adapter and deallocates all the resources
679 * that were allocated for driver operation.
681 * return 0 on success, positive on failure
682 *********************************************************************/
/*
 * lem_detach - device removal entry point.
 * Refuses to detach while VLANs are configured on the interface, then
 * stops the hardware, tears down event handlers, callouts, rings, PCI
 * resources and locks.  Returns 0 on success (return statements are
 * not visible in this listing).
 */
685 lem_detach(device_t dev)
687 struct adapter *adapter = device_get_softc(dev);
688 struct ifnet *ifp = adapter->ifp;
690 INIT_DEBUGOUT("em_detach: begin");
692 /* Make sure VLANS are not using driver */
693 if (adapter->ifp->if_vlantrunk != NULL) {
694 device_printf(dev,"Vlan in use, detach first\n");
698 #ifdef DEVICE_POLLING
699 if (ifp->if_capenable & IFCAP_POLLING)
700 ether_poll_deregister(ifp);
703 if (adapter->led_dev != NULL)
704 led_destroy(adapter->led_dev);
/* Quiesce the hardware under the core lock before freeing anything. */
706 EM_CORE_LOCK(adapter);
708 adapter->in_detach = 1;
710 e1000_phy_hw_reset(&adapter->hw);
712 lem_release_manageability(adapter);
714 EM_TX_UNLOCK(adapter);
715 EM_CORE_UNLOCK(adapter);
717 /* Unregister VLAN events */
718 if (adapter->vlan_attach != NULL)
719 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
720 if (adapter->vlan_detach != NULL)
721 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* Detach from the network stack and drain timers before freeing rings. */
723 ether_ifdetach(adapter->ifp);
724 callout_drain(&adapter->timer);
725 callout_drain(&adapter->tx_fifo_timer);
727 lem_free_pci_resources(adapter);
728 bus_generic_detach(dev);
731 lem_free_transmit_structures(adapter);
732 lem_free_receive_structures(adapter);
734 /* Free Transmit Descriptor ring */
735 if (adapter->tx_desc_base) {
736 lem_dma_free(adapter, &adapter->txdma);
737 adapter->tx_desc_base = NULL;
740 /* Free Receive Descriptor ring */
741 if (adapter->rx_desc_base) {
742 lem_dma_free(adapter, &adapter->rxdma);
743 adapter->rx_desc_base = NULL;
746 lem_release_hw_control(adapter);
747 free(adapter->mta, M_DEVBUF);
748 EM_TX_LOCK_DESTROY(adapter);
749 EM_RX_LOCK_DESTROY(adapter);
750 EM_CORE_LOCK_DESTROY(adapter);
755 /*********************************************************************
757 * Shutdown entry point
759 **********************************************************************/
/* lem_shutdown - system shutdown hook; identical to suspend. */
762 lem_shutdown(device_t dev)
764 return lem_suspend(dev);
768 * Suspend/resume device methods.
/*
 * lem_suspend - stop the adapter, hand control back to firmware and
 * arm wake-on-LAN before chaining to the generic bus suspend.
 */
771 lem_suspend(device_t dev)
773 struct adapter *adapter = device_get_softc(dev);
775 EM_CORE_LOCK(adapter);
777 lem_release_manageability(adapter);
778 lem_release_hw_control(adapter);
779 lem_enable_wakeup(dev);
781 EM_CORE_UNLOCK(adapter);
783 return bus_generic_suspend(dev);
/*
 * lem_resume - reinitialize the adapter after suspend and chain to the
 * generic bus resume.  The ifp local is presumably used by code not
 * visible in this listing (e.g. restarting the transmit queue).
 */
787 lem_resume(device_t dev)
789 struct adapter *adapter = device_get_softc(dev);
790 struct ifnet *ifp = adapter->ifp;
792 EM_CORE_LOCK(adapter);
793 lem_init_locked(adapter);
794 lem_init_manageability(adapter);
795 EM_CORE_UNLOCK(adapter);
798 return bus_generic_resume(dev);
/*
 * lem_start_locked - transmit-path worker; drains the interface send
 * queue into the TX descriptor ring.  Caller must hold the TX lock.
 * Bails out early when the interface is not RUNNING, is OACTIVE, or
 * the link is down.
 */
803 lem_start_locked(struct ifnet *ifp)
805 struct adapter *adapter = ifp->if_softc;
808 EM_TX_LOCK_ASSERT(adapter);
810 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
813 if (!adapter->link_active)
817 * Force a cleanup if number of TX descriptors
818 * available hits the threshold
820 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
822 /* Now do we at least have a minimal? */
823 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
824 adapter->no_tx_desc_avail1++;
829 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
831 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
835 * Encapsulation can modify our pointer, and or make it
836 * NULL on failure. In that event, we can't requeue.
838 if (lem_xmit(adapter, &m_head)) {
/* On encap failure with a surviving mbuf, push it back and stall. */
841 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
842 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
846 /* Send a copy of the frame to the BPF listener */
847 ETHER_BPF_MTAP(ifp, m_head);
849 /* Set timeout in case hardware has problems transmitting. */
850 adapter->watchdog_check = TRUE;
851 adapter->watchdog_time = ticks;
/* Ran low on descriptors: mark the queue active-blocked. */
853 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
854 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/*
 * lem_start - ifnet if_start entry point; takes the TX lock and
 * defers to lem_start_locked() when the interface is running.
 * (The matching EM_TX_LOCK acquisition is not visible in this listing.)
 */
860 lem_start(struct ifnet *ifp)
862 struct adapter *adapter = ifp->if_softc;
865 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
866 lem_start_locked(ifp);
867 EM_TX_UNLOCK(adapter);
870 /*********************************************************************
873 * em_ioctl is called when the user wants to configure the
876 * return 0 on success, positive on failure
877 **********************************************************************/
/*
 * lem_ioctl - ifnet ioctl entry point.
 * Handles address assignment, MTU changes, interface flag changes,
 * multicast list updates, media selection and capability toggles;
 * everything else is passed to ether_ioctl().  Returns 0 or an errno.
 * (case labels and some returns are not visible in this listing.)
 */
880 lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
882 struct adapter *adapter = ifp->if_softc;
883 struct ifreq *ifr = (struct ifreq *)data;
885 struct ifaddr *ifa = (struct ifaddr *)data;
/* Ignore ioctls that race with detach. */
889 if (adapter->in_detach)
/* SIOCSIFADDR: bring the interface up lazily for AF_INET. */
895 if (ifa->ifa_addr->sa_family == AF_INET) {
898 * Since resetting hardware takes a very long time
899 * and results in link renegotiation we only
900 * initialize the hardware only when it is absolutely
903 ifp->if_flags |= IFF_UP;
904 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
905 EM_CORE_LOCK(adapter);
906 lem_init_locked(adapter);
907 EM_CORE_UNLOCK(adapter);
909 arp_ifinit(ifp, ifa);
912 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: bound the new MTU by the MAC's maximum frame size. */
918 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
920 EM_CORE_LOCK(adapter);
921 switch (adapter->hw.mac.type) {
923 max_frame_size = ETHER_MAX_LEN;
926 max_frame_size = MAX_JUMBO_FRAME_SIZE;
928 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
930 EM_CORE_UNLOCK(adapter);
935 ifp->if_mtu = ifr->ifr_mtu;
936 adapter->max_frame_size =
937 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
938 lem_init_locked(adapter);
939 EM_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: only reprogram promisc/allmulti when those bits flip. */
943 IOCTL_DEBUGOUT("ioctl rcv'd:\
944 SIOCSIFFLAGS (Set Interface Flags)");
945 EM_CORE_LOCK(adapter);
946 if (ifp->if_flags & IFF_UP) {
947 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
948 if ((ifp->if_flags ^ adapter->if_flags) &
949 (IFF_PROMISC | IFF_ALLMULTI)) {
950 lem_disable_promisc(adapter);
951 lem_set_promisc(adapter);
954 lem_init_locked(adapter);
956 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
959 EM_TX_UNLOCK(adapter);
961 adapter->if_flags = ifp->if_flags;
962 EM_CORE_UNLOCK(adapter);
/* SIOC{ADD,DEL}MULTI: reload the hardware multicast filter. */
966 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
967 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
968 EM_CORE_LOCK(adapter);
969 lem_disable_intr(adapter);
970 lem_set_multi(adapter);
/* 82542 rev2 needs its receive unit reinitialized after a filter change. */
971 if (adapter->hw.mac.type == e1000_82542 &&
972 adapter->hw.revision_id == E1000_REVISION_2) {
973 lem_initialize_receive_unit(adapter);
975 #ifdef DEVICE_POLLING
976 if (!(ifp->if_capenable & IFCAP_POLLING))
978 lem_enable_intr(adapter);
979 EM_CORE_UNLOCK(adapter);
/* Media ioctls: refuse media changes while SOL/IDER holds the PHY. */
983 /* Check SOL/IDER usage */
984 EM_CORE_LOCK(adapter);
985 if (e1000_check_reset_block(&adapter->hw)) {
986 EM_CORE_UNLOCK(adapter);
987 device_printf(adapter->dev, "Media change is"
988 " blocked due to SOL/IDER session.\n");
991 EM_CORE_UNLOCK(adapter);
993 IOCTL_DEBUGOUT("ioctl rcv'd: \
994 SIOCxIFMEDIA (Get/Set Interface Media)");
995 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle polling, checksum offload, VLAN tagging and WOL. */
1001 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1003 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1004 #ifdef DEVICE_POLLING
1005 if (mask & IFCAP_POLLING) {
1006 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1007 error = ether_poll_register(lem_poll, ifp);
1010 EM_CORE_LOCK(adapter);
1011 lem_disable_intr(adapter);
1012 ifp->if_capenable |= IFCAP_POLLING;
1013 EM_CORE_UNLOCK(adapter);
1015 error = ether_poll_deregister(ifp);
1016 /* Enable interrupt even in error case */
1017 EM_CORE_LOCK(adapter);
1018 lem_enable_intr(adapter);
1019 ifp->if_capenable &= ~IFCAP_POLLING;
1020 EM_CORE_UNLOCK(adapter);
1024 if (mask & IFCAP_HWCSUM) {
1025 ifp->if_capenable ^= IFCAP_HWCSUM;
1028 if (mask & IFCAP_VLAN_HWTAGGING) {
1029 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1032 if ((mask & IFCAP_WOL) &&
1033 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1034 if (mask & IFCAP_WOL_MCAST)
1035 ifp->if_capenable ^= IFCAP_WOL_MCAST;
1036 if (mask & IFCAP_WOL_MAGIC)
1037 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
/* Reinitialize if a toggled capability requires it and we are running. */
1039 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1041 VLAN_CAPABILITIES(ifp);
/* Unknown command: defer to the generic Ethernet ioctl handler. */
1046 error = ether_ioctl(ifp, command, data);
1054 /*********************************************************************
1057 * This routine is used in two ways. It is used by the stack as
1058 * init entry point in network interface structure. It is also used
1059 * by the driver as a hw/sw initialization routine to get to a
1062 * return 0 on success, positive on failure
1063 **********************************************************************/
/*
 * lem_init_locked: hw/sw initialization path; the EM_CORE_LOCK_ASSERT()
 * below shows the caller must already hold the core lock.
 * NOTE(review): original line numbering in this excerpt is non-contiguous,
 * so braces/else-arms/declarations are elided here; code left byte-identical.
 */
1066 lem_init_locked(struct adapter *adapter)
1068 struct ifnet *ifp = adapter->ifp;
1069 device_t dev = adapter->dev;
1072 INIT_DEBUGOUT("lem_init: begin");
1074 EM_CORE_LOCK_ASSERT(adapter);
1076 EM_TX_LOCK(adapter);
1078 EM_TX_UNLOCK(adapter);
1081 * Packet Buffer Allocation (PBA)
1082 * Writing PBA sets the receive portion of the buffer
1083 * the remainder is used for the transmit buffer.
1085 * Devices before the 82547 had a Packet Buffer of 64K.
1086 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1087 * After the 82547 the buffer was reduced to 40K.
1088 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1089 * Note: default does not leave enough room for Jumbo Frame >10k.
1091 switch (adapter->hw.mac.type) {
1093 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1094 if (adapter->max_frame_size > 8192)
1095 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1097 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 also tracks a software Tx FIFO (head/size) for the half-duplex
 * hang workaround implemented in lem_82547_* below. */
1098 adapter->tx_fifo_head = 0;
1099 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1100 adapter->tx_fifo_size =
1101 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1104 /* Devices before 82547 had a Packet Buffer of 64K. */
1105 if (adapter->max_frame_size > 8192)
1106 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1108 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1111 INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
1112 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1114 /* Get the latest mac address, User can use a LAA */
1115 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1118 /* Put the address into the Receive Address Array */
1119 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1121 /* Initialize the hardware */
1122 if (lem_hardware_init(adapter)) {
1123 device_printf(dev, "Unable to initialize the hardware\n");
1126 lem_update_link_status(adapter);
1128 /* Setup VLAN support, basic and offload if available */
1129 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1131 /* Set hardware offload abilities */
1132 ifp->if_hwassist = 0;
1133 if (adapter->hw.mac.type >= e1000_82543) {
1134 if (ifp->if_capenable & IFCAP_TXCSUM)
1135 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1138 /* Configure for OS presence */
1139 lem_init_manageability(adapter);
1141 /* Prepare transmit descriptors and buffers */
1142 lem_setup_transmit_structures(adapter);
1143 lem_initialize_transmit_unit(adapter);
1145 /* Setup Multicast table */
1146 lem_set_multi(adapter);
1148 /* Prepare receive descriptors and buffers */
1149 if (lem_setup_receive_structures(adapter)) {
1150 device_printf(dev, "Could not setup receive structures\n");
1151 EM_TX_LOCK(adapter);
1153 EM_TX_UNLOCK(adapter);
1156 lem_initialize_receive_unit(adapter);
1158 /* Use real VLAN Filter support? */
1159 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1160 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
1161 /* Use real VLAN Filter support */
1162 lem_setup_vlan_hw_support(adapter);
/* Otherwise just enable hardware VLAN tag stripping via CTRL.VME. */
1165 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1166 ctrl |= E1000_CTRL_VME;
1167 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1171 /* Don't lose promiscuous settings */
1172 lem_set_promisc(adapter);
1174 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1175 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1177 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1178 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1180 /* MSI/X configuration for 82574 */
1181 if (adapter->hw.mac.type == e1000_82574) {
1183 tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1184 tmp |= E1000_CTRL_EXT_PBA_CLR;
1185 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1187 ** Set the IVAR - interrupt vector routing.
1188 ** Each nibble represents a vector, high bit
1189 ** is enable, other 3 bits are the MSIX table
1190 ** entry, we map RXQ0 to 0, TXQ0 to 1, and
1191 ** Link (other) to 2, hence the magic number.
1193 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
1196 #ifdef DEVICE_POLLING
1198 * Only enable interrupts if we are not polling, make sure
1199 * they are off otherwise.
1201 if (ifp->if_capenable & IFCAP_POLLING)
1202 lem_disable_intr(adapter);
1204 #endif /* DEVICE_POLLING */
1205 lem_enable_intr(adapter);
1207 /* AMT based hardware can now take control from firmware */
1208 if (adapter->has_manage && adapter->has_amt)
1209 lem_get_hw_control(adapter);
1211 /* Don't reset the phy next time init gets called */
1212 adapter->hw.phy.reset_disable = TRUE;
/*
 * lem_init body (signature elided in this excerpt): public init entry
 * point that takes the core lock and delegates to lem_init_locked().
 */
1218 struct adapter *adapter = arg;
1220 EM_CORE_LOCK(adapter);
1221 lem_init_locked(adapter);
1222 EM_CORE_UNLOCK(adapter);
1226 #ifdef DEVICE_POLLING
1227 /*********************************************************************
1229 * Legacy polling routine
1231 *********************************************************************/
/*
 * lem_poll: DEVICE_POLLING entry point. On POLL_AND_CHECK_STATUS it reads
 * ICR and refreshes link state; it then drains RX and kicks TX if the send
 * queue is non-empty. (Excerpt has elided lines; code left byte-identical.)
 */
1233 lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1235 struct adapter *adapter = ifp->if_softc;
1236 u32 reg_icr, rx_done = 0;
1238 EM_CORE_LOCK(adapter);
1239 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
/* Interface not running: drop the lock and bail out early. */
1240 EM_CORE_UNLOCK(adapter);
1244 if (cmd == POLL_AND_CHECK_STATUS) {
1245 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1246 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
/* Link status change: re-run link detection, then rearm the timer. */
1247 callout_stop(&adapter->timer);
1248 adapter->hw.mac.get_link_status = 1;
1249 lem_update_link_status(adapter);
1250 callout_reset(&adapter->timer, hz,
1251 lem_local_timer, adapter);
1254 EM_CORE_UNLOCK(adapter);
1256 lem_rxeof(adapter, count, &rx_done);
1258 EM_TX_LOCK(adapter);
1260 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1261 lem_start_locked(ifp);
1262 EM_TX_UNLOCK(adapter);
1265 #endif /* DEVICE_POLLING */
1267 #ifdef EM_LEGACY_IRQ
1268 /*********************************************************************
1270 * Legacy Interrupt Service routine
1272 *********************************************************************/
/*
 * lem_intr body (EM_LEGACY_IRQ; signature elided in this excerpt): legacy
 * interrupt service routine. Reads/acks ICR, handles link changes, drains
 * RX/TX. Returns early when polling owns the device or ICR is stray.
 */
1276 struct adapter *adapter = arg;
1277 struct ifnet *ifp = adapter->ifp;
1281 if (ifp->if_capenable & IFCAP_POLLING)
1284 EM_CORE_LOCK(adapter);
1285 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1286 if (reg_icr & E1000_ICR_RXO)
1287 adapter->rx_overruns++;
/* 0xffffffff reads back when the device is gone; 0 means not ours. */
1289 if ((reg_icr == 0xffffffff) || (reg_icr == 0))
1292 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1295 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1296 callout_stop(&adapter->timer);
1297 adapter->hw.mac.get_link_status = 1;
1298 lem_update_link_status(adapter);
1299 /* Deal with TX cruft when link lost */
1300 lem_tx_purge(adapter);
1301 callout_reset(&adapter->timer, hz,
1302 lem_local_timer, adapter);
1306 EM_TX_LOCK(adapter);
1307 lem_rxeof(adapter, -1, NULL);
1309 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1310 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1311 lem_start_locked(ifp);
1312 EM_TX_UNLOCK(adapter);
1315 EM_CORE_UNLOCK(adapter);
1319 #else /* EM_FAST_IRQ, then fast interrupt routines only */
/*
 * lem_handle_link: deferred (taskqueue) link-change handler used by the
 * fast-interrupt path. Refreshes link state and purges stale TX work.
 */
1322 lem_handle_link(void *context, int pending)
1324 struct adapter *adapter = context;
1325 struct ifnet *ifp = adapter->ifp;
1327 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1330 EM_CORE_LOCK(adapter);
1331 callout_stop(&adapter->timer);
1332 lem_update_link_status(adapter);
1333 /* Deal with TX cruft when link lost */
1334 lem_tx_purge(adapter);
1335 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1336 EM_CORE_UNLOCK(adapter);
1340 /* Combined RX/TX handler, used by Legacy and MSI */
/* Combined RX/TX handler, used by Legacy and MSI */
/*
 * Deferred taskqueue handler: drain RX up to rx_process_limit, restart TX
 * if queued work exists, then re-enable interrupts (lem_irq_fast masked
 * them before enqueueing this task).
 */
1342 lem_handle_rxtx(void *context, int pending)
1344 struct adapter *adapter = context;
1345 struct ifnet *ifp = adapter->ifp;
1348 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1349 lem_rxeof(adapter, adapter->rx_process_limit, NULL);
1350 EM_TX_LOCK(adapter);
1352 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1353 lem_start_locked(ifp);
1354 EM_TX_UNLOCK(adapter);
/* Re-check RUNNING: the interface may have been stopped meanwhile. */
1357 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1358 lem_enable_intr(adapter);
1361 /*********************************************************************
1363 * Fast Legacy/MSI Combined Interrupt Service routine
1365 *********************************************************************/
/*
 * lem_irq_fast: fast (filter) interrupt handler for Legacy/MSI. Does the
 * minimum in interrupt context — classify ICR, mask interrupts, and defer
 * real work to the rxtx/link taskqueues.
 */
1367 lem_irq_fast(void *arg)
1369 struct adapter *adapter = arg;
1375 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* All-ones read means the device has disappeared from the bus. */
1378 if (reg_icr == 0xffffffff)
1379 return FILTER_STRAY;
1381 /* Definitely not our interrupt. */
1383 return FILTER_STRAY;
1386 * Mask interrupts until the taskqueue is finished running. This is
1387 * cheap, just assume that it is needed. This also works around the
1388 * MSI message reordering errata on certain systems.
1390 lem_disable_intr(adapter);
1391 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1393 /* Link status change */
1394 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1395 adapter->hw.mac.get_link_status = 1;
1396 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1399 if (reg_icr & E1000_ICR_RXO)
1400 adapter->rx_overruns++;
1401 return FILTER_HANDLED;
1403 #endif /* ~EM_LEGACY_IRQ */
1406 /*********************************************************************
1408 * Media Ioctl callback
1410 * This routine is called whenever the user queries the status of
1411 * the interface using ifconfig.
1413 **********************************************************************/
/*
 * lem_media_status: ifmedia "status" callback (ifconfig query). Reports
 * link validity/activity and maps link speed/duplex to ifmedia flags.
 */
1415 lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1417 struct adapter *adapter = ifp->if_softc;
1418 u_char fiber_type = IFM_1000_SX;
1420 INIT_DEBUGOUT("lem_media_status: begin");
1422 EM_CORE_LOCK(adapter);
1423 lem_update_link_status(adapter);
1425 ifmr->ifm_status = IFM_AVALID;
1426 ifmr->ifm_active = IFM_ETHER;
1428 if (!adapter->link_active) {
1429 EM_CORE_UNLOCK(adapter);
1433 ifmr->ifm_status |= IFM_ACTIVE;
1435 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1436 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
/* 82545 fiber reports as 1000base-LX rather than the default SX. */
1437 if (adapter->hw.mac.type == e1000_82545)
1438 fiber_type = IFM_1000_LX;
1439 ifmr->ifm_active |= fiber_type | IFM_FDX;
/* Copper path: translate speed, then duplex (case labels elided here). */
1441 switch (adapter->link_speed) {
1443 ifmr->ifm_active |= IFM_10_T;
1446 ifmr->ifm_active |= IFM_100_TX;
1449 ifmr->ifm_active |= IFM_1000_T;
1452 if (adapter->link_duplex == FULL_DUPLEX)
1453 ifmr->ifm_active |= IFM_FDX;
1455 ifmr->ifm_active |= IFM_HDX;
1457 EM_CORE_UNLOCK(adapter);
1460 /*********************************************************************
1462 * Media Ioctl callback
1464 * This routine is called when the user changes speed/duplex using
1465 * media/mediopt option with ifconfig.
1467 **********************************************************************/
/*
 * lem_media_change: ifmedia "change" callback. Translates the requested
 * ifmedia subtype into autoneg / forced speed-duplex settings, then
 * reinitializes via lem_init_locked() (case labels elided in excerpt).
 */
1469 lem_media_change(struct ifnet *ifp)
1471 struct adapter *adapter = ifp->if_softc;
1472 struct ifmedia *ifm = &adapter->media;
1474 INIT_DEBUGOUT("lem_media_change: begin");
1476 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1479 EM_CORE_LOCK(adapter);
1480 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1482 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1483 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1488 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1489 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
/* Forced 100 Mb/s: disable autoneg, pick duplex from the media word. */
1492 adapter->hw.mac.autoneg = FALSE;
1493 adapter->hw.phy.autoneg_advertised = 0;
1494 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1495 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1497 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
/* Forced 10 Mb/s: same pattern as above. */
1500 adapter->hw.mac.autoneg = FALSE;
1501 adapter->hw.phy.autoneg_advertised = 0;
1502 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1503 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1505 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1508 device_printf(adapter->dev, "Unsupported media type\n");
1511 /* As the speed/duplex settings my have changed we need to
1514 adapter->hw.phy.reset_disable = FALSE;
1516 lem_init_locked(adapter);
1517 EM_CORE_UNLOCK(adapter);
1522 /*********************************************************************
1524 * This routine maps the mbufs to tx descriptors.
1526 * return 0 on success, positive on failure
1527 **********************************************************************/
/*
 * lem_xmit: map one mbuf chain onto TX descriptors and hand it to the NIC.
 * Handles EFBIG (defrag and retry) and ENOMEM (defer) from bus_dma, the
 * 82544-on-PCIX address/length split workaround, VLAN tag insertion, and
 * the 82547 half-duplex FIFO workaround. Returns 0 on success, errno on
 * failure. NOTE(review): excerpt has elided lines (non-contiguous original
 * numbering); code left byte-identical, comments only.
 */
1530 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1532 bus_dma_segment_t segs[EM_MAX_SCATTER];
1534 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1535 struct e1000_tx_desc *ctxd = NULL;
1536 struct mbuf *m_head;
1537 u32 txd_upper, txd_lower, txd_used, txd_saved;
1538 int error, nsegs, i, j, first, last = 0;
1541 txd_upper = txd_lower = txd_used = txd_saved = 0;
1544 ** When doing checksum offload, it is critical to
1545 ** make sure the first mbuf has more than header,
1546 ** because that routine expects data to be present.
1548 if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
1549 (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
1550 m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
1557 * Map the packet for DMA
1559 * Capture the first descriptor index,
1560 * this descriptor will have the index
1561 * of the EOP which is the only one that
1562 * now gets a DONE bit writeback.
1564 first = adapter->next_avail_tx_desc;
1565 tx_buffer = &adapter->tx_buffer_area[first];
1566 tx_buffer_mapped = tx_buffer;
1567 map = tx_buffer->map;
1569 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1570 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1573 * There are two types of errors we can (try) to handle:
1574 * - EFBIG means the mbuf chain was too long and bus_dma ran
1575 * out of segments. Defragment the mbuf chain and try again.
1576 * - ENOMEM means bus_dma could not obtain enough bounce buffers
1577 * at this point in time. Defer sending and try again later.
1578 * All other errors, in particular EINVAL, are fatal and prevent the
1579 * mbuf chain from ever going through. Drop it and report error.
1581 if (error == EFBIG) {
1584 m = m_defrag(*m_headp, M_DONTWAIT);
1586 adapter->mbuf_alloc_failed++;
/* Retry the DMA load once against the defragmented chain. */
1594 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1595 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1598 adapter->no_tx_dma_setup++;
1603 } else if (error != 0) {
1604 adapter->no_tx_dma_setup++;
/* Need nsegs free descriptors plus slack; otherwise back off. */
1608 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1609 adapter->no_tx_desc_avail2++;
1610 bus_dmamap_unload(adapter->txtag, map);
1615 /* Do hardware assists */
1616 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1617 lem_transmit_checksum_setup(adapter, m_head,
1618 &txd_upper, &txd_lower);
1620 i = adapter->next_avail_tx_desc;
1621 if (adapter->pcix_82544)
1624 /* Set up our transmit descriptors */
1625 for (j = 0; j < nsegs; j++) {
1627 bus_addr_t seg_addr;
1628 /* If adapter is 82544 and on PCIX bus */
1629 if(adapter->pcix_82544) {
1630 DESC_ARRAY desc_array;
1631 u32 array_elements, counter;
1633 * Check the Address and Length combination and
1634 * split the data accordingly
1636 array_elements = lem_fill_descriptors(segs[j].ds_addr,
1637 segs[j].ds_len, &desc_array);
1638 for (counter = 0; counter < array_elements; counter++) {
1639 if (txd_used == adapter->num_tx_desc_avail) {
/* Ran out of descriptors mid-split: roll back and unload. */
1640 adapter->next_avail_tx_desc = txd_saved;
1641 adapter->no_tx_desc_avail2++;
1642 bus_dmamap_unload(adapter->txtag, map);
1645 tx_buffer = &adapter->tx_buffer_area[i];
1646 ctxd = &adapter->tx_desc_base[i];
1647 ctxd->buffer_addr = htole64(
1648 desc_array.descriptor[counter].address);
1649 ctxd->lower.data = htole32(
1650 (adapter->txd_cmd | txd_lower | (u16)
1651 desc_array.descriptor[counter].length));
1653 htole32((txd_upper));
/* Descriptor ring wraps at num_tx_desc. */
1655 if (++i == adapter->num_tx_desc)
1657 tx_buffer->m_head = NULL;
1658 tx_buffer->next_eop = -1;
/* Normal (non-82544/PCIX) path: one descriptor per DMA segment. */
1662 tx_buffer = &adapter->tx_buffer_area[i];
1663 ctxd = &adapter->tx_desc_base[i];
1664 seg_addr = segs[j].ds_addr;
1665 seg_len = segs[j].ds_len;
1666 ctxd->buffer_addr = htole64(seg_addr);
1667 ctxd->lower.data = htole32(
1668 adapter->txd_cmd | txd_lower | seg_len);
1672 if (++i == adapter->num_tx_desc)
1674 tx_buffer->m_head = NULL;
1675 tx_buffer->next_eop = -1;
1679 adapter->next_avail_tx_desc = i;
1681 if (adapter->pcix_82544)
1682 adapter->num_tx_desc_avail -= txd_used;
1684 adapter->num_tx_desc_avail -= nsegs;
1686 if (m_head->m_flags & M_VLANTAG) {
1687 /* Set the vlan id. */
1688 ctxd->upper.fields.special =
1689 htole16(m_head->m_pkthdr.ether_vtag);
1690 /* Tell hardware to add tag */
1691 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
/* Swap DMA maps so the loaded map travels with the last buffer. */
1694 tx_buffer->m_head = m_head;
1695 tx_buffer_mapped->map = tx_buffer->map;
1696 tx_buffer->map = map;
1697 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1700 * Last Descriptor of Packet
1701 * needs End Of Packet (EOP)
1702 * and Report Status (RS)
1705 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1707 * Keep track in the first buffer which
1708 * descriptor will be written back
1710 tx_buffer = &adapter->tx_buffer_area[first];
1711 tx_buffer->next_eop = last;
1712 adapter->watchdog_time = ticks;
1715 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1716 * that this frame is available to transmit.
1718 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1719 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1720 if (adapter->hw.mac.type == e1000_82547 &&
1721 adapter->link_duplex == HALF_DUPLEX)
1722 lem_82547_move_tail(adapter);
1724 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1725 if (adapter->hw.mac.type == e1000_82547)
1726 lem_82547_update_fifo_head(adapter,
1727 m_head->m_pkthdr.len);
1733 /*********************************************************************
1735 * 82547 workaround to avoid controller hang in half-duplex environment.
1736 * The workaround is to avoid queuing a large packet that would span
1737 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1738 * in this case. We do that only when FIFO is quiescent.
1740 **********************************************************************/
/*
 * lem_82547_move_tail: advance the hardware TDT toward the software tail
 * only when the FIFO workaround allows it; otherwise retry via callout.
 * Caller must hold the TX lock (asserted below).
 */
1742 lem_82547_move_tail(void *arg)
1744 struct adapter *adapter = arg;
1745 struct e1000_tx_desc *tx_desc;
1746 u16 hw_tdt, sw_tdt, length = 0;
1749 EM_TX_LOCK_ASSERT(adapter);
1751 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
1752 sw_tdt = adapter->next_avail_tx_desc;
/* Walk descriptors between hw and sw tails, accumulating frame length. */
1754 while (hw_tdt != sw_tdt) {
1755 tx_desc = &adapter->tx_desc_base[hw_tdt];
1756 length += tx_desc->lower.flags.length;
1757 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1758 if (++hw_tdt == adapter->num_tx_desc)
/* Frame would span the FIFO boundary: defer and retry in 1 tick. */
1762 if (lem_82547_fifo_workaround(adapter, length)) {
1763 adapter->tx_fifo_wrk_cnt++;
1764 callout_reset(&adapter->tx_fifo_timer, 1,
1765 lem_82547_move_tail, adapter);
1768 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
1769 lem_82547_update_fifo_head(adapter, length);
/*
 * lem_82547_fifo_workaround: decide whether queuing a frame of `len`
 * bytes would risk the 82547 half-duplex FIFO hang. Non-zero return
 * means "defer" (see caller in lem_82547_move_tail).
 */
1776 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1778 int fifo_space, fifo_pkt_len;
/* Frame occupancy in the FIFO is padded to EM_FIFO_HDR granularity. */
1780 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1782 if (adapter->link_duplex == HALF_DUPLEX) {
1783 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1785 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1786 if (lem_82547_tx_fifo_reset(adapter))
/*
 * lem_82547_update_fifo_head: account `len` bytes (padded to EM_FIFO_HDR)
 * into the software FIFO head, wrapping at tx_fifo_size.
 */
1797 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1799 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1801 /* tx_fifo_head is always 16 byte aligned */
1802 adapter->tx_fifo_head += fifo_pkt_len;
1803 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1804 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * lem_82547_tx_fifo_reset: when the TX unit is fully quiescent (ring tail
 * == head, FIFO tail == head, saved pointers match, zero packet count),
 * disable TX, rewind all four FIFO pointers to tx_head_addr, re-enable TX
 * and reset the software head. Only safe while the FIFO is idle.
 */
1810 lem_82547_tx_fifo_reset(struct adapter *adapter)
1814 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1815 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1816 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1817 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1818 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1819 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1820 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1821 /* Disable TX unit */
1822 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1823 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1824 tctl & ~E1000_TCTL_EN);
1826 /* Reset FIFO pointers */
1827 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1828 adapter->tx_head_addr);
1829 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1830 adapter->tx_head_addr);
1831 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1832 adapter->tx_head_addr);
1833 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1834 adapter->tx_head_addr);
1836 /* Re-enable TX unit */
1837 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
1838 E1000_WRITE_FLUSH(&adapter->hw);
1840 adapter->tx_fifo_head = 0;
1841 adapter->tx_fifo_reset_cnt++;
/*
 * lem_set_promisc: program RCTL according to the interface's IFF_PROMISC /
 * IFF_ALLMULTI flags (unicast+multicast promiscuous, or multicast-only).
 */
1851 lem_set_promisc(struct adapter *adapter)
1853 struct ifnet *ifp = adapter->ifp;
1856 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1858 if (ifp->if_flags & IFF_PROMISC) {
1859 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1860 /* Turn this on if you want to see bad packets */
1862 reg_rctl |= E1000_RCTL_SBP;
1863 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1864 } else if (ifp->if_flags & IFF_ALLMULTI) {
1865 reg_rctl |= E1000_RCTL_MPE;
1866 reg_rctl &= ~E1000_RCTL_UPE;
1867 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/*
 * lem_disable_promisc: clear all promiscuous bits (unicast, multicast,
 * store-bad-packets) from RCTL.
 */
1872 lem_disable_promisc(struct adapter *adapter)
1876 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1878 reg_rctl &= (~E1000_RCTL_UPE);
1879 reg_rctl &= (~E1000_RCTL_MPE);
1880 reg_rctl &= (~E1000_RCTL_SBP);
1881 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1885 /*********************************************************************
1888 * This routine is called whenever multicast address list is updated.
1890 **********************************************************************/
/*
 * lem_set_multi: rebuild the hardware multicast filter from the
 * interface's multicast address list. If the list overflows
 * MAX_NUM_MULTICAST_ADDRESSES, fall back to multicast-promiscuous (MPE).
 * 82542 rev 2 needs an RCTL reset (and MWI disabled) around the update.
 */
1893 lem_set_multi(struct adapter *adapter)
1895 struct ifnet *ifp = adapter->ifp;
1896 struct ifmultiaddr *ifma;
1898 u8 *mta; /* Multicast array memory */
1901 IOCTL_DEBUGOUT("lem_set_multi: begin");
1904 bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
1906 if (adapter->hw.mac.type == e1000_82542 &&
1907 adapter->hw.revision_id == E1000_REVISION_2) {
1908 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1909 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1910 e1000_pci_clear_mwi(&adapter->hw);
1911 reg_rctl |= E1000_RCTL_RST;
1912 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/* Lock the multicast list; API differs before/after FreeBSD 8. */
1916 #if __FreeBSD_version < 800000
1919 if_maddr_rlock(ifp);
1921 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1922 if (ifma->ifma_addr->sa_family != AF_LINK)
1925 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1928 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1929 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
1932 #if __FreeBSD_version < 800000
1933 IF_ADDR_UNLOCK(ifp);
1935 if_maddr_runlock(ifp);
1937 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1938 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1939 reg_rctl |= E1000_RCTL_MPE;
1940 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1942 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
/* Undo the 82542 rev 2 RCTL reset and restore MWI if it was on. */
1944 if (adapter->hw.mac.type == e1000_82542 &&
1945 adapter->hw.revision_id == E1000_REVISION_2) {
1946 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1947 reg_rctl &= ~E1000_RCTL_RST;
1948 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1950 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1951 e1000_pci_set_mwi(&adapter->hw);
1956 /*********************************************************************
1959 * This routine checks for link status and updates statistics.
1961 **********************************************************************/
/*
 * lem_local_timer: 1 Hz callout. Refreshes link state, updates stats,
 * runs smartspeed, and checks the TX watchdog. The printf/reinit lines at
 * the end look like the watchdog-timeout path (a label appears elided in
 * this excerpt — TODO confirm against full source).
 */
1964 lem_local_timer(void *arg)
1966 struct adapter *adapter = arg;
1968 EM_CORE_LOCK_ASSERT(adapter);
1970 lem_update_link_status(adapter);
1971 lem_update_stats_counters(adapter);
1973 lem_smartspeed(adapter);
1976 * We check the watchdog: the time since
1977 * the last TX descriptor was cleaned.
1978 * This implies a functional TX engine.
1980 if ((adapter->watchdog_check == TRUE) &&
1981 (ticks - adapter->watchdog_time > EM_WATCHDOG))
1984 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
/* Watchdog fired: mark the interface down and reinitialize. */
1987 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1988 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1989 adapter->watchdog_events++;
1990 lem_init_locked(adapter);
/*
 * lem_update_link_status: determine current link state per media type
 * (copper via PHY, fiber/serdes via STATUS/serdes flag), then on a
 * transition update speed/duplex, baudrate, watchdog and notify the stack
 * with if_link_state_change().
 */
1994 lem_update_link_status(struct adapter *adapter)
1996 struct e1000_hw *hw = &adapter->hw;
1997 struct ifnet *ifp = adapter->ifp;
1998 device_t dev = adapter->dev;
2001 /* Get the cached link value or read phy for real */
2002 switch (hw->phy.media_type) {
2003 case e1000_media_type_copper:
2004 if (hw->mac.get_link_status) {
2005 /* Do the work to read phy */
2006 e1000_check_for_link(hw);
2007 link_check = !hw->mac.get_link_status;
2008 if (link_check) /* ESB2 fix */
2009 e1000_cfg_on_link_up(hw);
2013 case e1000_media_type_fiber:
2014 e1000_check_for_link(hw);
2015 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2018 case e1000_media_type_internal_serdes:
2019 e1000_check_for_link(hw);
2020 link_check = adapter->hw.mac.serdes_has_link;
2023 case e1000_media_type_unknown:
2027 /* Now check for a transition */
2028 if (link_check && (adapter->link_active == 0)) {
/* Link came up: latch speed/duplex and tell the stack. */
2029 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2030 &adapter->link_duplex);
2032 device_printf(dev, "Link is up %d Mbps %s\n",
2033 adapter->link_speed,
2034 ((adapter->link_duplex == FULL_DUPLEX) ?
2035 "Full Duplex" : "Half Duplex"));
2036 adapter->link_active = 1;
2037 adapter->smartspeed = 0;
2038 ifp->if_baudrate = adapter->link_speed * 1000000;
2039 if_link_state_change(ifp, LINK_STATE_UP);
2040 } else if (!link_check && (adapter->link_active == 1)) {
/* Link went down: zero rates and disable the TX watchdog. */
2041 ifp->if_baudrate = adapter->link_speed = 0;
2042 adapter->link_duplex = 0;
2044 device_printf(dev, "Link is Down\n");
2045 adapter->link_active = 0;
2046 /* Link down, disable watchdog */
2047 adapter->watchdog_check = FALSE;
2048 if_link_state_change(ifp, LINK_STATE_DOWN);
2052 /*********************************************************************
2054 * This routine disables all traffic on the adapter by issuing a
2055 * global reset on the MAC and deallocates TX/RX buffers.
2057 * This routine should always be called with BOTH the CORE
2059 **********************************************************************/
/*
 * lem_stop body (signature elided in this excerpt): quiesce the adapter —
 * mask interrupts, stop callouts, mark the interface down, reset the MAC
 * and turn off the LED. Asserts show both CORE and TX locks must be held.
 */
2064 struct adapter *adapter = arg;
2065 struct ifnet *ifp = adapter->ifp;
2067 EM_CORE_LOCK_ASSERT(adapter);
2068 EM_TX_LOCK_ASSERT(adapter);
2070 INIT_DEBUGOUT("lem_stop: begin");
2072 lem_disable_intr(adapter);
2073 callout_stop(&adapter->timer);
2074 callout_stop(&adapter->tx_fifo_timer);
2076 /* Tell the stack that the interface is no longer active */
2077 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2079 e1000_reset_hw(&adapter->hw);
/* Clear wake-up control on 82544 and later. */
2080 if (adapter->hw.mac.type >= e1000_82544)
2081 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2083 e1000_led_off(&adapter->hw);
2084 e1000_cleanup_led(&adapter->hw);
2088 /*********************************************************************
2090 * Determine hardware revision.
2092 **********************************************************************/
/*
 * lem_identify_hardware: ensure PCI bus-master/memory bits are set,
 * snapshot vendor/device/revision/subsystem IDs into the shared-code hw
 * struct, then let the shared code determine the MAC type.
 */
2094 lem_identify_hardware(struct adapter *adapter)
2096 device_t dev = adapter->dev;
2098 /* Make sure our PCI config space has the necessary stuff set */
2099 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2100 if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2101 (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2102 device_printf(dev, "Memory Access and/or Bus Master bits "
/* Force the required bits on and write the command word back. */
2104 adapter->hw.bus.pci_cmd_word |=
2105 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2106 pci_write_config(dev, PCIR_COMMAND,
2107 adapter->hw.bus.pci_cmd_word, 2);
2110 /* Save off the information about this board */
2111 adapter->hw.vendor_id = pci_get_vendor(dev);
2112 adapter->hw.device_id = pci_get_device(dev);
2113 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2114 adapter->hw.subsystem_vendor_id =
2115 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2116 adapter->hw.subsystem_device_id =
2117 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2119 /* Do Shared Code Init and Setup */
2120 if (e1000_set_mac_type(&adapter->hw)) {
2121 device_printf(dev, "Setup init failure\n");
/*
 * lem_allocate_pci_resources: map the memory BAR for register access and,
 * on adapters that use it, locate and map the IO BAR by scanning config
 * space between BAR(0) and CIS. Returns E1000_SUCCESS or an errno-style
 * error (assignments elided in this excerpt).
 */
2127 lem_allocate_pci_resources(struct adapter *adapter)
2129 device_t dev = adapter->dev;
2130 int val, rid, error = E1000_SUCCESS;
2133 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2135 if (adapter->memory == NULL) {
2136 device_printf(dev, "Unable to allocate bus resource: memory\n");
2139 adapter->osdep.mem_bus_space_tag =
2140 rman_get_bustag(adapter->memory);
2141 adapter->osdep.mem_bus_space_handle =
2142 rman_get_bushandle(adapter->memory);
2143 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2145 /* Only older adapters use IO mapping */
2146 if (adapter->hw.mac.type > e1000_82543) {
2147 /* Figure our where our IO BAR is ? */
2148 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2149 val = pci_read_config(dev, rid, 4);
2150 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2151 adapter->io_rid = rid;
2155 /* check for 64bit BAR */
2156 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
/* If we walked past CIS without finding an IO BAR, fail. */
2159 if (rid >= PCIR_CIS) {
2160 device_printf(dev, "Unable to locate IO BAR\n");
2163 adapter->ioport = bus_alloc_resource_any(dev,
2164 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2165 if (adapter->ioport == NULL) {
2166 device_printf(dev, "Unable to allocate bus resource: "
2170 adapter->hw.io_base = 0;
2171 adapter->osdep.io_bus_space_tag =
2172 rman_get_bustag(adapter->ioport);
2173 adapter->osdep.io_bus_space_handle =
2174 rman_get_bushandle(adapter->ioport);
/* Shared code reaches back to the osdep via hw.back. */
2177 adapter->hw.back = &adapter->osdep;
2182 /*********************************************************************
2184 * Setup the Legacy or MSI Interrupt handler
2186 **********************************************************************/
/*
 * lem_allocate_irq: allocate the single Legacy/MSI interrupt resource and
 * attach either the legacy handler (EM_LEGACY_IRQ) or the fast filter
 * handler plus its rxtx/link taskqueues.
 */
2188 lem_allocate_irq(struct adapter *adapter)
2190 device_t dev = adapter->dev;
2193 /* Manually turn off all interrupts */
2194 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2196 /* We allocate a single interrupt resource */
2197 adapter->res[0] = bus_alloc_resource_any(dev,
2198 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2199 if (adapter->res[0] == NULL) {
2200 device_printf(dev, "Unable to allocate bus resource: "
2205 #ifdef EM_LEGACY_IRQ
2206 /* We do Legacy setup */
2207 if ((error = bus_setup_intr(dev, adapter->res[0],
2208 INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
2209 &adapter->tag[0])) != 0) {
2210 device_printf(dev, "Failed to register interrupt handler");
2214 #else /* FAST_IRQ */
2216 * Try allocating a fast interrupt and the associated deferred
2217 * processing contexts.
2219 TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
2220 TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
2221 adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
2222 taskqueue_thread_enqueue, &adapter->tq);
2223 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2224 device_get_nameunit(adapter->dev));
/* lem_irq_fast is registered as a filter (no ithread argument). */
2225 if ((error = bus_setup_intr(dev, adapter->res[0],
2226 INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
2227 &adapter->tag[0])) != 0) {
2228 device_printf(dev, "Failed to register fast interrupt "
2229 "handler: %d\n", error);
2230 taskqueue_free(adapter->tq);
2234 #endif /* EM_LEGACY_IRQ */
/*
 * lem_free_pci_resources: release, in order, the interrupt handler/tag,
 * the IRQ resource, the memory BAR and the IO BAR. Each release is
 * NULL-guarded so this is safe on partially-attached adapters.
 */
2241 lem_free_pci_resources(struct adapter *adapter)
2243 device_t dev = adapter->dev;
2246 if (adapter->tag[0] != NULL) {
2247 bus_teardown_intr(dev, adapter->res[0],
2249 adapter->tag[0] = NULL;
2252 if (adapter->res[0] != NULL) {
2253 bus_release_resource(dev, SYS_RES_IRQ,
2254 0, adapter->res[0]);
2257 if (adapter->memory != NULL)
2258 bus_release_resource(dev, SYS_RES_MEMORY,
2259 PCIR_BAR(0), adapter->memory);
2261 if (adapter->ioport != NULL)
2262 bus_release_resource(dev, SYS_RES_IOPORT,
2263 adapter->io_rid, adapter->ioport);
2267 /*********************************************************************
2269 * Initialize the hardware to a configuration
2270 * as specified by the adapter structure.
2272 **********************************************************************/
/*
 * lem_hardware_init: global MAC reset, flow-control watermark setup based
 * on the PBA-derived RX buffer size, then shared-code e1000_init_hw().
 * Returns 0 on success, non-zero on failure (return values elided in
 * this excerpt).
 */
2274 lem_hardware_init(struct adapter *adapter)
2276 device_t dev = adapter->dev;
2279 INIT_DEBUGOUT("lem_hardware_init: begin");
2281 /* Issue a global reset */
2282 e1000_reset_hw(&adapter->hw);
2284 /* When hardware is reset, fifo_head is also reset */
2285 adapter->tx_fifo_head = 0;
2288 * These parameters control the automatic generation (Tx) and
2289 * response (Rx) to Ethernet PAUSE frames.
2290 * - High water mark should allow for at least two frames to be
2291 * received after sending an XOFF.
2292 * - Low water mark works best when it is very near the high water mark.
2293 * This allows the receiver to restart by sending XON when it has
2294 * drained a bit. Here we use an arbitary value of 1500 which will
2295 * restart after one full frame is pulled from the buffer. There
2296 * could be several smaller frames in the buffer and if so they will
2297 * not trigger the XON until their total number reduces the buffer
2299 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2301 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
2304 adapter->hw.fc.high_water = rx_buffer_size -
2305 roundup2(adapter->max_frame_size, 1024);
2306 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2308 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2309 adapter->hw.fc.send_xon = TRUE;
2311 /* Set Flow control, use the tunable location if sane */
2312 if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
2313 adapter->hw.fc.requested_mode = lem_fc_setting;
2315 adapter->hw.fc.requested_mode = e1000_fc_none;
2317 if (e1000_init_hw(&adapter->hw) < 0) {
2318 device_printf(dev, "Hardware Initialization Failed\n");
2322 e1000_check_for_link(&adapter->hw);
2327 /*********************************************************************
2329 * Setup networking device structure and register an interface.
2331 **********************************************************************/
/*
 * Allocates and fills in the ifnet, attaches it to the Ethernet layer,
 * advertises capabilities (checksum offload, VLAN tagging, WOL,
 * polling), and populates the ifmedia list based on PHY media type.
 */
2333 lem_setup_interface(device_t dev, struct adapter *adapter)
2337 	INIT_DEBUGOUT("lem_setup_interface: begin");
2339 	ifp = adapter->ifp = if_alloc(IFT_ETHER);
2341 		device_printf(dev, "can not allocate ifnet structure\n");
2344 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2345 	ifp->if_mtu = ETHERMTU;
2346 	ifp->if_init = lem_init;
2347 	ifp->if_softc = adapter;
2348 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2349 	ifp->if_ioctl = lem_ioctl;
2350 	ifp->if_start = lem_start;
/* Size the software send queue to the TX ring less one slot. */
2351 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2352 	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2353 	IFQ_SET_READY(&ifp->if_snd);
2355 	ether_ifattach(ifp, adapter->hw.mac.addr);
2357 	ifp->if_capabilities = ifp->if_capenable = 0;
/* HW checksum offload is only available on 82543 and newer MACs. */
2359 	if (adapter->hw.mac.type >= e1000_82543) {
2360 		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2361 		ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2365 * Tell the upper layer(s) we support long frames.
2367 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2368 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2369 	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2372 ** Dont turn this on by default, if vlans are
2373 ** created on another pseudo device (eg. lagg)
2374 ** then vlan events are not passed thru, breaking
2375 ** operation, but with HW FILTER off it works. If
2376 ** using vlans directly on the em driver you can
2377 ** enable this and get full hardware tag filtering.
2379 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2381 #ifdef DEVICE_POLLING
2382 	ifp->if_capabilities |= IFCAP_POLLING;
2385 	/* Enable only WOL MAGIC by default */
2387 	ifp->if_capabilities |= IFCAP_WOL;
2388 	ifp->if_capenable |= IFCAP_WOL_MAGIC;
2392 * Specify the media types supported by this adapter and register
2393 * callbacks to update media and link information
2395 	ifmedia_init(&adapter->media, IFM_IMASK,
2396 	    lem_media_change, lem_media_status);
2397 	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2398 	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2399 		u_char fiber_type = IFM_1000_SX; /* default type */
/* The 82545 fiber variant reports 1000BASE-LX instead of SX. */
2401 		if (adapter->hw.mac.type == e1000_82545)
2402 			fiber_type = IFM_1000_LX;
2403 		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
2405 		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
/* Copper: advertise 10/100 half+full, and 1000-FDX where the PHY can. */
2407 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2408 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2410 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2412 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2414 		if (adapter->hw.phy.type != e1000_phy_ife) {
2415 			ifmedia_add(&adapter->media,
2416 			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2417 			ifmedia_add(&adapter->media,
2418 			    IFM_ETHER | IFM_1000_T, 0, NULL);
2421 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2422 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2427 /*********************************************************************
2429 * Workaround for SmartSpeed on 82541 and 82547 controllers
2431 **********************************************************************/
/*
 * Periodically called while link is down.  If gigabit autonegotiation
 * keeps failing with a Master/Slave config fault, progressively back
 * off: first clear manual MS configuration and restart autoneg, and
 * after EM_SMARTSPEED_DOWNSHIFT tries force MS enable (helps on 2/3
 * pair cabling).  The counter wraps at EM_SMARTSPEED_MAX.
 */
2433 lem_smartspeed(struct adapter *adapter)
/* Nothing to do when link is up, PHY is not IGP, autoneg is off, or
 * 1000FDX is not being advertised. */
2437 	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2438 	    adapter->hw.mac.autoneg == 0 ||
2439 	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2442 	if (adapter->smartspeed == 0) {
2443 		/* If Master/Slave config fault is asserted twice,
2444 		 * we assume back-to-back */
2445 		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2446 		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
/* Read twice: the fault bit must be set on both reads to count. */
2448 		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2449 		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2450 			e1000_read_phy_reg(&adapter->hw,
2451 			    PHY_1000T_CTRL, &phy_tmp);
2452 			if(phy_tmp & CR_1000T_MS_ENABLE) {
2453 				phy_tmp &= ~CR_1000T_MS_ENABLE;
2454 				e1000_write_phy_reg(&adapter->hw,
2455 				    PHY_1000T_CTRL, phy_tmp);
2456 				adapter->smartspeed++;
/* Restart autonegotiation after changing MS config. */
2457 				if(adapter->hw.mac.autoneg &&
2458 				   !e1000_copper_link_autoneg(&adapter->hw) &&
2459 				   !e1000_read_phy_reg(&adapter->hw,
2460 				    PHY_CONTROL, &phy_tmp)) {
2461 					phy_tmp |= (MII_CR_AUTO_NEG_EN |
2462 					    MII_CR_RESTART_AUTO_NEG);
2463 					e1000_write_phy_reg(&adapter->hw,
2464 					    PHY_CONTROL, phy_tmp);
2469 	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2470 		/* If still no link, perhaps using 2/3 pair cable */
2471 		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2472 		phy_tmp |= CR_1000T_MS_ENABLE;
2473 		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2474 		if(adapter->hw.mac.autoneg &&
2475 		   !e1000_copper_link_autoneg(&adapter->hw) &&
2476 		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2477 			phy_tmp |= (MII_CR_AUTO_NEG_EN |
2478 			    MII_CR_RESTART_AUTO_NEG);
2479 			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2482 	/* Restart process after EM_SMARTSPEED_MAX iterations */
2483 	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2484 		adapter->smartspeed = 0;
2489 * Manage DMA'able memory.
/*
 * bus_dmamap_load() callback: stores the single mapped segment's bus
 * address through the caller-supplied bus_addr_t pointer (arg).
 * Only the first segment is used; error handling lines are elided
 * from this view.
 */
2492 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2496 	*(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * lem_dma_malloc - allocate a DMA-safe contiguous buffer of 'size'
 * bytes for descriptor rings: creates a tag, allocates coherent
 * memory, and loads the map, recording the bus address via
 * lem_dmamap_cb into dma->dma_paddr.  On failure the partially built
 * state is unwound (labels elided in this view) and the struct fields
 * are reset to NULL.
 */
2500 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2501 	struct em_dma_alloc *dma, int mapflags)
2505 	error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2506 	    EM_DBA_ALIGN, 0, /* alignment, bounds */
2507 	    BUS_SPACE_MAXADDR, /* lowaddr */
2508 	    BUS_SPACE_MAXADDR, /* highaddr */
2509 	    NULL, NULL, /* filter, filterarg */
2512 	    size, /* maxsegsize */
2514 	    NULL, /* lockfunc */
2518 		device_printf(adapter->dev,
2519 		    "%s: bus_dma_tag_create failed: %d\n",
/* Allocate the backing memory; BUS_DMA_COHERENT requests cache-coherent
 * memory suitable for descriptor rings. */
2524 	error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2525 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2527 		device_printf(adapter->dev,
2528 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2529 		    __func__, (uintmax_t)size, error);
/* Load the map; lem_dmamap_cb deposits the bus address in dma_paddr. */
2534 	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2535 	    size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2536 	if (error || dma->dma_paddr == 0) {
2537 		device_printf(adapter->dev,
2538 		    "%s: bus_dmamap_load failed: %d\n",
/* Error-unwind path: release in reverse order of acquisition. */
2546 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2548 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2549 	bus_dma_tag_destroy(dma->dma_tag);
2551 	dma->dma_map = NULL;
2552 	dma->dma_tag = NULL;
/*
 * lem_dma_free - release a buffer allocated by lem_dma_malloc:
 * sync, unload, free the memory and destroy the tag.  Idempotent:
 * a NULL tag means nothing was allocated, and fields are NULLed
 * after release so a second call is harmless.
 */
2558 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2560 	if (dma->dma_tag == NULL)
2562 	if (dma->dma_map != NULL) {
/* Finish any outstanding DMA before freeing the memory. */
2563 		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2564 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2565 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2566 		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2567 		dma->dma_map = NULL;
2569 	bus_dma_tag_destroy(dma->dma_tag);
2570 	dma->dma_tag = NULL;
2574 /*********************************************************************
2576 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2577 * the information needed to transmit a packet on the wire.
2579 **********************************************************************/
/*
 * Creates the TX mbuf DMA tag (up to EM_MAX_SCATTER segments of
 * MCLBYTES each), allocates the tx_buffer array, and creates one DMA
 * map per descriptor.  On any failure everything is torn down via
 * lem_free_transmit_structures (fail label elided in this view).
 */
2581 lem_allocate_transmit_structures(struct adapter *adapter)
2583 	device_t dev = adapter->dev;
2584 	struct em_buffer *tx_buffer;
2588 * Create DMA tags for tx descriptors
2590 	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2591 	    1, 0, /* alignment, bounds */
2592 	    BUS_SPACE_MAXADDR, /* lowaddr */
2593 	    BUS_SPACE_MAXADDR, /* highaddr */
2594 	    NULL, NULL, /* filter, filterarg */
2595 	    MCLBYTES * EM_MAX_SCATTER, /* maxsize */
2596 	    EM_MAX_SCATTER, /* nsegments */
2597 	    MCLBYTES, /* maxsegsize */
2599 	    NULL, /* lockfunc */
2601 	    &adapter->txtag)) != 0) {
2602 		device_printf(dev, "Unable to allocate TX DMA tag\n");
/* One em_buffer per TX descriptor, zeroed so maps/mbufs start NULL. */
2606 	adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2607 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2608 	if (adapter->tx_buffer_area == NULL) {
2609 		device_printf(dev, "Unable to allocate tx_buffer memory\n");
2614 	/* Create the descriptor buffer dma maps */
2615 	for (int i = 0; i < adapter->num_tx_desc; i++) {
2616 		tx_buffer = &adapter->tx_buffer_area[i];
2617 		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2619 			device_printf(dev, "Unable to create TX DMA map\n");
/* next_eop == -1 marks "no packet ends at this descriptor". */
2622 		tx_buffer->next_eop = -1;
2627 	lem_free_transmit_structures(adapter);
2631 /*********************************************************************
2633 * (Re)Initialize transmit structures.
2635 **********************************************************************/
/*
 * Clears the TX descriptor ring, frees any mbufs still attached to
 * tx_buffers, resets the ring indices to empty, and syncs the ring
 * memory for device access.  Called from init paths with the TX lock
 * held.
 */
2637 lem_setup_transmit_structures(struct adapter *adapter)
2639 	struct em_buffer *tx_buffer;
2641 	/* Clear the old ring contents */
2642 	bzero(adapter->tx_desc_base,
2643 	    (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2645 	/* Free any existing TX buffers */
/* NOTE(review): the tx_buffer++ in the loop increment is redundant --
 * tx_buffer is reassigned from the array at the top of each iteration. */
2646 	for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2647 		tx_buffer = &adapter->tx_buffer_area[i];
2648 		bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2649 		    BUS_DMASYNC_POSTWRITE);
2650 		bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2651 		m_freem(tx_buffer->m_head);
2652 		tx_buffer->m_head = NULL;
2653 		tx_buffer->next_eop = -1;
/* Ring is now empty: producer, consumer and free count reset. */
2657 	adapter->next_avail_tx_desc = 0;
2658 	adapter->next_tx_to_clean = 0;
2659 	adapter->num_tx_desc_avail = adapter->num_tx_desc;
2661 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2662 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2667 /*********************************************************************
2669 * Enable transmit unit.
2671 **********************************************************************/
/*
 * Programs the TX registers: descriptor ring base/length, head/tail,
 * inter-packet gap (per MAC generation), interrupt delay timers, and
 * finally TCTL, which enables the transmitter.
 */
2673 lem_initialize_transmit_unit(struct adapter *adapter)
2678 	INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
2679 	/* Setup the Base and Length of the Tx Descriptor Ring */
2680 	bus_addr = adapter->txdma.dma_paddr;
2681 	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2682 	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2683 	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2684 	    (u32)(bus_addr >> 32));
2685 	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2687 	/* Setup the HW Tx Head and Tail descriptor pointers */
2688 	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2689 	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
2691 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
2692 	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
2693 	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
2695 	/* Set the default values for the Tx Inter Packet Gap timer */
/* Case labels for this switch fall on lines elided from this view;
 * 82542 uses its own TIPG values, fiber/serdes vs copper differ. */
2696 	switch (adapter->hw.mac.type) {
2698 		tipg = DEFAULT_82542_TIPG_IPGT;
2699 		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2700 		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2703 		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2704 		    (adapter->hw.phy.media_type ==
2705 		    e1000_media_type_internal_serdes))
2706 			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2708 			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2709 		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2710 		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2713 	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2714 	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
/* The absolute TX interrupt delay register only exists from 82540 on. */
2715 	if(adapter->hw.mac.type >= e1000_82540)
2716 		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
2717 		    adapter->tx_abs_int_delay.value);
2719 	/* Program the Transmit Control Register */
2720 	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2721 	tctl &= ~E1000_TCTL_CT;
2722 	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2723 		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2725 	/* This write will effectively turn on the transmit unit. */
2726 	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2728 	/* Setup Transmit Descriptor Base Settings */
2729 	adapter->txd_cmd = E1000_TXD_CMD_IFCS;
2731 	if (adapter->tx_int_delay.value > 0)
2732 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2735 /*********************************************************************
2737 * Free all transmit related data structures.
2739 **********************************************************************/
/*
 * Releases per-descriptor mbufs and DMA maps, then the tx_buffer
 * array, the TX DMA tag, and (FreeBSD 8+) the buf_ring.  Every step
 * is NULL-guarded so this is usable from partial-allocation error
 * paths.
 */
2741 lem_free_transmit_structures(struct adapter *adapter)
2743 	struct em_buffer *tx_buffer;
2745 	INIT_DEBUGOUT("free_transmit_structures: begin");
2747 	if (adapter->tx_buffer_area != NULL) {
2748 		for (int i = 0; i < adapter->num_tx_desc; i++) {
2749 			tx_buffer = &adapter->tx_buffer_area[i];
/* Buffers with an mbuf are synced and unloaded before the free;
 * map-only buffers are just unloaded. */
2750 			if (tx_buffer->m_head != NULL) {
2751 				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2752 				    BUS_DMASYNC_POSTWRITE);
2753 				bus_dmamap_unload(adapter->txtag,
2755 				m_freem(tx_buffer->m_head);
2756 				tx_buffer->m_head = NULL;
2757 			} else if (tx_buffer->map != NULL)
2758 				bus_dmamap_unload(adapter->txtag,
2760 			if (tx_buffer->map != NULL) {
2761 				bus_dmamap_destroy(adapter->txtag,
2763 				tx_buffer->map = NULL;
2767 	if (adapter->tx_buffer_area != NULL) {
2768 		free(adapter->tx_buffer_area, M_DEVBUF);
2769 		adapter->tx_buffer_area = NULL;
2771 	if (adapter->txtag != NULL) {
2772 		bus_dma_tag_destroy(adapter->txtag);
2773 		adapter->txtag = NULL;
2775 #if __FreeBSD_version >= 800000
2776 	if (adapter->br != NULL)
2777 		buf_ring_free(adapter->br, M_DEVBUF);
2781 /*********************************************************************
2783 * The offload context needs to be set when we transfer the first
2784 * packet of a particular protocol (TCP/UDP). This routine has been
2785 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2787 * Added back the old method of keeping the current context type
2788 * and not setting if unnecessary, as this is reported to be a
2789 * big performance win. -jfv
2790 **********************************************************************/
/*
 * Builds (when needed) an e1000 context descriptor describing the
 * IP/TCP/UDP checksum offsets for this mbuf, and returns the TXD
 * upper/lower command bits the data descriptors must carry.  The
 * context descriptor consumes one ring slot; it is skipped when
 * last_hw_offload already matches the requested protocol.
 */
2792 lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2793     u32 *txd_upper, u32 *txd_lower)
2795 	struct e1000_context_desc *TXD = NULL;
2796 	struct em_buffer *tx_buffer;
2797 	struct ether_vlan_header *eh;
2798 	struct ip *ip = NULL;
2799 	struct ip6_hdr *ip6;
2800 	int curr_txd, ehdrlen;
2801 	u32 cmd, hdr_len, ip_hlen;
2806 	cmd = hdr_len = ipproto = 0;
2807 	*txd_upper = *txd_lower = 0;
2808 	curr_txd = adapter->next_avail_tx_desc;
2811 * Determine where frame payload starts.
2812 * Jump over vlan headers if already present,
2813 * helpful for QinQ too.
2815 	eh = mtod(mp, struct ether_vlan_header *);
2816 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2817 		etype = ntohs(eh->evl_proto);
2818 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2820 		etype = ntohs(eh->evl_encap_proto);
2821 		ehdrlen = ETHER_HDR_LEN;
2825 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
2826 * TODO: Support SCTP too when it hits the tree.
/* switch on etype; the ETHERTYPE_IP case label is elided here. */
2830 		ip = (struct ip *)(mp->m_data + ehdrlen);
2831 		ip_hlen = ip->ip_hl << 2;
2833 		/* Setup of IP header checksum. */
2834 		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
2836 * Start offset for header checksum calculation.
2837 * End offset for header checksum calculation.
2838 * Offset of place to put the checksum.
2840 			TXD = (struct e1000_context_desc *)
2841 			    &adapter->tx_desc_base[curr_txd];
2842 			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2843 			TXD->lower_setup.ip_fields.ipcse =
2844 			    htole16(ehdrlen + ip_hlen);
2845 			TXD->lower_setup.ip_fields.ipcso =
2846 			    ehdrlen + offsetof(struct ip, ip_sum);
2847 			cmd |= E1000_TXD_CMD_IP;
2848 			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2851 		hdr_len = ehdrlen + ip_hlen;
2855 	case ETHERTYPE_IPV6:
2856 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2857 		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
2859 		/* IPv6 doesn't have a header checksum. */
2861 		hdr_len = ehdrlen + ip_hlen;
2862 		ipproto = ip6->ip6_nxt;
/* switch on ipproto; IPPROTO_TCP case label elided from this view. */
2871 		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2872 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2873 			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2874 			/* no need for context if already set */
2875 			if (adapter->last_hw_offload == CSUM_TCP)
2877 			adapter->last_hw_offload = CSUM_TCP;
2879 * Start offset for payload checksum calculation.
2880 * End offset for payload checksum calculation.
2881 * Offset of place to put the checksum.
2883 			TXD = (struct e1000_context_desc *)
2884 			    &adapter->tx_desc_base[curr_txd];
2885 			TXD->upper_setup.tcp_fields.tucss = hdr_len;
/* tucse == 0 means "checksum to end of packet". */
2886 			TXD->upper_setup.tcp_fields.tucse = htole16(0);
2887 			TXD->upper_setup.tcp_fields.tucso =
2888 			    hdr_len + offsetof(struct tcphdr, th_sum);
2889 			cmd |= E1000_TXD_CMD_TCP;
2894 		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2895 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2896 			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2897 			/* no need for context if already set */
2898 			if (adapter->last_hw_offload == CSUM_UDP)
2900 			adapter->last_hw_offload = CSUM_UDP;
2902 * Start offset for header checksum calculation.
2903 * End offset for header checksum calculation.
2904 * Offset of place to put the checksum.
2906 			TXD = (struct e1000_context_desc *)
2907 			    &adapter->tx_desc_base[curr_txd];
2908 			TXD->upper_setup.tcp_fields.tucss = hdr_len;
2909 			TXD->upper_setup.tcp_fields.tucse = htole16(0);
2910 			TXD->upper_setup.tcp_fields.tucso =
2911 			    hdr_len + offsetof(struct udphdr, uh_sum);
/* Finalize the context descriptor and consume its ring slot. */
2921 	TXD->tcp_seg_setup.data = htole32(0);
2922 	TXD->cmd_and_length =
2923 	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
2924 	tx_buffer = &adapter->tx_buffer_area[curr_txd];
2925 	tx_buffer->m_head = NULL;
2926 	tx_buffer->next_eop = -1;
2928 	if (++curr_txd == adapter->num_tx_desc)
2931 	adapter->num_tx_desc_avail--;
2932 	adapter->next_avail_tx_desc = curr_txd;
2936 /**********************************************************************
2938 * Examine each tx_buffer in the used queue. If the hardware is done
2939 * processing the packet then free associated resources. The
2940 * tx_buffer is put back on the free queue.
2942 **********************************************************************/
/*
 * TX completion: walk descriptors from next_tx_to_clean, and for each
 * packet whose EOP descriptor has the DD (descriptor-done) status bit
 * set, reclaim every descriptor in the packet and free its mbuf.
 * Clears IFF_DRV_OACTIVE once enough slots are free, and disarms the
 * watchdog when the ring is fully drained.  Caller holds the TX lock.
 */
2944 lem_txeof(struct adapter *adapter)
2946 	int first, last, done, num_avail;
2947 	struct em_buffer *tx_buffer;
2948 	struct e1000_tx_desc *tx_desc, *eop_desc;
2949 	struct ifnet *ifp = adapter->ifp;
2951 	EM_TX_LOCK_ASSERT(adapter);
/* Fast path: nothing outstanding. */
2953 	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2956 	num_avail = adapter->num_tx_desc_avail;
2957 	first = adapter->next_tx_to_clean;
2958 	tx_desc = &adapter->tx_desc_base[first];
2959 	tx_buffer = &adapter->tx_buffer_area[first];
2960 	last = tx_buffer->next_eop;
2961 	eop_desc = &adapter->tx_desc_base[last];
2964 * What this does is get the index of the
2965 * first descriptor AFTER the EOP of the
2966 * first packet, that way we can do the
2967 * simple comparison on the inner while loop.
2969 	if (++last == adapter->num_tx_desc)
/* Pull the device's status writes into host view before reading DD. */
2973 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2974 	    BUS_DMASYNC_POSTREAD);
2976 	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2977 		/* We clean the range of the packet */
2978 		while (first != done) {
2979 			tx_desc->upper.data = 0;
2980 			tx_desc->lower.data = 0;
2981 			tx_desc->buffer_addr = 0;
2984 			if (tx_buffer->m_head) {
2986 				bus_dmamap_sync(adapter->txtag,
2988 				    BUS_DMASYNC_POSTWRITE);
2989 				bus_dmamap_unload(adapter->txtag,
2992 				m_freem(tx_buffer->m_head);
2993 				tx_buffer->m_head = NULL;
2995 			tx_buffer->next_eop = -1;
/* Progress made: pet the watchdog timestamp. */
2996 			adapter->watchdog_time = ticks;
2998 			if (++first == adapter->num_tx_desc)
3001 			tx_buffer = &adapter->tx_buffer_area[first];
3002 			tx_desc = &adapter->tx_desc_base[first];
3004 		/* See if we can continue to the next packet */
3005 		last = tx_buffer->next_eop;
3007 			eop_desc = &adapter->tx_desc_base[last];
3008 			/* Get new done point */
3009 			if (++last == adapter->num_tx_desc) last = 0;
3014 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3015 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3017 	adapter->next_tx_to_clean = first;
3018 	adapter->num_tx_desc_avail = num_avail;
3021 * If we have enough room, clear IFF_DRV_OACTIVE to
3022 * tell the stack that it is OK to send packets.
3023 * If there are no pending descriptors, clear the watchdog.
3025 	if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
3026 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3027 		if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
3028 			adapter->watchdog_check = FALSE;
3034 /*********************************************************************
3036 * When Link is lost sometimes there is work still in the TX ring
3037 * which may result in a watchdog, rather than allow that we do an
3038 * attempted cleanup and then reinit here. Note that this has been
3039 * seen mostly with fiber adapters.
3041 **********************************************************************/
/*
 * If link is down but TX work is still outstanding, attempt a TX
 * cleanup (call elided from this view) under the TX lock; if work
 * remains after that, reinitialize the adapter rather than letting
 * the watchdog fire.
 */
3043 lem_tx_purge(struct adapter *adapter)
3045 	if ((!adapter->link_active) && (adapter->watchdog_check)) {
3046 		EM_TX_LOCK(adapter);
3048 		EM_TX_UNLOCK(adapter);
3049 		if (adapter->watchdog_check) /* Still outstanding? */
3050 			lem_init_locked(adapter);
3054 /*********************************************************************
3056 * Get a buffer from system mbuf buffer pool.
3058 **********************************************************************/
/*
 * Allocates a fresh mbuf cluster for RX slot i, DMA-loads it via the
 * spare map, swaps the spare map with the slot's map (so the old one
 * becomes the new spare), and writes the segment's bus address into
 * the RX descriptor.  Returns nonzero on allocation/load failure
 * (return statements elided in this view).
 */
3060 lem_get_buf(struct adapter *adapter, int i)
3063 	bus_dma_segment_t segs[1];
3065 	struct em_buffer *rx_buffer;
3068 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3070 		adapter->mbuf_cluster_failed++;
3073 	m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Align the IP header when the frame fits with ETHER_ALIGN slack. */
3075 	if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3076 		m_adj(m, ETHER_ALIGN);
3079 * Using memory from the mbuf cluster pool, invoke the
3080 * bus_dma machinery to arrange the memory mapping.
3082 	error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3083 	    adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3089 	/* If nsegs is wrong then the stack is corrupt. */
3090 	KASSERT(nsegs == 1, ("Too many segments returned!"));
3092 	rx_buffer = &adapter->rx_buffer_area[i];
3093 	if (rx_buffer->m_head != NULL)
3094 		bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap: the slot takes the freshly loaded spare map, the slot's old
 * (now unloaded) map becomes the spare for the next call. */
3096 	map = rx_buffer->map;
3097 	rx_buffer->map = adapter->rx_sparemap;
3098 	adapter->rx_sparemap = map;
3099 	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3100 	rx_buffer->m_head = m;
3102 	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3106 /*********************************************************************
3108 * Allocate memory for rx_buffer structures. Since we use one
3109 * rx_buffer per received packet, the maximum number of rx_buffer's
3110 * that we'll need is equal to the number of receive descriptors
3111 * that we've allocated.
3113 **********************************************************************/
/*
 * Allocates the rx_buffer array, the single-segment (MCLBYTES) RX DMA
 * tag, the spare map used by lem_get_buf, and one map per descriptor.
 * On failure everything is undone via lem_free_receive_structures
 * (fail label elided in this view).
 */
3115 lem_allocate_receive_structures(struct adapter *adapter)
3117 	device_t dev = adapter->dev;
3118 	struct em_buffer *rx_buffer;
3121 	adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3122 	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3123 	if (adapter->rx_buffer_area == NULL) {
3124 		device_printf(dev, "Unable to allocate rx_buffer memory\n");
3128 	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3129 	    1, 0, /* alignment, bounds */
3130 	    BUS_SPACE_MAXADDR, /* lowaddr */
3131 	    BUS_SPACE_MAXADDR, /* highaddr */
3132 	    NULL, NULL, /* filter, filterarg */
3133 	    MCLBYTES, /* maxsize */
3135 	    MCLBYTES, /* maxsegsize */
3137 	    NULL, /* lockfunc */
3141 		device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3146 	/* Create the spare map (used by getbuf) */
3147 	error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3148 	    &adapter->rx_sparemap);
3150 		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* One DMA map per RX descriptor slot. */
3155 	rx_buffer = adapter->rx_buffer_area;
3156 	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3157 		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3160 			device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3169 	lem_free_receive_structures(adapter);
3173 /*********************************************************************
3175 * (Re)initialize receive structures.
3177 **********************************************************************/
/*
 * Zeroes the RX descriptor ring, frees any currently attached mbufs,
 * repopulates every slot via lem_get_buf, resets the scan index, and
 * syncs the ring for device use.
 */
3179 lem_setup_receive_structures(struct adapter *adapter)
3181 	struct em_buffer *rx_buffer;
3184 	/* Reset descriptor ring */
3185 	bzero(adapter->rx_desc_base,
3186 	    (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3188 	/* Free current RX buffers. */
3189 	rx_buffer = adapter->rx_buffer_area;
3190 	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3191 		if (rx_buffer->m_head != NULL) {
3192 			bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3193 			    BUS_DMASYNC_POSTREAD);
3194 			bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3195 			m_freem(rx_buffer->m_head);
3196 			rx_buffer->m_head = NULL;
3200 	/* Allocate new ones. */
3201 	for (i = 0; i < adapter->num_rx_desc; i++) {
3202 		error = lem_get_buf(adapter, i);
3207 	/* Setup our descriptor pointers */
3208 	adapter->next_rx_desc_to_check = 0;
3209 	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3210 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3215 /*********************************************************************
3217 * Enable receive unit.
3219 **********************************************************************/
/* Interrupt throttle: ITR value for at most 8000 interrupts/second,
 * expressed in 256ns units as the datasheet requires. */
3220 #define MAX_INTS_PER_SEC 8000
3221 #define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256)
/*
 * Programs the RX registers: disables RX while configuring, sets
 * interrupt delay/throttling, descriptor ring base/length, RCTL
 * (buffer size, long-packet enable, broadcast accept), RX checksum
 * offload, then re-enables receives and sets head/tail pointers.
 */
3224 lem_initialize_receive_unit(struct adapter *adapter)
3226 	struct ifnet *ifp = adapter->ifp;
3230 	INIT_DEBUGOUT("lem_initialize_receive_unit: begin");
3233 * Make sure receives are disabled while setting
3234 * up the descriptor ring
3236 	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3237 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
/* RADV/ITR registers only exist on 82540 and later. */
3239 	if (adapter->hw.mac.type >= e1000_82540) {
3240 		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
3241 		    adapter->rx_abs_int_delay.value);
3243 * Set the interrupt throttling rate. Value is calculated
3244 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3246 		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
3250 ** When using MSIX interrupts we need to throttle
3251 ** using the EITR register (82574 only)
3254 		for (int i = 0; i < 4; i++)
3255 			E1000_WRITE_REG(&adapter->hw,
3256 			    E1000_EITR_82574(i), DEFAULT_ITR);
3258 	/* Disable accelerated acknowledge */
3259 	if (adapter->hw.mac.type == e1000_82574)
3260 		E1000_WRITE_REG(&adapter->hw,
3261 		    E1000_RFCTL, E1000_RFCTL_ACK_DIS);
3263 	/* Setup the Base and Length of the Rx Descriptor Ring */
3264 	bus_addr = adapter->rxdma.dma_paddr;
3265 	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3266 	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3267 	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3268 	    (u32)(bus_addr >> 32));
3269 	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3272 	/* Setup the Receive Control Register */
3273 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3274 	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3275 		   E1000_RCTL_RDMTS_HALF |
3276 		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3278 	/* Make sure VLAN Filters are off */
3279 	rctl &= ~E1000_RCTL_VFE;
/* 82543 TBI workaround may require accepting bad ("SBP") packets. */
3281 	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3282 		rctl |= E1000_RCTL_SBP;
3284 		rctl &= ~E1000_RCTL_SBP;
/* case labels (2048/4096/8192/16384) are on lines elided from view. */
3286 	switch (adapter->rx_buffer_len) {
3289 		rctl |= E1000_RCTL_SZ_2048;
3292 		rctl |= E1000_RCTL_SZ_4096 |
3293 		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
3296 		rctl |= E1000_RCTL_SZ_8192 |
3297 		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
3300 		rctl |= E1000_RCTL_SZ_16384 |
3301 		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
3305 	if (ifp->if_mtu > ETHERMTU)
3306 		rctl |= E1000_RCTL_LPE;
3308 		rctl &= ~E1000_RCTL_LPE;
3310 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
3311 	if ((adapter->hw.mac.type >= e1000_82543) &&
3312 	    (ifp->if_capenable & IFCAP_RXCSUM)) {
3313 		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3314 		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3315 		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3318 	/* Enable Receives */
3319 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3322 * Setup the HW Rx Head and
3323 * Tail Descriptor Pointers
3325 	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
3326 	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
3331 /*********************************************************************
3333 * Free receive related data structures.
3335 **********************************************************************/
/*
 * Releases the spare map, each slot's mbuf and DMA map, the rx_buffer
 * array, and finally the RX DMA tag.  NULL-guarded throughout so it
 * can serve as the unwind path for a failed
 * lem_allocate_receive_structures.
 */
3337 lem_free_receive_structures(struct adapter *adapter)
3339 	struct em_buffer *rx_buffer;
3342 	INIT_DEBUGOUT("free_receive_structures: begin");
3344 	if (adapter->rx_sparemap) {
3345 		bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3346 		adapter->rx_sparemap = NULL;
3349 	/* Cleanup any existing buffers */
3350 	if (adapter->rx_buffer_area != NULL) {
3351 		rx_buffer = adapter->rx_buffer_area;
3352 		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
/* Slots with an mbuf are synced and unloaded before freeing;
 * map-only slots are just unloaded. */
3353 			if (rx_buffer->m_head != NULL) {
3354 				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3355 				    BUS_DMASYNC_POSTREAD);
3356 				bus_dmamap_unload(adapter->rxtag,
3358 				m_freem(rx_buffer->m_head);
3359 				rx_buffer->m_head = NULL;
3360 			} else if (rx_buffer->map != NULL)
3361 				bus_dmamap_unload(adapter->rxtag,
3363 			if (rx_buffer->map != NULL) {
3364 				bus_dmamap_destroy(adapter->rxtag,
3366 				rx_buffer->map = NULL;
3371 	if (adapter->rx_buffer_area != NULL) {
3372 		free(adapter->rx_buffer_area, M_DEVBUF);
3373 		adapter->rx_buffer_area = NULL;
3376 	if (adapter->rxtag != NULL) {
3377 		bus_dma_tag_destroy(adapter->rxtag);
3378 		adapter->rxtag = NULL;
3382 /*********************************************************************
3384 * This routine executes in interrupt context. It replenishes
3385 * the mbufs in the descriptor and sends data which has been
3386 * dma'ed into host memory to upper layer.
3388 * We loop at most count times if count is > 0, or until done if
3391 * For polling we also now return the number of cleaned packets
3392 *********************************************************************/
/*
 * lem_rxeof - RX completion handler; runs in interrupt context with
 * the RX lock held for most of the loop.  Walks the RX descriptor
 * ring from next_rx_desc_to_check, replenishes mbufs via lem_get_buf(),
 * chains multi-descriptor frames through adapter->fmp/lmp, and hands
 * completed frames to the stack via if_input (lock dropped around the
 * call).  Loops at most 'count' iterations when count > 0.
 * Fix: removed stray double semicolon on the ifp initializer.
 * NOTE(review): several interior lines of this routine appear to have
 * been elided by extraction; only the visible statements are annotated.
 */
3394 lem_rxeof(struct adapter *adapter, int count, int *done)
3396 struct ifnet *ifp = adapter->ifp;
3398 u8 status = 0, accept_frame = 0, eop = 0;
3399 u16 len, desc_len, prev_len_adj;
3401 struct e1000_rx_desc *current_desc;
3403 EM_RX_LOCK(adapter);
3404 i = adapter->next_rx_desc_to_check;
3405 current_desc = &adapter->rx_desc_base[i];
/* Pull latest descriptor contents written by the device. */
3406 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3407 BUS_DMASYNC_POSTREAD);
/* Nothing completed yet: bail out early. */
3409 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3412 EM_RX_UNLOCK(adapter);
3416 while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3417 struct mbuf *m = NULL;
3419 status = current_desc->status;
3420 if ((status & E1000_RXD_STAT_DD) == 0)
3423 mp = adapter->rx_buffer_area[i].m_head;
3425 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3426 * needs to access the last received byte in the mbuf.
3428 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3429 BUS_DMASYNC_POSTREAD);
3433 desc_len = le16toh(current_desc->length);
3434 if (status & E1000_RXD_STAT_EOP) {
/* Strip the hardware-appended Ethernet CRC from the length. */
3437 if (desc_len < ETHER_CRC_LEN) {
3439 prev_len_adj = ETHER_CRC_LEN - desc_len;
3441 len = desc_len - ETHER_CRC_LEN;
/* TBI workaround: frames failing only on a carrier-extension
 * error may still be acceptable on 82543 fiber (TBI mode). */
3447 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3449 u32 pkt_len = desc_len;
3451 if (adapter->fmp != NULL)
3452 pkt_len += adapter->fmp->m_pkthdr.len;
3454 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3455 if (TBI_ACCEPT(&adapter->hw, status,
3456 current_desc->errors, pkt_len, last_byte,
3457 adapter->min_frame_size, adapter->max_frame_size)) {
3458 e1000_tbi_adjust_stats_82543(&adapter->hw,
3459 &adapter->stats, pkt_len,
3460 adapter->hw.mac.addr,
3461 adapter->max_frame_size);
/* Refill this slot with a fresh mbuf before passing the old one up. */
3469 if (lem_get_buf(adapter, i) != 0) {
3474 /* Assign correct length to the current fragment */
3477 if (adapter->fmp == NULL) {
3478 mp->m_pkthdr.len = len;
3479 adapter->fmp = mp; /* Store the first mbuf */
3482 /* Chain mbuf's together */
3483 mp->m_flags &= ~M_PKTHDR;
3485 * Adjust length of previous mbuf in chain if
3486 * we received less than 4 bytes in the last
3489 if (prev_len_adj > 0) {
3490 adapter->lmp->m_len -= prev_len_adj;
3491 adapter->fmp->m_pkthdr.len -=
3494 adapter->lmp->m_next = mp;
3495 adapter->lmp = adapter->lmp->m_next;
3496 adapter->fmp->m_pkthdr.len += len;
3500 adapter->fmp->m_pkthdr.rcvif = ifp;
3502 lem_receive_checksum(adapter, current_desc,
3504 #ifndef __NO_STRICT_ALIGNMENT
3505 if (adapter->max_frame_size >
3506 (MCLBYTES - ETHER_ALIGN) &&
3507 lem_fixup_rx(adapter) != 0)
/* Propagate the hardware-stripped VLAN tag to the mbuf. */
3510 if (status & E1000_RXD_STAT_VP) {
3511 adapter->fmp->m_pkthdr.ether_vtag =
3512 (le16toh(current_desc->special) &
3513 E1000_RXD_SPC_VLAN_MASK);
3514 adapter->fmp->m_flags |= M_VLANTAG;
3516 #ifndef __NO_STRICT_ALIGNMENT
3520 adapter->fmp = NULL;
3521 adapter->lmp = NULL;
3526 /* Reuse loaded DMA map and just update mbuf chain */
3527 mp = adapter->rx_buffer_area[i].m_head;
3528 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3529 mp->m_data = mp->m_ext.ext_buf;
3531 if (adapter->max_frame_size <=
3532 (MCLBYTES - ETHER_ALIGN))
3533 m_adj(mp, ETHER_ALIGN);
/* Error path: drop any partially assembled chain. */
3534 if (adapter->fmp != NULL) {
3535 m_freem(adapter->fmp);
3536 adapter->fmp = NULL;
3537 adapter->lmp = NULL;
3542 /* Zero out the receive descriptors status. */
3543 current_desc->status = 0;
3544 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3545 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3547 /* Advance our pointers to the next descriptor. */
3548 if (++i == adapter->num_rx_desc)
3550 /* Call into the stack */
3552 adapter->next_rx_desc_to_check = i;
/* Drop the lock across if_input: the stack may re-enter the driver. */
3553 EM_RX_UNLOCK(adapter);
3554 (*ifp->if_input)(ifp, m);
3555 EM_RX_LOCK(adapter);
3557 i = adapter->next_rx_desc_to_check;
3559 current_desc = &adapter->rx_desc_base[i];
3561 adapter->next_rx_desc_to_check = i;
3563 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
3565 i = adapter->num_rx_desc - 1;
3566 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3569 EM_RX_UNLOCK(adapter);
3570 return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
3573 #ifndef __NO_STRICT_ALIGNMENT
3575 * When jumbo frames are enabled we should realign entire payload on
3576 * architecures with strict alignment. This is serious design mistake of 8254x
3577 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3578 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3579 * payload. On architectures without strict alignment restrictions 8254x still
3580 * performs unaligned memory access which would reduce the performance too.
3581 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3582 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3583 * existing mbuf chain.
3585 * Be aware, best performance of the 8254x is achieved only when jumbo frame is
3586 * not used at all on architectures with strict alignment.
/*
 * lem_fixup_rx - realign a received frame's payload on strict-alignment
 * architectures by shifting (or prepending a copy of) the Ethernet
 * header, so the IP payload lands on a natural boundary.  On failure
 * the pending chain is dropped and dropped_pkts is bumped.
 * NOTE(review): declarations and return statements of this routine are
 * elided in this view; comments describe only the visible statements.
 */
3589 lem_fixup_rx(struct adapter *adapter)
/* Room in the cluster: slide the whole frame forward in place. */
3596 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3597 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3598 m->m_data += ETHER_HDR_LEN;
/* Otherwise copy just the Ethernet header into a new mbuf and
 * prepend it, leaving the (now aligned) payload where it is. */
3600 MGETHDR(n, M_DONTWAIT, MT_DATA);
3602 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3603 m->m_data += ETHER_HDR_LEN;
3604 m->m_len -= ETHER_HDR_LEN;
3605 n->m_len = ETHER_HDR_LEN;
3606 M_MOVE_PKTHDR(n, m);
/* Allocation failed: account for and free the pending chain. */
3610 adapter->dropped_pkts++;
3611 m_freem(adapter->fmp);
3612 adapter->fmp = NULL;
3621 /*********************************************************************
3623 * Verify that the hardware indicated that the checksum is valid.
3624 * Inform the stack about the status of checksum so that stack
3625 * doesn't spend time verifying the checksum.
3627 *********************************************************************/
/*
 * lem_receive_checksum - translate the RX descriptor's hardware
 * checksum status bits into mbuf csum_flags so the stack can skip
 * software verification.
 */
3629 lem_receive_checksum(struct adapter *adapter,
3630 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3632 /* 82543 or newer only */
3633 if ((adapter->hw.mac.type < e1000_82543) ||
3634 /* Ignore Checksum bit is set */
3635 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3636 mp->m_pkthdr.csum_flags = 0;
3640 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3642 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3643 /* IP Checksum Good */
3644 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3645 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3648 mp->m_pkthdr.csum_flags = 0;
/* TCP/UDP checksum verified by hardware. */
3652 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3654 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3655 mp->m_pkthdr.csum_flags |=
3656 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
/* 0xffff means "checksum already verified" to the stack. */
3657 mp->m_pkthdr.csum_data = htons(0xffff);
3663 * This routine is run via a vlan
/*
 * lem_register_vlan - vlan-config event callback: record the new VLAN
 * tag in the shadow VFTA and re-initialize the adapter if hardware
 * VLAN filtering is enabled, so the change takes effect.
 */
3667 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3669 struct adapter *adapter = ifp->if_softc;
3672 if (ifp->if_softc != arg) /* Not our event */
3675 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
3678 EM_CORE_LOCK(adapter);
/* VFTA is 128 x 32-bit words; tag's high bits select the word. */
3679 index = (vtag >> 5) & 0x7F;
3681 adapter->shadow_vfta[index] |= (1 << bit);
3682 ++adapter->num_vlans;
3683 /* Re-init to load the changes */
3684 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3685 lem_init_locked(adapter);
3686 EM_CORE_UNLOCK(adapter);
3690 * This routine is run via a vlan
/*
 * lem_unregister_vlan - vlan-unconfig event callback: clear the VLAN
 * tag's bit in the shadow VFTA and re-initialize the adapter if
 * hardware VLAN filtering is enabled.
 */
3694 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3696 struct adapter *adapter = ifp->if_softc;
3699 if (ifp->if_softc != arg)
3702 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3705 EM_CORE_LOCK(adapter);
/* Mirror of lem_register_vlan: same word/bit selection. */
3706 index = (vtag >> 5) & 0x7F;
3708 adapter->shadow_vfta[index] &= ~(1 << bit);
3709 --adapter->num_vlans;
3710 /* Re-init to load the changes */
3711 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3712 lem_init_locked(adapter);
3713 EM_CORE_UNLOCK(adapter);
/*
 * lem_setup_vlan_hw_support - after a soft reset, repopulate the
 * hardware VFTA from the shadow copy, enable VLAN tag handling (VME),
 * enable the VLAN filter table, and widen RLPML for the tag bytes.
 * No-op when no VLANs are registered.
 */
3717 lem_setup_vlan_hw_support(struct adapter *adapter)
3719 struct e1000_hw *hw = &adapter->hw;
3723 ** We get here thru init_locked, meaning
3724 ** a soft reset, this has already cleared
3725 ** the VFTA and other state, so if there
3726 ** have been no vlan's registered do nothing.
3728 if (adapter->num_vlans == 0)
3732 ** A soft reset zero's out the VFTA, so
3733 ** we need to repopulate it now.
3735 for (int i = 0; i < EM_VFTA_SIZE; i++)
3736 if (adapter->shadow_vfta[i] != 0)
3737 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3738 i, adapter->shadow_vfta[i]);
/* Turn on VLAN tag stripping/insertion (VME). */
3740 reg = E1000_READ_REG(hw, E1000_CTRL);
3741 reg |= E1000_CTRL_VME;
3742 E1000_WRITE_REG(hw, E1000_CTRL, reg);
3744 /* Enable the Filter Table */
3745 reg = E1000_READ_REG(hw, E1000_RCTL);
3746 reg &= ~E1000_RCTL_CFIEN;
3747 reg |= E1000_RCTL_VFE;
3748 E1000_WRITE_REG(hw, E1000_RCTL, reg);
3750 /* Update the frame size */
3751 E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
3752 adapter->max_frame_size + VLAN_TAG_SIZE);
/*
 * lem_enable_intr - unmask device interrupts via IMS; when MSI-X is
 * in use, also program EIAC and fold the MSI-X vectors into the mask.
 */
3756 lem_enable_intr(struct adapter *adapter)
3758 struct e1000_hw *hw = &adapter->hw;
3759 u32 ims_mask = IMS_ENABLE_MASK;
3761 if (adapter->msix) {
3762 E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
3763 ims_mask |= EM_MSIX_MASK;
3765 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
/*
 * lem_disable_intr - mask all device interrupts (IMC = all ones);
 * clears EIAC first so no MSI-X auto-clear remains armed.
 */
3769 lem_disable_intr(struct adapter *adapter)
3771 struct e1000_hw *hw = &adapter->hw;
3774 E1000_WRITE_REG(hw, EM_EIAC, 0);
3775 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
3779 * Bit of a misnomer, what this really means is
3780 * to enable OS management of the system... aka
3781 * to disable special hardware management features
/*
 * lem_init_manageability - hand network control to the OS: disable
 * the management firmware's interception of ARP traffic (MANC).
 */
3784 lem_init_manageability(struct adapter *adapter)
3786 /* A shared code workaround */
3787 if (adapter->has_manage) {
3788 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3789 /* disable hardware interception of ARP */
3790 manc &= ~(E1000_MANC_ARP_EN);
3791 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3796 * Give control back to hardware management
3797 * controller if there is one.
/*
 * lem_release_manageability - inverse of lem_init_manageability:
 * give ARP interception back to the management firmware.
 */
3800 lem_release_manageability(struct adapter *adapter)
3802 if (adapter->has_manage) {
3803 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3805 /* re-enable hardware interception of ARP */
3806 manc |= E1000_MANC_ARP_EN;
3807 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3812 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3813 * For ASF and Pass Through versions of f/w this means
3814 * that the driver is loaded. For AMT version type f/w
3815 * this means that the network i/f is open.
/*
 * lem_get_hw_control - set CTRL_EXT:DRV_LOAD to tell ASF/AMT firmware
 * that the driver has claimed the device (i/f open for AMT f/w).
 */
3818 lem_get_hw_control(struct adapter *adapter)
3822 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3823 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3824 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3829 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3830 * For ASF and Pass Through versions of f/w this means that
3831 * the driver is no longer loaded. For AMT versions of the
3832 * f/w this means that the network i/f is closed.
/*
 * lem_release_hw_control - clear CTRL_EXT:DRV_LOAD so firmware knows
 * the driver is unloaded (i/f closed for AMT f/w).  No-op without a
 * management controller.
 */
3835 lem_release_hw_control(struct adapter *adapter)
3839 if (!adapter->has_manage)
3842 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3843 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3844 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
/*
 * lem_is_valid_ether_addr - reject multicast (low bit of first octet
 * set) and all-zero MAC addresses.
 * NOTE(review): return statements are elided in this view.
 */
3849 lem_is_valid_ether_addr(u8 *addr)
3851 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3853 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3861 ** Parse the interface capabilities with regard
3862 ** to both system management and wake-on-lan for
/*
 * lem_get_wakeup - probe NVM for manageability and Wake-on-LAN
 * support: pick the per-MAC APME bit and NVM control word, then apply
 * board-specific quirks (dual fiber port B, quad-port KSP3) that
 * override the EEPROM setting.
 */
3866 lem_get_wakeup(device_t dev)
3868 struct adapter *adapter = device_get_softc(dev);
3869 u16 eeprom_data = 0, device_id, apme_mask;
3871 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
3872 apme_mask = EM_EEPROM_APME;
3874 switch (adapter->hw.mac.type) {
/* 82544 keeps APME in INIT_CONTROL2 with a different bit. */
3879 e1000_read_nvm(&adapter->hw,
3880 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
3881 apme_mask = EM_82544_APME;
3884 case e1000_82546_rev_3:
/* Dual-port parts: port B has its own NVM control word. */
3885 if (adapter->hw.bus.func == 1) {
3886 e1000_read_nvm(&adapter->hw,
3887 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3890 e1000_read_nvm(&adapter->hw,
3891 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3894 e1000_read_nvm(&adapter->hw,
3895 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3898 if (eeprom_data & apme_mask)
3899 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
3901 * We have the eeprom settings, now apply the special cases
3902 * where the eeprom may be wrong or the board won't support
3903 * wake on lan on a particular port
3905 device_id = pci_get_device(dev);
3906 switch (device_id) {
3907 case E1000_DEV_ID_82546GB_PCIE:
3910 case E1000_DEV_ID_82546EB_FIBER:
3911 case E1000_DEV_ID_82546GB_FIBER:
3912 /* Wake events only supported on port A for dual fiber
3913 * regardless of eeprom setting */
3914 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
3915 E1000_STATUS_FUNC_1)
3918 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
3919 /* if quad port adapter, disable WoL on all but port A */
3920 if (global_quad_port_a != 0)
3922 /* Reset for multiple quad port adapters */
3923 if (++global_quad_port_a == 4)
3924 global_quad_port_a = 0;
3932 * Enable PCI Wake On Lan capability
/*
 * lem_enable_wakeup - arm PCI Wake-on-LAN on suspend/shutdown:
 * advertise wake capability, mask out wake sources the user disabled
 * (IFCAP_WOL_MAGIC / IFCAP_WOL_MCAST), program WUC/WUFC (via the PHY
 * on pchlan), and set PME enable in PCI power-management config space.
 */
3935 lem_enable_wakeup(device_t dev)
3937 struct adapter *adapter = device_get_softc(dev);
3938 struct ifnet *ifp = adapter->ifp;
3939 u32 pmc, ctrl, ctrl_ext, rctl;
/* No PCI power-management capability: nothing to arm. */
3942 if ((pci_find_extcap(dev, PCIY_PMG, &pmc) != 0))
3945 /* Advertise the wakeup capability */
3946 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
3947 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
3948 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
3949 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
3951 /* Keep the laser running on Fiber adapters */
3952 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
3953 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
3954 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3955 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
3956 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
3960 ** Determine type of Wakeup: note that wol
3961 ** is set with all bits on by default.
3963 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
3964 adapter->wol &= ~E1000_WUFC_MAG;
3966 if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
3967 adapter->wol &= ~E1000_WUFC_MC;
/* Multicast wake requires promiscuous multicast reception. */
3969 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3970 rctl |= E1000_RCTL_MPE;
3971 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
/* pchlan parts program wakeup through the PHY instead of the MAC. */
3974 if (adapter->hw.mac.type == e1000_pchlan) {
3975 if (lem_enable_phy_wakeup(adapter))
3978 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
3979 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
/* Finally reflect WOL state into PCI PM status/enable bits. */
3984 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
3985 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3986 if (ifp->if_capenable & IFCAP_WOL)
3987 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3988 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
3994 ** WOL in the newer chipset interfaces (pchlan)
3995 ** require thing to be copied into the phy
/*
 * lem_enable_phy_wakeup - newer (pchlan) parts keep wake state in the
 * PHY: mirror the MAC's receive-address registers (RAR), multicast
 * table (MTA) and relevant RCTL/CTRL bits into the BM PHY registers,
 * enable wakeup in WUC/WUFC and the PHY, then set the host-wakeup bit
 * on PHY page 769 under the PHY semaphore.
 */
3998 lem_enable_phy_wakeup(struct adapter *adapter)
4000 struct e1000_hw *hw = &adapter->hw;
4004 /* copy MAC RARs to PHY RARs */
4005 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4006 mreg = E1000_READ_REG(hw, E1000_RAL(i));
4007 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
4008 e1000_write_phy_reg(hw, BM_RAR_M(i),
4009 (u16)((mreg >> 16) & 0xFFFF));
4010 mreg = E1000_READ_REG(hw, E1000_RAH(i));
4011 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
4012 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
4013 (u16)((mreg >> 16) & 0xFFFF));
4016 /* copy MAC MTA to PHY MTA */
4017 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4018 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4019 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4020 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4021 (u16)((mreg >> 16) & 0xFFFF));
4024 /* configure PHY Rx Control register */
4025 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
4026 mreg = E1000_READ_REG(hw, E1000_RCTL);
4027 if (mreg & E1000_RCTL_UPE)
4028 preg |= BM_RCTL_UPE;
4029 if (mreg & E1000_RCTL_MPE)
4030 preg |= BM_RCTL_MPE;
4031 preg &= ~(BM_RCTL_MO_MASK);
4032 if (mreg & E1000_RCTL_MO_3)
4033 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4034 << BM_RCTL_MO_SHIFT);
4035 if (mreg & E1000_RCTL_BAM)
4036 preg |= BM_RCTL_BAM;
4037 if (mreg & E1000_RCTL_PMCF)
4038 preg |= BM_RCTL_PMCF;
4039 mreg = E1000_READ_REG(hw, E1000_CTRL);
4040 if (mreg & E1000_CTRL_RFCE)
4041 preg |= BM_RCTL_RFCE;
4042 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
4044 /* enable PHY wakeup in MAC register */
4045 E1000_WRITE_REG(hw, E1000_WUC,
4046 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4047 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
4049 /* configure and enable PHY wakeup in PHY registers */
4050 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
4051 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4053 /* activate PHY wakeup */
4054 ret = hw->phy.ops.acquire(hw);
4056 printf("Could not acquire PHY\n");
/* Raw MDIC access: select wakeup-control page (769) first. */
4059 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4060 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4061 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
4063 printf("Could not read PHY page 769\n");
4066 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4067 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
4069 printf("Could not set PHY Host Wakeup bit\n");
4071 hw->phy.ops.release(hw);
/*
 * lem_led_func - led(4) callback: turn the adapter's identify LED on
 * (setup + led_on) or off (led_off + cleanup) under the core lock.
 */
4077 lem_led_func(void *arg, int onoff)
4079 struct adapter *adapter = arg;
4081 EM_CORE_LOCK(adapter);
4083 e1000_setup_led(&adapter->hw);
4084 e1000_led_on(&adapter->hw);
4086 e1000_led_off(&adapter->hw);
4087 e1000_cleanup_led(&adapter->hw);
4089 EM_CORE_UNLOCK(adapter);
4092 /*********************************************************************
4093 * 82544 Coexistence issue workaround.
4094 * There are 2 issues.
4095 * 1. Transmit Hang issue.
4096 * To detect this issue, following equation can be used...
4097 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4098 * If SUM[3:0] is in between 1 to 4, we will have this issue.
4101 * To detect this issue, following equation can be used...
4102 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4103 * If SUM[3:0] is in between 9 to c, we will have this issue.
4107 * Make sure we do not have ending address
4108 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4110 *************************************************************************/
/*
 * lem_fill_descriptors - 82544 hang/DAC workaround: if the buffer's
 * ending alignment (low 3 address bits + low 4 length bits) lands in
 * the problem ranges 0x1-0x4 (TX hang) or 0x9-0xC (64-bit DAC), split
 * the buffer into two descriptors so the last one is only 4 bytes.
 * Returns the number of descriptor entries filled (1 or 2).
 */
4112 lem_fill_descriptors (bus_addr_t address, u32 length,
4113 PDESC_ARRAY desc_array)
4115 u32 safe_terminator;
4117 /* Since issue is sensitive to length and address.*/
4118 /* Let us first check the address...*/
4120 desc_array->descriptor[0].address = address;
4121 desc_array->descriptor[0].length = length;
4122 desc_array->elements = 1;
4123 return (desc_array->elements);
/* Ending alignment: ADDR[2:0] + SIZE[3:0], modulo 16. */
4125 safe_terminator = (u32)((((u32)address & 0x7) +
4126 (length & 0xF)) & 0xF);
4127 /* If it falls outside the problem ranges 0x1-0x4 and 0x9-0xC, one descriptor is safe. */
4128 if (safe_terminator == 0 ||
4129 (safe_terminator > 4 &&
4130 safe_terminator < 9) ||
4131 (safe_terminator > 0xC &&
4132 safe_terminator <= 0xF)) {
4133 desc_array->descriptor[0].address = address;
4134 desc_array->descriptor[0].length = length;
4135 desc_array->elements = 1;
4136 return (desc_array->elements);
/* Problem range: split off the last 4 bytes into a second descriptor. */
4139 desc_array->descriptor[0].address = address;
4140 desc_array->descriptor[0].length = length - 4;
4141 desc_array->descriptor[1].address = address + (length - 4);
4142 desc_array->descriptor[1].length = 4;
4143 desc_array->elements = 2;
4144 return (desc_array->elements);
4147 /**********************************************************************
4149 * Update the board statistics counters.
4151 **********************************************************************/
/*
 * lem_update_stats_counters - accumulate the device's clear-on-read
 * statistics registers into adapter->stats and derive the ifnet
 * if_collisions / if_ierrors / if_oerrors counters from them.
 */
4153 lem_update_stats_counters(struct adapter *adapter)
/* Symbol/sequence errors are only meaningful on copper or with link up. */
4157 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
4158 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4159 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4160 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4162 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4163 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4164 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4165 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4167 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4168 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4169 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4170 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4171 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4172 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4173 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4174 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4175 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4176 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4177 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4178 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4179 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4180 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4181 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4182 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4183 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4184 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4185 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4186 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4188 /* For the 64-bit byte counters the low dword must be read first. */
4189 /* Both registers clear on the read of the high dword */
4191 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
4192 ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
4193 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
4194 ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
4196 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
4197 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
4198 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
4199 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
4200 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
4202 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
4203 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
4205 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
4206 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
4207 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
4208 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
4209 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
4210 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
4211 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
4212 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
4213 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
4214 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* These counters only exist on 82543 and newer MACs. */
4216 if (adapter->hw.mac.type >= e1000_82543) {
4217 adapter->stats.algnerrc +=
4218 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
4219 adapter->stats.rxerrc +=
4220 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
4221 adapter->stats.tncrs +=
4222 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
4223 adapter->stats.cexterr +=
4224 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
4225 adapter->stats.tsctc +=
4226 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
4227 adapter->stats.tsctfc +=
4228 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
4232 ifp->if_collisions = adapter->stats.colc;
/* Rx Errors */
4235 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
4236 adapter->stats.crcerrs + adapter->stats.algnerrc +
4237 adapter->stats.ruc + adapter->stats.roc +
4238 adapter->stats.mpc + adapter->stats.cexterr;
/* Tx Errors */
4241 ifp->if_oerrors = adapter->stats.ecol +
4242 adapter->stats.latecol + adapter->watchdog_events;
4245 /* Export a single 32-bit register via a read-only sysctl. */
/*
 * lem_sysctl_reg_handler - generic read-only sysctl: arg1 is the
 * adapter, arg2 the register offset; reads the register and reports
 * its current 32-bit value.
 */
4247 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4249 struct adapter *adapter;
4252 adapter = oidp->oid_arg1;
4253 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4254 return (sysctl_handle_int(oidp, &val, 0, req));
4258 * Add sysctl variables, one per statistic, to the system.
4261 lem_add_hw_stats(struct adapter *adapter)
4263 device_t dev = adapter->dev;
4265 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4266 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4267 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4268 struct e1000_hw_stats *stats = &adapter->stats;
4270 struct sysctl_oid *stat_node;
4271 struct sysctl_oid_list *stat_list;
4273 /* Driver Statistics */
4274 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail",
4275 CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4277 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
4278 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4279 "Std mbuf cluster failed");
4280 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4281 CTLFLAG_RD, &adapter->dropped_pkts,
4282 "Driver dropped packets");
4283 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
4284 CTLFLAG_RD, &adapter->no_tx_dma_setup,
4285 "Driver tx dma failure in xmit");
4286 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4287 CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4288 "Not enough tx descriptors failure in xmit");
4289 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4290 CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4291 "Not enough tx descriptors failure in xmit");
4292 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4293 CTLFLAG_RD, &adapter->rx_overruns,
4295 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4296 CTLFLAG_RD, &adapter->watchdog_events,
4297 "Watchdog timeouts");
4299 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4300 CTLFLAG_RD, adapter, E1000_CTRL,
4301 lem_sysctl_reg_handler, "IU",
4302 "Device Control Register");
4303 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4304 CTLFLAG_RD, adapter, E1000_RCTL,
4305 lem_sysctl_reg_handler, "IU",
4306 "Receiver Control Register");
4307 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4308 CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4309 "Flow Control High Watermark");
4310 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4311 CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4312 "Flow Control Low Watermark");
4313 SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, "fifo_workaround",
4314 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4315 "TX FIFO workaround events");
4316 SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, "fifo_reset",
4317 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4320 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head",
4321 CTLFLAG_RD, adapter, E1000_TDH(0),
4322 lem_sysctl_reg_handler, "IU",
4323 "Transmit Descriptor Head");
4324 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail",
4325 CTLFLAG_RD, adapter, E1000_TDT(0),
4326 lem_sysctl_reg_handler, "IU",
4327 "Transmit Descriptor Tail");
4328 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head",
4329 CTLFLAG_RD, adapter, E1000_RDH(0),
4330 lem_sysctl_reg_handler, "IU",
4331 "Receive Descriptor Head");
4332 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail",
4333 CTLFLAG_RD, adapter, E1000_RDT(0),
4334 lem_sysctl_reg_handler, "IU",
4335 "Receive Descriptor Tail");
4338 /* MAC stats get their own sub node */
4340 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4341 CTLFLAG_RD, NULL, "Statistics");
4342 stat_list = SYSCTL_CHILDREN(stat_node);
4344 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4345 CTLFLAG_RD, &stats->ecol,
4346 "Excessive collisions");
4347 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "single_coll",
4348 CTLFLAG_RD, &stats->scc,
4349 "Single collisions");
4350 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4351 CTLFLAG_RD, &stats->mcc,
4352 "Multiple collisions");
4353 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "late_coll",
4354 CTLFLAG_RD, &stats->latecol,
4356 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "collision_count",
4357 CTLFLAG_RD, &stats->colc,
4359 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4360 CTLFLAG_RD, &adapter->stats.symerrs,
4362 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4363 CTLFLAG_RD, &adapter->stats.sec,
4365 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "defer_count",
4366 CTLFLAG_RD, &adapter->stats.dc,
4368 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4369 CTLFLAG_RD, &adapter->stats.mpc,
4371 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4372 CTLFLAG_RD, &adapter->stats.rnbc,
4373 "Receive No Buffers");
4374 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4375 CTLFLAG_RD, &adapter->stats.ruc,
4376 "Receive Undersize");
4377 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4378 CTLFLAG_RD, &adapter->stats.rfc,
4379 "Fragmented Packets Received ");
4380 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4381 CTLFLAG_RD, &adapter->stats.roc,
4382 "Oversized Packets Received");
4383 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4384 CTLFLAG_RD, &adapter->stats.rjc,
4386 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4387 CTLFLAG_RD, &adapter->stats.rxerrc,
4389 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4390 CTLFLAG_RD, &adapter->stats.crcerrs,
4392 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4393 CTLFLAG_RD, &adapter->stats.algnerrc,
4394 "Alignment Errors");
4395 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4396 CTLFLAG_RD, &adapter->stats.cexterr,
4397 "Collision/Carrier extension errors");
4398 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4399 CTLFLAG_RD, &adapter->stats.xonrxc,
4401 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4402 CTLFLAG_RD, &adapter->stats.xontxc,
4404 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4405 CTLFLAG_RD, &adapter->stats.xoffrxc,
4407 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4408 CTLFLAG_RD, &adapter->stats.xofftxc,
4409 "XOFF Transmitted");
4411 /* Packet Reception Stats */
4412 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4413 CTLFLAG_RD, &adapter->stats.tpr,
4414 "Total Packets Received ");
4415 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4416 CTLFLAG_RD, &adapter->stats.gprc,
4417 "Good Packets Received");
4418 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4419 CTLFLAG_RD, &adapter->stats.bprc,
4420 "Broadcast Packets Received");
4421 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4422 CTLFLAG_RD, &adapter->stats.mprc,
4423 "Multicast Packets Received");
4424 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4425 CTLFLAG_RD, &adapter->stats.prc64,
4426 "64 byte frames received ");
4427 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4428 CTLFLAG_RD, &adapter->stats.prc127,
4429 "65-127 byte frames received");
4430 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4431 CTLFLAG_RD, &adapter->stats.prc255,
4432 "128-255 byte frames received");
4433 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4434 CTLFLAG_RD, &adapter->stats.prc511,
4435 "256-511 byte frames received");
4436 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4437 CTLFLAG_RD, &adapter->stats.prc1023,
4438 "512-1023 byte frames received");
4439 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4440 CTLFLAG_RD, &adapter->stats.prc1522,
4441 "1023-1522 byte frames received");
4442 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4443 CTLFLAG_RD, &adapter->stats.gorc,
4444 "Good Octets Received");
4446 /* Packet Transmission Stats */
4447 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4448 CTLFLAG_RD, &adapter->stats.gotc,
4449 "Good Octets Transmitted");
4450 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4451 CTLFLAG_RD, &adapter->stats.tpt,
4452 "Total Packets Transmitted");
4453 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4454 CTLFLAG_RD, &adapter->stats.gptc,
4455 "Good Packets Transmitted");
4456 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4457 CTLFLAG_RD, &adapter->stats.bptc,
4458 "Broadcast Packets Transmitted");
4459 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4460 CTLFLAG_RD, &adapter->stats.mptc,
4461 "Multicast Packets Transmitted");
4462 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4463 CTLFLAG_RD, &adapter->stats.ptc64,
4464 "64 byte frames transmitted ");
4465 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4466 CTLFLAG_RD, &adapter->stats.ptc127,
4467 "65-127 byte frames transmitted");
4468 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4469 CTLFLAG_RD, &adapter->stats.ptc255,
4470 "128-255 byte frames transmitted");
4471 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4472 CTLFLAG_RD, &adapter->stats.ptc511,
4473 "256-511 byte frames transmitted");
4474 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4475 CTLFLAG_RD, &adapter->stats.ptc1023,
4476 "512-1023 byte frames transmitted");
4477 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4478 CTLFLAG_RD, &adapter->stats.ptc1522,
4479 "1024-1522 byte frames transmitted");
4480 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4481 CTLFLAG_RD, &adapter->stats.tsctc,
4482 "TSO Contexts Transmitted");
4483 SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4484 CTLFLAG_RD, &adapter->stats.tsctfc,
4485 "TSO Contexts Failed");
4488 /**********************************************************************
4490 * This routine provides a way to dump out the adapter eeprom,
4491 * often a useful debug/service tool. This only dumps the first
4492 * 32 words, stuff that matters is in that extent.
4494 **********************************************************************/
4497 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4499 struct adapter *adapter;
4504 error = sysctl_handle_int(oidp, &result, 0, req);
4506 if (error || !req->newptr)
4510 * This value will cause a hex dump of the
4511 * first 32 16-bit words of the EEPROM to
4515 adapter = (struct adapter *)arg1;
4516 lem_print_nvm_info(adapter);
4523 lem_print_nvm_info(struct adapter *adapter)
4528 /* Its a bit crude, but it gets the job done */
4529 printf("\nInterface EEPROM Dump:\n");
4530 printf("Offset\n0x0000 ");
4531 for (i = 0, j = 0; i < 32; i++, j++) {
4532 if (j == 8) { /* Make the offset block */
4534 printf("\n0x00%x0 ",row);
4536 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4537 printf("%04x ", eeprom_data);
4543 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4545 struct em_int_delay_info *info;
4546 struct adapter *adapter;
4552 info = (struct em_int_delay_info *)arg1;
4553 usecs = info->value;
4554 error = sysctl_handle_int(oidp, &usecs, 0, req);
4555 if (error != 0 || req->newptr == NULL)
4557 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4559 info->value = usecs;
4560 ticks = EM_USECS_TO_TICKS(usecs);
4562 adapter = info->adapter;
4564 EM_CORE_LOCK(adapter);
4565 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4566 regval = (regval & ~0xffff) | (ticks & 0xffff);
4567 /* Handle a few special cases. */
4568 switch (info->offset) {
4573 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4574 /* Don't write 0 into the TIDV register. */
4577 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4580 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4581 EM_CORE_UNLOCK(adapter);
4586 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4587 const char *description, struct em_int_delay_info *info,
4588 int offset, int value)
4590 info->adapter = adapter;
4591 info->offset = offset;
4592 info->value = value;
4593 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4594 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4595 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4596 info, 0, lem_sysctl_int_delay, "I", description);
4600 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4601 const char *description, int *limit, int value)
4604 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4605 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4606 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4609 #ifndef EM_LEGACY_IRQ
4611 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4612 const char *description, int *limit, int value)
4615 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4616 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4617 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);