1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 * Uncomment the following extensions for better performance in a VM,
37 * especially if you have support in the hypervisor.
38 * See http://info.iet.unipi.it/~luigi/netmap/
40 // #define BATCH_DISPATCH
41 // #define NIC_SEND_COMBINING
42 // #define NIC_PARAVIRT /* enable virtio-like synchronization */
45 #include "opt_inet6.h"
47 #ifdef HAVE_KERNEL_OPTION_HEADERS
48 #include "opt_device_polling.h"
51 #include <sys/param.h>
52 #include <sys/systm.h>
54 #include <sys/endian.h>
55 #include <sys/kernel.h>
56 #include <sys/kthread.h>
57 #include <sys/malloc.h>
59 #include <sys/module.h>
61 #include <sys/socket.h>
62 #include <sys/sockio.h>
63 #include <sys/sysctl.h>
64 #include <sys/taskqueue.h>
65 #include <sys/eventhandler.h>
66 #include <machine/bus.h>
67 #include <machine/resource.h>
70 #include <net/ethernet.h>
72 #include <net/if_arp.h>
73 #include <net/if_dl.h>
74 #include <net/if_media.h>
76 #include <net/if_types.h>
77 #include <net/if_vlan_var.h>
79 #include <netinet/in_systm.h>
80 #include <netinet/in.h>
81 #include <netinet/if_ether.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip6.h>
84 #include <netinet/tcp.h>
85 #include <netinet/udp.h>
87 #include <machine/in_cksum.h>
88 #include <dev/led/led.h>
89 #include <dev/pci/pcivar.h>
90 #include <dev/pci/pcireg.h>
92 #include "e1000_api.h"
95 /*********************************************************************
96 * Legacy Em Driver version:
97 *********************************************************************/
98 char lem_driver_version[] = "1.1.0";
100 /*********************************************************************
101 * PCI Device ID Table
103 * Used by probe to select devices to load on
104 * Last field stores an index into e1000_strings
105 * Last entry must be all 0s
107 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
108 *********************************************************************/
110 static em_vendor_info_t lem_vendor_info_array[] =
112 /* Intel(R) PRO/1000 Network Connection */
/* All entries use string index 0, i.e. the single entry in lem_strings[]. */
/* 82540 family */
113 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82541 family */
119 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82542 */
127 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82543 family */
129 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82544 family */
132 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82545 family */
137 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82546 family (dual-port parts) */
143 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
147 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
148 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
149 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
150 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
151 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
152 PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82547 family */
154 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
155 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
156 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
157 /* required last entry */
161 /*********************************************************************
162 * Table of branding strings for all supported NICs.
163 *********************************************************************/
/* Single branding string; every table entry above indexes slot 0. */
165 static char *lem_strings[] = {
166 "Intel(R) PRO/1000 Legacy Network Connection"
169 /*********************************************************************
170 * Function prototypes
171 *********************************************************************/
/* newbus device methods (probe/attach/detach/power management) */
172 static int lem_probe(device_t);
173 static int lem_attach(device_t);
174 static int lem_detach(device_t);
175 static int lem_shutdown(device_t);
176 static int lem_suspend(device_t);
177 static int lem_resume(device_t);
/* ifnet entry points: transmit start, ioctl, init/stop, media */
178 static void lem_start(struct ifnet *);
179 static void lem_start_locked(struct ifnet *ifp);
180 static int lem_ioctl(struct ifnet *, u_long, caddr_t);
181 static void lem_init(void *);
182 static void lem_init_locked(struct adapter *);
183 static void lem_stop(void *);
184 static void lem_media_status(struct ifnet *, struct ifmediareq *);
185 static int lem_media_change(struct ifnet *);
/* hardware bring-up and resource management */
186 static void lem_identify_hardware(struct adapter *);
187 static int lem_allocate_pci_resources(struct adapter *);
188 static int lem_allocate_irq(struct adapter *adapter);
189 static void lem_free_pci_resources(struct adapter *);
190 static void lem_local_timer(void *);
191 static int lem_hardware_init(struct adapter *);
192 static int lem_setup_interface(device_t, struct adapter *);
/* TX/RX ring setup, teardown and per-packet processing */
193 static void lem_setup_transmit_structures(struct adapter *);
194 static void lem_initialize_transmit_unit(struct adapter *);
195 static int lem_setup_receive_structures(struct adapter *);
196 static void lem_initialize_receive_unit(struct adapter *);
197 static void lem_enable_intr(struct adapter *);
198 static void lem_disable_intr(struct adapter *);
199 static void lem_free_transmit_structures(struct adapter *);
200 static void lem_free_receive_structures(struct adapter *);
201 static void lem_update_stats_counters(struct adapter *);
202 static void lem_add_hw_stats(struct adapter *adapter);
203 static void lem_txeof(struct adapter *);
204 static void lem_tx_purge(struct adapter *);
205 static int lem_allocate_receive_structures(struct adapter *);
206 static int lem_allocate_transmit_structures(struct adapter *);
207 static bool lem_rxeof(struct adapter *, int, int *);
/* RX realignment helper for strict-alignment platforms only */
208 #ifndef __NO_STRICT_ALIGNMENT
209 static int lem_fixup_rx(struct adapter *);
211 static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
213 static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
215 static void lem_set_promisc(struct adapter *);
216 static void lem_disable_promisc(struct adapter *);
217 static void lem_set_multi(struct adapter *);
218 static void lem_update_link_status(struct adapter *);
219 static int lem_get_buf(struct adapter *, int);
/* VLAN event handlers and hardware VLAN offload */
220 static void lem_register_vlan(void *, struct ifnet *, u16);
221 static void lem_unregister_vlan(void *, struct ifnet *, u16);
222 static void lem_setup_vlan_hw_support(struct adapter *);
223 static int lem_xmit(struct adapter *, struct mbuf **);
224 static void lem_smartspeed(struct adapter *);
/* 82547 TX FIFO workaround helpers */
225 static int lem_82547_fifo_workaround(struct adapter *, int);
226 static void lem_82547_update_fifo_head(struct adapter *, int);
227 static int lem_82547_tx_fifo_reset(struct adapter *);
228 static void lem_82547_move_tail(void *);
/* busdma allocation helpers and sysctl plumbing */
229 static int lem_dma_malloc(struct adapter *, bus_size_t,
230 struct em_dma_alloc *, int);
231 static void lem_dma_free(struct adapter *, struct em_dma_alloc *);
232 static int lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
233 static void lem_print_nvm_info(struct adapter *);
234 static int lem_is_valid_ether_addr(u8 *);
235 static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
236 PDESC_ARRAY desc_array);
237 static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
238 static void lem_add_int_delay_sysctl(struct adapter *, const char *,
239 const char *, struct em_int_delay_info *, int, int);
240 static void lem_set_flow_cntrl(struct adapter *, const char *,
241 const char *, int *, int);
242 /* Management and WOL Support */
243 static void lem_init_manageability(struct adapter *);
244 static void lem_release_manageability(struct adapter *);
245 static void lem_get_hw_control(struct adapter *);
246 static void lem_release_hw_control(struct adapter *);
247 static void lem_get_wakeup(device_t);
248 static void lem_enable_wakeup(device_t);
249 static int lem_enable_phy_wakeup(struct adapter *);
250 static void lem_led_func(void *, int);
/* interrupt handlers and deferred (taskqueue) RX/TX and link work */
252 static void lem_intr(void *);
253 static int lem_irq_fast(void *);
254 static void lem_handle_rxtx(void *context, int pending);
255 static void lem_handle_link(void *context, int pending);
256 static void lem_add_rx_process_limit(struct adapter *, const char *,
257 const char *, int *, int);
259 #ifdef DEVICE_POLLING
260 static poll_handler_t lem_poll;
263 /*********************************************************************
264 * FreeBSD Device Interface Entry Points
265 *********************************************************************/
267 static device_method_t lem_methods[] = {
268 /* Device interface */
269 DEVMETHOD(device_probe, lem_probe),
270 DEVMETHOD(device_attach, lem_attach),
271 DEVMETHOD(device_detach, lem_detach),
272 DEVMETHOD(device_shutdown, lem_shutdown),
273 DEVMETHOD(device_suspend, lem_suspend),
274 DEVMETHOD(device_resume, lem_resume),
/* Driver shares the "em" name and devclass with the main em(4) driver. */
278 static driver_t lem_driver = {
279 "em", lem_methods, sizeof(struct adapter),
282 extern devclass_t em_devclass;
283 DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
284 MODULE_DEPEND(lem, pci, 1, 1, 1);
285 MODULE_DEPEND(lem, ether, 1, 1, 1);
287 /*********************************************************************
288 * Tunable default values.
289 *********************************************************************/
/*
 * NOTE(review): the 1024/1000 factors suggest the interrupt-delay
 * registers tick in 1.024 us units -- confirm against the e1000 datasheet.
 */
291 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
292 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
294 #define MAX_INTS_PER_SEC 8000
295 #define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256))
/* Interrupt-delay defaults, converted from register ticks to usecs. */
297 static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
298 static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
299 static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
300 static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
302 * increase lem_rxd and lem_txd to at least 2048 in netmap mode
303 * for better performance.
305 static int lem_rxd = EM_DEFAULT_RXD;
306 static int lem_txd = EM_DEFAULT_TXD;
307 static int lem_smart_pwr_down = FALSE;
309 /* Controls whether promiscuous also shows bad packets */
310 static int lem_debug_sbp = FALSE;
/* Loader tunables mirroring the defaults above (hw.em.* namespace). */
312 TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
313 TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
314 TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
315 TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
316 TUNABLE_INT("hw.em.rxd", &lem_rxd);
317 TUNABLE_INT("hw.em.txd", &lem_txd);
318 TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
319 TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
321 /* Interrupt style - default to fast */
322 static int lem_use_legacy_irq = 0;
323 TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq);
325 /* How many packets rxeof tries to clean at a time */
326 static int lem_rx_process_limit = 100;
327 TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
329 /* Flow control setting - default to FULL */
330 static int lem_fc_setting = e1000_fc_full;
331 TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
333 /* Global used in WOL setup with multiport cards */
334 static int global_quad_port_a = 0;
336 #ifdef DEV_NETMAP /* see ixgbe.c for details */
337 #include <dev/netmap/if_lem_netmap.h>
338 #endif /* DEV_NETMAP */
340 /*********************************************************************
341 * Device identification routine
343 * em_probe determines if the driver should be loaded on
344 * adapter based on PCI vendor/device id of the adapter.
346 * return BUS_PROBE_DEFAULT on success, positive on failure
347 *********************************************************************/
350 lem_probe(device_t dev)
352 char adapter_name[60];
353 u16 pci_vendor_id = 0;
354 u16 pci_device_id = 0;
355 u16 pci_subvendor_id = 0;
356 u16 pci_subdevice_id = 0;
357 em_vendor_info_t *ent;
359 INIT_DEBUGOUT("em_probe: begin");
/* Reject anything that is not an Intel part before touching the table. */
361 pci_vendor_id = pci_get_vendor(dev);
362 if (pci_vendor_id != EM_VENDOR_ID)
365 pci_device_id = pci_get_device(dev);
366 pci_subvendor_id = pci_get_subvendor(dev);
367 pci_subdevice_id = pci_get_subdevice(dev);
/*
 * Walk lem_vendor_info_array (terminated by an all-zero entry);
 * PCI_ANY_ID in a sub-id field acts as a wildcard.
 */
369 ent = lem_vendor_info_array;
370 while (ent->vendor_id != 0) {
371 if ((pci_vendor_id == ent->vendor_id) &&
372 (pci_device_id == ent->device_id) &&
374 ((pci_subvendor_id == ent->subvendor_id) ||
375 (ent->subvendor_id == PCI_ANY_ID)) &&
377 ((pci_subdevice_id == ent->subdevice_id) ||
378 (ent->subdevice_id == PCI_ANY_ID))) {
/* Build "<branding string> <version>" as the device description. */
379 sprintf(adapter_name, "%s %s",
380 lem_strings[ent->index],
382 device_set_desc_copy(dev, adapter_name);
383 return (BUS_PROBE_DEFAULT);
391 /*********************************************************************
392 * Device initialization routine
394 * The attach entry point is called when the driver is being loaded.
395 * This routine identifies the type of hardware, allocates all resources
396 * and initializes the hardware.
398 * return 0 on success, positive on failure
399 *********************************************************************/
402 lem_attach(device_t dev)
404 struct adapter *adapter;
408 INIT_DEBUGOUT("lem_attach: begin");
/* Wire the softc to the device and create the three driver mutexes. */
410 adapter = device_get_softc(dev);
411 adapter->dev = adapter->osdep.dev = dev;
412 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
413 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
414 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Per-device sysctl node for dumping NVM contents. */
417 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
418 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
419 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
420 lem_sysctl_nvm_info, "I", "NVM Information");
/* Callouts are bound to their mutexes so they run locked. */
422 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
423 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
425 /* Determine hardware and mac info */
426 lem_identify_hardware(adapter);
428 /* Setup PCI resources */
429 if (lem_allocate_pci_resources(adapter)) {
430 device_printf(dev, "Allocation of PCI resources failed\n");
435 /* Do Shared Code initialization */
436 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
437 device_printf(dev, "Setup of Shared code failed\n");
442 e1000_get_bus_info(&adapter->hw);
444 /* Set up some sysctls for the tunable interrupt delays */
445 lem_add_int_delay_sysctl(adapter, "rx_int_delay",
446 "receive interrupt delay in usecs", &adapter->rx_int_delay,
447 E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
448 lem_add_int_delay_sysctl(adapter, "tx_int_delay",
449 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
450 E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
/* Absolute-delay and ITR registers only exist on 82540 and newer. */
451 if (adapter->hw.mac.type >= e1000_82540) {
452 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
453 "receive interrupt delay limit in usecs",
454 &adapter->rx_abs_int_delay,
455 E1000_REGISTER(&adapter->hw, E1000_RADV),
456 lem_rx_abs_int_delay_dflt);
457 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
458 "transmit interrupt delay limit in usecs",
459 &adapter->tx_abs_int_delay,
460 E1000_REGISTER(&adapter->hw, E1000_TADV),
461 lem_tx_abs_int_delay_dflt);
462 lem_add_int_delay_sysctl(adapter, "itr",
463 "interrupt delay limit in usecs/4",
465 E1000_REGISTER(&adapter->hw, E1000_ITR),
469 /* Sysctls for limiting the amount of work done in the taskqueue */
470 lem_add_rx_process_limit(adapter, "rx_processing_limit",
471 "max number of rx packets to process", &adapter->rx_process_limit,
472 lem_rx_process_limit);
474 #ifdef NIC_SEND_COMBINING
475 /* Sysctls to control mitigation */
476 lem_add_rx_process_limit(adapter, "sc_enable",
477 "driver TDT mitigation", &adapter->sc_enable, 0);
478 #endif /* NIC_SEND_COMBINING */
479 #ifdef BATCH_DISPATCH
480 lem_add_rx_process_limit(adapter, "batch_enable",
481 "driver rx batch", &adapter->batch_enable, 0);
482 #endif /* BATCH_DISPATCH */
484 lem_add_rx_process_limit(adapter, "rx_retries",
485 "driver rx retries", &adapter->rx_retries, 0);
486 #endif /* NIC_PARAVIRT */
488 /* Sysctl for setting the interface flow control */
489 lem_set_flow_cntrl(adapter, "flow_control",
490 "flow control setting",
491 &adapter->fc_setting, lem_fc_setting);
494 * Validate number of transmit and receive descriptors. It
495 * must not exceed hardware maximum, and must be multiple
496 * of E1000_DBA_ALIGN.
/* Bad tunables fall back to the compile-time defaults with a console note. */
498 if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
499 (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
500 (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
501 (lem_txd < EM_MIN_TXD)) {
502 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
503 EM_DEFAULT_TXD, lem_txd);
504 adapter->num_tx_desc = EM_DEFAULT_TXD;
506 adapter->num_tx_desc = lem_txd;
507 if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
508 (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
509 (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
510 (lem_rxd < EM_MIN_RXD)) {
511 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
512 EM_DEFAULT_RXD, lem_rxd);
513 adapter->num_rx_desc = EM_DEFAULT_RXD;
515 adapter->num_rx_desc = lem_rxd;
/* Default PHY/autoneg policy and RX buffer size. */
517 adapter->hw.mac.autoneg = DO_AUTO_NEG;
518 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
519 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
520 adapter->rx_buffer_len = 2048;
522 e1000_init_script_state_82541(&adapter->hw, TRUE);
523 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
526 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
527 adapter->hw.phy.mdix = AUTO_ALL_MODES;
528 adapter->hw.phy.disable_polarity_correction = FALSE;
529 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
533 * Set the frame limits assuming
534 * standard ethernet sized frames.
536 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
537 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
540 * This controls when hardware reports transmit completion
543 adapter->hw.mac.report_tx_early = 1;
/*
 * Paravirt (NIC_PARAVIRT) support: allocate a one-page Communication
 * Status Block (CSB) shared with the hypervisor and expose its knobs
 * as sysctls, then program its bus address into CSBAH/CSBAL.
 */
546 device_printf(dev, "driver supports paravirt, subdev 0x%x\n",
547 adapter->hw.subsystem_device_id);
548 if (adapter->hw.subsystem_device_id == E1000_PARA_SUBDEV) {
551 device_printf(dev, "paravirt support on dev %p\n", adapter);
552 tsize = 4096; // XXX one page for the csb
553 if (lem_dma_malloc(adapter, tsize, &adapter->csb_mem, BUS_DMA_NOWAIT)) {
554 device_printf(dev, "Unable to allocate csb memory\n");
558 /* Setup the Base of the CSB */
559 adapter->csb = (struct paravirt_csb *)adapter->csb_mem.dma_vaddr;
560 /* force the first kick */
561 adapter->csb->host_need_txkick = 1; /* txring empty */
562 adapter->csb->guest_need_rxkick = 1; /* no rx packets */
563 bus_addr = adapter->csb_mem.dma_paddr;
564 lem_add_rx_process_limit(adapter, "csb_on",
565 "enable paravirt.", &adapter->csb->guest_csb_on, 0);
566 lem_add_rx_process_limit(adapter, "txc_lim",
567 "txc_lim", &adapter->csb->host_txcycles_lim, 1);
570 #define PA_SC(name, var, val) \
571 lem_add_rx_process_limit(adapter, name, name, var, val)
572 PA_SC("host_need_txkick",&adapter->csb->host_need_txkick, 1);
573 PA_SC("host_rxkick_at",&adapter->csb->host_rxkick_at, ~0);
574 PA_SC("guest_need_txkick",&adapter->csb->guest_need_txkick, 0);
575 PA_SC("guest_need_rxkick",&adapter->csb->guest_need_rxkick, 1);
576 PA_SC("tdt_reg_count",&adapter->tdt_reg_count, 0);
577 PA_SC("tdt_csb_count",&adapter->tdt_csb_count, 0);
578 PA_SC("tdt_int_count",&adapter->tdt_int_count, 0);
579 PA_SC("guest_need_kick_count",&adapter->guest_need_kick_count, 0);
580 /* tell the host where the block is */
581 E1000_WRITE_REG(&adapter->hw, E1000_CSBAH,
582 (u32)(bus_addr >> 32));
583 E1000_WRITE_REG(&adapter->hw, E1000_CSBAL,
586 #endif /* NIC_PARAVIRT */
/* DMA-allocate the TX and RX descriptor rings, rounded up for alignment. */
588 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
591 /* Allocate Transmit Descriptor ring */
592 if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
593 device_printf(dev, "Unable to allocate tx_desc memory\n");
597 adapter->tx_desc_base =
598 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
600 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
603 /* Allocate Receive Descriptor ring */
604 if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
605 device_printf(dev, "Unable to allocate rx_desc memory\n");
609 adapter->rx_desc_base =
610 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
612 /* Allocate multicast array memory. */
613 adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
614 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
615 if (adapter->mta == NULL) {
616 device_printf(dev, "Can not allocate multicast setup array\n");
622 ** Start from a known state, this is
623 ** important in reading the nvm and
626 e1000_reset_hw(&adapter->hw);
628 /* Make sure we have a good EEPROM before we read from it */
629 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
631 ** Some PCI-E parts fail the first check due to
632 ** the link being in sleep state, call it again,
633 ** if it fails a second time its a real issue.
635 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
637 "The EEPROM Checksum Is Not Valid\n");
643 /* Copy the permanent MAC address out of the EEPROM */
644 if (e1000_read_mac_addr(&adapter->hw) < 0) {
645 device_printf(dev, "EEPROM read error while reading MAC"
651 if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
652 device_printf(dev, "Invalid MAC address\n");
657 /* Initialize the hardware */
658 if (lem_hardware_init(adapter)) {
659 device_printf(dev, "Unable to initialize the hardware\n");
664 /* Allocate transmit descriptors and buffers */
665 if (lem_allocate_transmit_structures(adapter)) {
666 device_printf(dev, "Could not setup transmit structures\n");
671 /* Allocate receive descriptors and buffers */
672 if (lem_allocate_receive_structures(adapter)) {
673 device_printf(dev, "Could not setup receive structures\n");
679 ** Do interrupt configuration
681 error = lem_allocate_irq(adapter);
686 * Get Wake-on-Lan and Management info for later use
690 /* Setup OS specific network interface */
691 if (lem_setup_interface(dev, adapter) != 0)
694 /* Initialize statistics */
695 lem_update_stats_counters(adapter);
/* Force a link-status query on first timer tick. */
697 adapter->hw.mac.get_link_status = 1;
698 lem_update_link_status(adapter);
700 /* Indicate SOL/IDER usage */
701 if (e1000_check_reset_block(&adapter->hw))
703 "PHY reset is blocked due to SOL/IDER session.\n");
705 /* Do we need workaround for 82544 PCI-X adapter? */
706 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
707 adapter->hw.mac.type == e1000_82544)
708 adapter->pcix_82544 = TRUE;
710 adapter->pcix_82544 = FALSE;
712 /* Register for VLAN events */
713 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
714 lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
715 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
716 lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
718 lem_add_hw_stats(adapter);
720 /* Non-AMT based hardware can now take control from firmware */
721 if (adapter->has_manage && !adapter->has_amt)
722 lem_get_hw_control(adapter);
724 /* Tell the stack that the interface is not active */
725 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
727 adapter->led_dev = led_create(lem_led_func, adapter,
728 device_get_nameunit(dev));
731 lem_netmap_attach(adapter);
732 #endif /* DEV_NETMAP */
733 INIT_DEBUGOUT("lem_attach: end");
/*
 * Error unwind: release resources in reverse order of acquisition
 * (TX structures, hw control, rings, CSB, ifp, PCI resources, mta,
 * mutexes).
 */
738 lem_free_transmit_structures(adapter);
741 lem_release_hw_control(adapter);
742 lem_dma_free(adapter, &adapter->rxdma);
744 lem_dma_free(adapter, &adapter->txdma);
747 lem_dma_free(adapter, &adapter->csb_mem);
749 #endif /* NIC_PARAVIRT */
752 if (adapter->ifp != NULL)
753 if_free(adapter->ifp);
754 lem_free_pci_resources(adapter);
755 free(adapter->mta, M_DEVBUF);
756 EM_TX_LOCK_DESTROY(adapter);
757 EM_RX_LOCK_DESTROY(adapter);
758 EM_CORE_LOCK_DESTROY(adapter);
763 /*********************************************************************
764 * Device removal routine
766 * The detach entry point is called when the driver is being removed.
767 * This routine stops the adapter and deallocates all the resources
768 * that were allocated for driver operation.
770 * return 0 on success, positive on failure
771 *********************************************************************/
774 lem_detach(device_t dev)
776 struct adapter *adapter = device_get_softc(dev);
777 struct ifnet *ifp = adapter->ifp;
779 INIT_DEBUGOUT("em_detach: begin");
781 /* Make sure VLANS are not using driver */
782 if (adapter->ifp->if_vlantrunk != NULL) {
783 device_printf(dev,"Vlan in use, detach first\n");
787 #ifdef DEVICE_POLLING
788 if (ifp->if_capenable & IFCAP_POLLING)
789 ether_poll_deregister(ifp);
792 if (adapter->led_dev != NULL)
793 led_destroy(adapter->led_dev);
/* Stop the adapter under the core lock and mark detach in progress. */
795 EM_CORE_LOCK(adapter);
797 adapter->in_detach = 1;
799 e1000_phy_hw_reset(&adapter->hw);
801 lem_release_manageability(adapter);
803 EM_TX_UNLOCK(adapter);
804 EM_CORE_UNLOCK(adapter);
806 /* Unregister VLAN events */
807 if (adapter->vlan_attach != NULL)
808 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
809 if (adapter->vlan_detach != NULL)
810 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* Detach from the network stack, then drain callouts so none run late. */
812 ether_ifdetach(adapter->ifp);
813 callout_drain(&adapter->timer);
814 callout_drain(&adapter->tx_fifo_timer);
818 #endif /* DEV_NETMAP */
819 lem_free_pci_resources(adapter);
820 bus_generic_detach(dev);
823 lem_free_transmit_structures(adapter);
824 lem_free_receive_structures(adapter);
826 /* Free Transmit Descriptor ring */
827 if (adapter->tx_desc_base) {
828 lem_dma_free(adapter, &adapter->txdma);
829 adapter->tx_desc_base = NULL;
832 /* Free Receive Descriptor ring */
833 if (adapter->rx_desc_base) {
834 lem_dma_free(adapter, &adapter->rxdma);
835 adapter->rx_desc_base = NULL;
/* Paravirt CSB page (NIC_PARAVIRT only). */
840 lem_dma_free(adapter, &adapter->csb_mem);
843 #endif /* NIC_PARAVIRT */
844 lem_release_hw_control(adapter);
845 free(adapter->mta, M_DEVBUF);
846 EM_TX_LOCK_DESTROY(adapter);
847 EM_RX_LOCK_DESTROY(adapter);
848 EM_CORE_LOCK_DESTROY(adapter);
853 /*********************************************************************
855 * Shutdown entry point
857 **********************************************************************/
/* Shutdown simply reuses the suspend path (quiesce + arm wakeup). */
860 lem_shutdown(device_t dev)
862 return lem_suspend(dev);
866 * Suspend/resume device methods.
/*
 * Release manageability/hardware control back to firmware and arm
 * Wake-on-LAN, all under the core lock, then let the bus suspend.
 */
869 lem_suspend(device_t dev)
871 struct adapter *adapter = device_get_softc(dev);
873 EM_CORE_LOCK(adapter);
875 lem_release_manageability(adapter);
876 lem_release_hw_control(adapter);
877 lem_enable_wakeup(dev);
879 EM_CORE_UNLOCK(adapter);
881 return bus_generic_suspend(dev);
/*
 * Resume: re-initialize the adapter and reclaim manageability under
 * the core lock, then let the bus resume children.
 */
885 lem_resume(device_t dev)
887 struct adapter *adapter = device_get_softc(dev);
888 struct ifnet *ifp = adapter->ifp;
890 EM_CORE_LOCK(adapter);
891 lem_init_locked(adapter);
892 lem_init_manageability(adapter);
893 EM_CORE_UNLOCK(adapter);
896 return bus_generic_resume(dev);
/*
 * Drain the interface send queue onto the TX ring.
 * Caller must hold the TX lock (asserted below).
 */
901 lem_start_locked(struct ifnet *ifp)
903 struct adapter *adapter = ifp->if_softc;
906 EM_TX_LOCK_ASSERT(adapter);
/* Only transmit when RUNNING, not OACTIVE, and link is up. */
908 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
911 if (!adapter->link_active)
915 * Force a cleanup if number of TX descriptors
916 * available hits the threshold
918 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
920 /* Now do we at least have a minimal? */
921 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
922 adapter->no_tx_desc_avail1++;
927 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
929 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
933 * Encapsulation can modify our pointer, and or make it
934 * NULL on failure. In that event, we can't requeue.
936 if (lem_xmit(adapter, &m_head)) {
/* Ring full: mark OACTIVE and push the mbuf back on the queue. */
939 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
940 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
944 /* Send a copy of the frame to the BPF listener */
945 ETHER_BPF_MTAP(ifp, m_head);
947 /* Set timeout in case hardware has problems transmitting. */
948 adapter->watchdog_check = TRUE;
949 adapter->watchdog_time = ticks;
951 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
952 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/*
 * Paravirt (NIC_PARAVIRT): if we stalled, ask the host for a TX kick
 * and try one completion pass to possibly free descriptors.
 */
954 if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) && adapter->csb &&
955 adapter->csb->guest_csb_on &&
956 !(adapter->csb->guest_need_txkick & 1)) {
957 adapter->csb->guest_need_txkick = 1;
958 adapter->guest_need_kick_count++;
959 // XXX memory barrier
960 lem_txeof(adapter); // XXX possibly clear IFF_DRV_OACTIVE
962 #endif /* NIC_PARAVIRT */
/*
 * ifnet if_start entry point: take the TX lock and hand off to
 * lem_start_locked() if the interface is running.
 */
968 lem_start(struct ifnet *ifp)
970 struct adapter *adapter = ifp->if_softc;
973 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
974 lem_start_locked(ifp);
975 EM_TX_UNLOCK(adapter);
978 /*********************************************************************
981 * em_ioctl is called when the user wants to configure the
984 * return 0 on success, positive on failure
985 **********************************************************************/
/*
 * lem_ioctl - if_ioctl entry point: handle interface configuration
 * requests (address, MTU, flags, multicast, media, capabilities).
 * Returns 0 on success, positive errno on failure.  Case labels and
 * some returns fall on elided lines; comments mark the visible
 * handler sections.
 */
988 lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
990 struct adapter *adapter = ifp->if_softc;
991 struct ifreq *ifr = (struct ifreq *)data;
992 #if defined(INET) || defined(INET6)
993 struct ifaddr *ifa = (struct ifaddr *)data;
995 bool avoid_reset = FALSE;
/* Refuse configuration while the driver is detaching. */
998 if (adapter->in_detach)
/* SIOCSIFADDR: for INET/INET6 addresses a full reset can be avoided. */
1004 if (ifa->ifa_addr->sa_family == AF_INET)
1008 if (ifa->ifa_addr->sa_family == AF_INET6)
1012 ** Calling init results in link renegotiation,
1013 ** so we avoid doing it when possible.
1016 ifp->if_flags |= IFF_UP;
1017 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1020 if (!(ifp->if_flags & IFF_NOARP))
1021 arp_ifinit(ifp, ifa);
1024 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate the new MTU against the MAC's max frame size. */
1030 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1032 EM_CORE_LOCK(adapter);
1033 switch (adapter->hw.mac.type) {
1035 max_frame_size = ETHER_MAX_LEN;
1038 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1040 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1042 EM_CORE_UNLOCK(adapter);
/* MTU accepted: record it and re-init hardware under the core lock. */
1047 ifp->if_mtu = ifr->ifr_mtu;
1048 adapter->max_frame_size =
1049 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1050 lem_init_locked(adapter);
1051 EM_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: toggle promisc/allmulti in place when already running. */
1055 IOCTL_DEBUGOUT("ioctl rcv'd:\
1056 SIOCSIFFLAGS (Set Interface Flags)");
1057 EM_CORE_LOCK(adapter);
1058 if (ifp->if_flags & IFF_UP) {
1059 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1060 if ((ifp->if_flags ^ adapter->if_flags) &
1061 (IFF_PROMISC | IFF_ALLMULTI)) {
1062 lem_disable_promisc(adapter);
1063 lem_set_promisc(adapter);
1066 lem_init_locked(adapter);
1068 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1069 EM_TX_LOCK(adapter);
1071 EM_TX_UNLOCK(adapter);
/* Remember flags so the next SIOCSIFFLAGS can detect what changed. */
1073 adapter->if_flags = ifp->if_flags;
1074 EM_CORE_UNLOCK(adapter);
/* SIOC(ADD|DEL)MULTI: reload the multicast filter with interrupts off. */
1078 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
1079 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1080 EM_CORE_LOCK(adapter);
1081 lem_disable_intr(adapter);
1082 lem_set_multi(adapter);
/* 82542 rev 2 needs the receive unit re-initialized afterwards. */
1083 if (adapter->hw.mac.type == e1000_82542 &&
1084 adapter->hw.revision_id == E1000_REVISION_2) {
1085 lem_initialize_receive_unit(adapter);
1087 #ifdef DEVICE_POLLING
1088 if (!(ifp->if_capenable & IFCAP_POLLING))
1090 lem_enable_intr(adapter);
1091 EM_CORE_UNLOCK(adapter);
/* SIOCxIFMEDIA: media changes are refused during a SOL/IDER session. */
1095 /* Check SOL/IDER usage */
1096 EM_CORE_LOCK(adapter);
1097 if (e1000_check_reset_block(&adapter->hw)) {
1098 EM_CORE_UNLOCK(adapter);
1099 device_printf(adapter->dev, "Media change is"
1100 " blocked due to SOL/IDER session.\n");
1103 EM_CORE_UNLOCK(adapter);
1105 IOCTL_DEBUGOUT("ioctl rcv'd: \
1106 SIOCxIFMEDIA (Get/Set Interface Media)");
1107 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: mask holds the capability bits the user is toggling. */
1113 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1115 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1116 #ifdef DEVICE_POLLING
1117 if (mask & IFCAP_POLLING) {
1118 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1119 error = ether_poll_register(lem_poll, ifp);
1122 EM_CORE_LOCK(adapter);
1123 lem_disable_intr(adapter);
1124 ifp->if_capenable |= IFCAP_POLLING;
1125 EM_CORE_UNLOCK(adapter);
1127 error = ether_poll_deregister(ifp);
1128 /* Enable interrupt even in error case */
1129 EM_CORE_LOCK(adapter);
1130 lem_enable_intr(adapter);
1131 ifp->if_capenable &= ~IFCAP_POLLING;
1132 EM_CORE_UNLOCK(adapter);
1136 if (mask & IFCAP_HWCSUM) {
1137 ifp->if_capenable ^= IFCAP_HWCSUM;
1140 if (mask & IFCAP_VLAN_HWTAGGING) {
1141 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
/* WOL sub-options only when the hardware advertises WOL at all. */
1144 if ((mask & IFCAP_WOL) &&
1145 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1146 if (mask & IFCAP_WOL_MCAST)
1147 ifp->if_capenable ^= IFCAP_WOL_MCAST;
1148 if (mask & IFCAP_WOL_MAGIC)
1149 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1151 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1153 VLAN_CAPABILITIES(ifp);
/* Anything else falls through to the generic Ethernet ioctl handler. */
1158 error = ether_ioctl(ifp, command, data);
1166 /*********************************************************************
1169 * This routine is used in two ways. It is used by the stack as
1170 * init entry point in network interface structure. It is also used
1171 * by the driver as a hw/sw initialization routine to get to a
1174 * return 0 on success, positive on failure
1175 **********************************************************************/
/*
 * lem_init_locked - hw/sw (re)initialization; caller must hold the
 * core lock (asserted below).  Used both via the stack's init entry
 * (lem_init) and internally by the driver.
 */
1178 lem_init_locked(struct adapter *adapter)
1180 struct ifnet *ifp = adapter->ifp;
1181 device_t dev = adapter->dev;
1184 INIT_DEBUGOUT("lem_init: begin");
1186 EM_CORE_LOCK_ASSERT(adapter);
1188 EM_TX_LOCK(adapter);
1190 EM_TX_UNLOCK(adapter);
1193 * Packet Buffer Allocation (PBA)
1194 * Writing PBA sets the receive portion of the buffer
1195 * the remainder is used for the transmit buffer.
1197 * Devices before the 82547 had a Packet Buffer of 64K.
1198 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1199 * After the 82547 the buffer was reduced to 40K.
1200 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1201 * Note: default does not leave enough room for Jumbo Frame >10k.
1203 switch (adapter->hw.mac.type) {
1205 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1206 if (adapter->max_frame_size > 8192)
1207 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1209 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 only: record TX FIFO geometry for the half-duplex workaround. */
1210 adapter->tx_fifo_head = 0;
1211 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1212 adapter->tx_fifo_size =
1213 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1216 /* Devices before 82547 had a Packet Buffer of 64K. */
1217 if (adapter->max_frame_size > 8192)
1218 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1220 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1223 INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
1224 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1226 /* Get the latest mac address, User can use a LAA */
1227 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1230 /* Put the address into the Receive Address Array */
1231 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1233 /* Initialize the hardware */
1234 if (lem_hardware_init(adapter)) {
1235 device_printf(dev, "Unable to initialize the hardware\n");
1238 lem_update_link_status(adapter);
1240 /* Setup VLAN support, basic and offload if available */
1241 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1243 /* Set hardware offload abilities */
1244 ifp->if_hwassist = 0;
1245 if (adapter->hw.mac.type >= e1000_82543) {
1246 if (ifp->if_capenable & IFCAP_TXCSUM)
1247 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1250 /* Configure for OS presence */
1251 lem_init_manageability(adapter);
1253 /* Prepare transmit descriptors and buffers */
1254 lem_setup_transmit_structures(adapter);
1255 lem_initialize_transmit_unit(adapter);
1257 /* Setup Multicast table */
1258 lem_set_multi(adapter);
1260 /* Prepare receive descriptors and buffers */
1261 if (lem_setup_receive_structures(adapter)) {
1262 device_printf(dev, "Could not setup receive structures\n");
1263 EM_TX_LOCK(adapter);
1265 EM_TX_UNLOCK(adapter);
1268 lem_initialize_receive_unit(adapter);
1270 /* Use real VLAN Filter support? */
1271 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1272 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
1273 /* Use real VLAN Filter support */
1274 lem_setup_vlan_hw_support(adapter);
/* No HW filter: enable VLAN tag handling via CTRL.VME instead. */
1277 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1278 ctrl |= E1000_CTRL_VME;
1279 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1283 /* Don't lose promiscuous settings */
1284 lem_set_promisc(adapter);
/* Mark the interface running and restart the 1 Hz timer. */
1286 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1287 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1289 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1290 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1292 #ifdef DEVICE_POLLING
1294 * Only enable interrupts if we are not polling, make sure
1295 * they are off otherwise.
1297 if (ifp->if_capenable & IFCAP_POLLING)
1298 lem_disable_intr(adapter);
1300 #endif /* DEVICE_POLLING */
1301 lem_enable_intr(adapter);
1303 /* AMT based hardware can now take control from firmware */
1304 if (adapter->has_manage && adapter->has_amt)
1305 lem_get_hw_control(adapter);
/*
 * Body of lem_init (if_init entry point; signature is on an elided
 * line): serialize on the core lock and defer to lem_init_locked().
 */
1311 struct adapter *adapter = arg;
1313 EM_CORE_LOCK(adapter);
1314 lem_init_locked(adapter);
1315 EM_CORE_UNLOCK(adapter);
1319 #ifdef DEVICE_POLLING
1320 /*********************************************************************
1322 * Legacy polling routine
1324 *********************************************************************/
/*
 * lem_poll - DEVICE_POLLING entry point, invoked by the kernel poller
 * instead of interrupts.  On POLL_AND_CHECK_STATUS also re-checks the
 * link, then processes RX and restarts TX under the respective locks.
 */
1326 lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1328 struct adapter *adapter = ifp->if_softc;
1329 u32 reg_icr, rx_done = 0;
1331 EM_CORE_LOCK(adapter);
1332 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1333 EM_CORE_UNLOCK(adapter);
1337 if (cmd == POLL_AND_CHECK_STATUS) {
1338 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* RXSEQ/LSC indicate a possible link change: re-check and re-arm timer. */
1339 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1340 callout_stop(&adapter->timer);
1341 adapter->hw.mac.get_link_status = 1;
1342 lem_update_link_status(adapter);
1343 callout_reset(&adapter->timer, hz,
1344 lem_local_timer, adapter);
1347 EM_CORE_UNLOCK(adapter);
/* Process at most "count" received packets this poll pass. */
1349 lem_rxeof(adapter, count, &rx_done);
1351 EM_TX_LOCK(adapter);
1353 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1354 lem_start_locked(ifp);
1355 EM_TX_UNLOCK(adapter);
1358 #endif /* DEVICE_POLLING */
1360 /*********************************************************************
1362 * Legacy Interrupt Service routine
1364 *********************************************************************/
/*
 * Body of the legacy interrupt handler lem_intr (signature elided).
 * Ignores the interrupt while polling is active or the interface is
 * down; handles link-change causes; then drains RX and restarts TX.
 */
1368 struct adapter *adapter = arg;
1369 struct ifnet *ifp = adapter->ifp;
1373 if ((ifp->if_capenable & IFCAP_POLLING) ||
1374 ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0))
1377 EM_CORE_LOCK(adapter);
1378 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1379 if (reg_icr & E1000_ICR_RXO)
1380 adapter->rx_overruns++;
/* All-ones means the device is gone; zero means not our interrupt. */
1382 if ((reg_icr == 0xffffffff) || (reg_icr == 0)) {
1383 EM_CORE_UNLOCK(adapter);
1387 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1388 callout_stop(&adapter->timer);
1389 adapter->hw.mac.get_link_status = 1;
1390 lem_update_link_status(adapter);
1391 /* Deal with TX cruft when link lost */
1392 lem_tx_purge(adapter);
1393 callout_reset(&adapter->timer, hz,
1394 lem_local_timer, adapter);
1395 EM_CORE_UNLOCK(adapter);
1399 EM_CORE_UNLOCK(adapter);
/* -1: no RX packet limit on the legacy (non-polling) path. */
1400 lem_rxeof(adapter, -1, NULL);
1402 EM_TX_LOCK(adapter);
1404 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1405 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1406 lem_start_locked(ifp);
1407 EM_TX_UNLOCK(adapter);
/*
 * lem_handle_link - taskqueue handler for link-state work: refreshes
 * link status, purges stale TX work, and re-arms the 1 Hz timer.
 */
1413 lem_handle_link(void *context, int pending)
1415 struct adapter *adapter = context;
1416 struct ifnet *ifp = adapter->ifp;
/* Nothing to do if the interface has been stopped meanwhile. */
1418 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1421 EM_CORE_LOCK(adapter);
1422 callout_stop(&adapter->timer);
1423 lem_update_link_status(adapter);
1424 /* Deal with TX cruft when link lost */
1425 lem_tx_purge(adapter);
1426 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1427 EM_CORE_UNLOCK(adapter);
1431 /* Combined RX/TX handler, used by Legacy and MSI */
/*
 * lem_handle_rxtx - deferred RX/TX worker, queued by lem_irq_fast.
 * Processes a bounded batch of RX packets, restarts TX, and either
 * re-queues itself (more work) or re-enables the interrupt.
 */
1433 lem_handle_rxtx(void *context, int pending)
1435 struct adapter *adapter = context;
1436 struct ifnet *ifp = adapter->ifp;
1439 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* "more" true => packets remain beyond rx_process_limit; re-queue. */
1440 bool more = lem_rxeof(adapter, adapter->rx_process_limit, NULL);
1441 EM_TX_LOCK(adapter);
1443 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1444 lem_start_locked(ifp);
1445 EM_TX_UNLOCK(adapter);
1447 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
/* Batch done: re-enable the interrupt that lem_irq_fast masked. */
1452 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1453 lem_enable_intr(adapter);
1456 /*********************************************************************
1458 * Fast Legacy/MSI Combined Interrupt Service routine
1460 *********************************************************************/
/*
 * lem_irq_fast - fast (filter) interrupt handler for legacy/MSI.
 * Claims or rejects the interrupt, masks further interrupts, and
 * defers the real work to the rxtx/link taskqueues.
 */
1462 lem_irq_fast(void *arg)
1464 struct adapter *adapter = arg;
1470 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* All-ones ICR: hardware is no longer present. */
1473 if (reg_icr == 0xffffffff)
1474 return FILTER_STRAY;
1476 /* Definitely not our interrupt. */
1478 return FILTER_STRAY;
1481 * Mask interrupts until the taskqueue is finished running. This is
1482 * cheap, just assume that it is needed. This also works around the
1483 * MSI message reordering errata on certain systems.
1485 lem_disable_intr(adapter);
1486 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1488 /* Link status change */
1489 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1490 adapter->hw.mac.get_link_status = 1;
1491 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1494 if (reg_icr & E1000_ICR_RXO)
1495 adapter->rx_overruns++;
1496 return FILTER_HANDLED;
1500 /*********************************************************************
1502 * Media Ioctl callback
1504 * This routine is called whenever the user queries the status of
1505 * the interface using ifconfig.
1507 **********************************************************************/
/*
 * lem_media_status - ifmedia status callback (ifconfig queries).
 * Refreshes link state under the core lock and reports active media
 * type, speed, and duplex in *ifmr.
 */
1509 lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1511 struct adapter *adapter = ifp->if_softc;
1512 u_char fiber_type = IFM_1000_SX;
1514 INIT_DEBUGOUT("lem_media_status: begin");
1516 EM_CORE_LOCK(adapter);
1517 lem_update_link_status(adapter);
1519 ifmr->ifm_status = IFM_AVALID;
1520 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive and return early. */
1522 if (!adapter->link_active) {
1523 EM_CORE_UNLOCK(adapter);
1527 ifmr->ifm_status |= IFM_ACTIVE;
/* Fiber/serdes is reported as 1G full duplex (LX on the 82545). */
1529 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1530 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1531 if (adapter->hw.mac.type == e1000_82545)
1532 fiber_type = IFM_1000_LX;
1533 ifmr->ifm_active |= fiber_type | IFM_FDX;
/* Copper: translate negotiated speed/duplex into ifmedia bits. */
1535 switch (adapter->link_speed) {
1537 ifmr->ifm_active |= IFM_10_T;
1540 ifmr->ifm_active |= IFM_100_TX;
1543 ifmr->ifm_active |= IFM_1000_T;
1546 if (adapter->link_duplex == FULL_DUPLEX)
1547 ifmr->ifm_active |= IFM_FDX;
1549 ifmr->ifm_active |= IFM_HDX;
1551 EM_CORE_UNLOCK(adapter);
1554 /*********************************************************************
1556 * Media Ioctl callback
1558 * This routine is called when the user changes speed/duplex using
1559 * media/mediaopt option with ifconfig.
1561 **********************************************************************/
/*
 * lem_media_change - ifmedia change callback (ifconfig media/mediaopt).
 * Programs autonegotiation or forced speed/duplex into the shared-code
 * mac/phy state, then re-initializes the adapter to apply it.
 */
1563 lem_media_change(struct ifnet *ifp)
1565 struct adapter *adapter = ifp->if_softc;
1566 struct ifmedia *ifm = &adapter->media;
1568 INIT_DEBUGOUT("lem_media_change: begin");
/* Only Ethernet media words are valid for this driver. */
1570 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1573 EM_CORE_LOCK(adapter);
1574 switch (IFM_SUBTYPE(ifm->ifm_media)) {
/* Auto: advertise the full default set. */
1576 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1577 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
/* 1000 Mb: autonegotiate, advertising 1000/full only. */
1582 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1583 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
/* 100 Mb: forced mode; duplex taken from the media word. */
1586 adapter->hw.mac.autoneg = FALSE;
1587 adapter->hw.phy.autoneg_advertised = 0;
1588 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1589 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1591 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
/* 10 Mb: forced mode; duplex taken from the media word. */
1594 adapter->hw.mac.autoneg = FALSE;
1595 adapter->hw.phy.autoneg_advertised = 0;
1596 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1597 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1599 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1602 device_printf(adapter->dev, "Unsupported media type\n");
1605 lem_init_locked(adapter);
1606 EM_CORE_UNLOCK(adapter);
1611 /*********************************************************************
1613 * This routine maps the mbufs to tx descriptors.
1615 * return 0 on success, positive on failure
1616 **********************************************************************/
/*
 * lem_xmit - map an mbuf chain onto TX descriptors and hand the frame
 * to the hardware.  May modify or NULL *m_headp on failure (caller
 * decides whether to requeue).  Returns 0 on success, positive errno
 * on failure.
 */
1619 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1621 bus_dma_segment_t segs[EM_MAX_SCATTER];
1623 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1624 struct e1000_tx_desc *ctxd = NULL;
1625 struct mbuf *m_head;
1626 u32 txd_upper, txd_lower, txd_used, txd_saved;
1627 int error, nsegs, i, j, first, last = 0;
1630 txd_upper = txd_lower = txd_used = txd_saved = 0;
1633 ** When doing checksum offload, it is critical to
1634 ** make sure the first mbuf has more than header,
1635 ** because that routine expects data to be present.
1637 if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
1638 (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
1639 m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
1646 * Map the packet for DMA
1648 * Capture the first descriptor index,
1649 * this descriptor will have the index
1650 * of the EOP which is the only one that
1651 * now gets a DONE bit writeback.
1653 first = adapter->next_avail_tx_desc;
1654 tx_buffer = &adapter->tx_buffer_area[first];
1655 tx_buffer_mapped = tx_buffer;
1656 map = tx_buffer->map;
1658 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1659 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1662 * There are two types of errors we can (try) to handle:
1663 * - EFBIG means the mbuf chain was too long and bus_dma ran
1664 * out of segments. Defragment the mbuf chain and try again.
1665 * - ENOMEM means bus_dma could not obtain enough bounce buffers
1666 * at this point in time. Defer sending and try again later.
1667 * All other errors, in particular EINVAL, are fatal and prevent the
1668 * mbuf chain from ever going through. Drop it and report error.
1670 if (error == EFBIG) {
1673 m = m_collapse(*m_headp, M_NOWAIT, EM_MAX_SCATTER);
1675 adapter->mbuf_defrag_failed++;
/* Retry the DMA load once with the collapsed chain. */
1683 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1684 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1687 adapter->no_tx_dma_setup++;
1692 } else if (error != 0) {
1693 adapter->no_tx_dma_setup++;
/* Keep 2 descriptors in reserve; bail if this frame would exceed that. */
1697 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1698 adapter->no_tx_desc_avail2++;
1699 bus_dmamap_unload(adapter->txtag, map);
1704 /* Do hardware assists */
1705 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1706 lem_transmit_checksum_setup(adapter, m_head,
1707 &txd_upper, &txd_lower);
1709 i = adapter->next_avail_tx_desc;
1710 if (adapter->pcix_82544)
1713 /* Set up our transmit descriptors */
1714 for (j = 0; j < nsegs; j++) {
1716 bus_addr_t seg_addr;
1717 /* If adapter is 82544 and on PCIX bus */
1718 if(adapter->pcix_82544) {
1719 DESC_ARRAY desc_array;
1720 u32 array_elements, counter;
1722 * Check the Address and Length combination and
1723 * split the data accordingly
1725 array_elements = lem_fill_descriptors(segs[j].ds_addr,
1726 segs[j].ds_len, &desc_array);
1727 for (counter = 0; counter < array_elements; counter++) {
/* Ring full mid-frame: roll back to the saved index and abort. */
1728 if (txd_used == adapter->num_tx_desc_avail) {
1729 adapter->next_avail_tx_desc = txd_saved;
1730 adapter->no_tx_desc_avail2++;
1731 bus_dmamap_unload(adapter->txtag, map);
1734 tx_buffer = &adapter->tx_buffer_area[i];
1735 ctxd = &adapter->tx_desc_base[i];
1736 ctxd->buffer_addr = htole64(
1737 desc_array.descriptor[counter].address);
1738 ctxd->lower.data = htole32(
1739 (adapter->txd_cmd | txd_lower | (u16)
1740 desc_array.descriptor[counter].length));
1742 htole32((txd_upper));
/* Wrap the ring index. */
1744 if (++i == adapter->num_tx_desc)
1746 tx_buffer->m_head = NULL;
1747 tx_buffer->next_eop = -1;
/* Common (non-82544/PCIX) path: one descriptor per DMA segment. */
1751 tx_buffer = &adapter->tx_buffer_area[i];
1752 ctxd = &adapter->tx_desc_base[i];
1753 seg_addr = segs[j].ds_addr;
1754 seg_len = segs[j].ds_len;
1755 ctxd->buffer_addr = htole64(seg_addr);
1756 ctxd->lower.data = htole32(
1757 adapter->txd_cmd | txd_lower | seg_len);
1761 if (++i == adapter->num_tx_desc)
1763 tx_buffer->m_head = NULL;
1764 tx_buffer->next_eop = -1;
1768 adapter->next_avail_tx_desc = i;
1770 if (adapter->pcix_82544)
1771 adapter->num_tx_desc_avail -= txd_used;
1773 adapter->num_tx_desc_avail -= nsegs;
1775 if (m_head->m_flags & M_VLANTAG) {
1776 /* Set the vlan id. */
1777 ctxd->upper.fields.special =
1778 htole16(m_head->m_pkthdr.ether_vtag);
1779 /* Tell hardware to add tag */
1780 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
/* Stash the mbuf on the last buffer; swap maps so "first" keeps its map. */
1783 tx_buffer->m_head = m_head;
1784 tx_buffer_mapped->map = tx_buffer->map;
1785 tx_buffer->map = map;
1786 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1789 * Last Descriptor of Packet
1790 * needs End Of Packet (EOP)
1791 * and Report Status (RS)
1794 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1796 * Keep track in the first buffer which
1797 * descriptor will be written back
1799 tx_buffer = &adapter->tx_buffer_area[first];
1800 tx_buffer->next_eop = last;
1801 adapter->watchdog_time = ticks;
1804 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1805 * that this frame is available to transmit.
1807 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1808 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* NIC_PARAVIRT: publish the new tail via the CSB instead of a register kick. */
1812 adapter->csb->guest_tdt = i;
1813 /* XXX memory barrier ? */
1814 if (adapter->csb->guest_csb_on &&
1815 !(adapter->csb->host_need_txkick & 1)) {
1816 /* XXX maybe useless
1817 * clean the ring. maybe do it before ?
1818 * maybe a little bit of hysteresis ?
1820 if (adapter->num_tx_desc_avail <= 64) {// XXX
1826 #endif /* NIC_PARAVIRT */
1828 #ifdef NIC_SEND_COMBINING
/* Send combining: batch tail updates behind a shadow TDT word. */
1829 if (adapter->sc_enable) {
1830 if (adapter->shadow_tdt & MIT_PENDING_INT) {
1831 /* signal intr and data pending */
1832 adapter->shadow_tdt = MIT_PENDING_TDT | (i & 0xffff);
1835 adapter->shadow_tdt = MIT_PENDING_INT;
1838 #endif /* NIC_SEND_COMBINING */
/* 82547 half-duplex FIFO workaround owns the tail write in that case. */
1840 if (adapter->hw.mac.type == e1000_82547 &&
1841 adapter->link_duplex == HALF_DUPLEX)
1842 lem_82547_move_tail(adapter);
1844 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1845 if (adapter->hw.mac.type == e1000_82547)
1846 lem_82547_update_fifo_head(adapter,
1847 m_head->m_pkthdr.len);
1853 /*********************************************************************
1855 * 82547 workaround to avoid controller hang in half-duplex environment.
1856 * The workaround is to avoid queuing a large packet that would span
1857 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1858 * in this case. We do that only when FIFO is quiescent.
1860 **********************************************************************/
/*
 * lem_82547_move_tail - advance TDT toward the software tail only when
 * the frame will not span the internal TX FIFO boundary; otherwise
 * defer via the tx_fifo_timer callout.  Caller holds the TX lock.
 */
1862 lem_82547_move_tail(void *arg)
1864 struct adapter *adapter = arg;
1865 struct e1000_tx_desc *tx_desc;
1866 u16 hw_tdt, sw_tdt, length = 0;
1869 EM_TX_LOCK_ASSERT(adapter);
1871 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
1872 sw_tdt = adapter->next_avail_tx_desc;
/* Sum the pending descriptor lengths between hardware and software tails. */
1874 while (hw_tdt != sw_tdt) {
1875 tx_desc = &adapter->tx_desc_base[hw_tdt];
1876 length += tx_desc->lower.flags.length;
1877 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1878 if (++hw_tdt == adapter->num_tx_desc)
/* FIFO would wrap mid-frame: retry on the next tick instead. */
1882 if (lem_82547_fifo_workaround(adapter, length)) {
1883 adapter->tx_fifo_wrk_cnt++;
1884 callout_reset(&adapter->tx_fifo_timer, 1,
1885 lem_82547_move_tail, adapter);
1888 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
1889 lem_82547_update_fifo_head(adapter, length);
/*
 * lem_82547_fifo_workaround - decide whether a frame of "len" bytes can
 * be queued without spanning the TX FIFO boundary (half-duplex only).
 * Attempts a FIFO reset when the remaining space is insufficient.
 */
1896 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1898 int fifo_space, fifo_pkt_len;
/* FIFO accounting is done in EM_FIFO_HDR-sized units. */
1900 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1902 if (adapter->link_duplex == HALF_DUPLEX) {
1903 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1905 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1906 if (lem_82547_tx_fifo_reset(adapter))
/*
 * lem_82547_update_fifo_head - advance the software copy of the TX
 * FIFO head by the (rounded-up) frame length, wrapping at FIFO size.
 */
1917 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1919 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1921 /* tx_fifo_head is always 16 byte aligned */
1922 adapter->tx_fifo_head += fifo_pkt_len;
1923 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1924 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * lem_82547_tx_fifo_reset - reset the 82547 TX FIFO pointers, but only
 * when the FIFO is quiescent (tail==head for both the descriptor ring
 * and the FIFO, and the FIFO packet count is zero).
 */
1930 lem_82547_tx_fifo_reset(struct adapter *adapter)
1934 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1935 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1936 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1937 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1938 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1939 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1940 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1941 /* Disable TX unit */
1942 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1943 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1944 tctl & ~E1000_TCTL_EN);
1946 /* Reset FIFO pointers */
1947 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1948 adapter->tx_head_addr);
1949 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1950 adapter->tx_head_addr);
1951 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1952 adapter->tx_head_addr);
1953 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1954 adapter->tx_head_addr);
1956 /* Re-enable TX unit */
1957 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
1958 E1000_WRITE_FLUSH(&adapter->hw);
1960 adapter->tx_fifo_head = 0;
1961 adapter->tx_fifo_reset_cnt++;
/*
 * lem_set_promisc - program RCTL according to the interface's
 * IFF_PROMISC / IFF_ALLMULTI flags.
 */
1971 lem_set_promisc(struct adapter *adapter)
1973 struct ifnet *ifp = adapter->ifp;
1976 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
/* Promiscuous: accept all unicast and multicast. */
1978 if (ifp->if_flags & IFF_PROMISC) {
1979 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1980 /* Turn this on if you want to see bad packets */
1982 reg_rctl |= E1000_RCTL_SBP;
1983 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/* Allmulti: accept all multicast, but not all unicast. */
1984 } else if (ifp->if_flags & IFF_ALLMULTI) {
1985 reg_rctl |= E1000_RCTL_MPE;
1986 reg_rctl &= ~E1000_RCTL_UPE;
1987 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/*
 * lem_disable_promisc - clear the promiscuous RCTL bits, keeping MPE
 * set when the multicast filter is saturated (or IFF_ALLMULTI is on).
 */
1992 lem_disable_promisc(struct adapter *adapter)
1994 struct ifnet *ifp = adapter->ifp;
1998 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1999 reg_rctl &= (~E1000_RCTL_UPE);
2000 if (ifp->if_flags & IFF_ALLMULTI)
2001 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
/* Otherwise count the link-layer multicast memberships. */
2003 struct ifmultiaddr *ifma;
2004 #if __FreeBSD_version < 800000
2007 if_maddr_rlock(ifp);
2009 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2010 if (ifma->ifma_addr->sa_family != AF_LINK)
2012 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2016 #if __FreeBSD_version < 800000
2017 IF_ADDR_UNLOCK(ifp);
2019 if_maddr_runlock(ifp);
2022 /* Don't disable if in MAX groups */
2023 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2024 reg_rctl &= (~E1000_RCTL_MPE);
2025 reg_rctl &= (~E1000_RCTL_SBP);
2026 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2030 /*********************************************************************
2033 * This routine is called whenever multicast address list is updated.
2035 **********************************************************************/
/*
 * lem_set_multi - rebuild the hardware multicast address table from
 * the interface's membership list; falls back to MPE (accept all
 * multicast) when the list exceeds the hardware table.
 */
2038 lem_set_multi(struct adapter *adapter)
2040 struct ifnet *ifp = adapter->ifp;
2041 struct ifmultiaddr *ifma;
2043 u8 *mta; /* Multicast array memory */
2046 IOCTL_DEBUGOUT("lem_set_multi: begin");
2049 bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
/* 82542 rev 2 must be held in RX reset (and MWI off) while updating. */
2051 if (adapter->hw.mac.type == e1000_82542 &&
2052 adapter->hw.revision_id == E1000_REVISION_2) {
2053 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2054 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2055 e1000_pci_clear_mwi(&adapter->hw);
2056 reg_rctl |= E1000_RCTL_RST;
2057 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2061 #if __FreeBSD_version < 800000
2064 if_maddr_rlock(ifp);
/* Copy up to MAX_NUM_MULTICAST_ADDRESSES link-layer addresses. */
2066 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2067 if (ifma->ifma_addr->sa_family != AF_LINK)
2070 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2073 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2074 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2077 #if __FreeBSD_version < 800000
2078 IF_ADDR_UNLOCK(ifp);
2080 if_maddr_runlock(ifp);
/* Table overflow: accept all multicast instead of filtering. */
2082 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2083 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2084 reg_rctl |= E1000_RCTL_MPE;
2085 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2087 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
/* Take the 82542 rev 2 back out of RX reset and restore MWI. */
2089 if (adapter->hw.mac.type == e1000_82542 &&
2090 adapter->hw.revision_id == E1000_REVISION_2) {
2091 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2092 reg_rctl &= ~E1000_RCTL_RST;
2093 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2095 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2096 e1000_pci_set_mwi(&adapter->hw);
2101 /*********************************************************************
2104 * This routine checks for link status and updates statistics.
2106 **********************************************************************/
/*
 * lem_local_timer - 1 Hz callout: refresh link and statistics, run the
 * smartspeed workaround, and check the TX watchdog.  The trailing
 * lines are the watchdog-expiry path (reached via an elided label):
 * log, mark the interface down, and reinitialize.
 */
2109 lem_local_timer(void *arg)
2111 struct adapter *adapter = arg;
2113 EM_CORE_LOCK_ASSERT(adapter);
2115 lem_update_link_status(adapter);
2116 lem_update_stats_counters(adapter);
2118 lem_smartspeed(adapter);
2121 /* recover space if needed */
/* NIC_PARAVIRT: with an active CSB, reclaim descriptors here ourselves. */
2122 if (adapter->csb && adapter->csb->guest_csb_on &&
2123 (adapter->watchdog_check == TRUE) &&
2124 (ticks - adapter->watchdog_time > EM_WATCHDOG) &&
2125 (adapter->num_tx_desc_avail != adapter->num_tx_desc) ) {
2128 * lem_txeof() normally (except when space in the queue
2129 * runs low XXX) cleans watchdog_check so that
2133 #endif /* NIC_PARAVIRT */
2135 * We check the watchdog: the time since
2136 * the last TX descriptor was cleaned.
2137 * This implies a functional TX engine.
2139 if ((adapter->watchdog_check == TRUE) &&
2140 (ticks - adapter->watchdog_time > EM_WATCHDOG))
2143 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
/* Watchdog fired: reset the adapter and count the event. */
2146 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2147 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2148 adapter->watchdog_events++;
2149 lem_init_locked(adapter);
/*
 * lem_update_link_status - query (or use cached) link state per media
 * type, and on a transition update speed/duplex/baudrate and notify
 * the stack via if_link_state_change().
 */
2153 lem_update_link_status(struct adapter *adapter)
2155 struct e1000_hw *hw = &adapter->hw;
2156 struct ifnet *ifp = adapter->ifp;
2157 device_t dev = adapter->dev;
2160 /* Get the cached link value or read phy for real */
2161 switch (hw->phy.media_type) {
2162 case e1000_media_type_copper:
2163 if (hw->mac.get_link_status) {
2164 /* Do the work to read phy */
2165 e1000_check_for_link(hw);
2166 link_check = !hw->mac.get_link_status;
2167 if (link_check) /* ESB2 fix */
2168 e1000_cfg_on_link_up(hw);
2172 case e1000_media_type_fiber:
2173 e1000_check_for_link(hw);
/* Fiber link state is read from the STATUS register. */
2174 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2177 case e1000_media_type_internal_serdes:
2178 e1000_check_for_link(hw);
2179 link_check = adapter->hw.mac.serdes_has_link;
2182 case e1000_media_type_unknown:
2186 /* Now check for a transition */
2187 if (link_check && (adapter->link_active == 0)) {
2188 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2189 &adapter->link_duplex);
2191 device_printf(dev, "Link is up %d Mbps %s\n",
2192 adapter->link_speed,
2193 ((adapter->link_duplex == FULL_DUPLEX) ?
2194 "Full Duplex" : "Half Duplex"));
2195 adapter->link_active = 1;
2196 adapter->smartspeed = 0;
2197 ifp->if_baudrate = adapter->link_speed * 1000000;
2198 if_link_state_change(ifp, LINK_STATE_UP);
2199 } else if (!link_check && (adapter->link_active == 1)) {
2200 ifp->if_baudrate = adapter->link_speed = 0;
2201 adapter->link_duplex = 0;
2203 device_printf(dev, "Link is Down\n");
2204 adapter->link_active = 0;
2205 /* Link down, disable watchdog */
2206 adapter->watchdog_check = FALSE;
2207 if_link_state_change(ifp, LINK_STATE_DOWN);
2211 /*********************************************************************
2213 * This routine disables all traffic on the adapter by issuing a
2214 * global reset on the MAC and deallocates TX/RX buffers.
2216 * This routine should always be called with BOTH the CORE
2218 **********************************************************************/
/*
 * Body of lem_stop (signature elided): disable all traffic, reset the
 * MAC, and mark the interface down.  Requires BOTH the core and TX
 * locks, as asserted below.
 */
2223 struct adapter *adapter = arg;
2224 struct ifnet *ifp = adapter->ifp;
2226 EM_CORE_LOCK_ASSERT(adapter);
2227 EM_TX_LOCK_ASSERT(adapter);
2229 INIT_DEBUGOUT("lem_stop: begin");
2231 lem_disable_intr(adapter);
2232 callout_stop(&adapter->timer);
2233 callout_stop(&adapter->tx_fifo_timer);
2235 /* Tell the stack that the interface is no longer active */
2236 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2238 e1000_reset_hw(&adapter->hw);
/* Clear wakeup control on parts that have the WUC register. */
2239 if (adapter->hw.mac.type >= e1000_82544)
2240 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2242 e1000_led_off(&adapter->hw);
2243 e1000_cleanup_led(&adapter->hw);
2247 /*********************************************************************
2249 * Determine hardware revision.
2251 **********************************************************************/
/*
 * lem_identify_hardware - read PCI IDs into the shared-code hw struct
 * and let e1000_set_mac_type() classify the MAC.
 */
2253 lem_identify_hardware(struct adapter *adapter)
2255 device_t dev = adapter->dev;
2257 /* Make sure our PCI config space has the necessary stuff set */
2258 pci_enable_busmaster(dev);
2259 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2261 /* Save off the information about this board */
2262 adapter->hw.vendor_id = pci_get_vendor(dev);
2263 adapter->hw.device_id = pci_get_device(dev);
2264 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2265 adapter->hw.subsystem_vendor_id =
2266 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2267 adapter->hw.subsystem_device_id =
2268 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2270 /* Do Shared Code Init and Setup */
2271 if (e1000_set_mac_type(&adapter->hw)) {
2272 device_printf(dev, "Setup init failure\n");
/*
 * lem_allocate_pci_resources - map the memory BAR (and, on older
 * parts, locate and map the I/O BAR) and wire up the osdep handles
 * used by the register access macros.
 */
2278 lem_allocate_pci_resources(struct adapter *adapter)
2280 device_t dev = adapter->dev;
2281 int val, rid, error = E1000_SUCCESS;
2284 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2286 if (adapter->memory == NULL) {
2287 device_printf(dev, "Unable to allocate bus resource: memory\n");
2290 adapter->osdep.mem_bus_space_tag =
2291 rman_get_bustag(adapter->memory);
2292 adapter->osdep.mem_bus_space_handle =
2293 rman_get_bushandle(adapter->memory);
2294 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2296 /* Only older adapters use IO mapping */
2297 if (adapter->hw.mac.type > e1000_82543) {
2298 /* Figure our where our IO BAR is ? */
/* Walk the BARs looking for an I/O-type BAR; 64-bit BARs take 2 slots. */
2299 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2300 val = pci_read_config(dev, rid, 4);
2301 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2302 adapter->io_rid = rid;
2306 /* check for 64bit BAR */
2307 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2310 if (rid >= PCIR_CIS) {
2311 device_printf(dev, "Unable to locate IO BAR\n");
2314 adapter->ioport = bus_alloc_resource_any(dev,
2315 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2316 if (adapter->ioport == NULL) {
2317 device_printf(dev, "Unable to allocate bus resource: "
2321 adapter->hw.io_base = 0;
2322 adapter->osdep.io_bus_space_tag =
2323 rman_get_bustag(adapter->ioport);
2324 adapter->osdep.io_bus_space_handle =
2325 rman_get_bushandle(adapter->ioport);
2328 adapter->hw.back = &adapter->osdep;
2333 /*********************************************************************
2335 * Setup the Legacy or MSI Interrupt handler
2337 **********************************************************************/
/* Allocates the single IRQ resource and wires up either a classic (ithread)
 * handler or a fast/filter handler with taskqueue-deferred RX/TX and link
 * processing. NOTE(review): interior error-return lines not visible here. */
2339 lem_allocate_irq(struct adapter *adapter)
2341 device_t dev = adapter->dev;
2344 /* Manually turn off all interrupts */
2345 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2347 /* We allocate a single interrupt resource */
2348 adapter->res[0] = bus_alloc_resource_any(dev,
2349 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2350 if (adapter->res[0] == NULL) {
2351 device_printf(dev, "Unable to allocate bus resource: "
2356 /* Do Legacy setup? */
/* lem_use_legacy_irq selects a full ithread handler (lem_intr) instead of
 * the fast-filter + taskqueue path below. */
2357 if (lem_use_legacy_irq) {
2358 if ((error = bus_setup_intr(dev, adapter->res[0],
2359 INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
2360 &adapter->tag[0])) != 0) {
2362 "Failed to register interrupt handler");
2369 * Use a Fast interrupt and the associated
2370 * deferred processing contexts.
2372 TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
2373 TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
2374 adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
2375 taskqueue_thread_enqueue, &adapter->tq);
2376 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2377 device_get_nameunit(adapter->dev));
/* lem_irq_fast runs as a filter (no ithread); it enqueues the tasks above. */
2378 if ((error = bus_setup_intr(dev, adapter->res[0],
2379 INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
2380 &adapter->tag[0])) != 0) {
2381 device_printf(dev, "Failed to register fast interrupt "
2382 "handler: %d\n", error);
2383 taskqueue_free(adapter->tq);
/* Releases the IRQ handler/resource, the memory BAR, and the I/O BAR in the
 * reverse order of allocation. Safe to call with partially-allocated state:
 * each resource is released only if non-NULL. */
2393 lem_free_pci_resources(struct adapter *adapter)
2395 device_t dev = adapter->dev;
2398 if (adapter->tag[0] != NULL) {
2399 bus_teardown_intr(dev, adapter->res[0],
2401 adapter->tag[0] = NULL;
2404 if (adapter->res[0] != NULL) {
2405 bus_release_resource(dev, SYS_RES_IRQ,
2406 0, adapter->res[0]);
2409 if (adapter->memory != NULL)
2410 bus_release_resource(dev, SYS_RES_MEMORY,
2411 PCIR_BAR(0), adapter->memory);
2413 if (adapter->ioport != NULL)
2414 bus_release_resource(dev, SYS_RES_IOPORT,
2415 adapter->io_rid, adapter->ioport);
2419 /*********************************************************************
2421 * Initialize the hardware to a configuration
2422 * as specified by the adapter structure.
2424 **********************************************************************/
/* Resets the MAC, derives Ethernet flow-control (PAUSE) watermarks from the
 * packet-buffer size, applies the lem_fc_setting tunable, then runs the
 * shared-code hardware init and an initial link check.
 * NOTE(review): function header and rx_buffer_size declaration not visible. */
2426 lem_hardware_init(struct adapter *adapter)
2428 device_t dev = adapter->dev;
2431 INIT_DEBUGOUT("lem_hardware_init: begin");
2433 /* Issue a global reset */
2434 e1000_reset_hw(&adapter->hw);
2436 /* When hardware is reset, fifo_head is also reset */
2437 adapter->tx_fifo_head = 0;
2440 * These parameters control the automatic generation (Tx) and
2441 * response (Rx) to Ethernet PAUSE frames.
2442 * - High water mark should allow for at least two frames to be
2443 * received after sending an XOFF.
2444 * - Low water mark works best when it is very near the high water mark.
2445 * This allows the receiver to restart by sending XON when it has
2446 * drained a bit. Here we use an arbitary value of 1500 which will
2447 * restart after one full frame is pulled from the buffer. There
2448 * could be several smaller frames in the buffer and if so they will
2449 * not trigger the XON until their total number reduces the buffer
2451 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
/* PBA register gives the RX packet-buffer size; watermarks derived from it. */
2453 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
2456 adapter->hw.fc.high_water = rx_buffer_size -
2457 roundup2(adapter->max_frame_size, 1024);
2458 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2460 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2461 adapter->hw.fc.send_xon = TRUE;
2463 /* Set Flow control, use the tunable location if sane */
/* Valid modes are 0..3 (none/rx/tx/full); anything else falls back to none. */
2464 if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
2465 adapter->hw.fc.requested_mode = lem_fc_setting;
2467 adapter->hw.fc.requested_mode = e1000_fc_none;
2469 if (e1000_init_hw(&adapter->hw) < 0) {
2470 device_printf(dev, "Hardware Initialization Failed\n");
2474 e1000_check_for_link(&adapter->hw);
2479 /*********************************************************************
2481 * Setup networking device structure and register an interface.
2483 **********************************************************************/
/* Allocates and populates the ifnet, attaches it to the Ethernet layer,
 * advertises capabilities (checksum offload, VLAN, WOL, polling), and
 * registers the supported ifmedia types with their change/status callbacks.
 * NOTE(review): some interior lines (e.g. local declarations, error returns)
 * are not visible in this chunk. */
2485 lem_setup_interface(device_t dev, struct adapter *adapter)
2489 INIT_DEBUGOUT("lem_setup_interface: begin");
2491 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2493 device_printf(dev, "can not allocate ifnet structure\n");
2496 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2497 ifp->if_init = lem_init;
2498 ifp->if_softc = adapter;
2499 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2500 ifp->if_ioctl = lem_ioctl;
2501 ifp->if_start = lem_start;
/* Cap the software send queue at ring size minus one descriptor. */
2502 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2503 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2504 IFQ_SET_READY(&ifp->if_snd);
2506 ether_ifattach(ifp, adapter->hw.mac.addr);
2508 ifp->if_capabilities = ifp->if_capenable = 0;
/* HW checksum offload exists on 82543 and newer parts only. */
2510 if (adapter->hw.mac.type >= e1000_82543) {
2511 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2512 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2516 * Tell the upper layer(s) we support long frames.
2518 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2519 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2520 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2523 ** Dont turn this on by default, if vlans are
2524 ** created on another pseudo device (eg. lagg)
2525 ** then vlan events are not passed thru, breaking
2526 ** operation, but with HW FILTER off it works. If
2527 ** using vlans directly on the em driver you can
2528 ** enable this and get full hardware tag filtering.
2530 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2532 #ifdef DEVICE_POLLING
2533 ifp->if_capabilities |= IFCAP_POLLING;
2536 /* Enable only WOL MAGIC by default */
2538 ifp->if_capabilities |= IFCAP_WOL;
2539 ifp->if_capenable |= IFCAP_WOL_MAGIC;
2543 * Specify the media types supported by this adapter and register
2544 * callbacks to update media and link information
2546 ifmedia_init(&adapter->media, IFM_IMASK,
2547 lem_media_change, lem_media_status);
2548 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2549 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2550 u_char fiber_type = IFM_1000_SX; /* default type */
/* 82545 fiber parts report LX optics rather than SX. */
2552 if (adapter->hw.mac.type == e1000_82545)
2553 fiber_type = IFM_1000_LX;
2554 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
2556 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2558 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2559 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2561 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2563 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
/* The IFE PHY cannot do 1000BASE-T, so skip gigabit media for it. */
2565 if (adapter->hw.phy.type != e1000_phy_ife) {
2566 ifmedia_add(&adapter->media,
2567 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2568 ifmedia_add(&adapter->media,
2569 IFM_ETHER | IFM_1000_T, 0, NULL);
2572 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2573 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2578 /*********************************************************************
2580 * Workaround for SmartSpeed on 82541 and 82547 controllers
2582 **********************************************************************/
/* Called periodically while link is down: if repeated 1000BASE-T master/slave
 * configuration faults are seen, temporarily disable manual M/S config and
 * restart autonegotiation; after EM_SMARTSPEED_DOWNSHIFT tries, re-enable it
 * (2/3-pair-cable case), and reset the counter at EM_SMARTSPEED_MAX.
 * NOTE(review): phy_tmp declaration and early-return lines not visible. */
2584 lem_smartspeed(struct adapter *adapter)
/* Only applies when link is down, PHY is IGP, autoneg is on, and 1000FDX
 * is being advertised. */
2588 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2589 adapter->hw.mac.autoneg == 0 ||
2590 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2593 if (adapter->smartspeed == 0) {
2594 /* If Master/Slave config fault is asserted twice,
2595 * we assume back-to-back */
2596 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2597 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2599 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2600 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2601 e1000_read_phy_reg(&adapter->hw,
2602 PHY_1000T_CTRL, &phy_tmp);
2603 if(phy_tmp & CR_1000T_MS_ENABLE) {
/* Drop manual master/slave selection and restart autoneg. */
2604 phy_tmp &= ~CR_1000T_MS_ENABLE;
2605 e1000_write_phy_reg(&adapter->hw,
2606 PHY_1000T_CTRL, phy_tmp);
2607 adapter->smartspeed++;
2608 if(adapter->hw.mac.autoneg &&
2609 !e1000_copper_link_autoneg(&adapter->hw) &&
2610 !e1000_read_phy_reg(&adapter->hw,
2611 PHY_CONTROL, &phy_tmp)) {
2612 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2613 MII_CR_RESTART_AUTO_NEG);
2614 e1000_write_phy_reg(&adapter->hw,
2615 PHY_CONTROL, phy_tmp);
2620 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2621 /* If still no link, perhaps using 2/3 pair cable */
2622 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2623 phy_tmp |= CR_1000T_MS_ENABLE;
2624 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2625 if(adapter->hw.mac.autoneg &&
2626 !e1000_copper_link_autoneg(&adapter->hw) &&
2627 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2628 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2629 MII_CR_RESTART_AUTO_NEG);
2630 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2633 /* Restart process after EM_SMARTSPEED_MAX iterations */
2634 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2635 adapter->smartspeed = 0;
2640 * Manage DMA'able memory.
/* bus_dmamap_load callback: stores the first segment's bus address into the
 * bus_addr_t pointed to by arg (single-segment mappings only). */
2643 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2647 *(bus_addr_t *) arg = segs[0].ds_addr;
/* Allocates a coherent, single-segment DMA area of the given size: creates a
 * tag, allocates backing memory, and loads the map; dma_paddr receives the bus
 * address via lem_dmamap_cb. On failure, resources acquired so far are torn
 * down (goto-style unwind; labels are among the lines not visible here). */
2651 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2652 struct em_dma_alloc *dma, int mapflags)
2656 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2657 EM_DBA_ALIGN, 0, /* alignment, bounds */
2658 BUS_SPACE_MAXADDR, /* lowaddr */
2659 BUS_SPACE_MAXADDR, /* highaddr */
2660 NULL, NULL, /* filter, filterarg */
2663 size, /* maxsegsize */
2665 NULL, /* lockfunc */
2669 device_printf(adapter->dev,
2670 "%s: bus_dma_tag_create failed: %d\n",
2675 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2676 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2678 device_printf(adapter->dev,
2679 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2680 __func__, (uintmax_t)size, error);
2685 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2686 size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
/* dma_paddr == 0 after load means the callback never fired — treat as error. */
2687 if (error || dma->dma_paddr == 0) {
2688 device_printf(adapter->dev,
2689 "%s: bus_dmamap_load failed: %d\n",
/* Error-unwind path: release in reverse order of acquisition. */
2697 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2699 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2700 bus_dma_tag_destroy(dma->dma_tag);
2702 dma->dma_tag = NULL;
/* Releases a DMA area created by lem_dma_malloc: sync+unload the map if it
 * was loaded, free the memory, destroy the tag. A NULL dma_tag means nothing
 * was allocated; fields are NULLed so a double call is harmless. */
2708 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2710 if (dma->dma_tag == NULL)
2712 if (dma->dma_paddr != 0) {
2713 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2714 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2715 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2718 if (dma->dma_vaddr != NULL) {
2719 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2720 dma->dma_vaddr = NULL;
2722 bus_dma_tag_destroy(dma->dma_tag);
2723 dma->dma_tag = NULL;
2727 /*********************************************************************
2729 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2730 * the information needed to transmit a packet on the wire.
2732 **********************************************************************/
/* Creates the TX DMA tag (up to EM_MAX_SCATTER segments of MCLBYTES each),
 * allocates the tx_buffer array, and creates one DMA map per descriptor.
 * On any failure it falls through to lem_free_transmit_structures. */
2734 lem_allocate_transmit_structures(struct adapter *adapter)
2736 device_t dev = adapter->dev;
2737 struct em_buffer *tx_buffer;
2741 * Create DMA tags for tx descriptors
2743 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2744 1, 0, /* alignment, bounds */
2745 BUS_SPACE_MAXADDR, /* lowaddr */
2746 BUS_SPACE_MAXADDR, /* highaddr */
2747 NULL, NULL, /* filter, filterarg */
2748 MCLBYTES * EM_MAX_SCATTER, /* maxsize */
2749 EM_MAX_SCATTER, /* nsegments */
2750 MCLBYTES, /* maxsegsize */
2752 NULL, /* lockfunc */
2754 &adapter->txtag)) != 0) {
2755 device_printf(dev, "Unable to allocate TX DMA tag\n");
2759 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2760 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2761 if (adapter->tx_buffer_area == NULL) {
2762 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2767 /* Create the descriptor buffer dma maps */
2768 for (int i = 0; i < adapter->num_tx_desc; i++) {
2769 tx_buffer = &adapter->tx_buffer_area[i];
2770 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2772 device_printf(dev, "Unable to create TX DMA map\n");
/* next_eop == -1 marks "no packet ends at this descriptor". */
2775 tx_buffer->next_eop = -1;
/* Failure path: tear everything down before returning the error. */
2780 lem_free_transmit_structures(adapter);
2784 /*********************************************************************
2786 * (Re)Initialize transmit structures.
2788 **********************************************************************/
/* Resets the TX ring to a clean state: zeroes the descriptors, frees any
 * mbufs still attached to tx_buffers, (re)binds netmap slot buffers when the
 * ring is in netmap mode, resets the ring indices, and syncs the descriptor
 * DMA area for the hardware. Caller must hold the TX lock (see comment). */
2790 lem_setup_transmit_structures(struct adapter *adapter)
2792 struct em_buffer *tx_buffer;
2794 /* we are already locked */
/* slot != NULL iff the ring is currently owned by netmap. */
2795 struct netmap_adapter *na = NA(adapter->ifp);
2796 struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
2797 #endif /* DEV_NETMAP */
2799 /* Clear the old ring contents */
2800 bzero(adapter->tx_desc_base,
2801 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2803 /* Free any existing TX buffers */
2804 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2805 tx_buffer = &adapter->tx_buffer_area[i];
2806 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2807 BUS_DMASYNC_POSTWRITE);
2808 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
/* m_freem(NULL) is a no-op, so no NULL check is needed. */
2809 m_freem(tx_buffer->m_head);
2810 tx_buffer->m_head = NULL;
2813 /* the i-th NIC entry goes to slot si */
2814 int si = netmap_idx_n2k(&na->tx_rings[0], i);
2818 addr = PNMB(na, slot + si, &paddr);
2819 adapter->tx_desc_base[i].buffer_addr = htole64(paddr);
2820 /* reload the map for netmap mode */
2821 netmap_load_map(na, adapter->txtag, tx_buffer->map, addr);
2823 #endif /* DEV_NETMAP */
2824 tx_buffer->next_eop = -1;
/* Reset ring bookkeeping: full ring available, start cleaning/filling at 0. */
2828 adapter->last_hw_offload = 0;
2829 adapter->next_avail_tx_desc = 0;
2830 adapter->next_tx_to_clean = 0;
2831 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2833 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2834 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2839 /*********************************************************************
2841 * Enable transmit unit.
2843 **********************************************************************/
/* Programs the TX descriptor ring registers (base/length/head/tail), the
 * inter-packet-gap and interrupt-delay timers, then enables the transmitter
 * via TCTL. Also seeds adapter->txd_cmd with the per-descriptor flags.
 * NOTE(review): local declarations and switch-case labels not visible. */
2845 lem_initialize_transmit_unit(struct adapter *adapter)
2850 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
2851 /* Setup the Base and Length of the Tx Descriptor Ring */
2852 bus_addr = adapter->txdma.dma_paddr;
2853 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2854 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2855 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2856 (u32)(bus_addr >> 32));
2857 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2859 /* Setup the HW Tx Head and Tail descriptor pointers */
2860 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2861 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
2863 HW_DEBUGOUT2("Base = %x, Length = %x\n",
2864 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
2865 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
2867 /* Set the default values for the Tx Inter Packet Gap timer */
/* TIPG values differ by MAC generation and (for 82543+) media type. */
2868 switch (adapter->hw.mac.type) {
2870 tipg = DEFAULT_82542_TIPG_IPGT;
2871 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2872 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2875 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2876 (adapter->hw.phy.media_type ==
2877 e1000_media_type_internal_serdes))
2878 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2880 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2881 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2882 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2885 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2886 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
/* TADV (absolute interrupt delay) exists only on 82540 and newer. */
2887 if(adapter->hw.mac.type >= e1000_82540)
2888 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
2889 adapter->tx_abs_int_delay.value);
2891 /* Program the Transmit Control Register */
2892 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2893 tctl &= ~E1000_TCTL_CT;
2894 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2895 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2897 /* This write will effectively turn on the transmit unit. */
2898 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2900 /* Setup Transmit Descriptor Base Settings */
2901 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
/* Only request delayed interrupts (IDE) when a TX delay is configured. */
2903 if (adapter->tx_int_delay.value > 0)
2904 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2907 /*********************************************************************
2909 * Free all transmit related data structures.
2911 **********************************************************************/
/* Tears down everything built by lem_allocate_transmit_structures: for each
 * descriptor, unload/free any attached mbuf and destroy its DMA map, then
 * free the tx_buffer array and destroy the TX tag. Safe on partial state. */
2913 lem_free_transmit_structures(struct adapter *adapter)
2915 struct em_buffer *tx_buffer;
2917 INIT_DEBUGOUT("free_transmit_structures: begin");
2919 if (adapter->tx_buffer_area != NULL) {
2920 for (int i = 0; i < adapter->num_tx_desc; i++) {
2921 tx_buffer = &adapter->tx_buffer_area[i];
2922 if (tx_buffer->m_head != NULL) {
2923 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2924 BUS_DMASYNC_POSTWRITE);
2925 bus_dmamap_unload(adapter->txtag,
2927 m_freem(tx_buffer->m_head);
2928 tx_buffer->m_head = NULL;
2929 } else if (tx_buffer->map != NULL)
/* Map may be loaded (e.g. netmap mode) even without an mbuf. */
2930 bus_dmamap_unload(adapter->txtag,
2932 if (tx_buffer->map != NULL) {
2933 bus_dmamap_destroy(adapter->txtag,
2935 tx_buffer->map = NULL;
2939 if (adapter->tx_buffer_area != NULL) {
2940 free(adapter->tx_buffer_area, M_DEVBUF);
2941 adapter->tx_buffer_area = NULL;
2943 if (adapter->txtag != NULL) {
2944 bus_dma_tag_destroy(adapter->txtag);
2945 adapter->txtag = NULL;
2949 /*********************************************************************
2951 * The offload context needs to be set when we transfer the first
2952 * packet of a particular protocol (TCP/UDP). This routine has been
2953 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2955 * Added back the old method of keeping the current context type
2956 * and not setting if unnecessary, as this is reported to be a
2957 * big performance win. -jfv
2958 **********************************************************************/
/* Builds (when needed) a TX context descriptor describing where the hardware
 * should compute IP and TCP/UDP checksums for mbuf mp, and sets the
 * txd_upper/txd_lower option bits for the following data descriptors. A
 * context descriptor consumes one ring entry; it is skipped when the last
 * offload context (adapter->last_hw_offload) already matches.
 * NOTE(review): several interior lines (switch/case labels, returns,
 * declarations) are not visible in this chunk. */
2960 lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2961 u32 *txd_upper, u32 *txd_lower)
2963 struct e1000_context_desc *TXD = NULL;
2964 struct em_buffer *tx_buffer;
2965 struct ether_vlan_header *eh;
2966 struct ip *ip = NULL;
2967 struct ip6_hdr *ip6;
2968 int curr_txd, ehdrlen;
2969 u32 cmd, hdr_len, ip_hlen;
2974 cmd = hdr_len = ipproto = 0;
2975 *txd_upper = *txd_lower = 0;
2976 curr_txd = adapter->next_avail_tx_desc;
2979 * Determine where frame payload starts.
2980 * Jump over vlan headers if already present,
2981 * helpful for QinQ too.
2983 eh = mtod(mp, struct ether_vlan_header *);
2984 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2985 etype = ntohs(eh->evl_proto);
2986 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2988 etype = ntohs(eh->evl_encap_proto);
2989 ehdrlen = ETHER_HDR_LEN;
2993 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
2994 * TODO: Support SCTP too when it hits the tree.
/* IPv4 branch: optionally emit the IP-header-checksum part of the context. */
2998 ip = (struct ip *)(mp->m_data + ehdrlen);
2999 ip_hlen = ip->ip_hl << 2;
3001 /* Setup of IP header checksum. */
3002 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3004 * Start offset for header checksum calculation.
3005 * End offset for header checksum calculation.
3006 * Offset of place to put the checksum.
3008 TXD = (struct e1000_context_desc *)
3009 &adapter->tx_desc_base[curr_txd];
3010 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3011 TXD->lower_setup.ip_fields.ipcse =
3012 htole16(ehdrlen + ip_hlen);
3013 TXD->lower_setup.ip_fields.ipcso =
3014 ehdrlen + offsetof(struct ip, ip_sum);
3015 cmd |= E1000_TXD_CMD_IP;
3016 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3019 hdr_len = ehdrlen + ip_hlen;
3023 case ETHERTYPE_IPV6:
3024 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3025 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3027 /* IPv6 doesn't have a header checksum. */
3029 hdr_len = ehdrlen + ip_hlen;
3030 ipproto = ip6->ip6_nxt;
/* L4 branch: TCP context setup. */
3039 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3040 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3041 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3042 /* no need for context if already set */
3043 if (adapter->last_hw_offload == CSUM_TCP)
3045 adapter->last_hw_offload = CSUM_TCP;
3047 * Start offset for payload checksum calculation.
3048 * End offset for payload checksum calculation.
3049 * Offset of place to put the checksum.
3051 TXD = (struct e1000_context_desc *)
3052 &adapter->tx_desc_base[curr_txd];
3053 TXD->upper_setup.tcp_fields.tucss = hdr_len;
/* tucse == 0 means "checksum to end of packet". */
3054 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3055 TXD->upper_setup.tcp_fields.tucso =
3056 hdr_len + offsetof(struct tcphdr, th_sum);
3057 cmd |= E1000_TXD_CMD_TCP;
/* L4 branch: UDP context setup (no E1000_TXD_CMD_TCP bit). */
3062 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3063 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3064 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3065 /* no need for context if already set */
3066 if (adapter->last_hw_offload == CSUM_UDP)
3068 adapter->last_hw_offload = CSUM_UDP;
3070 * Start offset for header checksum calculation.
3071 * End offset for header checksum calculation.
3072 * Offset of place to put the checksum.
3074 TXD = (struct e1000_context_desc *)
3075 &adapter->tx_desc_base[curr_txd];
3076 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3077 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3078 TXD->upper_setup.tcp_fields.tucso =
3079 hdr_len + offsetof(struct udphdr, uh_sum);
/* Finalize the context descriptor and consume one ring entry for it. */
3089 TXD->tcp_seg_setup.data = htole32(0);
3090 TXD->cmd_and_length =
3091 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3092 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3093 tx_buffer->m_head = NULL;
3094 tx_buffer->next_eop = -1;
3096 if (++curr_txd == adapter->num_tx_desc)
3099 adapter->num_tx_desc_avail--;
3100 adapter->next_avail_tx_desc = curr_txd;
3104 /**********************************************************************
3106 * Examine each tx_buffer in the used queue. If the hardware is done
3107 * processing the packet then free associated resources. The
3108 * tx_buffer is put back on the free queue.
3110 **********************************************************************/
/* TX completion: walks the ring from next_tx_to_clean, and for each packet
 * whose end-of-packet descriptor has the DD (descriptor-done) status bit set,
 * clears the descriptors and frees the attached mbufs, advancing the clean
 * pointer and the available-descriptor count. Must be called with the TX
 * lock held. NOTE(review): several interior lines (the `done` computation,
 * packet counters, watchdog-reset lines) are not visible in this chunk. */
3112 lem_txeof(struct adapter *adapter)
3114 int first, last, done, num_avail;
3115 struct em_buffer *tx_buffer;
3116 struct e1000_tx_desc *tx_desc, *eop_desc;
3117 struct ifnet *ifp = adapter->ifp;
3119 EM_TX_LOCK_ASSERT(adapter);
/* In netmap mode completions are handled by the netmap TX irq path. */
3122 if (netmap_tx_irq(ifp, 0))
3124 #endif /* DEV_NETMAP */
/* Nothing outstanding — ring already fully clean. */
3125 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3128 num_avail = adapter->num_tx_desc_avail;
3129 first = adapter->next_tx_to_clean;
3130 tx_desc = &adapter->tx_desc_base[first];
3131 tx_buffer = &adapter->tx_buffer_area[first];
3132 last = tx_buffer->next_eop;
3133 eop_desc = &adapter->tx_desc_base[last];
3136 * What this does is get the index of the
3137 * first descriptor AFTER the EOP of the
3138 * first packet, that way we can do the
3139 * simple comparison on the inner while loop.
3141 if (++last == adapter->num_tx_desc)
3145 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3146 BUS_DMASYNC_POSTREAD);
/* Outer loop: one iteration per completed packet (DD set on its EOP). */
3148 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3149 /* We clean the range of the packet */
3150 while (first != done) {
3151 tx_desc->upper.data = 0;
3152 tx_desc->lower.data = 0;
3153 tx_desc->buffer_addr = 0;
3156 if (tx_buffer->m_head) {
3158 bus_dmamap_sync(adapter->txtag,
3160 BUS_DMASYNC_POSTWRITE);
3161 bus_dmamap_unload(adapter->txtag,
3164 m_freem(tx_buffer->m_head);
3165 tx_buffer->m_head = NULL;
3167 tx_buffer->next_eop = -1;
/* Progress observed — refresh the watchdog timestamp. */
3168 adapter->watchdog_time = ticks;
3170 if (++first == adapter->num_tx_desc)
3173 tx_buffer = &adapter->tx_buffer_area[first];
3174 tx_desc = &adapter->tx_desc_base[first];
3176 /* See if we can continue to the next packet */
3177 last = tx_buffer->next_eop;
3179 eop_desc = &adapter->tx_desc_base[last];
3180 /* Get new done point */
3181 if (++last == adapter->num_tx_desc) last = 0;
3186 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3187 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3189 adapter->next_tx_to_clean = first;
3190 adapter->num_tx_desc_avail = num_avail;
3192 #ifdef NIC_SEND_COMBINING
/* Optional VM optimization: a TDT write deferred by lem_start is issued now. */
3193 if ((adapter->shadow_tdt & MIT_PENDING_TDT) == MIT_PENDING_TDT) {
3194 /* a tdt write is pending, do it */
3195 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0),
3196 0xffff & adapter->shadow_tdt);
3197 adapter->shadow_tdt = MIT_PENDING_INT;
3199 adapter->shadow_tdt = 0; // disable
3201 #endif /* NIC_SEND_COMBINING */
3203 * If we have enough room, clear IFF_DRV_OACTIVE to
3204 * tell the stack that it is OK to send packets.
3205 * If there are no pending descriptors, clear the watchdog.
3207 if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
3208 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3210 if (adapter->csb) { // XXX also csb_on ?
3211 adapter->csb->guest_need_txkick = 2; /* acked */
3212 // XXX memory barrier
3214 #endif /* NIC_PARAVIRT */
3215 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
3216 adapter->watchdog_check = FALSE;
3222 /*********************************************************************
3224 * When Link is lost sometimes there is work still in the TX ring
3225 * which may result in a watchdog, rather than allow that we do an
3226 * attempted cleanup and then reinit here. Note that this has been
3227 * seens mostly with fiber adapters.
3229 **********************************************************************/
/* Link-loss recovery: when the link is down but the watchdog says work is
 * still pending, run a TX cleanup under the TX lock; if descriptors remain
 * outstanding afterwards, reinitialize the whole adapter. */
3231 lem_tx_purge(struct adapter *adapter)
3233 if ((!adapter->link_active) && (adapter->watchdog_check)) {
3234 EM_TX_LOCK(adapter);
3236 EM_TX_UNLOCK(adapter);
3237 if (adapter->watchdog_check) /* Still outstanding? */
3238 lem_init_locked(adapter);
3242 /*********************************************************************
3244 * Get a buffer from system mbuf buffer pool.
3246 **********************************************************************/
/* Refills RX descriptor i: grabs a cluster mbuf, DMA-maps it via the spare
 * map (swapped with the slot's map on success), and writes the new bus
 * address into the RX descriptor. NOTE(review): error-return lines and the
 * map/nsegs declarations are not visible in this chunk. */
3248 lem_get_buf(struct adapter *adapter, int i)
3251 bus_dma_segment_t segs[1];
3253 struct em_buffer *rx_buffer;
3256 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
/* Allocation failure is counted; caller presumably retries later. */
3258 adapter->mbuf_cluster_failed++;
3261 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Align the IP header when the frame still fits in the cluster. */
3263 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3264 m_adj(m, ETHER_ALIGN);
3267 * Using memory from the mbuf cluster pool, invoke the
3268 * bus_dma machinery to arrange the memory mapping.
3270 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3271 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3277 /* If nsegs is wrong then the stack is corrupt. */
3278 KASSERT(nsegs == 1, ("Too many segments returned!"));
3280 rx_buffer = &adapter->rx_buffer_area[i];
3281 if (rx_buffer->m_head != NULL)
3282 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap the freshly-loaded spare map into the slot; old map becomes spare. */
3284 map = rx_buffer->map;
3285 rx_buffer->map = adapter->rx_sparemap;
3286 adapter->rx_sparemap = map;
3287 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3288 rx_buffer->m_head = m;
3290 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3294 /*********************************************************************
3296 * Allocate memory for rx_buffer structures. Since we use one
3297 * rx_buffer per received packet, the maximum number of rx_buffer's
3298 * that we'll need is equal to the number of receive descriptors
3299 * that we've allocated.
3301 **********************************************************************/
/* Allocates the rx_buffer array, creates the single-segment RX DMA tag
 * (MCLBYTES max), the spare map used by lem_get_buf, and one map per
 * descriptor. On any failure it falls through to
 * lem_free_receive_structures. */
3303 lem_allocate_receive_structures(struct adapter *adapter)
3305 device_t dev = adapter->dev;
3306 struct em_buffer *rx_buffer;
3309 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3310 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3311 if (adapter->rx_buffer_area == NULL) {
3312 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3316 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3317 1, 0, /* alignment, bounds */
3318 BUS_SPACE_MAXADDR, /* lowaddr */
3319 BUS_SPACE_MAXADDR, /* highaddr */
3320 NULL, NULL, /* filter, filterarg */
3321 MCLBYTES, /* maxsize */
3323 MCLBYTES, /* maxsegsize */
3325 NULL, /* lockfunc */
3329 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3334 /* Create the spare map (used by getbuf) */
3335 error = bus_dmamap_create(adapter->rxtag, 0, &adapter->rx_sparemap);
3337 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3342 rx_buffer = adapter->rx_buffer_area;
3343 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3344 error = bus_dmamap_create(adapter->rxtag, 0, &rx_buffer->map);
3346 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* Failure path: release everything allocated so far. */
3355 lem_free_receive_structures(adapter);
3359 /*********************************************************************
3361 * (Re)initialize receive structures.
3363 **********************************************************************/
/* Resets the RX ring: zeroes the descriptors, frees all currently attached
 * mbufs, then repopulates every slot — from netmap slot buffers when the
 * ring is in netmap mode, otherwise via lem_get_buf — and syncs the
 * descriptor area. Caller must hold the lock (see comment below).
 * NOTE(review): some interior lines (error handling, rx_buffer re-init) are
 * not visible in this chunk. */
3365 lem_setup_receive_structures(struct adapter *adapter)
3367 struct em_buffer *rx_buffer;
3370 /* we are already under lock */
/* slot != NULL iff the ring is currently owned by netmap. */
3371 struct netmap_adapter *na = NA(adapter->ifp);
3372 struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
3375 /* Reset descriptor ring */
3376 bzero(adapter->rx_desc_base,
3377 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3379 /* Free current RX buffers. */
3380 rx_buffer = adapter->rx_buffer_area;
3381 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3382 if (rx_buffer->m_head != NULL) {
3383 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3384 BUS_DMASYNC_POSTREAD);
3385 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3386 m_freem(rx_buffer->m_head);
3387 rx_buffer->m_head = NULL;
3391 /* Allocate new ones. */
3392 for (i = 0; i < adapter->num_rx_desc; i++) {
3395 /* the i-th NIC entry goes to slot si */
3396 int si = netmap_idx_n2k(&na->rx_rings[0], i);
3400 addr = PNMB(na, slot + si, &paddr);
3401 netmap_load_map(na, adapter->rxtag, rx_buffer->map, addr);
3402 /* Update descriptor */
3403 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
3406 #endif /* DEV_NETMAP */
3407 error = lem_get_buf(adapter, i);
3412 /* Setup our descriptor pointers */
3413 adapter->next_rx_desc_to_check = 0;
3414 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3415 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3420 /*********************************************************************
3422 * Enable receive unit.
3424 **********************************************************************/
/*
 * lem_initialize_receive_unit - program the hardware receive unit.
 *
 * Sequence: disable receives, program interrupt moderation (82540+),
 * set ring base/length, build RCTL (buffer size, loopback off, broadcast
 * accept, multicast filter offset), optionally enable RX checksum offload,
 * re-enable receives, and finally set the head/tail pointers.
 *
 * NOTE(review): partial extraction — locals (rctl, rxcsum, bus_addr),
 * braces and some register writes (e.g. the RDBAL low-dword argument)
 * are not visible in this chunk.
 */
3427 lem_initialize_receive_unit(struct adapter *adapter)
3429 struct ifnet *ifp = adapter->ifp;
3433 INIT_DEBUGOUT("lem_initialize_receive_unit: begin");
3436 * Make sure receives are disabled while setting
3437 * up the descriptor ring
3439 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3440 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3442 if (adapter->hw.mac.type >= e1000_82540) {
/* Absolute RX interrupt delay; only supported on 82540 and newer. */
3443 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
3444 adapter->rx_abs_int_delay.value);
3446 * Set the interrupt throttling rate. Value is calculated
3447 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3449 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
3452 /* Setup the Base and Length of the Rx Descriptor Ring */
3453 bus_addr = adapter->rxdma.dma_paddr;
3454 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3455 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3456 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3457 (u32)(bus_addr >> 32));
3458 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3461 /* Setup the Receive Control Register */
3462 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3463 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3464 E1000_RCTL_RDMTS_HALF |
3465 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3467 /* Make sure VLAN Filters are off */
3468 rctl &= ~E1000_RCTL_VFE;
/* 82543 TBI workaround: accept bad packets when store-bad-packets is on. */
3470 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3471 rctl |= E1000_RCTL_SBP;
3473 rctl &= ~E1000_RCTL_SBP;
/* Pick the RX buffer size; sizes above 2K need the BSEX extension bit. */
3475 switch (adapter->rx_buffer_len) {
3478 rctl |= E1000_RCTL_SZ_2048;
3481 rctl |= E1000_RCTL_SZ_4096 |
3482 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3485 rctl |= E1000_RCTL_SZ_8192 |
3486 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3489 rctl |= E1000_RCTL_SZ_16384 |
3490 E1000_RCTL_BSEX | E1000_RCTL_LPE;
/* Long Packet Enable follows the configured MTU. */
3494 if (ifp->if_mtu > ETHERMTU)
3495 rctl |= E1000_RCTL_LPE;
3497 rctl &= ~E1000_RCTL_LPE;
3499 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
3500 if ((adapter->hw.mac.type >= e1000_82543) &&
3501 (ifp->if_capenable & IFCAP_RXCSUM)) {
3502 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3503 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3504 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3507 /* Enable Receives */
3508 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3511 * Setup the HW Rx Head and
3512 * Tail Descriptor Pointers
3514 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
/* rctl is reused here as a scratch variable for the tail index. */
3515 rctl = adapter->num_rx_desc - 1; /* default RDT value */
3517 /* preserve buffers already made available to clients */
3518 if (ifp->if_capenable & IFCAP_NETMAP)
3519 rctl -= nm_kr_rxspace(&NA(adapter->ifp)->rx_rings[0]);
3520 #endif /* DEV_NETMAP */
3521 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rctl);
3526 /*********************************************************************
3528 * Free receive related data structures.
3530 **********************************************************************/
/*
 * lem_free_receive_structures - tear down all RX software state.
 *
 * Destroys the spare DMA map, then for each RX slot: syncs/unloads the
 * DMA map, frees any attached mbuf, and destroys the map.  Finally frees
 * the rx_buffer_area array and the RX DMA tag.  Safe to call when parts
 * were never allocated (every step is NULL-guarded).
 *
 * NOTE(review): partial extraction; loop braces and the map argument of
 * some bus_dmamap_* calls are not visible here.
 */
3532 lem_free_receive_structures(struct adapter *adapter)
3534 struct em_buffer *rx_buffer;
3537 INIT_DEBUGOUT("free_receive_structures: begin");
3539 if (adapter->rx_sparemap) {
3540 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3541 adapter->rx_sparemap = NULL;
3544 /* Cleanup any existing buffers */
3545 if (adapter->rx_buffer_area != NULL) {
3546 rx_buffer = adapter->rx_buffer_area;
3547 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3548 if (rx_buffer->m_head != NULL) {
3549 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3550 BUS_DMASYNC_POSTREAD);
3551 bus_dmamap_unload(adapter->rxtag,
3553 m_freem(rx_buffer->m_head);
3554 rx_buffer->m_head = NULL;
/* No mbuf attached: still unload the map in case it holds a mapping. */
3555 } else if (rx_buffer->map != NULL)
3556 bus_dmamap_unload(adapter->rxtag,
3558 if (rx_buffer->map != NULL) {
3559 bus_dmamap_destroy(adapter->rxtag,
3561 rx_buffer->map = NULL;
3566 if (adapter->rx_buffer_area != NULL) {
3567 free(adapter->rx_buffer_area, M_DEVBUF);
3568 adapter->rx_buffer_area = NULL;
3571 if (adapter->rxtag != NULL) {
3572 bus_dma_tag_destroy(adapter->rxtag);
3573 adapter->rxtag = NULL;
3577 /*********************************************************************
3579 * This routine executes in interrupt context. It replenishes
3580 * the mbufs in the descriptor and sends data which has been
3581 * dma'ed into host memory to upper layer.
3583 * We loop at most count times if count is > 0, or until done if
3586 * For polling we also now return the number of cleaned packets
3587 *********************************************************************/
/*
 * lem_rxeof - receive-completion processing (runs in interrupt context).
 *
 * Walks the RX ring starting at next_rx_desc_to_check, reassembling
 * multi-descriptor frames into an mbuf chain (fmp/lmp), stripping the
 * trailing CRC, applying the 82543 TBI workaround for errored frames,
 * and handing completed packets to the stack via if_input (with the RX
 * lock dropped around the upcall).  At most 'count' descriptors are
 * processed when count > 0; *done (if non-NULL) presumably reports the
 * number of packets cleaned for polling - TODO confirm, the store is
 * not visible in this extraction.
 *
 * Optional paths: DEV_NETMAP short-circuits to netmap_rx_irq();
 * NIC_PARAVIRT exchanges kicks with the hypervisor through the CSB;
 * BATCH_DISPATCH queues packets (mh/mt) and delivers them in one burst
 * after the loop.
 *
 * NOTE(review): heavily gap-riddled extraction - locals (i, retries,
 * mp, rx_sent, last_byte...), many braces, break/goto statements and
 * the accept_frame branch structure are not visible.  Do not infer
 * exact control flow from what remains.
 */
3589 lem_rxeof(struct adapter *adapter, int count, int *done)
3591 struct ifnet *ifp = adapter->ifp;
3593 u8 status = 0, accept_frame = 0, eop = 0;
3594 u16 len, desc_len, prev_len_adj;
3596 struct e1000_rx_desc *current_desc;
3598 #ifdef BATCH_DISPATCH
3599 struct mbuf *mh = NULL, *mt = NULL;
3600 #endif /* BATCH_DISPATCH */
3603 struct paravirt_csb* csb = adapter->csb;
3604 int csb_mode = csb && csb->guest_csb_on;
3606 //ND("clear guest_rxkick at %d", adapter->next_rx_desc_to_check);
3607 if (csb_mode && csb->guest_need_rxkick)
3608 csb->guest_need_rxkick = 0;
3609 #endif /* NIC_PARAVIRT */
3610 EM_RX_LOCK(adapter);
3612 #ifdef BATCH_DISPATCH
3614 #endif /* BATCH_DISPATCH */
3615 i = adapter->next_rx_desc_to_check;
3616 current_desc = &adapter->rx_desc_base[i];
/* Make descriptor writes by the NIC visible to the CPU before reading. */
3617 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3618 BUS_DMASYNC_POSTREAD);
/* Netmap owns the ring: let netmap handle the interrupt and bail out. */
3621 if (netmap_rx_irq(ifp, 0, &rx_sent)) {
3622 EM_RX_UNLOCK(adapter);
3625 #endif /* DEV_NETMAP */
3627 #if 1 // XXX optimization ?
/* Fast path: nothing ready (Descriptor Done bit clear) - return early. */
3628 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3631 EM_RX_UNLOCK(adapter);
3636 while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3637 struct mbuf *m = NULL;
3639 status = current_desc->status;
3640 if ((status & E1000_RXD_STAT_DD) == 0) {
3643 /* buffer not ready yet. Retry a few times before giving up */
3644 if (++retries <= adapter->rx_retries) {
3647 if (csb->guest_need_rxkick == 0) {
3648 // ND("set guest_rxkick at %d", adapter->next_rx_desc_to_check);
3649 csb->guest_need_rxkick = 1;
3650 // XXX memory barrier, status volatile ?
3651 continue; /* double check */
3654 /* no buffer ready, give up */
3655 #endif /* NIC_PARAVIRT */
3660 if (csb->guest_need_rxkick)
3661 // ND("clear again guest_rxkick at %d", adapter->next_rx_desc_to_check);
3662 csb->guest_need_rxkick = 0;
3665 #endif /* NIC_PARAVIRT */
3667 mp = adapter->rx_buffer_area[i].m_head;
3669 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3670 * needs to access the last received byte in the mbuf.
3672 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3673 BUS_DMASYNC_POSTREAD);
3677 desc_len = le16toh(current_desc->length);
3678 if (status & E1000_RXD_STAT_EOP) {
/* Last descriptor of the frame: strip the 4-byte Ethernet CRC.  If this
 * fragment is shorter than the CRC, the overhang must be trimmed from
 * the previous mbuf in the chain (prev_len_adj). */
3681 if (desc_len < ETHER_CRC_LEN) {
3683 prev_len_adj = ETHER_CRC_LEN - desc_len;
3685 len = desc_len - ETHER_CRC_LEN;
/* Errored frame: 82543 TBI workaround may still accept it (carrier
 * extension false positives); adjust stats accordingly. */
3691 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3693 u32 pkt_len = desc_len;
3695 if (adapter->fmp != NULL)
3696 pkt_len += adapter->fmp->m_pkthdr.len;
3698 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3699 if (TBI_ACCEPT(&adapter->hw, status,
3700 current_desc->errors, pkt_len, last_byte,
3701 adapter->min_frame_size, adapter->max_frame_size)) {
3702 e1000_tbi_adjust_stats_82543(&adapter->hw,
3703 &adapter->stats, pkt_len,
3704 adapter->hw.mac.addr,
3705 adapter->max_frame_size);
/* Replace the consumed buffer before handing mp up the stack. */
3713 if (lem_get_buf(adapter, i) != 0) {
3718 /* Assign correct length to the current fragment */
3721 if (adapter->fmp == NULL) {
3722 mp->m_pkthdr.len = len;
3723 adapter->fmp = mp; /* Store the first mbuf */
3726 /* Chain mbuf's together */
3727 mp->m_flags &= ~M_PKTHDR;
3729 * Adjust length of previous mbuf in chain if
3730 * we received less than 4 bytes in the last
3733 if (prev_len_adj > 0) {
3734 adapter->lmp->m_len -= prev_len_adj;
3735 adapter->fmp->m_pkthdr.len -=
3738 adapter->lmp->m_next = mp;
3739 adapter->lmp = adapter->lmp->m_next;
3740 adapter->fmp->m_pkthdr.len += len;
3744 adapter->fmp->m_pkthdr.rcvif = ifp;
3746 lem_receive_checksum(adapter, current_desc,
3748 #ifndef __NO_STRICT_ALIGNMENT
3749 if (adapter->max_frame_size >
3750 (MCLBYTES - ETHER_ALIGN) &&
3751 lem_fixup_rx(adapter) != 0)
3754 if (status & E1000_RXD_STAT_VP) {
3755 adapter->fmp->m_pkthdr.ether_vtag =
3756 le16toh(current_desc->special);
3757 adapter->fmp->m_flags |= M_VLANTAG;
3759 #ifndef __NO_STRICT_ALIGNMENT
3763 adapter->fmp = NULL;
3764 adapter->lmp = NULL;
/* Rejected frame: recycle the still-mapped buffer and drop any
 * partially assembled chain. */
3767 adapter->dropped_pkts++;
3769 /* Reuse loaded DMA map and just update mbuf chain */
3770 mp = adapter->rx_buffer_area[i].m_head;
3771 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3772 mp->m_data = mp->m_ext.ext_buf;
3774 if (adapter->max_frame_size <=
3775 (MCLBYTES - ETHER_ALIGN))
3776 m_adj(mp, ETHER_ALIGN);
3777 if (adapter->fmp != NULL) {
3778 m_freem(adapter->fmp);
3779 adapter->fmp = NULL;
3780 adapter->lmp = NULL;
3785 /* Zero out the receive descriptors status. */
3786 current_desc->status = 0;
3787 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3788 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3792 /* the buffer at i has been already replaced by lem_get_buf()
3793 * so it is safe to set guest_rdt = i and possibly send a kick.
3794 * XXX see if we can optimize it later.
3797 // XXX memory barrier
3798 if (i == csb->host_rxkick_at)
3799 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3801 #endif /* NIC_PARAVIRT */
3802 /* Advance our pointers to the next descriptor. */
3803 if (++i == adapter->num_rx_desc)
3805 /* Call into the stack */
3807 #ifdef BATCH_DISPATCH
3808 if (adapter->batch_enable) {
3814 m->m_nextpkt = NULL;
3816 current_desc = &adapter->rx_desc_base[i];
3819 #endif /* BATCH_DISPATCH */
/* Publish progress, then drop the lock across the stack upcall so the
 * input path cannot deadlock back into the driver. */
3820 adapter->next_rx_desc_to_check = i;
3821 EM_RX_UNLOCK(adapter);
3822 (*ifp->if_input)(ifp, m);
3823 EM_RX_LOCK(adapter);
/* Re-read the index: it may have moved while the lock was released. */
3825 i = adapter->next_rx_desc_to_check;
3827 current_desc = &adapter->rx_desc_base[i];
3829 adapter->next_rx_desc_to_check = i;
3830 #ifdef BATCH_DISPATCH
/* Deliver the whole batched packet list with the lock dropped once. */
3832 EM_RX_UNLOCK(adapter);
3833 while ( (mt = mh) != NULL) {
3835 mt->m_nextpkt = NULL;
3838 EM_RX_LOCK(adapter);
3839 i = adapter->next_rx_desc_to_check; /* in case of interrupts */
3843 #endif /* BATCH_DISPATCH */
3845 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
3847 i = adapter->num_rx_desc - 1;
3849 if (!csb_mode) /* filter out writes */
3850 #endif /* NIC_PARAVIRT */
3851 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3854 EM_RX_UNLOCK(adapter);
3855 return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
3858 #ifndef __NO_STRICT_ALIGNMENT
3860 * When jumbo frames are enabled we should realign entire payload on
3861 * architectures with strict alignment. This is a serious design mistake of 8254x
3862 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3863 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3864 * payload. On architectures without strict alignment restrictions 8254x still
3865 * performs unaligned memory access which would reduce the performance too.
3866 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3867 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3868 * existing mbuf chain.
3870 * Be aware, best performance of the 8254x is achieved only when jumbo frame is
3871 * not used at all on architectures with strict alignment.
/*
 * lem_fixup_rx - realign a received frame on strict-alignment machines.
 *
 * If the leading mbuf has room, the payload is shifted in place by
 * ETHER_HDR_LEN; otherwise a new header mbuf is allocated, the Ethernet
 * header copied into it, and it is prepended to the chain (see the
 * rationale in the comment block above this function).  On allocation
 * failure the whole assembled chain (adapter->fmp) is dropped.
 *
 * NOTE(review): partial extraction - locals, the MGETHDR failure branch
 * and return statements are not visible here.
 */
3874 lem_fixup_rx(struct adapter *adapter)
/* In-place shift: enough tail room in the cluster to slide the data. */
3881 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3882 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3883 m->m_data += ETHER_HDR_LEN;
/* Otherwise prepend a fresh mbuf carrying just the Ethernet header. */
3885 MGETHDR(n, M_NOWAIT, MT_DATA);
3887 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3888 m->m_data += ETHER_HDR_LEN;
3889 m->m_len -= ETHER_HDR_LEN;
3890 n->m_len = ETHER_HDR_LEN;
3891 M_MOVE_PKTHDR(n, m);
/* Allocation failed: count and drop the partially built chain. */
3895 adapter->dropped_pkts++;
3896 m_freem(adapter->fmp);
3897 adapter->fmp = NULL;
3906 /*********************************************************************
3908 * Verify that the hardware indicated that the checksum is valid.
3909 * Inform the stack about the status of checksum so that stack
3910 * doesn't spend time verifying the checksum.
3912 *********************************************************************/
/*
 * lem_receive_checksum - translate descriptor checksum status into
 * mbuf csum_flags so the stack can skip software verification.
 *
 * Pre-82543 parts and frames with the Ignore-Checksum bit get no flags.
 * IP checksum: CSUM_IP_CHECKED always, CSUM_IP_VALID when no IPE error.
 * TCP/UDP checksum: CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data
 * 0xffff when no TCPE error.
 */
3914 lem_receive_checksum(struct adapter *adapter,
3915 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3917 /* 82543 or newer only */
3918 if ((adapter->hw.mac.type < e1000_82543) ||
3919 /* Ignore Checksum bit is set */
3920 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3921 mp->m_pkthdr.csum_flags = 0;
3925 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3927 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3928 /* IP Checksum Good */
3929 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3930 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
/* Hardware verified the IP header checksum but it was bad. */
3933 mp->m_pkthdr.csum_flags = 0;
3937 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3939 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3940 mp->m_pkthdr.csum_flags |=
3941 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
/* 0xffff tells the stack the pseudo-header sum is already folded in. */
3942 mp->m_pkthdr.csum_data = htons(0xffff);
3948 * This routine is run via a vlan
/*
 * lem_register_vlan - VLAN-attach event callback.
 *
 * Records the new VLAN id in the shadow VFTA bitmap (32 ids per u32
 * entry: index = vtag >> 5, bit = vtag & 0x1F - the bit computation is
 * not visible in this extraction) and reinitializes the adapter when
 * hardware VLAN filtering is enabled so the hardware table is reloaded.
 * Ignores events for other interfaces and out-of-range tags.
 */
3952 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3954 struct adapter *adapter = ifp->if_softc;
3957 if (ifp->if_softc != arg) /* Not our event */
3960 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
3963 EM_CORE_LOCK(adapter);
3964 index = (vtag >> 5) & 0x7F;
3966 adapter->shadow_vfta[index] |= (1 << bit);
3967 ++adapter->num_vlans;
3968 /* Re-init to load the changes */
3969 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3970 lem_init_locked(adapter);
3971 EM_CORE_UNLOCK(adapter);
3975 * This routine is run via a vlan
/*
 * lem_unregister_vlan - VLAN-detach event callback.
 *
 * Mirror of lem_register_vlan: clears the id's bit in the shadow VFTA,
 * decrements the VLAN count, and reinitializes the adapter when hardware
 * VLAN filtering is active.  Ignores foreign interfaces and bad tags.
 */
3979 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3981 struct adapter *adapter = ifp->if_softc;
3984 if (ifp->if_softc != arg)
3987 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3990 EM_CORE_LOCK(adapter);
3991 index = (vtag >> 5) & 0x7F;
3993 adapter->shadow_vfta[index] &= ~(1 << bit);
3994 --adapter->num_vlans;
3995 /* Re-init to load the changes */
3996 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3997 lem_init_locked(adapter);
3998 EM_CORE_UNLOCK(adapter);
/*
 * lem_setup_vlan_hw_support - reload hardware VLAN state after a reset.
 *
 * Repopulates the hardware VFTA from the shadow copy, turns on VLAN
 * tag stripping (CTRL.VME) and enables the VLAN filter table
 * (RCTL.VFE with CFIEN cleared).  No-op when no VLANs are registered.
 */
4002 lem_setup_vlan_hw_support(struct adapter *adapter)
4004 struct e1000_hw *hw = &adapter->hw;
4008 ** We get here thru init_locked, meaning
4009 ** a soft reset, this has already cleared
4010 ** the VFTA and other state, so if there
4011 ** have been no vlans registered do nothing.
4013 if (adapter->num_vlans == 0)
4017 ** A soft reset zeroes out the VFTA, so
4018 ** we need to repopulate it now.
4020 for (int i = 0; i < EM_VFTA_SIZE; i++)
4021 if (adapter->shadow_vfta[i] != 0)
4022 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
4023 i, adapter->shadow_vfta[i]);
/* Enable hardware VLAN tag stripping on receive. */
4025 reg = E1000_READ_REG(hw, E1000_CTRL);
4026 reg |= E1000_CTRL_VME;
4027 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4029 /* Enable the Filter Table */
4030 reg = E1000_READ_REG(hw, E1000_RCTL);
4031 reg &= ~E1000_RCTL_CFIEN;
4032 reg |= E1000_RCTL_VFE;
4033 E1000_WRITE_REG(hw, E1000_RCTL, reg);
/* lem_enable_intr - unmask the standard interrupt causes (IMS register). */
4037 lem_enable_intr(struct adapter *adapter)
4039 struct e1000_hw *hw = &adapter->hw;
4040 u32 ims_mask = IMS_ENABLE_MASK;
4042 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
/* lem_disable_intr - mask all interrupt causes via the IMC register. */
4046 lem_disable_intr(struct adapter *adapter)
4048 struct e1000_hw *hw = &adapter->hw;
4050 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4054 * Bit of a misnomer, what this really means is
4055 * to enable OS management of the system... aka
4056 * to disable special hardware management features
/*
 * lem_init_manageability - hand networking over to the OS by disabling
 * the management firmware's ARP interception (see the misnomer note in
 * the comment block above).  Only acts when management is present.
 */
4059 lem_init_manageability(struct adapter *adapter)
4061 /* A shared code workaround */
4062 if (adapter->has_manage) {
4063 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4064 /* disable hardware interception of ARP */
4065 manc &= ~(E1000_MANC_ARP_EN);
4066 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4071 * Give control back to hardware management
4072 * controller if there is one.
/*
 * lem_release_manageability - inverse of lem_init_manageability: give
 * ARP handling back to the management controller, if one exists.
 */
4075 lem_release_manageability(struct adapter *adapter)
4077 if (adapter->has_manage) {
4078 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4080 /* re-enable hardware interception of ARP */
4081 manc |= E1000_MANC_ARP_EN;
4082 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4087 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
4088 * For ASF and Pass Through versions of f/w this means
4089 * that the driver is loaded. For AMT version type f/w
4090 * this means that the network i/f is open.
/*
 * lem_get_hw_control - set CTRL_EXT:DRV_LOAD to tell firmware the driver
 * owns the hardware (see the semantics described in the comment above).
 */
4093 lem_get_hw_control(struct adapter *adapter)
4097 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4098 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4099 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4104 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4105 * For ASF and Pass Through versions of f/w this means that
4106 * the driver is no longer loaded. For AMT versions of the
4107 * f/w this means that the network i/f is closed.
/*
 * lem_release_hw_control - clear CTRL_EXT:DRV_LOAD so firmware regains
 * control.  No-op when the part has no management capability.
 */
4110 lem_release_hw_control(struct adapter *adapter)
4114 if (!adapter->has_manage)
4117 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4118 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4119 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
/*
 * lem_is_valid_ether_addr - reject multicast (low bit of first octet set)
 * and all-zero MAC addresses.  NOTE(review): the return statements are
 * not visible in this extraction; presumably FALSE in this branch, TRUE
 * otherwise - confirm against the full source.
 */
4124 lem_is_valid_ether_addr(u8 *addr)
4126 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4128 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4136 ** Parse the interface capabilities with regard
4137 ** to both system management and wake-on-lan for
/*
 * lem_get_wakeup - read wake-on-LAN and manageability configuration
 * from the EEPROM/NVM and apply per-board quirks.
 *
 * Picks the NVM word and APME mask by MAC type (82544 uses a different
 * control word; dual-port 82546 reads the word for its own port), then
 * seeds adapter->wol with magic-packet + multicast wake.  The second
 * switch disables WoL on ports/boards that do not actually support it
 * (dual fiber port B, quad-port non-A ports).
 *
 * NOTE(review): case labels, break statements and the wol-clearing
 * assignments in the quirk branches are not visible in this extraction.
 */
4141 lem_get_wakeup(device_t dev)
4143 struct adapter *adapter = device_get_softc(dev);
4144 u16 eeprom_data = 0, device_id, apme_mask;
4146 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
4147 apme_mask = EM_EEPROM_APME;
4149 switch (adapter->hw.mac.type) {
4154 e1000_read_nvm(&adapter->hw,
4155 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
4156 apme_mask = EM_82544_APME;
4159 case e1000_82546_rev_3:
/* Dual-port parts: read the NVM control word for this port's function. */
4160 if (adapter->hw.bus.func == 1) {
4161 e1000_read_nvm(&adapter->hw,
4162 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
4165 e1000_read_nvm(&adapter->hw,
4166 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4169 e1000_read_nvm(&adapter->hw,
4170 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4173 if (eeprom_data & apme_mask)
4174 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
4176 * We have the eeprom settings, now apply the special cases
4177 * where the eeprom may be wrong or the board won't support
4178 * wake on lan on a particular port
4180 device_id = pci_get_device(dev);
4181 switch (device_id) {
4182 case E1000_DEV_ID_82546GB_PCIE:
4185 case E1000_DEV_ID_82546EB_FIBER:
4186 case E1000_DEV_ID_82546GB_FIBER:
4187 /* Wake events only supported on port A for dual fiber
4188 * regardless of eeprom setting */
4189 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
4190 E1000_STATUS_FUNC_1)
4193 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
4194 /* if quad port adapter, disable WoL on all but port A */
4195 if (global_quad_port_a != 0)
4197 /* Reset for multiple quad port adapters */
4198 if (++global_quad_port_a == 4)
4199 global_quad_port_a = 0;
4207 * Enable PCI Wake On Lan capability
/*
 * lem_enable_wakeup - arm PCI wake-on-LAN before suspend/shutdown.
 *
 * Bails out if the device exposes no PCI power-management capability.
 * Otherwise: advertises wake via CTRL/WUC, keeps the laser alive on
 * fiber/serdes media, trims adapter->wol to match the interface's
 * IFCAP_WOL_* settings, enables promiscuous multicast for multicast
 * wake, programs WUC/WUFC (via the PHY on pchlan parts), and finally
 * sets PME status/enable in PCI config space.
 *
 * NOTE(review): some intermediate statements (returns, the IFCAP_WOL_MCAST
 * RCTL guard) are not visible in this extraction.
 */
4210 lem_enable_wakeup(device_t dev)
4212 struct adapter *adapter = device_get_softc(dev);
4213 struct ifnet *ifp = adapter->ifp;
4214 u32 pmc, ctrl, ctrl_ext, rctl;
/* No PCI power-management capability: nothing to arm. */
4217 if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
4220 /* Advertise the wakeup capability */
4221 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4222 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
4223 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4224 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4226 /* Keep the laser running on Fiber adapters */
4227 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
4228 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
4229 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4230 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4231 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
4235 ** Determine type of Wakeup: note that wol
4236 ** is set with all bits on by default.
4238 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
4239 adapter->wol &= ~E1000_WUFC_MAG;
4241 if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
4242 adapter->wol &= ~E1000_WUFC_MC;
/* Multicast wake requires accepting all multicast while asleep. */
4244 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4245 rctl |= E1000_RCTL_MPE;
4246 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
/* pchlan parts route wake configuration through the PHY. */
4249 if (adapter->hw.mac.type == e1000_pchlan) {
4250 if (lem_enable_phy_wakeup(adapter))
4253 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4254 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
/* Request PME assertion in PCI config space according to IFCAP_WOL. */
4259 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4260 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4261 if (ifp->if_capenable & IFCAP_WOL)
4262 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4263 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
4269 ** WOL in the newer chipset interfaces (pchlan)
4270 ** require thing to be copied into the phy
/*
 * lem_enable_phy_wakeup - mirror MAC wake state into the PHY (pchlan).
 *
 * Copies receive-address registers (RAR) and the multicast table (MTA)
 * into their PHY (BM_*) counterparts, rebuilds the PHY's Rx Control
 * register from the MAC's RCTL/CTRL bits, enables PHY wake in WUC/WUFC,
 * and finally sets the host-wakeup enable bits on PHY wakeup page 769
 * under the PHY semaphore.
 *
 * NOTE(review): locals (mreg, preg, ret), error-path returns/gotos and
 * some braces are not visible in this extraction.
 */
4273 lem_enable_phy_wakeup(struct adapter *adapter)
4275 struct e1000_hw *hw = &adapter->hw;
4279 /* copy MAC RARs to PHY RARs */
4280 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4281 mreg = E1000_READ_REG(hw, E1000_RAL(i));
4282 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
4283 e1000_write_phy_reg(hw, BM_RAR_M(i),
4284 (u16)((mreg >> 16) & 0xFFFF));
4285 mreg = E1000_READ_REG(hw, E1000_RAH(i));
4286 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
4287 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
4288 (u16)((mreg >> 16) & 0xFFFF));
4291 /* copy MAC MTA to PHY MTA */
4292 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4293 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4294 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4295 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4296 (u16)((mreg >> 16) & 0xFFFF));
4299 /* configure PHY Rx Control register */
4300 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
4301 mreg = E1000_READ_REG(hw, E1000_RCTL);
4302 if (mreg & E1000_RCTL_UPE)
4303 preg |= BM_RCTL_UPE;
4304 if (mreg & E1000_RCTL_MPE)
4305 preg |= BM_RCTL_MPE;
4306 preg &= ~(BM_RCTL_MO_MASK);
4307 if (mreg & E1000_RCTL_MO_3)
4308 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4309 << BM_RCTL_MO_SHIFT);
4310 if (mreg & E1000_RCTL_BAM)
4311 preg |= BM_RCTL_BAM;
4312 if (mreg & E1000_RCTL_PMCF)
4313 preg |= BM_RCTL_PMCF;
/* Flow control receive enable comes from CTRL, not RCTL. */
4314 mreg = E1000_READ_REG(hw, E1000_CTRL);
4315 if (mreg & E1000_CTRL_RFCE)
4316 preg |= BM_RCTL_RFCE;
4317 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
4319 /* enable PHY wakeup in MAC register */
4320 E1000_WRITE_REG(hw, E1000_WUC,
4321 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4322 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
4324 /* configure and enable PHY wakeup in PHY registers */
4325 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
4326 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4328 /* activate PHY wakeup */
4329 ret = hw->phy.ops.acquire(hw);
4331 printf("Could not acquire PHY\n");
/* Select PHY wakeup page 769 via raw MDIC writes, then set the enable bits. */
4334 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4335 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4336 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
4338 printf("Could not read PHY page 769\n");
4341 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4342 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
4344 printf("Could not set PHY Host Wakeup bit\n");
4346 hw->phy.ops.release(hw);
/*
 * lem_led_func - led(4) callback: turn the identify LED on or off.
 * On: setup + led_on; off: led_off + cleanup.  Serialized by the core lock.
 * NOTE(review): the if/else around onoff is not visible in this extraction.
 */
4352 lem_led_func(void *arg, int onoff)
4354 struct adapter *adapter = arg;
4356 EM_CORE_LOCK(adapter);
4358 e1000_setup_led(&adapter->hw);
4359 e1000_led_on(&adapter->hw);
4361 e1000_led_off(&adapter->hw);
4362 e1000_cleanup_led(&adapter->hw);
4364 EM_CORE_UNLOCK(adapter);
4367 /*********************************************************************
4368 * 82544 Coexistence issue workaround.
4369 * There are 2 issues.
4370 * 1. Transmit Hang issue.
4371 * To detect this issue, following equation can be used...
4372 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4373 * If SUM[3:0] is in between 1 to 4, we will have this issue.
4376 * To detect this issue, following equation can be used...
4377 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4378 * If SUM[3:0] is in between 9 to c, we will have this issue.
4382 * Make sure we do not have ending address
4383 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4385 *************************************************************************/
/*
 * lem_fill_descriptors - 82544 coexistence workaround (see the comment
 * block above): avoid TX buffers whose ending alignment triggers the
 * hang/DAC errata.
 *
 * Computes safe_terminator = (addr[2:0] + len[3:0]) & 0xF.  If it falls
 * outside the problem ranges (1..4 and 9..0xC) the buffer is emitted as
 * a single descriptor; otherwise the buffer is split so the final
 * descriptor carries exactly the last 4 bytes.  Returns the number of
 * descriptors written into desc_array (1 or 2).
 *
 * NOTE(review): the early-out guard before the first descriptor[0]
 * assignment is not visible in this extraction.
 */
4387 lem_fill_descriptors (bus_addr_t address, u32 length,
4388 PDESC_ARRAY desc_array)
4390 u32 safe_terminator;
4392 /* Since issue is sensitive to length and address.*/
4393 /* Let us first check the address...*/
4395 desc_array->descriptor[0].address = address;
4396 desc_array->descriptor[0].length = length;
4397 desc_array->elements = 1;
4398 return (desc_array->elements);
4400 safe_terminator = (u32)((((u32)address & 0x7) +
4401 (length & 0xF)) & 0xF);
4402 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4403 if (safe_terminator == 0 ||
4404 (safe_terminator > 4 &&
4405 safe_terminator < 9) ||
4406 (safe_terminator > 0xC &&
4407 safe_terminator <= 0xF)) {
4408 desc_array->descriptor[0].address = address;
4409 desc_array->descriptor[0].length = length;
4410 desc_array->elements = 1;
4411 return (desc_array->elements);
/* Problematic ending: split off the last 4 bytes into a second descriptor. */
4414 desc_array->descriptor[0].address = address;
4415 desc_array->descriptor[0].length = length - 4;
4416 desc_array->descriptor[1].address = address + (length - 4);
4417 desc_array->descriptor[1].length = 4;
4418 desc_array->elements = 2;
4419 return (desc_array->elements);
4422 /**********************************************************************
4424 * Update the board statistics counters.
4426 **********************************************************************/
/*
 * lem_update_stats_counters - accumulate hardware statistics registers
 * into adapter->stats and refresh the ifnet error counters.
 *
 * Most e1000 statistics registers are clear-on-read, hence the '+='
 * accumulation.  Symbol/sequence errors are only sampled on copper
 * media or when link is up; 82543+ parts expose additional counters.
 *
 * NOTE(review): locals (e.g. the ifp pointer used at the bottom) are
 * not visible in this extraction.
 */
4428 lem_update_stats_counters(struct adapter *adapter)
4432 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
4433 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4434 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4435 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4437 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4438 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4439 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4440 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4442 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4443 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4444 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4445 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4446 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4447 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4448 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4449 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4450 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4451 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4452 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4453 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4454 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4455 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4456 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4457 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4458 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4459 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4460 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4461 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4463 /* For the 64-bit byte counters the low dword must be read first. */
4464 /* Both registers clear on the read of the high dword */
4466 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
4467 ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
4468 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
4469 ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
4471 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
4472 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
4473 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
4474 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
4475 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
4477 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
4478 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
4480 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
4481 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
4482 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
4483 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
4484 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
4485 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
4486 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
4487 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
4488 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
4489 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* Counters that only exist on 82543 and newer MACs. */
4491 if (adapter->hw.mac.type >= e1000_82543) {
4492 adapter->stats.algnerrc +=
4493 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
4494 adapter->stats.rxerrc +=
4495 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
4496 adapter->stats.tncrs +=
4497 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
4498 adapter->stats.cexterr +=
4499 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
4500 adapter->stats.tsctc +=
4501 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
4502 adapter->stats.tsctfc +=
4503 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
/* Fold the accumulated hardware counters into the ifnet statistics. */
4507 ifp->if_collisions = adapter->stats.colc;
4510 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
4511 adapter->stats.crcerrs + adapter->stats.algnerrc +
4512 adapter->stats.ruc + adapter->stats.roc +
4513 adapter->stats.mpc + adapter->stats.cexterr;
4516 ifp->if_oerrors = adapter->stats.ecol +
4517 adapter->stats.latecol + adapter->watchdog_events;
4520 /* Export a single 32-bit register via a read-only sysctl. */
/*
 * lem_sysctl_reg_handler - sysctl proc handler that reads one 32-bit
 * device register.  oid_arg1 is the adapter, oid_arg2 the register offset
 * (see lem_add_hw_stats for the registrations).  Read-only by convention.
 */
4522 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4524 struct adapter *adapter;
4527 adapter = oidp->oid_arg1;
4528 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4529 return (sysctl_handle_int(oidp, &val, 0, req));
4533 * Add sysctl variables, one per statistic, to the system.
4536 lem_add_hw_stats(struct adapter *adapter)
4538 device_t dev = adapter->dev;
4540 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4541 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4542 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4543 struct e1000_hw_stats *stats = &adapter->stats;
4545 struct sysctl_oid *stat_node;
4546 struct sysctl_oid_list *stat_list;
4548 /* Driver Statistics */
4549 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
4550 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4551 "Std mbuf cluster failed");
4552 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_fail",
4553 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4554 "Defragmenting mbuf chain failed");
4555 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4556 CTLFLAG_RD, &adapter->dropped_pkts,
4557 "Driver dropped packets");
4558 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
4559 CTLFLAG_RD, &adapter->no_tx_dma_setup,
4560 "Driver tx dma failure in xmit");
4561 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4562 CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4563 "Not enough tx descriptors failure in xmit");
4564 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4565 CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4566 "Not enough tx descriptors failure in xmit");
4567 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4568 CTLFLAG_RD, &adapter->rx_overruns,
4570 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4571 CTLFLAG_RD, &adapter->watchdog_events,
4572 "Watchdog timeouts");
4574 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4575 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4576 lem_sysctl_reg_handler, "IU",
4577 "Device Control Register");
4578 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4579 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4580 lem_sysctl_reg_handler, "IU",
4581 "Receiver Control Register");
4582 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4583 CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4584 "Flow Control High Watermark");
4585 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4586 CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4587 "Flow Control Low Watermark");
4588 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4589 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4590 "TX FIFO workaround events");
4591 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4592 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4595 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head",
4596 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4597 lem_sysctl_reg_handler, "IU",
4598 "Transmit Descriptor Head");
4599 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail",
4600 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4601 lem_sysctl_reg_handler, "IU",
4602 "Transmit Descriptor Tail");
4603 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head",
4604 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4605 lem_sysctl_reg_handler, "IU",
4606 "Receive Descriptor Head");
4607 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail",
4608 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4609 lem_sysctl_reg_handler, "IU",
4610 "Receive Descriptor Tail");
4613 /* MAC stats get their own sub node */
4615 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4616 CTLFLAG_RD, NULL, "Statistics");
4617 stat_list = SYSCTL_CHILDREN(stat_node);
4619 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4620 CTLFLAG_RD, &stats->ecol,
4621 "Excessive collisions");
4622 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4623 CTLFLAG_RD, &stats->scc,
4624 "Single collisions");
4625 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4626 CTLFLAG_RD, &stats->mcc,
4627 "Multiple collisions");
4628 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4629 CTLFLAG_RD, &stats->latecol,
4631 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4632 CTLFLAG_RD, &stats->colc,
4634 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4635 CTLFLAG_RD, &adapter->stats.symerrs,
4637 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4638 CTLFLAG_RD, &adapter->stats.sec,
4640 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4641 CTLFLAG_RD, &adapter->stats.dc,
4643 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4644 CTLFLAG_RD, &adapter->stats.mpc,
4646 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4647 CTLFLAG_RD, &adapter->stats.rnbc,
4648 "Receive No Buffers");
4649 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4650 CTLFLAG_RD, &adapter->stats.ruc,
4651 "Receive Undersize");
4652 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4653 CTLFLAG_RD, &adapter->stats.rfc,
4654 "Fragmented Packets Received ");
4655 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4656 CTLFLAG_RD, &adapter->stats.roc,
4657 "Oversized Packets Received");
4658 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4659 CTLFLAG_RD, &adapter->stats.rjc,
4661 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4662 CTLFLAG_RD, &adapter->stats.rxerrc,
4664 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4665 CTLFLAG_RD, &adapter->stats.crcerrs,
4667 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4668 CTLFLAG_RD, &adapter->stats.algnerrc,
4669 "Alignment Errors");
4670 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4671 CTLFLAG_RD, &adapter->stats.cexterr,
4672 "Collision/Carrier extension errors");
4673 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4674 CTLFLAG_RD, &adapter->stats.xonrxc,
4676 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4677 CTLFLAG_RD, &adapter->stats.xontxc,
4679 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4680 CTLFLAG_RD, &adapter->stats.xoffrxc,
4682 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4683 CTLFLAG_RD, &adapter->stats.xofftxc,
4684 "XOFF Transmitted");
4686 /* Packet Reception Stats */
4687 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4688 CTLFLAG_RD, &adapter->stats.tpr,
4689 "Total Packets Received ");
4690 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4691 CTLFLAG_RD, &adapter->stats.gprc,
4692 "Good Packets Received");
4693 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4694 CTLFLAG_RD, &adapter->stats.bprc,
4695 "Broadcast Packets Received");
4696 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4697 CTLFLAG_RD, &adapter->stats.mprc,
4698 "Multicast Packets Received");
4699 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4700 CTLFLAG_RD, &adapter->stats.prc64,
4701 "64 byte frames received ");
4702 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4703 CTLFLAG_RD, &adapter->stats.prc127,
4704 "65-127 byte frames received");
4705 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4706 CTLFLAG_RD, &adapter->stats.prc255,
4707 "128-255 byte frames received");
4708 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4709 CTLFLAG_RD, &adapter->stats.prc511,
4710 "256-511 byte frames received");
4711 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4712 CTLFLAG_RD, &adapter->stats.prc1023,
4713 "512-1023 byte frames received");
4714 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4715 CTLFLAG_RD, &adapter->stats.prc1522,
4716 "1023-1522 byte frames received");
4717 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4718 CTLFLAG_RD, &adapter->stats.gorc,
4719 "Good Octets Received");
4721 /* Packet Transmission Stats */
4722 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4723 CTLFLAG_RD, &adapter->stats.gotc,
4724 "Good Octets Transmitted");
4725 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4726 CTLFLAG_RD, &adapter->stats.tpt,
4727 "Total Packets Transmitted");
4728 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4729 CTLFLAG_RD, &adapter->stats.gptc,
4730 "Good Packets Transmitted");
4731 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4732 CTLFLAG_RD, &adapter->stats.bptc,
4733 "Broadcast Packets Transmitted");
4734 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4735 CTLFLAG_RD, &adapter->stats.mptc,
4736 "Multicast Packets Transmitted");
4737 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4738 CTLFLAG_RD, &adapter->stats.ptc64,
4739 "64 byte frames transmitted ");
4740 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4741 CTLFLAG_RD, &adapter->stats.ptc127,
4742 "65-127 byte frames transmitted");
4743 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4744 CTLFLAG_RD, &adapter->stats.ptc255,
4745 "128-255 byte frames transmitted");
4746 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4747 CTLFLAG_RD, &adapter->stats.ptc511,
4748 "256-511 byte frames transmitted");
4749 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4750 CTLFLAG_RD, &adapter->stats.ptc1023,
4751 "512-1023 byte frames transmitted");
4752 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4753 CTLFLAG_RD, &adapter->stats.ptc1522,
4754 "1024-1522 byte frames transmitted");
4755 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4756 CTLFLAG_RD, &adapter->stats.tsctc,
4757 "TSO Contexts Transmitted");
4758 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4759 CTLFLAG_RD, &adapter->stats.tsctfc,
4760 "TSO Contexts Failed");
4763 /**********************************************************************
4765 * This routine provides a way to dump out the adapter EEPROM,
4766 * often a useful debug/service tool. This only dumps the first
4767 * 32 words; the data that matters lies within that extent.
4769 **********************************************************************/
/*
 * lem_sysctl_nvm_info - sysctl handler used as a debug/service hook.
 *
 * Reading the node returns the current value; writing the trigger value
 * causes a hex dump of the first 32 16-bit words of the EEPROM via
 * lem_print_nvm_info().
 *
 * NOTE(review): this listing is incomplete -- the declarations of
 * `result`/`error`, the early return after sysctl_handle_int(), and the
 * comparison of `result` against the trigger value are not visible here;
 * confirm against the full source before relying on this summary.
 */
4772 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4774 struct adapter *adapter;
/* Copy the integer in/out of userland; a pure read has no newptr. */
4779 error = sysctl_handle_int(oidp, &result, 0, req);
4781 if (error || !req->newptr)
4785 * This value will cause a hex dump of the
4786 * first 32 16-bit words of the EEPROM to
/* arg1 was registered as the adapter softc pointer. */
4790 adapter = (struct adapter *)arg1;
4791 lem_print_nvm_info(adapter);
/*
 * lem_print_nvm_info - hex-dump the first 32 16-bit EEPROM words to the
 * console, eight words per row, each row prefixed with its word offset.
 *
 * NOTE(review): this listing is incomplete -- the declarations of
 * i/j/row/eeprom_data and the row-wrap bookkeeping (resetting j,
 * advancing row) are not visible here; confirm against the full source.
 */
4798 lem_print_nvm_info(struct adapter *adapter)
4803 /* It's a bit crude, but it gets the job done */
4804 printf("\nInterface EEPROM Dump:\n");
4805 printf("Offset\n0x0000 ");
/* 32 words total, wrapping to a new labeled row every 8 words. */
4806 for (i = 0, j = 0; i < 32; i++, j++) {
4807 if (j == 8) { /* Make the offset block */
4809 printf("\n0x00%x0 ",row);
/* Read one 16-bit word at word offset i from the NVM/EEPROM. */
4811 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4812 printf("%04x ", eeprom_data);
/*
 * lem_sysctl_int_delay - sysctl handler for the interrupt-delay tunables.
 *
 * Converts the user-supplied microsecond value into hardware ticks,
 * rejects out-of-range input, and rewrites the low 16 bits of the
 * delay register identified by info->offset under the core lock.
 * The TIDV register additionally toggles the IDE bit in the cached
 * transmit-descriptor command word.
 *
 * NOTE(review): this listing is incomplete -- local declarations, the
 * early returns, the ITR tick scaling, the switch case labels and
 * break statements, and the final return are not visible here; confirm
 * against the full source.
 */
4818 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4820 struct em_int_delay_info *info;
4821 struct adapter *adapter;
/* arg1 carries the per-knob context (adapter, register offset, value). */
4827 info = (struct em_int_delay_info *)arg1;
4828 usecs = info->value;
/* Export the current value; accept a replacement if one was written. */
4829 error = sysctl_handle_int(oidp, &usecs, 0, req);
4830 if (error != 0 || req->newptr == NULL)
/* Range check: the register delay field is only 16 bits of ticks. */
4832 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4834 info->value = usecs;
4835 ticks = EM_USECS_TO_TICKS(usecs);
4836 if (info->offset == E1000_ITR) /* units are 256ns here */
4839 adapter = info->adapter;
4841 EM_CORE_LOCK(adapter);
/* Preserve the register's upper bits; replace the 16-bit delay field. */
4842 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4843 regval = (regval & ~0xffff) | (ticks & 0xffff);
4844 /* Handle a few special cases. */
4845 switch (info->offset) {
/* TIDV of zero: disable per-descriptor interrupt delay... */
4850 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4851 /* Don't write 0 into the TIDV register. */
/* ...nonzero TIDV: request interrupt-delay on transmit descriptors. */
4854 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4857 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4858 EM_CORE_UNLOCK(adapter);
/*
 * lem_add_int_delay_sysctl - register one read/write interrupt-delay
 * sysctl node under the device's sysctl tree.
 *
 * Fills the caller-provided em_int_delay_info with the adapter, the
 * hardware register offset, and the initial value, then attaches a
 * CTLTYPE_INT node whose handler is lem_sysctl_int_delay (which
 * receives `info` as arg1).
 */
4863 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4864 const char *description, struct em_int_delay_info *info,
4865 int offset, int value)
4867 info->adapter = adapter;
4868 info->offset = offset;
4869 info->value = value;
4870 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4871 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4872 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4873 info, 0, lem_sysctl_int_delay, "I", description);
/*
 * lem_set_flow_cntrl - expose a flow-control tunable as a read/write
 * integer sysctl directly backed by *limit.
 *
 * NOTE(review): this listing is incomplete -- the line that assigns the
 * initial value into *limit is not visible here; confirm against the
 * full source.
 */
4877 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4878 const char *description, int *limit, int value)
4881 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4882 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4883 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
4887 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4888 const char *description, int *limit, int value)
4891 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4892 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4893 OID_AUTO, name, CTLFLAG_RW, limit, value, description);