1 /******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
/*
 * Uncomment the following extensions for better performance in a VM,
 * especially if you have support in the hypervisor.
 * See http://info.iet.unipi.it/~luigi/netmap/
 */
// #define BATCH_DISPATCH
// #define NIC_SEND_COMBINING
// #define NIC_PARAVIRT	/* enable virtio-like synchronization */
45 #include "opt_inet6.h"
47 #ifdef HAVE_KERNEL_OPTION_HEADERS
48 #include "opt_device_polling.h"
51 #include <sys/param.h>
52 #include <sys/systm.h>
54 #include <sys/endian.h>
55 #include <sys/kernel.h>
56 #include <sys/kthread.h>
57 #include <sys/malloc.h>
59 #include <sys/module.h>
61 #include <sys/socket.h>
62 #include <sys/sockio.h>
63 #include <sys/sysctl.h>
64 #include <sys/taskqueue.h>
65 #include <sys/eventhandler.h>
66 #include <machine/bus.h>
67 #include <machine/resource.h>
70 #include <net/ethernet.h>
72 #include <net/if_arp.h>
73 #include <net/if_dl.h>
74 #include <net/if_media.h>
76 #include <net/if_types.h>
77 #include <net/if_vlan_var.h>
79 #include <netinet/in_systm.h>
80 #include <netinet/in.h>
81 #include <netinet/if_ether.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip6.h>
84 #include <netinet/tcp.h>
85 #include <netinet/udp.h>
87 #include <machine/in_cksum.h>
88 #include <dev/led/led.h>
89 #include <dev/pci/pcivar.h>
90 #include <dev/pci/pcireg.h>
92 #include "e1000_api.h"
/*********************************************************************
 *  Legacy Em Driver version:
 *********************************************************************/
/* Driver version string reported via the probe description and sysctl. */
char lem_driver_version[] = "1.0.6";
100 /*********************************************************************
101 * PCI Device ID Table
103 * Used by probe to select devices to load on
104 * Last field stores an index into e1000_strings
105 * Last entry must be all 0s
107 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
108 *********************************************************************/
110 static em_vendor_info_t lem_vendor_info_array[] =
112 /* Intel(R) PRO/1000 Network Connection */
113 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
132 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
143 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
147 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
148 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
149 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
150 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
151 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
152 PCI_ANY_ID, PCI_ANY_ID, 0},
154 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
155 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
156 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
157 /* required last entry */
/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

static char *lem_strings[] = {
	"Intel(R) PRO/1000 Legacy Network Connection"
};
169 /*********************************************************************
170 * Function prototypes
171 *********************************************************************/
172 static int lem_probe(device_t);
173 static int lem_attach(device_t);
174 static int lem_detach(device_t);
175 static int lem_shutdown(device_t);
176 static int lem_suspend(device_t);
177 static int lem_resume(device_t);
178 static void lem_start(struct ifnet *);
179 static void lem_start_locked(struct ifnet *ifp);
180 static int lem_ioctl(struct ifnet *, u_long, caddr_t);
181 static void lem_init(void *);
182 static void lem_init_locked(struct adapter *);
183 static void lem_stop(void *);
184 static void lem_media_status(struct ifnet *, struct ifmediareq *);
185 static int lem_media_change(struct ifnet *);
186 static void lem_identify_hardware(struct adapter *);
187 static int lem_allocate_pci_resources(struct adapter *);
188 static int lem_allocate_irq(struct adapter *adapter);
189 static void lem_free_pci_resources(struct adapter *);
190 static void lem_local_timer(void *);
191 static int lem_hardware_init(struct adapter *);
192 static int lem_setup_interface(device_t, struct adapter *);
193 static void lem_setup_transmit_structures(struct adapter *);
194 static void lem_initialize_transmit_unit(struct adapter *);
195 static int lem_setup_receive_structures(struct adapter *);
196 static void lem_initialize_receive_unit(struct adapter *);
197 static void lem_enable_intr(struct adapter *);
198 static void lem_disable_intr(struct adapter *);
199 static void lem_free_transmit_structures(struct adapter *);
200 static void lem_free_receive_structures(struct adapter *);
201 static void lem_update_stats_counters(struct adapter *);
202 static void lem_add_hw_stats(struct adapter *adapter);
203 static void lem_txeof(struct adapter *);
204 static void lem_tx_purge(struct adapter *);
205 static int lem_allocate_receive_structures(struct adapter *);
206 static int lem_allocate_transmit_structures(struct adapter *);
207 static bool lem_rxeof(struct adapter *, int, int *);
208 #ifndef __NO_STRICT_ALIGNMENT
209 static int lem_fixup_rx(struct adapter *);
211 static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
213 static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
215 static void lem_set_promisc(struct adapter *);
216 static void lem_disable_promisc(struct adapter *);
217 static void lem_set_multi(struct adapter *);
218 static void lem_update_link_status(struct adapter *);
219 static int lem_get_buf(struct adapter *, int);
220 static void lem_register_vlan(void *, struct ifnet *, u16);
221 static void lem_unregister_vlan(void *, struct ifnet *, u16);
222 static void lem_setup_vlan_hw_support(struct adapter *);
223 static int lem_xmit(struct adapter *, struct mbuf **);
224 static void lem_smartspeed(struct adapter *);
225 static int lem_82547_fifo_workaround(struct adapter *, int);
226 static void lem_82547_update_fifo_head(struct adapter *, int);
227 static int lem_82547_tx_fifo_reset(struct adapter *);
228 static void lem_82547_move_tail(void *);
229 static int lem_dma_malloc(struct adapter *, bus_size_t,
230 struct em_dma_alloc *, int);
231 static void lem_dma_free(struct adapter *, struct em_dma_alloc *);
232 static int lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
233 static void lem_print_nvm_info(struct adapter *);
234 static int lem_is_valid_ether_addr(u8 *);
235 static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
236 PDESC_ARRAY desc_array);
237 static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
238 static void lem_add_int_delay_sysctl(struct adapter *, const char *,
239 const char *, struct em_int_delay_info *, int, int);
240 static void lem_set_flow_cntrl(struct adapter *, const char *,
241 const char *, int *, int);
242 /* Management and WOL Support */
243 static void lem_init_manageability(struct adapter *);
244 static void lem_release_manageability(struct adapter *);
245 static void lem_get_hw_control(struct adapter *);
246 static void lem_release_hw_control(struct adapter *);
247 static void lem_get_wakeup(device_t);
248 static void lem_enable_wakeup(device_t);
249 static int lem_enable_phy_wakeup(struct adapter *);
250 static void lem_led_func(void *, int);
252 static void lem_intr(void *);
253 static int lem_irq_fast(void *);
254 static void lem_handle_rxtx(void *context, int pending);
255 static void lem_handle_link(void *context, int pending);
256 static void lem_add_rx_process_limit(struct adapter *, const char *,
257 const char *, int *, int);
259 #ifdef DEVICE_POLLING
260 static poll_handler_t lem_poll;
263 /*********************************************************************
264 * FreeBSD Device Interface Entry Points
265 *********************************************************************/
267 static device_method_t lem_methods[] = {
268 /* Device interface */
269 DEVMETHOD(device_probe, lem_probe),
270 DEVMETHOD(device_attach, lem_attach),
271 DEVMETHOD(device_detach, lem_detach),
272 DEVMETHOD(device_shutdown, lem_shutdown),
273 DEVMETHOD(device_suspend, lem_suspend),
274 DEVMETHOD(device_resume, lem_resume),
278 static driver_t lem_driver = {
279 "em", lem_methods, sizeof(struct adapter),
282 extern devclass_t em_devclass;
283 DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
284 MODULE_DEPEND(lem, pci, 1, 1, 1);
285 MODULE_DEPEND(lem, ether, 1, 1, 1);
287 /*********************************************************************
288 * Tunable default values.
289 *********************************************************************/
291 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
292 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
294 #define MAX_INTS_PER_SEC 8000
295 #define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256))
297 static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
298 static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
299 static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
300 static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
302 * increase lem_rxd and lem_txd to at least 2048 in netmap mode
303 * for better performance.
305 static int lem_rxd = EM_DEFAULT_RXD;
306 static int lem_txd = EM_DEFAULT_TXD;
307 static int lem_smart_pwr_down = FALSE;
309 /* Controls whether promiscuous also shows bad packets */
310 static int lem_debug_sbp = FALSE;
312 TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
313 TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
314 TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
315 TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
316 TUNABLE_INT("hw.em.rxd", &lem_rxd);
317 TUNABLE_INT("hw.em.txd", &lem_txd);
318 TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
319 TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
321 /* Interrupt style - default to fast */
322 static int lem_use_legacy_irq = 0;
323 TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq);
325 /* How many packets rxeof tries to clean at a time */
326 static int lem_rx_process_limit = 100;
327 TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
329 /* Flow control setting - default to FULL */
330 static int lem_fc_setting = e1000_fc_full;
331 TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
333 /* Global used in WOL setup with multiport cards */
334 static int global_quad_port_a = 0;
336 #ifdef DEV_NETMAP /* see ixgbe.c for details */
337 #include <dev/netmap/if_lem_netmap.h>
338 #endif /* DEV_NETMAP */
340 /*********************************************************************
341 * Device identification routine
343 * em_probe determines if the driver should be loaded on
344 * adapter based on PCI vendor/device id of the adapter.
346 * return BUS_PROBE_DEFAULT on success, positive on failure
347 *********************************************************************/
350 lem_probe(device_t dev)
352 char adapter_name[60];
353 u16 pci_vendor_id = 0;
354 u16 pci_device_id = 0;
355 u16 pci_subvendor_id = 0;
356 u16 pci_subdevice_id = 0;
357 em_vendor_info_t *ent;
359 INIT_DEBUGOUT("em_probe: begin");
361 pci_vendor_id = pci_get_vendor(dev);
362 if (pci_vendor_id != EM_VENDOR_ID)
365 pci_device_id = pci_get_device(dev);
366 pci_subvendor_id = pci_get_subvendor(dev);
367 pci_subdevice_id = pci_get_subdevice(dev);
369 ent = lem_vendor_info_array;
370 while (ent->vendor_id != 0) {
371 if ((pci_vendor_id == ent->vendor_id) &&
372 (pci_device_id == ent->device_id) &&
374 ((pci_subvendor_id == ent->subvendor_id) ||
375 (ent->subvendor_id == PCI_ANY_ID)) &&
377 ((pci_subdevice_id == ent->subdevice_id) ||
378 (ent->subdevice_id == PCI_ANY_ID))) {
379 sprintf(adapter_name, "%s %s",
380 lem_strings[ent->index],
382 device_set_desc_copy(dev, adapter_name);
383 return (BUS_PROBE_DEFAULT);
391 /*********************************************************************
392 * Device initialization routine
394 * The attach entry point is called when the driver is being loaded.
395 * This routine identifies the type of hardware, allocates all resources
396 * and initializes the hardware.
398 * return 0 on success, positive on failure
399 *********************************************************************/
/*
 * NOTE(review): this listing has many lines elided (local declarations,
 * error labels / goto unwind targets, returns, opening #ifdefs).  The code
 * below is kept byte-identical to the listing; restore missing lines from
 * upstream FreeBSD sys/dev/e1000/if_lem.c before building.
 */
402 lem_attach(device_t dev)
404 struct adapter *adapter;
408 INIT_DEBUGOUT("lem_attach: begin");
/* Bind softc to the device and create the core/TX/RX locks */
410 adapter = device_get_softc(dev);
411 adapter->dev = adapter->osdep.dev = dev;
412 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
413 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
414 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
417 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
418 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
419 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
420 lem_sysctl_nvm_info, "I", "NVM Information");
422 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
423 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
425 /* Determine hardware and mac info */
426 lem_identify_hardware(adapter);
428 /* Setup PCI resources */
429 if (lem_allocate_pci_resources(adapter)) {
430 device_printf(dev, "Allocation of PCI resources failed\n");
435 /* Do Shared Code initialization */
436 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
437 device_printf(dev, "Setup of Shared code failed\n");
442 e1000_get_bus_info(&adapter->hw);
444 /* Set up some sysctls for the tunable interrupt delays */
445 lem_add_int_delay_sysctl(adapter, "rx_int_delay",
446 "receive interrupt delay in usecs", &adapter->rx_int_delay,
447 E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
448 lem_add_int_delay_sysctl(adapter, "tx_int_delay",
449 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
450 E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
/* Absolute delay limits and ITR exist only on 82540 and newer MACs */
451 if (adapter->hw.mac.type >= e1000_82540) {
452 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
453 "receive interrupt delay limit in usecs",
454 &adapter->rx_abs_int_delay,
455 E1000_REGISTER(&adapter->hw, E1000_RADV),
456 lem_rx_abs_int_delay_dflt);
457 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
458 "transmit interrupt delay limit in usecs",
459 &adapter->tx_abs_int_delay,
460 E1000_REGISTER(&adapter->hw, E1000_TADV),
461 lem_tx_abs_int_delay_dflt);
462 lem_add_int_delay_sysctl(adapter, "itr",
463 "interrupt delay limit in usecs/4",
465 E1000_REGISTER(&adapter->hw, E1000_ITR),
469 /* Sysctls for limiting the amount of work done in the taskqueue */
470 lem_add_rx_process_limit(adapter, "rx_processing_limit",
471 "max number of rx packets to process", &adapter->rx_process_limit,
472 lem_rx_process_limit);
474 #ifdef NIC_SEND_COMBINING
475 /* Sysctls to control mitigation */
476 lem_add_rx_process_limit(adapter, "sc_enable",
477 "driver TDT mitigation", &adapter->sc_enable, 0);
478 #endif /* NIC_SEND_COMBINING */
479 #ifdef BATCH_DISPATCH
480 lem_add_rx_process_limit(adapter, "batch_enable",
481 "driver rx batch", &adapter->batch_enable, 0);
482 #endif /* BATCH_DISPATCH */
/* NOTE(review): the opening #ifdef NIC_PARAVIRT for this #endif is elided */
484 lem_add_rx_process_limit(adapter, "rx_retries",
485 "driver rx retries", &adapter->rx_retries, 0);
486 #endif /* NIC_PARAVIRT */
488 /* Sysctl for setting the interface flow control */
489 lem_set_flow_cntrl(adapter, "flow_control",
490 "flow control setting",
491 &adapter->fc_setting, lem_fc_setting);
494 * Validate number of transmit and receive descriptors. It
495 * must not exceed hardware maximum, and must be multiple
496 * of E1000_DBA_ALIGN.
498 if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
499 (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
500 (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
501 (lem_txd < EM_MIN_TXD)) {
502 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
503 EM_DEFAULT_TXD, lem_txd);
504 adapter->num_tx_desc = EM_DEFAULT_TXD;
506 adapter->num_tx_desc = lem_txd;
507 if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
508 (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
509 (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
510 (lem_rxd < EM_MIN_RXD)) {
511 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
512 EM_DEFAULT_RXD, lem_rxd);
513 adapter->num_rx_desc = EM_DEFAULT_RXD;
515 adapter->num_rx_desc = lem_rxd;
/* Default link/PHY policy: autonegotiate everything */
517 adapter->hw.mac.autoneg = DO_AUTO_NEG;
518 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
519 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
520 adapter->rx_buffer_len = 2048;
522 e1000_init_script_state_82541(&adapter->hw, TRUE);
523 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
526 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
527 adapter->hw.phy.mdix = AUTO_ALL_MODES;
528 adapter->hw.phy.disable_polarity_correction = FALSE;
529 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
533 * Set the frame limits assuming
534 * standard ethernet sized frames.
536 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
537 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
540 * This controls when hardware reports transmit completion
543 adapter->hw.mac.report_tx_early = 1;
/*
 * NOTE(review): the following paravirt CSB setup block appears without its
 * opening #ifdef NIC_PARAVIRT in this listing — confirm against upstream.
 */
546 device_printf(dev, "driver supports paravirt, subdev 0x%x\n",
547 adapter->hw.subsystem_device_id);
548 if (adapter->hw.subsystem_device_id == E1000_PARA_SUBDEV) {
551 device_printf(dev, "paravirt support on dev %p\n", adapter);
552 tsize = 4096; // XXX one page for the csb
553 if (lem_dma_malloc(adapter, tsize, &adapter->csb_mem, BUS_DMA_NOWAIT)) {
554 device_printf(dev, "Unable to allocate csb memory\n");
558 /* Setup the Base of the CSB */
559 adapter->csb = (struct paravirt_csb *)adapter->csb_mem.dma_vaddr;
560 /* force the first kick */
561 adapter->csb->host_need_txkick = 1; /* txring empty */
562 adapter->csb->guest_need_rxkick = 1; /* no rx packets */
563 bus_addr = adapter->csb_mem.dma_paddr;
564 lem_add_rx_process_limit(adapter, "csb_on",
565 "enable paravirt.", &adapter->csb->guest_csb_on, 0);
566 lem_add_rx_process_limit(adapter, "txc_lim",
567 "txc_lim", &adapter->csb->host_txcycles_lim, 1);
570 #define PA_SC(name, var, val) \
571 lem_add_rx_process_limit(adapter, name, name, var, val)
572 PA_SC("host_need_txkick",&adapter->csb->host_need_txkick, 1);
573 PA_SC("host_rxkick_at",&adapter->csb->host_rxkick_at, ~0);
574 PA_SC("guest_need_txkick",&adapter->csb->guest_need_txkick, 0);
575 PA_SC("guest_need_rxkick",&adapter->csb->guest_need_rxkick, 1);
576 PA_SC("tdt_reg_count",&adapter->tdt_reg_count, 0);
577 PA_SC("tdt_csb_count",&adapter->tdt_csb_count, 0);
578 PA_SC("tdt_int_count",&adapter->tdt_int_count, 0);
579 PA_SC("guest_need_kick_count",&adapter->guest_need_kick_count, 0);
580 /* tell the host where the block is */
581 E1000_WRITE_REG(&adapter->hw, E1000_CSBAH,
582 (u32)(bus_addr >> 32));
583 E1000_WRITE_REG(&adapter->hw, E1000_CSBAL,
586 #endif /* NIC_PARAVIRT */
/* Allocate the TX and RX descriptor rings (DMA-safe memory) */
588 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
591 /* Allocate Transmit Descriptor ring */
592 if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
593 device_printf(dev, "Unable to allocate tx_desc memory\n");
597 adapter->tx_desc_base =
598 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
600 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
603 /* Allocate Receive Descriptor ring */
604 if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
605 device_printf(dev, "Unable to allocate rx_desc memory\n");
609 adapter->rx_desc_base =
610 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
612 /* Allocate multicast array memory. */
613 adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
614 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
615 if (adapter->mta == NULL) {
616 device_printf(dev, "Can not allocate multicast setup array\n");
622 ** Start from a known state, this is
623 ** important in reading the nvm and
626 e1000_reset_hw(&adapter->hw);
628 /* Make sure we have a good EEPROM before we read from it */
629 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
631 ** Some PCI-E parts fail the first check due to
632 ** the link being in sleep state, call it again,
633 ** if it fails a second time its a real issue.
635 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
637 "The EEPROM Checksum Is Not Valid\n");
643 /* Copy the permanent MAC address out of the EEPROM */
644 if (e1000_read_mac_addr(&adapter->hw) < 0) {
645 device_printf(dev, "EEPROM read error while reading MAC"
651 if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
652 device_printf(dev, "Invalid MAC address\n");
657 /* Initialize the hardware */
658 if (lem_hardware_init(adapter)) {
659 device_printf(dev, "Unable to initialize the hardware\n");
664 /* Allocate transmit descriptors and buffers */
665 if (lem_allocate_transmit_structures(adapter)) {
666 device_printf(dev, "Could not setup transmit structures\n");
671 /* Allocate receive descriptors and buffers */
672 if (lem_allocate_receive_structures(adapter)) {
673 device_printf(dev, "Could not setup receive structures\n");
679 ** Do interrupt configuration
681 error = lem_allocate_irq(adapter);
686 * Get Wake-on-Lan and Management info for later use
690 /* Setup OS specific network interface */
691 if (lem_setup_interface(dev, adapter) != 0)
694 /* Initialize statistics */
695 lem_update_stats_counters(adapter);
697 adapter->hw.mac.get_link_status = 1;
698 lem_update_link_status(adapter);
700 /* Indicate SOL/IDER usage */
701 if (e1000_check_reset_block(&adapter->hw))
703 "PHY reset is blocked due to SOL/IDER session.\n");
705 /* Do we need workaround for 82544 PCI-X adapter? */
706 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
707 adapter->hw.mac.type == e1000_82544)
708 adapter->pcix_82544 = TRUE;
710 adapter->pcix_82544 = FALSE;
712 /* Register for VLAN events */
713 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
714 lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
715 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
716 lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
718 lem_add_hw_stats(adapter);
720 /* Non-AMT based hardware can now take control from firmware */
721 if (adapter->has_manage && !adapter->has_amt)
722 lem_get_hw_control(adapter);
724 /* Tell the stack that the interface is not active */
725 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
727 adapter->led_dev = led_create(lem_led_func, adapter,
728 device_get_nameunit(dev));
731 lem_netmap_attach(adapter);
732 #endif /* DEV_NETMAP */
733 INIT_DEBUGOUT("lem_attach: end");
/*
 * NOTE(review): error-unwind path below; the err_* labels and the success
 * "return (0);" are elided from this listing.
 */
738 lem_free_transmit_structures(adapter);
741 lem_release_hw_control(adapter);
742 lem_dma_free(adapter, &adapter->rxdma);
744 lem_dma_free(adapter, &adapter->txdma);
747 lem_dma_free(adapter, &adapter->csb_mem);
749 #endif /* NIC_PARAVIRT */
752 if (adapter->ifp != NULL)
753 if_free(adapter->ifp);
754 lem_free_pci_resources(adapter);
755 free(adapter->mta, M_DEVBUF);
756 EM_TX_LOCK_DESTROY(adapter);
757 EM_RX_LOCK_DESTROY(adapter);
758 EM_CORE_LOCK_DESTROY(adapter);
763 /*********************************************************************
764 * Device removal routine
766 * The detach entry point is called when the driver is being removed.
767 * This routine stops the adapter and deallocates all the resources
768 * that were allocated for driver operation.
770 * return 0 on success, positive on failure
771 *********************************************************************/
/*
 * NOTE(review): this listing has lines elided (storage class, braces,
 * lem_stop() call, netmap_detach(), if_free(), final return).  Code kept
 * byte-identical; restore from upstream if_lem.c before building.
 */
774 lem_detach(device_t dev)
776 struct adapter *adapter = device_get_softc(dev);
777 struct ifnet *ifp = adapter->ifp;
779 INIT_DEBUGOUT("em_detach: begin");
781 /* Make sure VLANS are not using driver */
782 if (adapter->ifp->if_vlantrunk != NULL) {
783 device_printf(dev,"Vlan in use, detach first\n");
787 #ifdef DEVICE_POLLING
788 if (ifp->if_capenable & IFCAP_POLLING)
789 ether_poll_deregister(ifp);
792 if (adapter->led_dev != NULL)
793 led_destroy(adapter->led_dev);
/* Stop the adapter and mark detach-in-progress so late callers bail out */
795 EM_CORE_LOCK(adapter);
797 adapter->in_detach = 1;
799 e1000_phy_hw_reset(&adapter->hw);
801 lem_release_manageability(adapter);
803 EM_TX_UNLOCK(adapter);
804 EM_CORE_UNLOCK(adapter);
806 /* Unregister VLAN events */
807 if (adapter->vlan_attach != NULL)
808 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
809 if (adapter->vlan_detach != NULL)
810 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
812 ether_ifdetach(adapter->ifp);
813 callout_drain(&adapter->timer);
814 callout_drain(&adapter->tx_fifo_timer);
/* NOTE(review): the matching #ifdef DEV_NETMAP / netmap_detach() lines are elided */
818 #endif /* DEV_NETMAP */
819 lem_free_pci_resources(adapter);
820 bus_generic_detach(dev);
823 lem_free_transmit_structures(adapter);
824 lem_free_receive_structures(adapter);
826 /* Free Transmit Descriptor ring */
827 if (adapter->tx_desc_base) {
828 lem_dma_free(adapter, &adapter->txdma);
829 adapter->tx_desc_base = NULL;
832 /* Free Receive Descriptor ring */
833 if (adapter->rx_desc_base) {
834 lem_dma_free(adapter, &adapter->rxdma);
835 adapter->rx_desc_base = NULL;
840 lem_dma_free(adapter, &adapter->csb_mem);
843 #endif /* NIC_PARAVIRT */
844 lem_release_hw_control(adapter);
845 free(adapter->mta, M_DEVBUF);
846 EM_TX_LOCK_DESTROY(adapter);
847 EM_RX_LOCK_DESTROY(adapter);
848 EM_CORE_LOCK_DESTROY(adapter);
853 /*********************************************************************
855 * Shutdown entry point
857 **********************************************************************/
860 lem_shutdown(device_t dev)
862 return lem_suspend(dev);
866 * Suspend/resume device methods.
869 lem_suspend(device_t dev)
871 struct adapter *adapter = device_get_softc(dev);
873 EM_CORE_LOCK(adapter);
875 lem_release_manageability(adapter);
876 lem_release_hw_control(adapter);
877 lem_enable_wakeup(dev);
879 EM_CORE_UNLOCK(adapter);
881 return bus_generic_suspend(dev);
885 lem_resume(device_t dev)
887 struct adapter *adapter = device_get_softc(dev);
888 struct ifnet *ifp = adapter->ifp;
890 EM_CORE_LOCK(adapter);
891 lem_init_locked(adapter);
892 lem_init_manageability(adapter);
893 EM_CORE_UNLOCK(adapter);
896 return bus_generic_resume(dev);
901 lem_start_locked(struct ifnet *ifp)
903 struct adapter *adapter = ifp->if_softc;
906 EM_TX_LOCK_ASSERT(adapter);
908 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
911 if (!adapter->link_active)
915 * Force a cleanup if number of TX descriptors
916 * available hits the threshold
918 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
920 /* Now do we at least have a minimal? */
921 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
922 adapter->no_tx_desc_avail1++;
927 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
929 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
933 * Encapsulation can modify our pointer, and or make it
934 * NULL on failure. In that event, we can't requeue.
936 if (lem_xmit(adapter, &m_head)) {
939 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
940 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
944 /* Send a copy of the frame to the BPF listener */
945 ETHER_BPF_MTAP(ifp, m_head);
947 /* Set timeout in case hardware has problems transmitting. */
948 adapter->watchdog_check = TRUE;
949 adapter->watchdog_time = ticks;
951 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
952 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
954 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE && adapter->csb &&
955 adapter->csb->guest_csb_on &&
956 !(adapter->csb->guest_need_txkick & 1)) {
957 adapter->csb->guest_need_txkick = 1;
958 adapter->guest_need_kick_count++;
959 // XXX memory barrier
960 lem_txeof(adapter); // XXX possibly clear IFF_DRV_OACTIVE
962 #endif /* NIC_PARAVIRT */
968 lem_start(struct ifnet *ifp)
970 struct adapter *adapter = ifp->if_softc;
973 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
974 lem_start_locked(ifp);
975 EM_TX_UNLOCK(adapter);
978 /*********************************************************************
981 * em_ioctl is called when the user wants to configure the
984 * return 0 on success, positive on failure
985 **********************************************************************/
/*
 * lem_ioctl - ifnet if_ioctl entry point.
 * Dispatches on `command` (SIOCSIFADDR, SIOCSIFMTU, SIOCSIFFLAGS,
 * SIOC(ADD|DEL)MULTI, SIOCSIFMEDIA/SIOCGIFMEDIA, SIOCSIFCAP, and a default
 * ether_ioctl() fallthrough). Returns 0 on success, positive errno on failure.
 * NOTE(review): elided listing — the switch statement, case labels, break
 * statements, and several locals (error, mask, reinit, max_frame_size) are
 * among the missing lines; comments below describe only the visible code.
 */
988 lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
990 struct adapter *adapter = ifp->if_softc;
991 struct ifreq *ifr = (struct ifreq *)data;
992 #if defined(INET) || defined(INET6)
993 struct ifaddr *ifa = (struct ifaddr *)data;
995 bool avoid_reset = FALSE;
/* Refuse ioctls while the driver is detaching. */
998 if (adapter->in_detach)
/*
 * SIOCSIFADDR handling: for INET/INET6 addresses, avoid a full
 * reinit (and the link renegotiation it causes) where possible.
 */
1004 if (ifa->ifa_addr->sa_family == AF_INET)
1008 if (ifa->ifa_addr->sa_family == AF_INET6)
1012 ** Calling init results in link renegotiation,
1013 ** so we avoid doing it when possible.
1016 ifp->if_flags |= IFF_UP;
1017 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1020 if (!(ifp->if_flags & IFF_NOARP))
1021 arp_ifinit(ifp, ifa);
1024 error = ether_ioctl(ifp, command, data);
/*
 * SIOCSIFMTU: validate requested MTU against the per-MAC maximum
 * frame size, then store it and reinitialize under the core lock.
 */
1030 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1032 EM_CORE_LOCK(adapter);
1033 switch (adapter->hw.mac.type) {
1035 max_frame_size = ETHER_MAX_LEN;
1038 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1040 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1042 EM_CORE_UNLOCK(adapter);
1047 ifp->if_mtu = ifr->ifr_mtu;
1048 adapter->max_frame_size =
1049 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1050 lem_init_locked(adapter);
1051 EM_CORE_UNLOCK(adapter);
/*
 * SIOCSIFFLAGS: if only PROMISC/ALLMULTI changed while running,
 * just reprogram promiscuity instead of a full reinit.
 */
1055 IOCTL_DEBUGOUT("ioctl rcv'd:\
1056 SIOCSIFFLAGS (Set Interface Flags)");
1057 EM_CORE_LOCK(adapter);
1058 if (ifp->if_flags & IFF_UP) {
1059 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1060 if ((ifp->if_flags ^ adapter->if_flags) &
1061 (IFF_PROMISC | IFF_ALLMULTI)) {
1062 lem_disable_promisc(adapter);
1063 lem_set_promisc(adapter);
1066 lem_init_locked(adapter);
/* Interface going down: stop it (the lem_stop call is elided). */
1068 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1069 EM_TX_LOCK(adapter);
1071 EM_TX_UNLOCK(adapter);
/* Remember flags so the next SIOCSIFFLAGS can compute the delta. */
1073 adapter->if_flags = ifp->if_flags;
1074 EM_CORE_UNLOCK(adapter);
/*
 * SIOC(ADD|DEL)MULTI: rewrite the multicast filter with interrupts
 * masked; 82542 rev2 additionally needs its receive unit reset.
 */
1078 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
1079 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1080 EM_CORE_LOCK(adapter);
1081 lem_disable_intr(adapter);
1082 lem_set_multi(adapter);
1083 if (adapter->hw.mac.type == e1000_82542 &&
1084 adapter->hw.revision_id == E1000_REVISION_2) {
1085 lem_initialize_receive_unit(adapter);
1087 #ifdef DEVICE_POLLING
/* Leave interrupts masked if polling owns the device. */
1088 if (!(ifp->if_capenable & IFCAP_POLLING))
1090 lem_enable_intr(adapter);
1091 EM_CORE_UNLOCK(adapter);
/*
 * SIOCxIFMEDIA: media changes are rejected while a SOL/IDER
 * (manageability) session holds the PHY.
 */
1095 /* Check SOL/IDER usage */
1096 EM_CORE_LOCK(adapter);
1097 if (e1000_check_reset_block(&adapter->hw)) {
1098 EM_CORE_UNLOCK(adapter);
1099 device_printf(adapter->dev, "Media change is"
1100 " blocked due to SOL/IDER session.\n");
1103 EM_CORE_UNLOCK(adapter);
1105 IOCTL_DEBUGOUT("ioctl rcv'd: \
1106 SIOCxIFMEDIA (Get/Set Interface Media)");
1107 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/*
 * SIOCSIFCAP: toggle capabilities named in the XOR mask. Polling
 * registration must succeed before interrupts are masked.
 */
1113 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1115 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1116 #ifdef DEVICE_POLLING
1117 if (mask & IFCAP_POLLING) {
1118 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1119 error = ether_poll_register(lem_poll, ifp);
1122 EM_CORE_LOCK(adapter);
1123 lem_disable_intr(adapter);
1124 ifp->if_capenable |= IFCAP_POLLING;
1125 EM_CORE_UNLOCK(adapter);
1127 error = ether_poll_deregister(ifp);
1128 /* Enable interrupt even in error case */
1129 EM_CORE_LOCK(adapter);
1130 lem_enable_intr(adapter);
1131 ifp->if_capenable &= ~IFCAP_POLLING;
1132 EM_CORE_UNLOCK(adapter);
1136 if (mask & IFCAP_HWCSUM) {
1137 ifp->if_capenable ^= IFCAP_HWCSUM;
1140 if (mask & IFCAP_VLAN_HWTAGGING) {
1141 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
/* WOL sub-capabilities only toggle if the hardware advertises WOL. */
1144 if ((mask & IFCAP_WOL) &&
1145 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1146 if (mask & IFCAP_WOL_MCAST)
1147 ifp->if_capenable ^= IFCAP_WOL_MCAST;
1148 if (mask & IFCAP_WOL_MAGIC)
1149 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
/* Reinit if a toggled capability requires it and we are running. */
1151 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1153 VLAN_CAPABILITIES(ifp);
/* default: hand anything unrecognized to the generic ether layer. */
1158 error = ether_ioctl(ifp, command, data);
1166 /*********************************************************************
1169 * This routine is used in two ways. It is used by the stack as
1170 * init entry point in network interface structure. It is also used
1171 * by the driver as a hw/sw initialization routine to get to a
1174 * return 0 on success, positive on failure
1175 **********************************************************************/
/*
 * lem_init_locked - bring the adapter to a fully running state.
 * Caller must hold the core lock (asserted below). Programs the packet
 * buffer split, MAC address, TX/RX rings, multicast table, VLAN support,
 * promiscuity, then marks the interface RUNNING and arms the stats timer.
 * NOTE(review): elided listing — locals (pba, ctrl), the lem_stop() call
 * between the TX lock/unlock pair, case labels, else branches, and error
 * returns are among the missing lines.
 */
1178 lem_init_locked(struct adapter *adapter)
1180 struct ifnet *ifp = adapter->ifp;
1181 device_t dev = adapter->dev;
1184 INIT_DEBUGOUT("lem_init: begin");
1186 EM_CORE_LOCK_ASSERT(adapter);
/* Quiesce the device first (the call between lock/unlock is elided). */
1188 EM_TX_LOCK(adapter);
1190 EM_TX_UNLOCK(adapter);
1193 * Packet Buffer Allocation (PBA)
1194 * Writing PBA sets the receive portion of the buffer
1195 * the remainder is used for the transmit buffer.
1197 * Devices before the 82547 had a Packet Buffer of 64K.
1198 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1199 * After the 82547 the buffer was reduced to 40K.
1200 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1201 * Note: default does not leave enough room for Jumbo Frame >10k.
1203 switch (adapter->hw.mac.type) {
1205 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1206 if (adapter->max_frame_size > 8192)
1207 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1209 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 also tracks TX FIFO state for its half-duplex workaround. */
1210 adapter->tx_fifo_head = 0;
1211 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1212 adapter->tx_fifo_size =
1213 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1216 /* Devices before 82547 had a Packet Buffer of 64K. */
1217 if (adapter->max_frame_size > 8192)
1218 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1220 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1223 INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
1224 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1226 /* Get the latest mac address, User can use a LAA */
1227 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1230 /* Put the address into the Receive Address Array */
1231 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1233 /* Initialize the hardware */
1234 if (lem_hardware_init(adapter)) {
1235 device_printf(dev, "Unable to initialize the hardware\n");
1238 lem_update_link_status(adapter);
1240 /* Setup VLAN support, basic and offload if available */
1241 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1243 /* Set hardware offload abilities */
1244 ifp->if_hwassist = 0;
/* TX checksum offload exists only on 82543 and newer MACs. */
1245 if (adapter->hw.mac.type >= e1000_82543) {
1246 if (ifp->if_capenable & IFCAP_TXCSUM)
1247 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1250 /* Configure for OS presence */
1251 lem_init_manageability(adapter);
1253 /* Prepare transmit descriptors and buffers */
1254 lem_setup_transmit_structures(adapter);
1255 lem_initialize_transmit_unit(adapter);
1257 /* Setup Multicast table */
1258 lem_set_multi(adapter);
1260 /* Prepare receive descriptors and buffers */
1261 if (lem_setup_receive_structures(adapter)) {
1262 device_printf(dev, "Could not setup receive structures\n");
/* Failure path: stop under the TX lock (stop call elided) and bail. */
1263 EM_TX_LOCK(adapter);
1265 EM_TX_UNLOCK(adapter);
1268 lem_initialize_receive_unit(adapter);
1270 /* Use real VLAN Filter support? */
1271 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1272 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
1273 /* Use real VLAN Filter support */
1274 lem_setup_vlan_hw_support(adapter);
/* Otherwise just enable VLAN tag stripping via CTRL.VME. */
1277 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1278 ctrl |= E1000_CTRL_VME;
1279 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1283 /* Don't lose promiscuous settings */
1284 lem_set_promisc(adapter);
1286 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1287 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1289 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1290 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1292 #ifdef DEVICE_POLLING
1294 * Only enable interrupts if we are not polling, make sure
1295 * they are off otherwise.
1297 if (ifp->if_capenable & IFCAP_POLLING)
1298 lem_disable_intr(adapter);
1300 #endif /* DEVICE_POLLING */
1301 lem_enable_intr(adapter);
1303 /* AMT based hardware can now take control from firmware */
1304 if (adapter->has_manage && adapter->has_amt)
1305 lem_get_hw_control(adapter);
/*
 * lem_init - unlocked wrapper around lem_init_locked().
 * NOTE(review): elided listing — the function signature (presumably
 * `static void lem_init(void *arg)` given the void-pointer arg; TODO
 * confirm against the full source) and braces are among the missing lines.
 */
1311 struct adapter *adapter = arg;
1313 EM_CORE_LOCK(adapter);
1314 lem_init_locked(adapter);
1315 EM_CORE_UNLOCK(adapter);
1319 #ifdef DEVICE_POLLING
1320 /*********************************************************************
1322 * Legacy polling routine
1324 *********************************************************************/
/*
 * lem_poll - DEVICE_POLLING handler (registered via ether_poll_register).
 * On POLL_AND_CHECK_STATUS, reads ICR and refreshes link state on
 * RXSEQ/LSC; then harvests up to `count` RX packets and restarts TX.
 * NOTE(review): elided listing — return type/value, braces, and the TX
 * cleanup call under the TX lock are among the missing lines.
 */
1326 lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1328 struct adapter *adapter = ifp->if_softc;
1329 u32 reg_icr, rx_done = 0;
/* Bail out if the interface was brought down since registration. */
1331 EM_CORE_LOCK(adapter);
1332 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1333 EM_CORE_UNLOCK(adapter);
1337 if (cmd == POLL_AND_CHECK_STATUS) {
1338 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* Link state change: restart the timer after refreshing status. */
1339 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1340 callout_stop(&adapter->timer);
1341 adapter->hw.mac.get_link_status = 1;
1342 lem_update_link_status(adapter);
1343 callout_reset(&adapter->timer, hz,
1344 lem_local_timer, adapter);
1347 EM_CORE_UNLOCK(adapter);
1349 lem_rxeof(adapter, count, &rx_done);
1351 EM_TX_LOCK(adapter);
/* Kick TX again if the stack queued frames while we were polling. */
1353 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1354 lem_start_locked(ifp);
1355 EM_TX_UNLOCK(adapter);
1358 #endif /* DEVICE_POLLING */
1360 /*********************************************************************
1362 * Legacy Interrupt Service routine
1364 *********************************************************************/
/*
 * lem_intr - legacy (non-fast) interrupt service routine.
 * Reads and thereby acknowledges ICR, counts RX overruns, handles link
 * transitions, then processes RX and restarts TX.
 * NOTE(review): elided listing — the signature (a void-pointer `arg`
 * handler), the reg_icr declaration, braces, and returns are among the
 * missing lines.
 */
1368 struct adapter *adapter = arg;
1369 struct ifnet *ifp = adapter->ifp;
/* Ignore the interrupt if polling owns the device or we're down. */
1373 if ((ifp->if_capenable & IFCAP_POLLING) ||
1374 ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0))
1377 EM_CORE_LOCK(adapter);
1378 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1379 if (reg_icr & E1000_ICR_RXO)
1380 adapter->rx_overruns++;
/* All-ones means the device is gone (hot-unplug); zero means not ours. */
1382 if ((reg_icr == 0xffffffff) || (reg_icr == 0)) {
1383 EM_CORE_UNLOCK(adapter);
/* Link state change: refresh link, purge stale TX, rearm timer. */
1387 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1388 callout_stop(&adapter->timer);
1389 adapter->hw.mac.get_link_status = 1;
1390 lem_update_link_status(adapter);
1391 /* Deal with TX cruft when link lost */
1392 lem_tx_purge(adapter);
1393 callout_reset(&adapter->timer, hz,
1394 lem_local_timer, adapter);
1395 EM_CORE_UNLOCK(adapter);
1399 EM_CORE_UNLOCK(adapter);
/* -1 = no RX packet limit; NULL = caller doesn't need a done count. */
1400 lem_rxeof(adapter, -1, NULL);
1402 EM_TX_LOCK(adapter);
1404 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1405 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1406 lem_start_locked(ifp);
1407 EM_TX_UNLOCK(adapter);
/*
 * lem_handle_link - taskqueue handler for deferred link-change work
 * (enqueued by lem_irq_fast). Refreshes link status, purges stale TX
 * descriptors when the link dropped, and rearms the stats timer.
 * NOTE(review): elided listing — return type, braces, and the early
 * return body are among the missing lines.
 */
1413 lem_handle_link(void *context, int pending)
1415 struct adapter *adapter = context;
1416 struct ifnet *ifp = adapter->ifp;
/* Nothing to do if the interface is not up. */
1418 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1421 EM_CORE_LOCK(adapter);
1422 callout_stop(&adapter->timer);
1423 lem_update_link_status(adapter);
1424 /* Deal with TX cruft when link lost */
1425 lem_tx_purge(adapter);
1426 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1427 EM_CORE_UNLOCK(adapter);
1431 /* Combined RX/TX handler, used by Legacy and MSI */
/*
 * lem_handle_rxtx - taskqueue handler enqueued by lem_irq_fast.
 * Processes RX up to rx_process_limit, restarts TX, and re-enqueues
 * itself while RX work remains; finally re-enables interrupts.
 * NOTE(review): elided listing — return type, braces, and the TX
 * cleanup call under the TX lock are among the missing lines.
 */
1433 lem_handle_rxtx(void *context, int pending)
1435 struct adapter *adapter = context;
1436 struct ifnet *ifp = adapter->ifp;
1439 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* `more` is true when rxeof hit the limit with packets still pending. */
1440 bool more = lem_rxeof(adapter, adapter->rx_process_limit, NULL);
1441 EM_TX_LOCK(adapter);
1443 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1444 lem_start_locked(ifp);
1445 EM_TX_UNLOCK(adapter);
/* Re-queue ourselves instead of spinning here under the lock. */
1447 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
/* Re-enable interrupts only once all deferred work is drained. */
1452 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1453 lem_enable_intr(adapter);
1456 /*********************************************************************
1458 * Fast Legacy/MSI Combined Interrupt Service routine
1460 *********************************************************************/
/*
 * lem_irq_fast - fast (filter) interrupt handler for legacy/MSI.
 * Runs in interrupt filter context: reads ICR, rejects stray interrupts,
 * masks further interrupts, and defers real work to taskqueues.
 * Returns FILTER_STRAY or FILTER_HANDLED.
 * NOTE(review): elided listing — return type, braces, the reg_icr
 * declaration, and the "not our interrupt" test are among the missing
 * lines.
 */
1462 lem_irq_fast(void *arg)
1464 struct adapter *adapter = arg;
1470 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* All-ones read: device removed or in reset — not our interrupt. */
1473 if (reg_icr == 0xffffffff)
1474 return FILTER_STRAY;
1476 /* Definitely not our interrupt. */
1478 return FILTER_STRAY;
1481 * Mask interrupts until the taskqueue is finished running. This is
1482 * cheap, just assume that it is needed. This also works around the
1483 * MSI message reordering errata on certain systems.
1485 lem_disable_intr(adapter);
1486 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1488 /* Link status change */
1489 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1490 adapter->hw.mac.get_link_status = 1;
1491 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1494 if (reg_icr & E1000_ICR_RXO)
1495 adapter->rx_overruns++;
1496 return FILTER_HANDLED;
1500 /*********************************************************************
1502 * Media Ioctl callback
1504 * This routine is called whenever the user queries the status of
1505 * the interface using ifconfig.
1507 **********************************************************************/
/*
 * lem_media_status - ifmedia status callback (ifconfig queries).
 * Fills ifmr with validity/active flags and, when link is up, the
 * media subtype (fiber/serdes vs copper speed) and duplex.
 * NOTE(review): elided listing — return type, braces, the copper `else`
 * branch, and switch case labels (speeds 10/100/1000) are among the
 * missing lines.
 */
1509 lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1511 struct adapter *adapter = ifp->if_softc;
1512 u_char fiber_type = IFM_1000_SX;
1514 INIT_DEBUGOUT("lem_media_status: begin");
1516 EM_CORE_LOCK(adapter);
1517 lem_update_link_status(adapter);
1519 ifmr->ifm_status = IFM_AVALID;
1520 ifmr->ifm_active = IFM_ETHER;
/* No link: report AVALID only (ACTIVE stays clear) and return early. */
1522 if (!adapter->link_active) {
1523 EM_CORE_UNLOCK(adapter);
1527 ifmr->ifm_status |= IFM_ACTIVE;
/* Fiber/serdes media: always gigabit full duplex; 82545 uses 1000LX. */
1529 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1530 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1531 if (adapter->hw.mac.type == e1000_82545)
1532 fiber_type = IFM_1000_LX;
1533 ifmr->ifm_active |= fiber_type | IFM_FDX;
/* Copper: map the negotiated speed and duplex onto ifmedia flags. */
1535 switch (adapter->link_speed) {
1537 ifmr->ifm_active |= IFM_10_T;
1540 ifmr->ifm_active |= IFM_100_TX;
1543 ifmr->ifm_active |= IFM_1000_T;
1546 if (adapter->link_duplex == FULL_DUPLEX)
1547 ifmr->ifm_active |= IFM_FDX;
1549 ifmr->ifm_active |= IFM_HDX;
1551 EM_CORE_UNLOCK(adapter);
1554 /*********************************************************************
1556 * Media Ioctl callback
1558 * This routine is called when the user changes speed/duplex using
1559 * media/mediopt option with ifconfig.
1561 **********************************************************************/
/*
 * lem_media_change - ifmedia change callback (ifconfig media/mediaopt).
 * Translates the requested ifmedia subtype into the shared-code autoneg
 * and forced speed/duplex settings, then reinitializes the adapter.
 * NOTE(review): elided listing — return type/value, braces, and the
 * case labels (IFM_AUTO, IFM_1000_T/SX, IFM_100_TX, IFM_10_T) are among
 * the missing lines; the groupings below are inferred from the advertise
 * constants — TODO confirm against the full source.
 */
1563 lem_media_change(struct ifnet *ifp)
1565 struct adapter *adapter = ifp->if_softc;
1566 struct ifmedia *ifm = &adapter->media;
1568 INIT_DEBUGOUT("lem_media_change: begin");
/* Reject anything that isn't an Ethernet media word. */
1570 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1573 EM_CORE_LOCK(adapter);
1574 switch (IFM_SUBTYPE(ifm->ifm_media)) {
/* Autoselect: advertise the full default set. */
1576 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1577 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
/* Gigabit: autoneg required, advertise 1000 full only. */
1582 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1583 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
/* 100 Mb: force speed, duplex chosen by the IFM_FDX media flag. */
1586 adapter->hw.mac.autoneg = FALSE;
1587 adapter->hw.phy.autoneg_advertised = 0;
1588 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1589 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1591 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
/* 10 Mb: same forced-speed scheme as 100 Mb. */
1594 adapter->hw.mac.autoneg = FALSE;
1595 adapter->hw.phy.autoneg_advertised = 0;
1596 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1597 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1599 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1602 device_printf(adapter->dev, "Unsupported media type\n");
/* Apply the new settings with a full reinit. */
1605 lem_init_locked(adapter);
1606 EM_CORE_UNLOCK(adapter);
1611 /*********************************************************************
1613 * This routine maps the mbufs to tx descriptors.
1615 * return 0 on success, positive on failure
1616 **********************************************************************/
/*
 * lem_xmit - map one mbuf chain onto TX descriptors and notify hardware.
 * Returns 0 on success, positive errno on failure. On failure the mbuf
 * may be freed and *m_headp NULLed (caller must not requeue in that case
 * — see the caller's comment in lem_start_locked).
 * NOTE(review): elided listing — braces, returns, error-path bodies
 * (m_freem / *m_headp = NULL), `last = i` bookkeeping, the `map`
 * declaration, and the ENOMEM retry loop structure are among the
 * missing lines. Comments below describe only the visible code.
 */
1619 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1621 bus_dma_segment_t segs[EM_MAX_SCATTER];
1623 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1624 struct e1000_tx_desc *ctxd = NULL;
1625 struct mbuf *m_head;
1626 u32 txd_upper, txd_lower, txd_used, txd_saved;
1627 int error, nsegs, i, j, first, last = 0;
1630 txd_upper = txd_lower = txd_used = txd_saved = 0;
1633 ** When doing checksum offload, it is critical to
1634 ** make sure the first mbuf has more than header,
1635 ** because that routine expects data to be present.
1637 if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
1638 (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
1639 m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
1646 * Map the packet for DMA
1648 * Capture the first descriptor index,
1649 * this descriptor will have the index
1650 * of the EOP which is the only one that
1651 * now gets a DONE bit writeback.
1653 first = adapter->next_avail_tx_desc;
1654 tx_buffer = &adapter->tx_buffer_area[first];
/* Remember the buffer whose map we borrow; maps are swapped back later. */
1655 tx_buffer_mapped = tx_buffer;
1656 map = tx_buffer->map;
1658 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1659 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1662 * There are two types of errors we can (try) to handle:
1663 * - EFBIG means the mbuf chain was too long and bus_dma ran
1664 * out of segments. Defragment the mbuf chain and try again.
1665 * - ENOMEM means bus_dma could not obtain enough bounce buffers
1666 * at this point in time. Defer sending and try again later.
1667 * All other errors, in particular EINVAL, are fatal and prevent the
1668 * mbuf chain from ever going through. Drop it and report error.
1670 if (error == EFBIG) {
1673 m = m_defrag(*m_headp, M_NOWAIT);
1675 adapter->mbuf_alloc_failed++;
/* Retry the DMA load once with the defragmented chain. */
1683 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1684 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1687 adapter->no_tx_dma_setup++;
1692 } else if (error != 0) {
1693 adapter->no_tx_dma_setup++;
/* Keep 2 descriptors spare (hardware can't use a completely full ring). */
1697 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1698 adapter->no_tx_desc_avail2++;
1699 bus_dmamap_unload(adapter->txtag, map);
1704 /* Do hardware assists */
1705 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1706 lem_transmit_checksum_setup(adapter, m_head,
1707 &txd_upper, &txd_lower);
1709 i = adapter->next_avail_tx_desc;
/* 82544-on-PCIX saves the start index so a mid-fill failure can rewind. */
1710 if (adapter->pcix_82544)
1713 /* Set up our transmit descriptors */
1714 for (j = 0; j < nsegs; j++) {
1716 bus_addr_t seg_addr;
1717 /* If adapter is 82544 and on PCIX bus */
1718 if(adapter->pcix_82544) {
1719 DESC_ARRAY desc_array;
1720 u32 array_elements, counter;
1722 * Check the Address and Length combination and
1723 * split the data accordingly
1725 array_elements = lem_fill_descriptors(segs[j].ds_addr,
1726 segs[j].ds_len, &desc_array);
1727 for (counter = 0; counter < array_elements; counter++) {
/* Ring exhausted mid-fill: rewind and fail the whole packet. */
1728 if (txd_used == adapter->num_tx_desc_avail) {
1729 adapter->next_avail_tx_desc = txd_saved;
1730 adapter->no_tx_desc_avail2++;
1731 bus_dmamap_unload(adapter->txtag, map);
1734 tx_buffer = &adapter->tx_buffer_area[i];
1735 ctxd = &adapter->tx_desc_base[i];
1736 ctxd->buffer_addr = htole64(
1737 desc_array.descriptor[counter].address);
1738 ctxd->lower.data = htole32(
1739 (adapter->txd_cmd | txd_lower | (u16)
1740 desc_array.descriptor[counter].length));
1742 htole32((txd_upper));
/* Wrap the ring index. */
1744 if (++i == adapter->num_tx_desc)
1746 tx_buffer->m_head = NULL;
1747 tx_buffer->next_eop = -1;
/* Common (non-82544/PCIX) path: one descriptor per DMA segment. */
1751 tx_buffer = &adapter->tx_buffer_area[i];
1752 ctxd = &adapter->tx_desc_base[i];
1753 seg_addr = segs[j].ds_addr;
1754 seg_len = segs[j].ds_len;
1755 ctxd->buffer_addr = htole64(seg_addr);
1756 ctxd->lower.data = htole32(
1757 adapter->txd_cmd | txd_lower | seg_len);
1761 if (++i == adapter->num_tx_desc)
1763 tx_buffer->m_head = NULL;
1764 tx_buffer->next_eop = -1;
1768 adapter->next_avail_tx_desc = i;
/* Account for descriptors consumed (split count differs on 82544/PCIX). */
1770 if (adapter->pcix_82544)
1771 adapter->num_tx_desc_avail -= txd_used;
1773 adapter->num_tx_desc_avail -= nsegs;
1775 if (m_head->m_flags & M_VLANTAG) {
1776 /* Set the vlan id. */
1777 ctxd->upper.fields.special =
1778 htole16(m_head->m_pkthdr.ether_vtag);
1779 /* Tell hardware to add tag */
1780 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
/* The LAST buffer keeps the mbuf; the map borrowed from `first` is
 * swapped so unload on completion hits the right map. */
1783 tx_buffer->m_head = m_head;
1784 tx_buffer_mapped->map = tx_buffer->map;
1785 tx_buffer->map = map;
1786 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1789 * Last Descriptor of Packet
1790 * needs End Of Packet (EOP)
1791 * and Report Status (RS)
1794 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1796 * Keep track in the first buffer which
1797 * descriptor will be written back
1799 tx_buffer = &adapter->tx_buffer_area[first];
1800 tx_buffer->next_eop = last;
1801 adapter->watchdog_time = ticks;
1804 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1805 * that this frame is available to transmit.
1807 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1808 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* NIC_PARAVIRT: publish the tail via the shared CSB instead of/with MMIO. */
1812 adapter->csb->guest_tdt = i;
1813 /* XXX memory barrier ? */
1814 if (adapter->csb->guest_csb_on &&
1815 !(adapter->csb->host_need_txkick & 1)) {
1816 /* XXX maybe useless
1817 * clean the ring. maybe do it before ?
1818 * maybe a little bit of histeresys ?
1820 if (adapter->num_tx_desc_avail <= 64) {// XXX
1826 #endif /* NIC_PARAVIRT */
1828 #ifdef NIC_SEND_COMBINING
/* Send combining: batch TDT writes in shadow_tdt until an intr fires. */
1829 if (adapter->sc_enable) {
1830 if (adapter->shadow_tdt & MIT_PENDING_INT) {
1831 /* signal intr and data pending */
1832 adapter->shadow_tdt = MIT_PENDING_TDT | (i & 0xffff);
1835 adapter->shadow_tdt = MIT_PENDING_INT;
1838 #endif /* NIC_SEND_COMBINING */
/* 82547 half-duplex FIFO-hang workaround defers the tail write. */
1840 if (adapter->hw.mac.type == e1000_82547 &&
1841 adapter->link_duplex == HALF_DUPLEX)
1842 lem_82547_move_tail(adapter);
1844 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1845 if (adapter->hw.mac.type == e1000_82547)
1846 lem_82547_update_fifo_head(adapter,
1847 m_head->m_pkthdr.len);
1853 /*********************************************************************
1855 * 82547 workaround to avoid controller hang in half-duplex environment.
1856 * The workaround is to avoid queuing a large packet that would span
1857 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1858 * in this case. We do that only when FIFO is quiescent.
1860 **********************************************************************/
/*
 * lem_82547_move_tail - deferred TDT advance for the 82547 half-duplex
 * FIFO-hang workaround. Walks descriptors between the hardware tail and
 * the software tail accumulating the pending byte length; if the FIFO
 * check says a wrap would hang, retries via a 1-tick callout instead of
 * writing TDT.
 * NOTE(review): elided listing — return type, braces, the `eop`
 * declaration, the ring-wrap body, and the use of `eop` to stop at
 * packet boundaries are among the missing lines.
 */
1862 lem_82547_move_tail(void *arg)
1864 struct adapter *adapter = arg;
1865 struct e1000_tx_desc *tx_desc;
1866 u16 hw_tdt, sw_tdt, length = 0;
1869 EM_TX_LOCK_ASSERT(adapter);
1871 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
1872 sw_tdt = adapter->next_avail_tx_desc;
1874 while (hw_tdt != sw_tdt) {
1875 tx_desc = &adapter->tx_desc_base[hw_tdt];
1876 length += tx_desc->lower.flags.length;
1877 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1878 if (++hw_tdt == adapter->num_tx_desc)
/* FIFO not safe yet: retry shortly rather than risk the hang. */
1882 if (lem_82547_fifo_workaround(adapter, length)) {
1883 adapter->tx_fifo_wrk_cnt++;
1884 callout_reset(&adapter->tx_fifo_timer, 1,
1885 lem_82547_move_tail, adapter);
/* Safe: publish the new tail and account the FIFO consumption. */
1888 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
1889 lem_82547_update_fifo_head(adapter, length);
/*
 * lem_82547_fifo_workaround - decide whether queuing `len` more bytes
 * would span the internal TX FIFO boundary on a half-duplex 82547.
 * NOTE(review): elided listing — return type, braces, and the return
 * statements (including the success/failure values) are among the
 * missing lines, so the exact return convention can't be confirmed here.
 */
1896 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1898 int fifo_space, fifo_pkt_len;
/* FIFO accounting is done in EM_FIFO_HDR-sized units. */
1900 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1902 if (adapter->link_duplex == HALF_DUPLEX) {
1903 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
/* Packet would cross the FIFO wrap point: try to reset the FIFO. */
1905 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1906 if (lem_82547_tx_fifo_reset(adapter))
/*
 * lem_82547_update_fifo_head - advance the software model of the 82547
 * TX FIFO head by the (header-padded, rounded) length just queued,
 * wrapping at tx_fifo_size.
 */
1917 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1919 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1921 /* tx_fifo_head is always 16 byte aligned */
1922 adapter->tx_fifo_head += fifo_pkt_len;
1923 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1924 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * lem_82547_tx_fifo_reset - reset the 82547 internal TX FIFO pointers.
 * Only acts when the TX unit is fully quiescent: descriptor ring empty
 * (TDT == TDH), FIFO pointers caught up (TDFT == TDFH, TDFTS == TDFHS),
 * and no packets buffered in the FIFO (TDFPC == 0). Disables TX while
 * rewinding all four FIFO pointers to tx_head_addr, then re-enables.
 * NOTE(review): elided listing — return type, braces, the `tctl`
 * declaration, and the return statements are among the missing lines.
 */
1930 lem_82547_tx_fifo_reset(struct adapter *adapter)
/* Proceed only if every TX-quiescence condition below holds. */
1934 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1935 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1936 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1937 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1938 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1939 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1940 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1941 /* Disable TX unit */
1942 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1943 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1944 tctl & ~E1000_TCTL_EN);
1946 /* Reset FIFO pointers */
1947 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1948 adapter->tx_head_addr);
1949 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1950 adapter->tx_head_addr);
1951 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1952 adapter->tx_head_addr);
1953 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1954 adapter->tx_head_addr);
1956 /* Re-enable TX unit */
1957 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
/* Flush posted writes before resuming software accounting. */
1958 E1000_WRITE_FLUSH(&adapter->hw);
1960 adapter->tx_fifo_head = 0;
1961 adapter->tx_fifo_reset_cnt++;
/*
 * lem_set_promisc - program RCTL promiscuity from the interface flags:
 * IFF_PROMISC enables unicast+multicast promiscuous (UPE|MPE);
 * otherwise IFF_ALLMULTI enables multicast promiscuous only (MPE).
 * NOTE(review): elided listing — return type, braces, and the reg_rctl
 * declaration are among the missing lines.
 */
1971 lem_set_promisc(struct adapter *adapter)
1973 struct ifnet *ifp = adapter->ifp;
/* Read-modify-write the receive control register. */
1976 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1978 if (ifp->if_flags & IFF_PROMISC) {
1979 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1980 /* Turn this on if you want to see bad packets */
1982 reg_rctl |= E1000_RCTL_SBP;
1983 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1984 } else if (ifp->if_flags & IFF_ALLMULTI) {
1985 reg_rctl |= E1000_RCTL_MPE;
1986 reg_rctl &= ~E1000_RCTL_UPE;
1987 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/*
 * lem_disable_promisc - clear promiscuous RCTL bits. Unicast promiscuous
 * (UPE) and bad-packet storage (SBP) are always cleared; multicast
 * promiscuous (MPE) is kept when the multicast group count is at the
 * hardware maximum, since the filter table can't hold them all.
 * NOTE(review): elided listing — return type, braces, the mcnt
 * declaration/initialization, the IF_ADDR_LOCK line of the pre-800000
 * branch, and the `continue`/count increment inside the loop are among
 * the missing lines.
 */
1992 lem_disable_promisc(struct adapter *adapter)
1994 struct ifnet *ifp = adapter->ifp;
1998 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1999 reg_rctl &= (~E1000_RCTL_UPE);
2000 if (ifp->if_flags & IFF_ALLMULTI)
2001 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
/* Otherwise count AF_LINK multicast memberships, capped at the max. */
2003 struct ifmultiaddr *ifma;
2004 #if __FreeBSD_version < 800000
2007 if_maddr_rlock(ifp);
2009 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2010 if (ifma->ifma_addr->sa_family != AF_LINK)
2012 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2016 #if __FreeBSD_version < 800000
2017 IF_ADDR_UNLOCK(ifp);
2019 if_maddr_runlock(ifp);
2022 /* Don't disable if in MAX groups */
2023 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2024 reg_rctl &= (~E1000_RCTL_MPE);
2025 reg_rctl &= (~E1000_RCTL_SBP);
2026 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2030 /*********************************************************************
2033 * This routine is called whenever multicast address list is updated.
2035 **********************************************************************/
/*
 * lem_set_multi - rebuild the hardware multicast filter from the
 * interface's multicast list. If the list exceeds the hardware table
 * size, falls back to multicast-promiscuous (MPE). 82542 rev2 requires
 * the receiver to be held in reset (RCTL_RST) and MWI disabled while
 * the table is written.
 * NOTE(review): elided listing — return type, braces, the mta buffer
 * setup, the mcnt/reg_rctl initialization, the `continue`/`break`
 * bodies in the loop, the `else` before the update call, and the
 * msec_delay() calls around the 82542 reset are among the missing lines.
 */
2038 lem_set_multi(struct adapter *adapter)
2040 struct ifnet *ifp = adapter->ifp;
2041 struct ifmultiaddr *ifma;
2043 u8 *mta; /* Multicast array memory */
2046 IOCTL_DEBUGOUT("lem_set_multi: begin");
2049 bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
/* 82542 rev2 errata: put the receiver in reset before filter writes. */
2051 if (adapter->hw.mac.type == e1000_82542 &&
2052 adapter->hw.revision_id == E1000_REVISION_2) {
2053 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2054 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2055 e1000_pci_clear_mwi(&adapter->hw);
2056 reg_rctl |= E1000_RCTL_RST;
2057 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/* Gather up to MAX_NUM_MULTICAST_ADDRESSES link-layer addresses. */
2061 #if __FreeBSD_version < 800000
2064 if_maddr_rlock(ifp);
2066 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2067 if (ifma->ifma_addr->sa_family != AF_LINK)
2070 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2073 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2074 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2077 #if __FreeBSD_version < 800000
2078 IF_ADDR_UNLOCK(ifp);
2080 if_maddr_runlock(ifp);
/* Too many groups for the table: accept all multicast instead. */
2082 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2083 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2084 reg_rctl |= E1000_RCTL_MPE;
2085 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2087 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
/* 82542 rev2: take the receiver back out of reset and restore MWI. */
2089 if (adapter->hw.mac.type == e1000_82542 &&
2090 adapter->hw.revision_id == E1000_REVISION_2) {
2091 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2092 reg_rctl &= ~E1000_RCTL_RST;
2093 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2095 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2096 e1000_pci_set_mwi(&adapter->hw);
2101 /*********************************************************************
2104 * This routine checks for link status and updates statistics.
2106 **********************************************************************/
/*
 * lem_local_timer - once-per-second callout (armed by lem_init_locked).
 * Refreshes link status and statistics, runs the smartspeed workaround,
 * checks the TX watchdog, and re-arms itself. The watchdog-expired tail
 * (after the elided `hung:` label) logs, marks the interface not
 * RUNNING, and reinitializes.
 * NOTE(review): elided listing — return type, braces, the `goto hung`
 * and the `hung:` label itself, the NIC_PARAVIRT lem_txeof() call, and
 * the early `return` before the watchdog tail are among the missing
 * lines.
 */
2109 lem_local_timer(void *arg)
2111 struct adapter *adapter = arg;
2113 EM_CORE_LOCK_ASSERT(adapter);
2115 lem_update_link_status(adapter);
2116 lem_update_stats_counters(adapter);
2118 lem_smartspeed(adapter);
/* NIC_PARAVIRT: reclaim TX space if the watchdog would otherwise fire. */
2121 /* recover space if needed */
2122 if (adapter->csb && adapter->csb->guest_csb_on &&
2123 (adapter->watchdog_check == TRUE) &&
2124 (ticks - adapter->watchdog_time > EM_WATCHDOG) &&
2125 (adapter->num_tx_desc_avail != adapter->num_tx_desc) ) {
2128 * lem_txeof() normally (except when space in the queue
2129 * runs low XXX) cleans watchdog_check so that
2133 #endif /* NIC_PARAVIRT */
2135 * We check the watchdog: the time since
2136 * the last TX descriptor was cleaned.
2137 * This implies a functional TX engine.
2139 if ((adapter->watchdog_check == TRUE) &&
2140 (ticks - adapter->watchdog_time > EM_WATCHDOG))
2143 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
/* Watchdog-expired path (reached via the elided `hung:` label). */
2146 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2147 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2148 adapter->watchdog_events++;
2149 lem_init_locked(adapter);
/*
 * lem_update_link_status - poll the PHY/MAC for link and propagate
 * transitions to the network stack. On link-up: latch speed/duplex,
 * set if_baudrate, and notify LINK_STATE_UP; on link-down: clear the
 * cached state, disable the TX watchdog, and notify LINK_STATE_DOWN.
 * NOTE(review): elided listing — return type, braces, the link_check
 * declaration/initialization, `break` statements, `bootverbose` guards
 * around the printfs, and the E1000_STATUS_LU mask on the fiber check
 * are among the missing lines.
 */
2153 lem_update_link_status(struct adapter *adapter)
2155 struct e1000_hw *hw = &adapter->hw;
2156 struct ifnet *ifp = adapter->ifp;
2157 device_t dev = adapter->dev;
2160 /* Get the cached link value or read phy for real */
2161 switch (hw->phy.media_type) {
2162 case e1000_media_type_copper:
2163 if (hw->mac.get_link_status) {
2164 /* Do the work to read phy */
2165 e1000_check_for_link(hw);
/* get_link_status is cleared by the shared code once link is up. */
2166 link_check = !hw->mac.get_link_status;
2167 if (link_check) /* ESB2 fix */
2168 e1000_cfg_on_link_up(hw);
2172 case e1000_media_type_fiber:
2173 e1000_check_for_link(hw);
2174 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2177 case e1000_media_type_internal_serdes:
2178 e1000_check_for_link(hw);
2179 link_check = adapter->hw.mac.serdes_has_link;
2182 case e1000_media_type_unknown:
2186 /* Now check for a transition */
2187 if (link_check && (adapter->link_active == 0)) {
2188 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2189 &adapter->link_duplex);
2191 device_printf(dev, "Link is up %d Mbps %s\n",
2192 adapter->link_speed,
2193 ((adapter->link_duplex == FULL_DUPLEX) ?
2194 "Full Duplex" : "Half Duplex"));
2195 adapter->link_active = 1;
/* New link: restart smartspeed detection from scratch. */
2196 adapter->smartspeed = 0;
2197 ifp->if_baudrate = adapter->link_speed * 1000000;
2198 if_link_state_change(ifp, LINK_STATE_UP);
2199 } else if (!link_check && (adapter->link_active == 1)) {
2200 ifp->if_baudrate = adapter->link_speed = 0;
2201 adapter->link_duplex = 0;
2203 device_printf(dev, "Link is Down\n");
2204 adapter->link_active = 0;
2205 /* Link down, disable watchdog */
2206 adapter->watchdog_check = FALSE;
2207 if_link_state_change(ifp, LINK_STATE_DOWN);
2211 /*********************************************************************
2213 * This routine disables all traffic on the adapter by issuing a
2214 * global reset on the MAC and deallocates TX/RX buffers.
2216 * This routine should always be called with BOTH the CORE
2218 **********************************************************************/
/*
 * lem_stop - disable all adapter traffic: mask interrupts, stop the
 * callouts, clear RUNNING/OACTIVE, reset the MAC, and turn off the LED.
 * Per the (elided) header comment it must be called with BOTH the core
 * and TX locks held — asserted below.
 * NOTE(review): elided listing — the signature (a void-pointer `arg`
 * handler) and braces are among the missing lines.
 */
2223 struct adapter *adapter = arg;
2224 struct ifnet *ifp = adapter->ifp;
2226 EM_CORE_LOCK_ASSERT(adapter);
2227 EM_TX_LOCK_ASSERT(adapter);
2229 INIT_DEBUGOUT("lem_stop: begin");
2231 lem_disable_intr(adapter);
2232 callout_stop(&adapter->timer);
2233 callout_stop(&adapter->tx_fifo_timer);
2235 /* Tell the stack that the interface is no longer active */
2236 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2238 e1000_reset_hw(&adapter->hw);
/* 82544+ have a Wakeup Control register; clear any wake state. */
2239 if (adapter->hw.mac.type >= e1000_82544)
2240 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2242 e1000_led_off(&adapter->hw);
2243 e1000_cleanup_led(&adapter->hw);
2247 /*********************************************************************
2249 * Determine hardware revision.
2251 **********************************************************************/
/*
 * lem_identify_hardware - read PCI config identity (vendor/device/
 * revision/subsystem IDs) into the shared-code hw struct and let the
 * shared code resolve the MAC type.
 * NOTE(review): elided listing — return type, braces, and the
 * error-path return after the failed e1000_set_mac_type() are among
 * the missing lines.
 */
2253 lem_identify_hardware(struct adapter *adapter)
2255 device_t dev = adapter->dev;
2257 /* Make sure our PCI config space has the necessary stuff set */
2258 pci_enable_busmaster(dev);
2259 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2261 /* Save off the information about this board */
2262 adapter->hw.vendor_id = pci_get_vendor(dev);
2263 adapter->hw.device_id = pci_get_device(dev);
2264 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2265 adapter->hw.subsystem_vendor_id =
2266 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2267 adapter->hw.subsystem_device_id =
2268 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2270 /* Do Shared Code Init and Setup */
2271 if (e1000_set_mac_type(&adapter->hw)) {
2272 device_printf(dev, "Setup init failure\n");
/*
 * lem_allocate_pci_resources - map the memory BAR (always) and, for
 * pre-82544 parts that require it, locate and map the I/O BAR. Stores
 * bus tags/handles in adapter->osdep for the register access macros.
 * Returns E1000_SUCCESS or an errno (error returns are elided).
 * NOTE(review): elided listing — braces, the rid initialization for the
 * memory BAR, the loop's rid increments, error returns, and the
 * pci_write_config of the located IO rid are among the missing lines.
 * Also note the visible condition reads `> e1000_82543` under a comment
 * saying "older adapters use IO mapping" — whether the comparison
 * direction matches the comment can't be confirmed from this fragment;
 * TODO check against the full source.
 */
2278 lem_allocate_pci_resources(struct adapter *adapter)
2280 device_t dev = adapter->dev;
2281 int val, rid, error = E1000_SUCCESS;
2284 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2286 if (adapter->memory == NULL) {
2287 device_printf(dev, "Unable to allocate bus resource: memory\n");
2290 adapter->osdep.mem_bus_space_tag =
2291 rman_get_bustag(adapter->memory);
2292 adapter->osdep.mem_bus_space_handle =
2293 rman_get_bushandle(adapter->memory);
/* Shared code accesses registers through hw_addr via the osdep handle. */
2294 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2296 /* Only older adapters use IO mapping */
2297 if (adapter->hw.mac.type > e1000_82543) {
2298 /* Figure our where our IO BAR is ? */
2299 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2300 val = pci_read_config(dev, rid, 4);
2301 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2302 adapter->io_rid = rid;
2306 /* check for 64bit BAR */
2307 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
/* Scanned past the BAR region without finding an I/O BAR. */
2310 if (rid >= PCIR_CIS) {
2311 device_printf(dev, "Unable to locate IO BAR\n");
2314 adapter->ioport = bus_alloc_resource_any(dev,
2315 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2316 if (adapter->ioport == NULL) {
2317 device_printf(dev, "Unable to allocate bus resource: "
2321 adapter->hw.io_base = 0;
2322 adapter->osdep.io_bus_space_tag =
2323 rman_get_bustag(adapter->ioport);
2324 adapter->osdep.io_bus_space_handle =
2325 rman_get_bushandle(adapter->ioport);
2328 adapter->hw.back = &adapter->osdep;
2333 /*********************************************************************
2335 * Setup the Legacy or MSI Interrupt handler
2337 **********************************************************************/
/*
 * lem_allocate_irq: allocate the single (legacy/MSI) IRQ resource and hook
 * either a classic filter-less handler (lem_intr, when lem_use_legacy_irq is
 * set) or a fast filter (lem_irq_fast) plus taskqueue-deferred rxtx/link
 * tasks. NOTE(review): extraction dropped lines (rid initialization, error
 * returns, braces); in-text numbers are original file line numbers.
 */
2339 lem_allocate_irq(struct adapter *adapter)
2341 device_t dev = adapter->dev;
2344 /* Manually turn off all interrupts */
2345 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2347 /* We allocate a single interrupt resource */
2348 adapter->res[0] = bus_alloc_resource_any(dev,
2349 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2350 if (adapter->res[0] == NULL) {
2351 device_printf(dev, "Unable to allocate bus resource: "
2356 /* Do Legacy setup? */
2357 if (lem_use_legacy_irq) {
2358 if ((error = bus_setup_intr(dev, adapter->res[0],
2359 INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
2360 &adapter->tag[0])) != 0) {
2362 "Failed to register interrupt handler");
2369 * Use a Fast interrupt and the associated
2370 * deferred processing contexts.
2372 TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
2373 TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
2374 adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
2375 taskqueue_thread_enqueue, &adapter->tq);
2376 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2377 device_get_nameunit(adapter->dev));
2378 if ((error = bus_setup_intr(dev, adapter->res[0],
2379 INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
2380 &adapter->tag[0])) != 0) {
2381 device_printf(dev, "Failed to register fast interrupt "
2382 "handler: %d\n", error);
2383 taskqueue_free(adapter->tq);
/*
 * lem_free_pci_resources: release everything lem_allocate_irq and
 * lem_allocate_pci_resources acquired — IRQ handler/resource, memory BAR,
 * and (if mapped) the IO BAR. Safe to call with partially-initialized state
 * thanks to the NULL checks. NOTE(review): extraction dropped lines; in-text
 * numbers are original file line numbers.
 */
2393 lem_free_pci_resources(struct adapter *adapter)
2395 device_t dev = adapter->dev;
2398 if (adapter->tag[0] != NULL) {
2399 bus_teardown_intr(dev, adapter->res[0],
2401 adapter->tag[0] = NULL;
2404 if (adapter->res[0] != NULL) {
2405 bus_release_resource(dev, SYS_RES_IRQ,
2406 0, adapter->res[0]);
2409 if (adapter->memory != NULL)
2410 bus_release_resource(dev, SYS_RES_MEMORY,
2411 PCIR_BAR(0), adapter->memory);
2413 if (adapter->ioport != NULL)
2414 bus_release_resource(dev, SYS_RES_IOPORT,
2415 adapter->io_rid, adapter->ioport);
2419 /*********************************************************************
2421 * Initialize the hardware to a configuration
2422 * as specified by the adapter structure.
2424 **********************************************************************/
/*
 * lem_hardware_init: global reset, flow-control watermark computation from
 * the on-chip packet buffer (PBA) size, then shared-code e1000_init_hw() and
 * an initial link check. NOTE(review): extraction dropped lines (locals,
 * error returns, braces); in-text numbers are original file line numbers.
 */
2426 lem_hardware_init(struct adapter *adapter)
2428 device_t dev = adapter->dev;
2431 INIT_DEBUGOUT("lem_hardware_init: begin");
2433 /* Issue a global reset */
2434 e1000_reset_hw(&adapter->hw);
2436 /* When hardware is reset, fifo_head is also reset */
2437 adapter->tx_fifo_head = 0;
2440 * These parameters control the automatic generation (Tx) and
2441 * response (Rx) to Ethernet PAUSE frames.
2442 * - High water mark should allow for at least two frames to be
2443 * received after sending an XOFF.
2444 * - Low water mark works best when it is very near the high water mark.
2445 * This allows the receiver to restart by sending XON when it has
2446 * drained a bit. Here we use an arbitary value of 1500 which will
2447 * restart after one full frame is pulled from the buffer. There
2448 * could be several smaller frames in the buffer and if so they will
2449 * not trigger the XON until their total number reduces the buffer
2451 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2453 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
2456 adapter->hw.fc.high_water = rx_buffer_size -
2457 roundup2(adapter->max_frame_size, 1024);
2458 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2460 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2461 adapter->hw.fc.send_xon = TRUE;
2463 /* Set Flow control, use the tunable location if sane */
/* lem_fc_setting: 0=none, 1=rx pause, 2=tx pause, 3=full (e1000_fc_mode) */
2464 if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
2465 adapter->hw.fc.requested_mode = lem_fc_setting;
2467 adapter->hw.fc.requested_mode = e1000_fc_none;
2469 if (e1000_init_hw(&adapter->hw) < 0) {
2470 device_printf(dev, "Hardware Initialization Failed\n");
2474 e1000_check_for_link(&adapter->hw);
2479 /*********************************************************************
2481 * Setup networking device structure and register an interface.
2483 **********************************************************************/
/*
 * lem_setup_interface: allocate and populate the ifnet, attach it to the
 * ethernet layer, advertise capabilities (HWCSUM/VLAN/WOL/polling), and
 * register the supported media types with ifmedia. NOTE(review): extraction
 * dropped lines (locals, #endif's, braces); in-text numbers are original
 * file line numbers.
 */
2485 lem_setup_interface(device_t dev, struct adapter *adapter)
2489 INIT_DEBUGOUT("lem_setup_interface: begin");
2491 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2493 device_printf(dev, "can not allocate ifnet structure\n");
2496 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2497 ifp->if_init = lem_init;
2498 ifp->if_softc = adapter;
2499 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2500 ifp->if_ioctl = lem_ioctl;
2501 ifp->if_start = lem_start;
2502 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2503 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2504 IFQ_SET_READY(&ifp->if_snd);
2506 ether_ifattach(ifp, adapter->hw.mac.addr);
2508 ifp->if_capabilities = ifp->if_capenable = 0;
/* Checksum offload only exists on 82543 and newer MACs */
2510 if (adapter->hw.mac.type >= e1000_82543) {
2511 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2512 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2516 * Tell the upper layer(s) we support long frames.
2518 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2519 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2520 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2523 ** Dont turn this on by default, if vlans are
2524 ** created on another pseudo device (eg. lagg)
2525 ** then vlan events are not passed thru, breaking
2526 ** operation, but with HW FILTER off it works. If
2527 ** using vlans directly on the em driver you can
2528 ** enable this and get full hardware tag filtering.
2530 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2532 #ifdef DEVICE_POLLING
2533 ifp->if_capabilities |= IFCAP_POLLING;
2536 /* Enable only WOL MAGIC by default */
2538 ifp->if_capabilities |= IFCAP_WOL;
2539 ifp->if_capenable |= IFCAP_WOL_MAGIC;
2543 * Specify the media types supported by this adapter and register
2544 * callbacks to update media and link information
2546 ifmedia_init(&adapter->media, IFM_IMASK,
2547 lem_media_change, lem_media_status);
2548 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2549 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2550 u_char fiber_type = IFM_1000_SX; /* default type */
2552 if (adapter->hw.mac.type == e1000_82545)
2553 fiber_type = IFM_1000_LX;
2554 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
2556 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2558 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2559 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2561 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2563 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
/* The IFE PHY cannot do 1000T, so only advertise gigabit otherwise */
2565 if (adapter->hw.phy.type != e1000_phy_ife) {
2566 ifmedia_add(&adapter->media,
2567 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2568 ifmedia_add(&adapter->media,
2569 IFM_ETHER | IFM_1000_T, 0, NULL);
2572 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2573 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2578 /*********************************************************************
2580 * Workaround for SmartSpeed on 82541 and 82547 controllers
2582 **********************************************************************/
/*
 * lem_smartspeed: SmartSpeed workaround for 82541/82547 — if gigabit
 * autonegotiation repeatedly fails with a master/slave config fault, toggle
 * manual master/slave config off (and later back on) and restart autoneg.
 * Returns early when the link is up, PHY is not IGP, autoneg is off, or
 * 1000FD is not advertised. NOTE(review): extraction dropped lines; in-text
 * numbers are original file line numbers.
 */
2584 lem_smartspeed(struct adapter *adapter)
2588 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2589 adapter->hw.mac.autoneg == 0 ||
2590 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2593 if (adapter->smartspeed == 0) {
2594 /* If Master/Slave config fault is asserted twice,
2595 * we assume back-to-back */
2596 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2597 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2599 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2600 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2601 e1000_read_phy_reg(&adapter->hw,
2602 PHY_1000T_CTRL, &phy_tmp);
2603 if(phy_tmp & CR_1000T_MS_ENABLE) {
2604 phy_tmp &= ~CR_1000T_MS_ENABLE;
2605 e1000_write_phy_reg(&adapter->hw,
2606 PHY_1000T_CTRL, phy_tmp);
2607 adapter->smartspeed++;
2608 if(adapter->hw.mac.autoneg &&
2609 !e1000_copper_link_autoneg(&adapter->hw) &&
2610 !e1000_read_phy_reg(&adapter->hw,
2611 PHY_CONTROL, &phy_tmp)) {
2612 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2613 MII_CR_RESTART_AUTO_NEG);
2614 e1000_write_phy_reg(&adapter->hw,
2615 PHY_CONTROL, phy_tmp);
2620 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2621 /* If still no link, perhaps using 2/3 pair cable */
2622 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2623 phy_tmp |= CR_1000T_MS_ENABLE;
2624 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2625 if(adapter->hw.mac.autoneg &&
2626 !e1000_copper_link_autoneg(&adapter->hw) &&
2627 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2628 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2629 MII_CR_RESTART_AUTO_NEG);
2630 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2633 /* Restart process after EM_SMARTSPEED_MAX iterations */
2634 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2635 adapter->smartspeed = 0;
2640 * Manage DMA'able memory.
/*
 * lem_dmamap_cb: bus_dmamap_load() callback — stores the single segment's
 * bus address through the caller-supplied bus_addr_t pointer.
 * NOTE(review): extraction dropped the error-check lines and braces.
 */
2643 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2647 *(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * lem_dma_malloc: allocate a coherent DMA area of `size` bytes — create a
 * tag, allocate/map the memory, and load it to obtain dma_paddr via
 * lem_dmamap_cb. On failure, unwinds whatever was acquired (unload, free,
 * tag destroy) and clears dma_map/dma_tag. NOTE(review): extraction dropped
 * lines (tag nsegments, goto labels, returns); in-text numbers are original
 * file line numbers.
 */
2651 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2652 struct em_dma_alloc *dma, int mapflags)
2656 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2657 EM_DBA_ALIGN, 0, /* alignment, bounds */
2658 BUS_SPACE_MAXADDR, /* lowaddr */
2659 BUS_SPACE_MAXADDR, /* highaddr */
2660 NULL, NULL, /* filter, filterarg */
2663 size, /* maxsegsize */
2665 NULL, /* lockfunc */
2669 device_printf(adapter->dev,
2670 "%s: bus_dma_tag_create failed: %d\n",
2675 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2676 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2678 device_printf(adapter->dev,
2679 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2680 __func__, (uintmax_t)size, error);
2685 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2686 size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2687 if (error || dma->dma_paddr == 0) {
2688 device_printf(adapter->dev,
2689 "%s: bus_dmamap_load failed: %d\n",
/* Error-unwind path: release in reverse order of acquisition */
2697 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2699 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2700 bus_dma_tag_destroy(dma->dma_tag);
2702 dma->dma_map = NULL;
2703 dma->dma_tag = NULL;
/*
 * lem_dma_free: tear down an em_dma_alloc — sync, unload, free the memory,
 * then destroy the tag. No-op when the tag was never created.
 * NOTE(review): extraction dropped lines (returns, braces).
 */
2709 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2711 if (dma->dma_tag == NULL)
2713 if (dma->dma_map != NULL) {
2714 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2715 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2716 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2717 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2718 dma->dma_map = NULL;
2720 bus_dma_tag_destroy(dma->dma_tag);
2721 dma->dma_tag = NULL;
2725 /*********************************************************************
2727 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2728 * the information needed to transmit a packet on the wire.
2730 **********************************************************************/
/*
 * lem_allocate_transmit_structures: create the TX mbuf DMA tag (up to
 * EM_MAX_SCATTER segments of MCLBYTES each), allocate the tx_buffer array,
 * and create a DMA map per descriptor. On any failure falls through to
 * lem_free_transmit_structures for cleanup. NOTE(review): extraction dropped
 * lines (error gotos, returns, braces); in-text numbers are original file
 * line numbers.
 */
2732 lem_allocate_transmit_structures(struct adapter *adapter)
2734 device_t dev = adapter->dev;
2735 struct em_buffer *tx_buffer;
2739 * Create DMA tags for tx descriptors
2741 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2742 1, 0, /* alignment, bounds */
2743 BUS_SPACE_MAXADDR, /* lowaddr */
2744 BUS_SPACE_MAXADDR, /* highaddr */
2745 NULL, NULL, /* filter, filterarg */
2746 MCLBYTES * EM_MAX_SCATTER, /* maxsize */
2747 EM_MAX_SCATTER, /* nsegments */
2748 MCLBYTES, /* maxsegsize */
2750 NULL, /* lockfunc */
2752 &adapter->txtag)) != 0) {
2753 device_printf(dev, "Unable to allocate TX DMA tag\n");
2757 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2758 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2759 if (adapter->tx_buffer_area == NULL) {
2760 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2765 /* Create the descriptor buffer dma maps */
2766 for (int i = 0; i < adapter->num_tx_desc; i++) {
2767 tx_buffer = &adapter->tx_buffer_area[i];
2768 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2770 device_printf(dev, "Unable to create TX DMA map\n");
/* next_eop == -1 marks the slot as not holding a packet's EOP yet */
2773 tx_buffer->next_eop = -1;
2778 lem_free_transmit_structures(adapter);
2782 /*********************************************************************
2784 * (Re)Initialize transmit structures.
2786 **********************************************************************/
/*
 * lem_setup_transmit_structures: (re)initialize the TX ring — zero the
 * descriptor ring, free any mbufs left in tx_buffer slots, rebind buffers
 * for netmap mode when DEV_NETMAP is compiled in, and reset the ring
 * head/tail bookkeeping. Caller holds the TX lock. NOTE(review): extraction
 * dropped lines (#ifdef DEV_NETMAP guards, braces); in-text numbers are
 * original file line numbers.
 */
2788 lem_setup_transmit_structures(struct adapter *adapter)
2790 struct em_buffer *tx_buffer;
2792 /* we are already locked */
2793 struct netmap_adapter *na = NA(adapter->ifp);
2794 struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
2795 #endif /* DEV_NETMAP */
2797 /* Clear the old ring contents */
2798 bzero(adapter->tx_desc_base,
2799 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2801 /* Free any existing TX buffers */
2802 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2803 tx_buffer = &adapter->tx_buffer_area[i];
2804 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2805 BUS_DMASYNC_POSTWRITE);
2806 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2807 m_freem(tx_buffer->m_head);
2808 tx_buffer->m_head = NULL;
2811 /* the i-th NIC entry goes to slot si */
2812 int si = netmap_idx_n2k(&na->tx_rings[0], i);
2816 addr = PNMB(na, slot + si, &paddr);
2817 adapter->tx_desc_base[i].buffer_addr = htole64(paddr);
2818 /* reload the map for netmap mode */
2819 netmap_load_map(na, adapter->txtag, tx_buffer->map, addr);
2821 #endif /* DEV_NETMAP */
2822 tx_buffer->next_eop = -1;
2826 adapter->last_hw_offload = 0;
2827 adapter->next_avail_tx_desc = 0;
2828 adapter->next_tx_to_clean = 0;
2829 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2831 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2832 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2837 /*********************************************************************
2839 * Enable transmit unit.
2841 **********************************************************************/
/*
 * lem_initialize_transmit_unit: program the hardware TX ring registers —
 * base/length (TDBAL/TDBAH/TDLEN), head/tail (TDH/TDT), inter-packet gap
 * (TIPG per MAC type), TX interrupt delays (TIDV/TADV), and finally TCTL to
 * enable the transmitter. NOTE(review): extraction dropped lines (locals,
 * switch case labels, braces); in-text numbers are original file line
 * numbers.
 */
2843 lem_initialize_transmit_unit(struct adapter *adapter)
2848 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
2849 /* Setup the Base and Length of the Tx Descriptor Ring */
2850 bus_addr = adapter->txdma.dma_paddr;
2851 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2852 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2853 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2854 (u32)(bus_addr >> 32));
2855 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2857 /* Setup the HW Tx Head and Tail descriptor pointers */
2858 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2859 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
2861 HW_DEBUGOUT2("Base = %x, Length = %x\n",
2862 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
2863 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
2865 /* Set the default values for the Tx Inter Packet Gap timer */
2866 switch (adapter->hw.mac.type) {
2868 tipg = DEFAULT_82542_TIPG_IPGT;
2869 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2870 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2873 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2874 (adapter->hw.phy.media_type ==
2875 e1000_media_type_internal_serdes))
2876 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2878 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2879 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2880 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2883 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2884 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
/* TADV (absolute delay) exists only on 82540 and later */
2885 if(adapter->hw.mac.type >= e1000_82540)
2886 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
2887 adapter->tx_abs_int_delay.value);
2889 /* Program the Transmit Control Register */
2890 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2891 tctl &= ~E1000_TCTL_CT;
2892 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2893 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2895 /* This write will effectively turn on the transmit unit. */
2896 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2898 /* Setup Transmit Descriptor Base Settings */
2899 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
2901 if (adapter->tx_int_delay.value > 0)
2902 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2905 /*********************************************************************
2907 * Free all transmit related data structures.
2909 **********************************************************************/
/*
 * lem_free_transmit_structures: release every TX resource — per-descriptor
 * mbufs and DMA maps, the tx_buffer array, the TX DMA tag, and (FreeBSD 8+)
 * the buf_ring. Safe against partially-initialized state. NOTE(review):
 * extraction dropped lines (map arguments, braces, #endif); in-text numbers
 * are original file line numbers.
 */
2911 lem_free_transmit_structures(struct adapter *adapter)
2913 struct em_buffer *tx_buffer;
2915 INIT_DEBUGOUT("free_transmit_structures: begin");
2917 if (adapter->tx_buffer_area != NULL) {
2918 for (int i = 0; i < adapter->num_tx_desc; i++) {
2919 tx_buffer = &adapter->tx_buffer_area[i];
2920 if (tx_buffer->m_head != NULL) {
2921 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2922 BUS_DMASYNC_POSTWRITE);
2923 bus_dmamap_unload(adapter->txtag,
2925 m_freem(tx_buffer->m_head);
2926 tx_buffer->m_head = NULL;
2927 } else if (tx_buffer->map != NULL)
2928 bus_dmamap_unload(adapter->txtag,
2930 if (tx_buffer->map != NULL) {
2931 bus_dmamap_destroy(adapter->txtag,
2933 tx_buffer->map = NULL;
2937 if (adapter->tx_buffer_area != NULL) {
2938 free(adapter->tx_buffer_area, M_DEVBUF);
2939 adapter->tx_buffer_area = NULL;
2941 if (adapter->txtag != NULL) {
2942 bus_dma_tag_destroy(adapter->txtag);
2943 adapter->txtag = NULL;
2945 #if __FreeBSD_version >= 800000
2946 if (adapter->br != NULL)
2947 buf_ring_free(adapter->br, M_DEVBUF);
2951 /*********************************************************************
2953 * The offload context needs to be set when we transfer the first
2954 * packet of a particular protocol (TCP/UDP). This routine has been
2955 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2957 * Added back the old method of keeping the current context type
2958 * and not setting if unnecessary, as this is reported to be a
2959 * big performance win. -jfv
2960 **********************************************************************/
/*
 * lem_transmit_checksum_setup: build a TX context descriptor for IP/TCP/UDP
 * checksum offload and set *txd_upper/*txd_lower for the data descriptors.
 * Caches the last offload type (adapter->last_hw_offload) and skips writing
 * a new context descriptor when it is unchanged — a known performance win.
 * NOTE(review): extraction dropped lines (switch/case labels, returns,
 * braces); in-text numbers are original file line numbers.
 */
2962 lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2963 u32 *txd_upper, u32 *txd_lower)
2965 struct e1000_context_desc *TXD = NULL;
2966 struct em_buffer *tx_buffer;
2967 struct ether_vlan_header *eh;
2968 struct ip *ip = NULL;
2969 struct ip6_hdr *ip6;
2970 int curr_txd, ehdrlen;
2971 u32 cmd, hdr_len, ip_hlen;
2976 cmd = hdr_len = ipproto = 0;
2977 *txd_upper = *txd_lower = 0;
2978 curr_txd = adapter->next_avail_tx_desc;
2981 * Determine where frame payload starts.
2982 * Jump over vlan headers if already present,
2983 * helpful for QinQ too.
2985 eh = mtod(mp, struct ether_vlan_header *);
2986 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2987 etype = ntohs(eh->evl_proto);
2988 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2990 etype = ntohs(eh->evl_encap_proto);
2991 ehdrlen = ETHER_HDR_LEN;
2995 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
2996 * TODO: Support SCTP too when it hits the tree.
3000 ip = (struct ip *)(mp->m_data + ehdrlen);
3001 ip_hlen = ip->ip_hl << 2;
3003 /* Setup of IP header checksum. */
3004 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3006 * Start offset for header checksum calculation.
3007 * End offset for header checksum calculation.
3008 * Offset of place to put the checksum.
3010 TXD = (struct e1000_context_desc *)
3011 &adapter->tx_desc_base[curr_txd];
3012 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3013 TXD->lower_setup.ip_fields.ipcse =
3014 htole16(ehdrlen + ip_hlen);
3015 TXD->lower_setup.ip_fields.ipcso =
3016 ehdrlen + offsetof(struct ip, ip_sum);
3017 cmd |= E1000_TXD_CMD_IP;
3018 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3021 hdr_len = ehdrlen + ip_hlen;
3025 case ETHERTYPE_IPV6:
3026 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3027 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3029 /* IPv6 doesn't have a header checksum. */
3031 hdr_len = ehdrlen + ip_hlen;
3032 ipproto = ip6->ip6_nxt;
3041 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3042 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3043 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3044 /* no need for context if already set */
3045 if (adapter->last_hw_offload == CSUM_TCP)
3047 adapter->last_hw_offload = CSUM_TCP;
3049 * Start offset for payload checksum calculation.
3050 * End offset for payload checksum calculation.
3051 * Offset of place to put the checksum.
3053 TXD = (struct e1000_context_desc *)
3054 &adapter->tx_desc_base[curr_txd];
3055 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3056 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3057 TXD->upper_setup.tcp_fields.tucso =
3058 hdr_len + offsetof(struct tcphdr, th_sum);
3059 cmd |= E1000_TXD_CMD_TCP;
3064 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3065 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3066 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3067 /* no need for context if already set */
3068 if (adapter->last_hw_offload == CSUM_UDP)
3070 adapter->last_hw_offload = CSUM_UDP;
3072 * Start offset for header checksum calculation.
3073 * End offset for header checksum calculation.
3074 * Offset of place to put the checksum.
3076 TXD = (struct e1000_context_desc *)
3077 &adapter->tx_desc_base[curr_txd];
3078 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3079 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3080 TXD->upper_setup.tcp_fields.tucso =
3081 hdr_len + offsetof(struct udphdr, uh_sum);
/* Finalize the context descriptor and consume the ring slot */
3091 TXD->tcp_seg_setup.data = htole32(0);
3092 TXD->cmd_and_length =
3093 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3094 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3095 tx_buffer->m_head = NULL;
3096 tx_buffer->next_eop = -1;
3098 if (++curr_txd == adapter->num_tx_desc)
3101 adapter->num_tx_desc_avail--;
3102 adapter->next_avail_tx_desc = curr_txd;
3106 /**********************************************************************
3108 * Examine each tx_buffer in the used queue. If the hardware is done
3109 * processing the packet then free associated resources. The
3110 * tx_buffer is put back on the free queue.
3112 **********************************************************************/
/*
 * lem_txeof: TX completion — walk descriptors from next_tx_to_clean, and for
 * every packet whose EOP descriptor has DD (descriptor done) set, unmap and
 * free its mbuf chain and reclaim the descriptors. Clears IFF_DRV_OACTIVE
 * once enough descriptors are free and disarms the watchdog when the ring
 * drains. Caller holds the TX lock (asserted). NOTE(review): extraction
 * dropped lines (`done` updates, braces, #ifdef guards); in-text numbers
 * are original file line numbers.
 */
3114 lem_txeof(struct adapter *adapter)
3116 int first, last, done, num_avail;
3117 struct em_buffer *tx_buffer;
3118 struct e1000_tx_desc *tx_desc, *eop_desc;
3119 struct ifnet *ifp = adapter->ifp;
3121 EM_TX_LOCK_ASSERT(adapter);
3124 if (netmap_tx_irq(ifp, 0))
3126 #endif /* DEV_NETMAP */
3127 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3130 num_avail = adapter->num_tx_desc_avail;
3131 first = adapter->next_tx_to_clean;
3132 tx_desc = &adapter->tx_desc_base[first];
3133 tx_buffer = &adapter->tx_buffer_area[first];
3134 last = tx_buffer->next_eop;
3135 eop_desc = &adapter->tx_desc_base[last];
3138 * What this does is get the index of the
3139 * first descriptor AFTER the EOP of the
3140 * first packet, that way we can do the
3141 * simple comparison on the inner while loop.
3143 if (++last == adapter->num_tx_desc)
3147 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3148 BUS_DMASYNC_POSTREAD);
3150 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3151 /* We clean the range of the packet */
3152 while (first != done) {
3153 tx_desc->upper.data = 0;
3154 tx_desc->lower.data = 0;
3155 tx_desc->buffer_addr = 0;
3158 if (tx_buffer->m_head) {
3160 bus_dmamap_sync(adapter->txtag,
3162 BUS_DMASYNC_POSTWRITE);
3163 bus_dmamap_unload(adapter->txtag,
3166 m_freem(tx_buffer->m_head);
3167 tx_buffer->m_head = NULL;
3169 tx_buffer->next_eop = -1;
/* Progress made — feed the watchdog timestamp */
3170 adapter->watchdog_time = ticks;
3172 if (++first == adapter->num_tx_desc)
3175 tx_buffer = &adapter->tx_buffer_area[first];
3176 tx_desc = &adapter->tx_desc_base[first];
3178 /* See if we can continue to the next packet */
3179 last = tx_buffer->next_eop;
3181 eop_desc = &adapter->tx_desc_base[last];
3182 /* Get new done point */
3183 if (++last == adapter->num_tx_desc) last = 0;
3188 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3189 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3191 adapter->next_tx_to_clean = first;
3192 adapter->num_tx_desc_avail = num_avail;
3194 #ifdef NIC_SEND_COMBINING
3195 if ((adapter->shadow_tdt & MIT_PENDING_TDT) == MIT_PENDING_TDT) {
3196 /* a tdt write is pending, do it */
3197 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0),
3198 0xffff & adapter->shadow_tdt);
3199 adapter->shadow_tdt = MIT_PENDING_INT;
3201 adapter->shadow_tdt = 0; // disable
3203 #endif /* NIC_SEND_COMBINING */
3205 * If we have enough room, clear IFF_DRV_OACTIVE to
3206 * tell the stack that it is OK to send packets.
3207 * If there are no pending descriptors, clear the watchdog.
3209 if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
3210 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3212 if (adapter->csb) { // XXX also csb_on ?
3213 adapter->csb->guest_need_txkick = 2; /* acked */
3214 // XXX memory barrier
3216 #endif /* NIC_PARAVIRT */
3217 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
3218 adapter->watchdog_check = FALSE;
3224 /*********************************************************************
3226 * When Link is lost sometimes there is work still in the TX ring
3227 * which may result in a watchdog, rather than allow that we do an
3228 * attempted cleanup and then reinit here. Note that this has been
3229 * seens mostly with fiber adapters.
3231 **********************************************************************/
/*
 * lem_tx_purge: with link down and the watchdog armed, attempt a TX cleanup
 * under the TX lock; if work remains outstanding afterwards, reinitialize
 * the adapter instead of letting the watchdog fire. NOTE(review): the
 * cleanup call between lock/unlock was dropped by extraction — presumably
 * lem_txeof(adapter); verify against the original file.
 */
3233 lem_tx_purge(struct adapter *adapter)
3235 if ((!adapter->link_active) && (adapter->watchdog_check)) {
3236 EM_TX_LOCK(adapter);
3238 EM_TX_UNLOCK(adapter);
3239 if (adapter->watchdog_check) /* Still outstanding? */
3240 lem_init_locked(adapter);
3244 /*********************************************************************
3246 * Get a buffer from system mbuf buffer pool.
3248 **********************************************************************/
/*
 * lem_get_buf: refill RX descriptor slot i — allocate an mbuf cluster, align
 * the payload (ETHER_ALIGN) when the frame fits, DMA-load it via the spare
 * map, swap the spare map with the slot's map, and write the bus address
 * into the RX descriptor. NOTE(review): extraction dropped lines (error
 * returns, braces); in-text numbers are original file line numbers.
 */
3250 lem_get_buf(struct adapter *adapter, int i)
3253 bus_dma_segment_t segs[1];
3255 struct em_buffer *rx_buffer;
3258 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3260 adapter->mbuf_cluster_failed++;
3263 m->m_len = m->m_pkthdr.len = MCLBYTES;
3265 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3266 m_adj(m, ETHER_ALIGN);
3269 * Using memory from the mbuf cluster pool, invoke the
3270 * bus_dma machinery to arrange the memory mapping.
3272 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3273 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3279 /* If nsegs is wrong then the stack is corrupt. */
3280 KASSERT(nsegs == 1, ("Too many segments returned!"));
3282 rx_buffer = &adapter->rx_buffer_area[i];
3283 if (rx_buffer->m_head != NULL)
3284 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap the freshly-loaded spare map into the slot; old map becomes spare */
3286 map = rx_buffer->map;
3287 rx_buffer->map = adapter->rx_sparemap;
3288 adapter->rx_sparemap = map;
3289 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3290 rx_buffer->m_head = m;
3292 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3296 /*********************************************************************
3298 * Allocate memory for rx_buffer structures. Since we use one
3299 * rx_buffer per received packet, the maximum number of rx_buffer's
3300 * that we'll need is equal to the number of receive descriptors
3301 * that we've allocated.
3303 **********************************************************************/
/*
 * lem_allocate_receive_structures: allocate the rx_buffer array (one per RX
 * descriptor), create the single-segment MCLBYTES RX DMA tag, the spare map
 * used by lem_get_buf, and one map per buffer. On failure falls through to
 * lem_free_receive_structures. NOTE(review): extraction dropped lines
 * (error gotos, returns, braces); in-text numbers are original file line
 * numbers.
 */
3305 lem_allocate_receive_structures(struct adapter *adapter)
3307 device_t dev = adapter->dev;
3308 struct em_buffer *rx_buffer;
3311 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3312 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3313 if (adapter->rx_buffer_area == NULL) {
3314 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3318 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3319 1, 0, /* alignment, bounds */
3320 BUS_SPACE_MAXADDR, /* lowaddr */
3321 BUS_SPACE_MAXADDR, /* highaddr */
3322 NULL, NULL, /* filter, filterarg */
3323 MCLBYTES, /* maxsize */
3325 MCLBYTES, /* maxsegsize */
3327 NULL, /* lockfunc */
3331 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3336 /* Create the spare map (used by getbuf) */
3337 error = bus_dmamap_create(adapter->rxtag, 0, &adapter->rx_sparemap);
3339 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3344 rx_buffer = adapter->rx_buffer_area;
3345 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3346 error = bus_dmamap_create(adapter->rxtag, 0, &rx_buffer->map);
3348 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3357 lem_free_receive_structures(adapter);
3361 /*********************************************************************
3363 * (Re)initialize receive structures.
3365 **********************************************************************/
/*
 * lem_setup_receive_structures: (re)initialize the RX ring — zero the
 * descriptors, free any mbufs currently held, then repopulate every slot
 * either from netmap slots (DEV_NETMAP) or via lem_get_buf, and reset the
 * scan index. Caller already holds the lock. NOTE(review): extraction
 * dropped lines (#ifdef DEV_NETMAP guards, error handling, braces); in-text
 * numbers are original file line numbers.
 */
3367 lem_setup_receive_structures(struct adapter *adapter)
3369 struct em_buffer *rx_buffer;
3372 /* we are already under lock */
3373 struct netmap_adapter *na = NA(adapter->ifp);
3374 struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
3377 /* Reset descriptor ring */
3378 bzero(adapter->rx_desc_base,
3379 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3381 /* Free current RX buffers. */
3382 rx_buffer = adapter->rx_buffer_area;
3383 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3384 if (rx_buffer->m_head != NULL) {
3385 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3386 BUS_DMASYNC_POSTREAD);
3387 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3388 m_freem(rx_buffer->m_head);
3389 rx_buffer->m_head = NULL;
3393 /* Allocate new ones. */
3394 for (i = 0; i < adapter->num_rx_desc; i++) {
3397 /* the i-th NIC entry goes to slot si */
3398 int si = netmap_idx_n2k(&na->rx_rings[0], i);
3402 addr = PNMB(na, slot + si, &paddr);
3403 netmap_load_map(na, adapter->rxtag, rx_buffer->map, addr);
3404 /* Update descriptor */
3405 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
3408 #endif /* DEV_NETMAP */
3409 error = lem_get_buf(adapter, i);
3414 /* Setup our descriptor pointers */
3415 adapter->next_rx_desc_to_check = 0;
3416 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3417 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3422 /*********************************************************************
3424 * Enable receive unit.
3426 **********************************************************************/
/*
 * NOTE(review): excerpt elides some lines (return type, braces, case
 * labels of the buffer-size switch) — verify against the full driver.
 * Programs the hardware receive unit: disables RX while configuring,
 * sets interrupt moderation (82540+), loads ring base/length, builds
 * RCTL (buffer size, LPE, checksum offload), then re-enables RX and
 * initializes RDH/RDT.
 */
3429 lem_initialize_receive_unit(struct adapter *adapter)
3431 struct ifnet *ifp = adapter->ifp;
3435 INIT_DEBUGOUT("lem_initialize_receive_unit: begin");
3438 * Make sure receives are disabled while setting
3439 * up the descriptor ring
3441 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3442 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3444 if (adapter->hw.mac.type >= e1000_82540) {
3445 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
3446 adapter->rx_abs_int_delay.value);
3448 * Set the interrupt throttling rate. Value is calculated
3449 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3451 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
3454 /* Setup the Base and Length of the Rx Descriptor Ring */
3455 bus_addr = adapter->rxdma.dma_paddr;
3456 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3457 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3458 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3459 (u32)(bus_addr >> 32));
3460 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3463 /* Setup the Receive Control Register */
3464 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3465 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3466 E1000_RCTL_RDMTS_HALF |
3467 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3469 /* Make sure VLAN Filters are off */
3470 rctl &= ~E1000_RCTL_VFE;
/* 82543 TBI workaround: store bad packets only when the workaround is active */
3472 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3473 rctl |= E1000_RCTL_SBP;
3475 rctl &= ~E1000_RCTL_SBP;
/* buffer-size switch; >2048 sizes need BSEX and long-packet enable */
3477 switch (adapter->rx_buffer_len) {
3480 rctl |= E1000_RCTL_SZ_2048;
3483 rctl |= E1000_RCTL_SZ_4096 |
3484 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3487 rctl |= E1000_RCTL_SZ_8192 |
3488 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3491 rctl |= E1000_RCTL_SZ_16384 |
3492 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3496 if (ifp->if_mtu > ETHERMTU)
3497 rctl |= E1000_RCTL_LPE;
3499 rctl &= ~E1000_RCTL_LPE;
3501 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
3502 if ((adapter->hw.mac.type >= e1000_82543) &&
3503 (ifp->if_capenable & IFCAP_RXCSUM)) {
3504 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3505 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3506 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3509 /* Enable Receives */
3510 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3513 * Setup the HW Rx Head and
3514 * Tail Descriptor Pointers
3516 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
/* rctl is reused here as the RDT value, not as the control register */
3517 rctl = adapter->num_rx_desc - 1; /* default RDT value */
3519 /* preserve buffers already made available to clients */
3520 if (ifp->if_capenable & IFCAP_NETMAP)
3521 rctl -= nm_kr_rxspace(&NA(adapter->ifp)->rx_rings[0]);
3522 #endif /* DEV_NETMAP */
3523 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rctl);
3528 /*********************************************************************
3530 * Free receive related data structures.
3532 **********************************************************************/
/*
 * NOTE(review): excerpt elides some lines — verify against the full
 * driver. Tears down everything lem_setup_receive_structures built:
 * the spare DMA map, every per-descriptor mbuf and map, the buffer
 * array itself, and finally the RX DMA tag.
 */
3534 lem_free_receive_structures(struct adapter *adapter)
3536 struct em_buffer *rx_buffer;
3539 INIT_DEBUGOUT("free_receive_structures: begin");
3541 if (adapter->rx_sparemap) {
3542 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3543 adapter->rx_sparemap = NULL;
3546 /* Cleanup any existing buffers */
3547 if (adapter->rx_buffer_area != NULL) {
3548 rx_buffer = adapter->rx_buffer_area;
3549 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3550 if (rx_buffer->m_head != NULL) {
3551 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3552 BUS_DMASYNC_POSTREAD);
3553 bus_dmamap_unload(adapter->rxtag,
3555 m_freem(rx_buffer->m_head);
3556 rx_buffer->m_head = NULL;
/* map may be loaded even without an mbuf; unload before destroy */
3557 } else if (rx_buffer->map != NULL)
3558 bus_dmamap_unload(adapter->rxtag,
3560 if (rx_buffer->map != NULL) {
3561 bus_dmamap_destroy(adapter->rxtag,
3563 rx_buffer->map = NULL;
3568 if (adapter->rx_buffer_area != NULL) {
3569 free(adapter->rx_buffer_area, M_DEVBUF);
3570 adapter->rx_buffer_area = NULL;
3573 if (adapter->rxtag != NULL) {
3574 bus_dma_tag_destroy(adapter->rxtag);
3575 adapter->rxtag = NULL;
3579 /*********************************************************************
3581 * This routine executes in interrupt context. It replenishes
3582 * the mbufs in the descriptor and sends data which has been
3583 * dma'ed into host memory to upper layer.
3585 * We loop at most count times if count is > 0, or until done if
3588 * For polling we also now return the number of cleaned packets
3589 *********************************************************************/
/*
 * NOTE(review): this excerpt elides many lines (local declarations,
 * braces, break/goto statements) — the control flow below cannot be
 * fully reconstructed from what is visible; verify against the full
 * if_lem.c before relying on any of the annotations here.
 * Harvests completed RX descriptors under EM_RX_LOCK, chains multi-
 * descriptor frames through adapter->fmp/lmp, applies the 82543 TBI
 * workaround, and hands completed packets to (*ifp->if_input)()
 * with the lock dropped. Returns TRUE while descriptors remain ready.
 */
3591 lem_rxeof(struct adapter *adapter, int count, int *done)
3593 struct ifnet *ifp = adapter->ifp;
3595 u8 status = 0, accept_frame = 0, eop = 0;
3596 u16 len, desc_len, prev_len_adj;
3598 struct e1000_rx_desc *current_desc;
3600 #ifdef BATCH_DISPATCH
3601 struct mbuf *mh = NULL, *mt = NULL;
3602 #endif /* BATCH_DISPATCH */
3605 struct paravirt_csb* csb = adapter->csb;
3606 int csb_mode = csb && csb->guest_csb_on;
3608 //ND("clear guest_rxkick at %d", adapter->next_rx_desc_to_check);
3609 if (csb_mode && csb->guest_need_rxkick)
3610 csb->guest_need_rxkick = 0;
3611 #endif /* NIC_PARAVIRT */
3612 EM_RX_LOCK(adapter);
3614 #ifdef BATCH_DISPATCH
3616 #endif /* BATCH_DISPATCH */
3617 i = adapter->next_rx_desc_to_check;
3618 current_desc = &adapter->rx_desc_base[i];
3619 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3620 BUS_DMASYNC_POSTREAD);
/* netmap owns the ring in netmap mode: hand off and return early */
3623 if (netmap_rx_irq(ifp, 0, &rx_sent)) {
3624 EM_RX_UNLOCK(adapter);
3627 #endif /* DEV_NETMAP */
3629 #if 1 // XXX optimization ?
/* fast path out when the first descriptor is not done yet */
3630 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3633 EM_RX_UNLOCK(adapter);
3638 while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3639 struct mbuf *m = NULL;
3641 status = current_desc->status;
3642 if ((status & E1000_RXD_STAT_DD) == 0) {
3645 /* buffer not ready yet. Retry a few times before giving up */
3646 if (++retries <= adapter->rx_retries) {
3649 if (csb->guest_need_rxkick == 0) {
3650 // ND("set guest_rxkick at %d", adapter->next_rx_desc_to_check);
3651 csb->guest_need_rxkick = 1;
3652 // XXX memory barrier, status volatile ?
3653 continue; /* double check */
3656 /* no buffer ready, give up */
3657 #endif /* NIC_PARAVIRT */
3662 if (csb->guest_need_rxkick)
3663 // ND("clear again guest_rxkick at %d", adapter->next_rx_desc_to_check);
3664 csb->guest_need_rxkick = 0;
3667 #endif /* NIC_PARAVIRT */
3669 mp = adapter->rx_buffer_area[i].m_head;
3671 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3672 * needs to access the last received byte in the mbuf.
3674 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3675 BUS_DMASYNC_POSTREAD);
3679 desc_len = le16toh(current_desc->length);
3680 if (status & E1000_RXD_STAT_EOP) {
/* last descriptor of the frame: strip CRC, handle short final chunk */
3683 if (desc_len < ETHER_CRC_LEN) {
3685 prev_len_adj = ETHER_CRC_LEN - desc_len;
3687 len = desc_len - ETHER_CRC_LEN;
/* 82543 TBI workaround: some "errored" frames are still acceptable */
3693 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3695 u32 pkt_len = desc_len;
3697 if (adapter->fmp != NULL)
3698 pkt_len += adapter->fmp->m_pkthdr.len;
3700 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3701 if (TBI_ACCEPT(&adapter->hw, status,
3702 current_desc->errors, pkt_len, last_byte,
3703 adapter->min_frame_size, adapter->max_frame_size)) {
3704 e1000_tbi_adjust_stats_82543(&adapter->hw,
3705 &adapter->stats, pkt_len,
3706 adapter->hw.mac.addr,
3707 adapter->max_frame_size);
/* replenish the slot; on failure the old mbuf is recycled below */
3715 if (lem_get_buf(adapter, i) != 0) {
3720 /* Assign correct length to the current fragment */
3723 if (adapter->fmp == NULL) {
3724 mp->m_pkthdr.len = len;
3725 adapter->fmp = mp; /* Store the first mbuf */
3728 /* Chain mbuf's together */
3729 mp->m_flags &= ~M_PKTHDR;
3731 * Adjust length of previous mbuf in chain if
3732 * we received less than 4 bytes in the last
3735 if (prev_len_adj > 0) {
3736 adapter->lmp->m_len -= prev_len_adj;
3737 adapter->fmp->m_pkthdr.len -=
3740 adapter->lmp->m_next = mp;
3741 adapter->lmp = adapter->lmp->m_next;
3742 adapter->fmp->m_pkthdr.len += len;
3746 adapter->fmp->m_pkthdr.rcvif = ifp;
3748 lem_receive_checksum(adapter, current_desc,
3750 #ifndef __NO_STRICT_ALIGNMENT
3751 if (adapter->max_frame_size >
3752 (MCLBYTES - ETHER_ALIGN) &&
3753 lem_fixup_rx(adapter) != 0)
3756 if (status & E1000_RXD_STAT_VP) {
3757 adapter->fmp->m_pkthdr.ether_vtag =
3758 le16toh(current_desc->special);
3759 adapter->fmp->m_flags |= M_VLANTAG;
3761 #ifndef __NO_STRICT_ALIGNMENT
3765 adapter->fmp = NULL;
3766 adapter->lmp = NULL;
3769 adapter->dropped_pkts++;
3771 /* Reuse loaded DMA map and just update mbuf chain */
3772 mp = adapter->rx_buffer_area[i].m_head;
3773 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3774 mp->m_data = mp->m_ext.ext_buf;
3776 if (adapter->max_frame_size <=
3777 (MCLBYTES - ETHER_ALIGN))
3778 m_adj(mp, ETHER_ALIGN);
/* drop any partially-assembled frame along with this one */
3779 if (adapter->fmp != NULL) {
3780 m_freem(adapter->fmp);
3781 adapter->fmp = NULL;
3782 adapter->lmp = NULL;
3787 /* Zero out the receive descriptors status. */
3788 current_desc->status = 0;
3789 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3790 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3794 /* the buffer at i has been already replaced by lem_get_buf()
3795 * so it is safe to set guest_rdt = i and possibly send a kick.
3796 * XXX see if we can optimize it later.
3799 // XXX memory barrier
3800 if (i == csb->host_rxkick_at)
3801 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3803 #endif /* NIC_PARAVIRT */
3804 /* Advance our pointers to the next descriptor. */
3805 if (++i == adapter->num_rx_desc)
3807 /* Call into the stack */
3809 #ifdef BATCH_DISPATCH
3810 if (adapter->batch_enable) {
3816 m->m_nextpkt = NULL;
3818 current_desc = &adapter->rx_desc_base[i];
3821 #endif /* BATCH_DISPATCH */
/* drop the RX lock across if_input(); re-read i afterwards because
 * another thread may have advanced the ring in the meantime */
3822 adapter->next_rx_desc_to_check = i;
3823 EM_RX_UNLOCK(adapter);
3824 (*ifp->if_input)(ifp, m);
3825 EM_RX_LOCK(adapter);
3827 i = adapter->next_rx_desc_to_check;
3829 current_desc = &adapter->rx_desc_base[i];
3831 adapter->next_rx_desc_to_check = i;
3832 #ifdef BATCH_DISPATCH
3834 EM_RX_UNLOCK(adapter);
3835 while ( (mt = mh) != NULL) {
3837 mt->m_nextpkt = NULL;
3840 EM_RX_LOCK(adapter);
3841 i = adapter->next_rx_desc_to_check; /* in case of interrupts */
3845 #endif /* BATCH_DISPATCH */
3847 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
3849 i = adapter->num_rx_desc - 1;
3851 if (!csb_mode) /* filter out writes */
3852 #endif /* NIC_PARAVIRT */
3853 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3856 EM_RX_UNLOCK(adapter);
3857 return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
3860 #ifndef __NO_STRICT_ALIGNMENT
3862 * When jumbo frames are enabled we should realign entire payload on
3863 * architecures with strict alignment. This is serious design mistake of 8254x
3864 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3865 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3866 * payload. On architecures without strict alignment restrictions 8254x still
3867 * performs unaligned memory access which would reduce the performance too.
3868 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3869 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3870 * existing mbuf chain.
3872 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3873 * not used at all on architectures with strict alignment.
/*
 * NOTE(review): excerpt elides the declarations of m/n, the surrounding
 * braces and the return statements — presumably returns 0 on success
 * and non-zero after the drop path; confirm against the full driver.
 */
3876 lem_fixup_rx(struct adapter *adapter)
/* short frame: shift payload within the cluster by ETHER_HDR_LEN */
3883 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3884 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3885 m->m_data += ETHER_HDR_LEN;
/* otherwise prepend a fresh header mbuf carrying just the ether header */
3887 MGETHDR(n, M_NOWAIT, MT_DATA);
3889 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3890 m->m_data += ETHER_HDR_LEN;
3891 m->m_len -= ETHER_HDR_LEN;
3892 n->m_len = ETHER_HDR_LEN;
3893 M_MOVE_PKTHDR(n, m);
/* allocation-failure path: count the drop and discard the chain */
3897 adapter->dropped_pkts++;
3898 m_freem(adapter->fmp);
3899 adapter->fmp = NULL;
3908 /*********************************************************************
3910 * Verify that the hardware indicated that the checksum is valid.
3911 * Inform the stack about the status of checksum so that stack
3912 * doesn't spend time verifying the checksum.
3914 *********************************************************************/
/*
 * Translates the RX descriptor's checksum status bits into mbuf
 * csum_flags. NOTE(review): excerpt elides braces/returns — verify
 * the exact early-return structure against the full driver.
 */
3916 lem_receive_checksum(struct adapter *adapter,
3917 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3919 /* 82543 or newer only */
3920 if ((adapter->hw.mac.type < e1000_82543) ||
3921 /* Ignore Checksum bit is set */
3922 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3923 mp->m_pkthdr.csum_flags = 0;
3927 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3929 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3930 /* IP Checksum Good */
3931 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3932 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3935 mp->m_pkthdr.csum_flags = 0;
3939 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3941 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
/* TCP/UDP checksum verified by hardware; 0xffff = pseudo-header valid */
3942 mp->m_pkthdr.csum_flags |=
3943 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3944 mp->m_pkthdr.csum_data = htons(0xffff);
3950 * This routine is run via an vlan
/*
 * VLAN registration event handler: records vtag in the shadow VFTA
 * bitmap and re-inits if hardware VLAN filtering is enabled.
 * NOTE(review): the line computing `bit` is elided in this excerpt;
 * presumably bit = vtag & 0x1F — confirm against the full driver.
 */
3954 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3956 struct adapter *adapter = ifp->if_softc;
3959 if (ifp->if_softc != arg) /* Not our event */
3962 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
3965 EM_CORE_LOCK(adapter);
/* VFTA is an array of 32-bit words; index selects the word */
3966 index = (vtag >> 5) & 0x7F;
3968 adapter->shadow_vfta[index] |= (1 << bit);
3969 ++adapter->num_vlans;
3970 /* Re-init to load the changes */
3971 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3972 lem_init_locked(adapter);
3973 EM_CORE_UNLOCK(adapter);
3977 * This routine is run via an vlan
/*
 * VLAN unregistration event handler: mirror of lem_register_vlan —
 * clears the shadow-VFTA bit and re-inits under hardware filtering.
 */
3981 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3983 struct adapter *adapter = ifp->if_softc;
3986 if (ifp->if_softc != arg)
3989 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3992 EM_CORE_LOCK(adapter);
3993 index = (vtag >> 5) & 0x7F;
3995 adapter->shadow_vfta[index] &= ~(1 << bit);
3996 --adapter->num_vlans;
3997 /* Re-init to load the changes */
3998 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3999 lem_init_locked(adapter);
4000 EM_CORE_UNLOCK(adapter);
/*
 * Repopulates the hardware VFTA from the shadow copy after a soft
 * reset, then enables VLAN tag stripping (CTRL_VME) and the VLAN
 * filter table (RCTL_VFE). No-op when no VLANs are registered.
 */
4004 lem_setup_vlan_hw_support(struct adapter *adapter)
4006 struct e1000_hw *hw = &adapter->hw;
4010 ** We get here thru init_locked, meaning
4011 ** a soft reset, this has already cleared
4012 ** the VFTA and other state, so if there
4013 ** have been no vlan's registered do nothing.
4015 if (adapter->num_vlans == 0)
4019 ** A soft reset zero's out the VFTA, so
4020 ** we need to repopulate it now.
4022 for (int i = 0; i < EM_VFTA_SIZE; i++)
4023 if (adapter->shadow_vfta[i] != 0)
4024 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
4025 i, adapter->shadow_vfta[i]);
4027 reg = E1000_READ_REG(hw, E1000_CTRL);
4028 reg |= E1000_CTRL_VME;
4029 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4031 /* Enable the Filter Table */
4032 reg = E1000_READ_REG(hw, E1000_RCTL);
4033 reg &= ~E1000_RCTL_CFIEN;
4034 reg |= E1000_RCTL_VFE;
4035 E1000_WRITE_REG(hw, E1000_RCTL, reg);
/* Unmask the standard interrupt causes by writing IMS. */
4039 lem_enable_intr(struct adapter *adapter)
4041 struct e1000_hw *hw = &adapter->hw;
4042 u32 ims_mask = IMS_ENABLE_MASK;
4044 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
/* Mask all interrupt causes by writing IMC with all bits set. */
4048 lem_disable_intr(struct adapter *adapter)
4050 struct e1000_hw *hw = &adapter->hw;
4052 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4056 * Bit of a misnomer, what this really means is
4057 * to enable OS management of the system... aka
4058 * to disable special hardware management features
/* Takes ARP handling away from the management firmware (clears MANC.ARP_EN). */
4061 lem_init_manageability(struct adapter *adapter)
4063 /* A shared code workaround */
4064 if (adapter->has_manage) {
4065 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4066 /* disable hardware interception of ARP */
4067 manc &= ~(E1000_MANC_ARP_EN);
4068 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4073 * Give control back to hardware management
4074 * controller if there is one.
/* Inverse of lem_init_manageability: re-enables firmware ARP interception. */
4077 lem_release_manageability(struct adapter *adapter)
4079 if (adapter->has_manage) {
4080 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4082 /* re-enable hardware interception of ARP */
4083 manc |= E1000_MANC_ARP_EN;
4084 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4089 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
4090 * For ASF and Pass Through versions of f/w this means
4091 * that the driver is loaded. For AMT version type f/w
4092 * this means that the network i/f is open.
4095 lem_get_hw_control(struct adapter *adapter)
/* read-modify-write of CTRL_EXT to assert DRV_LOAD */
4099 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4100 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4101 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4106 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4107 * For ASF and Pass Through versions of f/w this means that
4108 * the driver is no longer loaded. For AMT versions of the
4109 * f/w this means that the network i/f is closed.
4112 lem_release_hw_control(struct adapter *adapter)
/* nothing to release if there is no manageability firmware */
4116 if (!adapter->has_manage)
4119 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4120 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4121 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
/*
 * Rejects multicast addresses (low bit of first octet set) and the
 * all-zeros address. NOTE(review): the return statements are elided
 * in this excerpt — presumably returns FALSE inside the if, TRUE after.
 */
4126 lem_is_valid_ether_addr(u8 *addr)
4128 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4130 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4138 ** Parse the interface capabilities with regard
4139 ** to both system management and wake-on-lan for
/*
 * Reads the NVM wake-on-LAN configuration (register and APME mask vary
 * by MAC type and, on dual-port 82546, by PCI function), then applies
 * per-device quirks that disable WoL on ports that don't support it.
 * NOTE(review): several case labels and the quirk bodies that clear
 * adapter->wol are elided in this excerpt.
 */
4143 lem_get_wakeup(device_t dev)
4145 struct adapter *adapter = device_get_softc(dev);
4146 u16 eeprom_data = 0, device_id, apme_mask;
4148 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
4149 apme_mask = EM_EEPROM_APME;
4151 switch (adapter->hw.mac.type) {
4156 e1000_read_nvm(&adapter->hw,
4157 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
4158 apme_mask = EM_82544_APME;
4161 case e1000_82546_rev_3:
/* dual-port parts keep port B's config in a separate NVM word */
4162 if (adapter->hw.bus.func == 1) {
4163 e1000_read_nvm(&adapter->hw,
4164 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
4167 e1000_read_nvm(&adapter->hw,
4168 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4171 e1000_read_nvm(&adapter->hw,
4172 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4175 if (eeprom_data & apme_mask)
4176 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
4178 * We have the eeprom settings, now apply the special cases
4179 * where the eeprom may be wrong or the board won't support
4180 * wake on lan on a particular port
4182 device_id = pci_get_device(dev);
4183 switch (device_id) {
4184 case E1000_DEV_ID_82546GB_PCIE:
4187 case E1000_DEV_ID_82546EB_FIBER:
4188 case E1000_DEV_ID_82546GB_FIBER:
4189 /* Wake events only supported on port A for dual fiber
4190 * regardless of eeprom setting */
4191 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
4192 E1000_STATUS_FUNC_1)
4195 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
4196 /* if quad port adapter, disable WoL on all but port A */
4197 if (global_quad_port_a != 0)
4199 /* Reset for multiple quad port adapters */
4200 if (++global_quad_port_a == 4)
4201 global_quad_port_a = 0;
4209 * Enable PCI Wake On Lan capability
/*
 * Arms wake-on-LAN for suspend: programs WUC/WUFC per the negotiated
 * adapter->wol filters, keeps the laser on for fiber/serdes, and sets
 * the PME bits in the device's PCI power-management capability.
 * NOTE(review): the early-return when no PMG capability exists and
 * the pchlan goto target are elided in this excerpt.
 */
4212 lem_enable_wakeup(device_t dev)
4214 struct adapter *adapter = device_get_softc(dev);
4215 struct ifnet *ifp = adapter->ifp;
4216 u32 pmc, ctrl, ctrl_ext, rctl;
/* bail out if the device has no PCI power-management capability */
4219 if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
4222 /* Advertise the wakeup capability */
4223 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4224 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
4225 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4226 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4228 /* Keep the laser running on Fiber adapters */
4229 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
4230 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
4231 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4232 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4233 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
4237 ** Determine type of Wakeup: note that wol
4238 ** is set with all bits on by default.
4240 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
4241 adapter->wol &= ~E1000_WUFC_MAG;
4243 if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
4244 adapter->wol &= ~E1000_WUFC_MC;
/* multicast wake requires promiscuous-multicast receive */
4246 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4247 rctl |= E1000_RCTL_MPE;
4248 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4251 if (adapter->hw.mac.type == e1000_pchlan) {
4252 if (lem_enable_phy_wakeup(adapter))
4255 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4256 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
/* finally request PME at the PCI config level if WOL is enabled */
4261 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4262 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4263 if (ifp->if_capenable & IFCAP_WOL)
4264 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4265 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
4271 ** WOL in the newer chipset interfaces (pchlan)
4272 ** require thing to be copied into the phy
/*
 * Mirrors the MAC's wakeup configuration (RAR entries, MTA table, RCTL
 * filter settings, WUC/WUFC) into the PHY's BM_* registers, then sets
 * the host-wakeup bit on PHY page 769 under the acquired PHY lock.
 * Returns the e1000 status/ret code. NOTE(review): the out/cleanup
 * labels and final return are elided in this excerpt.
 */
4275 lem_enable_phy_wakeup(struct adapter *adapter)
4277 struct e1000_hw *hw = &adapter->hw;
4281 /* copy MAC RARs to PHY RARs */
4282 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4283 mreg = E1000_READ_REG(hw, E1000_RAL(i));
4284 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
4285 e1000_write_phy_reg(hw, BM_RAR_M(i),
4286 (u16)((mreg >> 16) & 0xFFFF));
4287 mreg = E1000_READ_REG(hw, E1000_RAH(i));
4288 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
4289 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
4290 (u16)((mreg >> 16) & 0xFFFF));
4293 /* copy MAC MTA to PHY MTA */
4294 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4295 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4296 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4297 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4298 (u16)((mreg >> 16) & 0xFFFF));
4301 /* configure PHY Rx Control register */
4302 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
4303 mreg = E1000_READ_REG(hw, E1000_RCTL);
4304 if (mreg & E1000_RCTL_UPE)
4305 preg |= BM_RCTL_UPE;
4306 if (mreg & E1000_RCTL_MPE)
4307 preg |= BM_RCTL_MPE;
4308 preg &= ~(BM_RCTL_MO_MASK);
4309 if (mreg & E1000_RCTL_MO_3)
4310 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4311 << BM_RCTL_MO_SHIFT);
4312 if (mreg & E1000_RCTL_BAM)
4313 preg |= BM_RCTL_BAM;
4314 if (mreg & E1000_RCTL_PMCF)
4315 preg |= BM_RCTL_PMCF;
4316 mreg = E1000_READ_REG(hw, E1000_CTRL);
4317 if (mreg & E1000_CTRL_RFCE)
4318 preg |= BM_RCTL_RFCE;
4319 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
4321 /* enable PHY wakeup in MAC register */
4322 E1000_WRITE_REG(hw, E1000_WUC,
4323 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4324 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
4326 /* configure and enable PHY wakeup in PHY registers */
4327 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
4328 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4330 /* activate PHY wakeup */
4331 ret = hw->phy.ops.acquire(hw);
4333 printf("Could not acquire PHY\n");
/* raw MDIC access: select wakeup-control page 769 first */
4336 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4337 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4338 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
4340 printf("Could not read PHY page 769\n");
4343 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4344 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
4346 printf("Could not set PHY Host Wakeup bit\n");
4348 hw->phy.ops.release(hw);
/*
 * led(4) callback: turns the identify LED on or off under the core
 * lock. NOTE(review): the if/else around onoff is elided here —
 * presumably on = setup+led_on, off = led_off+cleanup.
 */
4354 lem_led_func(void *arg, int onoff)
4356 struct adapter *adapter = arg;
4358 EM_CORE_LOCK(adapter);
4360 e1000_setup_led(&adapter->hw);
4361 e1000_led_on(&adapter->hw);
4363 e1000_led_off(&adapter->hw);
4364 e1000_cleanup_led(&adapter->hw);
4366 EM_CORE_UNLOCK(adapter);
4369 /*********************************************************************
4370 * 82544 Coexistence issue workaround.
4371 * There are 2 issues.
4372 * 1. Transmit Hang issue.
4373 * To detect this issue, following equation can be used...
4374 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4375 * If SUM[3:0] is in between 1 to 4, we will have this issue.
4378 * To detect this issue, following equation can be used...
4379 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4380 * If SUM[3:0] is in between 9 to c, we will have this issue.
4384 * Make sure we do not have ending address
4385 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4387 *************************************************************************/
/*
 * Splits a DMA segment into one or two descriptor entries so the
 * segment's ending address avoids the 82544 problem terminators.
 * Returns the number of entries written to desc_array (1 or 2).
 * NOTE(review): an early special case (likely length==0 or a 64-bit
 * address check) is elided before line 4397.
 */
4389 lem_fill_descriptors (bus_addr_t address, u32 length,
4390 PDESC_ARRAY desc_array)
4392 u32 safe_terminator;
4394 /* Since issue is sensitive to length and address.*/
4395 /* Let us first check the address...*/
4397 desc_array->descriptor[0].address = address;
4398 desc_array->descriptor[0].length = length;
4399 desc_array->elements = 1;
4400 return (desc_array->elements);
/* terminator = low 3 addr bits + low 4 length bits, mod 16 */
4402 safe_terminator = (u32)((((u32)address & 0x7) +
4403 (length & 0xF)) & 0xF);
4404 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4405 if (safe_terminator == 0 ||
4406 (safe_terminator > 4 &&
4407 safe_terminator < 9) ||
4408 (safe_terminator > 0xC &&
4409 safe_terminator <= 0xF)) {
4410 desc_array->descriptor[0].address = address;
4411 desc_array->descriptor[0].length = length;
4412 desc_array->elements = 1;
4413 return (desc_array->elements);
/* problem terminator: carve the final 4 bytes into a second entry */
4416 desc_array->descriptor[0].address = address;
4417 desc_array->descriptor[0].length = length - 4;
4418 desc_array->descriptor[1].address = address + (length - 4);
4419 desc_array->descriptor[1].length = 4;
4420 desc_array->elements = 2;
4421 return (desc_array->elements);
4424 /**********************************************************************
4426 * Update the board statistics counters.
4428 **********************************************************************/
/*
 * Accumulates the hardware's clear-on-read statistics registers into
 * the software adapter->stats mirror, then derives the ifnet
 * collision/ierror/oerror counters from them.
 */
4430 lem_update_stats_counters(struct adapter *adapter)
/* symbol/sequence errors are only meaningful with copper or link up */
4434 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
4435 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4436 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4437 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4439 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4440 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4441 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4442 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4444 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4445 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4446 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4447 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4448 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4449 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4450 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4451 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4452 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4453 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4454 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4455 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4456 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4457 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4458 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4459 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4460 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4461 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4462 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4463 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4465 /* For the 64-bit byte counters the low dword must be read first. */
4466 /* Both registers clear on the read of the high dword */
4468 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
4469 ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
4470 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
4471 ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
4473 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
4474 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
4475 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
4476 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
4477 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
/* NOTE(review): only TORH/TOTH reads are visible here; the low-dword
 * (TORL/TOTL) reads appear to be on elided lines — confirm */
4479 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
4480 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
4482 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
4483 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
4484 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
4485 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
4486 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
4487 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
4488 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
4489 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
4490 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
4491 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
4493 if (adapter->hw.mac.type >= e1000_82543) {
4494 adapter->stats.algnerrc +=
4495 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
4496 adapter->stats.rxerrc +=
4497 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
4498 adapter->stats.tncrs +=
4499 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
4500 adapter->stats.cexterr +=
4501 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
4502 adapter->stats.tsctc +=
4503 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
4504 adapter->stats.tsctfc +=
4505 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
/* fold the accumulated hardware stats into the ifnet counters */
4509 ifp->if_collisions = adapter->stats.colc;
4512 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
4513 adapter->stats.crcerrs + adapter->stats.algnerrc +
4514 adapter->stats.ruc + adapter->stats.roc +
4515 adapter->stats.mpc + adapter->stats.cexterr;
4518 ifp->if_oerrors = adapter->stats.ecol +
4519 adapter->stats.latecol + adapter->watchdog_events;
4522 /* Export a single 32-bit register via a read-only sysctl. */
4524 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4526 struct adapter *adapter;
/* arg1 = softc pointer, arg2 = register offset (see lem_add_hw_stats) */
4529 adapter = oidp->oid_arg1;
4530 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4531 return (sysctl_handle_int(oidp, &val, 0, req));
4535 * Add sysctl variables, one per statistic, to the system.
4538 lem_add_hw_stats(struct adapter *adapter)
4540 device_t dev = adapter->dev;
4542 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4543 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4544 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4545 struct e1000_hw_stats *stats = &adapter->stats;
4547 struct sysctl_oid *stat_node;
4548 struct sysctl_oid_list *stat_list;
4550 /* Driver Statistics */
4551 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail",
4552 CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4554 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
4555 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4556 "Std mbuf cluster failed");
4557 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4558 CTLFLAG_RD, &adapter->dropped_pkts,
4559 "Driver dropped packets");
4560 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
4561 CTLFLAG_RD, &adapter->no_tx_dma_setup,
4562 "Driver tx dma failure in xmit");
4563 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4564 CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4565 "Not enough tx descriptors failure in xmit");
4566 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4567 CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4568 "Not enough tx descriptors failure in xmit");
4569 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4570 CTLFLAG_RD, &adapter->rx_overruns,
4572 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4573 CTLFLAG_RD, &adapter->watchdog_events,
4574 "Watchdog timeouts");
4576 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4577 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4578 lem_sysctl_reg_handler, "IU",
4579 "Device Control Register");
4580 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4581 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4582 lem_sysctl_reg_handler, "IU",
4583 "Receiver Control Register");
4584 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4585 CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4586 "Flow Control High Watermark");
4587 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4588 CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4589 "Flow Control Low Watermark");
4590 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4591 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4592 "TX FIFO workaround events");
4593 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4594 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4597 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head",
4598 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4599 lem_sysctl_reg_handler, "IU",
4600 "Transmit Descriptor Head");
4601 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail",
4602 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4603 lem_sysctl_reg_handler, "IU",
4604 "Transmit Descriptor Tail");
4605 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head",
4606 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4607 lem_sysctl_reg_handler, "IU",
4608 "Receive Descriptor Head");
4609 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail",
4610 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4611 lem_sysctl_reg_handler, "IU",
4612 "Receive Descriptor Tail");
4615 /* MAC stats get their own sub node */
4617 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4618 CTLFLAG_RD, NULL, "Statistics");
4619 stat_list = SYSCTL_CHILDREN(stat_node);
4621 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4622 CTLFLAG_RD, &stats->ecol,
4623 "Excessive collisions");
4624 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4625 CTLFLAG_RD, &stats->scc,
4626 "Single collisions");
4627 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4628 CTLFLAG_RD, &stats->mcc,
4629 "Multiple collisions");
4630 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4631 CTLFLAG_RD, &stats->latecol,
4633 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4634 CTLFLAG_RD, &stats->colc,
4636 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4637 CTLFLAG_RD, &adapter->stats.symerrs,
4639 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4640 CTLFLAG_RD, &adapter->stats.sec,
4642 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4643 CTLFLAG_RD, &adapter->stats.dc,
4645 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4646 CTLFLAG_RD, &adapter->stats.mpc,
4648 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4649 CTLFLAG_RD, &adapter->stats.rnbc,
4650 "Receive No Buffers");
4651 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4652 CTLFLAG_RD, &adapter->stats.ruc,
4653 "Receive Undersize");
4654 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4655 CTLFLAG_RD, &adapter->stats.rfc,
4656 "Fragmented Packets Received ");
4657 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4658 CTLFLAG_RD, &adapter->stats.roc,
4659 "Oversized Packets Received");
4660 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4661 CTLFLAG_RD, &adapter->stats.rjc,
4663 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4664 CTLFLAG_RD, &adapter->stats.rxerrc,
4666 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4667 CTLFLAG_RD, &adapter->stats.crcerrs,
4669 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4670 CTLFLAG_RD, &adapter->stats.algnerrc,
4671 "Alignment Errors");
4672 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4673 CTLFLAG_RD, &adapter->stats.cexterr,
4674 "Collision/Carrier extension errors");
4675 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4676 CTLFLAG_RD, &adapter->stats.xonrxc,
4678 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4679 CTLFLAG_RD, &adapter->stats.xontxc,
4681 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4682 CTLFLAG_RD, &adapter->stats.xoffrxc,
4684 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4685 CTLFLAG_RD, &adapter->stats.xofftxc,
4686 "XOFF Transmitted");
4688 /* Packet Reception Stats */
4689 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4690 CTLFLAG_RD, &adapter->stats.tpr,
4691 "Total Packets Received ");
4692 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4693 CTLFLAG_RD, &adapter->stats.gprc,
4694 "Good Packets Received");
4695 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4696 CTLFLAG_RD, &adapter->stats.bprc,
4697 "Broadcast Packets Received");
4698 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4699 CTLFLAG_RD, &adapter->stats.mprc,
4700 "Multicast Packets Received");
4701 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4702 CTLFLAG_RD, &adapter->stats.prc64,
4703 "64 byte frames received ");
4704 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4705 CTLFLAG_RD, &adapter->stats.prc127,
4706 "65-127 byte frames received");
4707 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4708 CTLFLAG_RD, &adapter->stats.prc255,
4709 "128-255 byte frames received");
4710 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4711 CTLFLAG_RD, &adapter->stats.prc511,
4712 "256-511 byte frames received");
4713 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4714 CTLFLAG_RD, &adapter->stats.prc1023,
4715 "512-1023 byte frames received");
4716 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4717 CTLFLAG_RD, &adapter->stats.prc1522,
4718 "1023-1522 byte frames received");
4719 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4720 CTLFLAG_RD, &adapter->stats.gorc,
4721 "Good Octets Received");
4723 /* Packet Transmission Stats */
4724 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4725 CTLFLAG_RD, &adapter->stats.gotc,
4726 "Good Octets Transmitted");
4727 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4728 CTLFLAG_RD, &adapter->stats.tpt,
4729 "Total Packets Transmitted");
4730 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4731 CTLFLAG_RD, &adapter->stats.gptc,
4732 "Good Packets Transmitted");
4733 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4734 CTLFLAG_RD, &adapter->stats.bptc,
4735 "Broadcast Packets Transmitted");
4736 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4737 CTLFLAG_RD, &adapter->stats.mptc,
4738 "Multicast Packets Transmitted");
4739 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4740 CTLFLAG_RD, &adapter->stats.ptc64,
4741 "64 byte frames transmitted ");
4742 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4743 CTLFLAG_RD, &adapter->stats.ptc127,
4744 "65-127 byte frames transmitted");
4745 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4746 CTLFLAG_RD, &adapter->stats.ptc255,
4747 "128-255 byte frames transmitted");
4748 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4749 CTLFLAG_RD, &adapter->stats.ptc511,
4750 "256-511 byte frames transmitted");
4751 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4752 CTLFLAG_RD, &adapter->stats.ptc1023,
4753 "512-1023 byte frames transmitted");
4754 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4755 CTLFLAG_RD, &adapter->stats.ptc1522,
4756 "1024-1522 byte frames transmitted");
4757 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4758 CTLFLAG_RD, &adapter->stats.tsctc,
4759 "TSO Contexts Transmitted");
4760 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4761 CTLFLAG_RD, &adapter->stats.tsctfc,
4762 "TSO Contexts Failed");
4765 /**********************************************************************
4767 * This routine provides a way to dump out the adapter eeprom,
4768 * often a useful debug/service tool. This only dumps the first
4769 * 32 words, stuff that matters is in that extent.
4771 **********************************************************************/
4774 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4776 struct adapter *adapter;
4781 error = sysctl_handle_int(oidp, &result, 0, req);
4783 if (error || !req->newptr)
4787 * This value will cause a hex dump of the
4788 * first 32 16-bit words of the EEPROM to
4792 adapter = (struct adapter *)arg1;
4793 lem_print_nvm_info(adapter);
4800 lem_print_nvm_info(struct adapter *adapter)
4805 /* Its a bit crude, but it gets the job done */
4806 printf("\nInterface EEPROM Dump:\n");
4807 printf("Offset\n0x0000 ");
4808 for (i = 0, j = 0; i < 32; i++, j++) {
4809 if (j == 8) { /* Make the offset block */
4811 printf("\n0x00%x0 ",row);
4813 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4814 printf("%04x ", eeprom_data);
4820 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4822 struct em_int_delay_info *info;
4823 struct adapter *adapter;
4829 info = (struct em_int_delay_info *)arg1;
4830 usecs = info->value;
4831 error = sysctl_handle_int(oidp, &usecs, 0, req);
4832 if (error != 0 || req->newptr == NULL)
4834 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4836 info->value = usecs;
4837 ticks = EM_USECS_TO_TICKS(usecs);
4838 if (info->offset == E1000_ITR) /* units are 256ns here */
4841 adapter = info->adapter;
4843 EM_CORE_LOCK(adapter);
4844 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4845 regval = (regval & ~0xffff) | (ticks & 0xffff);
4846 /* Handle a few special cases. */
4847 switch (info->offset) {
4852 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4853 /* Don't write 0 into the TIDV register. */
4856 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4859 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4860 EM_CORE_UNLOCK(adapter);
4865 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4866 const char *description, struct em_int_delay_info *info,
4867 int offset, int value)
4869 info->adapter = adapter;
4870 info->offset = offset;
4871 info->value = value;
4872 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4873 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4874 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4875 info, 0, lem_sysctl_int_delay, "I", description);
4879 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4880 const char *description, int *limit, int value)
4883 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4884 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4885 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4889 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4890 const char *description, int *limit, int value)
4893 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4894 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4895 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);