1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 * Uncomment the following extensions for better performance in a VM,
37 * especially if you have support in the hypervisor.
38 * See http://info.iet.unipi.it/~luigi/netmap/
40 // #define BATCH_DISPATCH
41 // #define NIC_SEND_COMBINING
42 // #define NIC_PARAVIRT /* enable virtio-like synchronization */
45 #include "opt_inet6.h"
47 #ifdef HAVE_KERNEL_OPTION_HEADERS
48 #include "opt_device_polling.h"
51 #include <sys/param.h>
52 #include <sys/systm.h>
54 #include <sys/endian.h>
55 #include <sys/kernel.h>
56 #include <sys/kthread.h>
57 #include <sys/malloc.h>
59 #include <sys/module.h>
61 #include <sys/socket.h>
62 #include <sys/sockio.h>
63 #include <sys/sysctl.h>
64 #include <sys/taskqueue.h>
65 #include <sys/eventhandler.h>
66 #include <machine/bus.h>
67 #include <machine/resource.h>
70 #include <net/ethernet.h>
72 #include <net/if_arp.h>
73 #include <net/if_dl.h>
74 #include <net/if_media.h>
76 #include <net/if_types.h>
77 #include <net/if_vlan_var.h>
79 #include <netinet/in_systm.h>
80 #include <netinet/in.h>
81 #include <netinet/if_ether.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip6.h>
84 #include <netinet/tcp.h>
85 #include <netinet/udp.h>
87 #include <machine/in_cksum.h>
88 #include <dev/led/led.h>
89 #include <dev/pci/pcivar.h>
90 #include <dev/pci/pcireg.h>
92 #include "e1000_api.h"
95 /*********************************************************************
96  *  Legacy Em Driver version:
97  *********************************************************************/
/* Human-readable version string; exposed for diagnostics, not parsed. */
98 char lem_driver_version[] = "1.1.0";
100 /*********************************************************************
101 * PCI Device ID Table
103 * Used by probe to select devices to load on
104 * Last field stores an index into e1000_strings
105 * Last entry must be all 0s
107 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
108 *********************************************************************/
/*
 * PCI match table consumed by lem_probe(): one row per supported
 * 8254x device ID.  PCI_ANY_ID in the subvendor/subdevice columns
 * wildcards that field; the last column indexes lem_strings[].
 * NOTE(review): the all-zero terminator row and closing brace were
 * elided in this extraction — the table must end with { 0, 0, 0, 0, 0}
 * because lem_probe() scans until ent->vendor_id == 0.
 */
110 static em_vendor_info_t lem_vendor_info_array[] =
112 	/* Intel(R) PRO/1000 Network Connection */
113 	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
114 	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
115 	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
116 	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
117 	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},
119 	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
120 	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
121 	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
122 	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
123 	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
124 	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
125 	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
127 	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},
129 	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
130 	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
132 	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
133 	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
134 	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
135 	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
137 	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
138 	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
139 	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
140 	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
141 	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
143 	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
144 	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
145 	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
146 	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
147 	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
148 	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
149 	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
150 	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
151 	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
152 						PCI_ANY_ID, PCI_ANY_ID, 0},
154 	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
155 	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
156 	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
157 	/* required last entry */
161 /*********************************************************************
162 * Table of branding strings for all supported NICs.
163 *********************************************************************/
/*
 * Branding strings indexed by the last column of
 * lem_vendor_info_array; all legacy parts share one string.
 */
165 static char *lem_strings[] = {
166 	"Intel(R) PRO/1000 Legacy Network Connection"
169 /*********************************************************************
170 * Function prototypes
171 *********************************************************************/
172 static int lem_probe(device_t);
173 static int lem_attach(device_t);
174 static int lem_detach(device_t);
175 static int lem_shutdown(device_t);
176 static int lem_suspend(device_t);
177 static int lem_resume(device_t);
178 static void lem_start(struct ifnet *);
179 static void lem_start_locked(struct ifnet *ifp);
180 static int lem_ioctl(struct ifnet *, u_long, caddr_t);
181 static void lem_init(void *);
182 static void lem_init_locked(struct adapter *);
183 static void lem_stop(void *);
184 static void lem_media_status(struct ifnet *, struct ifmediareq *);
185 static int lem_media_change(struct ifnet *);
186 static void lem_identify_hardware(struct adapter *);
187 static int lem_allocate_pci_resources(struct adapter *);
188 static int lem_allocate_irq(struct adapter *adapter);
189 static void lem_free_pci_resources(struct adapter *);
190 static void lem_local_timer(void *);
191 static int lem_hardware_init(struct adapter *);
192 static int lem_setup_interface(device_t, struct adapter *);
193 static void lem_setup_transmit_structures(struct adapter *);
194 static void lem_initialize_transmit_unit(struct adapter *);
195 static int lem_setup_receive_structures(struct adapter *);
196 static void lem_initialize_receive_unit(struct adapter *);
197 static void lem_enable_intr(struct adapter *);
198 static void lem_disable_intr(struct adapter *);
199 static void lem_free_transmit_structures(struct adapter *);
200 static void lem_free_receive_structures(struct adapter *);
201 static void lem_update_stats_counters(struct adapter *);
202 static void lem_add_hw_stats(struct adapter *adapter);
203 static void lem_txeof(struct adapter *);
204 static void lem_tx_purge(struct adapter *);
205 static int lem_allocate_receive_structures(struct adapter *);
206 static int lem_allocate_transmit_structures(struct adapter *);
207 static bool lem_rxeof(struct adapter *, int, int *);
208 #ifndef __NO_STRICT_ALIGNMENT
209 static int lem_fixup_rx(struct adapter *);
211 static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
213 static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
215 static void lem_set_promisc(struct adapter *);
216 static void lem_disable_promisc(struct adapter *);
217 static void lem_set_multi(struct adapter *);
218 static void lem_update_link_status(struct adapter *);
219 static int lem_get_buf(struct adapter *, int);
220 static void lem_register_vlan(void *, struct ifnet *, u16);
221 static void lem_unregister_vlan(void *, struct ifnet *, u16);
222 static void lem_setup_vlan_hw_support(struct adapter *);
223 static int lem_xmit(struct adapter *, struct mbuf **);
224 static void lem_smartspeed(struct adapter *);
225 static int lem_82547_fifo_workaround(struct adapter *, int);
226 static void lem_82547_update_fifo_head(struct adapter *, int);
227 static int lem_82547_tx_fifo_reset(struct adapter *);
228 static void lem_82547_move_tail(void *);
229 static int lem_dma_malloc(struct adapter *, bus_size_t,
230 struct em_dma_alloc *, int);
231 static void lem_dma_free(struct adapter *, struct em_dma_alloc *);
232 static int lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
233 static void lem_print_nvm_info(struct adapter *);
234 static int lem_is_valid_ether_addr(u8 *);
235 static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
236 PDESC_ARRAY desc_array);
237 static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
238 static void lem_add_int_delay_sysctl(struct adapter *, const char *,
239 const char *, struct em_int_delay_info *, int, int);
240 static void lem_set_flow_cntrl(struct adapter *, const char *,
241 const char *, int *, int);
242 /* Management and WOL Support */
243 static void lem_init_manageability(struct adapter *);
244 static void lem_release_manageability(struct adapter *);
245 static void lem_get_hw_control(struct adapter *);
246 static void lem_release_hw_control(struct adapter *);
247 static void lem_get_wakeup(device_t);
248 static void lem_enable_wakeup(device_t);
249 static int lem_enable_phy_wakeup(struct adapter *);
250 static void lem_led_func(void *, int);
252 static void lem_intr(void *);
253 static int lem_irq_fast(void *);
254 static void lem_handle_rxtx(void *context, int pending);
255 static void lem_handle_link(void *context, int pending);
256 static void lem_add_rx_process_limit(struct adapter *, const char *,
257 const char *, int *, int);
259 #ifdef DEVICE_POLLING
260 static poll_handler_t lem_poll;
263 /*********************************************************************
264 * FreeBSD Device Interface Entry Points
265 *********************************************************************/
/*
 * newbus glue: method table, driver descriptor, and module
 * registration.  The driver registers under the "em" name and shares
 * em_devclass with the modern em(4) driver so unit numbers do not
 * collide.  NOTE(review): DEVMETHOD_END and the closing braces of
 * both initializers were elided in this extraction.
 */
267 static device_method_t lem_methods[] = {
268 	/* Device interface */
269 	DEVMETHOD(device_probe, lem_probe),
270 	DEVMETHOD(device_attach, lem_attach),
271 	DEVMETHOD(device_detach, lem_detach),
272 	DEVMETHOD(device_shutdown, lem_shutdown),
273 	DEVMETHOD(device_suspend, lem_suspend),
274 	DEVMETHOD(device_resume, lem_resume),
278 static driver_t lem_driver = {
279 	"em", lem_methods, sizeof(struct adapter),
/* devclass is defined by the sibling em(4) driver. */
282 extern devclass_t em_devclass;
283 DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
284 MODULE_DEPEND(lem, pci, 1, 1, 1);
285 MODULE_DEPEND(lem, ether, 1, 1, 1);
287 /*********************************************************************
288 * Tunable default values.
289 *********************************************************************/
/*
 * Conversions between the hardware's 1.024 us interrupt-delay ticks
 * and microseconds; +500/+512 round to nearest.
 */
291 #define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
292 #define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
294 #define MAX_INTS_PER_SEC	8000
/* ITR register granularity is 256 ns, hence the * 256 divisor. */
295 #define DEFAULT_ITR	     (1000000000/(MAX_INTS_PER_SEC * 256))
/* Loader-tunable defaults; the EM_* constants come from the shared header. */
297 static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
298 static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
299 static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
300 static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
302  * increase lem_rxd and lem_txd to at least 2048 in netmap mode
303  * for better performance.
305 static int lem_rxd = EM_DEFAULT_RXD;
306 static int lem_txd = EM_DEFAULT_TXD;
307 static int lem_smart_pwr_down = FALSE;
309 /* Controls whether promiscuous also shows bad packets */
310 static int lem_debug_sbp = FALSE;
/* Expose the above as hw.em.* loader tunables (shared namespace with em(4)). */
312 TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
313 TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
314 TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
315 TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
316 TUNABLE_INT("hw.em.rxd", &lem_rxd);
317 TUNABLE_INT("hw.em.txd", &lem_txd);
318 TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
319 TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
321 /* Interrupt style - default to fast */
322 static int lem_use_legacy_irq = 0;
323 TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq);
325 /* How many packets rxeof tries to clean at a time */
326 static int lem_rx_process_limit = 100;
327 TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
329 /* Flow control setting - default to FULL */
330 static int lem_fc_setting = e1000_fc_full;
331 TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
333 /* Global used in WOL setup with multiport cards */
334 static int global_quad_port_a = 0;
336 #ifdef DEV_NETMAP /* see ixgbe.c for details */
337 #include <dev/netmap/if_lem_netmap.h>
338 #endif /* DEV_NETMAP */
340 /*********************************************************************
341 * Device identification routine
343 * em_probe determines if the driver should be loaded on
344 * adapter based on PCI vendor/device id of the adapter.
346 * return BUS_PROBE_DEFAULT on success, positive on failure
347 *********************************************************************/
/*
 * lem_probe - decide whether this driver should claim 'dev'.
 *
 * Rejects non-Intel vendor IDs, then walks lem_vendor_info_array
 * (terminated by vendor_id == 0) matching device/subvendor/subdevice
 * IDs, with PCI_ANY_ID acting as a wildcard.  On a match it builds a
 * description string and returns BUS_PROBE_DEFAULT.
 * NOTE(review): sprintf() into the fixed 60-byte adapter_name relies
 * on the branding string plus version staying short — presumably safe
 * for the single string in lem_strings[], but snprintf() would be the
 * defensive choice; confirm against the elided format arguments.
 * NOTE(review): the function header brace, the early return on vendor
 * mismatch, the loop increment/close, and the final failure return
 * were elided in this extraction.
 */
350 lem_probe(device_t dev)
352 	char		adapter_name[60];
353 	u16		pci_vendor_id = 0;
354 	u16		pci_device_id = 0;
355 	u16		pci_subvendor_id = 0;
356 	u16		pci_subdevice_id = 0;
357 	em_vendor_info_t *ent;
359 	INIT_DEBUGOUT("em_probe: begin");
361 	pci_vendor_id = pci_get_vendor(dev);
	/* Not an Intel part: reject immediately (return line elided). */
362 	if (pci_vendor_id != EM_VENDOR_ID)
365 	pci_device_id = pci_get_device(dev);
366 	pci_subvendor_id = pci_get_subvendor(dev);
367 	pci_subdevice_id = pci_get_subdevice(dev);
369 	ent = lem_vendor_info_array;
370 	while (ent->vendor_id != 0) {
371 		if ((pci_vendor_id == ent->vendor_id) &&
372 		    (pci_device_id == ent->device_id) &&
374 		    ((pci_subvendor_id == ent->subvendor_id) ||
375 		    (ent->subvendor_id == PCI_ANY_ID)) &&
377 		    ((pci_subdevice_id == ent->subdevice_id) ||
378 		    (ent->subdevice_id == PCI_ANY_ID))) {
379 			sprintf(adapter_name, "%s %s",
380 				lem_strings[ent->index],
382 			device_set_desc_copy(dev, adapter_name);
383 			return (BUS_PROBE_DEFAULT);
391 /*********************************************************************
392 * Device initialization routine
394 * The attach entry point is called when the driver is being loaded.
395 * This routine identifies the type of hardware, allocates all resources
396 * and initializes the hardware.
398 * return 0 on success, positive on failure
399 *********************************************************************/
/*
 * lem_attach - one-time device initialization.
 *
 * Initializes locks and callouts, maps PCI resources, runs the e1000
 * shared-code setup, validates the descriptor-count tunables,
 * allocates the TX/RX descriptor rings and multicast table, reads and
 * validates the EEPROM/MAC address, initializes the hardware, wires
 * up the interrupt and the network interface, and registers VLAN
 * event handlers.  Returns 0 on success, positive errno on failure.
 *
 * NOTE(review): this extraction elided the local variable
 * declarations (tsize/rsize/error/bus_addr), every "goto err_*"
 * statement, the err_* unwind labels themselves, several #ifdef
 * NIC_PARAVIRT / DEV_NETMAP openers, and the closing brace — the
 * trailing cleanup calls below are the bodies of those elided error
 * labels (unwound in reverse order of acquisition), not fall-through
 * code.
 */
402 lem_attach(device_t dev)
404 	struct adapter	*adapter;
408 	INIT_DEBUGOUT("lem_attach: begin");
410 	adapter = device_get_softc(dev);
411 	adapter->dev = adapter->osdep.dev = dev;
	/* Core/TX/RX mutexes must exist before callouts reference them. */
412 	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
413 	EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
414 	EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
417 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
418 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
419 	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
420 	    lem_sysctl_nvm_info, "I", "NVM Information");
	/* Timers run with their respective mutexes held. */
422 	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
423 	callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
425 	/* Determine hardware and mac info */
426 	lem_identify_hardware(adapter);
428 	/* Setup PCI resources */
429 	if (lem_allocate_pci_resources(adapter)) {
430 		device_printf(dev, "Allocation of PCI resources failed\n");
435 	/* Do Shared Code initialization */
436 	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
437 		device_printf(dev, "Setup of Shared code failed\n");
442 	e1000_get_bus_info(&adapter->hw);
444 	/* Set up some sysctls for the tunable interrupt delays */
445 	lem_add_int_delay_sysctl(adapter, "rx_int_delay",
446 	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
447 	    E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
448 	lem_add_int_delay_sysctl(adapter, "tx_int_delay",
449 	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
450 	    E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
	/* Absolute delay limits and ITR exist only on 82540 and later. */
451 	if (adapter->hw.mac.type >= e1000_82540) {
452 		lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
453 		    "receive interrupt delay limit in usecs",
454 		    &adapter->rx_abs_int_delay,
455 		    E1000_REGISTER(&adapter->hw, E1000_RADV),
456 		    lem_rx_abs_int_delay_dflt);
457 		lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
458 		    "transmit interrupt delay limit in usecs",
459 		    &adapter->tx_abs_int_delay,
460 		    E1000_REGISTER(&adapter->hw, E1000_TADV),
461 		    lem_tx_abs_int_delay_dflt);
462 		lem_add_int_delay_sysctl(adapter, "itr",
463 		    "interrupt delay limit in usecs/4",
465 		    E1000_REGISTER(&adapter->hw, E1000_ITR),
469 	/* Sysctls for limiting the amount of work done in the taskqueue */
470 	lem_add_rx_process_limit(adapter, "rx_processing_limit",
471 	    "max number of rx packets to process", &adapter->rx_process_limit,
472 	    lem_rx_process_limit);
474 #ifdef NIC_SEND_COMBINING
475 	/* Sysctls to control mitigation */
476 	lem_add_rx_process_limit(adapter, "sc_enable",
477 	    "driver TDT mitigation", &adapter->sc_enable, 0);
478 #endif /* NIC_SEND_COMBINING */
479 #ifdef BATCH_DISPATCH
480 	lem_add_rx_process_limit(adapter, "batch_enable",
481 	    "driver rx batch", &adapter->batch_enable, 0);
482 #endif /* BATCH_DISPATCH */
	/* NOTE(review): the matching #ifdef NIC_PARAVIRT was elided. */
484 	lem_add_rx_process_limit(adapter, "rx_retries",
485 	    "driver rx retries", &adapter->rx_retries, 0);
486 #endif /* NIC_PARAVIRT */
488 	/* Sysctl for setting the interface flow control */
489 	lem_set_flow_cntrl(adapter, "flow_control",
490 	    "flow control setting",
491 	    &adapter->fc_setting, lem_fc_setting);
494 	 * Validate number of transmit and receive descriptors. It
495 	 * must not exceed hardware maximum, and must be multiple
496 	 * of E1000_DBA_ALIGN.
	/* Invalid tunable falls back to the compile-time default. */
498 	if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
499 	    (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
500 	    (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
501 	    (lem_txd < EM_MIN_TXD)) {
502 		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
503 		    EM_DEFAULT_TXD, lem_txd);
504 		adapter->num_tx_desc = EM_DEFAULT_TXD;
506 		adapter->num_tx_desc = lem_txd;
507 	if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
508 	    (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
509 	    (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
510 	    (lem_rxd < EM_MIN_RXD)) {
511 		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
512 		    EM_DEFAULT_RXD, lem_rxd);
513 		adapter->num_rx_desc = EM_DEFAULT_RXD;
515 		adapter->num_rx_desc = lem_rxd;
517 	adapter->hw.mac.autoneg = DO_AUTO_NEG;
518 	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
519 	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
520 	adapter->rx_buffer_len = 2048;
522 	e1000_init_script_state_82541(&adapter->hw, TRUE);
523 	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
	/* Copper-specific PHY defaults. */
526 	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
527 		adapter->hw.phy.mdix = AUTO_ALL_MODES;
528 		adapter->hw.phy.disable_polarity_correction = FALSE;
529 		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
533 	 * Set the frame limits assuming
534 	 * standard ethernet sized frames.
536 	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
537 	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
540 	 * This controls when hardware reports transmit completion
543 	adapter->hw.mac.report_tx_early = 1;
	/* NOTE(review): paravirt block below is under an elided #ifdef NIC_PARAVIRT. */
546 	device_printf(dev, "driver supports paravirt, subdev 0x%x\n",
547 	    adapter->hw.subsystem_device_id);
548 	if (adapter->hw.subsystem_device_id == E1000_PARA_SUBDEV) {
551 		device_printf(dev, "paravirt support on dev %p\n", adapter);
552 		tsize = 4096; // XXX one page for the csb
553 		if (lem_dma_malloc(adapter, tsize, &adapter->csb_mem, BUS_DMA_NOWAIT)) {
554 			device_printf(dev, "Unable to allocate csb memory\n");
558 		/* Setup the Base of the CSB */
559 		adapter->csb = (struct paravirt_csb *)adapter->csb_mem.dma_vaddr;
560 		/* force the first kick */
561 		adapter->csb->host_need_txkick = 1; /* txring empty */
562 		adapter->csb->guest_need_rxkick = 1; /* no rx packets */
563 		bus_addr = adapter->csb_mem.dma_paddr;
564 		lem_add_rx_process_limit(adapter, "csb_on",
565 		    "enable paravirt.", &adapter->csb->guest_csb_on, 0);
566 		lem_add_rx_process_limit(adapter, "txc_lim",
567 		    "txc_lim", &adapter->csb->host_txcycles_lim, 1);
		/* Convenience wrapper: export a CSB field as a sysctl. */
570 #define PA_SC(name, var, val)		\
571 	lem_add_rx_process_limit(adapter, name, name, var, val)
572 		PA_SC("host_need_txkick",&adapter->csb->host_need_txkick, 1);
573 		PA_SC("host_rxkick_at",&adapter->csb->host_rxkick_at, ~0);
574 		PA_SC("guest_need_txkick",&adapter->csb->guest_need_txkick, 0);
575 		PA_SC("guest_need_rxkick",&adapter->csb->guest_need_rxkick, 1);
576 		PA_SC("tdt_reg_count",&adapter->tdt_reg_count, 0);
577 		PA_SC("tdt_csb_count",&adapter->tdt_csb_count, 0);
578 		PA_SC("tdt_int_count",&adapter->tdt_int_count, 0);
579 		PA_SC("guest_need_kick_count",&adapter->guest_need_kick_count, 0);
580 		/* tell the host where the block is */
581 		E1000_WRITE_REG(&adapter->hw, E1000_CSBAH,
582 			(u32)(bus_addr >> 32));
583 		E1000_WRITE_REG(&adapter->hw, E1000_CSBAL,
586 #endif /* NIC_PARAVIRT */
	/* Ring sizes are rounded up to the DMA alignment (2nd arg elided). */
588 	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
591 	/* Allocate Transmit Descriptor ring */
592 	if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
593 		device_printf(dev, "Unable to allocate tx_desc memory\n");
597 	adapter->tx_desc_base =
598 	    (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
600 	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
603 	/* Allocate Receive Descriptor ring */
604 	if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
605 		device_printf(dev, "Unable to allocate rx_desc memory\n");
609 	adapter->rx_desc_base =
610 	    (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
612 	/* Allocate multicast array memory. */
613 	adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
614 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
615 	if (adapter->mta == NULL) {
616 		device_printf(dev, "Can not allocate multicast setup array\n");
622 	** Start from a known state, this is
623 	** important in reading the nvm and
626 	e1000_reset_hw(&adapter->hw);
628 	/* Make sure we have a good EEPROM before we read from it */
629 	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
631 		** Some PCI-E parts fail the first check due to
632 		** the link being in sleep state, call it again,
633 		** if it fails a second time its a real issue.
635 		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
637 			    "The EEPROM Checksum Is Not Valid\n");
643 	/* Copy the permanent MAC address out of the EEPROM */
644 	if (e1000_read_mac_addr(&adapter->hw) < 0) {
645 		device_printf(dev, "EEPROM read error while reading MAC"
651 	if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
652 		device_printf(dev, "Invalid MAC address\n");
657 	/* Initialize the hardware */
658 	if (lem_hardware_init(adapter)) {
659 		device_printf(dev, "Unable to initialize the hardware\n");
664 	/* Allocate transmit descriptors and buffers */
665 	if (lem_allocate_transmit_structures(adapter)) {
666 		device_printf(dev, "Could not setup transmit structures\n");
671 	/* Allocate receive descriptors and buffers */
672 	if (lem_allocate_receive_structures(adapter)) {
673 		device_printf(dev, "Could not setup receive structures\n");
679 	**  Do interrupt configuration
681 	error = lem_allocate_irq(adapter);
686 	 * Get Wake-on-Lan and Management info for later use
690 	/* Setup OS specific network interface */
691 	if (lem_setup_interface(dev, adapter) != 0)
694 	/* Initialize statistics */
695 	lem_update_stats_counters(adapter);
697 	adapter->hw.mac.get_link_status = 1;
698 	lem_update_link_status(adapter);
700 	/* Indicate SOL/IDER usage */
701 	if (e1000_check_reset_block(&adapter->hw))
703 		    "PHY reset is blocked due to SOL/IDER session.\n");
705 	/* Do we need workaround for 82544 PCI-X adapter? */
706 	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
707 	    adapter->hw.mac.type == e1000_82544)
708 		adapter->pcix_82544 = TRUE;
710 		adapter->pcix_82544 = FALSE;
712 	/* Register for VLAN events */
713 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
714 	    lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
715 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
716 	    lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
718 	lem_add_hw_stats(adapter);
720 	/* Non-AMT based hardware can now take control from firmware */
721 	if (adapter->has_manage && !adapter->has_amt)
722 		lem_get_hw_control(adapter);
724 	/* Tell the stack that the interface is not active */
725 	adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
727 	adapter->led_dev = led_create(lem_led_func, adapter,
728 	    device_get_nameunit(dev));
	/* NOTE(review): #ifdef DEV_NETMAP opener elided. */
731 	lem_netmap_attach(adapter);
732 #endif /* DEV_NETMAP */
733 	INIT_DEBUGOUT("lem_attach: end");
	/*
	 * Error-unwind bodies (labels elided): release resources in
	 * reverse order of acquisition before returning the error.
	 */
738 	lem_free_transmit_structures(adapter);
741 	lem_release_hw_control(adapter);
742 	lem_dma_free(adapter, &adapter->rxdma);
744 	lem_dma_free(adapter, &adapter->txdma);
747 	lem_dma_free(adapter, &adapter->csb_mem);
749 #endif /* NIC_PARAVIRT */
752 	if (adapter->ifp != NULL)
753 		if_free(adapter->ifp);
754 	lem_free_pci_resources(adapter);
755 	free(adapter->mta, M_DEVBUF);
756 	EM_TX_LOCK_DESTROY(adapter);
757 	EM_RX_LOCK_DESTROY(adapter);
758 	EM_CORE_LOCK_DESTROY(adapter);
763 /*********************************************************************
764 * Device removal routine
766 * The detach entry point is called when the driver is being removed.
767 * This routine stops the adapter and deallocates all the resources
768 * that were allocated for driver operation.
770 * return 0 on success, positive on failure
771 *********************************************************************/
/*
 * lem_detach - undo everything lem_attach() set up.
 *
 * Refuses to detach while a VLAN trunk is attached, stops the
 * adapter and resets the PHY under the core/TX locks, deregisters
 * event handlers and callouts, detaches from the network stack, and
 * frees rings, buffers, and locks.  Returns 0 on success.
 * NOTE(review): this extraction elided the function braces, the
 * "return (EBUSY)" for the VLAN-in-use case, the EM_TX_LOCK/lem_stop
 * lines preceding the unlocks, the netmap_detach call under its
 * #ifdef, and the final "return (0)".
 */
774 lem_detach(device_t dev)
776 	struct adapter	*adapter = device_get_softc(dev);
777 	struct ifnet	*ifp = adapter->ifp;
779 	INIT_DEBUGOUT("em_detach: begin");
781 	/* Make sure VLANS are not using driver */
782 	if (adapter->ifp->if_vlantrunk != NULL) {
783 		device_printf(dev,"Vlan in use, detach first\n");
787 #ifdef DEVICE_POLLING
788 	if (ifp->if_capenable & IFCAP_POLLING)
789 		ether_poll_deregister(ifp);
792 	if (adapter->led_dev != NULL)
793 		led_destroy(adapter->led_dev);
795 	EM_CORE_LOCK(adapter);
	/* Gate concurrent ioctls/timers out of the teardown path. */
797 	adapter->in_detach = 1;
799 	e1000_phy_hw_reset(&adapter->hw);
801 	lem_release_manageability(adapter);
803 	EM_TX_UNLOCK(adapter);
804 	EM_CORE_UNLOCK(adapter);
806 	/* Unregister VLAN events */
807 	if (adapter->vlan_attach != NULL)
808 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
809 	if (adapter->vlan_detach != NULL)
810 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
812 	ether_ifdetach(adapter->ifp);
	/* Drain after ifdetach so no new timer work can be queued. */
813 	callout_drain(&adapter->timer);
814 	callout_drain(&adapter->tx_fifo_timer);
818 #endif /* DEV_NETMAP */
819 	lem_free_pci_resources(adapter);
820 	bus_generic_detach(dev);
823 	lem_free_transmit_structures(adapter);
824 	lem_free_receive_structures(adapter);
826 	/* Free Transmit Descriptor ring */
827 	if (adapter->tx_desc_base) {
828 		lem_dma_free(adapter, &adapter->txdma);
829 		adapter->tx_desc_base = NULL;
832 	/* Free Receive Descriptor ring */
833 	if (adapter->rx_desc_base) {
834 		lem_dma_free(adapter, &adapter->rxdma);
835 		adapter->rx_desc_base = NULL;
840 	lem_dma_free(adapter, &adapter->csb_mem);
843 #endif /* NIC_PARAVIRT */
844 	lem_release_hw_control(adapter);
845 	free(adapter->mta, M_DEVBUF);
846 	EM_TX_LOCK_DESTROY(adapter);
847 	EM_RX_LOCK_DESTROY(adapter);
848 	EM_CORE_LOCK_DESTROY(adapter);
853 /*********************************************************************
855 * Shutdown entry point
857 **********************************************************************/
/* System shutdown is handled identically to suspend. */
860 lem_shutdown(device_t dev)
862 	return lem_suspend(dev);
866 * Suspend/resume device methods.
/*
 * lem_suspend - quiesce the adapter for suspend/shutdown.
 * Releases manageability/firmware control and arms wake-up support
 * under the core lock, then defers to the generic bus suspend.
 * NOTE(review): the lem_stop() call between the lock and the release
 * calls appears to have been elided in this extraction — confirm
 * against the full source.
 */
869 lem_suspend(device_t dev)
871 	struct adapter *adapter = device_get_softc(dev);
873 	EM_CORE_LOCK(adapter);
875 	lem_release_manageability(adapter);
876 	lem_release_hw_control(adapter);
877 	lem_enable_wakeup(dev);
879 	EM_CORE_UNLOCK(adapter);
881 	return bus_generic_suspend(dev);
/*
 * lem_resume - reinitialize the adapter after suspend.
 * Re-runs the full init path and manageability setup under the core
 * lock, then defers to the generic bus resume.
 * NOTE(review): the original's post-resume TX restart (locked
 * lem_start_locked() call using 'ifp') was elided here, which is why
 * 'ifp' looks unused in this view.
 */
885 lem_resume(device_t dev)
887 	struct adapter *adapter = device_get_softc(dev);
888 	struct ifnet *ifp = adapter->ifp;
890 	EM_CORE_LOCK(adapter);
891 	lem_init_locked(adapter);
892 	lem_init_manageability(adapter);
893 	EM_CORE_UNLOCK(adapter);
896 	return bus_generic_resume(dev);
/*
 * lem_start_locked - drain the interface send queue onto the TX ring.
 *
 * Caller must hold the TX lock (asserted below).  Bails out unless
 * the interface is RUNNING, not OACTIVE, and link is up.  Dequeues
 * mbufs, hands each to lem_xmit(); on encap failure the packet is
 * requeued (if still valid) and OACTIVE is set.  Each sent frame is
 * mirrored to BPF and arms the watchdog.
 * NOTE(review): elided from this extraction: the m_head declaration,
 * the early-return statements after the guard conditions, the
 * lem_txeof() cleanup call at the threshold check, the NULL-m_head
 * break inside the loop, and the closing braces.
 */
901 lem_start_locked(struct ifnet *ifp)
903 	struct adapter	*adapter = ifp->if_softc;
906 	EM_TX_LOCK_ASSERT(adapter);
	/* Must be RUNNING and not OACTIVE (return elided). */
908 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
911 	if (!adapter->link_active)
915 	 * Force a cleanup if number of TX descriptors
916 	 * available hits the threshold
918 	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
920 		/* Now do we at least have a minimal? */
921 		if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
922 			adapter->no_tx_desc_avail1++;
927 	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
929 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
933 		 * Encapsulation can modify our pointer, and or make it
934 		 * NULL on failure. In that event, we can't requeue.
936 		if (lem_xmit(adapter, &m_head)) {
939 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
940 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
944 		/* Send a copy of the frame to the BPF listener */
945 		ETHER_BPF_MTAP(ifp, m_head);
947 		/* Set timeout in case hardware has problems transmitting. */
948 		adapter->watchdog_check = TRUE;
949 		adapter->watchdog_time = ticks;
	/* Ring nearly full: stop the stack from queueing more. */
951 	if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
952 		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	/* Paravirt path (elided #ifdef NIC_PARAVIRT): ask host for a TX kick. */
954 	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) && adapter->csb &&
955 	    adapter->csb->guest_csb_on &&
956 	    !(adapter->csb->guest_need_txkick & 1)) {
957 		adapter->csb->guest_need_txkick = 1;
958 		adapter->guest_need_kick_count++;
959 		// XXX memory barrier
960 		lem_txeof(adapter); // XXX possibly clear IFF_DRV_OACTIVE
962 #endif /* NIC_PARAVIRT */
/*
 * lem_start - if_start entry point: take the TX lock and run the
 * locked transmit path when the interface is up.
 * NOTE(review): the EM_TX_LOCK(adapter) acquisition line was elided
 * in this extraction; the unlock below implies it precedes the check.
 */
968 lem_start(struct ifnet *ifp)
970 	struct adapter *adapter = ifp->if_softc;
973 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
974 		lem_start_locked(ifp);
975 	EM_TX_UNLOCK(adapter);
978 /*********************************************************************
981 * em_ioctl is called when the user wants to configure the
984 * return 0 on success, positive on failure
985 **********************************************************************/
/*
 * lem_ioctl - interface ioctl handler.
 * Handles address assignment, MTU, interface flags, multicast list,
 * media and capability changes; everything else falls through to
 * ether_ioctl().
 * NOTE(review): many interior lines (case labels, braces, returns) are
 * elided from this view; comments describe only what is visible.
 */
988 lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
990 struct adapter *adapter = ifp->if_softc;
991 struct ifreq *ifr = (struct ifreq *)data;
992 #if defined(INET) || defined(INET6)
993 struct ifaddr *ifa = (struct ifaddr *)data;
995 bool avoid_reset = FALSE;
/* Refuse new work while the driver is detaching. */
998 if (adapter->in_detach)
1004 if (ifa->ifa_addr->sa_family == AF_INET)
1008 if (ifa->ifa_addr->sa_family == AF_INET6)
1012 ** Calling init results in link renegotiation,
1013 ** so we avoid doing it when possible.
1016 ifp->if_flags |= IFF_UP;
1017 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1020 if (!(ifp->if_flags & IFF_NOARP))
1021 arp_ifinit(ifp, ifa);
1024 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate against the MAC's max frame size, then re-init. */
1030 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1032 EM_CORE_LOCK(adapter);
1033 switch (adapter->hw.mac.type) {
1035 max_frame_size = ETHER_MAX_LEN;
1038 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1040 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1042 EM_CORE_UNLOCK(adapter);
1047 ifp->if_mtu = ifr->ifr_mtu;
1048 adapter->max_frame_size =
1049 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1050 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1051 lem_init_locked(adapter);
1052 EM_CORE_UNLOCK(adapter);
/*
 * SIOCSIFFLAGS: if only PROMISC/ALLMULTI changed, reprogram promiscuous
 * state instead of doing a full re-init (avoids link renegotiation).
 */
1056 IOCTL_DEBUGOUT("ioctl rcv'd:\
1057 SIOCSIFFLAGS (Set Interface Flags)");
1058 EM_CORE_LOCK(adapter);
1059 if (ifp->if_flags & IFF_UP) {
1060 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1061 if ((ifp->if_flags ^ adapter->if_flags) &
1062 (IFF_PROMISC | IFF_ALLMULTI)) {
1063 lem_disable_promisc(adapter);
1064 lem_set_promisc(adapter);
1067 lem_init_locked(adapter);
1069 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1070 EM_TX_LOCK(adapter);
1072 EM_TX_UNLOCK(adapter);
/* Remember the flags we acted on so the next delta can be computed. */
1074 adapter->if_flags = ifp->if_flags;
1075 EM_CORE_UNLOCK(adapter);
/* SIOC(ADD|DEL)MULTI: reload the multicast filter with interrupts masked. */
1079 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
1080 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1081 EM_CORE_LOCK(adapter);
1082 lem_disable_intr(adapter);
1083 lem_set_multi(adapter);
/* 82542 rev 2 needs its receive unit reset after a filter reload. */
1084 if (adapter->hw.mac.type == e1000_82542 &&
1085 adapter->hw.revision_id == E1000_REVISION_2) {
1086 lem_initialize_receive_unit(adapter);
1088 #ifdef DEVICE_POLLING
1089 if (!(ifp->if_capenable & IFCAP_POLLING))
1091 lem_enable_intr(adapter);
1092 EM_CORE_UNLOCK(adapter);
/* Media ioctls: blocked while manageability firmware holds the PHY. */
1096 /* Check SOL/IDER usage */
1097 EM_CORE_LOCK(adapter);
1098 if (e1000_check_reset_block(&adapter->hw)) {
1099 EM_CORE_UNLOCK(adapter);
1100 device_printf(adapter->dev, "Media change is"
1101 " blocked due to SOL/IDER session.\n");
1104 EM_CORE_UNLOCK(adapter);
1106 IOCTL_DEBUGOUT("ioctl rcv'd: \
1107 SIOCxIFMEDIA (Get/Set Interface Media)");
1108 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle only the capability bits that actually changed. */
1114 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1116 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1117 #ifdef DEVICE_POLLING
1118 if (mask & IFCAP_POLLING) {
1119 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1120 error = ether_poll_register(lem_poll, ifp);
1123 EM_CORE_LOCK(adapter);
1124 lem_disable_intr(adapter);
1125 ifp->if_capenable |= IFCAP_POLLING;
1126 EM_CORE_UNLOCK(adapter);
1128 error = ether_poll_deregister(ifp);
1129 /* Enable interrupt even in error case */
1130 EM_CORE_LOCK(adapter);
1131 lem_enable_intr(adapter);
1132 ifp->if_capenable &= ~IFCAP_POLLING;
1133 EM_CORE_UNLOCK(adapter);
1137 if (mask & IFCAP_HWCSUM) {
1138 ifp->if_capenable ^= IFCAP_HWCSUM;
1141 if (mask & IFCAP_VLAN_HWTAGGING) {
1142 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1145 if ((mask & IFCAP_WOL) &&
1146 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1147 if (mask & IFCAP_WOL_MCAST)
1148 ifp->if_capenable ^= IFCAP_WOL_MCAST;
1149 if (mask & IFCAP_WOL_MAGIC)
1150 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
/* Re-init once if any toggled capability requires a hardware reload. */
1152 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1154 VLAN_CAPABILITIES(ifp);
/* Default: hand anything unrecognized to the generic ethernet layer. */
1159 error = ether_ioctl(ifp, command, data);
1167 /*********************************************************************
1170 * This routine is used in two ways. It is used by the stack as
1171 * init entry point in network interface structure. It is also used
1172 * by the driver as a hw/sw initialization routine to get to a
1175 * return 0 on success, positive on failure
1176 **********************************************************************/
/*
 * lem_init_locked - full hardware/software (re)initialization.
 * Programs the packet buffer split, MAC address, TX/RX rings, multicast
 * table, VLAN support and interrupt state, then marks the interface
 * RUNNING. Caller must hold the core lock (asserted below).
 * NOTE(review): several interior lines (case labels, braces, error
 * returns, lem_stop call between the TX lock/unlock) are elided from
 * this view.
 */
1179 lem_init_locked(struct adapter *adapter)
1181 struct ifnet *ifp = adapter->ifp;
1182 device_t dev = adapter->dev;
1185 INIT_DEBUGOUT("lem_init: begin");
1187 EM_CORE_LOCK_ASSERT(adapter);
/* Quiesce the hardware under the TX lock before reprogramming it. */
1189 EM_TX_LOCK(adapter);
1191 EM_TX_UNLOCK(adapter);
1194 * Packet Buffer Allocation (PBA)
1195 * Writing PBA sets the receive portion of the buffer
1196 * the remainder is used for the transmit buffer.
1198 * Devices before the 82547 had a Packet Buffer of 64K.
1199 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1200 * After the 82547 the buffer was reduced to 40K.
1201 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1202 * Note: default does not leave enough room for Jumbo Frame >10k.
1204 switch (adapter->hw.mac.type) {
1206 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1207 if (adapter->max_frame_size > 8192)
1208 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1210 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 half-duplex workaround state: software-tracked TX FIFO window. */
1211 adapter->tx_fifo_head = 0;
1212 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1213 adapter->tx_fifo_size =
1214 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1217 /* Devices before 82547 had a Packet Buffer of 64K. */
1218 if (adapter->max_frame_size > 8192)
1219 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1221 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1224 INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
1225 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1227 /* Get the latest mac address, User can use a LAA */
1228 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1231 /* Put the address into the Receive Address Array */
1232 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1234 /* Initialize the hardware */
1235 if (lem_hardware_init(adapter)) {
1236 device_printf(dev, "Unable to initialize the hardware\n");
1239 lem_update_link_status(adapter);
1241 /* Setup VLAN support, basic and offload if available */
1242 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1244 /* Set hardware offload abilities */
1245 ifp->if_hwassist = 0;
/* TX checksum offload only exists on 82543 and newer MACs. */
1246 if (adapter->hw.mac.type >= e1000_82543) {
1247 if (ifp->if_capenable & IFCAP_TXCSUM)
1248 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1251 /* Configure for OS presence */
1252 lem_init_manageability(adapter);
1254 /* Prepare transmit descriptors and buffers */
1255 lem_setup_transmit_structures(adapter);
1256 lem_initialize_transmit_unit(adapter);
1258 /* Setup Multicast table */
1259 lem_set_multi(adapter);
1261 /* Prepare receive descriptors and buffers */
1262 if (lem_setup_receive_structures(adapter)) {
1263 device_printf(dev, "Could not setup receive structures\n");
1264 EM_TX_LOCK(adapter);
1266 EM_TX_UNLOCK(adapter);
1269 lem_initialize_receive_unit(adapter);
1271 /* Use real VLAN Filter support? */
1272 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1273 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
1274 /* Use real VLAN Filter support */
1275 lem_setup_vlan_hw_support(adapter);
/* Otherwise just enable basic VLAN tag stripping (CTRL.VME). */
1278 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1279 ctrl |= E1000_CTRL_VME;
1280 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1284 /* Don't lose promiscuous settings */
1285 lem_set_promisc(adapter);
1287 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1288 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Arm the 1 Hz housekeeping/watchdog timer and reset HW counters. */
1290 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1291 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1293 #ifdef DEVICE_POLLING
1295 * Only enable interrupts if we are not polling, make sure
1296 * they are off otherwise.
1298 if (ifp->if_capenable & IFCAP_POLLING)
1299 lem_disable_intr(adapter);
1301 #endif /* DEVICE_POLLING */
1302 lem_enable_intr(adapter);
1304 /* AMT based hardware can now take control from firmware */
1305 if (adapter->has_manage && adapter->has_amt)
1306 lem_get_hw_control(adapter);
/*
 * lem_init body - thin wrapper (used as the ifnet if_init entry point;
 * signature line is elided from this view) that takes the core lock and
 * performs the real work in lem_init_locked().
 */
1312 struct adapter *adapter = arg;
1314 EM_CORE_LOCK(adapter);
1315 lem_init_locked(adapter);
1316 EM_CORE_UNLOCK(adapter);
1320 #ifdef DEVICE_POLLING
1321 /*********************************************************************
1323 * Legacy polling routine
1325 *********************************************************************/
/*
 * lem_poll - DEVICE_POLLING entry point.
 * On POLL_AND_CHECK_STATUS also services link-state changes, then
 * harvests up to 'count' RX packets and kicks TX if the send queue has
 * work. NOTE(review): the return statements and closing braces are
 * elided from this view.
 */
1327 lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1329 struct adapter *adapter = ifp->if_softc;
1330 u32 reg_icr, rx_done = 0;
/* Bail out (after dropping the lock) if the interface went down. */
1332 EM_CORE_LOCK(adapter);
1333 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1334 EM_CORE_UNLOCK(adapter);
1338 if (cmd == POLL_AND_CHECK_STATUS) {
1339 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* Link-state work: restart the timer around the status update. */
1340 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1341 callout_stop(&adapter->timer);
1342 adapter->hw.mac.get_link_status = 1;
1343 lem_update_link_status(adapter);
1344 callout_reset(&adapter->timer, hz,
1345 lem_local_timer, adapter);
1348 EM_CORE_UNLOCK(adapter);
1350 lem_rxeof(adapter, count, &rx_done);
1352 EM_TX_LOCK(adapter);
1354 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1355 lem_start_locked(ifp);
1356 EM_TX_UNLOCK(adapter);
1359 #endif /* DEVICE_POLLING */
1361 /*********************************************************************
1363 * Legacy Interrupt Service routine
1365 *********************************************************************/
/*
 * lem_intr body - legacy interrupt service routine (signature line is
 * elided from this view). Reads/acknowledges ICR, handles spurious and
 * link-change interrupts, then processes RX and restarts TX.
 */
1369 struct adapter *adapter = arg;
1370 struct ifnet *ifp = adapter->ifp;
/* Ignore the interrupt when polling owns the device or it is down. */
1374 if ((ifp->if_capenable & IFCAP_POLLING) ||
1375 ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0))
1378 EM_CORE_LOCK(adapter);
1379 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1380 if (reg_icr & E1000_ICR_RXO)
1381 adapter->rx_overruns++;
/* All-ones means the device is gone (hot-unplug); zero means not ours. */
1383 if ((reg_icr == 0xffffffff) || (reg_icr == 0)) {
1384 EM_CORE_UNLOCK(adapter);
1388 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1389 callout_stop(&adapter->timer);
1390 adapter->hw.mac.get_link_status = 1;
1391 lem_update_link_status(adapter);
1392 /* Deal with TX cruft when link lost */
1393 lem_tx_purge(adapter);
1394 callout_reset(&adapter->timer, hz,
1395 lem_local_timer, adapter);
1396 EM_CORE_UNLOCK(adapter);
1400 EM_CORE_UNLOCK(adapter);
/* -1: no RX packet limit for the legacy interrupt path. */
1401 lem_rxeof(adapter, -1, NULL);
1403 EM_TX_LOCK(adapter);
1405 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1406 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1407 lem_start_locked(ifp);
1408 EM_TX_UNLOCK(adapter);
/*
 * lem_handle_link - taskqueue handler for link-state changes (queued by
 * lem_irq_fast). Refreshes link status and purges stale TX work while
 * the housekeeping timer is stopped.
 */
1414 lem_handle_link(void *context, int pending)
1416 struct adapter *adapter = context;
1417 struct ifnet *ifp = adapter->ifp;
/* Nothing to do if the interface is not running. */
1419 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1422 EM_CORE_LOCK(adapter);
1423 callout_stop(&adapter->timer);
1424 lem_update_link_status(adapter);
1425 /* Deal with TX cruft when link lost */
1426 lem_tx_purge(adapter);
1427 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1428 EM_CORE_UNLOCK(adapter);
1432 /* Combined RX/TX handler, used by Legacy and MSI */
/*
 * Runs from the taskqueue: drains up to rx_process_limit RX packets,
 * restarts TX, re-queues itself while more RX work remains ('more'),
 * and finally re-enables interrupts (they were masked in lem_irq_fast).
 * NOTE(review): the lem_txeof call and the branch consuming 'more' are
 * elided from this view.
 */
1434 lem_handle_rxtx(void *context, int pending)
1436 struct adapter *adapter = context;
1437 struct ifnet *ifp = adapter->ifp;
1440 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1441 bool more = lem_rxeof(adapter, adapter->rx_process_limit, NULL);
1442 EM_TX_LOCK(adapter);
1444 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1445 lem_start_locked(ifp);
1446 EM_TX_UNLOCK(adapter);
1448 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1453 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1454 lem_enable_intr(adapter);
1457 /*********************************************************************
1459 * Fast Legacy/MSI Combined Interrupt Service routine
1461 *********************************************************************/
/*
 * lem_irq_fast - fast (filter) interrupt handler for Legacy/MSI.
 * Returns FILTER_STRAY for spurious interrupts; otherwise masks
 * interrupts and defers all real work to the rxtx/link taskqueues,
 * returning FILTER_HANDLED.
 */
1463 lem_irq_fast(void *arg)
1465 struct adapter *adapter = arg;
1471 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* All-ones ICR: hardware has disappeared (e.g. hot-unplug). */
1474 if (reg_icr == 0xffffffff)
1475 return FILTER_STRAY;
1477 /* Definitely not our interrupt. */
1479 return FILTER_STRAY;
1482 * Mask interrupts until the taskqueue is finished running. This is
1483 * cheap, just assume that it is needed. This also works around the
1484 * MSI message reordering errata on certain systems.
1486 lem_disable_intr(adapter);
1487 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1489 /* Link status change */
1490 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1491 adapter->hw.mac.get_link_status = 1;
1492 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1495 if (reg_icr & E1000_ICR_RXO)
1496 adapter->rx_overruns++;
1497 return FILTER_HANDLED;
1501 /*********************************************************************
1503 * Media Ioctl callback
1505 * This routine is called whenever the user queries the status of
1506 * the interface using ifconfig.
1508 **********************************************************************/
/*
 * lem_media_status - ifmedia status callback (ifconfig queries).
 * Reports link validity/activity and translates hardware media type,
 * speed and duplex into ifmedia IFM_* flags.
 * NOTE(review): case labels of the speed switch and some returns are
 * elided from this view.
 */
1510 lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1512 struct adapter *adapter = ifp->if_softc;
1513 u_char fiber_type = IFM_1000_SX;
1515 INIT_DEBUGOUT("lem_media_status: begin");
1517 EM_CORE_LOCK(adapter);
1518 lem_update_link_status(adapter);
1520 ifmr->ifm_status = IFM_AVALID;
1521 ifmr->ifm_active = IFM_ETHER;
/* No link: report only IFM_AVALID and stop. */
1523 if (!adapter->link_active) {
1524 EM_CORE_UNLOCK(adapter);
1528 ifmr->ifm_status |= IFM_ACTIVE;
1530 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1531 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
/* 82545 fiber reports as 1000LX rather than the default 1000SX. */
1532 if (adapter->hw.mac.type == e1000_82545)
1533 fiber_type = IFM_1000_LX;
1534 ifmr->ifm_active |= fiber_type | IFM_FDX;
/* Copper: map the negotiated speed and duplex. */
1536 switch (adapter->link_speed) {
1538 ifmr->ifm_active |= IFM_10_T;
1541 ifmr->ifm_active |= IFM_100_TX;
1544 ifmr->ifm_active |= IFM_1000_T;
1547 if (adapter->link_duplex == FULL_DUPLEX)
1548 ifmr->ifm_active |= IFM_FDX;
1550 ifmr->ifm_active |= IFM_HDX;
1552 EM_CORE_UNLOCK(adapter);
1555 /*********************************************************************
1557 * Media Ioctl callback
1559 * This routine is called when the user changes speed/duplex using
1560 * media/mediopt option with ifconfig.
1562 **********************************************************************/
/*
 * lem_media_change - ifmedia change callback (ifconfig media/mediaopt).
 * Translates the requested IFM_* subtype into autoneg/forced speed and
 * duplex settings, then re-initializes the hardware.
 * NOTE(review): the case labels of the media switch and the return are
 * elided from this view.
 */
1564 lem_media_change(struct ifnet *ifp)
1566 struct adapter *adapter = ifp->if_softc;
1567 struct ifmedia *ifm = &adapter->media;
1569 INIT_DEBUGOUT("lem_media_change: begin");
/* Only Ethernet media is meaningful for this driver. */
1571 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1574 EM_CORE_LOCK(adapter);
1575 switch (IFM_SUBTYPE(ifm->ifm_media)) {
/* Auto: advertise everything the PHY supports. */
1577 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1578 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
/* 1000: gigabit requires autoneg, full duplex only. */
1583 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1584 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
/* 100: forced speed, duplex taken from the FDX media option. */
1587 adapter->hw.mac.autoneg = FALSE;
1588 adapter->hw.phy.autoneg_advertised = 0;
1589 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1590 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1592 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
/* 10: forced speed, duplex taken from the FDX media option. */
1595 adapter->hw.mac.autoneg = FALSE;
1596 adapter->hw.phy.autoneg_advertised = 0;
1597 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1598 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1600 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1603 device_printf(adapter->dev, "Unsupported media type\n");
/* Apply the new settings with a full re-init. */
1606 lem_init_locked(adapter);
1607 EM_CORE_UNLOCK(adapter);
1612 /*********************************************************************
1614 * This routine maps the mbufs to tx descriptors.
1616 * return 0 on success, positive on failure
1617 **********************************************************************/
/*
 * lem_xmit - map one mbuf chain onto TX descriptors and hand it to the
 * hardware.
 * Returns 0 on success, positive errno on failure (per the header
 * comment above); on some failure paths *m_headp is freed and NULLed
 * (elided lines), which is why the caller checks before requeueing.
 * Handles: checksum-offload pullup, DMA mapping with EFBIG retry via
 * m_collapse, the 82544/PCIX descriptor-split workaround, VLAN tagging,
 * and the 82547 half-duplex FIFO workaround.
 * NOTE(review): many interior lines (frees, gotos, closing braces,
 * the 'last = i' bookkeeping) are elided from this view.
 */
1620 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1622 bus_dma_segment_t segs[EM_MAX_SCATTER];
1624 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1625 struct e1000_tx_desc *ctxd = NULL;
1626 struct mbuf *m_head;
1627 u32 txd_upper, txd_lower, txd_used, txd_saved;
1628 int error, nsegs, i, j, first, last = 0;
1631 txd_upper = txd_lower = txd_used = txd_saved = 0;
1634 ** When doing checksum offload, it is critical to
1635 ** make sure the first mbuf has more than header,
1636 ** because that routine expects data to be present.
1638 if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
1639 (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
1640 m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
1647 * Map the packet for DMA
1649 * Capture the first descriptor index,
1650 * this descriptor will have the index
1651 * of the EOP which is the only one that
1652 * now gets a DONE bit writeback.
1654 first = adapter->next_avail_tx_desc;
1655 tx_buffer = &adapter->tx_buffer_area[first];
1656 tx_buffer_mapped = tx_buffer;
1657 map = tx_buffer->map;
1659 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1660 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1663 * There are two types of errors we can (try) to handle:
1664 * - EFBIG means the mbuf chain was too long and bus_dma ran
1665 * out of segments. Defragment the mbuf chain and try again.
1666 * - ENOMEM means bus_dma could not obtain enough bounce buffers
1667 * at this point in time. Defer sending and try again later.
1668 * All other errors, in particular EINVAL, are fatal and prevent the
1669 * mbuf chain from ever going through. Drop it and report error.
1671 if (error == EFBIG) {
/* Too many segments: collapse the chain and retry the load once. */
1674 m = m_collapse(*m_headp, M_NOWAIT, EM_MAX_SCATTER);
1676 adapter->mbuf_defrag_failed++;
1684 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1685 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1688 adapter->no_tx_dma_setup++;
1693 } else if (error != 0) {
1694 adapter->no_tx_dma_setup++;
/* +2 slack keeps room for a context descriptor / ring wrap. */
1698 if (adapter->num_tx_desc_avail < (nsegs + 2)) {
1699 adapter->no_tx_desc_avail2++;
1700 bus_dmamap_unload(adapter->txtag, map);
1705 /* Do hardware assists */
1706 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1707 lem_transmit_checksum_setup(adapter, m_head,
1708 &txd_upper, &txd_lower);
1710 i = adapter->next_avail_tx_desc;
1711 if (adapter->pcix_82544)
1714 /* Set up our transmit descriptors */
1715 for (j = 0; j < nsegs; j++) {
1717 bus_addr_t seg_addr;
1718 /* If adapter is 82544 and on PCIX bus */
1719 if(adapter->pcix_82544) {
1720 DESC_ARRAY desc_array;
1721 u32 array_elements, counter;
1723 * Check the Address and Length combination and
1724 * split the data accordingly
1726 array_elements = lem_fill_descriptors(segs[j].ds_addr,
1727 segs[j].ds_len, &desc_array);
1728 for (counter = 0; counter < array_elements; counter++) {
/* Ring exhausted mid-packet: roll back to the saved index and bail. */
1729 if (txd_used == adapter->num_tx_desc_avail) {
1730 adapter->next_avail_tx_desc = txd_saved;
1731 adapter->no_tx_desc_avail2++;
1732 bus_dmamap_unload(adapter->txtag, map);
1735 tx_buffer = &adapter->tx_buffer_area[i];
1736 ctxd = &adapter->tx_desc_base[i];
1737 ctxd->buffer_addr = htole64(
1738 desc_array.descriptor[counter].address);
1739 ctxd->lower.data = htole32(
1740 (adapter->txd_cmd | txd_lower | (u16)
1741 desc_array.descriptor[counter].length));
1743 htole32((txd_upper));
1745 if (++i == adapter->num_tx_desc)
1747 tx_buffer->m_head = NULL;
1748 tx_buffer->next_eop = -1;
/* Normal (non-PCIX-82544) path: one descriptor per DMA segment. */
1752 tx_buffer = &adapter->tx_buffer_area[i];
1753 ctxd = &adapter->tx_desc_base[i];
1754 seg_addr = segs[j].ds_addr;
1755 seg_len = segs[j].ds_len;
1756 ctxd->buffer_addr = htole64(seg_addr);
1757 ctxd->lower.data = htole32(
1758 adapter->txd_cmd | txd_lower | seg_len);
1762 if (++i == adapter->num_tx_desc)
1764 tx_buffer->m_head = NULL;
1765 tx_buffer->next_eop = -1;
1769 adapter->next_avail_tx_desc = i;
1771 if (adapter->pcix_82544)
1772 adapter->num_tx_desc_avail -= txd_used;
1774 adapter->num_tx_desc_avail -= nsegs;
1776 if (m_head->m_flags & M_VLANTAG) {
1777 /* Set the vlan id. */
1778 ctxd->upper.fields.special =
1779 htole16(m_head->m_pkthdr.ether_vtag);
1780 /* Tell hardware to add tag */
1781 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
/* Swap the DMA map into the first slot's buffer for later unload. */
1784 tx_buffer->m_head = m_head;
1785 tx_buffer_mapped->map = tx_buffer->map;
1786 tx_buffer->map = map;
1787 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1790 * Last Descriptor of Packet
1791 * needs End Of Packet (EOP)
1792 * and Report Status (RS)
1795 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1797 * Keep track in the first buffer which
1798 * descriptor will be written back
1800 tx_buffer = &adapter->tx_buffer_area[first];
1801 tx_buffer->next_eop = last;
1802 adapter->watchdog_time = ticks;
1805 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1806 * that this frame is available to transmit.
1808 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1809 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* NIC_PARAVIRT path: publish the tail via the shared CSB instead. */
1813 adapter->csb->guest_tdt = i;
1814 /* XXX memory barrier ? */
1815 if (adapter->csb->guest_csb_on &&
1816 !(adapter->csb->host_need_txkick & 1)) {
1817 /* XXX maybe useless
1818 * clean the ring. maybe do it before ?
1819 * maybe a little bit of histeresys ?
1821 if (adapter->num_tx_desc_avail <= 64) {// XXX
1827 #endif /* NIC_PARAVIRT */
1829 #ifdef NIC_SEND_COMBINING
/* Coalesce TDT writes: record the pending tail in shadow_tdt. */
1830 if (adapter->sc_enable) {
1831 if (adapter->shadow_tdt & MIT_PENDING_INT) {
1832 /* signal intr and data pending */
1833 adapter->shadow_tdt = MIT_PENDING_TDT | (i & 0xffff);
1836 adapter->shadow_tdt = MIT_PENDING_INT;
1839 #endif /* NIC_SEND_COMBINING */
/* 82547 half-duplex: defer the tail write via the FIFO workaround. */
1841 if (adapter->hw.mac.type == e1000_82547 &&
1842 adapter->link_duplex == HALF_DUPLEX)
1843 lem_82547_move_tail(adapter);
1845 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1846 if (adapter->hw.mac.type == e1000_82547)
1847 lem_82547_update_fifo_head(adapter,
1848 m_head->m_pkthdr.len);
1854 /*********************************************************************
1856 * 82547 workaround to avoid controller hang in half-duplex environment.
1857 * The workaround is to avoid queuing a large packet that would span
1858 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1859 * in this case. We do that only when FIFO is quiescent.
1861 **********************************************************************/
/*
 * lem_82547_move_tail - deferred TDT advance for the 82547 half-duplex
 * FIFO-hang workaround (see header comment above). Walks the pending
 * descriptors to total their length; if the packet would trip the FIFO
 * workaround it retries via a 1-tick callout, otherwise it writes the
 * tail and advances the software FIFO head. Callout reentry gives the
 * void* arg signature.
 */
1863 lem_82547_move_tail(void *arg)
1865 struct adapter *adapter = arg;
1866 struct e1000_tx_desc *tx_desc;
1867 u16 hw_tdt, sw_tdt, length = 0;
1870 EM_TX_LOCK_ASSERT(adapter);
1872 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
1873 sw_tdt = adapter->next_avail_tx_desc;
/* Accumulate the byte length of everything queued but not yet told to HW. */
1875 while (hw_tdt != sw_tdt) {
1876 tx_desc = &adapter->tx_desc_base[hw_tdt];
1877 length += tx_desc->lower.flags.length;
1878 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1879 if (++hw_tdt == adapter->num_tx_desc)
/* FIFO not safe yet: retry shortly instead of hanging the controller. */
1883 if (lem_82547_fifo_workaround(adapter, length)) {
1884 adapter->tx_fifo_wrk_cnt++;
1885 callout_reset(&adapter->tx_fifo_timer, 1,
1886 lem_82547_move_tail, adapter);
1889 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
1890 lem_82547_update_fifo_head(adapter, length);
/*
 * lem_82547_fifo_workaround - decide whether a packet of 'len' bytes may
 * be released to the 82547 TX FIFO in half-duplex mode. Rounds the
 * length up to the FIFO header granularity and compares it against the
 * remaining FIFO window, attempting a FIFO reset when it would not fit.
 * Return values/closing braces are elided from this view.
 */
1898 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1899 int fifo_space, fifo_pkt_len;
1901 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1903 if (adapter->link_duplex == HALF_DUPLEX) {
1904 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1906 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1907 if (lem_82547_tx_fifo_reset(adapter))
/*
 * lem_82547_update_fifo_head - advance the software-tracked TX FIFO head
 * by the (header-granularity rounded) packet length, wrapping modulo the
 * FIFO size.
 */
1918 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1920 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1922 /* tx_fifo_head is always 16 byte aligned */
1923 adapter->tx_fifo_head += fifo_pkt_len;
1924 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1925 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * lem_82547_tx_fifo_reset - reset the 82547 internal TX FIFO pointers.
 * Only safe when the TX unit is fully quiescent: descriptor head==tail,
 * FIFO head==tail (data and status), and no packets counted in the FIFO.
 * Disables the TX unit around the pointer rewrite, then re-enables it.
 * Return statements are elided from this view.
 */
1931 lem_82547_tx_fifo_reset(struct adapter *adapter)
/* Quiescence check: all four conditions must hold before touching the FIFO. */
1935 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1936 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1937 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1938 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1939 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1940 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1941 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1942 /* Disable TX unit */
1943 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1944 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1945 tctl & ~E1000_TCTL_EN);
1947 /* Reset FIFO pointers */
1948 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1949 adapter->tx_head_addr);
1950 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1951 adapter->tx_head_addr);
1952 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1953 adapter->tx_head_addr);
1954 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1955 adapter->tx_head_addr);
1957 /* Re-enable TX unit */
1958 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
1959 E1000_WRITE_FLUSH(&adapter->hw);
1961 adapter->tx_fifo_head = 0;
1962 adapter->tx_fifo_reset_cnt++;
/*
 * lem_set_promisc - program RCTL promiscuous bits from the interface
 * flags: IFF_PROMISC enables unicast+multicast promiscuous (and SBP,
 * bad-packet reception), IFF_ALLMULTI enables multicast promiscuous
 * only.
 */
1972 lem_set_promisc(struct adapter *adapter)
1974 struct ifnet *ifp = adapter->ifp;
1977 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1979 if (ifp->if_flags & IFF_PROMISC) {
1980 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1981 /* Turn this on if you want to see bad packets */
1983 reg_rctl |= E1000_RCTL_SBP;
1984 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1985 } else if (ifp->if_flags & IFF_ALLMULTI) {
1986 reg_rctl |= E1000_RCTL_MPE;
1987 reg_rctl &= ~E1000_RCTL_UPE;
1988 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/*
 * lem_disable_promisc - clear promiscuous RCTL bits. Unicast
 * promiscuous (UPE) and bad-packet (SBP) are always cleared; multicast
 * promiscuous (MPE) is kept on when ALLMULTI is set or the multicast
 * list overflows the hardware filter (counted below).
 */
1995 lem_disable_promisc(struct adapter *adapter)
1997 struct ifnet *ifp = adapter->ifp;
1999 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2000 reg_rctl &= (~E1000_RCTL_UPE);
2001 if (ifp->if_flags & IFF_ALLMULTI)
2002 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2004 struct ifmultiaddr *ifma;
2005 #if __FreeBSD_version < 800000
2008 if_maddr_rlock(ifp);
/* Count link-layer multicast entries, capped at the hardware limit. */
2010 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2011 if (ifma->ifma_addr->sa_family != AF_LINK)
2013 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2017 #if __FreeBSD_version < 800000
2018 IF_ADDR_UNLOCK(ifp);
2020 if_maddr_runlock(ifp);
2023 /* Don't disable if in MAX groups */
2024 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2025 reg_rctl &= (~E1000_RCTL_MPE);
2026 reg_rctl &= (~E1000_RCTL_SBP);
2027 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2031 /*********************************************************************
2034 * This routine is called whenever multicast address list is updated.
2036 **********************************************************************/
/*
 * lem_set_multi - rebuild the hardware multicast filter from the
 * interface's multicast list. Falls back to multicast-promiscuous when
 * the list exceeds MAX_NUM_MULTICAST_ADDRESSES. Brackets the update
 * with the 82542 rev 2 RCTL_RST/MWI workaround.
 * NOTE(review): the mta allocation/assignment and msec_delay lines are
 * elided from this view.
 */
2039 lem_set_multi(struct adapter *adapter)
2041 struct ifnet *ifp = adapter->ifp;
2042 struct ifmultiaddr *ifma;
2044 u8 *mta; /* Multicast array memory */
2047 IOCTL_DEBUGOUT("lem_set_multi: begin");
2050 bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
/* 82542 rev 2 workaround: reset the receiver (and drop MWI) first. */
2052 if (adapter->hw.mac.type == e1000_82542 &&
2053 adapter->hw.revision_id == E1000_REVISION_2) {
2054 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2055 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2056 e1000_pci_clear_mwi(&adapter->hw);
2057 reg_rctl |= E1000_RCTL_RST;
2058 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2062 #if __FreeBSD_version < 800000
2065 if_maddr_rlock(ifp);
/* Gather link-layer multicast addresses into the flat mta array. */
2067 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2068 if (ifma->ifma_addr->sa_family != AF_LINK)
2071 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2074 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2075 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2078 #if __FreeBSD_version < 800000
2079 IF_ADDR_UNLOCK(ifp);
2081 if_maddr_runlock(ifp);
/* Overflow: accept all multicast rather than filtering a partial list. */
2083 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2084 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2085 reg_rctl |= E1000_RCTL_MPE;
2086 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2088 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
/* Undo the 82542 rev 2 workaround: clear RST and restore MWI. */
2090 if (adapter->hw.mac.type == e1000_82542 &&
2091 adapter->hw.revision_id == E1000_REVISION_2) {
2092 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2093 reg_rctl &= ~E1000_RCTL_RST;
2094 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2096 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2097 e1000_pci_set_mwi(&adapter->hw);
2102 /*********************************************************************
2105 * This routine checks for link status and updates statistics.
2107 **********************************************************************/
/*
 * lem_local_timer - 1 Hz housekeeping callout (armed in lem_init_locked).
 * Refreshes link status and statistics, runs smartspeed, checks the TX
 * watchdog, and re-arms itself. The trailing lines are the watchdog
 * ('hung') path: mark the interface down and re-init.
 * NOTE(review): the goto/label and some intervening lines are elided
 * from this view.
 */
2110 lem_local_timer(void *arg)
2112 struct adapter *adapter = arg;
2114 EM_CORE_LOCK_ASSERT(adapter);
2116 lem_update_link_status(adapter);
2117 lem_update_stats_counters(adapter);
2119 lem_smartspeed(adapter);
2122 /* recover space if needed */
/* NIC_PARAVIRT: reclaim descriptors when the watchdog would fire. */
2123 if (adapter->csb && adapter->csb->guest_csb_on &&
2124 (adapter->watchdog_check == TRUE) &&
2125 (ticks - adapter->watchdog_time > EM_WATCHDOG) &&
2126 (adapter->num_tx_desc_avail != adapter->num_tx_desc) ) {
2129 * lem_txeof() normally (except when space in the queue
2130 * runs low XXX) cleans watchdog_check so that
2134 #endif /* NIC_PARAVIRT */
2136 * We check the watchdog: the time since
2137 * the last TX descriptor was cleaned.
2138 * This implies a functional TX engine.
2140 if ((adapter->watchdog_check == TRUE) &&
2141 (ticks - adapter->watchdog_time > EM_WATCHDOG))
2144 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
/* Watchdog expired: log, mark down, count the event and re-init. */
2147 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2148 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2149 adapter->watchdog_events++;
2150 lem_init_locked(adapter);
/*
 * lem_update_link_status - refresh cached link state from the PHY/MAC
 * and notify the stack on transitions (up: report speed/duplex and set
 * if_baudrate; down: clear state and disable the TX watchdog).
 */
2154 lem_update_link_status(struct adapter *adapter)
2156 struct e1000_hw *hw = &adapter->hw;
2157 struct ifnet *ifp = adapter->ifp;
2158 device_t dev = adapter->dev;
2161 /* Get the cached link value or read phy for real */
2162 switch (hw->phy.media_type) {
2163 case e1000_media_type_copper:
2164 if (hw->mac.get_link_status) {
2165 /* Do the work to read phy */
2166 e1000_check_for_link(hw);
/* get_link_status is cleared by the shared code once link is read. */
2167 link_check = !hw->mac.get_link_status;
2168 if (link_check) /* ESB2 fix */
2169 e1000_cfg_on_link_up(hw);
2173 case e1000_media_type_fiber:
2174 e1000_check_for_link(hw);
/* Fiber link state comes from the STATUS register (LU bit, elided). */
2175 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2178 case e1000_media_type_internal_serdes:
2179 e1000_check_for_link(hw);
2180 link_check = adapter->hw.mac.serdes_has_link;
2183 case e1000_media_type_unknown:
2187 /* Now check for a transition */
2188 if (link_check && (adapter->link_active == 0)) {
2189 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2190 &adapter->link_duplex);
2192 device_printf(dev, "Link is up %d Mbps %s\n",
2193 adapter->link_speed,
2194 ((adapter->link_duplex == FULL_DUPLEX) ?
2195 "Full Duplex" : "Half Duplex"));
2196 adapter->link_active = 1;
2197 adapter->smartspeed = 0;
2198 ifp->if_baudrate = adapter->link_speed * 1000000;
2199 if_link_state_change(ifp, LINK_STATE_UP);
2200 } else if (!link_check && (adapter->link_active == 1)) {
2201 ifp->if_baudrate = adapter->link_speed = 0;
2202 adapter->link_duplex = 0;
2204 device_printf(dev, "Link is Down\n");
2205 adapter->link_active = 0;
2206 /* Link down, disable watchdog */
2207 adapter->watchdog_check = FALSE;
2208 if_link_state_change(ifp, LINK_STATE_DOWN);
2212 /*********************************************************************
2214 * This routine disables all traffic on the adapter by issuing a
2215 * global reset on the MAC and deallocates TX/RX buffers.
2217 * This routine should always be called with BOTH the CORE
2219 **********************************************************************/
/*
 * lem_stop body (signature elided from this view) - quiesce the adapter:
 * mask interrupts, stop the callouts, clear RUNNING/OACTIVE, reset the
 * MAC and turn the LED off. Per the header comment above, caller must
 * hold BOTH the core and TX locks (asserted below).
 */
2224 struct adapter *adapter = arg;
2225 struct ifnet *ifp = adapter->ifp;
2227 EM_CORE_LOCK_ASSERT(adapter);
2228 EM_TX_LOCK_ASSERT(adapter);
2230 INIT_DEBUGOUT("lem_stop: begin");
2232 lem_disable_intr(adapter);
2233 callout_stop(&adapter->timer);
2234 callout_stop(&adapter->tx_fifo_timer);
2236 /* Tell the stack that the interface is no longer active */
2237 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2239 e1000_reset_hw(&adapter->hw);
/* 82544+ have a Wake-Up Control register; clear any wake state. */
2240 if (adapter->hw.mac.type >= e1000_82544)
2241 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2243 e1000_led_off(&adapter->hw);
2244 e1000_cleanup_led(&adapter->hw);
2248 /*********************************************************************
2250 * Determine hardware revision.
2252 **********************************************************************/
/*
 * lem_identify_hardware - read PCI config identity (vendor/device/
 * revision/subsystem IDs) into the shared-code hw struct, enable bus
 * mastering, and let the shared code derive the MAC type.
 */
2254 lem_identify_hardware(struct adapter *adapter)
2256 device_t dev = adapter->dev;
2258 /* Make sure our PCI config space has the necessary stuff set */
2259 pci_enable_busmaster(dev);
2260 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2262 /* Save off the information about this board */
2263 adapter->hw.vendor_id = pci_get_vendor(dev);
2264 adapter->hw.device_id = pci_get_device(dev);
2265 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2266 adapter->hw.subsystem_vendor_id =
2267 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2268 adapter->hw.subsystem_device_id =
2269 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2271 /* Do Shared Code Init and Setup */
2272 if (e1000_set_mac_type(&adapter->hw)) {
2273 device_printf(dev, "Setup init failure\n");
/*
 * lem_allocate_pci_resources - map the memory BAR (register space) and,
 * for adapters that use it, locate and map the IO BAR by scanning the
 * PCI BAR registers. Returns E1000_SUCCESS or an error (error paths
 * partially elided from this view).
 * NOTE(review): the `>` comparison at original line 2298 appears to
 * gate the IO-BAR scan; header comment says only OLDER adapters use IO
 * mapping — confirm the intended condition against the full source.
 */
2279 lem_allocate_pci_resources(struct adapter *adapter)
2281 device_t dev = adapter->dev;
2282 int val, rid, error = E1000_SUCCESS;
2285 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2287 if (adapter->memory == NULL) {
2288 device_printf(dev, "Unable to allocate bus resource: memory\n");
2291 adapter->osdep.mem_bus_space_tag =
2292 rman_get_bustag(adapter->memory);
2293 adapter->osdep.mem_bus_space_handle =
2294 rman_get_bushandle(adapter->memory);
2295 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2297 /* Only older adapters use IO mapping */
2298 if (adapter->hw.mac.type > e1000_82543) {
2299 /* Figure our where our IO BAR is ? */
2300 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2301 val = pci_read_config(dev, rid, 4);
2302 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2303 adapter->io_rid = rid;
2307 /* check for 64bit BAR */
/* 64-bit memory BARs occupy two dwords; skip the extra register. */
2308 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2311 if (rid >= PCIR_CIS) {
2312 device_printf(dev, "Unable to locate IO BAR\n");
2315 adapter->ioport = bus_alloc_resource_any(dev,
2316 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2317 if (adapter->ioport == NULL) {
2318 device_printf(dev, "Unable to allocate bus resource: "
2322 adapter->hw.io_base = 0;
2323 adapter->osdep.io_bus_space_tag =
2324 rman_get_bustag(adapter->ioport);
2325 adapter->osdep.io_bus_space_handle =
2326 rman_get_bushandle(adapter->ioport);
/* Shared code reaches the OS-specific handles through hw.back. */
2329 adapter->hw.back = &adapter->osdep;
2334 /*********************************************************************
2336 *  Setup the Legacy or MSI Interrupt handler
2338 **********************************************************************/
/*
 * Allocate the interrupt resource and install either a legacy ithread
 * handler (lem_intr) or a fast/filter handler (lem_irq_fast) with a
 * taskqueue for deferred RX/TX and link processing.
 * NOTE(review): the listing omits lines (the rid declaration/initializer
 * and several error-return paths); code below is kept byte-identical.
 */
2340 lem_allocate_irq(struct adapter *adapter)
2342 device_t dev = adapter->dev;
2345 /* Manually turn off all interrupts */
2346 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2348 /* We allocate a single interrupt resource */
2349 adapter->res[0] = bus_alloc_resource_any(dev,
2350 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2351 if (adapter->res[0] == NULL) {
2352 device_printf(dev, "Unable to allocate bus resource: "
2357 /* Do Legacy setup? */
/* lem_use_legacy_irq is a tunable: non-zero selects the ithread handler. */
2358 if (lem_use_legacy_irq) {
2359 if ((error = bus_setup_intr(dev, adapter->res[0],
2360 INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
2361 &adapter->tag[0])) != 0) {
2363 "Failed to register interrupt handler");
2370 * Use a Fast interrupt and the associated
2371 * deferred processing contexts.
2373 TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
2374 TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
2375 adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
2376 taskqueue_thread_enqueue, &adapter->tq);
2377 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2378 device_get_nameunit(adapter->dev));
/* Filter handler runs in interrupt context and enqueues the tasks above. */
2379 if ((error = bus_setup_intr(dev, adapter->res[0],
2380 INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
2381 &adapter->tag[0])) != 0) {
2382 device_printf(dev, "Failed to register fast interrupt "
2383 "handler: %d\n", error);
/* On failure, tear down the taskqueue created above. */
2384 taskqueue_free(adapter->tq);
/*
 * Release everything lem_allocate_pci_resources()/lem_allocate_irq()
 * acquired: interrupt handler and resource, memory BAR, and I/O BAR.
 * Safe to call with partially-initialized state (NULL checks throughout).
 * NOTE(review): listing omits the return type line and closing brace.
 */
2394 lem_free_pci_resources(struct adapter *adapter)
2396 device_t dev = adapter->dev;
/* Tear down the interrupt handler before releasing the IRQ resource. */
2399 if (adapter->tag[0] != NULL) {
2400 bus_teardown_intr(dev, adapter->res[0],
2402 adapter->tag[0] = NULL;
2405 if (adapter->res[0] != NULL) {
2406 bus_release_resource(dev, SYS_RES_IRQ,
2407 0, adapter->res[0]);
2410 if (adapter->memory != NULL)
2411 bus_release_resource(dev, SYS_RES_MEMORY,
2412 PCIR_BAR(0), adapter->memory);
2414 if (adapter->ioport != NULL)
2415 bus_release_resource(dev, SYS_RES_IOPORT,
2416 adapter->io_rid, adapter->ioport);
2420 /*********************************************************************
2422 *  Initialize the hardware to a configuration
2423 *  as specified by the adapter structure.
2425 **********************************************************************/
/*
 * Reset the MAC, program the flow-control watermarks from the packet
 * buffer size, and run the shared-code hardware initialization.
 * NOTE(review): listing omits lines (local declarations for
 * rx_buffer_size, the PBA mask/shift, and return statements).
 */
2427 lem_hardware_init(struct adapter *adapter)
2429 device_t dev = adapter->dev;
2432 INIT_DEBUGOUT("lem_hardware_init: begin");
2434 /* Issue a global reset */
2435 e1000_reset_hw(&adapter->hw);
2437 /* When hardware is reset, fifo_head is also reset */
2438 adapter->tx_fifo_head = 0;
2441 * These parameters control the automatic generation (Tx) and
2442 * response (Rx) to Ethernet PAUSE frames.
2443 * - High water mark should allow for at least two frames to be
2444 * received after sending an XOFF.
2445 * - Low water mark works best when it is very near the high water mark.
2446 * This allows the receiver to restart by sending XON when it has
2447 * drained a bit. Here we use an arbitrary value of 1500 which will
2448 * restart after one full frame is pulled from the buffer. There
2449 * could be several smaller frames in the buffer and if so they will
2450 * not trigger the XON until their total number reduces the buffer
2452 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2454 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
/* High water: RX buffer size minus room for one max-sized frame. */
2457 adapter->hw.fc.high_water = rx_buffer_size -
2458 roundup2(adapter->max_frame_size, 1024);
2459 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2461 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2462 adapter->hw.fc.send_xon = TRUE;
2464 /* Set Flow control, use the tunable location if sane */
/* Valid modes are 0..3 (none/rx/tx/full); anything else means "none". */
2465 if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
2466 adapter->hw.fc.requested_mode = lem_fc_setting;
2468 adapter->hw.fc.requested_mode = e1000_fc_none;
2470 if (e1000_init_hw(&adapter->hw) < 0) {
2471 device_printf(dev, "Hardware Initialization Failed\n");
2475 e1000_check_for_link(&adapter->hw);
2480 /*********************************************************************
2482 *  Setup networking device structure and register an interface.
2484 **********************************************************************/
/*
 * Allocate and initialize the ifnet, attach it to the ethernet layer,
 * advertise capabilities (checksum offload, VLAN, WOL, polling), and
 * register the supported media types with ifmedia.
 * NOTE(review): listing omits lines (the ifp NULL check body, some
 * ifmedia_add data arguments, and return statements).
 */
2486 lem_setup_interface(device_t dev, struct adapter *adapter)
2490 INIT_DEBUGOUT("lem_setup_interface: begin");
2492 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2494 device_printf(dev, "can not allocate ifnet structure\n");
2497 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2498 ifp->if_init = lem_init;
2499 ifp->if_softc = adapter;
2500 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2501 ifp->if_ioctl = lem_ioctl;
2502 ifp->if_start = lem_start;
/* Size the send queue to the TX ring, leaving one descriptor spare. */
2503 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2504 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2505 IFQ_SET_READY(&ifp->if_snd);
2507 ether_ifattach(ifp, adapter->hw.mac.addr);
2509 ifp->if_capabilities = ifp->if_capenable = 0;
/* HW checksum offload is available on 82543 and newer parts. */
2511 if (adapter->hw.mac.type >= e1000_82543) {
2512 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2513 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2517 * Tell the upper layer(s) we support long frames.
2519 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2520 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2521 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2524 ** Don't turn this on by default, if vlans are
2525 ** created on another pseudo device (eg. lagg)
2526 ** then vlan events are not passed through, breaking
2527 ** operation, but with HW FILTER off it works. If
2528 ** using vlans directly on the em driver you can
2529 ** enable this and get full hardware tag filtering.
2531 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2533 #ifdef DEVICE_POLLING
2534 ifp->if_capabilities |= IFCAP_POLLING;
2537 /* Enable only WOL MAGIC by default */
2539 ifp->if_capabilities |= IFCAP_WOL;
2540 ifp->if_capenable |= IFCAP_WOL_MAGIC;
2544 * Specify the media types supported by this adapter and register
2545 * callbacks to update media and link information
2547 ifmedia_init(&adapter->media, IFM_IMASK,
2548 lem_media_change, lem_media_status);
2549 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2550 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2551 u_char fiber_type = IFM_1000_SX; /* default type */
/* The 82545 fiber variant uses LX optics rather than SX. */
2553 if (adapter->hw.mac.type == e1000_82545)
2554 fiber_type = IFM_1000_LX;
2555 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
2557 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2559 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2560 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2562 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2564 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
/* The IFE PHY cannot do 1000BASE-T, so skip gigabit media for it. */
2566 if (adapter->hw.phy.type != e1000_phy_ife) {
2567 ifmedia_add(&adapter->media,
2568 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2569 ifmedia_add(&adapter->media,
2570 IFM_ETHER | IFM_1000_T, 0, NULL);
2573 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2574 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2579 /*********************************************************************
2581 *  Workaround for SmartSpeed on 82541 and 82547 controllers
2583 **********************************************************************/
/*
 * Periodically called while the link is down: if repeated Master/Slave
 * configuration faults are seen, toggle the PHY's MS_ENABLE bit and
 * restart autonegotiation, so links with marginal cabling can come up.
 * NOTE(review): listing omits lines (function signature's return type,
 * the phy_tmp declaration, and some closing braces).
 */
2585 lem_smartspeed(struct adapter *adapter)
/* Only relevant while the link is down, on IGP PHYs, with 1000FD
 * advertised via autoneg; otherwise there is nothing to work around. */
2589 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2590 adapter->hw.mac.autoneg == 0 ||
2591 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2594 if (adapter->smartspeed == 0) {
2595 /* If Master/Slave config fault is asserted twice,
2596 * we assume back-to-back */
2597 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2598 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2600 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2601 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2602 e1000_read_phy_reg(&adapter->hw,
2603 PHY_1000T_CTRL, &phy_tmp);
/* Fault persists: stop forcing master/slave and re-negotiate. */
2604 if(phy_tmp & CR_1000T_MS_ENABLE) {
2605 phy_tmp &= ~CR_1000T_MS_ENABLE;
2606 e1000_write_phy_reg(&adapter->hw,
2607 PHY_1000T_CTRL, phy_tmp);
2608 adapter->smartspeed++;
2609 if(adapter->hw.mac.autoneg &&
2610 !e1000_copper_link_autoneg(&adapter->hw) &&
2611 !e1000_read_phy_reg(&adapter->hw,
2612 PHY_CONTROL, &phy_tmp)) {
2613 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2614 MII_CR_RESTART_AUTO_NEG);
2615 e1000_write_phy_reg(&adapter->hw,
2616 PHY_CONTROL, phy_tmp);
2621 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2622 /* If still no link, perhaps using 2/3 pair cable */
/* Re-enable manual master/slave and try autoneg once more. */
2623 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2624 phy_tmp |= CR_1000T_MS_ENABLE;
2625 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2626 if(adapter->hw.mac.autoneg &&
2627 !e1000_copper_link_autoneg(&adapter->hw) &&
2628 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2629 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2630 MII_CR_RESTART_AUTO_NEG);
2631 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2634 /* Restart process after EM_SMARTSPEED_MAX iterations */
2635 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2636 adapter->smartspeed = 0;
2641 * Manage DMA'able memory.
/*
 * bus_dmamap_load() callback: record the physical address of the first
 * (and only) segment into the bus_addr_t the caller passed as arg.
 * NOTE(review): the listing omits the signature's return type and the
 * error/nseg checks, if any; code below is kept byte-identical.
 */
2644 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2648 *(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * Allocate a physically-contiguous DMA area of 'size' bytes for a
 * descriptor ring: create a tag, allocate coherent memory, and load the
 * map so dma->dma_paddr holds the bus address.  On any failure the
 * partially-created resources are torn down (goto-cleanup labels below).
 * NOTE(review): listing omits lines (maxsize/nsegments arguments, the
 * lockarg, error-return statements, and the cleanup labels themselves).
 */
2652 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2653 struct em_dma_alloc *dma, int mapflags)
2657 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2658 EM_DBA_ALIGN, 0, /* alignment, bounds */
2659 BUS_SPACE_MAXADDR, /* lowaddr */
2660 BUS_SPACE_MAXADDR, /* highaddr */
2661 NULL, NULL, /* filter, filterarg */
2664 size, /* maxsegsize */
2666 NULL, /* lockfunc */
2670 device_printf(adapter->dev,
2671 "%s: bus_dma_tag_create failed: %d\n",
2676 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2677 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2679 device_printf(adapter->dev,
2680 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2681 __func__, (uintmax_t)size, error);
/* lem_dmamap_cb() stores the segment's bus address in dma->dma_paddr. */
2686 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2687 size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2688 if (error || dma->dma_paddr == 0) {
2689 device_printf(adapter->dev,
2690 "%s: bus_dmamap_load failed: %d\n",
/* Error unwind: release in reverse order of acquisition. */
2698 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2700 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2701 bus_dma_tag_destroy(dma->dma_tag);
2703 dma->dma_tag = NULL;
/*
 * Release a DMA area created by lem_dma_malloc(): sync and unload the
 * map, free the memory, and destroy the tag.  Idempotent — a NULL tag
 * means the area was never allocated (or was already freed).
 */
2709 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2711 if (dma->dma_tag == NULL)
2713 if (dma->dma_paddr != 0) {
/* Finish any in-flight DMA before unloading the mapping. */
2714 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2715 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2716 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2719 if (dma->dma_vaddr != NULL) {
2720 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2721 dma->dma_vaddr = NULL;
2723 bus_dma_tag_destroy(dma->dma_tag);
2724 dma->dma_tag = NULL;
2728 /*********************************************************************
2730 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2731 *  the information needed to transmit a packet on the wire.
2733 **********************************************************************/
/*
 * Create the TX mbuf DMA tag, allocate the tx_buffer array, and create
 * one DMA map per descriptor.  On failure, everything allocated so far
 * is released via lem_free_transmit_structures().
 * NOTE(review): listing omits lines (the lockarg argument, error-return
 * statements, and the 'fail:' label before the cleanup call).
 */
2735 lem_allocate_transmit_structures(struct adapter *adapter)
2737 device_t dev = adapter->dev;
2738 struct em_buffer *tx_buffer;
2742 * Create DMA tags for tx descriptors
2744 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2745 1, 0, /* alignment, bounds */
2746 BUS_SPACE_MAXADDR, /* lowaddr */
2747 BUS_SPACE_MAXADDR, /* highaddr */
2748 NULL, NULL, /* filter, filterarg */
2749 MCLBYTES * EM_MAX_SCATTER, /* maxsize */
2750 EM_MAX_SCATTER, /* nsegments */
2751 MCLBYTES, /* maxsegsize */
2753 NULL, /* lockfunc */
2755 &adapter->txtag)) != 0) {
2756 device_printf(dev, "Unable to allocate TX DMA tag\n");
2760 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2761 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2762 if (adapter->tx_buffer_area == NULL) {
2763 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2768 /* Create the descriptor buffer dma maps */
2769 for (int i = 0; i < adapter->num_tx_desc; i++) {
2770 tx_buffer = &adapter->tx_buffer_area[i];
2771 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2773 device_printf(dev, "Unable to create TX DMA map\n");
/* -1 marks "no end-of-packet descriptor recorded yet". */
2776 tx_buffer->next_eop = -1;
/* Failure path: undo all TX allocations made so far. */
2781 lem_free_transmit_structures(adapter);
2785 /*********************************************************************
2787 *  (Re)Initialize transmit structures.
2789 **********************************************************************/
/*
 * Reset the TX ring for (re)use: zero the descriptors, free any mbufs
 * still attached to tx_buffers, reset the ring indices, and sync the
 * descriptor memory.  Under DEV_NETMAP, descriptors are instead pointed
 * at the netmap slots' buffers.
 * NOTE(review): listing omits lines (the #ifdef DEV_NETMAP open for the
 * netmap locals, the slot NULL check, and some closing braces).
 */
2791 lem_setup_transmit_structures(struct adapter *adapter)
2793 struct em_buffer *tx_buffer;
2795 /* we are already locked */
2796 struct netmap_adapter *na = NA(adapter->ifp);
2797 struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
2798 #endif /* DEV_NETMAP */
2800 /* Clear the old ring contents */
2801 bzero(adapter->tx_desc_base,
2802 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2804 /* Free any existing TX buffers */
2805 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2806 tx_buffer = &adapter->tx_buffer_area[i];
2807 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2808 BUS_DMASYNC_POSTWRITE);
2809 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
/* m_freem(NULL) is a no-op, so unconditional free is safe here. */
2810 m_freem(tx_buffer->m_head);
2811 tx_buffer->m_head = NULL;
2814 /* the i-th NIC entry goes to slot si */
2815 int si = netmap_idx_n2k(&na->tx_rings[0], i);
2819 addr = PNMB(na, slot + si, &paddr);
2820 adapter->tx_desc_base[i].buffer_addr = htole64(paddr);
2821 /* reload the map for netmap mode */
2822 netmap_load_map(na, adapter->txtag, tx_buffer->map, addr);
2824 #endif /* DEV_NETMAP */
2825 tx_buffer->next_eop = -1;
/* Reset software ring state: full ring available, start at index 0. */
2829 adapter->last_hw_offload = 0;
2830 adapter->next_avail_tx_desc = 0;
2831 adapter->next_tx_to_clean = 0;
2832 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2834 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2835 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2840 /*********************************************************************
2842 *  Enable transmit unit.
2844 **********************************************************************/
/*
 * Program the TX descriptor ring registers (base/length/head/tail),
 * inter-packet gap, interrupt delay timers, and the Transmit Control
 * Register; writing TCTL with E1000_TCTL_EN enables the transmitter.
 * NOTE(review): listing omits lines (local declarations for tipg/tctl/
 * bus_addr, the switch case labels, and the low-32-bit TDBAL argument).
 */
2846 lem_initialize_transmit_unit(struct adapter *adapter)
2851 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
2852 /* Setup the Base and Length of the Tx Descriptor Ring */
2853 bus_addr = adapter->txdma.dma_paddr;
2854 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2855 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2856 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2857 (u32)(bus_addr >> 32));
2858 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2860 /* Setup the HW Tx Head and Tail descriptor pointers */
2861 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2862 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
2864 HW_DEBUGOUT2("Base = %x, Length = %x\n",
2865 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
2866 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
2868 /* Set the default values for the Tx Inter Packet Gap timer */
2869 switch (adapter->hw.mac.type) {
2871 tipg = DEFAULT_82542_TIPG_IPGT;
2872 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2873 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
/* Fiber/serdes media need a different IPG transmit time than copper. */
2876 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2877 (adapter->hw.phy.media_type ==
2878 e1000_media_type_internal_serdes))
2879 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2881 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2882 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2883 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2886 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2887 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
/* The absolute TX interrupt delay (TADV) exists only on 82540+. */
2888 if(adapter->hw.mac.type >= e1000_82540)
2889 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
2890 adapter->tx_abs_int_delay.value);
2892 /* Program the Transmit Control Register */
2893 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2894 tctl &= ~E1000_TCTL_CT;
2895 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2896 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2898 /* This write will effectively turn on the transmit unit. */
2899 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2901 /* Setup Transmit Descriptor Base Settings */
2902 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
/* If an interrupt delay is configured, request delayed TX interrupts. */
2904 if (adapter->tx_int_delay.value > 0)
2905 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2908 /*********************************************************************
2910 *  Free all transmit related data structures.
2912 **********************************************************************/
/*
 * Tear down everything lem_allocate_transmit_structures() built: free
 * any queued mbufs, destroy the per-descriptor DMA maps, free the
 * tx_buffer array, and destroy the TX tag.  NULL checks make it safe
 * on partially-initialized state.
 */
2914 lem_free_transmit_structures(struct adapter *adapter)
2916 struct em_buffer *tx_buffer;
2918 INIT_DEBUGOUT("free_transmit_structures: begin");
2920 if (adapter->tx_buffer_area != NULL) {
2921 for (int i = 0; i < adapter->num_tx_desc; i++) {
2922 tx_buffer = &adapter->tx_buffer_area[i];
2923 if (tx_buffer->m_head != NULL) {
/* A buffer with an mbuf attached: sync and unload before freeing. */
2924 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2925 BUS_DMASYNC_POSTWRITE);
2926 bus_dmamap_unload(adapter->txtag,
2928 m_freem(tx_buffer->m_head);
2929 tx_buffer->m_head = NULL;
2930 } else if (tx_buffer->map != NULL)
2931 bus_dmamap_unload(adapter->txtag,
2933 if (tx_buffer->map != NULL) {
2934 bus_dmamap_destroy(adapter->txtag,
2936 tx_buffer->map = NULL;
2940 if (adapter->tx_buffer_area != NULL) {
2941 free(adapter->tx_buffer_area, M_DEVBUF);
2942 adapter->tx_buffer_area = NULL;
2944 if (adapter->txtag != NULL) {
2945 bus_dma_tag_destroy(adapter->txtag);
2946 adapter->txtag = NULL;
2950 /*********************************************************************
2952 *  The offload context needs to be set when we transfer the first
2953 *  packet of a particular protocol (TCP/UDP). This routine has been
2954 *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2956 *  Added back the old method of keeping the current context type
2957 *  and not setting if unnecessary, as this is reported to be a
2958 *  big performance win. -jfv
2959 **********************************************************************/
/*
 * Build (if needed) a TX context descriptor describing where the IP and
 * TCP/UDP checksums should be inserted, and return the txd_upper/
 * txd_lower bits the caller must OR into the packet's data descriptors.
 * The context is skipped when last_hw_offload already matches.
 * NOTE(review): listing omits lines (etype/ipproto declarations, switch/
 * case labels for the etype and ipproto dispatch, break/return
 * statements, and some closing braces).
 */
2961 lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2962 u32 *txd_upper, u32 *txd_lower)
2964 struct e1000_context_desc *TXD = NULL;
2965 struct em_buffer *tx_buffer;
2966 struct ether_vlan_header *eh;
2967 struct ip *ip = NULL;
2968 struct ip6_hdr *ip6;
2969 int curr_txd, ehdrlen;
2970 u32 cmd, hdr_len, ip_hlen;
2975 cmd = hdr_len = ipproto = 0;
2976 *txd_upper = *txd_lower = 0;
2977 curr_txd = adapter->next_avail_tx_desc;
2980 * Determine where frame payload starts.
2981 * Jump over vlan headers if already present,
2982 * helpful for QinQ too.
2984 eh = mtod(mp, struct ether_vlan_header *);
2985 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2986 etype = ntohs(eh->evl_proto);
2987 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2989 etype = ntohs(eh->evl_encap_proto);
2990 ehdrlen = ETHER_HDR_LEN;
2994 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
2995 * TODO: Support SCTP too when it hits the tree.
/* IPv4 path: ip_hl is in 32-bit words, so shift to get bytes. */
2999 ip = (struct ip *)(mp->m_data + ehdrlen);
3000 ip_hlen = ip->ip_hl << 2;
3002 /* Setup of IP header checksum. */
3003 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3005 * Start offset for header checksum calculation.
3006 * End offset for header checksum calculation.
3007 * Offset of place to put the checksum.
3009 TXD = (struct e1000_context_desc *)
3010 &adapter->tx_desc_base[curr_txd];
3011 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3012 TXD->lower_setup.ip_fields.ipcse =
3013 htole16(ehdrlen + ip_hlen);
3014 TXD->lower_setup.ip_fields.ipcso =
3015 ehdrlen + offsetof(struct ip, ip_sum);
3016 cmd |= E1000_TXD_CMD_IP;
3017 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3020 hdr_len = ehdrlen + ip_hlen;
3024 case ETHERTYPE_IPV6:
3025 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3026 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3028 /* IPv6 doesn't have a header checksum. */
3030 hdr_len = ehdrlen + ip_hlen;
3031 ipproto = ip6->ip6_nxt;
3040 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3041 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3042 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3043 /* no need for context if already set */
3044 if (adapter->last_hw_offload == CSUM_TCP)
3046 adapter->last_hw_offload = CSUM_TCP;
3048 * Start offset for payload checksum calculation.
3049 * End offset for payload checksum calculation.
3050 * Offset of place to put the checksum.
3052 TXD = (struct e1000_context_desc *)
3053 &adapter->tx_desc_base[curr_txd];
3054 TXD->upper_setup.tcp_fields.tucss = hdr_len;
/* tucse == 0 means "checksum to the end of the packet". */
3055 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3056 TXD->upper_setup.tcp_fields.tucso =
3057 hdr_len + offsetof(struct tcphdr, th_sum);
3058 cmd |= E1000_TXD_CMD_TCP;
3063 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3064 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3065 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3066 /* no need for context if already set */
3067 if (adapter->last_hw_offload == CSUM_UDP)
3069 adapter->last_hw_offload = CSUM_UDP;
3071 * Start offset for header checksum calculation.
3072 * End offset for header checksum calculation.
3073 * Offset of place to put the checksum.
3075 TXD = (struct e1000_context_desc *)
3076 &adapter->tx_desc_base[curr_txd];
3077 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3078 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3079 TXD->upper_setup.tcp_fields.tucso =
3080 hdr_len + offsetof(struct udphdr, uh_sum);
/* Finalize the context descriptor and consume one ring slot for it. */
3090 TXD->tcp_seg_setup.data = htole32(0);
3091 TXD->cmd_and_length =
3092 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3093 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3094 tx_buffer->m_head = NULL;
3095 tx_buffer->next_eop = -1;
3097 if (++curr_txd == adapter->num_tx_desc)
3100 adapter->num_tx_desc_avail--;
3101 adapter->next_avail_tx_desc = curr_txd;
3105 /**********************************************************************
3107 *  Examine each tx_buffer in the used queue. If the hardware is done
3108 *  processing the packet then free associated resources. The
3109 *  tx_buffer is put back on the free queue.
3111 **********************************************************************/
/*
 * TX completion: walk the ring from next_tx_to_clean, and for each
 * packet whose end-of-packet descriptor has the DD (descriptor done)
 * bit set, free its mbuf and reclaim its descriptors.  Clears
 * IFF_DRV_OACTIVE and the watchdog once enough space is reclaimed.
 * NOTE(review): listing omits lines (the 'done' computation, num_avail
 * increments, packet-count update, and some closing braces).
 */
3113 lem_txeof(struct adapter *adapter)
3115 int first, last, done, num_avail;
3116 struct em_buffer *tx_buffer;
3117 struct e1000_tx_desc *tx_desc, *eop_desc;
3118 struct ifnet *ifp = adapter->ifp;
3120 EM_TX_LOCK_ASSERT(adapter);
/* In netmap mode, completion is handled by the netmap TX irq path. */
3123 if (netmap_tx_irq(ifp, 0))
3125 #endif /* DEV_NETMAP */
/* Ring already fully reclaimed — nothing to do. */
3126 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3129 num_avail = adapter->num_tx_desc_avail;
3130 first = adapter->next_tx_to_clean;
3131 tx_desc = &adapter->tx_desc_base[first];
3132 tx_buffer = &adapter->tx_buffer_area[first];
3133 last = tx_buffer->next_eop;
3134 eop_desc = &adapter->tx_desc_base[last];
3137 * What this does is get the index of the
3138 * first descriptor AFTER the EOP of the
3139 * first packet, that way we can do the
3140 * simple comparison on the inner while loop.
3142 if (++last == adapter->num_tx_desc)
/* Pick up the hardware's status writes before inspecting DD bits. */
3146 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3147 BUS_DMASYNC_POSTREAD);
3149 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3150 /* We clean the range of the packet */
3151 while (first != done) {
3152 tx_desc->upper.data = 0;
3153 tx_desc->lower.data = 0;
3154 tx_desc->buffer_addr = 0;
3157 if (tx_buffer->m_head) {
3159 bus_dmamap_sync(adapter->txtag,
3161 BUS_DMASYNC_POSTWRITE);
3162 bus_dmamap_unload(adapter->txtag,
3165 m_freem(tx_buffer->m_head);
3166 tx_buffer->m_head = NULL;
3168 tx_buffer->next_eop = -1;
/* Progress made: pet the watchdog timestamp. */
3169 adapter->watchdog_time = ticks;
3171 if (++first == adapter->num_tx_desc)
3174 tx_buffer = &adapter->tx_buffer_area[first];
3175 tx_desc = &adapter->tx_desc_base[first];
3177 /* See if we can continue to the next packet */
3178 last = tx_buffer->next_eop;
3180 eop_desc = &adapter->tx_desc_base[last];
3181 /* Get new done point */
3182 if (++last == adapter->num_tx_desc) last = 0;
3187 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3188 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3190 adapter->next_tx_to_clean = first;
3191 adapter->num_tx_desc_avail = num_avail;
3193 #ifdef NIC_SEND_COMBINING
3194 if ((adapter->shadow_tdt & MIT_PENDING_TDT) == MIT_PENDING_TDT) {
3195 /* a tdt write is pending, do it */
3196 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0),
3197 0xffff & adapter->shadow_tdt);
3198 adapter->shadow_tdt = MIT_PENDING_INT;
3200 adapter->shadow_tdt = 0; // disable
3202 #endif /* NIC_SEND_COMBINING */
3204 * If we have enough room, clear IFF_DRV_OACTIVE to
3205 * tell the stack that it is OK to send packets.
3206 * If there are no pending descriptors, clear the watchdog.
3208 if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
3209 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3211 if (adapter->csb) { // XXX also csb_on ?
3212 adapter->csb->guest_need_txkick = 2; /* acked */
3213 // XXX memory barrier
3215 #endif /* NIC_PARAVIRT */
3216 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
3217 adapter->watchdog_check = FALSE;
3223 /*********************************************************************
3225 *  When Link is lost sometimes there is work still in the TX ring
3226 *  which may result in a watchdog, rather than allow that we do an
3227 *  attempted cleanup and then reinit here. Note that this has been
3228 *  seen mostly with fiber adapters.
3230 **********************************************************************/
/*
 * Called when the link goes down with TX work outstanding: attempt a
 * completion pass under the TX lock, and if descriptors are still
 * pending afterwards, reinitialize the adapter instead of letting the
 * watchdog fire.
 * NOTE(review): listing omits the lem_txeof() call between the lock and
 * unlock lines; code below is kept byte-identical.
 */
3232 lem_tx_purge(struct adapter *adapter)
3234 if ((!adapter->link_active) && (adapter->watchdog_check)) {
3235 EM_TX_LOCK(adapter);
3237 EM_TX_UNLOCK(adapter);
3238 if (adapter->watchdog_check) /* Still outstanding? */
3239 lem_init_locked(adapter);
3243 /*********************************************************************
3245 *  Get a buffer from system mbuf buffer pool.
3247 **********************************************************************/
/*
 * Allocate a fresh mbuf cluster for RX slot i, DMA-map it via the spare
 * map (swapped with the slot's map on success), and write its physical
 * address into the RX descriptor.
 * NOTE(review): listing omits lines (local declarations for m/map/nsegs/
 * error, return statements, and the m_freem on load failure).
 */
3249 lem_get_buf(struct adapter *adapter, int i)
3252 bus_dma_segment_t segs[1];
3254 struct em_buffer *rx_buffer;
3257 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
/* Cluster allocation failed — count it; caller keeps the old buffer. */
3259 adapter->mbuf_cluster_failed++;
3262 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Align the IP header when the frame still fits in the cluster. */
3264 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3265 m_adj(m, ETHER_ALIGN);
3268 * Using memory from the mbuf cluster pool, invoke the
3269 * bus_dma machinery to arrange the memory mapping.
3271 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3272 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3278 /* If nsegs is wrong then the stack is corrupt. */
3279 KASSERT(nsegs == 1, ("Too many segments returned!"));
3281 rx_buffer = &adapter->rx_buffer_area[i];
3282 if (rx_buffer->m_head != NULL)
3283 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap: the loaded spare map becomes the slot's map, and the slot's
 * old map becomes the new spare for the next allocation. */
3285 map = rx_buffer->map;
3286 rx_buffer->map = adapter->rx_sparemap;
3287 adapter->rx_sparemap = map;
3288 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3289 rx_buffer->m_head = m;
3291 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3295 /*********************************************************************
3297 *  Allocate memory for rx_buffer structures. Since we use one
3298 *  rx_buffer per received packet, the maximum number of rx_buffer's
3299 *  that we'll need is equal to the number of receive descriptors
3300 *  that we've allocated.
3302 **********************************************************************/
/*
 * Allocate the rx_buffer array, create the RX mbuf DMA tag (single
 * segment, one cluster max), the spare map used by lem_get_buf(), and
 * one DMA map per RX descriptor.  On failure everything is released
 * via lem_free_receive_structures().
 * NOTE(review): listing omits lines (nsegments argument, lockarg,
 * error-return statements, and the 'fail:' label).
 */
3304 lem_allocate_receive_structures(struct adapter *adapter)
3306 device_t dev = adapter->dev;
3307 struct em_buffer *rx_buffer;
3310 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3311 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3312 if (adapter->rx_buffer_area == NULL) {
3313 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3317 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3318 1, 0, /* alignment, bounds */
3319 BUS_SPACE_MAXADDR, /* lowaddr */
3320 BUS_SPACE_MAXADDR, /* highaddr */
3321 NULL, NULL, /* filter, filterarg */
3322 MCLBYTES, /* maxsize */
3324 MCLBYTES, /* maxsegsize */
3326 NULL, /* lockfunc */
3330 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3335 /* Create the spare map (used by getbuf) */
3336 error = bus_dmamap_create(adapter->rxtag, 0, &adapter->rx_sparemap);
3338 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3343 rx_buffer = adapter->rx_buffer_area;
3344 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3345 error = bus_dmamap_create(adapter->rxtag, 0, &rx_buffer->map);
3347 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* Failure path: undo all RX allocations made so far. */
3356 lem_free_receive_structures(adapter);
3360 /*********************************************************************
3362 *  (Re)initialize receive structures.
3364 **********************************************************************/
/*
 * Reset the RX ring for (re)use: zero the descriptors, free existing
 * RX mbufs, then repopulate every slot via lem_get_buf() (or, under
 * DEV_NETMAP, point descriptors at the netmap slots' buffers), and
 * sync the descriptor memory.
 * NOTE(review): listing omits lines (the #ifdef DEV_NETMAP opens, the
 * slot NULL check, error handling after lem_get_buf, and returns).
 */
3366 lem_setup_receive_structures(struct adapter *adapter)
3368 struct em_buffer *rx_buffer;
3371 /* we are already under lock */
3372 struct netmap_adapter *na = NA(adapter->ifp);
3373 struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
3376 /* Reset descriptor ring */
3377 bzero(adapter->rx_desc_base,
3378 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3380 /* Free current RX buffers. */
3381 rx_buffer = adapter->rx_buffer_area;
3382 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3383 if (rx_buffer->m_head != NULL) {
3384 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3385 BUS_DMASYNC_POSTREAD);
3386 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3387 m_freem(rx_buffer->m_head);
3388 rx_buffer->m_head = NULL;
3392 /* Allocate new ones. */
3393 for (i = 0; i < adapter->num_rx_desc; i++) {
3396 /* the i-th NIC entry goes to slot si */
3397 int si = netmap_idx_n2k(&na->rx_rings[0], i);
3401 addr = PNMB(na, slot + si, &paddr);
3402 netmap_load_map(na, adapter->rxtag, rx_buffer->map, addr);
3403 /* Update descriptor */
3404 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
3407 #endif /* DEV_NETMAP */
3408 error = lem_get_buf(adapter, i);
3413 /* Setup our descriptor pointers */
3414 adapter->next_rx_desc_to_check = 0;
3415 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3416 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3421 /*********************************************************************
3423 * Enable receive unit.
3425 **********************************************************************/
/*
 * Program the hardware receive unit: disable RX while the ring registers
 * are set up, configure interrupt moderation, ring base/length, RCTL
 * (loopback off, broadcast accept, buffer size), checksum offload, then
 * re-enable RX and initialize the head/tail pointers.
 * NOTE(review): elided chunk -- braces, some switch labels and #ifdef
 * openers are not visible here.
 */
3428 lem_initialize_receive_unit(struct adapter *adapter)
3430 struct ifnet *ifp = adapter->ifp;
3434 INIT_DEBUGOUT("lem_initialize_receive_unit: begin");
3437 * Make sure receives are disabled while setting
3438 * up the descriptor ring
3440 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3441 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
/* 82540 and newer support the absolute RX delay and ITR registers. */
3443 if (adapter->hw.mac.type >= e1000_82540) {
3444 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
3445 adapter->rx_abs_int_delay.value);
3447 * Set the interrupt throttling rate. Value is calculated
3448 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3450 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
3453 /* Setup the Base and Length of the Rx Descriptor Ring */
3454 bus_addr = adapter->rxdma.dma_paddr;
3455 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3456 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3457 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3458 (u32)(bus_addr >> 32));
3459 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3462 /* Setup the Receive Control Register */
3463 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3464 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3465 E1000_RCTL_RDMTS_HALF |
3466 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3468 /* Make sure VLAN Filters are off */
3469 rctl &= ~E1000_RCTL_VFE;
/* Store-bad-packets is required by the 82543 TBI workaround. */
3471 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3472 rctl |= E1000_RCTL_SBP;
3474 rctl &= ~E1000_RCTL_SBP;
/* Translate the configured buffer length into RCTL size bits;
 * sizes above 2048 need the BSEX (buffer size extension) bit. */
3476 switch (adapter->rx_buffer_len) {
3479 rctl |= E1000_RCTL_SZ_2048;
3482 rctl |= E1000_RCTL_SZ_4096 |
3483 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3486 rctl |= E1000_RCTL_SZ_8192 |
3487 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3490 rctl |= E1000_RCTL_SZ_16384 |
3491 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3495 if (ifp->if_mtu > ETHERMTU)
3496 rctl |= E1000_RCTL_LPE;
3498 rctl &= ~E1000_RCTL_LPE;
3500 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
3501 if ((adapter->hw.mac.type >= e1000_82543) &&
3502 (ifp->if_capenable & IFCAP_RXCSUM)) {
3503 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3504 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3505 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3508 /* Enable Receives */
3509 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3512 * Setup the HW Rx Head and
3513 * Tail Descriptor Pointers
3515 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
/* rctl is reused here as a scratch variable for the RDT value. */
3516 rctl = adapter->num_rx_desc - 1; /* default RDT value */
3518 /* preserve buffers already made available to clients */
3519 if (ifp->if_capenable & IFCAP_NETMAP)
3520 rctl -= nm_kr_rxspace(&NA(adapter->ifp)->rx_rings[0]);
3521 #endif /* DEV_NETMAP */
3522 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rctl);
3527 /*********************************************************************
3529 * Free receive related data structures.
3531 **********************************************************************/
/*
 * Tear down all RX resources: the spare DMA map, every per-slot mbuf
 * and DMA map, the rx_buffer_area array, and finally the RX DMA tag.
 * Pointers are NULLed after release so a second call is harmless.
 * NOTE(review): elided chunk -- braces and some continuation lines
 * (e.g. the rx_buffer->map argument) are not visible here.
 */
3533 lem_free_receive_structures(struct adapter *adapter)
3535 struct em_buffer *rx_buffer;
3538 INIT_DEBUGOUT("free_receive_structures: begin");
3540 if (adapter->rx_sparemap) {
3541 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3542 adapter->rx_sparemap = NULL;
3545 /* Cleanup any existing buffers */
3546 if (adapter->rx_buffer_area != NULL) {
3547 rx_buffer = adapter->rx_buffer_area;
3548 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3549 if (rx_buffer->m_head != NULL) {
/* Slot still holds an mbuf: sync, unload and free it. */
3550 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3551 BUS_DMASYNC_POSTREAD);
3552 bus_dmamap_unload(adapter->rxtag,
3554 m_freem(rx_buffer->m_head);
3555 rx_buffer->m_head = NULL;
3556 } else if (rx_buffer->map != NULL)
3557 bus_dmamap_unload(adapter->rxtag,
/* Either way, destroy the per-slot DMA map. */
3559 if (rx_buffer->map != NULL) {
3560 bus_dmamap_destroy(adapter->rxtag,
3562 rx_buffer->map = NULL;
3567 if (adapter->rx_buffer_area != NULL) {
3568 free(adapter->rx_buffer_area, M_DEVBUF);
3569 adapter->rx_buffer_area = NULL;
3572 if (adapter->rxtag != NULL) {
3573 bus_dma_tag_destroy(adapter->rxtag);
3574 adapter->rxtag = NULL;
3578 /*********************************************************************
3580 * This routine executes in interrupt context. It replenishes
3581 * the mbufs in the descriptor and sends data which has been
3582 * dma'ed into host memory to upper layer.
3584 * We loop at most count times if count is > 0, or until done if
3587 * For polling we also now return the number of cleaned packets
3588 *********************************************************************/
/*
 * RX completion processing (runs in interrupt context).  Walks the ring
 * from next_rx_desc_to_check, consuming descriptors whose DD bit is set:
 * replenishes each slot via lem_get_buf(), chains multi-descriptor
 * frames through adapter->fmp/lmp, and hands complete frames to the
 * stack via if_input (RX lock dropped around the upcall).  Stops after
 * `count` descriptors (if count > 0) or when none are ready; returns
 * TRUE if the last descriptor examined was still DD (more work may be
 * pending).  Optional paths: DEV_NETMAP (ring handed to netmap),
 * NIC_PARAVIRT (CSB-based kick suppression), BATCH_DISPATCH (deferred
 * upcalls).  NOTE(review): this chunk is heavily elided -- many braces,
 * #ifdef openers and statements are missing; do not restructure from
 * this view alone.
 */
3590 lem_rxeof(struct adapter *adapter, int count, int *done)
3592 struct ifnet *ifp = adapter->ifp;
3594 u8 status = 0, accept_frame = 0, eop = 0;
3595 u16 len, desc_len, prev_len_adj;
3597 struct e1000_rx_desc *current_desc;
3599 #ifdef BATCH_DISPATCH
3600 struct mbuf *mh = NULL, *mt = NULL;
3601 #endif /* BATCH_DISPATCH */
3604 struct paravirt_csb* csb = adapter->csb;
3605 int csb_mode = csb && csb->guest_csb_on;
3607 //ND("clear guest_rxkick at %d", adapter->next_rx_desc_to_check);
3608 if (csb_mode && csb->guest_need_rxkick)
3609 csb->guest_need_rxkick = 0;
3610 #endif /* NIC_PARAVIRT */
3611 EM_RX_LOCK(adapter);
3613 #ifdef BATCH_DISPATCH
3615 #endif /* BATCH_DISPATCH */
3616 i = adapter->next_rx_desc_to_check;
3617 current_desc = &adapter->rx_desc_base[i];
3618 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3619 BUS_DMASYNC_POSTREAD);
/* If netmap owns the ring, let it consume the interrupt and bail. */
3622 if (netmap_rx_irq(ifp, 0, &rx_sent)) {
3623 EM_RX_UNLOCK(adapter);
3626 #endif /* DEV_NETMAP */
3628 #if 1 // XXX optimization ?
/* Fast exit when the first descriptor isn't done yet. */
3629 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3632 EM_RX_UNLOCK(adapter);
/* Main consumption loop, bounded by `count` and IFF_DRV_RUNNING. */
3637 while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3638 struct mbuf *m = NULL;
3640 status = current_desc->status;
3641 if ((status & E1000_RXD_STAT_DD) == 0) {
3644 /* buffer not ready yet. Retry a few times before giving up */
3645 if (++retries <= adapter->rx_retries) {
/* Paravirt: ask the host for a kick once, then re-poll. */
3648 if (csb->guest_need_rxkick == 0) {
3649 // ND("set guest_rxkick at %d", adapter->next_rx_desc_to_check);
3650 csb->guest_need_rxkick = 1;
3651 // XXX memory barrier, status volatile ?
3652 continue; /* double check */
3655 /* no buffer ready, give up */
3656 #endif /* NIC_PARAVIRT */
3661 if (csb->guest_need_rxkick)
3662 // ND("clear again guest_rxkick at %d", adapter->next_rx_desc_to_check);
3663 csb->guest_need_rxkick = 0;
3666 #endif /* NIC_PARAVIRT */
3668 mp = adapter->rx_buffer_area[i].m_head;
3670 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3671 * needs to access the last received byte in the mbuf.
3673 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3674 BUS_DMASYNC_POSTREAD);
3678 desc_len = le16toh(current_desc->length);
/* On EOP, strip the trailing CRC; if this fragment is shorter than
 * the CRC, the remainder must be trimmed from the previous mbuf. */
3679 if (status & E1000_RXD_STAT_EOP) {
3682 if (desc_len < ETHER_CRC_LEN) {
3684 prev_len_adj = ETHER_CRC_LEN - desc_len;
3686 len = desc_len - ETHER_CRC_LEN;
/* 82543 TBI workaround: some "error" frames are still acceptable. */
3692 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3694 u32 pkt_len = desc_len;
3696 if (adapter->fmp != NULL)
3697 pkt_len += adapter->fmp->m_pkthdr.len;
3699 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3700 if (TBI_ACCEPT(&adapter->hw, status,
3701 current_desc->errors, pkt_len, last_byte,
3702 adapter->min_frame_size, adapter->max_frame_size)) {
3703 e1000_tbi_adjust_stats_82543(&adapter->hw,
3704 &adapter->stats, pkt_len,
3705 adapter->hw.mac.addr,
3706 adapter->max_frame_size);
/* Replenish this slot; on failure the frame is dropped below. */
3714 if (lem_get_buf(adapter, i) != 0) {
3719 /* Assign correct length to the current fragment */
3722 if (adapter->fmp == NULL) {
3723 mp->m_pkthdr.len = len;
3724 adapter->fmp = mp; /* Store the first mbuf */
3727 /* Chain mbuf's together */
3728 mp->m_flags &= ~M_PKTHDR;
3730 * Adjust length of previous mbuf in chain if
3731 * we received less than 4 bytes in the last
3734 if (prev_len_adj > 0) {
3735 adapter->lmp->m_len -= prev_len_adj;
3736 adapter->fmp->m_pkthdr.len -=
3739 adapter->lmp->m_next = mp;
3740 adapter->lmp = adapter->lmp->m_next;
3741 adapter->fmp->m_pkthdr.len += len;
/* Frame complete: finish the pkthdr and hand it up. */
3745 adapter->fmp->m_pkthdr.rcvif = ifp;
3747 lem_receive_checksum(adapter, current_desc,
3749 #ifndef __NO_STRICT_ALIGNMENT
3750 if (adapter->max_frame_size >
3751 (MCLBYTES - ETHER_ALIGN) &&
3752 lem_fixup_rx(adapter) != 0)
3755 if (status & E1000_RXD_STAT_VP) {
3756 adapter->fmp->m_pkthdr.ether_vtag =
3757 le16toh(current_desc->special);
3758 adapter->fmp->m_flags |= M_VLANTAG;
3760 #ifndef __NO_STRICT_ALIGNMENT
3764 adapter->fmp = NULL;
3765 adapter->lmp = NULL;
/* Drop path: recycle the mbuf in place and discard any partial chain. */
3768 adapter->dropped_pkts++;
3770 /* Reuse loaded DMA map and just update mbuf chain */
3771 mp = adapter->rx_buffer_area[i].m_head;
3772 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3773 mp->m_data = mp->m_ext.ext_buf;
3775 if (adapter->max_frame_size <=
3776 (MCLBYTES - ETHER_ALIGN))
3777 m_adj(mp, ETHER_ALIGN);
3778 if (adapter->fmp != NULL) {
3779 m_freem(adapter->fmp);
3780 adapter->fmp = NULL;
3781 adapter->lmp = NULL;
3786 /* Zero out the receive descriptors status. */
3787 current_desc->status = 0;
3788 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3789 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3793 /* the buffer at i has been already replaced by lem_get_buf()
3794 * so it is safe to set guest_rdt = i and possibly send a kick.
3795 * XXX see if we can optimize it later.
3798 // XXX memory barrier
3799 if (i == csb->host_rxkick_at)
3800 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3802 #endif /* NIC_PARAVIRT */
3803 /* Advance our pointers to the next descriptor. */
3804 if (++i == adapter->num_rx_desc)
3806 /* Call into the stack */
3808 #ifdef BATCH_DISPATCH
3809 if (adapter->batch_enable) {
3815 m->m_nextpkt = NULL;
3817 current_desc = &adapter->rx_desc_base[i];
3820 #endif /* BATCH_DISPATCH */
/* Drop the RX lock around if_input; the ring index may move while
 * unlocked, so it is re-read from next_rx_desc_to_check after. */
3821 adapter->next_rx_desc_to_check = i;
3822 EM_RX_UNLOCK(adapter);
3823 (*ifp->if_input)(ifp, m);
3824 EM_RX_LOCK(adapter);
3826 i = adapter->next_rx_desc_to_check;
3828 current_desc = &adapter->rx_desc_base[i];
3830 adapter->next_rx_desc_to_check = i;
3831 #ifdef BATCH_DISPATCH
/* Batched mode: deliver the whole queued chain unlocked at the end. */
3833 EM_RX_UNLOCK(adapter);
3834 while ( (mt = mh) != NULL) {
3836 mt->m_nextpkt = NULL;
3839 EM_RX_LOCK(adapter);
3840 i = adapter->next_rx_desc_to_check; /* in case of interrupts */
3844 #endif /* BATCH_DISPATCH */
3846 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
3848 i = adapter->num_rx_desc - 1;
3850 if (!csb_mode) /* filter out writes */
3851 #endif /* NIC_PARAVIRT */
3852 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3855 EM_RX_UNLOCK(adapter);
3856 return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
3859 #ifndef __NO_STRICT_ALIGNMENT
3861 * When jumbo frames are enabled we should realign entire payload on
3862 * architecures with strict alignment. This is serious design mistake of 8254x
3863 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3864 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3865 * payload. On architecures without strict alignment restrictions 8254x still
3866 * performs unaligned memory access which would reduce the performance too.
3867 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3868 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3869 * existing mbuf chain.
3871 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3872 * not used at all on architectures with strict alignment.
/*
 * Realign a received frame for strict-alignment architectures by
 * shifting (or prepending a new mbuf for) the Ethernet header, so the
 * payload ends up aligned.  On allocation failure the frame is dropped
 * and adapter->fmp cleared.  See the rationale comment above.
 * NOTE(review): elided chunk -- local declarations, the success path
 * and return statements are not visible here.
 */
3875 lem_fixup_rx(struct adapter *adapter)
/* Fast path: room exists in the cluster, shift the header in place. */
3882 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3883 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3884 m->m_data += ETHER_HDR_LEN;
/* Slow path: move the Ethernet header into a freshly allocated
 * mbuf and prepend it to the chain. */
3886 MGETHDR(n, M_NOWAIT, MT_DATA);
3888 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3889 m->m_data += ETHER_HDR_LEN;
3890 m->m_len -= ETHER_HDR_LEN;
3891 n->m_len = ETHER_HDR_LEN;
3892 M_MOVE_PKTHDR(n, m);
/* MGETHDR failed: count and drop the pending frame. */
3896 adapter->dropped_pkts++;
3897 m_freem(adapter->fmp);
3898 adapter->fmp = NULL;
3907 /*********************************************************************
3909 * Verify that the hardware indicated that the checksum is valid.
3910 * Inform the stack about the status of checksum so that stack
3911 * doesn't spend time verifying the checksum.
3913 *********************************************************************/
/*
 * Translate the hardware RX checksum status bits into mbuf
 * csum_flags so the stack can skip software verification.
 * Pre-82543 parts and descriptors with IXSM set report nothing.
 * NOTE(review): elided chunk -- braces/returns not visible here.
 */
3915 lem_receive_checksum(struct adapter *adapter,
3916 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3918 /* 82543 or newer only */
3919 if ((adapter->hw.mac.type < e1000_82543) ||
3920 /* Ignore Checksum bit is set */
3921 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3922 mp->m_pkthdr.csum_flags = 0;
/* IP checksum was computed by hardware. */
3926 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3928 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3929 /* IP Checksum Good */
3930 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3931 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3934 mp->m_pkthdr.csum_flags = 0;
/* TCP/UDP checksum was computed by hardware. */
3938 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3940 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3941 mp->m_pkthdr.csum_flags |=
3942 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3943 mp->m_pkthdr.csum_data = htons(0xffff);
3949 * This routine is run via an vlan
/*
 * VLAN registration event handler: record the tag in the shadow VFTA
 * bitmap and, if hardware VLAN filtering is enabled, re-init so the
 * hardware table is reloaded.  Ignores events for other interfaces and
 * out-of-range tags.  NOTE(review): the `bit` computation line is
 * elided in this chunk.
 */
3953 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3955 struct adapter *adapter = ifp->if_softc;
3958 if (ifp->if_softc != arg) /* Not our event */
3961 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
3964 EM_CORE_LOCK(adapter);
/* VFTA is an array of 32-bit words; bits 5..11 of the tag index it. */
3965 index = (vtag >> 5) & 0x7F;
3967 adapter->shadow_vfta[index] |= (1 << bit);
3968 ++adapter->num_vlans;
3969 /* Re-init to load the changes */
3970 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3971 lem_init_locked(adapter);
3972 EM_CORE_UNLOCK(adapter);
3976 * This routine is run via an vlan
/*
 * VLAN unregistration event handler: mirror of lem_register_vlan --
 * clear the tag's bit in the shadow VFTA and re-init if hardware
 * filtering is active.  NOTE(review): the `bit` computation line is
 * elided in this chunk.
 */
3980 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3982 struct adapter *adapter = ifp->if_softc;
3985 if (ifp->if_softc != arg)
3988 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3991 EM_CORE_LOCK(adapter);
3992 index = (vtag >> 5) & 0x7F;
3994 adapter->shadow_vfta[index] &= ~(1 << bit);
3995 --adapter->num_vlans;
3996 /* Re-init to load the changes */
3997 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3998 lem_init_locked(adapter);
3999 EM_CORE_UNLOCK(adapter);
/*
 * After a soft reset, repopulate the hardware VLAN filter table from
 * the shadow copy and enable VLAN tag stripping (CTRL.VME) and the
 * filter table (RCTL.VFE).  No-op when no VLANs are registered.
 */
4003 lem_setup_vlan_hw_support(struct adapter *adapter)
4005 struct e1000_hw *hw = &adapter->hw;
4009 ** We get here thru init_locked, meaning
4010 ** a soft reset, this has already cleared
4011 ** the VFTA and other state, so if there
4012 ** have been no vlan's registered do nothing.
4014 if (adapter->num_vlans == 0)
4018 ** A soft reset zero's out the VFTA, so
4019 ** we need to repopulate it now.
4021 for (int i = 0; i < EM_VFTA_SIZE; i++)
4022 if (adapter->shadow_vfta[i] != 0)
4023 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
4024 i, adapter->shadow_vfta[i]);
/* Enable hardware VLAN tag stripping on receive. */
4026 reg = E1000_READ_REG(hw, E1000_CTRL);
4027 reg |= E1000_CTRL_VME;
4028 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4030 /* Enable the Filter Table */
4031 reg = E1000_READ_REG(hw, E1000_RCTL);
4032 reg &= ~E1000_RCTL_CFIEN;
4033 reg |= E1000_RCTL_VFE;
4034 E1000_WRITE_REG(hw, E1000_RCTL, reg);
/* Unmask the standard interrupt causes by writing IMS_ENABLE_MASK
 * to the Interrupt Mask Set register. */
4038 lem_enable_intr(struct adapter *adapter)
4040 struct e1000_hw *hw = &adapter->hw;
4041 u32 ims_mask = IMS_ENABLE_MASK;
4043 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
/* Mask all interrupt causes via the Interrupt Mask Clear register. */
4047 lem_disable_intr(struct adapter *adapter)
4049 struct e1000_hw *hw = &adapter->hw;
4051 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4055 * Bit of a misnomer, what this really means is
4056 * to enable OS management of the system... aka
4057 * to disable special hardware management features
/*
 * Take ARP handling away from the management firmware while the OS
 * driver is active (see the "misnomer" note above): clear MANC.ARP_EN
 * when a management interface is present.
 */
4060 lem_init_manageability(struct adapter *adapter)
4062 /* A shared code workaround */
4063 if (adapter->has_manage) {
4064 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4065 /* disable hardware interception of ARP */
4066 manc &= ~(E1000_MANC_ARP_EN);
4067 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4072 * Give control back to hardware management
4073 * controller if there is one.
/*
 * Inverse of lem_init_manageability: hand ARP interception back to the
 * management firmware by setting MANC.ARP_EN again.
 */
4076 lem_release_manageability(struct adapter *adapter)
4078 if (adapter->has_manage) {
4079 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4081 /* re-enable hardware interception of ARP */
4082 manc |= E1000_MANC_ARP_EN;
4083 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4088 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
4089 * For ASF and Pass Through versions of f/w this means
4090 * that the driver is loaded. For AMT version type f/w
4091 * this means that the network i/f is open.
/* Assert CTRL_EXT.DRV_LOAD to tell management firmware the driver has
 * taken control of the device (see the block comment above). */
4094 lem_get_hw_control(struct adapter *adapter)
4098 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4099 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4100 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4105 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4106 * For ASF and Pass Through versions of f/w this means that
4107 * the driver is no longer loaded. For AMT versions of the
4108 * f/w this means that the network i/f is closed.
/* Deassert CTRL_EXT.DRV_LOAD, returning control to management firmware;
 * no-op when the part has no manageability support. */
4111 lem_release_hw_control(struct adapter *adapter)
4115 if (!adapter->has_manage)
4118 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4119 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4120 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
/* Reject multicast addresses (low bit of first octet set) and the
 * all-zero address.  NOTE(review): return statements are elided here. */
4125 lem_is_valid_ether_addr(u8 *addr)
4127 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4129 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4137 ** Parse the interface capabilities with regard
4138 ** to both system management and wake-on-lan for
/*
 * Read manageability and wake-on-LAN configuration from the NVM/EEPROM,
 * picking the per-MAC (and per-port on dual/quad parts) init-control
 * word, then apply board-specific quirks where the EEPROM is known to
 * be wrong.  NOTE(review): switch case labels are partly elided in
 * this chunk.
 */
4142 lem_get_wakeup(device_t dev)
4144 struct adapter *adapter = device_get_softc(dev);
4145 u16 eeprom_data = 0, device_id, apme_mask;
4147 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
4148 apme_mask = EM_EEPROM_APME;
4150 switch (adapter->hw.mac.type) {
4155 e1000_read_nvm(&adapter->hw,
4156 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
4157 apme_mask = EM_82544_APME;
4160 case e1000_82546_rev_3:
/* Dual-port parts keep port B's config in a separate NVM word. */
4161 if (adapter->hw.bus.func == 1) {
4162 e1000_read_nvm(&adapter->hw,
4163 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
4166 e1000_read_nvm(&adapter->hw,
4167 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4170 e1000_read_nvm(&adapter->hw,
4171 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4174 if (eeprom_data & apme_mask)
4175 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
4177 * We have the eeprom settings, now apply the special cases
4178 * where the eeprom may be wrong or the board won't support
4179 * wake on lan on a particular port
4181 device_id = pci_get_device(dev);
4182 switch (device_id) {
4183 case E1000_DEV_ID_82546GB_PCIE:
4186 case E1000_DEV_ID_82546EB_FIBER:
4187 case E1000_DEV_ID_82546GB_FIBER:
4188 /* Wake events only supported on port A for dual fiber
4189 * regardless of eeprom setting */
4190 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
4191 E1000_STATUS_FUNC_1)
4194 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
4195 /* if quad port adapter, disable WoL on all but port A */
4196 if (global_quad_port_a != 0)
4198 /* Reset for multiple quad port adapters */
4199 if (++global_quad_port_a == 4)
4200 global_quad_port_a = 0;
4208 * Enable PCI Wake On Lan capability
/*
 * Arm wake-on-LAN for suspend: advertise wake capability, keep the
 * laser on for fiber/serdes links, mask off wake sources the admin
 * disabled via ifconfig capenable bits, program WUC/WUFC (through the
 * PHY on pchlan), and finally set PME bits in PCI power management
 * config space.  NOTE(review): elided chunk -- some conditionals and
 * error paths are not visible here.
 */
4211 lem_enable_wakeup(device_t dev)
4213 struct adapter *adapter = device_get_softc(dev);
4214 struct ifnet *ifp = adapter->ifp;
4215 u32 pmc, ctrl, ctrl_ext, rctl;
/* No PCI power-management capability -> nothing to arm. */
4218 if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
4221 /* Advertise the wakeup capability */
4222 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4223 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
4224 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4225 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4227 /* Keep the laser running on Fiber adapters */
4228 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
4229 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
4230 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4231 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4232 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
4236 ** Determine type of Wakeup: note that wol
4237 ** is set with all bits on by default.
4239 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
4240 adapter->wol &= ~E1000_WUFC_MAG;
4242 if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
4243 adapter->wol &= ~E1000_WUFC_MC;
/* Multicast wake requires promiscuous-multicast receive. */
4245 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4246 rctl |= E1000_RCTL_MPE;
4247 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
/* pchlan parts arm wake state through the PHY instead of the MAC. */
4250 if (adapter->hw.mac.type == e1000_pchlan) {
4251 if (lem_enable_phy_wakeup(adapter))
4254 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4255 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
/* Request PME assertion from PCI power management config space. */
4260 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4261 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4262 if (ifp->if_capenable & IFCAP_WOL)
4263 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4264 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
4270 ** WOL in the newer chipset interfaces (pchlan)
4271 ** require thing to be copied into the phy
/*
 * Copy the MAC's wake-relevant state (RAR address registers, MTA
 * multicast table, receive-control settings) into the PHY's BM_*
 * shadow registers and activate host wakeup via the BM_WUC_ENABLE
 * page -- required on pchlan where the PHY handles wake events.
 * Returns nonzero on PHY access failure (see printf paths).
 * NOTE(review): elided chunk -- braces and early-return lines are
 * not all visible here.
 */
4274 lem_enable_phy_wakeup(struct adapter *adapter)
4276 struct e1000_hw *hw = &adapter->hw;
4280 /* copy MAC RARs to PHY RARs */
4281 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4282 mreg = E1000_READ_REG(hw, E1000_RAL(i));
4283 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
4284 e1000_write_phy_reg(hw, BM_RAR_M(i),
4285 (u16)((mreg >> 16) & 0xFFFF));
4286 mreg = E1000_READ_REG(hw, E1000_RAH(i));
4287 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
4288 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
4289 (u16)((mreg >> 16) & 0xFFFF));
4292 /* copy MAC MTA to PHY MTA */
4293 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4294 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4295 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4296 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4297 (u16)((mreg >> 16) & 0xFFFF));
4300 /* configure PHY Rx Control register */
/* Mirror each relevant RCTL bit into its BM_RCTL equivalent. */
4301 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
4302 mreg = E1000_READ_REG(hw, E1000_RCTL);
4303 if (mreg & E1000_RCTL_UPE)
4304 preg |= BM_RCTL_UPE;
4305 if (mreg & E1000_RCTL_MPE)
4306 preg |= BM_RCTL_MPE;
4307 preg &= ~(BM_RCTL_MO_MASK);
4308 if (mreg & E1000_RCTL_MO_3)
4309 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4310 << BM_RCTL_MO_SHIFT);
4311 if (mreg & E1000_RCTL_BAM)
4312 preg |= BM_RCTL_BAM;
4313 if (mreg & E1000_RCTL_PMCF)
4314 preg |= BM_RCTL_PMCF;
4315 mreg = E1000_READ_REG(hw, E1000_CTRL);
4316 if (mreg & E1000_CTRL_RFCE)
4317 preg |= BM_RCTL_RFCE;
4318 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
4320 /* enable PHY wakeup in MAC register */
4321 E1000_WRITE_REG(hw, E1000_WUC,
4322 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4323 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
4325 /* configure and enable PHY wakeup in PHY registers */
4326 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
4327 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4329 /* activate PHY wakeup */
/* The WUC-enable page needs the PHY semaphore and raw MDIC access. */
4330 ret = hw->phy.ops.acquire(hw);
4332 printf("Could not acquire PHY\n");
4335 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4336 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4337 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
4339 printf("Could not read PHY page 769\n");
4342 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4343 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
4345 printf("Could not set PHY Host Wakeup bit\n");
4347 hw->phy.ops.release(hw);
/*
 * led(4) callback: turn the identification LED on (setup + on) or off
 * (off + cleanup), serialized under the core lock.
 */
4353 lem_led_func(void *arg, int onoff)
4355 struct adapter *adapter = arg;
4357 EM_CORE_LOCK(adapter);
4359 e1000_setup_led(&adapter->hw);
4360 e1000_led_on(&adapter->hw);
4362 e1000_led_off(&adapter->hw);
4363 e1000_cleanup_led(&adapter->hw);
4365 EM_CORE_UNLOCK(adapter);
4368 /*********************************************************************
4369 * 82544 Coexistence issue workaround.
4370 * There are 2 issues.
4371 * 1. Transmit Hang issue.
4372 * To detect this issue, following equation can be used...
4373 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4374 * If SUM[3:0] is in between 1 to 4, we will have this issue.
4377 * To detect this issue, following equation can be used...
4378 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4379 * If SUM[3:0] is in between 9 to c, we will have this issue.
4383 * Make sure we do not have ending address
4384 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4386 *************************************************************************/
/*
 * 82544 workaround (see block comment above): if the buffer's ending
 * address modulo ((addr & 7) + (len & 0xF)) lands in a hazardous range,
 * split the buffer into two descriptors, placing the final 4 bytes in
 * their own descriptor; otherwise emit a single descriptor.  Returns
 * the number of descriptors filled (1 or 2).
 * NOTE(review): the initial guard condition line (before filling a
 * single descriptor at 4396) is elided in this chunk.
 */
4388 lem_fill_descriptors (bus_addr_t address, u32 length,
4389 PDESC_ARRAY desc_array)
4391 u32 safe_terminator;
4393 /* Since issue is sensitive to length and address.*/
4394 /* Let us first check the address...*/
4396 desc_array->descriptor[0].address = address;
4397 desc_array->descriptor[0].length = length;
4398 desc_array->elements = 1;
4399 return (desc_array->elements);
/* SUM[3:0] from the erratum: low address bits plus low length bits. */
4401 safe_terminator = (u32)((((u32)address & 0x7) +
4402 (length & 0xF)) & 0xF);
4403 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4404 if (safe_terminator == 0 ||
4405 (safe_terminator > 4 &&
4406 safe_terminator < 9) ||
4407 (safe_terminator > 0xC &&
4408 safe_terminator <= 0xF)) {
4409 desc_array->descriptor[0].address = address;
4410 desc_array->descriptor[0].length = length;
4411 desc_array->elements = 1;
4412 return (desc_array->elements);
/* Hazardous terminator: split off the last 4 bytes. */
4415 desc_array->descriptor[0].address = address;
4416 desc_array->descriptor[0].length = length - 4;
4417 desc_array->descriptor[1].address = address + (length - 4);
4418 desc_array->descriptor[1].length = 4;
4419 desc_array->elements = 2;
4420 return (desc_array->elements);
4423 /**********************************************************************
4425 * Update the board statistics counters.
4427 **********************************************************************/
/*
 * Accumulate the hardware's clear-on-read statistics registers into
 * adapter->stats and derive the ifnet error/collision counters.
 * Symbol/sequence errors are only meaningful on copper or when link
 * is up, hence the guard.
 */
4429 lem_update_stats_counters(struct adapter *adapter)
4433 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
4434 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4435 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4436 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4438 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4439 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4440 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4441 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4443 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4444 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4445 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4446 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4447 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4448 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4449 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4450 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4451 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4452 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4453 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4454 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4455 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4456 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4457 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4458 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4459 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4460 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4461 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4462 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4464 /* For the 64-bit byte counters the low dword must be read first. */
4465 /* Both registers clear on the read of the high dword */
4467 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
4468 ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
4469 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
4470 ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
4472 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
4473 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
4474 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
4475 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
4476 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
/* NOTE(review): only the high dwords of TOR/TOT are read here, unlike
 * GORC/GOTC above -- confirm this matches the elided lines / intent. */
4478 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
4479 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
4481 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
4482 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
4483 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
4484 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
4485 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
4486 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
4487 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
4488 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
4489 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
4490 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* Counters that only exist on 82543 and newer MACs. */
4492 if (adapter->hw.mac.type >= e1000_82543) {
4493 adapter->stats.algnerrc +=
4494 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
4495 adapter->stats.rxerrc +=
4496 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
4497 adapter->stats.tncrs +=
4498 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
4499 adapter->stats.cexterr +=
4500 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
4501 adapter->stats.tsctc +=
4502 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
4503 adapter->stats.tsctfc +=
4504 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
/* Fold hardware stats into the ifnet counters the stack reports. */
4508 ifp->if_collisions = adapter->stats.colc;
4511 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
4512 adapter->stats.crcerrs + adapter->stats.algnerrc +
4513 adapter->stats.ruc + adapter->stats.roc +
4514 adapter->stats.mpc + adapter->stats.cexterr;
4517 ifp->if_oerrors = adapter->stats.ecol +
4518 adapter->stats.latecol + adapter->watchdog_events;
4521 /* Export a single 32-bit register via a read-only sysctl. */
/*
 * Generic read-only sysctl handler: oid_arg1 is the adapter, oid_arg2
 * the register offset; reads the register and returns it as an int.
 */
4523 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4525 struct adapter *adapter;
4528 adapter = oidp->oid_arg1;
4529 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4530 return (sysctl_handle_int(oidp, &val, 0, req));
4534 * Add sysctl variables, one per statistic, to the system.
4537 lem_add_hw_stats(struct adapter *adapter)
4539 device_t dev = adapter->dev;
4541 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4542 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4543 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4544 struct e1000_hw_stats *stats = &adapter->stats;
4546 struct sysctl_oid *stat_node;
4547 struct sysctl_oid_list *stat_list;
4549 /* Driver Statistics */
4550 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
4551 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4552 "Std mbuf cluster failed");
4553 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_fail",
4554 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4555 "Defragmenting mbuf chain failed");
4556 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4557 CTLFLAG_RD, &adapter->dropped_pkts,
4558 "Driver dropped packets");
4559 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
4560 CTLFLAG_RD, &adapter->no_tx_dma_setup,
4561 "Driver tx dma failure in xmit");
4562 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4563 CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4564 "Not enough tx descriptors failure in xmit");
4565 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4566 CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4567 "Not enough tx descriptors failure in xmit");
4568 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4569 CTLFLAG_RD, &adapter->rx_overruns,
4571 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4572 CTLFLAG_RD, &adapter->watchdog_events,
4573 "Watchdog timeouts");
4575 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4576 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4577 lem_sysctl_reg_handler, "IU",
4578 "Device Control Register");
4579 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4580 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4581 lem_sysctl_reg_handler, "IU",
4582 "Receiver Control Register");
4583 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4584 CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4585 "Flow Control High Watermark");
4586 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4587 CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4588 "Flow Control Low Watermark");
4589 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4590 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4591 "TX FIFO workaround events");
4592 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4593 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4596 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head",
4597 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4598 lem_sysctl_reg_handler, "IU",
4599 "Transmit Descriptor Head");
4600 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail",
4601 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4602 lem_sysctl_reg_handler, "IU",
4603 "Transmit Descriptor Tail");
4604 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head",
4605 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4606 lem_sysctl_reg_handler, "IU",
4607 "Receive Descriptor Head");
4608 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail",
4609 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4610 lem_sysctl_reg_handler, "IU",
4611 "Receive Descriptor Tail");
4614 /* MAC stats get their own sub node */
4616 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4617 CTLFLAG_RD, NULL, "Statistics");
4618 stat_list = SYSCTL_CHILDREN(stat_node);
4620 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4621 CTLFLAG_RD, &stats->ecol,
4622 "Excessive collisions");
4623 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4624 CTLFLAG_RD, &stats->scc,
4625 "Single collisions");
4626 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4627 CTLFLAG_RD, &stats->mcc,
4628 "Multiple collisions");
4629 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4630 CTLFLAG_RD, &stats->latecol,
4632 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4633 CTLFLAG_RD, &stats->colc,
4635 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4636 CTLFLAG_RD, &adapter->stats.symerrs,
4638 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4639 CTLFLAG_RD, &adapter->stats.sec,
4641 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4642 CTLFLAG_RD, &adapter->stats.dc,
4644 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4645 CTLFLAG_RD, &adapter->stats.mpc,
4647 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4648 CTLFLAG_RD, &adapter->stats.rnbc,
4649 "Receive No Buffers");
4650 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4651 CTLFLAG_RD, &adapter->stats.ruc,
4652 "Receive Undersize");
4653 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4654 CTLFLAG_RD, &adapter->stats.rfc,
4655 "Fragmented Packets Received ");
4656 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4657 CTLFLAG_RD, &adapter->stats.roc,
4658 "Oversized Packets Received");
4659 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4660 CTLFLAG_RD, &adapter->stats.rjc,
4662 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4663 CTLFLAG_RD, &adapter->stats.rxerrc,
4665 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4666 CTLFLAG_RD, &adapter->stats.crcerrs,
4668 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4669 CTLFLAG_RD, &adapter->stats.algnerrc,
4670 "Alignment Errors");
4671 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4672 CTLFLAG_RD, &adapter->stats.cexterr,
4673 "Collision/Carrier extension errors");
4674 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4675 CTLFLAG_RD, &adapter->stats.xonrxc,
4677 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4678 CTLFLAG_RD, &adapter->stats.xontxc,
4680 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4681 CTLFLAG_RD, &adapter->stats.xoffrxc,
4683 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4684 CTLFLAG_RD, &adapter->stats.xofftxc,
4685 "XOFF Transmitted");
4687 /* Packet Reception Stats */
4688 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4689 CTLFLAG_RD, &adapter->stats.tpr,
4690 "Total Packets Received ");
4691 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4692 CTLFLAG_RD, &adapter->stats.gprc,
4693 "Good Packets Received");
4694 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4695 CTLFLAG_RD, &adapter->stats.bprc,
4696 "Broadcast Packets Received");
4697 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4698 CTLFLAG_RD, &adapter->stats.mprc,
4699 "Multicast Packets Received");
4700 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4701 CTLFLAG_RD, &adapter->stats.prc64,
4702 "64 byte frames received ");
4703 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4704 CTLFLAG_RD, &adapter->stats.prc127,
4705 "65-127 byte frames received");
4706 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4707 CTLFLAG_RD, &adapter->stats.prc255,
4708 "128-255 byte frames received");
4709 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4710 CTLFLAG_RD, &adapter->stats.prc511,
4711 "256-511 byte frames received");
4712 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4713 CTLFLAG_RD, &adapter->stats.prc1023,
4714 "512-1023 byte frames received");
4715 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4716 CTLFLAG_RD, &adapter->stats.prc1522,
4717 "1023-1522 byte frames received");
4718 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4719 CTLFLAG_RD, &adapter->stats.gorc,
4720 "Good Octets Received");
4722 /* Packet Transmission Stats */
4723 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4724 CTLFLAG_RD, &adapter->stats.gotc,
4725 "Good Octets Transmitted");
4726 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4727 CTLFLAG_RD, &adapter->stats.tpt,
4728 "Total Packets Transmitted");
4729 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4730 CTLFLAG_RD, &adapter->stats.gptc,
4731 "Good Packets Transmitted");
4732 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4733 CTLFLAG_RD, &adapter->stats.bptc,
4734 "Broadcast Packets Transmitted");
4735 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4736 CTLFLAG_RD, &adapter->stats.mptc,
4737 "Multicast Packets Transmitted");
4738 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4739 CTLFLAG_RD, &adapter->stats.ptc64,
4740 "64 byte frames transmitted ");
4741 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4742 CTLFLAG_RD, &adapter->stats.ptc127,
4743 "65-127 byte frames transmitted");
4744 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4745 CTLFLAG_RD, &adapter->stats.ptc255,
4746 "128-255 byte frames transmitted");
4747 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4748 CTLFLAG_RD, &adapter->stats.ptc511,
4749 "256-511 byte frames transmitted");
4750 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4751 CTLFLAG_RD, &adapter->stats.ptc1023,
4752 "512-1023 byte frames transmitted");
4753 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4754 CTLFLAG_RD, &adapter->stats.ptc1522,
4755 "1024-1522 byte frames transmitted");
4756 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4757 CTLFLAG_RD, &adapter->stats.tsctc,
4758 "TSO Contexts Transmitted");
4759 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4760 CTLFLAG_RD, &adapter->stats.tsctfc,
4761 "TSO Contexts Failed");
4764 /**********************************************************************
4766 * This routine provides a way to dump out the adapter eeprom,
4767 * often a useful debug/service tool. This only dumps the first
4768 * 32 words, stuff that matters is in that extent.
4770 **********************************************************************/
4773 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4775 struct adapter *adapter;
4780 error = sysctl_handle_int(oidp, &result, 0, req);
4782 if (error || !req->newptr)
4786 * This value will cause a hex dump of the
4787 * first 32 16-bit words of the EEPROM to
4791 adapter = (struct adapter *)arg1;
4792 lem_print_nvm_info(adapter);
4799 lem_print_nvm_info(struct adapter *adapter)
4804 /* Its a bit crude, but it gets the job done */
4805 printf("\nInterface EEPROM Dump:\n");
4806 printf("Offset\n0x0000 ");
4807 for (i = 0, j = 0; i < 32; i++, j++) {
4808 if (j == 8) { /* Make the offset block */
4810 printf("\n0x00%x0 ",row);
4812 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4813 printf("%04x ", eeprom_data);
4819 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4821 struct em_int_delay_info *info;
4822 struct adapter *adapter;
4828 info = (struct em_int_delay_info *)arg1;
4829 usecs = info->value;
4830 error = sysctl_handle_int(oidp, &usecs, 0, req);
4831 if (error != 0 || req->newptr == NULL)
4833 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4835 info->value = usecs;
4836 ticks = EM_USECS_TO_TICKS(usecs);
4837 if (info->offset == E1000_ITR) /* units are 256ns here */
4840 adapter = info->adapter;
4842 EM_CORE_LOCK(adapter);
4843 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4844 regval = (regval & ~0xffff) | (ticks & 0xffff);
4845 /* Handle a few special cases. */
4846 switch (info->offset) {
4851 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4852 /* Don't write 0 into the TIDV register. */
4855 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4858 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4859 EM_CORE_UNLOCK(adapter);
4864 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4865 const char *description, struct em_int_delay_info *info,
4866 int offset, int value)
4868 info->adapter = adapter;
4869 info->offset = offset;
4870 info->value = value;
4871 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4872 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4873 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4874 info, 0, lem_sysctl_int_delay, "I", description);
4878 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4879 const char *description, int *limit, int value)
4882 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4883 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4884 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
4888 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4889 const char *description, int *limit, int value)
4892 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4893 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4894 OID_AUTO, name, CTLFLAG_RW, limit, value, description);