1 /******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
38 #include "opt_inet6.h"
41 #include <sys/param.h>
42 #include <sys/systm.h>
44 #include <sys/endian.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/malloc.h>
49 #include <sys/module.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <sys/taskqueue.h>
55 #include <sys/eventhandler.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <net/ethernet.h>
62 #include <net/if_arp.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
66 #include <net/if_types.h>
67 #include <net/if_vlan_var.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/in.h>
71 #include <netinet/if_ether.h>
72 #include <netinet/ip.h>
73 #include <netinet/ip6.h>
74 #include <netinet/tcp.h>
75 #include <netinet/udp.h>
77 #include <machine/in_cksum.h>
78 #include <dev/led/led.h>
79 #include <dev/pci/pcivar.h>
80 #include <dev/pci/pcireg.h>
82 #include "e1000_api.h"
85 /*********************************************************************
86 * Legacy Em Driver version:
87 *********************************************************************/
/* Version string reported by the legacy em ("lem") driver. */
88 char lem_driver_version[] = "1.0.5";
90 /*********************************************************************
93 * Used by probe to select devices to load on
94 * Last field stores an index into e1000_strings
95 * Last entry must be all 0s
97 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
98 *********************************************************************/
/*
 * PCI ID match table consumed by lem_probe(): one row per supported
 * 82540/41/42/43/44/45/46/47 part.  PCI_ANY_ID wildcards the subsystem
 * IDs; the trailing 0 is an index into lem_strings[].
 * NOTE(review): this listing is missing the initializer's opening brace
 * (orig line 101) and the terminating all-zeros sentinel plus closing
 * "};" -- verify against the complete if_lem.c.
 */
100 static em_vendor_info_t lem_vendor_info_array[] =
102 /* Intel(R) PRO/1000 Network Connection */
103 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
104 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
105 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
106 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
107 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
109 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
110 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
111 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
112 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
113 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
128 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
142 PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
147 /* required last entry */
151 /*********************************************************************
152 * Table of branding strings for all supported NICs.
153 *********************************************************************/
/*
 * Branding string(s) indexed by the last field of lem_vendor_info_array.
 * NOTE(review): the closing "};" (and any further entries) is missing
 * from this listing.
 */
155 static char *lem_strings[] = {
156 "Intel(R) PRO/1000 Legacy Network Connection"
159 /*********************************************************************
160 * Function prototypes
161 *********************************************************************/
162 static int lem_probe(device_t);
163 static int lem_attach(device_t);
164 static int lem_detach(device_t);
165 static int lem_shutdown(device_t);
166 static int lem_suspend(device_t);
167 static int lem_resume(device_t);
168 static void lem_start(struct ifnet *);
169 static void lem_start_locked(struct ifnet *ifp);
170 static int lem_ioctl(struct ifnet *, u_long, caddr_t);
171 static void lem_init(void *);
172 static void lem_init_locked(struct adapter *);
173 static void lem_stop(void *);
174 static void lem_media_status(struct ifnet *, struct ifmediareq *);
175 static int lem_media_change(struct ifnet *);
176 static void lem_identify_hardware(struct adapter *);
177 static int lem_allocate_pci_resources(struct adapter *);
178 static int lem_allocate_irq(struct adapter *adapter);
179 static void lem_free_pci_resources(struct adapter *);
180 static void lem_local_timer(void *);
181 static int lem_hardware_init(struct adapter *);
182 static int lem_setup_interface(device_t, struct adapter *);
183 static void lem_setup_transmit_structures(struct adapter *);
184 static void lem_initialize_transmit_unit(struct adapter *);
185 static int lem_setup_receive_structures(struct adapter *);
186 static void lem_initialize_receive_unit(struct adapter *);
187 static void lem_enable_intr(struct adapter *);
188 static void lem_disable_intr(struct adapter *);
189 static void lem_free_transmit_structures(struct adapter *);
190 static void lem_free_receive_structures(struct adapter *);
191 static void lem_update_stats_counters(struct adapter *);
192 static void lem_add_hw_stats(struct adapter *adapter);
193 static void lem_txeof(struct adapter *);
194 static void lem_tx_purge(struct adapter *);
195 static int lem_allocate_receive_structures(struct adapter *);
196 static int lem_allocate_transmit_structures(struct adapter *);
197 static bool lem_rxeof(struct adapter *, int, int *);
198 #ifndef __NO_STRICT_ALIGNMENT
199 static int lem_fixup_rx(struct adapter *);
201 static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
203 static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
205 static void lem_set_promisc(struct adapter *);
206 static void lem_disable_promisc(struct adapter *);
207 static void lem_set_multi(struct adapter *);
208 static void lem_update_link_status(struct adapter *);
209 static int lem_get_buf(struct adapter *, int);
210 static void lem_register_vlan(void *, struct ifnet *, u16);
211 static void lem_unregister_vlan(void *, struct ifnet *, u16);
212 static void lem_setup_vlan_hw_support(struct adapter *);
213 static int lem_xmit(struct adapter *, struct mbuf **);
214 static void lem_smartspeed(struct adapter *);
215 static int lem_82547_fifo_workaround(struct adapter *, int);
216 static void lem_82547_update_fifo_head(struct adapter *, int);
217 static int lem_82547_tx_fifo_reset(struct adapter *);
218 static void lem_82547_move_tail(void *);
219 static int lem_dma_malloc(struct adapter *, bus_size_t,
220 struct em_dma_alloc *, int);
221 static void lem_dma_free(struct adapter *, struct em_dma_alloc *);
222 static int lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
223 static void lem_print_nvm_info(struct adapter *);
224 static int lem_is_valid_ether_addr(u8 *);
225 static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
226 PDESC_ARRAY desc_array);
227 static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
228 static void lem_add_int_delay_sysctl(struct adapter *, const char *,
229 const char *, struct em_int_delay_info *, int, int);
230 static void lem_set_flow_cntrl(struct adapter *, const char *,
231 const char *, int *, int);
232 /* Management and WOL Support */
233 static void lem_init_manageability(struct adapter *);
234 static void lem_release_manageability(struct adapter *);
235 static void lem_get_hw_control(struct adapter *);
236 static void lem_release_hw_control(struct adapter *);
237 static void lem_get_wakeup(device_t);
238 static void lem_enable_wakeup(device_t);
239 static int lem_enable_phy_wakeup(struct adapter *);
240 static void lem_led_func(void *, int);
242 static void lem_intr(void *);
243 static int lem_irq_fast(void *);
244 static void lem_handle_rxtx(void *context, int pending);
245 static void lem_handle_link(void *context, int pending);
246 static void lem_add_rx_process_limit(struct adapter *, const char *,
247 const char *, int *, int);
249 #ifdef DEVICE_POLLING
250 static poll_handler_t lem_poll;
253 /*********************************************************************
254 * FreeBSD Device Interface Entry Points
255 *********************************************************************/
257 static device_method_t lem_methods[] = {
258 /* Device interface */
259 DEVMETHOD(device_probe, lem_probe),
260 DEVMETHOD(device_attach, lem_attach),
261 DEVMETHOD(device_detach, lem_detach),
262 DEVMETHOD(device_shutdown, lem_shutdown),
263 DEVMETHOD(device_suspend, lem_suspend),
264 DEVMETHOD(device_resume, lem_resume),
268 static driver_t lem_driver = {
269 "em", lem_methods, sizeof(struct adapter),
272 extern devclass_t em_devclass;
273 DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
274 MODULE_DEPEND(lem, pci, 1, 1, 1);
275 MODULE_DEPEND(lem, ether, 1, 1, 1);
277 /*********************************************************************
278 * Tunable default values.
279 *********************************************************************/
281 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
282 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
284 static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
285 static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
286 static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
287 static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
288 static int lem_rxd = EM_DEFAULT_RXD;
289 static int lem_txd = EM_DEFAULT_TXD;
290 static int lem_smart_pwr_down = FALSE;
292 /* Controls whether promiscuous also shows bad packets */
293 static int lem_debug_sbp = FALSE;
295 TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
296 TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
297 TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
298 TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
299 TUNABLE_INT("hw.em.rxd", &lem_rxd);
300 TUNABLE_INT("hw.em.txd", &lem_txd);
301 TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
302 TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
304 /* Interrupt style - default to fast */
305 static int lem_use_legacy_irq = 0;
306 TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq);
308 /* How many packets rxeof tries to clean at a time */
309 static int lem_rx_process_limit = 100;
310 TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
312 /* Flow control setting - default to FULL */
313 static int lem_fc_setting = e1000_fc_full;
314 TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
316 /* Global used in WOL setup with multiport cards */
317 static int global_quad_port_a = 0;
319 #ifdef DEV_NETMAP /* see ixgbe.c for details */
320 #include <dev/netmap/if_lem_netmap.h>
321 #endif /* DEV_NETMAP */
323 /*********************************************************************
324 * Device identification routine
326 * em_probe determines if the driver should be loaded on
327 * adapter based on PCI vendor/device id of the adapter.
329 * return BUS_PROBE_DEFAULT on success, positive on failure
330 *********************************************************************/
/*
 * Device probe method: walk lem_vendor_info_array looking for a row
 * whose vendor/device IDs match this PCI device (subsystem IDs match
 * exactly or via the PCI_ANY_ID wildcard).  On a hit, build the
 * description from lem_strings[] and return BUS_PROBE_DEFAULT.
 * NOTE(review): this listing omits the function's storage class /
 * return type line, braces, and the non-match return path (presumably
 * ENXIO) -- confirm against the full source.
 */
333 lem_probe(device_t dev)
335 char adapter_name[60];
336 u16 pci_vendor_id = 0;
337 u16 pci_device_id = 0;
338 u16 pci_subvendor_id = 0;
339 u16 pci_subdevice_id = 0;
340 em_vendor_info_t *ent;
342 INIT_DEBUGOUT("em_probe: begin");
/* Cheap vendor-ID reject before scanning the whole table. */
344 pci_vendor_id = pci_get_vendor(dev);
345 if (pci_vendor_id != EM_VENDOR_ID)
348 pci_device_id = pci_get_device(dev);
349 pci_subvendor_id = pci_get_subvendor(dev);
350 pci_subdevice_id = pci_get_subdevice(dev);
/* Table is terminated by an entry with vendor_id == 0. */
352 ent = lem_vendor_info_array;
353 while (ent->vendor_id != 0) {
354 if ((pci_vendor_id == ent->vendor_id) &&
355 (pci_device_id == ent->device_id) &&
357 ((pci_subvendor_id == ent->subvendor_id) ||
358 (ent->subvendor_id == PCI_ANY_ID)) &&
360 ((pci_subdevice_id == ent->subdevice_id) ||
361 (ent->subdevice_id == PCI_ANY_ID))) {
362 sprintf(adapter_name, "%s %s",
363 lem_strings[ent->index],
365 device_set_desc_copy(dev, adapter_name);
366 return (BUS_PROBE_DEFAULT);
374 /*********************************************************************
375 * Device initialization routine
377 * The attach entry point is called when the driver is being loaded.
378 * This routine identifies the type of hardware, allocates all resources
379 * and initializes the hardware.
381 * return 0 on success, positive on failure
382 *********************************************************************/
/*
 * Device attach method: identify the MAC, allocate PCI and DMA
 * resources, read and validate the EEPROM/NVM, initialize the
 * hardware, set up the ifnet, IRQ, sysctls, VLAN event handlers and
 * LED, then leave the interface marked not-running.
 * NOTE(review): this listing drops many original lines -- error
 * "goto err_*" statements after the device_printf() calls, the err_*
 * cleanup labels near the end, and most braces/returns.  The tail
 * (orig lines 659-674) is the error-unwind path, not fall-through
 * code.  Verify against the complete if_lem.c.
 */
385 lem_attach(device_t dev)
387 struct adapter *adapter;
391 INIT_DEBUGOUT("lem_attach: begin");
393 adapter = device_get_softc(dev);
394 adapter->dev = adapter->osdep.dev = dev;
/* Core/TX/RX mutexes must exist before the callouts below bind to them. */
395 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
396 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
397 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
400 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
401 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
402 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
403 lem_sysctl_nvm_info, "I", "NVM Information");
405 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
406 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
408 /* Determine hardware and mac info */
409 lem_identify_hardware(adapter);
411 /* Setup PCI resources */
412 if (lem_allocate_pci_resources(adapter)) {
413 device_printf(dev, "Allocation of PCI resources failed\n");
418 /* Do Shared Code initialization */
419 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
420 device_printf(dev, "Setup of Shared code failed\n");
425 e1000_get_bus_info(&adapter->hw);
427 /* Set up some sysctls for the tunable interrupt delays */
428 lem_add_int_delay_sysctl(adapter, "rx_int_delay",
429 "receive interrupt delay in usecs", &adapter->rx_int_delay,
430 E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
431 lem_add_int_delay_sysctl(adapter, "tx_int_delay",
432 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
433 E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
/* RADV/TADV absolute-delay registers only exist on 82540 and later. */
434 if (adapter->hw.mac.type >= e1000_82540) {
435 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
436 "receive interrupt delay limit in usecs",
437 &adapter->rx_abs_int_delay,
438 E1000_REGISTER(&adapter->hw, E1000_RADV),
439 lem_rx_abs_int_delay_dflt);
440 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
441 "transmit interrupt delay limit in usecs",
442 &adapter->tx_abs_int_delay,
443 E1000_REGISTER(&adapter->hw, E1000_TADV),
444 lem_tx_abs_int_delay_dflt);
447 /* Sysctls for limiting the amount of work done in the taskqueue */
448 lem_add_rx_process_limit(adapter, "rx_processing_limit",
449 "max number of rx packets to process", &adapter->rx_process_limit,
450 lem_rx_process_limit);
452 /* Sysctl for setting the interface flow control */
453 lem_set_flow_cntrl(adapter, "flow_control",
454 "flow control setting",
455 &adapter->fc_setting, lem_fc_setting);
458 * Validate number of transmit and receive descriptors. It
459 * must not exceed hardware maximum, and must be multiple
460 * of E1000_DBA_ALIGN.
/* Invalid tunables fall back to the compile-time defaults with a warning. */
462 if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
463 (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
464 (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
465 (lem_txd < EM_MIN_TXD)) {
466 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
467 EM_DEFAULT_TXD, lem_txd);
468 adapter->num_tx_desc = EM_DEFAULT_TXD;
470 adapter->num_tx_desc = lem_txd;
471 if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
472 (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
473 (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
474 (lem_rxd < EM_MIN_RXD)) {
475 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
476 EM_DEFAULT_RXD, lem_rxd);
477 adapter->num_rx_desc = EM_DEFAULT_RXD;
479 adapter->num_rx_desc = lem_rxd;
481 adapter->hw.mac.autoneg = DO_AUTO_NEG;
482 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
483 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
484 adapter->rx_buffer_len = 2048;
486 e1000_init_script_state_82541(&adapter->hw, TRUE);
487 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
/* Copper-only PHY knobs; fiber/serdes parts skip these. */
490 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
491 adapter->hw.phy.mdix = AUTO_ALL_MODES;
492 adapter->hw.phy.disable_polarity_correction = FALSE;
493 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
497 * Set the frame limits assuming
498 * standard ethernet sized frames.
500 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
501 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
504 * This controls when hardware reports transmit completion
507 adapter->hw.mac.report_tx_early = 1;
509 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
512 /* Allocate Transmit Descriptor ring */
513 if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
514 device_printf(dev, "Unable to allocate tx_desc memory\n");
518 adapter->tx_desc_base =
519 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
521 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
524 /* Allocate Receive Descriptor ring */
525 if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
526 device_printf(dev, "Unable to allocate rx_desc memory\n");
530 adapter->rx_desc_base =
531 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
533 /* Allocate multicast array memory. */
534 adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
535 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
536 if (adapter->mta == NULL) {
537 device_printf(dev, "Can not allocate multicast setup array\n");
543 ** Start from a known state, this is
544 ** important in reading the nvm and
547 e1000_reset_hw(&adapter->hw);
549 /* Make sure we have a good EEPROM before we read from it */
550 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
552 ** Some PCI-E parts fail the first check due to
553 ** the link being in sleep state, call it again,
554 ** if it fails a second time its a real issue.
556 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
558 "The EEPROM Checksum Is Not Valid\n");
564 /* Copy the permanent MAC address out of the EEPROM */
565 if (e1000_read_mac_addr(&adapter->hw) < 0) {
566 device_printf(dev, "EEPROM read error while reading MAC"
572 if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
573 device_printf(dev, "Invalid MAC address\n");
578 /* Initialize the hardware */
579 if (lem_hardware_init(adapter)) {
580 device_printf(dev, "Unable to initialize the hardware\n");
585 /* Allocate transmit descriptors and buffers */
586 if (lem_allocate_transmit_structures(adapter)) {
587 device_printf(dev, "Could not setup transmit structures\n");
592 /* Allocate receive descriptors and buffers */
593 if (lem_allocate_receive_structures(adapter)) {
594 device_printf(dev, "Could not setup receive structures\n");
600 ** Do interrupt configuration
602 error = lem_allocate_irq(adapter);
607 * Get Wake-on-Lan and Management info for later use
611 /* Setup OS specific network interface */
612 if (lem_setup_interface(dev, adapter) != 0)
615 /* Initialize statistics */
616 lem_update_stats_counters(adapter);
618 adapter->hw.mac.get_link_status = 1;
619 lem_update_link_status(adapter);
621 /* Indicate SOL/IDER usage */
622 if (e1000_check_reset_block(&adapter->hw))
624 "PHY reset is blocked due to SOL/IDER session.\n");
626 /* Do we need workaround for 82544 PCI-X adapter? */
627 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
628 adapter->hw.mac.type == e1000_82544)
629 adapter->pcix_82544 = TRUE;
631 adapter->pcix_82544 = FALSE;
633 /* Register for VLAN events */
634 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
635 lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
636 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
637 lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
639 lem_add_hw_stats(adapter);
641 /* Non-AMT based hardware can now take control from firmware */
642 if (adapter->has_manage && !adapter->has_amt)
643 lem_get_hw_control(adapter);
645 /* Tell the stack that the interface is not active */
646 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
648 adapter->led_dev = led_create(lem_led_func, adapter,
649 device_get_nameunit(dev));
652 lem_netmap_attach(adapter);
653 #endif /* DEV_NETMAP */
654 INIT_DEBUGOUT("lem_attach: end");
/* Error-unwind path (labels omitted in this listing): release in
 * reverse order of acquisition. */
659 lem_free_transmit_structures(adapter);
662 lem_release_hw_control(adapter);
663 lem_dma_free(adapter, &adapter->rxdma);
665 lem_dma_free(adapter, &adapter->txdma);
668 if (adapter->ifp != NULL)
669 if_free(adapter->ifp);
670 lem_free_pci_resources(adapter);
671 free(adapter->mta, M_DEVBUF);
672 EM_TX_LOCK_DESTROY(adapter);
673 EM_RX_LOCK_DESTROY(adapter);
674 EM_CORE_LOCK_DESTROY(adapter);
679 /*********************************************************************
680 * Device removal routine
682 * The detach entry point is called when the driver is being removed.
683 * This routine stops the adapter and deallocates all the resources
684 * that were allocated for driver operation.
686 * return 0 on success, positive on failure
687 *********************************************************************/
/*
 * Device detach method: refuse if a VLAN trunk is still attached,
 * then stop the adapter, unhook event handlers and callouts, detach
 * the ifnet, and free all DMA rings, buffers and locks allocated in
 * lem_attach() (mirror order of attach).
 * NOTE(review): this listing omits the EBUSY return after the "Vlan
 * in use" message, the lem_stop() call under the locks, the netmap
 * detach (before orig line 734's #endif), and the final return --
 * verify against the complete if_lem.c.
 */
690 lem_detach(device_t dev)
692 struct adapter *adapter = device_get_softc(dev);
693 struct ifnet *ifp = adapter->ifp;
695 INIT_DEBUGOUT("em_detach: begin");
697 /* Make sure VLANS are not using driver */
698 if (adapter->ifp->if_vlantrunk != NULL) {
699 device_printf(dev,"Vlan in use, detach first\n");
703 #ifdef DEVICE_POLLING
704 if (ifp->if_capenable & IFCAP_POLLING)
705 ether_poll_deregister(ifp);
708 if (adapter->led_dev != NULL)
709 led_destroy(adapter->led_dev);
711 EM_CORE_LOCK(adapter);
/* Flag detach-in-progress so concurrent ioctls bail out early. */
713 adapter->in_detach = 1;
715 e1000_phy_hw_reset(&adapter->hw);
717 lem_release_manageability(adapter);
719 EM_TX_UNLOCK(adapter);
720 EM_CORE_UNLOCK(adapter);
722 /* Unregister VLAN events */
723 if (adapter->vlan_attach != NULL)
724 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
725 if (adapter->vlan_detach != NULL)
726 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* ether_ifdetach() before draining callouts so no new work arrives. */
728 ether_ifdetach(adapter->ifp);
729 callout_drain(&adapter->timer);
730 callout_drain(&adapter->tx_fifo_timer);
734 #endif /* DEV_NETMAP */
735 lem_free_pci_resources(adapter);
736 bus_generic_detach(dev);
739 lem_free_transmit_structures(adapter);
740 lem_free_receive_structures(adapter);
742 /* Free Transmit Descriptor ring */
743 if (adapter->tx_desc_base) {
744 lem_dma_free(adapter, &adapter->txdma);
745 adapter->tx_desc_base = NULL;
748 /* Free Receive Descriptor ring */
749 if (adapter->rx_desc_base) {
750 lem_dma_free(adapter, &adapter->rxdma);
751 adapter->rx_desc_base = NULL;
754 lem_release_hw_control(adapter);
755 free(adapter->mta, M_DEVBUF);
756 EM_TX_LOCK_DESTROY(adapter);
757 EM_RX_LOCK_DESTROY(adapter);
758 EM_CORE_LOCK_DESTROY(adapter);
763 /*********************************************************************
765 * Shutdown entry point
767 **********************************************************************/
/*
 * Shutdown method: identical requirements to suspend (quiesce the
 * adapter and arm wake-up), so simply delegate to lem_suspend().
 */
770 lem_shutdown(device_t dev)
772 return lem_suspend(dev);
776 * Suspend/resume device methods.
/*
 * Suspend method: under the core lock, release manageability and
 * firmware control and enable Wake-on-LAN, then let the bus layer
 * finish the suspend.
 * NOTE(review): the lem_stop() call present in the full source
 * (between the lock and the release calls) is missing from this
 * listing.
 */
779 lem_suspend(device_t dev)
781 struct adapter *adapter = device_get_softc(dev);
783 EM_CORE_LOCK(adapter);
785 lem_release_manageability(adapter);
786 lem_release_hw_control(adapter);
787 lem_enable_wakeup(dev);
789 EM_CORE_UNLOCK(adapter);
791 return bus_generic_suspend(dev);
/*
 * Resume method: re-run full hardware/software init and restore
 * manageability under the core lock, then hand off to the bus layer.
 * NOTE(review): the restart of the transmit path (lem_start on ifp)
 * between unlock and return appears to be omitted from this listing
 * -- confirm against the full source; ifp is otherwise unused here.
 */
795 lem_resume(device_t dev)
797 struct adapter *adapter = device_get_softc(dev);
798 struct ifnet *ifp = adapter->ifp;
800 EM_CORE_LOCK(adapter);
801 lem_init_locked(adapter);
802 lem_init_manageability(adapter);
803 EM_CORE_UNLOCK(adapter);
806 return bus_generic_resume(dev);
/*
 * Transmit kickoff, TX lock held (asserted below): drain the ifnet
 * send queue through lem_xmit().  Requires RUNNING set, OACTIVE clear
 * and link up.  If descriptors run low, force a TX cleanup; if a
 * frame cannot be encapsulated, requeue it (when non-NULL), set
 * OACTIVE and stop.  Each sent frame is mirrored to BPF and arms the
 * watchdog.
 * NOTE(review): several short lines (returns/breaks, lem_txeof()
 * call, mbuf declaration, braces) are missing from this listing.
 */
811 lem_start_locked(struct ifnet *ifp)
813 struct adapter *adapter = ifp->if_softc;
816 EM_TX_LOCK_ASSERT(adapter);
818 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
821 if (!adapter->link_active)
825 * Force a cleanup if number of TX descriptors
826 * available hits the threshold
828 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
830 /* Now do we at least have a minimal? */
831 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
832 adapter->no_tx_desc_avail1++;
837 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
839 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
843 * Encapsulation can modify our pointer, and or make it
844 * NULL on failure. In that event, we can't requeue.
846 if (lem_xmit(adapter, &m_head)) {
849 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
850 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
854 /* Send a copy of the frame to the BPF listener */
855 ETHER_BPF_MTAP(ifp, m_head);
857 /* Set timeout in case hardware has problems transmitting. */
858 adapter->watchdog_check = TRUE;
859 adapter->watchdog_time = ticks;
/* Out of the loop: throttle further starts if descriptors are scarce. */
861 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
862 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/*
 * if_start entry point: wrap lem_start_locked() with the TX lock.
 * NOTE(review): the EM_TX_LOCK(adapter) acquisition (orig line 872)
 * is missing from this listing; only the unlock is visible.
 */
868 lem_start(struct ifnet *ifp)
870 struct adapter *adapter = ifp->if_softc;
873 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
874 lem_start_locked(ifp);
875 EM_TX_UNLOCK(adapter);
878 /*********************************************************************
881 * em_ioctl is called when the user wants to configure the
884 * return 0 on success, positive on failure
885 **********************************************************************/
/*
 * if_ioctl entry point: handles SIOCSIFADDR (avoid full re-init when
 * possible), SIOCSIFMTU (validate against per-MAC max frame size),
 * SIOCSIFFLAGS (promisc/allmulti toggles vs. full init/stop),
 * SIOC(ADD|DEL)MULTI, SIOCxIFMEDIA (rejected during SOL/IDER), and
 * SIOCSIFCAP (polling, checksum, VLAN tagging, WOL bits); everything
 * else falls through to ether_ioctl().
 * NOTE(review): the switch(command) statement, all case labels,
 * break statements and several braces/returns were dropped from this
 * listing -- the flat statement runs below are the bodies of those
 * cases.  Verify control flow against the complete if_lem.c.
 */
888 lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
890 struct adapter *adapter = ifp->if_softc;
891 struct ifreq *ifr = (struct ifreq *)data;
892 #if defined(INET) || defined(INET6)
893 struct ifaddr *ifa = (struct ifaddr *)data;
895 bool avoid_reset = FALSE;
/* Detach in progress: do nothing. */
898 if (adapter->in_detach)
/* SIOCSIFADDR body: */
904 if (ifa->ifa_addr->sa_family == AF_INET)
908 if (ifa->ifa_addr->sa_family == AF_INET6)
912 ** Calling init results in link renegotiation,
913 ** so we avoid doing it when possible.
916 ifp->if_flags |= IFF_UP;
917 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
920 if (!(ifp->if_flags & IFF_NOARP))
921 arp_ifinit(ifp, ifa);
924 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU body: */
930 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
932 EM_CORE_LOCK(adapter);
/* Per-MAC maximum frame size (case labels omitted in this listing). */
933 switch (adapter->hw.mac.type) {
935 max_frame_size = ETHER_MAX_LEN;
938 max_frame_size = MAX_JUMBO_FRAME_SIZE;
940 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
942 EM_CORE_UNLOCK(adapter);
947 ifp->if_mtu = ifr->ifr_mtu;
948 adapter->max_frame_size =
949 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
950 lem_init_locked(adapter);
951 EM_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS body: */
955 IOCTL_DEBUGOUT("ioctl rcv'd:\
956 SIOCSIFFLAGS (Set Interface Flags)");
957 EM_CORE_LOCK(adapter);
958 if (ifp->if_flags & IFF_UP) {
959 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Only promisc/allmulti changed: adjust filters without re-init. */
960 if ((ifp->if_flags ^ adapter->if_flags) &
961 (IFF_PROMISC | IFF_ALLMULTI)) {
962 lem_disable_promisc(adapter);
963 lem_set_promisc(adapter);
966 lem_init_locked(adapter);
968 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
971 EM_TX_UNLOCK(adapter);
973 adapter->if_flags = ifp->if_flags;
974 EM_CORE_UNLOCK(adapter);
/* SIOCADDMULTI / SIOCDELMULTI body: */
978 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
979 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
980 EM_CORE_LOCK(adapter);
981 lem_disable_intr(adapter);
982 lem_set_multi(adapter);
/* 82542 rev 2 needs its receive unit re-initialized after a
 * multicast filter update. */
983 if (adapter->hw.mac.type == e1000_82542 &&
984 adapter->hw.revision_id == E1000_REVISION_2) {
985 lem_initialize_receive_unit(adapter);
987 #ifdef DEVICE_POLLING
988 if (!(ifp->if_capenable & IFCAP_POLLING))
990 lem_enable_intr(adapter);
991 EM_CORE_UNLOCK(adapter);
/* SIOCSIFMEDIA / SIOCGIFMEDIA body: */
995 /* Check SOL/IDER usage */
996 EM_CORE_LOCK(adapter);
997 if (e1000_check_reset_block(&adapter->hw)) {
998 EM_CORE_UNLOCK(adapter);
999 device_printf(adapter->dev, "Media change is"
1000 " blocked due to SOL/IDER session.\n");
1003 EM_CORE_UNLOCK(adapter);
1005 IOCTL_DEBUGOUT("ioctl rcv'd: \
1006 SIOCxIFMEDIA (Get/Set Interface Media)");
1007 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP body: */
1013 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1015 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1016 #ifdef DEVICE_POLLING
1017 if (mask & IFCAP_POLLING) {
1018 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1019 error = ether_poll_register(lem_poll, ifp);
1022 EM_CORE_LOCK(adapter);
1023 lem_disable_intr(adapter);
1024 ifp->if_capenable |= IFCAP_POLLING;
1025 EM_CORE_UNLOCK(adapter);
1027 error = ether_poll_deregister(ifp);
1028 /* Enable interrupt even in error case */
1029 EM_CORE_LOCK(adapter);
1030 lem_enable_intr(adapter);
1031 ifp->if_capenable &= ~IFCAP_POLLING;
1032 EM_CORE_UNLOCK(adapter);
1036 if (mask & IFCAP_HWCSUM) {
1037 ifp->if_capenable ^= IFCAP_HWCSUM;
1040 if (mask & IFCAP_VLAN_HWTAGGING) {
1041 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1044 if ((mask & IFCAP_WOL) &&
1045 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1046 if (mask & IFCAP_WOL_MCAST)
1047 ifp->if_capenable ^= IFCAP_WOL_MCAST;
1048 if (mask & IFCAP_WOL_MAGIC)
1049 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1051 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1053 VLAN_CAPABILITIES(ifp);
/* default: hand anything unrecognized to the generic ethernet layer. */
1058 error = ether_ioctl(ifp, command, data);
1066 /*********************************************************************
1069 * This routine is used in two ways. It is used by the stack as
1070 * init entry point in network interface structure. It is also used
1071 * by the driver as a hw/sw initialization routine to get to a
1074 * return 0 on success, positive on failure
1075 **********************************************************************/
1078 lem_init_locked(struct adapter *adapter)
1080 struct ifnet *ifp = adapter->ifp;
1081 device_t dev = adapter->dev;
1084 INIT_DEBUGOUT("lem_init: begin");
1086 EM_CORE_LOCK_ASSERT(adapter);
1088 EM_TX_LOCK(adapter);
1090 EM_TX_UNLOCK(adapter);
1093 * Packet Buffer Allocation (PBA)
1094 * Writing PBA sets the receive portion of the buffer
1095 * the remainder is used for the transmit buffer.
1097 * Devices before the 82547 had a Packet Buffer of 64K.
1098 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1099 * After the 82547 the buffer was reduced to 40K.
1100 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1101 * Note: default does not leave enough room for Jumbo Frame >10k.
1103 switch (adapter->hw.mac.type) {
1105 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1106 if (adapter->max_frame_size > 8192)
1107 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1109 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
1110 adapter->tx_fifo_head = 0;
1111 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1112 adapter->tx_fifo_size =
1113 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1116 /* Devices before 82547 had a Packet Buffer of 64K. */
1117 if (adapter->max_frame_size > 8192)
1118 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1120 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1123 INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
1124 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1126 /* Get the latest mac address, User can use a LAA */
1127 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1130 /* Put the address into the Receive Address Array */
1131 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1133 /* Initialize the hardware */
1134 if (lem_hardware_init(adapter)) {
1135 device_printf(dev, "Unable to initialize the hardware\n");
1138 lem_update_link_status(adapter);
1140 /* Setup VLAN support, basic and offload if available */
1141 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1143 /* Set hardware offload abilities */
1144 ifp->if_hwassist = 0;
1145 if (adapter->hw.mac.type >= e1000_82543) {
1146 if (ifp->if_capenable & IFCAP_TXCSUM)
1147 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1150 /* Configure for OS presence */
1151 lem_init_manageability(adapter);
1153 /* Prepare transmit descriptors and buffers */
1154 lem_setup_transmit_structures(adapter);
1155 lem_initialize_transmit_unit(adapter);
1157 /* Setup Multicast table */
1158 lem_set_multi(adapter);
1160 /* Prepare receive descriptors and buffers */
1161 if (lem_setup_receive_structures(adapter)) {
1162 device_printf(dev, "Could not setup receive structures\n");
1163 EM_TX_LOCK(adapter);
1165 EM_TX_UNLOCK(adapter);
1168 lem_initialize_receive_unit(adapter);
1170 /* Use real VLAN Filter support? */
1171 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1172 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
1173 /* Use real VLAN Filter support */
1174 lem_setup_vlan_hw_support(adapter);
1177 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1178 ctrl |= E1000_CTRL_VME;
1179 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1183 /* Don't lose promiscuous settings */
1184 lem_set_promisc(adapter);
1186 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1187 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1189 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1190 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1192 #ifdef DEVICE_POLLING
1194 * Only enable interrupts if we are not polling, make sure
1195 * they are off otherwise.
1197 if (ifp->if_capenable & IFCAP_POLLING)
1198 lem_disable_intr(adapter);
1200 #endif /* DEVICE_POLLING */
1201 lem_enable_intr(adapter);
1203 /* AMT based hardware can now take control from firmware */
1204 if (adapter->has_manage && adapter->has_amt)
1205 lem_get_hw_control(adapter);
/* lem_init: ifnet if_init entry point — take the core lock and defer
 * to lem_init_locked() for the actual work. */
1211 struct adapter *adapter = arg;
1213 EM_CORE_LOCK(adapter);
1214 lem_init_locked(adapter);
1215 EM_CORE_UNLOCK(adapter);
1219 #ifdef DEVICE_POLLING
1220 /*********************************************************************
1222  * Legacy polling routine
1224  *********************************************************************/
/*
 * lem_poll: DEVICE_POLLING handler.  On POLL_AND_CHECK_STATUS it also
 * checks latched link-event causes; it then runs RX cleanup for up to
 * 'count' packets and kicks the transmitter if the send queue is
 * non-empty.
 */
1226 lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1228 struct adapter *adapter = ifp->if_softc;
1229 u32 reg_icr, rx_done = 0;
/* Bail out if the interface was stopped under us. */
1231 EM_CORE_LOCK(adapter);
1232 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1233 EM_CORE_UNLOCK(adapter);
1237 if (cmd == POLL_AND_CHECK_STATUS) {
1238 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* RX sequence error / link-state change: refresh link state
 * and restart the stat/watchdog timer. */
1239 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1240 callout_stop(&adapter->timer);
1241 adapter->hw.mac.get_link_status = 1;
1242 lem_update_link_status(adapter);
1243 callout_reset(&adapter->timer, hz,
1244 lem_local_timer, adapter);
1247 EM_CORE_UNLOCK(adapter);
1249 lem_rxeof(adapter, count, &rx_done);
1251 EM_TX_LOCK(adapter);
1253 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1254 lem_start_locked(ifp);
1255 EM_TX_UNLOCK(adapter);
1258 #endif /* DEVICE_POLLING */
1260 /*********************************************************************
1262  * Legacy Interrupt Service routine
1264  *********************************************************************/
/* lem_intr: ithread-context legacy handler; ignored while the
 * interface is polling or not running. */
1268 struct adapter *adapter = arg;
1269 struct ifnet *ifp = adapter->ifp;
1273 if ((ifp->if_capenable & IFCAP_POLLING) ||
1274 ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0))
1277 EM_CORE_LOCK(adapter);
1278 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1279 if (reg_icr & E1000_ICR_RXO)
1280 adapter->rx_overruns++;
/* All-ones reads back when the device is inaccessible (NOTE(review):
 * presumably surprise removal — confirm); zero means the interrupt
 * was not ours.  Either way there is nothing to service. */
1282 if ((reg_icr == 0xffffffff) || (reg_icr == 0)) {
1283 EM_CORE_UNLOCK(adapter);
/* Link event: refresh link state, purge stale TX work, and restart
 * the periodic timer before returning. */
1287 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1288 callout_stop(&adapter->timer);
1289 adapter->hw.mac.get_link_status = 1;
1290 lem_update_link_status(adapter);
1291 /* Deal with TX cruft when link lost */
1292 lem_tx_purge(adapter);
1293 callout_reset(&adapter->timer, hz,
1294 lem_local_timer, adapter);
1295 EM_CORE_UNLOCK(adapter);
1299 EM_CORE_UNLOCK(adapter);
/* Normal path: clean the RX ring, then restart TX if queued work. */
1300 lem_rxeof(adapter, -1, NULL);
1302 EM_TX_LOCK(adapter);
1304 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1305 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1306 lem_start_locked(ifp);
1307 EM_TX_UNLOCK(adapter);
/* lem_handle_link: taskqueue handler for link-state changes (queued
 * from lem_irq_fast).  Serializes on the core lock. */
1313 lem_handle_link(void *context, int pending)
1315 struct adapter *adapter = context;
1316 struct ifnet *ifp = adapter->ifp;
1318 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1321 EM_CORE_LOCK(adapter);
1322 callout_stop(&adapter->timer);
1323 lem_update_link_status(adapter);
1324 /* Deal with TX cruft when link lost */
1325 lem_tx_purge(adapter);
1326 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1327 EM_CORE_UNLOCK(adapter);
1331 /* Combined RX/TX handler, used by Legacy and MSI */
/* Runs from the adapter taskqueue; does the deferred work for
 * lem_irq_fast and then re-enables the interrupts it masked. */
1333 lem_handle_rxtx(void *context, int pending)
1335 struct adapter *adapter = context;
1336 struct ifnet *ifp = adapter->ifp;
1339 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1340 lem_rxeof(adapter, adapter->rx_process_limit, NULL);
1341 EM_TX_LOCK(adapter);
1343 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1344 lem_start_locked(ifp);
1345 EM_TX_UNLOCK(adapter);
/* Interrupts were disabled by lem_irq_fast; turn them back on
 * only if the interface is still running. */
1348 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1349 lem_enable_intr(adapter);
1352 /*********************************************************************
1354  * Fast Legacy/MSI Combined Interrupt Service routine
1356  *********************************************************************/
/* Filter-level handler: does minimal work, masks further interrupts,
 * and hands RX/TX and link processing to taskqueues. */
1358 lem_irq_fast(void *arg)
1360 struct adapter *adapter = arg;
1366 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* All-ones: device not responding (NOTE(review): likely removed). */
1369 if (reg_icr == 0xffffffff)
1370 return FILTER_STRAY;
1372 /* Definitely not our interrupt. */
1374 return FILTER_STRAY;
1377  * Mask interrupts until the taskqueue is finished running. This is
1378  * cheap, just assume that it is needed. This also works around the
1379  * MSI message reordering errata on certain systems.
1381 lem_disable_intr(adapter);
1382 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1384 /* Link status change */
1385 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1386 adapter->hw.mac.get_link_status = 1;
1387 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1390 if (reg_icr & E1000_ICR_RXO)
1391 adapter->rx_overruns++;
1392 return FILTER_HANDLED;
1396 /*********************************************************************
1398  * Media Ioctl callback
1400  * This routine is called whenever the user queries the status of
1401  * the interface using ifconfig.
1403  **********************************************************************/
/* Fills *ifmr with the current media status under the core lock. */
1405 lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1407 struct adapter *adapter = ifp->if_softc;
1408 u_char fiber_type = IFM_1000_SX;
1410 INIT_DEBUGOUT("lem_media_status: begin");
1412 EM_CORE_LOCK(adapter);
1413 lem_update_link_status(adapter);
1415 ifmr->ifm_status = IFM_AVALID;
1416 ifmr->ifm_active = IFM_ETHER;
/* No link: report only "valid" status. */
1418 if (!adapter->link_active) {
1419 EM_CORE_UNLOCK(adapter);
1423 ifmr->ifm_status |= IFM_ACTIVE;
/* Fiber/SerDes: always full duplex; 82545 reports LX, others SX. */
1425 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1426 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1427 if (adapter->hw.mac.type == e1000_82545)
1428 fiber_type = IFM_1000_LX;
1429 ifmr->ifm_active |= fiber_type | IFM_FDX;
/* Copper: translate cached speed/duplex into ifmedia flags. */
1431 switch (adapter->link_speed) {
1433 ifmr->ifm_active |= IFM_10_T;
1436 ifmr->ifm_active |= IFM_100_TX;
1439 ifmr->ifm_active |= IFM_1000_T;
1442 if (adapter->link_duplex == FULL_DUPLEX)
1443 ifmr->ifm_active |= IFM_FDX;
1445 ifmr->ifm_active |= IFM_HDX;
1447 EM_CORE_UNLOCK(adapter);
1450 /*********************************************************************
1452  * Media Ioctl callback
1454  * This routine is called when the user changes speed/duplex using
1455  * media/mediaopt option with ifconfig.
1457  **********************************************************************/
/* Programs autoneg/forced speed+duplex from the selected ifmedia word,
 * then reinitializes the adapter to apply it. */
1459 lem_media_change(struct ifnet *ifp)
1461 struct adapter *adapter = ifp->if_softc;
1462 struct ifmedia *ifm = &adapter->media;
1464 INIT_DEBUGOUT("lem_media_change: begin");
1466 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1469 EM_CORE_LOCK(adapter);
1470 switch (IFM_SUBTYPE(ifm->ifm_media)) {
/* Auto: advertise the default set. */
1472 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1473 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
/* Gigabit: still autonegotiated, advertising 1000FD only. */
1478 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1479 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
/* 100 Mb forced: duplex from the IFM_FDX option bit. */
1482 adapter->hw.mac.autoneg = FALSE;
1483 adapter->hw.phy.autoneg_advertised = 0;
1484 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1485 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1487 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
/* 10 Mb forced: same duplex selection as above. */
1490 adapter->hw.mac.autoneg = FALSE;
1491 adapter->hw.phy.autoneg_advertised = 0;
1492 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1493 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1495 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1498 device_printf(adapter->dev, "Unsupported media type\n");
/* Re-init so the new media settings take effect. */
1501 lem_init_locked(adapter);
1502 EM_CORE_UNLOCK(adapter);
1507 /*********************************************************************
1509  * This routine maps the mbufs to tx descriptors.
1511  * return 0 on success, positive on failure
1512  **********************************************************************/
/* Caller passes the mbuf chain by reference (m_headp) so this routine
 * can replace it (m_pullup/m_defrag) or free it on fatal error.
 * Caller holds the TX lock (implied by lem_start_locked callers). */
1515 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1517 bus_dma_segment_t segs[EM_MAX_SCATTER];
1519 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1520 struct e1000_tx_desc *ctxd = NULL;
1521 struct mbuf *m_head;
1522 u32 txd_upper, txd_lower, txd_used, txd_saved;
1523 int error, nsegs, i, j, first, last = 0;
1526 txd_upper = txd_lower = txd_used = txd_saved = 0;
1529 ** When doing checksum offload, it is critical to
1530 ** make sure the first mbuf has more than header,
1531 ** because that routine expects data to be present.
1533 if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
1534 (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
1535 m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
1542  * Map the packet for DMA
1544  * Capture the first descriptor index,
1545  * this descriptor will have the index
1546  * of the EOP which is the only one that
1547  * now gets a DONE bit writeback.
1549 first = adapter->next_avail_tx_desc;
1550 tx_buffer = &adapter->tx_buffer_area[first];
1551 tx_buffer_mapped = tx_buffer;
1552 map = tx_buffer->map;
1554 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1555 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1558  * There are two types of errors we can (try) to handle:
1559  * - EFBIG means the mbuf chain was too long and bus_dma ran
1560  * out of segments. Defragment the mbuf chain and try again.
1561  * - ENOMEM means bus_dma could not obtain enough bounce buffers
1562  * at this point in time. Defer sending and try again later.
1563  * All other errors, in particular EINVAL, are fatal and prevent the
1564  * mbuf chain from ever going through. Drop it and report error.
1566 if (error == EFBIG) {
1569 m = m_defrag(*m_headp, M_NOWAIT);
1571 adapter->mbuf_alloc_failed++;
/* Retry the DMA load with the defragmented chain. */
1579 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1580 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1583 adapter->no_tx_dma_setup++;
1588 } else if (error != 0) {
1589 adapter->no_tx_dma_setup++;
/* Need headroom (2 spare descriptors) in the TX ring. */
1593 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1594 adapter->no_tx_desc_avail2++;
1595 bus_dmamap_unload(adapter->txtag, map);
1600 /* Do hardware assists */
1601 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1602 lem_transmit_checksum_setup(adapter, m_head,
1603 &txd_upper, &txd_lower);
1605 i = adapter->next_avail_tx_desc;
1606 if (adapter->pcix_82544)
1609 /* Set up our transmit descriptors */
1610 for (j = 0; j < nsegs; j++) {
1612 bus_addr_t seg_addr;
1613 /* If adapter is 82544 and on PCIX bus */
1614 if(adapter->pcix_82544) {
1615 DESC_ARRAY desc_array;
1616 u32 array_elements, counter;
1618  * Check the Address and Length combination and
1619  * split the data accordingly
1621 array_elements = lem_fill_descriptors(segs[j].ds_addr,
1622 segs[j].ds_len, &desc_array);
1623 for (counter = 0; counter < array_elements; counter++) {
/* Ring exhausted mid-packet: roll back to the saved
 * descriptor index and abandon this frame. */
1624 if (txd_used == adapter->num_tx_desc_avail) {
1625 adapter->next_avail_tx_desc = txd_saved;
1626 adapter->no_tx_desc_avail2++;
1627 bus_dmamap_unload(adapter->txtag, map);
1630 tx_buffer = &adapter->tx_buffer_area[i];
1631 ctxd = &adapter->tx_desc_base[i];
1632 ctxd->buffer_addr = htole64(
1633 desc_array.descriptor[counter].address);
1634 ctxd->lower.data = htole32(
1635 (adapter->txd_cmd | txd_lower | (u16)
1636 desc_array.descriptor[counter].length));
1638 htole32((txd_upper));
1640 if (++i == adapter->num_tx_desc)
1642 tx_buffer->m_head = NULL;
1643 tx_buffer->next_eop = -1;
/* Common (non-82544/PCI-X) path: one descriptor per segment. */
1647 tx_buffer = &adapter->tx_buffer_area[i];
1648 ctxd = &adapter->tx_desc_base[i];
1649 seg_addr = segs[j].ds_addr;
1650 seg_len = segs[j].ds_len;
1651 ctxd->buffer_addr = htole64(seg_addr);
1652 ctxd->lower.data = htole32(
1653 adapter->txd_cmd | txd_lower | seg_len);
1657 if (++i == adapter->num_tx_desc)
1659 tx_buffer->m_head = NULL;
1660 tx_buffer->next_eop = -1;
1664 adapter->next_avail_tx_desc = i;
/* Account for descriptors consumed (split count on 82544/PCI-X). */
1666 if (adapter->pcix_82544)
1667 adapter->num_tx_desc_avail -= txd_used;
1669 adapter->num_tx_desc_avail -= nsegs;
1671 if (m_head->m_flags & M_VLANTAG) {
1672 /* Set the vlan id. */
1673 ctxd->upper.fields.special =
1674 htole16(m_head->m_pkthdr.ether_vtag);
1675 /* Tell hardware to add tag */
1676 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
/* Swap DMA maps so the EOP buffer owns the loaded map. */
1679 tx_buffer->m_head = m_head;
1680 tx_buffer_mapped->map = tx_buffer->map;
1681 tx_buffer->map = map;
1682 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1685  * Last Descriptor of Packet
1686  * needs End Of Packet (EOP)
1687  * and Report Status (RS)
1690 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1692  * Keep track in the first buffer which
1693  * descriptor will be written back
1695 tx_buffer = &adapter->tx_buffer_area[first];
1696 tx_buffer->next_eop = last;
1697 adapter->watchdog_time = ticks;
1700  * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1701  * that this frame is available to transmit.
1703 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1704 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* 82547 at half duplex must go through the FIFO-hang workaround
 * path instead of writing TDT directly. */
1705 if (adapter->hw.mac.type == e1000_82547 &&
1706 adapter->link_duplex == HALF_DUPLEX)
1707 lem_82547_move_tail(adapter);
1709 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1710 if (adapter->hw.mac.type == e1000_82547)
1711 lem_82547_update_fifo_head(adapter,
1712 m_head->m_pkthdr.len);
1718 /*********************************************************************
1720  * 82547 workaround to avoid controller hang in half-duplex environment.
1721  * The workaround is to avoid queuing a large packet that would span
1722  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1723  * in this case. We do that only when FIFO is quiescent.
1725  **********************************************************************/
/* Also used as a callout handler (retries itself via tx_fifo_timer
 * when the workaround defers the tail write). */
1727 lem_82547_move_tail(void *arg)
1729 struct adapter *adapter = arg;
1730 struct e1000_tx_desc *tx_desc;
1731 u16 hw_tdt, sw_tdt, length = 0;
1734 EM_TX_LOCK_ASSERT(adapter);
1736 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
1737 sw_tdt = adapter->next_avail_tx_desc;
/* Walk descriptors between the hardware tail and the software tail,
 * accumulating the frame length up to each EOP descriptor. */
1739 while (hw_tdt != sw_tdt) {
1740 tx_desc = &adapter->tx_desc_base[hw_tdt];
1741 length += tx_desc->lower.flags.length;
1742 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1743 if (++hw_tdt == adapter->num_tx_desc)
/* If the frame would wrap the internal TX FIFO, defer: retry this
 * routine from a 1-tick callout instead of advancing TDT now. */
1747 if (lem_82547_fifo_workaround(adapter, length)) {
1748 adapter->tx_fifo_wrk_cnt++;
1749 callout_reset(&adapter->tx_fifo_timer, 1,
1750 lem_82547_move_tail, adapter);
1753 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
1754 lem_82547_update_fifo_head(adapter, length);
/* Returns nonzero when the packet must be deferred: at half duplex,
 * a frame whose padded length would cross the FIFO wrap point is
 * held back unless the FIFO pointers can be reset right now. */
1761 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1763 int fifo_space, fifo_pkt_len;
/* Frame length padded up to the FIFO header granularity. */
1765 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1767 if (adapter->link_duplex == HALF_DUPLEX) {
1768 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1770 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1771 if (lem_82547_tx_fifo_reset(adapter))
/* Advance the software copy of the TX FIFO head by the padded frame
 * length, wrapping at the FIFO size. */
1782 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1784 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1786 /* tx_fifo_head is always 16 byte aligned */
1787 adapter->tx_fifo_head += fifo_pkt_len;
1788 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1789 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/* Reset the 82547 internal TX FIFO pointers, but only when the TX
 * unit is fully quiescent (head==tail, FIFO head==tail, FIFO packet
 * count zero).  Disables TCTL.EN around the pointer writes. */
1795 lem_82547_tx_fifo_reset(struct adapter *adapter)
1799 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1800 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1801 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1802 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1803 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1804 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1805 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1806 /* Disable TX unit */
1807 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1808 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1809 tctl & ~E1000_TCTL_EN);
1811 /* Reset FIFO pointers */
1812 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1813 adapter->tx_head_addr);
1814 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1815 adapter->tx_head_addr);
1816 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1817 adapter->tx_head_addr);
1818 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1819 adapter->tx_head_addr);
1821 /* Re-enable TX unit */
1822 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
1823 E1000_WRITE_FLUSH(&adapter->hw);
1825 adapter->tx_fifo_head = 0;
1826 adapter->tx_fifo_reset_cnt++;
/* Program RCTL promiscuous bits from the interface flags:
 * IFF_PROMISC -> unicast+multicast promiscuous (+SBP, see below),
 * IFF_ALLMULTI -> multicast promiscuous only. */
1836 lem_set_promisc(struct adapter *adapter)
1838 struct ifnet *ifp = adapter->ifp;
1841 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1843 if (ifp->if_flags & IFF_PROMISC) {
1844 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1845 /* Turn this on if you want to see bad packets */
1847 reg_rctl |= E1000_RCTL_SBP;
1848 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1849 } else if (ifp->if_flags & IFF_ALLMULTI) {
1850 reg_rctl |= E1000_RCTL_MPE;
1851 reg_rctl &= ~E1000_RCTL_UPE;
1852 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/* Clear all promiscuous-mode bits (UPE, MPE, SBP) in RCTL. */
1857 lem_disable_promisc(struct adapter *adapter)
1861 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1863 reg_rctl &= (~E1000_RCTL_UPE);
1864 reg_rctl &= (~E1000_RCTL_MPE);
1865 reg_rctl &= (~E1000_RCTL_SBP);
1866 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1870 /*********************************************************************
1873  * This routine is called whenever multicast address list is updated.
1875  **********************************************************************/
/* Rebuilds the hardware multicast table from the ifnet's multiaddr
 * list; falls back to multicast-promiscuous when the list overflows
 * MAX_NUM_MULTICAST_ADDRESSES. */
1878 lem_set_multi(struct adapter *adapter)
1880 struct ifnet *ifp = adapter->ifp;
1881 struct ifmultiaddr *ifma;
1883 u8 *mta; /* Multicast array memory */
1886 IOCTL_DEBUGOUT("lem_set_multi: begin");
1889 bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
/* 82542 rev 2 errata handling: put the receiver in reset (and drop
 * MWI) around the multicast table update.  Undone below. */
1891 if (adapter->hw.mac.type == e1000_82542 &&
1892 adapter->hw.revision_id == E1000_REVISION_2) {
1893 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1894 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1895 e1000_pci_clear_mwi(&adapter->hw);
1896 reg_rctl |= E1000_RCTL_RST;
1897 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1901 #if __FreeBSD_version < 800000
1904 if_maddr_rlock(ifp);
/* Collect link-layer multicast addresses into the flat mta array. */
1906 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1907 if (ifma->ifma_addr->sa_family != AF_LINK)
1910 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1913 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1914 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
1917 #if __FreeBSD_version < 800000
1918 IF_ADDR_UNLOCK(ifp);
1920 if_maddr_runlock(ifp);
/* Too many groups for the hardware table: accept all multicast. */
1922 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1923 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1924 reg_rctl |= E1000_RCTL_MPE;
1925 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1927 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
/* Take the 82542 rev 2 receiver back out of reset and restore MWI. */
1929 if (adapter->hw.mac.type == e1000_82542 &&
1930 adapter->hw.revision_id == E1000_REVISION_2) {
1931 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1932 reg_rctl &= ~E1000_RCTL_RST;
1933 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1935 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1936 e1000_pci_set_mwi(&adapter->hw);
1941 /*********************************************************************
1944  * This routine checks for link status and updates statistics.
1946  **********************************************************************/
/* Periodic (1 Hz) callout; also implements the TX watchdog.  The
 * trailing statements after the reschedule are the watchdog-timeout
 * path (its label line is not visible in this chunk). */
1949 lem_local_timer(void *arg)
1951 struct adapter *adapter = arg;
1953 EM_CORE_LOCK_ASSERT(adapter);
1955 lem_update_link_status(adapter);
1956 lem_update_stats_counters(adapter);
1958 lem_smartspeed(adapter);
1961  * We check the watchdog: the time since
1962  * the last TX descriptor was cleaned.
1963  * This implies a functional TX engine.
1965 if ((adapter->watchdog_check == TRUE) &&
1966 (ticks - adapter->watchdog_time > EM_WATCHDOG))
1969 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
/* Watchdog fired: log, mark not-running, count it, and re-init. */
1972 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1973 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1974 adapter->watchdog_events++;
1975 lem_init_locked(adapter);
/* Refresh cached link state (speed, duplex, link_active) from the
 * hardware, announce up/down transitions to the stack, and manage the
 * TX watchdog arming on link loss. */
1979 lem_update_link_status(struct adapter *adapter)
1981 struct e1000_hw *hw = &adapter->hw;
1982 struct ifnet *ifp = adapter->ifp;
1983 device_t dev = adapter->dev;
1986 /* Get the cached link value or read phy for real */
1987 switch (hw->phy.media_type) {
1988 case e1000_media_type_copper:
1989 if (hw->mac.get_link_status) {
1990 /* Do the work to read phy */
1991 e1000_check_for_link(hw);
1992 link_check = !hw->mac.get_link_status;
1993 if (link_check) /* ESB2 fix */
1994 e1000_cfg_on_link_up(hw);
/* Fiber: link state comes from the STATUS register. */
1998 case e1000_media_type_fiber:
1999 e1000_check_for_link(hw);
2000 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
/* SerDes: shared code caches the result in serdes_has_link. */
2003 case e1000_media_type_internal_serdes:
2004 e1000_check_for_link(hw);
2005 link_check = adapter->hw.mac.serdes_has_link;
2008 case e1000_media_type_unknown:
2012 /* Now check for a transition */
2013 if (link_check && (adapter->link_active == 0)) {
2014 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2015 &adapter->link_duplex);
2017 device_printf(dev, "Link is up %d Mbps %s\n",
2018 adapter->link_speed,
2019 ((adapter->link_duplex == FULL_DUPLEX) ?
2020 "Full Duplex" : "Half Duplex"));
2021 adapter->link_active = 1;
2022 adapter->smartspeed = 0;
2023 ifp->if_baudrate = adapter->link_speed * 1000000;
2024 if_link_state_change(ifp, LINK_STATE_UP);
2025 } else if (!link_check && (adapter->link_active == 1)) {
2026 ifp->if_baudrate = adapter->link_speed = 0;
2027 adapter->link_duplex = 0;
2029 device_printf(dev, "Link is Down\n");
2030 adapter->link_active = 0;
2031 /* Link down, disable watchdog */
2032 adapter->watchdog_check = FALSE;
2033 if_link_state_change(ifp, LINK_STATE_DOWN);
2037 /*********************************************************************
2039  * This routine disables all traffic on the adapter by issuing a
2040  * global reset on the MAC and deallocates TX/RX buffers.
2042  * This routine should always be called with BOTH the CORE
2044  **********************************************************************/
/* lem_stop: both the core and TX locks must be held (asserted). */
2049 struct adapter *adapter = arg;
2050 struct ifnet *ifp = adapter->ifp;
2052 EM_CORE_LOCK_ASSERT(adapter);
2053 EM_TX_LOCK_ASSERT(adapter);
2055 INIT_DEBUGOUT("lem_stop: begin");
/* Quiet interrupts and stop both periodic callouts. */
2057 lem_disable_intr(adapter);
2058 callout_stop(&adapter->timer);
2059 callout_stop(&adapter->tx_fifo_timer);
2061 /* Tell the stack that the interface is no longer active */
2062 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2064 e1000_reset_hw(&adapter->hw);
2065 if (adapter->hw.mac.type >= e1000_82544)
2066 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2068 e1000_led_off(&adapter->hw);
2069 e1000_cleanup_led(&adapter->hw);
2073 /*********************************************************************
2075  * Determine hardware revision.
2077  **********************************************************************/
/* Copies PCI identification into the shared-code hw struct and lets
 * e1000_set_mac_type() classify the MAC.  Also re-enables bus
 * mastering / memory access if firmware left them off. */
2079 lem_identify_hardware(struct adapter *adapter)
2081 device_t dev = adapter->dev;
2083 /* Make sure our PCI config space has the necessary stuff set */
2084 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2085 if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2086 (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2087 device_printf(dev, "Memory Access and/or Bus Master bits "
2089 adapter->hw.bus.pci_cmd_word |=
2090 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2091 pci_write_config(dev, PCIR_COMMAND,
2092 adapter->hw.bus.pci_cmd_word, 2);
2095 /* Save off the information about this board */
2096 adapter->hw.vendor_id = pci_get_vendor(dev);
2097 adapter->hw.device_id = pci_get_device(dev);
2098 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2099 adapter->hw.subsystem_vendor_id =
2100 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2101 adapter->hw.subsystem_device_id =
2102 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2104 /* Do Shared Code Init and Setup */
2105 if (e1000_set_mac_type(&adapter->hw)) {
2106 device_printf(dev, "Setup init failure\n");
/* Map the memory BAR (register access) and, for adapters newer than
 * the 82543, locate and map the I/O BAR as well.  NOTE(review): the
 * comment says "Only older adapters use IO mapping" but the visible
 * condition is `type > e1000_82543` — matches upstream if_lem.c. */
2112 lem_allocate_pci_resources(struct adapter *adapter)
2114 device_t dev = adapter->dev;
2115 int val, rid, error = E1000_SUCCESS;
2118 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2120 if (adapter->memory == NULL) {
2121 device_printf(dev, "Unable to allocate bus resource: memory\n");
2124 adapter->osdep.mem_bus_space_tag =
2125 rman_get_bustag(adapter->memory);
2126 adapter->osdep.mem_bus_space_handle =
2127 rman_get_bushandle(adapter->memory);
2128 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2130 /* Only older adapters use IO mapping */
2131 if (adapter->hw.mac.type > e1000_82543) {
2132 /* Figure our where our IO BAR is ? */
/* Scan the config-space BARs for one of type I/O, skipping the
 * second dword of any 64-bit memory BAR. */
2133 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2134 val = pci_read_config(dev, rid, 4);
2135 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2136 adapter->io_rid = rid;
2140 /* check for 64bit BAR */
2141 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2144 if (rid >= PCIR_CIS) {
2145 device_printf(dev, "Unable to locate IO BAR\n");
2148 adapter->ioport = bus_alloc_resource_any(dev,
2149 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2150 if (adapter->ioport == NULL) {
2151 device_printf(dev, "Unable to allocate bus resource: "
2155 adapter->hw.io_base = 0;
2156 adapter->osdep.io_bus_space_tag =
2157 rman_get_bustag(adapter->ioport);
2158 adapter->osdep.io_bus_space_handle =
2159 rman_get_bushandle(adapter->ioport);
/* Shared code reaches the osdep via hw.back. */
2162 adapter->hw.back = &adapter->osdep;
2167 /*********************************************************************
2169  * Setup the Legacy or MSI Interrupt handler
2171  **********************************************************************/
/* Allocates the single IRQ resource and wires it either to the plain
 * ithread handler (lem_intr, when lem_use_legacy_irq is set) or to
 * the fast filter (lem_irq_fast) plus rxtx/link taskqueues. */
2173 lem_allocate_irq(struct adapter *adapter)
2175 device_t dev = adapter->dev;
2178 /* Manually turn off all interrupts */
2179 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2181 /* We allocate a single interrupt resource */
2182 adapter->res[0] = bus_alloc_resource_any(dev,
2183 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2184 if (adapter->res[0] == NULL) {
2185 device_printf(dev, "Unable to allocate bus resource: "
2190 /* Do Legacy setup? */
2191 if (lem_use_legacy_irq) {
2192 if ((error = bus_setup_intr(dev, adapter->res[0],
2193 INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
2194 &adapter->tag[0])) != 0) {
2196 "Failed to register interrupt handler");
2203  * Use a Fast interrupt and the associated
2204  * deferred processing contexts.
2206 TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
2207 TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
2208 adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
2209 taskqueue_thread_enqueue, &adapter->tq);
2210 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2211 device_get_nameunit(adapter->dev));
2212 if ((error = bus_setup_intr(dev, adapter->res[0],
2213 INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
2214 &adapter->tag[0])) != 0) {
2215 device_printf(dev, "Failed to register fast interrupt "
2216 "handler: %d\n", error);
/* Undo the taskqueue on failure. */
2217 taskqueue_free(adapter->tq);
/* Release, in order: the interrupt handler, the IRQ resource, the
 * memory BAR mapping, and the I/O BAR mapping.  Each is released only
 * if it was actually allocated. */
2227 lem_free_pci_resources(struct adapter *adapter)
2229 device_t dev = adapter->dev;
2232 if (adapter->tag[0] != NULL) {
2233 bus_teardown_intr(dev, adapter->res[0],
2235 adapter->tag[0] = NULL;
2238 if (adapter->res[0] != NULL) {
2239 bus_release_resource(dev, SYS_RES_IRQ,
2240 0, adapter->res[0]);
2243 if (adapter->memory != NULL)
2244 bus_release_resource(dev, SYS_RES_MEMORY,
2245 PCIR_BAR(0), adapter->memory);
2247 if (adapter->ioport != NULL)
2248 bus_release_resource(dev, SYS_RES_IOPORT,
2249 adapter->io_rid, adapter->ioport);
2253 /*********************************************************************
2255  * Initialize the hardware to a configuration
2256  * as specified by the adapter structure.
2258  **********************************************************************/
/* Resets the MAC, derives flow-control watermarks from the current
 * PBA value and max frame size, applies the lem_fc_setting tunable,
 * and runs the shared-code e1000_init_hw(). */
2260 lem_hardware_init(struct adapter *adapter)
2262 device_t dev = adapter->dev;
2265 INIT_DEBUGOUT("lem_hardware_init: begin");
2267 /* Issue a global reset */
2268 e1000_reset_hw(&adapter->hw);
2270 /* When hardware is reset, fifo_head is also reset */
2271 adapter->tx_fifo_head = 0;
2274  * These parameters control the automatic generation (Tx) and
2275  * response (Rx) to Ethernet PAUSE frames.
2276  * - High water mark should allow for at least two frames to be
2277  * received after sending an XOFF.
2278  * - Low water mark works best when it is very near the high water mark.
2279  * This allows the receiver to restart by sending XON when it has
2280  * drained a bit. Here we use an arbitary value of 1500 which will
2281  * restart after one full frame is pulled from the buffer. There
2282  * could be several smaller frames in the buffer and if so they will
2283  * not trigger the XON until their total number reduces the buffer
2285  * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2287 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
2290 adapter->hw.fc.high_water = rx_buffer_size -
2291 roundup2(adapter->max_frame_size, 1024);
2292 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2294 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2295 adapter->hw.fc.send_xon = TRUE;
2297 /* Set Flow control, use the tunable location if sane */
2298 if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
2299 adapter->hw.fc.requested_mode = lem_fc_setting;
2301 adapter->hw.fc.requested_mode = e1000_fc_none;
2303 if (e1000_init_hw(&adapter->hw) < 0) {
2304 device_printf(dev, "Hardware Initialization Failed\n");
/* Prime the link-state machinery once after init. */
2308 e1000_check_for_link(&adapter->hw);
2313 /*********************************************************************
2315 * Setup networking device structure and register an interface.
2317 **********************************************************************/
/*
 * NOTE(review): view is incomplete (lines elided).  Visible behavior:
 * allocates the ifnet, wires up lem_init/lem_ioctl/lem_start, attaches
 * Ethernet, advertises checksum/VLAN/WOL capabilities (HWCSUM only on
 * >= 82543 MACs), and registers ifmedia entries — fiber/serdes parts get
 * 1000_SX (or 1000_LX on 82545), copper parts get 10/100 plus 1000_T
 * unless the PHY is an IFE type.
 */
2319 lem_setup_interface(device_t dev, struct adapter *adapter)
2323 INIT_DEBUGOUT("lem_setup_interface: begin");
2325 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2327 device_printf(dev, "can not allocate ifnet structure\n");
2330 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2331 ifp->if_init = lem_init;
2332 ifp->if_softc = adapter;
2333 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2334 ifp->if_ioctl = lem_ioctl;
2335 ifp->if_start = lem_start;
/* Send queue sized to the TX ring, minus one slot. */
2336 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2337 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2338 IFQ_SET_READY(&ifp->if_snd);
2340 ether_ifattach(ifp, adapter->hw.mac.addr);
2342 ifp->if_capabilities = ifp->if_capenable = 0;
2344 if (adapter->hw.mac.type >= e1000_82543) {
2345 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2346 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2350 * Tell the upper layer(s) we support long frames.
2352 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2353 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2354 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2357 ** Don't turn this on by default, if vlans are
2358 ** created on another pseudo device (eg. lagg)
2359 ** then vlan events are not passed thru, breaking
2360 ** operation, but with HW FILTER off it works. If
2361 ** using vlans directly on the em driver you can
2362 ** enable this and get full hardware tag filtering.
2364 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2366 #ifdef DEVICE_POLLING
2367 ifp->if_capabilities |= IFCAP_POLLING;
2370 /* Enable only WOL MAGIC by default */
2372 ifp->if_capabilities |= IFCAP_WOL;
2373 ifp->if_capenable |= IFCAP_WOL_MAGIC;
2377 * Specify the media types supported by this adapter and register
2378 * callbacks to update media and link information
2380 ifmedia_init(&adapter->media, IFM_IMASK,
2381 lem_media_change, lem_media_status);
2382 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2383 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2384 u_char fiber_type = IFM_1000_SX; /* default type */
2386 if (adapter->hw.mac.type == e1000_82545)
2387 fiber_type = IFM_1000_LX;
2388 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
2390 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2392 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2393 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2395 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2397 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
/* IFE PHYs cannot do 1000T, so skip those media entries. */
2399 if (adapter->hw.phy.type != e1000_phy_ife) {
2400 ifmedia_add(&adapter->media,
2401 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2402 ifmedia_add(&adapter->media,
2403 IFM_ETHER | IFM_1000_T, 0, NULL);
2406 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2407 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2412 /*********************************************************************
2414 * Workaround for SmartSpeed on 82541 and 82547 controllers
2416 **********************************************************************/
/*
 * NOTE(review): view is incomplete (lines elided).  Visible behavior:
 * bails out early unless link is down, the PHY is an IGP type, autoneg is
 * on, and 1000FD is advertised.  On repeated Master/Slave config faults
 * it clears CR_1000T_MS_ENABLE and restarts autonegotiation; after
 * EM_SMARTSPEED_DOWNSHIFT tries it re-enables MS and restarts again;
 * the counter wraps at EM_SMARTSPEED_MAX.
 */
2418 lem_smartspeed(struct adapter *adapter)
2422 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2423 adapter->hw.mac.autoneg == 0 ||
2424 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2427 if (adapter->smartspeed == 0) {
2428 /* If Master/Slave config fault is asserted twice,
2429 * we assume back-to-back */
2430 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2431 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2433 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2434 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2435 e1000_read_phy_reg(&adapter->hw,
2436 PHY_1000T_CTRL, &phy_tmp);
2437 if(phy_tmp & CR_1000T_MS_ENABLE) {
2438 phy_tmp &= ~CR_1000T_MS_ENABLE;
2439 e1000_write_phy_reg(&adapter->hw,
2440 PHY_1000T_CTRL, phy_tmp);
2441 adapter->smartspeed++;
2442 if(adapter->hw.mac.autoneg &&
2443 !e1000_copper_link_autoneg(&adapter->hw) &&
2444 !e1000_read_phy_reg(&adapter->hw,
2445 PHY_CONTROL, &phy_tmp)) {
2446 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2447 MII_CR_RESTART_AUTO_NEG);
2448 e1000_write_phy_reg(&adapter->hw,
2449 PHY_CONTROL, phy_tmp);
2454 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2455 /* If still no link, perhaps using 2/3 pair cable */
2456 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2457 phy_tmp |= CR_1000T_MS_ENABLE;
2458 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2459 if(adapter->hw.mac.autoneg &&
2460 !e1000_copper_link_autoneg(&adapter->hw) &&
2461 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2462 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2463 MII_CR_RESTART_AUTO_NEG);
2464 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2467 /* Restart process after EM_SMARTSPEED_MAX iterations */
2468 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2469 adapter->smartspeed = 0;
2474 * Manage DMA'able memory.
/*
 * bus_dmamap_load(9) callback: stores the single segment's bus address
 * into the bus_addr_t pointed to by arg.  Only segs[0] is consulted, so
 * callers are expected to load one contiguous segment.
 */
2477 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2481 *(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * Allocate a DMA-coherent memory region of `size` bytes for descriptor
 * rings: creates a tag, allocates/maps memory, and loads it to obtain the
 * bus address in dma->dma_paddr via lem_dmamap_cb.  On any failure the
 * visible cleanup path unloads/frees/destroys what was created and NULLs
 * dma->dma_map / dma->dma_tag.  NOTE(review): view is incomplete — error
 * labels and return statements are elided.
 */
2485 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2486 struct em_dma_alloc *dma, int mapflags)
2490 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2491 EM_DBA_ALIGN, 0, /* alignment, bounds */
2492 BUS_SPACE_MAXADDR, /* lowaddr */
2493 BUS_SPACE_MAXADDR, /* highaddr */
2494 NULL, NULL, /* filter, filterarg */
2497 size, /* maxsegsize */
2499 NULL, /* lockfunc */
2503 device_printf(adapter->dev,
2504 "%s: bus_dma_tag_create failed: %d\n",
2509 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2510 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2512 device_printf(adapter->dev,
2513 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2514 __func__, (uintmax_t)size, error);
2519 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2520 size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2521 if (error || dma->dma_paddr == 0) {
2522 device_printf(adapter->dev,
2523 "%s: bus_dmamap_load failed: %d\n",
2531 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2533 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2534 bus_dma_tag_destroy(dma->dma_tag);
2536 dma->dma_map = NULL;
2537 dma->dma_tag = NULL;
/*
 * Release a region allocated by lem_dma_malloc(): sync, unload and free
 * the map (if any), then destroy the tag.  Safe to call when dma_tag is
 * already NULL (returns early); fields are NULLed to prevent double free.
 */
2543 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2545 if (dma->dma_tag == NULL)
2547 if (dma->dma_map != NULL) {
2548 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2549 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2550 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2551 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2552 dma->dma_map = NULL;
2554 bus_dma_tag_destroy(dma->dma_tag);
2555 dma->dma_tag = NULL;
2559 /*********************************************************************
2561 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2562 * the information needed to transmit a packet on the wire.
2564 **********************************************************************/
/*
 * NOTE(review): view is incomplete (lines elided).  Visible behavior:
 * creates the TX mbuf DMA tag (up to EM_MAX_SCATTER segments of MCLBYTES
 * each), allocates the tx_buffer array zeroed, and creates one dmamap per
 * descriptor; on map-creation failure the visible path calls
 * lem_free_transmit_structures() for cleanup.
 */
2566 lem_allocate_transmit_structures(struct adapter *adapter)
2568 device_t dev = adapter->dev;
2569 struct em_buffer *tx_buffer;
2573 * Create DMA tags for tx descriptors
2575 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2576 1, 0, /* alignment, bounds */
2577 BUS_SPACE_MAXADDR, /* lowaddr */
2578 BUS_SPACE_MAXADDR, /* highaddr */
2579 NULL, NULL, /* filter, filterarg */
2580 MCLBYTES * EM_MAX_SCATTER, /* maxsize */
2581 EM_MAX_SCATTER, /* nsegments */
2582 MCLBYTES, /* maxsegsize */
2584 NULL, /* lockfunc */
2586 &adapter->txtag)) != 0) {
2587 device_printf(dev, "Unable to allocate TX DMA tag\n");
2591 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2592 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2593 if (adapter->tx_buffer_area == NULL) {
2594 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2599 /* Create the descriptor buffer dma maps */
2600 for (int i = 0; i < adapter->num_tx_desc; i++) {
2601 tx_buffer = &adapter->tx_buffer_area[i];
2602 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2604 device_printf(dev, "Unable to create TX DMA map\n");
/* -1 marks "no end-of-packet recorded" for this slot. */
2607 tx_buffer->next_eop = -1;
2612 lem_free_transmit_structures(adapter);
2616 /*********************************************************************
2618 * (Re)Initialize transmit structures.
2620 **********************************************************************/
/*
 * NOTE(review): view is incomplete (lines elided, including the netmap
 * #ifdef opening).  Visible behavior: zeroes the TX descriptor ring,
 * unloads/frees any mbufs still attached to tx_buffers, pre-programs
 * descriptor buffer addresses from netmap slots when netmap owns the
 * ring, resets the ring bookkeeping indices, and syncs the ring memory
 * for device access.  Caller must hold the TX lock ("already locked").
 */
2622 lem_setup_transmit_structures(struct adapter *adapter)
2624 struct em_buffer *tx_buffer;
2626 /* we are already locked */
2627 struct netmap_adapter *na = NA(adapter->ifp);
2628 struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
2629 #endif /* DEV_NETMAP */
2631 /* Clear the old ring contents */
2632 bzero(adapter->tx_desc_base,
2633 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2635 /* Free any existing TX buffers */
2636 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2637 tx_buffer = &adapter->tx_buffer_area[i];
2638 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2639 BUS_DMASYNC_POSTWRITE);
2640 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2641 m_freem(tx_buffer->m_head);
2642 tx_buffer->m_head = NULL;
2645 /* the i-th NIC entry goes to slot si */
2646 int si = netmap_idx_n2k(&na->tx_rings[0], i);
2650 addr = PNMB(slot + si, &paddr);
2651 adapter->tx_desc_base[si].buffer_addr = htole64(paddr);
2652 /* reload the map for netmap mode */
2653 netmap_load_map(adapter->txtag, tx_buffer->map, addr);
2655 #endif /* DEV_NETMAP */
2656 tx_buffer->next_eop = -1;
2660 adapter->last_hw_offload = 0;
2661 adapter->next_avail_tx_desc = 0;
2662 adapter->next_tx_to_clean = 0;
2663 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2665 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2666 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2671 /*********************************************************************
2673 * Enable transmit unit.
2675 **********************************************************************/
/*
 * NOTE(review): view is incomplete (lines elided).  Visible behavior:
 * programs TDLEN/TDBAH/TDBAL with the TX ring bus address, zeroes
 * TDH/TDT, selects TIPG timings by MAC type and media (fiber/serdes vs
 * copper), sets TIDV (and TADV on >= 82540), then enables the
 * transmitter via TCTL and seeds txd_cmd with IFCS (+IDE when a TX
 * interrupt delay is configured).
 */
2677 lem_initialize_transmit_unit(struct adapter *adapter)
2682 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
2683 /* Setup the Base and Length of the Tx Descriptor Ring */
2684 bus_addr = adapter->txdma.dma_paddr;
2685 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2686 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2687 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2688 (u32)(bus_addr >> 32));
2689 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2691 /* Setup the HW Tx Head and Tail descriptor pointers */
2692 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2693 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
2695 HW_DEBUGOUT2("Base = %x, Length = %x\n",
2696 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
2697 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
2699 /* Set the default values for the Tx Inter Packet Gap timer */
2700 switch (adapter->hw.mac.type) {
2702 tipg = DEFAULT_82542_TIPG_IPGT;
2703 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2704 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2707 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2708 (adapter->hw.phy.media_type ==
2709 e1000_media_type_internal_serdes))
2710 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2712 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2713 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2714 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2717 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2718 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
2719 if(adapter->hw.mac.type >= e1000_82540)
2720 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
2721 adapter->tx_abs_int_delay.value);
2723 /* Program the Transmit Control Register */
2724 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2725 tctl &= ~E1000_TCTL_CT;
2726 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2727 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2729 /* This write will effectively turn on the transmit unit. */
2730 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2732 /* Setup Transmit Descriptor Base Settings */
2733 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
2735 if (adapter->tx_int_delay.value > 0)
2736 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2739 /*********************************************************************
2741 * Free all transmit related data structures.
2743 **********************************************************************/
/*
 * Tear down everything lem_allocate_transmit_structures() built: per-slot
 * maps and any attached mbufs, the tx_buffer array, the TX DMA tag, and
 * (on FreeBSD >= 8) the buf_ring.  NOTE(review): view is incomplete —
 * some unload/destroy argument lines are elided.
 */
2745 lem_free_transmit_structures(struct adapter *adapter)
2747 struct em_buffer *tx_buffer;
2749 INIT_DEBUGOUT("free_transmit_structures: begin");
2751 if (adapter->tx_buffer_area != NULL) {
2752 for (int i = 0; i < adapter->num_tx_desc; i++) {
2753 tx_buffer = &adapter->tx_buffer_area[i];
2754 if (tx_buffer->m_head != NULL) {
2755 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2756 BUS_DMASYNC_POSTWRITE);
2757 bus_dmamap_unload(adapter->txtag,
2759 m_freem(tx_buffer->m_head);
2760 tx_buffer->m_head = NULL;
2761 } else if (tx_buffer->map != NULL)
2762 bus_dmamap_unload(adapter->txtag,
2764 if (tx_buffer->map != NULL) {
2765 bus_dmamap_destroy(adapter->txtag,
2767 tx_buffer->map = NULL;
2771 if (adapter->tx_buffer_area != NULL) {
2772 free(adapter->tx_buffer_area, M_DEVBUF);
2773 adapter->tx_buffer_area = NULL;
2775 if (adapter->txtag != NULL) {
2776 bus_dma_tag_destroy(adapter->txtag);
2777 adapter->txtag = NULL;
2779 #if __FreeBSD_version >= 800000
2780 if (adapter->br != NULL)
2781 buf_ring_free(adapter->br, M_DEVBUF);
2785 /*********************************************************************
2787 * The offload context needs to be set when we transfer the first
2788 * packet of a particular protocol (TCP/UDP). This routine has been
2789 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2791 * Added back the old method of keeping the current context type
2792 * and not setting if unnecessary, as this is reported to be a
2793 * big performance win. -jfv
2794 **********************************************************************/
/*
 * NOTE(review): view is incomplete (switch/case framing and some branches
 * are elided).  Visible behavior: parses the Ethernet (and optional
 * 802.1Q) header to find the IP payload, fills an e1000 context
 * descriptor for IP/TCP/UDP checksum offload, skips emitting a context
 * when last_hw_offload already matches, and consumes one TX descriptor
 * for the context (advancing next_avail_tx_desc).
 */
2796 lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2797 u32 *txd_upper, u32 *txd_lower)
2799 struct e1000_context_desc *TXD = NULL;
2800 struct em_buffer *tx_buffer;
2801 struct ether_vlan_header *eh;
2802 struct ip *ip = NULL;
2803 struct ip6_hdr *ip6;
2804 int curr_txd, ehdrlen;
2805 u32 cmd, hdr_len, ip_hlen;
2810 cmd = hdr_len = ipproto = 0;
2811 *txd_upper = *txd_lower = 0;
2812 curr_txd = adapter->next_avail_tx_desc;
2815 * Determine where frame payload starts.
2816 * Jump over vlan headers if already present,
2817 * helpful for QinQ too.
2819 eh = mtod(mp, struct ether_vlan_header *);
2820 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2821 etype = ntohs(eh->evl_proto);
2822 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2824 etype = ntohs(eh->evl_encap_proto);
2825 ehdrlen = ETHER_HDR_LEN;
2829 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
2830 * TODO: Support SCTP too when it hits the tree.
2834 ip = (struct ip *)(mp->m_data + ehdrlen);
2835 ip_hlen = ip->ip_hl << 2;
2837 /* Setup of IP header checksum. */
2838 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
2840 * Start offset for header checksum calculation.
2841 * End offset for header checksum calculation.
2842 * Offset of place to put the checksum.
2844 TXD = (struct e1000_context_desc *)
2845 &adapter->tx_desc_base[curr_txd];
2846 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2847 TXD->lower_setup.ip_fields.ipcse =
2848 htole16(ehdrlen + ip_hlen);
2849 TXD->lower_setup.ip_fields.ipcso =
2850 ehdrlen + offsetof(struct ip, ip_sum);
2851 cmd |= E1000_TXD_CMD_IP;
2852 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2855 hdr_len = ehdrlen + ip_hlen;
2859 case ETHERTYPE_IPV6:
2860 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2861 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
2863 /* IPv6 doesn't have a header checksum. */
2865 hdr_len = ehdrlen + ip_hlen;
2866 ipproto = ip6->ip6_nxt;
2875 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2876 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2877 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2878 /* no need for context if already set */
2879 if (adapter->last_hw_offload == CSUM_TCP)
2881 adapter->last_hw_offload = CSUM_TCP;
2883 * Start offset for payload checksum calculation.
2884 * End offset for payload checksum calculation.
2885 * Offset of place to put the checksum.
2887 TXD = (struct e1000_context_desc *)
2888 &adapter->tx_desc_base[curr_txd];
2889 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2890 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2891 TXD->upper_setup.tcp_fields.tucso =
2892 hdr_len + offsetof(struct tcphdr, th_sum);
2893 cmd |= E1000_TXD_CMD_TCP;
2898 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2899 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2900 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2901 /* no need for context if already set */
2902 if (adapter->last_hw_offload == CSUM_UDP)
2904 adapter->last_hw_offload = CSUM_UDP;
2906 * Start offset for header checksum calculation.
2907 * End offset for header checksum calculation.
2908 * Offset of place to put the checksum.
2910 TXD = (struct e1000_context_desc *)
2911 &adapter->tx_desc_base[curr_txd];
2912 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2913 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2914 TXD->upper_setup.tcp_fields.tucso =
2915 hdr_len + offsetof(struct udphdr, uh_sum);
/* Finalize the context descriptor and consume one TX slot. */
2925 TXD->tcp_seg_setup.data = htole32(0);
2926 TXD->cmd_and_length =
2927 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
2928 tx_buffer = &adapter->tx_buffer_area[curr_txd];
2929 tx_buffer->m_head = NULL;
2930 tx_buffer->next_eop = -1;
2932 if (++curr_txd == adapter->num_tx_desc)
2935 adapter->num_tx_desc_avail--;
2936 adapter->next_avail_tx_desc = curr_txd;
2940 /**********************************************************************
2942 * Examine each tx_buffer in the used queue. If the hardware is done
2943 * processing the packet then free associated resources. The
2944 * tx_buffer is put back on the free queue.
2946 **********************************************************************/
/*
 * NOTE(review): view is incomplete (netmap #ifdef opening, wrap-arounds
 * and some statements elided).  Visible behavior: with the TX lock held,
 * walks completed packets (EOP descriptor has DD set), zeroes the cleaned
 * descriptors, unloads maps and frees mbufs, then publishes
 * next_tx_to_clean / num_tx_desc_avail; clears IFF_DRV_OACTIVE once past
 * EM_TX_CLEANUP_THRESHOLD and disarms the watchdog when the ring is empty.
 */
2948 lem_txeof(struct adapter *adapter)
2950 int first, last, done, num_avail;
2951 struct em_buffer *tx_buffer;
2952 struct e1000_tx_desc *tx_desc, *eop_desc;
2953 struct ifnet *ifp = adapter->ifp;
2955 EM_TX_LOCK_ASSERT(adapter);
2958 if (ifp->if_capenable & IFCAP_NETMAP) {
2959 selwakeuppri(&NA(ifp)->tx_rings[0].si, PI_NET);
2962 #endif /* DEV_NETMAP */
2963 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2966 num_avail = adapter->num_tx_desc_avail;
2967 first = adapter->next_tx_to_clean;
2968 tx_desc = &adapter->tx_desc_base[first];
2969 tx_buffer = &adapter->tx_buffer_area[first];
2970 last = tx_buffer->next_eop;
2971 eop_desc = &adapter->tx_desc_base[last];
2974 * What this does is get the index of the
2975 * first descriptor AFTER the EOP of the
2976 * first packet, that way we can do the
2977 * simple comparison on the inner while loop.
2979 if (++last == adapter->num_tx_desc)
2983 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2984 BUS_DMASYNC_POSTREAD);
2986 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2987 /* We clean the range of the packet */
2988 while (first != done) {
2989 tx_desc->upper.data = 0;
2990 tx_desc->lower.data = 0;
2991 tx_desc->buffer_addr = 0;
2994 if (tx_buffer->m_head) {
2996 bus_dmamap_sync(adapter->txtag,
2998 BUS_DMASYNC_POSTWRITE);
2999 bus_dmamap_unload(adapter->txtag,
3002 m_freem(tx_buffer->m_head);
3003 tx_buffer->m_head = NULL;
3005 tx_buffer->next_eop = -1;
/* Progress was made; reset the watchdog timestamp. */
3006 adapter->watchdog_time = ticks;
3008 if (++first == adapter->num_tx_desc)
3011 tx_buffer = &adapter->tx_buffer_area[first];
3012 tx_desc = &adapter->tx_desc_base[first];
3014 /* See if we can continue to the next packet */
3015 last = tx_buffer->next_eop;
3017 eop_desc = &adapter->tx_desc_base[last];
3018 /* Get new done point */
3019 if (++last == adapter->num_tx_desc) last = 0;
3024 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3025 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3027 adapter->next_tx_to_clean = first;
3028 adapter->num_tx_desc_avail = num_avail;
3031 * If we have enough room, clear IFF_DRV_OACTIVE to
3032 * tell the stack that it is OK to send packets.
3033 * If there are no pending descriptors, clear the watchdog.
3035 if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
3036 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3037 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
3038 adapter->watchdog_check = FALSE;
3044 /*********************************************************************
3046 * When Link is lost sometimes there is work still in the TX ring
3047 * which may result in a watchdog, rather than allow that we do an
3048 * attempted cleanup and then reinit here. Note that this has been
3049 * seen mostly with fiber adapters.
3051 **********************************************************************/
/*
 * Only acts when link is down AND the watchdog is armed; takes the TX
 * lock (the elided line between presumably runs lem_txeof — TODO confirm)
 * and, if work is still outstanding afterwards, reinitializes the
 * adapter via lem_init_locked().
 */
3053 lem_tx_purge(struct adapter *adapter)
3055 if ((!adapter->link_active) && (adapter->watchdog_check)) {
3056 EM_TX_LOCK(adapter);
3058 EM_TX_UNLOCK(adapter);
3059 if (adapter->watchdog_check) /* Still outstanding? */
3060 lem_init_locked(adapter);
3064 /*********************************************************************
3066 * Get a buffer from system mbuf buffer pool.
3068 **********************************************************************/
/*
 * Allocate a fresh mbuf cluster for RX slot i and load it via the spare
 * map, then swap the spare with the slot's map so the old map becomes
 * the new spare.  The cluster is ETHER_ALIGN-adjusted only when the MTU
 * still fits afterwards.  Descriptor i's buffer_addr is updated with the
 * new segment address.  NOTE(review): error-return lines are elided.
 */
3070 lem_get_buf(struct adapter *adapter, int i)
3073 bus_dma_segment_t segs[1];
3075 struct em_buffer *rx_buffer;
3078 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3080 adapter->mbuf_cluster_failed++;
3083 m->m_len = m->m_pkthdr.len = MCLBYTES;
3085 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3086 m_adj(m, ETHER_ALIGN);
3089 * Using memory from the mbuf cluster pool, invoke the
3090 * bus_dma machinery to arrange the memory mapping.
3092 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3093 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3099 /* If nsegs is wrong then the stack is corrupt. */
3100 KASSERT(nsegs == 1, ("Too many segments returned!"));
3102 rx_buffer = &adapter->rx_buffer_area[i];
3103 if (rx_buffer->m_head != NULL)
3104 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap spare map in; the old slot map becomes the new spare. */
3106 map = rx_buffer->map;
3107 rx_buffer->map = adapter->rx_sparemap;
3108 adapter->rx_sparemap = map;
3109 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3110 rx_buffer->m_head = m;
3112 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3116 /*********************************************************************
3118 * Allocate memory for rx_buffer structures. Since we use one
3119 * rx_buffer per received packet, the maximum number of rx_buffer's
3120 * that we'll need is equal to the number of receive descriptors
3121 * that we've allocated.
3123 **********************************************************************/
/*
 * NOTE(review): view is incomplete (tag-create argument lines and error
 * returns elided).  Visible behavior: allocates the rx_buffer array
 * zeroed, creates the single-segment MCLBYTES RX DMA tag, creates the
 * spare map used by lem_get_buf(), then one dmamap per RX descriptor;
 * on failure lem_free_receive_structures() performs cleanup.
 */
3125 lem_allocate_receive_structures(struct adapter *adapter)
3127 device_t dev = adapter->dev;
3128 struct em_buffer *rx_buffer;
3131 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3132 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3133 if (adapter->rx_buffer_area == NULL) {
3134 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3138 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3139 1, 0, /* alignment, bounds */
3140 BUS_SPACE_MAXADDR, /* lowaddr */
3141 BUS_SPACE_MAXADDR, /* highaddr */
3142 NULL, NULL, /* filter, filterarg */
3143 MCLBYTES, /* maxsize */
3145 MCLBYTES, /* maxsegsize */
3147 NULL, /* lockfunc */
3151 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3156 /* Create the spare map (used by getbuf) */
3157 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3158 &adapter->rx_sparemap);
3160 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3165 rx_buffer = adapter->rx_buffer_area;
3166 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3167 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3170 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3179 lem_free_receive_structures(adapter);
3183 /*********************************************************************
3185 * (Re)initialize receive structures.
3187 **********************************************************************/
/*
 * NOTE(review): view is incomplete (netmap #ifdef opening and error
 * handling elided).  Visible behavior: zeroes the RX descriptor ring,
 * frees any mbufs currently attached, then repopulates every slot —
 * from netmap slot buffers when netmap owns the ring, otherwise via
 * lem_get_buf() — resets next_rx_desc_to_check and syncs the ring.
 * Caller holds the lock ("already under lock").
 */
3189 lem_setup_receive_structures(struct adapter *adapter)
3191 struct em_buffer *rx_buffer;
3194 /* we are already under lock */
3195 struct netmap_adapter *na = NA(adapter->ifp);
3196 struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
3199 /* Reset descriptor ring */
3200 bzero(adapter->rx_desc_base,
3201 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3203 /* Free current RX buffers. */
3204 rx_buffer = adapter->rx_buffer_area;
3205 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3206 if (rx_buffer->m_head != NULL) {
3207 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3208 BUS_DMASYNC_POSTREAD);
3209 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3210 m_freem(rx_buffer->m_head);
3211 rx_buffer->m_head = NULL;
3215 /* Allocate new ones. */
3216 for (i = 0; i < adapter->num_rx_desc; i++) {
3219 /* the i-th NIC entry goes to slot si */
3220 int si = netmap_idx_n2k(&na->rx_rings[0], i);
3224 addr = PNMB(slot + si, &paddr);
3225 netmap_load_map(adapter->rxtag, rx_buffer->map, addr);
3226 /* Update descriptor */
3227 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
3230 #endif /* DEV_NETMAP */
3231 error = lem_get_buf(adapter, i);
3236 /* Setup our descriptor pointers */
3237 adapter->next_rx_desc_to_check = 0;
3238 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3239 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3244 /*********************************************************************
3246 * Enable receive unit.
3248 **********************************************************************/
/* Interrupt throttling: ITR in 256ns units, capped at 8000 ints/sec. */
3249 #define MAX_INTS_PER_SEC 8000
3250 #define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256)
/*
 * NOTE(review): view is incomplete (lines elided).  Visible behavior:
 * disables RX while programming RDLEN/RDBAH/RDBAL, sets RADV/ITR on
 * >= 82540 MACs, builds RCTL from the buffer length (2048/4096/8192/
 * 16384, LPE for jumbo MTU), enables RX checksum offload on >= 82543
 * when IFCAP_RXCSUM is set, re-enables RX, and programs RDH/RDT —
 * honoring netmap's reserved buffers when netmap owns the ring.
 */
3253 lem_initialize_receive_unit(struct adapter *adapter)
3255 struct ifnet *ifp = adapter->ifp;
3259 INIT_DEBUGOUT("lem_initialize_receive_unit: begin");
3262 * Make sure receives are disabled while setting
3263 * up the descriptor ring
3265 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3266 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3268 if (adapter->hw.mac.type >= e1000_82540) {
3269 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
3270 adapter->rx_abs_int_delay.value);
3272 * Set the interrupt throttling rate. Value is calculated
3273 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3275 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
3278 /* Setup the Base and Length of the Rx Descriptor Ring */
3279 bus_addr = adapter->rxdma.dma_paddr;
3280 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3281 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3282 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3283 (u32)(bus_addr >> 32));
3284 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3287 /* Setup the Receive Control Register */
3288 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3289 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3290 E1000_RCTL_RDMTS_HALF |
3291 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3293 /* Make sure VLAN Filters are off */
3294 rctl &= ~E1000_RCTL_VFE;
3296 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3297 rctl |= E1000_RCTL_SBP;
3299 rctl &= ~E1000_RCTL_SBP;
3301 switch (adapter->rx_buffer_len) {
3304 rctl |= E1000_RCTL_SZ_2048;
3307 rctl |= E1000_RCTL_SZ_4096 |
3308 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3311 rctl |= E1000_RCTL_SZ_8192 |
3312 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3315 rctl |= E1000_RCTL_SZ_16384 |
3316 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3320 if (ifp->if_mtu > ETHERMTU)
3321 rctl |= E1000_RCTL_LPE;
3323 rctl &= ~E1000_RCTL_LPE;
3325 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
3326 if ((adapter->hw.mac.type >= e1000_82543) &&
3327 (ifp->if_capenable & IFCAP_RXCSUM)) {
3328 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3329 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3330 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3333 /* Enable Receives */
3334 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3337 * Setup the HW Rx Head and
3338 * Tail Descriptor Pointers
3340 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
3342 /* preserve buffers already made available to clients */
3343 if (ifp->if_capenable & IFCAP_NETMAP) {
3344 struct netmap_adapter *na = NA(adapter->ifp);
3345 struct netmap_kring *kring = &na->rx_rings[0];
3346 int t = na->num_rx_desc - 1 - kring->nr_hwavail;
3348 if (t >= na->num_rx_desc)
3349 t -= na->num_rx_desc;
3350 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), t);
3352 #endif /* DEV_NETMAP */
3353 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
3358 /*********************************************************************
3360 * Free receive related data structures.
3362 **********************************************************************/
/*
 * Tear down everything lem_allocate_receive_structures() built: the
 * spare map, each slot's map and mbuf, the rx_buffer array, and the RX
 * DMA tag.  Every pointer is NULLed after release so a second call is
 * harmless.  NOTE(review): some unload/destroy argument lines elided.
 */
3364 lem_free_receive_structures(struct adapter *adapter)
3366 struct em_buffer *rx_buffer;
3369 INIT_DEBUGOUT("free_receive_structures: begin");
3371 if (adapter->rx_sparemap) {
3372 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3373 adapter->rx_sparemap = NULL;
3376 /* Cleanup any existing buffers */
3377 if (adapter->rx_buffer_area != NULL) {
3378 rx_buffer = adapter->rx_buffer_area;
3379 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3380 if (rx_buffer->m_head != NULL) {
3381 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3382 BUS_DMASYNC_POSTREAD);
3383 bus_dmamap_unload(adapter->rxtag,
3385 m_freem(rx_buffer->m_head);
3386 rx_buffer->m_head = NULL;
3387 } else if (rx_buffer->map != NULL)
3388 bus_dmamap_unload(adapter->rxtag,
3390 if (rx_buffer->map != NULL) {
3391 bus_dmamap_destroy(adapter->rxtag,
3393 rx_buffer->map = NULL;
3398 if (adapter->rx_buffer_area != NULL) {
3399 free(adapter->rx_buffer_area, M_DEVBUF);
3400 adapter->rx_buffer_area = NULL;
3403 if (adapter->rxtag != NULL) {
3404 bus_dma_tag_destroy(adapter->rxtag);
3405 adapter->rxtag = NULL;
3409 /*********************************************************************
3411 * This routine executes in interrupt context. It replenishes
3412 * the mbufs in the descriptor and sends data which has been
3413 * dma'ed into host memory to upper layer.
3415 * We loop at most count times if count is > 0, or until done if
3418 * For polling we also now return the number of cleaned packets
3419 *********************************************************************/
/*
 * NOTE(review): this extract is missing interleaved source lines (braces,
 * some statements); comments below describe only what is visible here.
 */
3421 lem_rxeof(struct adapter *adapter, int count, int *done)
3423 struct ifnet *ifp = adapter->ifp;
3425 u8 status = 0, accept_frame = 0, eop = 0;
3426 u16 len, desc_len, prev_len_adj;
3428 struct e1000_rx_desc *current_desc;
3430 EM_RX_LOCK(adapter);
3431 i = adapter->next_rx_desc_to_check;
3432 current_desc = &adapter->rx_desc_base[i];
/* Make the descriptor ring visible to the CPU before reading status bits. */
3433 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3434 BUS_DMASYNC_POSTREAD);
/* netmap mode: hand the ring to netmap and bail out of the normal path. */
3437 if (ifp->if_capenable & IFCAP_NETMAP) {
3438 struct netmap_adapter *na = NA(ifp);
3439 na->rx_rings[0].nr_kflags |= NKR_PENDINTR;
3440 selwakeuppri(&na->rx_rings[0].si, PI_NET);
3441 EM_RX_UNLOCK(adapter);
3444 #endif /* DEV_NETMAP */
/* Nothing to do if the first descriptor has not been written back yet. */
3446 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3449 EM_RX_UNLOCK(adapter);
/* Main clean loop: process up to 'count' completed descriptors. */
3453 while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3454 struct mbuf *m = NULL;
3456 status = current_desc->status;
3457 if ((status & E1000_RXD_STAT_DD) == 0)
3460 mp = adapter->rx_buffer_area[i].m_head;
3462 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3463 * needs to access the last received byte in the mbuf.
3465 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3466 BUS_DMASYNC_POSTREAD);
3470 desc_len = le16toh(current_desc->length);
/* End-of-packet descriptor: strip the trailing Ethernet CRC. */
3471 if (status & E1000_RXD_STAT_EOP) {
3474 if (desc_len < ETHER_CRC_LEN) {
/* CRC spilled into this fragment; shorten the previous mbuf instead. */
3476 prev_len_adj = ETHER_CRC_LEN - desc_len;
3478 len = desc_len - ETHER_CRC_LEN;
/* Hardware flagged an error; TBI workaround may still accept the frame. */
3484 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3486 u32 pkt_len = desc_len;
3488 if (adapter->fmp != NULL)
3489 pkt_len += adapter->fmp->m_pkthdr.len;
3491 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3492 if (TBI_ACCEPT(&adapter->hw, status,
3493 current_desc->errors, pkt_len, last_byte,
3494 adapter->min_frame_size, adapter->max_frame_size)) {
3495 e1000_tbi_adjust_stats_82543(&adapter->hw,
3496 &adapter->stats, pkt_len,
3497 adapter->hw.mac.addr,
3498 adapter->max_frame_size);
/* Replenish this ring slot with a fresh mbuf before passing data up. */
3506 if (lem_get_buf(adapter, i) != 0) {
3511 /* Assign correct length to the current fragment */
3514 if (adapter->fmp == NULL) {
3515 mp->m_pkthdr.len = len;
3516 adapter->fmp = mp; /* Store the first mbuf */
3519 /* Chain mbuf's together */
3520 mp->m_flags &= ~M_PKTHDR;
3522 * Adjust length of previous mbuf in chain if
3523 * we received less than 4 bytes in the last
3526 if (prev_len_adj > 0) {
3527 adapter->lmp->m_len -= prev_len_adj;
3528 adapter->fmp->m_pkthdr.len -=
3531 adapter->lmp->m_next = mp;
3532 adapter->lmp = adapter->lmp->m_next;
3533 adapter->fmp->m_pkthdr.len += len;
/* Complete frame assembled: finish header, checksum, VLAN tag. */
3537 adapter->fmp->m_pkthdr.rcvif = ifp;
3539 lem_receive_checksum(adapter, current_desc,
3541 #ifndef __NO_STRICT_ALIGNMENT
3542 if (adapter->max_frame_size >
3543 (MCLBYTES - ETHER_ALIGN) &&
3544 lem_fixup_rx(adapter) != 0)
3547 if (status & E1000_RXD_STAT_VP) {
3548 adapter->fmp->m_pkthdr.ether_vtag =
3549 le16toh(current_desc->special);
3550 adapter->fmp->m_flags |= M_VLANTAG;
3552 #ifndef __NO_STRICT_ALIGNMENT
3556 adapter->fmp = NULL;
3557 adapter->lmp = NULL;
/* Frame rejected: count the drop and recycle the existing buffer. */
3560 adapter->dropped_pkts++;
3562 /* Reuse loaded DMA map and just update mbuf chain */
3563 mp = adapter->rx_buffer_area[i].m_head;
3564 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3565 mp->m_data = mp->m_ext.ext_buf;
3567 if (adapter->max_frame_size <=
3568 (MCLBYTES - ETHER_ALIGN))
3569 m_adj(mp, ETHER_ALIGN);
/* Toss any partially assembled chain belonging to the bad frame. */
3570 if (adapter->fmp != NULL) {
3571 m_freem(adapter->fmp);
3572 adapter->fmp = NULL;
3573 adapter->lmp = NULL;
3578 /* Zero out the receive descriptors status. */
3579 current_desc->status = 0;
3580 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3581 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3583 /* Advance our pointers to the next descriptor. */
3584 if (++i == adapter->num_rx_desc)
3586 /* Call into the stack */
/* Drop the RX lock across if_input(); the stack may re-enter the driver. */
3588 adapter->next_rx_desc_to_check = i;
3589 EM_RX_UNLOCK(adapter);
3590 (*ifp->if_input)(ifp, m);
3591 EM_RX_LOCK(adapter);
/* Re-read the ring index: it may have moved while unlocked. */
3593 i = adapter->next_rx_desc_to_check;
3595 current_desc = &adapter->rx_desc_base[i];
3597 adapter->next_rx_desc_to_check = i;
3599 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
3601 i = adapter->num_rx_desc - 1;
3602 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3605 EM_RX_UNLOCK(adapter);
3606 return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
3609 #ifndef __NO_STRICT_ALIGNMENT
3611 * When jumbo frames are enabled we should realign entire payload on
3612 * architectures with strict alignment. This is serious design mistake of 8254x
3613 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3614 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3615 * payload. On architectures without strict alignment restrictions 8254x still
3616 * performs unaligned memory access which would reduce the performance too.
3617 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3618 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3619 * existing mbuf chain.
3621 * Be aware, best performance of the 8254x is achieved only when jumbo frame is
3622 * not used at all on architectures with strict alignment.
3625 lem_fixup_rx(struct adapter *adapter)
/* Fast path: room in the cluster, shift payload in place by ETHER_HDR_LEN. */
3632 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3633 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3634 m->m_data += ETHER_HDR_LEN;
/* Slow path: prepend a new header mbuf holding just the Ethernet header. */
3636 MGETHDR(n, M_NOWAIT, MT_DATA);
3638 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3639 m->m_data += ETHER_HDR_LEN;
3640 m->m_len -= ETHER_HDR_LEN;
3641 n->m_len = ETHER_HDR_LEN;
3642 M_MOVE_PKTHDR(n, m);
/* Allocation failed: drop the frame and account for it. */
3646 adapter->dropped_pkts++;
3647 m_freem(adapter->fmp);
3648 adapter->fmp = NULL;
3657 /*********************************************************************
3659 * Verify that the hardware indicated that the checksum is valid.
3660 * Inform the stack about the status of checksum so that stack
3661 * doesn't spend time verifying the checksum.
3663 *********************************************************************/
3665 lem_receive_checksum(struct adapter *adapter,
3666 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3668 /* 82543 or newer only */
3669 if ((adapter->hw.mac.type < e1000_82543) ||
3670 /* Ignore Checksum bit is set */
3671 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
/* No hardware checksum info available; leave verification to the stack. */
3672 mp->m_pkthdr.csum_flags = 0;
/* IP header checksum was computed by hardware. */
3676 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3678 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3679 /* IP Checksum Good */
3680 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3681 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3684 mp->m_pkthdr.csum_flags = 0;
/* TCP/UDP checksum: mark data valid with a pre-verified pseudo-header. */
3688 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3690 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3691 mp->m_pkthdr.csum_flags |=
3692 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3693 mp->m_pkthdr.csum_data = htons(0xffff);
3699 * This routine is run via a vlan config event: record a newly
/* registered VLAN ID in the shadow VFTA and reprogram the hardware filter. */
3703 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3705 struct adapter *adapter = ifp->if_softc;
3708 if (ifp->if_softc != arg) /* Not our event */
3711 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
3714 EM_CORE_LOCK(adapter);
/* VFTA is an array of 32-bit words: word index = vtag >> 5, bit = low 5 bits. */
3715 index = (vtag >> 5) & 0x7F;
3717 adapter->shadow_vfta[index] |= (1 << bit);
3718 ++adapter->num_vlans;
3719 /* Re-init to load the changes */
3720 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3721 lem_init_locked(adapter);
3722 EM_CORE_UNLOCK(adapter);
3726 * This routine is run via a vlan unconfig event: clear the VLAN ID's
/* bit in the shadow VFTA and reprogram the hardware filter. */
3730 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3732 struct adapter *adapter = ifp->if_softc;
3735 if (ifp->if_softc != arg)
3738 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3741 EM_CORE_LOCK(adapter);
/* Mirror of lem_register_vlan(): same word/bit addressing into the VFTA. */
3742 index = (vtag >> 5) & 0x7F;
3744 adapter->shadow_vfta[index] &= ~(1 << bit);
3745 --adapter->num_vlans;
3746 /* Re-init to load the changes */
3747 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3748 lem_init_locked(adapter);
3749 EM_CORE_UNLOCK(adapter);
/* Reprogram VLAN hardware state (VFTA, CTRL.VME, RCTL filter) after reset. */
3753 lem_setup_vlan_hw_support(struct adapter *adapter)
3755 struct e1000_hw *hw = &adapter->hw;
3759 ** We get here thru init_locked, meaning
3760 ** a soft reset, this has already cleared
3761 ** the VFTA and other state, so if there
3762 ** have been no vlan's registered do nothing.
3764 if (adapter->num_vlans == 0)
3768 ** A soft reset zero's out the VFTA, so
3769 ** we need to repopulate it now.
3771 for (int i = 0; i < EM_VFTA_SIZE; i++)
3772 if (adapter->shadow_vfta[i] != 0)
3773 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3774 i, adapter->shadow_vfta[i]);
/* Enable hardware VLAN tag stripping/insertion. */
3776 reg = E1000_READ_REG(hw, E1000_CTRL);
3777 reg |= E1000_CTRL_VME;
3778 E1000_WRITE_REG(hw, E1000_CTRL, reg);
3780 /* Enable the Filter Table */
3781 reg = E1000_READ_REG(hw, E1000_RCTL);
3782 reg &= ~E1000_RCTL_CFIEN;
3783 reg |= E1000_RCTL_VFE;
3784 E1000_WRITE_REG(hw, E1000_RCTL, reg);
/* Unmask the standard interrupt causes via the Interrupt Mask Set register. */
3788 lem_enable_intr(struct adapter *adapter)
3790 struct e1000_hw *hw = &adapter->hw;
3791 u32 ims_mask = IMS_ENABLE_MASK;
3793 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
/* Mask all interrupt causes by writing the full Interrupt Mask Clear register. */
3797 lem_disable_intr(struct adapter *adapter)
3799 struct e1000_hw *hw = &adapter->hw;
3801 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3805 * Bit of a misnomer, what this really means is
3806 * to enable OS management of the system... aka
3807 * to disable special hardware management features
3810 lem_init_manageability(struct adapter *adapter)
3812 /* A shared code workaround */
3813 if (adapter->has_manage) {
3814 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3815 /* disable hardware interception of ARP */
3816 manc &= ~(E1000_MANC_ARP_EN);
3817 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3822 * Give control back to hardware management
3823 * controller if there is one.
3826 lem_release_manageability(struct adapter *adapter)
3828 if (adapter->has_manage) {
3829 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3831 /* re-enable hardware interception of ARP */
3832 manc |= E1000_MANC_ARP_EN;
3833 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3838 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3839 * For ASF and Pass Through versions of f/w this means
3840 * that the driver is loaded. For AMT version type f/w
3841 * this means that the network i/f is open.
3844 lem_get_hw_control(struct adapter *adapter)
/* Read-modify-write CTRL_EXT to assert DRV_LOAD. */
3848 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3849 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3850 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3855 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3856 * For ASF and Pass Through versions of f/w this means that
3857 * the driver is no longer loaded. For AMT versions of the
3858 * f/w this means that the network i/f is closed.
3861 lem_release_hw_control(struct adapter *adapter)
/* Nothing to release when no manageability firmware is present. */
3865 if (!adapter->has_manage)
3868 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3869 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3870 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
/* Reject multicast/broadcast (low bit of first octet set) and all-zero MACs. */
3875 lem_is_valid_ether_addr(u8 *addr)
3877 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3879 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3887 ** Parse the interface capabilities with regard
3888 ** to both system management and wake-on-lan for
3892 lem_get_wakeup(device_t dev)
3894 struct adapter *adapter = device_get_softc(dev);
3895 u16 eeprom_data = 0, device_id, apme_mask;
3897 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
3898 apme_mask = EM_EEPROM_APME;
/* Where the APME bit lives in NVM varies by MAC type (case labels not all
 * visible in this extract). */
3900 switch (adapter->hw.mac.type) {
3905 e1000_read_nvm(&adapter->hw,
3906 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
3907 apme_mask = EM_82544_APME;
3910 case e1000_82546_rev_3:
/* Dual-port parts keep per-port settings in separate NVM words. */
3911 if (adapter->hw.bus.func == 1) {
3912 e1000_read_nvm(&adapter->hw,
3913 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3916 e1000_read_nvm(&adapter->hw,
3917 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3920 e1000_read_nvm(&adapter->hw,
3921 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
/* APME set in NVM: default to magic-packet and multicast wake. */
3924 if (eeprom_data & apme_mask)
3925 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
3927 * We have the eeprom settings, now apply the special cases
3928 * where the eeprom may be wrong or the board won't support
3929 * wake on lan on a particular port
3931 device_id = pci_get_device(dev);
3932 switch (device_id) {
3933 case E1000_DEV_ID_82546GB_PCIE:
3936 case E1000_DEV_ID_82546EB_FIBER:
3937 case E1000_DEV_ID_82546GB_FIBER:
3938 /* Wake events only supported on port A for dual fiber
3939 * regardless of eeprom setting */
3940 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
3941 E1000_STATUS_FUNC_1)
3944 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
3945 /* if quad port adapter, disable WoL on all but port A */
3946 if (global_quad_port_a != 0)
3948 /* Reset for multiple quad port adapters */
3949 if (++global_quad_port_a == 4)
3950 global_quad_port_a = 0;
3958 * Enable PCI Wake On Lan capability
3961 lem_enable_wakeup(device_t dev)
3963 struct adapter *adapter = device_get_softc(dev);
3964 struct ifnet *ifp = adapter->ifp;
3965 u32 pmc, ctrl, ctrl_ext, rctl;
/* Bail out if the device exposes no PCI power-management capability. */
3968 if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
3971 /* Advertise the wakeup capability */
3972 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
3973 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
3974 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
3975 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
3977 /* Keep the laser running on Fiber adapters */
3978 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
3979 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
3980 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3981 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
3982 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
3986 ** Determine type of Wakeup: note that wol
3987 ** is set with all bits on by default.
3989 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
3990 adapter->wol &= ~E1000_WUFC_MAG;
3992 if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
3993 adapter->wol &= ~E1000_WUFC_MC;
/* Multicast wake needs promiscuous-multicast receive enabled. */
3995 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3996 rctl |= E1000_RCTL_MPE;
3997 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
/* pchlan parts do wakeup in the PHY; older parts use MAC WUC/WUFC. */
4000 if (adapter->hw.mac.type == e1000_pchlan) {
4001 if (lem_enable_phy_wakeup(adapter))
4004 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4005 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
/* Finally, request PME assertion through PCI power-management config space. */
4010 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4011 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4012 if (ifp->if_capenable & IFCAP_WOL)
4013 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4014 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
4020 ** WOL in the newer chipset interfaces (pchlan)
4021 ** require thing to be copied into the phy
4024 lem_enable_phy_wakeup(struct adapter *adapter)
4026 struct e1000_hw *hw = &adapter->hw;
4030 /* copy MAC RARs to PHY RARs */
4031 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4032 mreg = E1000_READ_REG(hw, E1000_RAL(i));
4033 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
4034 e1000_write_phy_reg(hw, BM_RAR_M(i),
4035 (u16)((mreg >> 16) & 0xFFFF));
4036 mreg = E1000_READ_REG(hw, E1000_RAH(i));
4037 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
4038 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
4039 (u16)((mreg >> 16) & 0xFFFF));
4042 /* copy MAC MTA to PHY MTA */
4043 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4044 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4045 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4046 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4047 (u16)((mreg >> 16) & 0xFFFF));
4050 /* configure PHY Rx Control register */
/* Translate each relevant MAC RCTL bit into its PHY-side BM_RCTL equivalent. */
4051 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
4052 mreg = E1000_READ_REG(hw, E1000_RCTL);
4053 if (mreg & E1000_RCTL_UPE)
4054 preg |= BM_RCTL_UPE;
4055 if (mreg & E1000_RCTL_MPE)
4056 preg |= BM_RCTL_MPE;
4057 preg &= ~(BM_RCTL_MO_MASK);
4058 if (mreg & E1000_RCTL_MO_3)
4059 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4060 << BM_RCTL_MO_SHIFT);
4061 if (mreg & E1000_RCTL_BAM)
4062 preg |= BM_RCTL_BAM;
4063 if (mreg & E1000_RCTL_PMCF)
4064 preg |= BM_RCTL_PMCF;
4065 mreg = E1000_READ_REG(hw, E1000_CTRL);
4066 if (mreg & E1000_CTRL_RFCE)
4067 preg |= BM_RCTL_RFCE;
4068 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
4070 /* enable PHY wakeup in MAC register */
4071 E1000_WRITE_REG(hw, E1000_WUC,
4072 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4073 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
4075 /* configure and enable PHY wakeup in PHY registers */
4076 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
4077 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4079 /* activate PHY wakeup */
/* Direct MDIC access below requires exclusive PHY ownership. */
4080 ret = hw->phy.ops.acquire(hw);
4082 printf("Could not acquire PHY\n");
/* Select wakeup-control page 769 and set the host-wakeup enable bits. */
4085 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4086 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4087 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
4089 printf("Could not read PHY page 769\n");
4092 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4093 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
4095 printf("Could not set PHY Host Wakeup bit\n");
4097 hw->phy.ops.release(hw);
/* led(4) callback: turn the identify LED on or off under the core lock. */
4103 lem_led_func(void *arg, int onoff)
4105 struct adapter *adapter = arg;
4107 EM_CORE_LOCK(adapter);
4109 e1000_setup_led(&adapter->hw);
4110 e1000_led_on(&adapter->hw);
4112 e1000_led_off(&adapter->hw);
4113 e1000_cleanup_led(&adapter->hw);
4115 EM_CORE_UNLOCK(adapter);
4118 /*********************************************************************
4119 * 82544 Coexistence issue workaround.
4120 * There are 2 issues.
4121 * 1. Transmit Hang issue.
4122 * To detect this issue, following equation can be used...
4123 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4124 * If SUM[3:0] is in between 1 to 4, we will have this issue.
4127 * To detect this issue, following equation can be used...
4128 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4129 * If SUM[3:0] is in between 9 to c, we will have this issue.
4133 * Make sure we do not have ending address
4134 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4136 *************************************************************************/
4138 lem_fill_descriptors (bus_addr_t address, u32 length,
4139 PDESC_ARRAY desc_array)
4141 u32 safe_terminator;
4143 /* Since issue is sensitive to length and address.*/
4144 /* Let us first check the address...*/
/* Short buffer: a single descriptor cannot trigger the erratum. */
4146 desc_array->descriptor[0].address = address;
4147 desc_array->descriptor[0].length = length;
4148 desc_array->elements = 1;
4149 return (desc_array->elements);
/* Compute SUM[3:0] = ADDR[2:0] + SIZE[3:0] from the comment's equation. */
4151 safe_terminator = (u32)((((u32)address & 0x7) +
4152 (length & 0xF)) & 0xF);
4153 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4154 if (safe_terminator == 0 ||
4155 (safe_terminator > 4 &&
4156 safe_terminator < 9) ||
4157 (safe_terminator > 0xC &&
4158 safe_terminator <= 0xF)) {
4159 desc_array->descriptor[0].address = address;
4160 desc_array->descriptor[0].length = length;
4161 desc_array->elements = 1;
4162 return (desc_array->elements);
/* Problematic terminator: split off the last 4 bytes into a 2nd descriptor. */
4165 desc_array->descriptor[0].address = address;
4166 desc_array->descriptor[0].length = length - 4;
4167 desc_array->descriptor[1].address = address + (length - 4);
4168 desc_array->descriptor[1].length = 4;
4169 desc_array->elements = 2;
4170 return (desc_array->elements);
4173 /**********************************************************************
4175 * Update the board statistics counters.
4177 **********************************************************************/
4179 lem_update_stats_counters(struct adapter *adapter)
/* Symbol/sequence error counters are only meaningful with link up or copper. */
4183 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
4184 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4185 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4186 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4188 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4189 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4190 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4191 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4193 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4194 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4195 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4196 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4197 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4198 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4199 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4200 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4201 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4202 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4203 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4204 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4205 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4206 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4207 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4208 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4209 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4210 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4211 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4212 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4214 /* For the 64-bit byte counters the low dword must be read first. */
4215 /* Both registers clear on the read of the high dword */
4217 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
4218 ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
4219 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
4220 ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
4222 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
4223 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
4224 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
4225 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
4226 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
4228 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
4229 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
4231 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
4232 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
4233 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
4234 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
4235 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
4236 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
4237 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
4238 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
4239 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
4240 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* These counters only exist on 82543 and newer MACs. */
4242 if (adapter->hw.mac.type >= e1000_82543) {
4243 adapter->stats.algnerrc +=
4244 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
4245 adapter->stats.rxerrc +=
4246 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
4247 adapter->stats.tncrs +=
4248 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
4249 adapter->stats.cexterr +=
4250 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
4251 adapter->stats.tsctc +=
4252 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
4253 adapter->stats.tsctfc +=
4254 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
/* Publish aggregate counters to the ifnet for netstat(1) and friends. */
4258 ifp->if_collisions = adapter->stats.colc;
4261 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
4262 adapter->stats.crcerrs + adapter->stats.algnerrc +
4263 adapter->stats.ruc + adapter->stats.roc +
4264 adapter->stats.mpc + adapter->stats.cexterr;
4267 ifp->if_oerrors = adapter->stats.ecol +
4268 adapter->stats.latecol + adapter->watchdog_events;
4271 /* Export a single 32-bit register via a read-only sysctl. */
4273 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4275 struct adapter *adapter;
/* arg1 = adapter softc, arg2 = register offset to read. */
4278 adapter = oidp->oid_arg1;
4279 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4280 return (sysctl_handle_int(oidp, &val, 0, req));
4284 * Add sysctl variables, one per statistic, to the system.
4287 lem_add_hw_stats(struct adapter *adapter)
4289 device_t dev = adapter->dev;
4291 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4292 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4293 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4294 struct e1000_hw_stats *stats = &adapter->stats;
4296 struct sysctl_oid *stat_node;
4297 struct sysctl_oid_list *stat_list;
4299 /* Driver Statistics */
4300 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail",
4301 CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4303 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
4304 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4305 "Std mbuf cluster failed");
4306 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4307 CTLFLAG_RD, &adapter->dropped_pkts,
4308 "Driver dropped packets");
4309 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
4310 CTLFLAG_RD, &adapter->no_tx_dma_setup,
4311 "Driver tx dma failure in xmit");
4312 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4313 CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4314 "Not enough tx descriptors failure in xmit");
4315 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4316 CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4317 "Not enough tx descriptors failure in xmit");
4318 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4319 CTLFLAG_RD, &adapter->rx_overruns,
4321 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4322 CTLFLAG_RD, &adapter->watchdog_events,
4323 "Watchdog timeouts");
/* Live hardware registers, read on demand via lem_sysctl_reg_handler(). */
4325 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4326 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4327 lem_sysctl_reg_handler, "IU",
4328 "Device Control Register");
4329 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4330 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4331 lem_sysctl_reg_handler, "IU",
4332 "Receiver Control Register");
4333 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4334 CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4335 "Flow Control High Watermark");
4336 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4337 CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4338 "Flow Control Low Watermark");
4339 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4340 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4341 "TX FIFO workaround events");
4342 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4343 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
/* Descriptor ring head/tail pointers, also read live from the hardware. */
4346 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head",
4347 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4348 lem_sysctl_reg_handler, "IU",
4349 "Transmit Descriptor Head");
4350 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail",
4351 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4352 lem_sysctl_reg_handler, "IU",
4353 "Transmit Descriptor Tail");
4354 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head",
4355 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4356 lem_sysctl_reg_handler, "IU",
4357 "Receive Descriptor Head");
4358 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail",
4359 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4360 lem_sysctl_reg_handler, "IU",
4361 "Receive Descriptor Tail");
4364 /* MAC stats get their own sub node */
4366 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4367 CTLFLAG_RD, NULL, "Statistics");
4368 stat_list = SYSCTL_CHILDREN(stat_node);
4370 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4371 CTLFLAG_RD, &stats->ecol,
4372 "Excessive collisions");
4373 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4374 CTLFLAG_RD, &stats->scc,
4375 "Single collisions");
4376 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4377 CTLFLAG_RD, &stats->mcc,
4378 "Multiple collisions");
4379 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4380 CTLFLAG_RD, &stats->latecol,
4382 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4383 CTLFLAG_RD, &stats->colc,
4385 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4386 CTLFLAG_RD, &adapter->stats.symerrs,
4388 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4389 CTLFLAG_RD, &adapter->stats.sec,
4391 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4392 CTLFLAG_RD, &adapter->stats.dc,
4394 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4395 CTLFLAG_RD, &adapter->stats.mpc,
4397 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4398 CTLFLAG_RD, &adapter->stats.rnbc,
4399 "Receive No Buffers");
4400 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4401 CTLFLAG_RD, &adapter->stats.ruc,
4402 "Receive Undersize");
4403 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4404 CTLFLAG_RD, &adapter->stats.rfc,
4405 "Fragmented Packets Received ");
4406 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4407 CTLFLAG_RD, &adapter->stats.roc,
4408 "Oversized Packets Received");
4409 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4410 CTLFLAG_RD, &adapter->stats.rjc,
4412 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4413 CTLFLAG_RD, &adapter->stats.rxerrc,
4415 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4416 CTLFLAG_RD, &adapter->stats.crcerrs,
4418 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4419 CTLFLAG_RD, &adapter->stats.algnerrc,
4420 "Alignment Errors");
4421 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4422 CTLFLAG_RD, &adapter->stats.cexterr,
4423 "Collision/Carrier extension errors");
4424 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4425 CTLFLAG_RD, &adapter->stats.xonrxc,
4427 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4428 CTLFLAG_RD, &adapter->stats.xontxc,
4430 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4431 CTLFLAG_RD, &adapter->stats.xoffrxc,
4433 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4434 CTLFLAG_RD, &adapter->stats.xofftxc,
4435 "XOFF Transmitted");
4437 /* Packet Reception Stats */
4438 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4439 CTLFLAG_RD, &adapter->stats.tpr,
4440 "Total Packets Received ");
4441 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4442 CTLFLAG_RD, &adapter->stats.gprc,
4443 "Good Packets Received");
4444 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4445 CTLFLAG_RD, &adapter->stats.bprc,
4446 "Broadcast Packets Received");
4447 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4448 CTLFLAG_RD, &adapter->stats.mprc,
4449 "Multicast Packets Received");
4450 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4451 CTLFLAG_RD, &adapter->stats.prc64,
4452 "64 byte frames received ");
4453 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4454 CTLFLAG_RD, &adapter->stats.prc127,
4455 "65-127 byte frames received");
4456 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4457 CTLFLAG_RD, &adapter->stats.prc255,
4458 "128-255 byte frames received");
4459 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4460 CTLFLAG_RD, &adapter->stats.prc511,
4461 "256-511 byte frames received");
4462 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4463 CTLFLAG_RD, &adapter->stats.prc1023,
4464 "512-1023 byte frames received");
/* NOTE(review): description string says "1023-1522" but this counter and the
 * node name cover 1024-1522 byte frames; string left unchanged here. */
4465 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4466 CTLFLAG_RD, &adapter->stats.prc1522,
4467 "1023-1522 byte frames received");
4468 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4469 CTLFLAG_RD, &adapter->stats.gorc,
4470 "Good Octets Received");
4472 /* Packet Transmission Stats */
4473 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4474 CTLFLAG_RD, &adapter->stats.gotc,
4475 "Good Octets Transmitted");
4476 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4477 CTLFLAG_RD, &adapter->stats.tpt,
4478 "Total Packets Transmitted");
4479 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4480 CTLFLAG_RD, &adapter->stats.gptc,
4481 "Good Packets Transmitted");
4482 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4483 CTLFLAG_RD, &adapter->stats.bptc,
4484 "Broadcast Packets Transmitted");
4485 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4486 CTLFLAG_RD, &adapter->stats.mptc,
4487 "Multicast Packets Transmitted");
4488 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4489 CTLFLAG_RD, &adapter->stats.ptc64,
4490 "64 byte frames transmitted ");
4491 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4492 CTLFLAG_RD, &adapter->stats.ptc127,
4493 "65-127 byte frames transmitted");
4494 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4495 CTLFLAG_RD, &adapter->stats.ptc255,
4496 "128-255 byte frames transmitted");
4497 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4498 CTLFLAG_RD, &adapter->stats.ptc511,
4499 "256-511 byte frames transmitted");
4500 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4501 CTLFLAG_RD, &adapter->stats.ptc1023,
4502 "512-1023 byte frames transmitted");
4503 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4504 CTLFLAG_RD, &adapter->stats.ptc1522,
4505 "1024-1522 byte frames transmitted");
4506 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4507 CTLFLAG_RD, &adapter->stats.tsctc,
4508 "TSO Contexts Transmitted");
4509 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4510 CTLFLAG_RD, &adapter->stats.tsctfc,
4511 "TSO Contexts Failed");
4514 /**********************************************************************
4516 * This routine provides a way to dump out the adapter eeprom,
4517 * often a useful debug/service tool. This only dumps the first
4518 * 32 words, stuff that matters is in that extent.
4520 **********************************************************************/
4523 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4525 struct adapter *adapter;
4530 error = sysctl_handle_int(oidp, &result, 0, req);
4532 if (error || !req->newptr)
4536 * This value will cause a hex dump of the
4537 * first 32 16-bit words of the EEPROM to
4541 adapter = (struct adapter *)arg1;
4542 lem_print_nvm_info(adapter);
4549 lem_print_nvm_info(struct adapter *adapter)
4554 /* Its a bit crude, but it gets the job done */
4555 printf("\nInterface EEPROM Dump:\n");
4556 printf("Offset\n0x0000 ");
4557 for (i = 0, j = 0; i < 32; i++, j++) {
4558 if (j == 8) { /* Make the offset block */
4560 printf("\n0x00%x0 ",row);
4562 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4563 printf("%04x ", eeprom_data);
4569 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4571 struct em_int_delay_info *info;
4572 struct adapter *adapter;
4578 info = (struct em_int_delay_info *)arg1;
4579 usecs = info->value;
4580 error = sysctl_handle_int(oidp, &usecs, 0, req);
4581 if (error != 0 || req->newptr == NULL)
4583 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4585 info->value = usecs;
4586 ticks = EM_USECS_TO_TICKS(usecs);
4588 adapter = info->adapter;
4590 EM_CORE_LOCK(adapter);
4591 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4592 regval = (regval & ~0xffff) | (ticks & 0xffff);
4593 /* Handle a few special cases. */
4594 switch (info->offset) {
4599 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4600 /* Don't write 0 into the TIDV register. */
4603 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4606 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4607 EM_CORE_UNLOCK(adapter);
4612 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4613 const char *description, struct em_int_delay_info *info,
4614 int offset, int value)
4616 info->adapter = adapter;
4617 info->offset = offset;
4618 info->value = value;
4619 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4620 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4621 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4622 info, 0, lem_sysctl_int_delay, "I", description);
4626 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4627 const char *description, int *limit, int value)
4630 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4631 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4632 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4636 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4637 const char *description, int *limit, int value)
4640 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4641 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4642 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);