1 /******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
38 #include "opt_inet6.h"
41 #include <sys/param.h>
42 #include <sys/systm.h>
44 #include <sys/endian.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/malloc.h>
49 #include <sys/module.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <sys/taskqueue.h>
55 #include <sys/eventhandler.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <net/ethernet.h>
62 #include <net/if_arp.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
66 #include <net/if_types.h>
67 #include <net/if_vlan_var.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/in.h>
71 #include <netinet/if_ether.h>
72 #include <netinet/ip.h>
73 #include <netinet/ip6.h>
74 #include <netinet/tcp.h>
75 #include <netinet/udp.h>
77 #include <machine/in_cksum.h>
78 #include <dev/led/led.h>
79 #include <dev/pci/pcivar.h>
80 #include <dev/pci/pcireg.h>
82 #include "e1000_api.h"
85 /*********************************************************************
86 * Legacy Em Driver version:
87 *********************************************************************/
/* Driver version string; non-static so it is visible outside this file. */
88 char lem_driver_version[] = "1.0.5";
90 /*********************************************************************
93 * Used by probe to select devices to load on
94 * Last field stores an index into e1000_strings
95 * Last entry must be all 0s
97 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
98 *********************************************************************/
/*
 * PCI ID match table consumed by lem_probe(): { vendor, device, subvendor,
 * subdevice, index-into-lem_strings }.  PCI_ANY_ID wildcards the subsystem
 * IDs.  Per the header comment above, the table must end with an all-zero
 * sentinel entry (lem_probe stops at vendor_id == 0).
 * NOTE(review): the array's opening brace and the terminating all-zero
 * entry appear to be missing from this excerpt — confirm against the
 * full source.
 */
100 static em_vendor_info_t lem_vendor_info_array[] =
102 /* Intel(R) PRO/1000 Network Connection */
/* 82540 family */
103 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
104 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
105 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
106 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
107 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82541 family */
109 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
110 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
111 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
112 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
113 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82542 / 82543 */
117 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82544 family */
122 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82545 family */
127 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
128 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82546 family */
133 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
142 PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82547 family */
144 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
147 /* required last entry */
151 /*********************************************************************
152 * Table of branding strings for all supported NICs.
153 *********************************************************************/
/*
 * Branding strings; lem_vendor_info_array's last field indexes this table
 * (all current entries use index 0).
 */
155 static char *lem_strings[] = {
156 "Intel(R) PRO/1000 Legacy Network Connection"
159 /*********************************************************************
160 * Function prototypes
161 *********************************************************************/
/* Newbus device method entry points (wired up in lem_methods below). */
162 static int lem_probe(device_t);
163 static int lem_attach(device_t);
164 static int lem_detach(device_t);
165 static int lem_shutdown(device_t);
166 static int lem_suspend(device_t);
167 static int lem_resume(device_t);
/* ifnet entry points: transmit start, ioctl, init/stop. */
168 static void lem_start(struct ifnet *);
169 static void lem_start_locked(struct ifnet *ifp);
170 static int lem_ioctl(struct ifnet *, u_long, caddr_t);
171 static void lem_init(void *);
172 static void lem_init_locked(struct adapter *);
173 static void lem_stop(void *);
174 static void lem_media_status(struct ifnet *, struct ifmediareq *);
175 static int lem_media_change(struct ifnet *);
/* Hardware bring-up / resource management helpers. */
176 static void lem_identify_hardware(struct adapter *);
177 static int lem_allocate_pci_resources(struct adapter *);
178 static int lem_allocate_irq(struct adapter *adapter);
179 static void lem_free_pci_resources(struct adapter *);
180 static void lem_local_timer(void *);
181 static int lem_hardware_init(struct adapter *);
182 static int lem_setup_interface(device_t, struct adapter *);
/* TX/RX ring setup, teardown and datapath. */
183 static void lem_setup_transmit_structures(struct adapter *);
184 static void lem_initialize_transmit_unit(struct adapter *);
185 static int lem_setup_receive_structures(struct adapter *);
186 static void lem_initialize_receive_unit(struct adapter *);
187 static void lem_enable_intr(struct adapter *);
188 static void lem_disable_intr(struct adapter *);
189 static void lem_free_transmit_structures(struct adapter *);
190 static void lem_free_receive_structures(struct adapter *);
191 static void lem_update_stats_counters(struct adapter *);
192 static void lem_add_hw_stats(struct adapter *adapter);
193 static void lem_txeof(struct adapter *);
194 static void lem_tx_purge(struct adapter *);
195 static int lem_allocate_receive_structures(struct adapter *);
196 static int lem_allocate_transmit_structures(struct adapter *);
197 static bool lem_rxeof(struct adapter *, int, int *);
198 #ifndef __NO_STRICT_ALIGNMENT
199 static int lem_fixup_rx(struct adapter *);
/* NOTE(review): the matching #endif for __NO_STRICT_ALIGNMENT appears to
 * be missing from this excerpt — confirm against the full source. */
201 static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
203 static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
/* Filtering, link state and VLAN support. */
205 static void lem_set_promisc(struct adapter *);
206 static void lem_disable_promisc(struct adapter *);
207 static void lem_set_multi(struct adapter *);
208 static void lem_update_link_status(struct adapter *);
209 static int lem_get_buf(struct adapter *, int);
210 static void lem_register_vlan(void *, struct ifnet *, u16);
211 static void lem_unregister_vlan(void *, struct ifnet *, u16);
212 static void lem_setup_vlan_hw_support(struct adapter *);
213 static int lem_xmit(struct adapter *, struct mbuf **);
/* 82547-specific TX FIFO workarounds and misc helpers. */
214 static void lem_smartspeed(struct adapter *);
215 static int lem_82547_fifo_workaround(struct adapter *, int);
216 static void lem_82547_update_fifo_head(struct adapter *, int);
217 static int lem_82547_tx_fifo_reset(struct adapter *);
218 static void lem_82547_move_tail(void *);
219 static int lem_dma_malloc(struct adapter *, bus_size_t,
220 struct em_dma_alloc *, int);
221 static void lem_dma_free(struct adapter *, struct em_dma_alloc *);
222 static int lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
223 static void lem_print_nvm_info(struct adapter *);
224 static int lem_is_valid_ether_addr(u8 *);
225 static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
226 PDESC_ARRAY desc_array);
227 static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
228 static void lem_add_int_delay_sysctl(struct adapter *, const char *,
229 const char *, struct em_int_delay_info *, int, int);
230 static void lem_set_flow_cntrl(struct adapter *, const char *,
231 const char *, int *, int);
232 /* Management and WOL Support */
233 static void lem_init_manageability(struct adapter *);
234 static void lem_release_manageability(struct adapter *);
235 static void lem_get_hw_control(struct adapter *);
236 static void lem_release_hw_control(struct adapter *);
237 static void lem_get_wakeup(device_t);
238 static void lem_enable_wakeup(device_t);
239 static int lem_enable_phy_wakeup(struct adapter *);
240 static void lem_led_func(void *, int);
/* Interrupt handling: legacy handler, fast filter, deferred taskqueues. */
242 static void lem_intr(void *);
243 static int lem_irq_fast(void *);
244 static void lem_handle_rxtx(void *context, int pending);
245 static void lem_handle_link(void *context, int pending);
246 static void lem_add_rx_process_limit(struct adapter *, const char *,
247 const char *, int *, int);
249 #ifdef DEVICE_POLLING
250 static poll_handler_t lem_poll;
253 /*********************************************************************
254 * FreeBSD Device Interface Entry Points
255 *********************************************************************/
/*
 * Newbus glue: device method table, driver descriptor (softc is struct
 * adapter), and module registration.  The driver name is "em" so this
 * legacy driver shares the unit namespace with the modern em(4) driver
 * via the shared em_devclass.
 * NOTE(review): the method table terminator and the driver struct's
 * closing brace appear to be missing from this excerpt.
 */
257 static device_method_t lem_methods[] = {
258 /* Device interface */
259 DEVMETHOD(device_probe, lem_probe),
260 DEVMETHOD(device_attach, lem_attach),
261 DEVMETHOD(device_detach, lem_detach),
262 DEVMETHOD(device_shutdown, lem_shutdown),
263 DEVMETHOD(device_suspend, lem_suspend),
264 DEVMETHOD(device_resume, lem_resume),
268 static driver_t lem_driver = {
269 "em", lem_methods, sizeof(struct adapter),
/* Devclass is defined in the sibling em driver; shared here. */
272 extern devclass_t em_devclass;
273 DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
274 MODULE_DEPEND(lem, pci, 1, 1, 1);
275 MODULE_DEPEND(lem, ether, 1, 1, 1);
277 /*********************************************************************
278 * Tunable default values.
279 *********************************************************************/
/*
 * Interrupt-delay unit conversion: hardware delay registers tick in
 * 1.024 us units; the +500/+512 terms round to nearest.
 */
281 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
282 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
284 #define MAX_INTS_PER_SEC 8000
/* ITR register value derived from the interrupt-rate cap above. */
285 #define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256))
/* Loader-tunable defaults (overridable via the hw.em.* tunables below). */
287 static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
288 static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
289 static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
290 static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
291 static int lem_rxd = EM_DEFAULT_RXD;
292 static int lem_txd = EM_DEFAULT_TXD;
293 static int lem_smart_pwr_down = FALSE;
295 /* Controls whether promiscuous also shows bad packets */
296 static int lem_debug_sbp = FALSE;
298 TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
299 TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
300 TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
301 TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
302 TUNABLE_INT("hw.em.rxd", &lem_rxd);
303 TUNABLE_INT("hw.em.txd", &lem_txd);
304 TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
305 TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
307 /* Interrupt style - default to fast */
308 static int lem_use_legacy_irq = 0;
309 TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq);
311 /* How many packets rxeof tries to clean at a time */
312 static int lem_rx_process_limit = 100;
313 TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
315 /* Flow control setting - default to FULL */
316 static int lem_fc_setting = e1000_fc_full;
317 TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
319 /* Global used in WOL setup with multiport cards */
320 static int global_quad_port_a = 0;
322 #ifdef DEV_NETMAP /* see ixgbe.c for details */
323 #include <dev/netmap/if_lem_netmap.h>
324 #endif /* DEV_NETMAP */
326 /*********************************************************************
327 * Device identification routine
329 * em_probe determines if the driver should be loaded on
330 * adapter based on PCI vendor/device id of the adapter.
332 * return BUS_PROBE_DEFAULT on success, positive on failure
333 *********************************************************************/
/*
 * Device probe: match the adapter's PCI vendor/device/subsystem IDs
 * against lem_vendor_info_array and, on a hit, set the device
 * description from lem_strings.  Returns BUS_PROBE_DEFAULT on a match;
 * per the routine header, positive on failure (the non-match return
 * path is not visible in this excerpt).
 * NOTE(review): the function's return-type line, opening brace and
 * final non-match return appear to be missing from this excerpt.
 */
336 lem_probe(device_t dev)
338 char adapter_name[60];
339 u16 pci_vendor_id = 0;
340 u16 pci_device_id = 0;
341 u16 pci_subvendor_id = 0;
342 u16 pci_subdevice_id = 0;
343 em_vendor_info_t *ent;
345 INIT_DEBUGOUT("em_probe: begin");
/* Quick reject: anything not Intel cannot be ours. */
347 pci_vendor_id = pci_get_vendor(dev);
348 if (pci_vendor_id != EM_VENDOR_ID)
351 pci_device_id = pci_get_device(dev);
352 pci_subvendor_id = pci_get_subvendor(dev);
353 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the ID table until the all-zero sentinel entry. */
355 ent = lem_vendor_info_array;
356 while (ent->vendor_id != 0) {
357 if ((pci_vendor_id == ent->vendor_id) &&
358 (pci_device_id == ent->device_id) &&
/* PCI_ANY_ID wildcards the subsystem vendor/device match. */
360 ((pci_subvendor_id == ent->subvendor_id) ||
361 (ent->subvendor_id == PCI_ANY_ID)) &&
363 ((pci_subdevice_id == ent->subdevice_id) ||
364 (ent->subdevice_id == PCI_ANY_ID))) {
365 sprintf(adapter_name, "%s %s",
366 lem_strings[ent->index],
368 device_set_desc_copy(dev, adapter_name);
369 return (BUS_PROBE_DEFAULT);
377 /*********************************************************************
378 * Device initialization routine
380 * The attach entry point is called when the driver is being loaded.
381 * This routine identifies the type of hardware, allocates all resources
382 * and initializes the hardware.
384 * return 0 on success, positive on failure
385 *********************************************************************/
/*
 * Device attach: identify the MAC, allocate PCI/DMA/IRQ resources,
 * validate the EEPROM and MAC address, set up the TX/RX descriptor
 * rings and the ifnet, and register sysctls, VLAN event handlers and
 * the LED device.  Returns 0 on success, positive errno on failure.
 * NOTE(review): numerous lines (error gotos, the fail_* unwind labels,
 * closing braces) appear to be missing from this excerpt; the trailing
 * calls from line "667" onward are the error-unwind path.
 */
388 lem_attach(device_t dev)
390 struct adapter *adapter;
394 INIT_DEBUGOUT("lem_attach: begin");
/* Wire the softc to the device and create the three driver mutexes. */
396 adapter = device_get_softc(dev);
397 adapter->dev = adapter->osdep.dev = dev;
398 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
399 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
400 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Per-device "nvm" sysctl for dumping EEPROM contents. */
403 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
404 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
405 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
406 lem_sysctl_nvm_info, "I", "NVM Information");
/* Callouts are bound to their mutexes so callbacks run locked. */
408 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
409 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
411 /* Determine hardware and mac info */
412 lem_identify_hardware(adapter);
414 /* Setup PCI resources */
415 if (lem_allocate_pci_resources(adapter)) {
416 device_printf(dev, "Allocation of PCI resources failed\n");
421 /* Do Shared Code initialization */
422 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
423 device_printf(dev, "Setup of Shared code failed\n");
428 e1000_get_bus_info(&adapter->hw);
430 /* Set up some sysctls for the tunable interrupt delays */
431 lem_add_int_delay_sysctl(adapter, "rx_int_delay",
432 "receive interrupt delay in usecs", &adapter->rx_int_delay,
433 E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
434 lem_add_int_delay_sysctl(adapter, "tx_int_delay",
435 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
436 E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
/* RADV/TADV/ITR registers only exist on 82540 and later MACs. */
437 if (adapter->hw.mac.type >= e1000_82540) {
438 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
439 "receive interrupt delay limit in usecs",
440 &adapter->rx_abs_int_delay,
441 E1000_REGISTER(&adapter->hw, E1000_RADV),
442 lem_rx_abs_int_delay_dflt);
443 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
444 "transmit interrupt delay limit in usecs",
445 &adapter->tx_abs_int_delay,
446 E1000_REGISTER(&adapter->hw, E1000_TADV),
447 lem_tx_abs_int_delay_dflt);
448 lem_add_int_delay_sysctl(adapter, "itr",
449 "interrupt delay limit in usecs/4",
451 E1000_REGISTER(&adapter->hw, E1000_ITR),
455 /* Sysctls for limiting the amount of work done in the taskqueue */
456 lem_add_rx_process_limit(adapter, "rx_processing_limit",
457 "max number of rx packets to process", &adapter->rx_process_limit,
458 lem_rx_process_limit);
460 /* Sysctl for setting the interface flow control */
461 lem_set_flow_cntrl(adapter, "flow_control",
462 "flow control setting",
463 &adapter->fc_setting, lem_fc_setting);
466 * Validate number of transmit and receive descriptors. It
467 * must not exceed hardware maximum, and must be multiple
468 * of E1000_DBA_ALIGN.
/* Fall back to the compiled-in defaults if the tunables are invalid. */
470 if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
471 (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
472 (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
473 (lem_txd < EM_MIN_TXD)) {
474 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
475 EM_DEFAULT_TXD, lem_txd);
476 adapter->num_tx_desc = EM_DEFAULT_TXD;
478 adapter->num_tx_desc = lem_txd;
479 if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
480 (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
481 (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
482 (lem_rxd < EM_MIN_RXD)) {
483 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
484 EM_DEFAULT_RXD, lem_rxd);
485 adapter->num_rx_desc = EM_DEFAULT_RXD;
487 adapter->num_rx_desc = lem_rxd;
/* Default PHY/link behavior: autonegotiate, don't block waiting. */
489 adapter->hw.mac.autoneg = DO_AUTO_NEG;
490 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
491 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
492 adapter->rx_buffer_len = 2048;
494 e1000_init_script_state_82541(&adapter->hw, TRUE);
495 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
498 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
499 adapter->hw.phy.mdix = AUTO_ALL_MODES;
500 adapter->hw.phy.disable_polarity_correction = FALSE;
501 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
505 * Set the frame limits assuming
506 * standard ethernet sized frames.
508 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
509 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
512 * This controls when hardware reports transmit completion
515 adapter->hw.mac.report_tx_early = 1;
517 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
520 /* Allocate Transmit Descriptor ring */
521 if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
522 device_printf(dev, "Unable to allocate tx_desc memory\n");
526 adapter->tx_desc_base =
527 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
529 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
532 /* Allocate Receive Descriptor ring */
533 if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
534 device_printf(dev, "Unable to allocate rx_desc memory\n");
538 adapter->rx_desc_base =
539 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
541 /* Allocate multicast array memory. */
542 adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
543 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
544 if (adapter->mta == NULL) {
545 device_printf(dev, "Can not allocate multicast setup array\n");
551 ** Start from a known state, this is
552 ** important in reading the nvm and
555 e1000_reset_hw(&adapter->hw);
557 /* Make sure we have a good EEPROM before we read from it */
558 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
560 ** Some PCI-E parts fail the first check due to
561 ** the link being in sleep state, call it again,
562 ** if it fails a second time its a real issue.
564 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
566 "The EEPROM Checksum Is Not Valid\n");
572 /* Copy the permanent MAC address out of the EEPROM */
573 if (e1000_read_mac_addr(&adapter->hw) < 0) {
574 device_printf(dev, "EEPROM read error while reading MAC"
580 if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
581 device_printf(dev, "Invalid MAC address\n");
586 /* Initialize the hardware */
587 if (lem_hardware_init(adapter)) {
588 device_printf(dev, "Unable to initialize the hardware\n");
593 /* Allocate transmit descriptors and buffers */
594 if (lem_allocate_transmit_structures(adapter)) {
595 device_printf(dev, "Could not setup transmit structures\n");
600 /* Allocate receive descriptors and buffers */
601 if (lem_allocate_receive_structures(adapter)) {
602 device_printf(dev, "Could not setup receive structures\n");
608 ** Do interrupt configuration
610 error = lem_allocate_irq(adapter);
615 * Get Wake-on-Lan and Management info for later use
619 /* Setup OS specific network interface */
620 if (lem_setup_interface(dev, adapter) != 0)
623 /* Initialize statistics */
624 lem_update_stats_counters(adapter);
/* Force a link-status refresh on first check. */
626 adapter->hw.mac.get_link_status = 1;
627 lem_update_link_status(adapter);
629 /* Indicate SOL/IDER usage */
630 if (e1000_check_reset_block(&adapter->hw))
632 "PHY reset is blocked due to SOL/IDER session.\n");
634 /* Do we need workaround for 82544 PCI-X adapter? */
635 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
636 adapter->hw.mac.type == e1000_82544)
637 adapter->pcix_82544 = TRUE;
639 adapter->pcix_82544 = FALSE;
641 /* Register for VLAN events */
642 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
643 lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
644 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
645 lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
647 lem_add_hw_stats(adapter);
649 /* Non-AMT based hardware can now take control from firmware */
650 if (adapter->has_manage && !adapter->has_amt)
651 lem_get_hw_control(adapter);
653 /* Tell the stack that the interface is not active */
654 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
656 adapter->led_dev = led_create(lem_led_func, adapter,
657 device_get_nameunit(dev));
660 lem_netmap_attach(adapter);
661 #endif /* DEV_NETMAP */
662 INIT_DEBUGOUT("lem_attach: end");
/*
 * Error-unwind path: release everything acquired above in reverse
 * order.  The goto labels between these calls are not visible in this
 * excerpt.
 */
667 lem_free_transmit_structures(adapter);
670 lem_release_hw_control(adapter);
671 lem_dma_free(adapter, &adapter->rxdma);
673 lem_dma_free(adapter, &adapter->txdma);
676 if (adapter->ifp != NULL)
677 if_free(adapter->ifp);
678 lem_free_pci_resources(adapter);
679 free(adapter->mta, M_DEVBUF);
680 EM_TX_LOCK_DESTROY(adapter);
681 EM_RX_LOCK_DESTROY(adapter);
682 EM_CORE_LOCK_DESTROY(adapter);
687 /*********************************************************************
688 * Device removal routine
690 * The detach entry point is called when the driver is being removed.
691 * This routine stops the adapter and deallocates all the resources
692 * that were allocated for driver operation.
694 * return 0 on success, positive on failure
695 *********************************************************************/
/*
 * Device detach: refuse if VLANs are still attached, stop the adapter,
 * unhook event handlers and callouts, then free rings, DMA memory, PCI
 * resources and mutexes in roughly reverse order of lem_attach().
 * Returns 0 on success, positive on failure (return lines not visible
 * in this excerpt).
 */
698 lem_detach(device_t dev)
700 struct adapter *adapter = device_get_softc(dev);
701 struct ifnet *ifp = adapter->ifp;
703 INIT_DEBUGOUT("em_detach: begin");
705 /* Make sure VLANS are not using driver */
706 if (adapter->ifp->if_vlantrunk != NULL) {
707 device_printf(dev,"Vlan in use, detach first\n");
711 #ifdef DEVICE_POLLING
712 if (ifp->if_capenable & IFCAP_POLLING)
713 ether_poll_deregister(ifp);
716 if (adapter->led_dev != NULL)
717 led_destroy(adapter->led_dev);
/* Mark in_detach so concurrent ioctls bail out early. */
719 EM_CORE_LOCK(adapter);
721 adapter->in_detach = 1;
723 e1000_phy_hw_reset(&adapter->hw);
725 lem_release_manageability(adapter);
727 EM_TX_UNLOCK(adapter);
728 EM_CORE_UNLOCK(adapter);
730 /* Unregister VLAN events */
731 if (adapter->vlan_attach != NULL)
732 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
733 if (adapter->vlan_detach != NULL)
734 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* Drain callouts after ether_ifdetach so no new ones can be scheduled. */
736 ether_ifdetach(adapter->ifp);
737 callout_drain(&adapter->timer);
738 callout_drain(&adapter->tx_fifo_timer);
742 #endif /* DEV_NETMAP */
743 lem_free_pci_resources(adapter);
744 bus_generic_detach(dev);
747 lem_free_transmit_structures(adapter);
748 lem_free_receive_structures(adapter);
750 /* Free Transmit Descriptor ring */
751 if (adapter->tx_desc_base) {
752 lem_dma_free(adapter, &adapter->txdma);
753 adapter->tx_desc_base = NULL;
756 /* Free Receive Descriptor ring */
757 if (adapter->rx_desc_base) {
758 lem_dma_free(adapter, &adapter->rxdma);
759 adapter->rx_desc_base = NULL;
762 lem_release_hw_control(adapter);
763 free(adapter->mta, M_DEVBUF);
764 EM_TX_LOCK_DESTROY(adapter);
765 EM_RX_LOCK_DESTROY(adapter);
766 EM_CORE_LOCK_DESTROY(adapter);
771 /*********************************************************************
773 * Shutdown entry point
775 **********************************************************************/
/* Shutdown is identical to suspend: quiesce the adapter and arm wakeup. */
778 lem_shutdown(device_t dev)
780 return lem_suspend(dev);
784 * Suspend/resume device methods.
/*
 * Suspend: under the core lock, release manageability/hw control and
 * enable Wake-on-LAN, then let the bus complete the generic suspend.
 */
787 lem_suspend(device_t dev)
789 struct adapter *adapter = device_get_softc(dev);
791 EM_CORE_LOCK(adapter);
793 lem_release_manageability(adapter);
794 lem_release_hw_control(adapter);
795 lem_enable_wakeup(dev);
797 EM_CORE_UNLOCK(adapter);
799 return bus_generic_suspend(dev);
/*
 * Resume: re-run full init and manageability setup under the core lock,
 * then complete the generic bus resume.  (Any restart of the transmit
 * path is not visible in this excerpt.)
 */
803 lem_resume(device_t dev)
805 struct adapter *adapter = device_get_softc(dev);
806 struct ifnet *ifp = adapter->ifp;
808 EM_CORE_LOCK(adapter);
809 lem_init_locked(adapter);
810 lem_init_manageability(adapter);
811 EM_CORE_UNLOCK(adapter);
814 return bus_generic_resume(dev);
/*
 * Transmit start, TX lock held (asserted below): drain the interface
 * send queue, encapsulating each mbuf via lem_xmit().  On encapsulation
 * failure the packet is re-queued (unless lem_xmit NULLed the pointer)
 * and OACTIVE is set; each sent frame is handed to BPF and arms the
 * watchdog.  Bails out early if not RUNNING, if OACTIVE, or if there is
 * no link.
 */
819 lem_start_locked(struct ifnet *ifp)
821 struct adapter *adapter = ifp->if_softc;
824 EM_TX_LOCK_ASSERT(adapter);
826 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
829 if (!adapter->link_active)
833 * Force a cleanup if number of TX descriptors
834 * available hits the threshold
836 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
838 /* Now do we at least have a minimal? */
839 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
840 adapter->no_tx_desc_avail1++;
845 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
847 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
851 * Encapsulation can modify our pointer, and or make it
852 * NULL on failure. In that event, we can't requeue.
854 if (lem_xmit(adapter, &m_head)) {
857 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
858 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
862 /* Send a copy of the frame to the BPF listener */
863 ETHER_BPF_MTAP(ifp, m_head);
865 /* Set timeout in case hardware has problems transmitting. */
866 adapter->watchdog_check = TRUE;
867 adapter->watchdog_time = ticks;
/* Throttle the stack if descriptors are nearly exhausted. */
869 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
870 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/*
 * ifnet if_start entry point: takes the TX lock and defers to
 * lem_start_locked() when the interface is running.
 * NOTE(review): the EM_TX_LOCK() call that pairs with the unlock below
 * is not visible in this excerpt.
 */
876 lem_start(struct ifnet *ifp)
878 struct adapter *adapter = ifp->if_softc;
881 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
882 lem_start_locked(ifp);
883 EM_TX_UNLOCK(adapter);
886 /*********************************************************************
889 * em_ioctl is called when the user wants to configure the
892 * return 0 on success, positive on failure
893 **********************************************************************/
/*
 * ifnet ioctl handler.  Handles address assignment (avoiding a full
 * re-init where possible), MTU changes, interface flag changes
 * (promiscuous/allmulti, up/down), multicast list updates, media
 * get/set, and capability toggles (polling, checksum offload, VLAN
 * tagging, WOL).  Returns 0 on success, errno on failure.
 * NOTE(review): the switch statement itself and its case labels are
 * not visible in this excerpt; only the case bodies remain.
 */
896 lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
898 struct adapter *adapter = ifp->if_softc;
899 struct ifreq *ifr = (struct ifreq *)data;
900 #if defined(INET) || defined(INET6)
901 struct ifaddr *ifa = (struct ifaddr *)data;
903 bool avoid_reset = FALSE;
/* Reject ioctls once detach has started. */
906 if (adapter->in_detach)
/* SIOCSIFADDR: only INET/INET6 addresses can skip the full reset. */
912 if (ifa->ifa_addr->sa_family == AF_INET)
916 if (ifa->ifa_addr->sa_family == AF_INET6)
920 ** Calling init results in link renegotiation,
921 ** so we avoid doing it when possible.
924 ifp->if_flags |= IFF_UP;
925 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
928 if (!(ifp->if_flags & IFF_NOARP))
929 arp_ifinit(ifp, ifa);
932 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: bound the new MTU by the MAC's max frame size. */
938 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
940 EM_CORE_LOCK(adapter);
941 switch (adapter->hw.mac.type) {
943 max_frame_size = ETHER_MAX_LEN;
946 max_frame_size = MAX_JUMBO_FRAME_SIZE;
948 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
950 EM_CORE_UNLOCK(adapter);
955 ifp->if_mtu = ifr->ifr_mtu;
956 adapter->max_frame_size =
957 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
958 lem_init_locked(adapter);
959 EM_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: react to up/down and promisc/allmulti transitions. */
963 IOCTL_DEBUGOUT("ioctl rcv'd:\
964 SIOCSIFFLAGS (Set Interface Flags)");
965 EM_CORE_LOCK(adapter);
966 if (ifp->if_flags & IFF_UP) {
967 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
968 if ((ifp->if_flags ^ adapter->if_flags) &
969 (IFF_PROMISC | IFF_ALLMULTI)) {
970 lem_disable_promisc(adapter);
971 lem_set_promisc(adapter);
974 lem_init_locked(adapter);
976 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
979 EM_TX_UNLOCK(adapter);
/* Remember current flags to detect future transitions. */
981 adapter->if_flags = ifp->if_flags;
982 EM_CORE_UNLOCK(adapter);
/* SIOCADDMULTI / SIOCDELMULTI: reprogram the multicast filter. */
986 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
987 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
988 EM_CORE_LOCK(adapter);
989 lem_disable_intr(adapter);
990 lem_set_multi(adapter);
/* 82542 rev 2 needs the RX unit re-initialized after a filter change. */
991 if (adapter->hw.mac.type == e1000_82542 &&
992 adapter->hw.revision_id == E1000_REVISION_2) {
993 lem_initialize_receive_unit(adapter);
995 #ifdef DEVICE_POLLING
996 if (!(ifp->if_capenable & IFCAP_POLLING))
998 lem_enable_intr(adapter);
999 EM_CORE_UNLOCK(adapter);
/* SIOCSIFMEDIA / SIOCGIFMEDIA: media change is refused under SOL/IDER. */
1003 /* Check SOL/IDER usage */
1004 EM_CORE_LOCK(adapter);
1005 if (e1000_check_reset_block(&adapter->hw)) {
1006 EM_CORE_UNLOCK(adapter);
1007 device_printf(adapter->dev, "Media change is"
1008 " blocked due to SOL/IDER session.\n");
1011 EM_CORE_UNLOCK(adapter);
1013 IOCTL_DEBUGOUT("ioctl rcv'd: \
1014 SIOCxIFMEDIA (Get/Set Interface Media)");
1015 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle capabilities; some changes require re-init. */
1021 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1023 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1024 #ifdef DEVICE_POLLING
1025 if (mask & IFCAP_POLLING) {
1026 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1027 error = ether_poll_register(lem_poll, ifp);
1030 EM_CORE_LOCK(adapter);
1031 lem_disable_intr(adapter);
1032 ifp->if_capenable |= IFCAP_POLLING;
1033 EM_CORE_UNLOCK(adapter);
1035 error = ether_poll_deregister(ifp);
1036 /* Enable interrupt even in error case */
1037 EM_CORE_LOCK(adapter);
1038 lem_enable_intr(adapter);
1039 ifp->if_capenable &= ~IFCAP_POLLING;
1040 EM_CORE_UNLOCK(adapter);
1044 if (mask & IFCAP_HWCSUM) {
1045 ifp->if_capenable ^= IFCAP_HWCSUM;
1048 if (mask & IFCAP_VLAN_HWTAGGING) {
1049 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1052 if ((mask & IFCAP_WOL) &&
1053 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1054 if (mask & IFCAP_WOL_MCAST)
1055 ifp->if_capenable ^= IFCAP_WOL_MCAST;
1056 if (mask & IFCAP_WOL_MAGIC)
1057 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1059 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1061 VLAN_CAPABILITIES(ifp);
/* Unrecognized commands fall through to the generic ether handler. */
1066 error = ether_ioctl(ifp, command, data);
1074 /*********************************************************************
1077  *  This routine is used in two ways. It is used by the stack as
1078  *  init entry point in network interface structure. It is also used
1079  *  by the driver as a hw/sw initialization routine to get to a
1082  *  return 0 on success, positive on failure
1083  **********************************************************************/
/*
 * lem_init_locked() -- core (re)initialization path.
 * Caller must already hold the CORE lock (asserted below).
 * Sequence: size the hardware packet buffer (PBA), program the MAC
 * address, reset/init the hardware, set up TX then RX structures,
 * restore VLAN/promisc/multicast state, mark the ifnet RUNNING, and
 * finally re-enable interrupts (unless polling is active).
 * NOTE(review): this listing elides some lines (e.g. the stop call
 * under EM_TX_LOCK and error-path returns) -- verify against the
 * full if_lem.c before relying on exact control flow.
 */
1086 lem_init_locked(struct adapter *adapter)
1088 struct ifnet *ifp = adapter->ifp;
1089 device_t dev = adapter->dev;
1092 INIT_DEBUGOUT("lem_init: begin");
1094 EM_CORE_LOCK_ASSERT(adapter);
/* Quiesce the transmitter before reprogramming (stop call elided here) */
1096 EM_TX_LOCK(adapter);
1098 EM_TX_UNLOCK(adapter);
1101 	 * Packet Buffer Allocation (PBA)
1102 	 * Writing PBA sets the receive portion of the buffer
1103 	 * the remainder is used for the transmit buffer.
1105 	 * Devices before the 82547 had a Packet Buffer of 64K.
1106 	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1107 	 * After the 82547 the buffer was reduced to 40K.
1108 	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1109 	 *   Note: default does not leave enough room for Jumbo Frame >10k.
1111 switch (adapter->hw.mac.type) {
1113 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1114 if (adapter->max_frame_size > 8192)
1115 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1117 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 workaround state: the software TX FIFO mirror starts empty */
1118 adapter->tx_fifo_head = 0;
1119 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1120 adapter->tx_fifo_size =
1121 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1124 /* Devices before 82547 had a Packet Buffer of 64K. */
1125 if (adapter->max_frame_size > 8192)
1126 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1128 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1131 INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
1132 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1134 /* Get the latest mac address, User can use a LAA */
1135 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1138 /* Put the address into the Receive Address Array */
1139 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1141 /* Initialize the hardware */
1142 if (lem_hardware_init(adapter)) {
1143 device_printf(dev, "Unable to initialize the hardware\n");
1146 lem_update_link_status(adapter);
1148 /* Setup VLAN support, basic and offload if available */
1149 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1151 /* Set hardware offload abilities */
1152 ifp->if_hwassist = 0;
/* 82543 and newer can checksum TCP/UDP in hardware */
1153 if (adapter->hw.mac.type >= e1000_82543) {
1154 if (ifp->if_capenable & IFCAP_TXCSUM)
1155 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1158 /* Configure for OS presence */
1159 lem_init_manageability(adapter);
1161 /* Prepare transmit descriptors and buffers */
1162 lem_setup_transmit_structures(adapter);
1163 lem_initialize_transmit_unit(adapter);
1165 /* Setup Multicast table */
1166 lem_set_multi(adapter);
1168 /* Prepare receive descriptors and buffers */
1169 if (lem_setup_receive_structures(adapter)) {
1170 device_printf(dev, "Could not setup receive structures\n");
/* RX setup failed: stop the adapter (stop call elided in this listing) */
1171 EM_TX_LOCK(adapter);
1173 EM_TX_UNLOCK(adapter);
1176 lem_initialize_receive_unit(adapter);
1178 /* Use real VLAN Filter support? */
1179 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1180 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
1181 /* Use real VLAN Filter support */
1182 lem_setup_vlan_hw_support(adapter);
/* No HW filtering: just enable VLAN tag stripping/insertion (VME) */
1185 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1186 ctrl |= E1000_CTRL_VME;
1187 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1191 /* Don't lose promiscuous settings */
1192 lem_set_promisc(adapter);
1194 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1195 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Arm the 1 Hz housekeeping timer and clear the HW statistics */
1197 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1198 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1200 #ifdef DEVICE_POLLING
1202 	 * Only enable interrupts if we are not polling, make sure
1203 	 * they are off otherwise.
1205 if (ifp->if_capenable & IFCAP_POLLING)
1206 lem_disable_intr(adapter);
1208 #endif /* DEVICE_POLLING */
1209 lem_enable_intr(adapter);
1211 /* AMT based hardware can now take control from firmware */
1212 if (adapter->has_manage && adapter->has_amt)
1213 lem_get_hw_control(adapter);
/*
 * lem_init() body -- locking wrapper around lem_init_locked().
 * (The "static void lem_init(void *arg)" definition line is elided
 * in this listing; arg is the adapter softc.)
 */
1219 struct adapter *adapter = arg;
1221 EM_CORE_LOCK(adapter);
1222 lem_init_locked(adapter);
1223 EM_CORE_UNLOCK(adapter);
1227 #ifdef DEVICE_POLLING
1228 /*********************************************************************
1230  *  Legacy polling routine
1232  *********************************************************************/
/*
 * lem_poll() -- DEVICE_POLLING entry point.
 * On POLL_AND_CHECK_STATUS, reads ICR and refreshes link state when a
 * link-related cause (RXSEQ/LSC) is latched. Then harvests up to
 * 'count' RX descriptors and kicks the TX path if the send queue is
 * non-empty. Bails out early if the interface is not RUNNING.
 */
1234 lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1236 struct adapter *adapter = ifp->if_softc;
1237 u32 reg_icr, rx_done = 0;
1239 EM_CORE_LOCK(adapter);
1240 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1241 EM_CORE_UNLOCK(adapter);
1245 if (cmd == POLL_AND_CHECK_STATUS) {
/* Reading ICR also clears the latched interrupt causes */
1246 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1247 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1248 callout_stop(&adapter->timer);
1249 adapter->hw.mac.get_link_status = 1;
1250 lem_update_link_status(adapter);
1251 callout_reset(&adapter->timer, hz,
1252 lem_local_timer, adapter);
1255 EM_CORE_UNLOCK(adapter);
/* RX cleanup runs without the CORE lock; TX restart under TX lock */
1257 lem_rxeof(adapter, count, &rx_done);
1259 EM_TX_LOCK(adapter);
1261 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1262 lem_start_locked(ifp);
1263 EM_TX_UNLOCK(adapter);
1266 #endif /* DEVICE_POLLING */
1268 /*********************************************************************
1270  *  Legacy Interrupt Service routine
1272  *********************************************************************/
/*
 * lem_intr() body -- non-fast legacy ISR (definition line elided in
 * this listing). Ignores the interrupt when polling owns the device
 * or the interface is down. Reads/clears ICR, counts RX overruns,
 * handles link changes (purging stale TX work when the link drops),
 * then cleans RX and restarts TX.
 */
1276 struct adapter *adapter = arg;
1277 struct ifnet *ifp = adapter->ifp;
1281 if ((ifp->if_capenable & IFCAP_POLLING) ||
1282 ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0))
1285 EM_CORE_LOCK(adapter);
1286 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1287 if (reg_icr & E1000_ICR_RXO)
1288 adapter->rx_overruns++;
/* 0xffffffff means the device is gone; 0 means not our interrupt */
1290 if ((reg_icr == 0xffffffff) || (reg_icr == 0)) {
1291 EM_CORE_UNLOCK(adapter);
1295 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1296 callout_stop(&adapter->timer);
1297 adapter->hw.mac.get_link_status = 1;
1298 lem_update_link_status(adapter);
1299 /* Deal with TX cruft when link lost */
1300 lem_tx_purge(adapter);
1301 callout_reset(&adapter->timer, hz,
1302 lem_local_timer, adapter);
1303 EM_CORE_UNLOCK(adapter);
1307 EM_CORE_UNLOCK(adapter);
/* -1 count: no RX processing limit in interrupt context */
1308 lem_rxeof(adapter, -1, NULL);
1310 EM_TX_LOCK(adapter);
1312 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1313 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1314 lem_start_locked(ifp);
1315 EM_TX_UNLOCK(adapter);
/*
 * lem_handle_link() -- taskqueue handler for deferred link-change
 * processing (queued by lem_irq_fast). Refreshes link status and
 * purges stale TX state under the CORE lock; no-op when the
 * interface is not RUNNING. 'pending' is the standard task argument
 * and is unused here.
 */
1321 lem_handle_link(void *context, int pending)
1323 struct adapter *adapter = context;
1324 struct ifnet *ifp = adapter->ifp;
1326 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1329 EM_CORE_LOCK(adapter);
1330 callout_stop(&adapter->timer);
1331 lem_update_link_status(adapter);
1332 /* Deal with TX cruft when link lost */
1333 lem_tx_purge(adapter);
1334 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1335 EM_CORE_UNLOCK(adapter);
1339 /* Combined RX/TX handler, used by Legacy and MSI */
/*
 * lem_handle_rxtx() -- taskqueue handler doing the deferred RX/TX
 * work for the fast ISR. Re-queues itself while lem_rxeof() reports
 * more work pending, and re-enables interrupts once the interface is
 * still RUNNING and the work is drained.
 */
1341 lem_handle_rxtx(void *context, int pending)
1343 struct adapter *adapter = context;
1344 struct ifnet *ifp = adapter->ifp;
1347 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1348 bool more = lem_rxeof(adapter, adapter->rx_process_limit, NULL);
1349 EM_TX_LOCK(adapter);
1351 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1352 lem_start_locked(ifp);
1353 EM_TX_UNLOCK(adapter);
/* More RX work remains: run the task again rather than looping here */
1355 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1360 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1361 lem_enable_intr(adapter);
1364 /*********************************************************************
1366  *  Fast Legacy/MSI Combined Interrupt Service routine
1368  *********************************************************************/
/*
 * lem_irq_fast() -- filter (fast) interrupt handler.
 * Runs with minimal work in interrupt context: reads/clears ICR,
 * masks further interrupts, and defers all processing to the rxtx
 * and link taskqueues. Returns FILTER_STRAY for interrupts that are
 * not ours, FILTER_HANDLED otherwise.
 */
1370 lem_irq_fast(void *arg)
1372 struct adapter *adapter = arg;
/* All 1s: hardware has disappeared (e.g. surprise removal) */
1378 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1381 if (reg_icr == 0xffffffff)
1382 return FILTER_STRAY;
1384 /* Definitely not our interrupt.  */
1386 return FILTER_STRAY;
1389 	 * Mask interrupts until the taskqueue is finished running.  This is
1390 	 * cheap, just assume that it is needed.  This also works around the
1391 	 * MSI message reordering errata on certain systems.
1393 lem_disable_intr(adapter);
1394 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1396 /* Link status change */
1397 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1398 adapter->hw.mac.get_link_status = 1;
1399 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1402 if (reg_icr & E1000_ICR_RXO)
1403 adapter->rx_overruns++;
1404 return FILTER_HANDLED;
1408 /*********************************************************************
1410  *  Media Ioctl callback
1412  *  This routine is called whenever the user queries the status of
1413  *  the interface using ifconfig.
1415  **********************************************************************/
/*
 * lem_media_status() -- report current media/link state into *ifmr.
 * Refreshes link status first; when the link is down only IFM_AVALID
 * is reported. Fiber/serdes report 1000_SX (or 1000_LX on 82545);
 * copper reports the negotiated speed plus FDX/HDX.
 */
1417 lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1419 struct adapter *adapter = ifp->if_softc;
1420 u_char fiber_type = IFM_1000_SX;
1422 INIT_DEBUGOUT("lem_media_status: begin");
1424 EM_CORE_LOCK(adapter);
1425 lem_update_link_status(adapter);
1427 ifmr->ifm_status = IFM_AVALID;
1428 ifmr->ifm_active = IFM_ETHER;
1430 if (!adapter->link_active) {
1431 EM_CORE_UNLOCK(adapter);
1435 ifmr->ifm_status |= IFM_ACTIVE;
1437 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1438 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
/* 82545 fiber is long-wave (LX); everything else short-wave (SX) */
1439 if (adapter->hw.mac.type == e1000_82545)
1440 fiber_type = IFM_1000_LX;
1441 ifmr->ifm_active |= fiber_type | IFM_FDX;
/* Copper: map negotiated speed/duplex to ifmedia subtypes */
1443 switch (adapter->link_speed) {
1445 ifmr->ifm_active |= IFM_10_T;
1448 ifmr->ifm_active |= IFM_100_TX;
1451 ifmr->ifm_active |= IFM_1000_T;
1454 if (adapter->link_duplex == FULL_DUPLEX)
1455 ifmr->ifm_active |= IFM_FDX;
1457 ifmr->ifm_active |= IFM_HDX;
1459 EM_CORE_UNLOCK(adapter);
1462 /*********************************************************************
1464  *  Media Ioctl callback
1466  *  This routine is called when the user changes speed/duplex using
1467  *  media/mediopt option with ifconfig.
1469  **********************************************************************/
/*
 * lem_media_change() -- apply the user-selected ifmedia setting.
 * Translates the IFM subtype into autoneg/forced speed-duplex fields
 * in the shared-code hw structure, then re-runs lem_init_locked()
 * so the new settings take effect.
 */
1471 lem_media_change(struct ifnet *ifp)
1473 struct adapter *adapter = ifp->if_softc;
1474 struct ifmedia *ifm = &adapter->media;
1476 INIT_DEBUGOUT("lem_media_change: begin");
1478 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1481 EM_CORE_LOCK(adapter);
1482 switch (IFM_SUBTYPE(ifm->ifm_media)) {
/* IFM_AUTO: advertise everything and let autoneg decide */
1484 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1485 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
/* 1000 Mb/s: still autoneg, but only advertise 1000-FD */
1490 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1491 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
/* 100 Mb/s: forced speed, duplex chosen by the IFM_FDX flag */
1494 adapter->hw.mac.autoneg = FALSE;
1495 adapter->hw.phy.autoneg_advertised = 0;
1496 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1497 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1499 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
/* 10 Mb/s: forced speed, duplex chosen by the IFM_FDX flag */
1502 adapter->hw.mac.autoneg = FALSE;
1503 adapter->hw.phy.autoneg_advertised = 0;
1504 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1505 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1507 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1510 device_printf(adapter->dev, "Unsupported media type\n");
1513 lem_init_locked(adapter);
1514 EM_CORE_UNLOCK(adapter);
1519 /*********************************************************************
1521  *  This routine maps the mbufs to tx descriptors.
1523  *  return 0 on success, positive on failure
1524  **********************************************************************/
/*
 * lem_xmit() -- map one outbound mbuf chain to TX descriptors and
 * hand it to the hardware.
 * *m_headp may be replaced (m_pullup/m_defrag) or freed on error;
 * the caller must not reuse its old pointer. Contains two hardware
 * workarounds: descriptor splitting for 82544 on PCI-X, and the
 * 82547 half-duplex FIFO hang avoidance.
 * NOTE(review): several error-return and cleanup lines are elided in
 * this listing; consult the full source for exact error paths.
 */
1527 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1529 bus_dma_segment_t segs[EM_MAX_SCATTER];
1531 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1532 struct e1000_tx_desc *ctxd = NULL;
1533 struct mbuf *m_head;
1534 u32 txd_upper, txd_lower, txd_used, txd_saved;
1535 int error, nsegs, i, j, first, last = 0;
1538 txd_upper = txd_lower = txd_used = txd_saved = 0;
1541 	** When doing checksum offload, it is critical to
1542 	** make sure the first mbuf has more than header,
1543 	** because that routine expects data to be present.
1545 if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
1546 (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
1547 m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
1554 	 * Map the packet for DMA
1556 	 * Capture the first descriptor index,
1557 	 * this descriptor will have the index
1558 	 * of the EOP which is the only one that
1559 	 * now gets a DONE bit writeback.
1561 first = adapter->next_avail_tx_desc;
1562 tx_buffer = &adapter->tx_buffer_area[first];
1563 tx_buffer_mapped = tx_buffer;
1564 map = tx_buffer->map;
1566 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1567 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1570 	 * There are two types of errors we can (try) to handle:
1571 	 * - EFBIG means the mbuf chain was too long and bus_dma ran
1572 	 *   out of segments.  Defragment the mbuf chain and try again.
1573 	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
1574 	 *   at this point in time.  Defer sending and try again later.
1575 	 * All other errors, in particular EINVAL, are fatal and prevent the
1576 	 * mbuf chain from ever going through.  Drop it and report error.
1578 if (error == EFBIG) {
1581 m = m_defrag(*m_headp, M_NOWAIT);
1583 adapter->mbuf_alloc_failed++;
/* Retry the DMA load once with the defragmented chain */
1591 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1592 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1595 adapter->no_tx_dma_setup++;
1600 } else if (error != 0) {
1601 adapter->no_tx_dma_setup++;
/* Need nsegs + slack; refuse if the ring is nearly full */
1605 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1606 adapter->no_tx_desc_avail2++;
1607 bus_dmamap_unload(adapter->txtag, map);
1612 /* Do hardware assists */
1613 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1614 lem_transmit_checksum_setup(adapter, m_head,
1615 &txd_upper, &txd_lower);
1617 i = adapter->next_avail_tx_desc;
1618 if (adapter->pcix_82544)
1621 /* Set up our transmit descriptors */
1622 for (j = 0; j < nsegs; j++) {
1624 bus_addr_t seg_addr;
1625 /* If adapter is 82544 and on PCIX bus */
1626 if(adapter->pcix_82544) {
1627 DESC_ARRAY	desc_array;
1628 u32 array_elements, counter;
1630 			 * Check the Address and Length combination and
1631 			 * split the data accordingly
1633 array_elements = lem_fill_descriptors(segs[j].ds_addr,
1634 segs[j].ds_len, &desc_array);
1635 for (counter = 0; counter < array_elements; counter++) {
/* Ring exhausted mid-packet: roll back and unload the map */
1636 if (txd_used == adapter->num_tx_desc_avail) {
1637 adapter->next_avail_tx_desc = txd_saved;
1638 adapter->no_tx_desc_avail2++;
1639 bus_dmamap_unload(adapter->txtag, map);
1642 tx_buffer = &adapter->tx_buffer_area[i];
1643 ctxd = &adapter->tx_desc_base[i];
1644 ctxd->buffer_addr = htole64(
1645 desc_array.descriptor[counter].address);
1646 ctxd->lower.data = htole32(
1647 (adapter->txd_cmd | txd_lower | (u16)
1648 desc_array.descriptor[counter].length));
1650 htole32((txd_upper));
/* Wrap the ring index */
1652 if (++i == adapter->num_tx_desc)
1654 tx_buffer->m_head = NULL;
1655 tx_buffer->next_eop = -1;
/* Normal (non-82544/PCI-X) path: one descriptor per DMA segment */
1659 tx_buffer = &adapter->tx_buffer_area[i];
1660 ctxd = &adapter->tx_desc_base[i];
1661 seg_addr = segs[j].ds_addr;
1662 seg_len  = segs[j].ds_len;
1663 ctxd->buffer_addr = htole64(seg_addr);
1664 ctxd->lower.data = htole32(
1665 adapter->txd_cmd | txd_lower | seg_len);
1669 if (++i == adapter->num_tx_desc)
1671 tx_buffer->m_head = NULL;
1672 tx_buffer->next_eop = -1;
1676 adapter->next_avail_tx_desc = i;
1678 if (adapter->pcix_82544)
1679 adapter->num_tx_desc_avail -= txd_used;
1681 adapter->num_tx_desc_avail -= nsegs;
1683 if (m_head->m_flags & M_VLANTAG) {
1684 /* Set the vlan id. */
1685 ctxd->upper.fields.special =
1686 htole16(m_head->m_pkthdr.ether_vtag);
1687 /* Tell hardware to add tag */
1688 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
/* Swap DMA maps so the loaded map travels with the EOP buffer */
1691 tx_buffer->m_head = m_head;
1692 tx_buffer_mapped->map = tx_buffer->map;
1693 tx_buffer->map = map;
1694 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1697 	 * Last Descriptor of Packet
1698 	 * needs End Of Packet (EOP)
1699 	 * and Report Status (RS)
1702 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1704 	 * Keep track in the first buffer which
1705 	 * descriptor will be written back
1707 tx_buffer = &adapter->tx_buffer_area[first];
1708 tx_buffer->next_eop = last;
/* Timestamp for the watchdog in lem_local_timer() */
1709 adapter->watchdog_time = ticks;
1712 	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1713 	 * that this frame is available to transmit.
1715 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1716 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* 82547 half-duplex: defer tail write via the FIFO workaround */
1717 if (adapter->hw.mac.type == e1000_82547 &&
1718 adapter->link_duplex == HALF_DUPLEX)
1719 lem_82547_move_tail(adapter);
1721 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1722 if (adapter->hw.mac.type == e1000_82547)
1723 lem_82547_update_fifo_head(adapter,
1724 m_head->m_pkthdr.len);
1730 /*********************************************************************
1732  * 82547 workaround to avoid controller hang in half-duplex environment.
1733  * The workaround is to avoid queuing a large packet that would span
1734  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1735  * in this case. We do that only when FIFO is quiescent.
1737  **********************************************************************/
/*
 * lem_82547_move_tail() -- walk descriptors between the hardware tail
 * (TDT) and the software tail, summing the pending byte length. If
 * the FIFO workaround says advancing now could span the FIFO boundary,
 * retry from the tx_fifo_timer callout one tick later; otherwise
 * advance TDT and account the bytes in the software FIFO mirror.
 * Requires the TX lock (asserted below).
 */
1739 lem_82547_move_tail(void *arg)
1741 struct adapter *adapter = arg;
1742 struct e1000_tx_desc *tx_desc;
1743 u16	hw_tdt, sw_tdt, length = 0;
1746 EM_TX_LOCK_ASSERT(adapter);
1748 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
1749 sw_tdt = adapter->next_avail_tx_desc;
1751 while (hw_tdt != sw_tdt) {
1752 tx_desc = &adapter->tx_desc_base[hw_tdt];
1753 length += tx_desc->lower.flags.length;
1754 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1755 if (++hw_tdt == adapter->num_tx_desc)
/* Would span the FIFO boundary: retry one tick later */
1759 if (lem_82547_fifo_workaround(adapter, length)) {
1760 adapter->tx_fifo_wrk_cnt++;
1761 callout_reset(&adapter->tx_fifo_timer, 1,
1762 lem_82547_move_tail, adapter);
1765 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
1766 lem_82547_update_fifo_head(adapter, length);
/*
 * lem_82547_fifo_workaround() -- decide whether queuing 'len' bytes
 * now risks spanning the 82547 TX FIFO boundary in half duplex.
 * Rounds the packet up to the FIFO header granularity; if it would
 * not fit in the space left before the boundary, attempt a FIFO
 * reset. Return value tells the caller to defer (non-zero) or
 * proceed (elided return lines in this listing).
 */
1773 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1775 int fifo_space, fifo_pkt_len;
1777 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1779 if (adapter->link_duplex == HALF_DUPLEX) {
1780 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1782 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1783 if (lem_82547_tx_fifo_reset(adapter))
/*
 * lem_82547_update_fifo_head() -- advance the software mirror of the
 * 82547 TX FIFO head by 'len' bytes (rounded up to EM_FIFO_HDR
 * granularity), wrapping when it passes the FIFO size.
 */
1794 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1796 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1798 /* tx_fifo_head is always 16 byte aligned */
1799 adapter->tx_fifo_head += fifo_pkt_len;
1800 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1801 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * lem_82547_tx_fifo_reset() -- reset the 82547 internal TX FIFO
 * pointers, but only when the FIFO is fully quiescent: descriptor
 * head == tail, FIFO head == tail (and saved copies), and the FIFO
 * packet count is zero. The TX unit is disabled around the pointer
 * rewrite and re-enabled afterwards.
 */
1807 lem_82547_tx_fifo_reset(struct adapter *adapter)
1811 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1812 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1813 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1814 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1815 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1816 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1817 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1818 /* Disable TX unit */
1819 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1820 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1821 tctl & ~E1000_TCTL_EN);
1823 /* Reset FIFO pointers */
1824 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1825 adapter->tx_head_addr);
1826 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1827 adapter->tx_head_addr);
1828 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1829 adapter->tx_head_addr);
1830 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1831 adapter->tx_head_addr);
1833 /* Re-enable TX unit */
1834 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
/* Flush posted writes before continuing */
1835 E1000_WRITE_FLUSH(&adapter->hw);
1837 adapter->tx_fifo_head = 0;
1838 adapter->tx_fifo_reset_cnt++;
/*
 * lem_set_promisc() -- program RCTL to match the interface's
 * IFF_PROMISC / IFF_ALLMULTI flags. Promiscuous mode also enables
 * SBP (accept bad packets) for debugging visibility, per the
 * original comment below.
 */
1848 lem_set_promisc(struct adapter *adapter)
1850 struct ifnet	*ifp = adapter->ifp;
1853 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1855 if (ifp->if_flags & IFF_PROMISC) {
1856 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1857 /* Turn this on if you want to see bad packets */
1859 reg_rctl |= E1000_RCTL_SBP;
1860 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1861 } else if (ifp->if_flags & IFF_ALLMULTI) {
/* ALLMULTI: accept all multicast but not all unicast */
1862 reg_rctl |= E1000_RCTL_MPE;
1863 reg_rctl &= ~E1000_RCTL_UPE;
1864 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/*
 * lem_disable_promisc() -- clear the unicast/multicast promiscuous
 * and bad-packet-accept bits from RCTL, returning the receiver to
 * normal filtering.
 */
1869 lem_disable_promisc(struct adapter *adapter)
1873 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1875 reg_rctl &=  (~E1000_RCTL_UPE);
1876 reg_rctl &=  (~E1000_RCTL_MPE);
1877 reg_rctl &=  (~E1000_RCTL_SBP);
1878 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1882 /*********************************************************************
1885  *  This routine is called whenever multicast address list is updated.
1887  **********************************************************************/
/*
 * lem_set_multi() -- rebuild the hardware multicast filter from the
 * ifnet's multicast address list. If the list overflows the table,
 * fall back to accepting all multicast (MPE). Wraps the update in
 * the 82542 rev2 RCTL_RST / MWI dance that part requires.
 * NOTE(review): the allocation of 'mta' and some delay calls are
 * elided in this listing.
 */
1890 lem_set_multi(struct adapter *adapter)
1892 struct ifnet	*ifp = adapter->ifp;
1893 struct ifmultiaddr *ifma;
1895 u8  *mta; /* Multicast array memory */
1898 IOCTL_DEBUGOUT("lem_set_multi: begin");
1901 bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
/* 82542 rev2 requires receiver reset around multicast table updates */
1903 if (adapter->hw.mac.type == e1000_82542 &&
1904 adapter->hw.revision_id == E1000_REVISION_2) {
1905 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1906 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1907 e1000_pci_clear_mwi(&adapter->hw);
1908 reg_rctl |= E1000_RCTL_RST;
1909 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1913 #if __FreeBSD_version < 800000
1916 if_maddr_rlock(ifp);
/* Collect link-layer multicast addresses, up to the table limit */
1918 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1919 if (ifma->ifma_addr->sa_family != AF_LINK)
1922 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1925 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1926 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
1929 #if __FreeBSD_version < 800000
1930 IF_ADDR_UNLOCK(ifp);
1932 if_maddr_runlock(ifp);
/* Too many groups for the table: accept all multicast instead */
1934 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1935 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1936 reg_rctl |= E1000_RCTL_MPE;
1937 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1939 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
/* Undo the 82542 rev2 reset and restore MWI if it was enabled */
1941 if (adapter->hw.mac.type == e1000_82542 &&
1942 adapter->hw.revision_id == E1000_REVISION_2) {
1943 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1944 reg_rctl &= ~E1000_RCTL_RST;
1945 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1947 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1948 e1000_pci_set_mwi(&adapter->hw);
1953 /*********************************************************************
1956  *  This routine checks for link status and updates statistics.
1958  **********************************************************************/
/*
 * lem_local_timer() -- 1 Hz callout (armed by lem_init_locked).
 * Refreshes link state and statistics, runs the smartspeed
 * workaround, checks the TX watchdog, and re-arms itself. On
 * watchdog expiry the interface is reinitialized. Runs with the
 * CORE lock held (asserted below).
 */
1961 lem_local_timer(void *arg)
1963 struct adapter	*adapter = arg;
1965 EM_CORE_LOCK_ASSERT(adapter);
1967 lem_update_link_status(adapter);
1968 lem_update_stats_counters(adapter);
1970 lem_smartspeed(adapter);
1973 	 * We check the watchdog: the time since
1974 	 * the last TX descriptor was cleaned.
1975 	 * This implies a functional TX engine.
1977 if ((adapter->watchdog_check == TRUE) &&
1978 (ticks - adapter->watchdog_time > EM_WATCHDOG))
1981 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
/* Watchdog fired: log it, count it, and reinitialize the adapter */
1984 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1985 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1986 adapter->watchdog_events++;
1987 lem_init_locked(adapter);
/*
 * lem_update_link_status() -- refresh cached link state from the PHY
 * or MAC, detect up/down transitions, and propagate them to the
 * stack (if_baudrate, if_link_state_change). On link loss the TX
 * watchdog is disabled since no descriptors can complete.
 */
1991 lem_update_link_status(struct adapter *adapter)
1993 struct e1000_hw *hw = &adapter->hw;
1994 struct ifnet *ifp = adapter->ifp;
1995 device_t dev = adapter->dev;
1998 /* Get the cached link value or read phy for real */
1999 switch (hw->phy.media_type) {
2000 case e1000_media_type_copper:
2001 if (hw->mac.get_link_status) {
2002 /* Do the work to read phy */
2003 e1000_check_for_link(hw);
2004 link_check = !hw->mac.get_link_status;
2005 if (link_check) /* ESB2 fix */
2006 e1000_cfg_on_link_up(hw);
2010 case e1000_media_type_fiber:
2011 e1000_check_for_link(hw);
/* Fiber link state comes from the STATUS register LU bit */
2012 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2015 case e1000_media_type_internal_serdes:
2016 e1000_check_for_link(hw);
2017 link_check = adapter->hw.mac.serdes_has_link;
2020 case e1000_media_type_unknown:
2024 /* Now check for a transition */
2025 if (link_check && (adapter->link_active == 0)) {
2026 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2027 &adapter->link_duplex);
2029 device_printf(dev, "Link is up %d Mbps %s\n",
2030 adapter->link_speed,
2031 ((adapter->link_duplex == FULL_DUPLEX) ?
2032 "Full Duplex" : "Half Duplex"));
2033 adapter->link_active = 1;
2034 adapter->smartspeed = 0;
2035 ifp->if_baudrate = adapter->link_speed * 1000000;
2036 if_link_state_change(ifp, LINK_STATE_UP);
2037 } else if (!link_check && (adapter->link_active == 1)) {
2038 ifp->if_baudrate = adapter->link_speed = 0;
2039 adapter->link_duplex = 0;
2041 device_printf(dev, "Link is Down\n");
2042 adapter->link_active = 0;
2043 /* Link down, disable watchdog */
2044 adapter->watchdog_check = FALSE;
2045 if_link_state_change(ifp, LINK_STATE_DOWN);
2049 /*********************************************************************
2051  *  This routine disables all traffic on the adapter by issuing a
2052  *  global reset on the MAC and deallocates TX/RX buffers.
2054  *  This routine should always be called with BOTH the CORE
2056  **********************************************************************/
/*
 * lem_stop() body -- halt the adapter (definition line elided in
 * this listing). Disables interrupts, cancels both callouts, clears
 * RUNNING/OACTIVE so the stack stops transmitting, resets the MAC,
 * clears wake-up control on 82544+, and turns off the LED.
 * Requires both the CORE and TX locks (asserted below).
 */
2061 struct adapter	*adapter = arg;
2062 struct ifnet	*ifp = adapter->ifp;
2064 EM_CORE_LOCK_ASSERT(adapter);
2065 EM_TX_LOCK_ASSERT(adapter);
2067 INIT_DEBUGOUT("lem_stop: begin");
2069 lem_disable_intr(adapter);
2070 callout_stop(&adapter->timer);
2071 callout_stop(&adapter->tx_fifo_timer);
2073 /* Tell the stack that the interface is no longer active */
2074 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2076 e1000_reset_hw(&adapter->hw);
2077 if (adapter->hw.mac.type >= e1000_82544)
2078 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2080 e1000_led_off(&adapter->hw);
2081 e1000_cleanup_led(&adapter->hw);
2085 /*********************************************************************
2087  *  Determine hardware revision.
2089  **********************************************************************/
/*
 * lem_identify_hardware() -- read PCI config space to identify the
 * part: ensures bus-master/memory decoding are enabled (re-enabling
 * them if firmware left them off), caches vendor/device/revision/
 * subsystem IDs into the shared-code hw struct, and lets the shared
 * code derive the MAC type.
 */
2091 lem_identify_hardware(struct adapter *adapter)
2093 device_t dev = adapter->dev;
2095 /* Make sure our PCI config space has the necessary stuff set */
2096 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2097 if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2098 (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2099 device_printf(dev, "Memory Access and/or Bus Master bits "
/* Firmware left decoding off: force the bits back on */
2101 adapter->hw.bus.pci_cmd_word |=
2102 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2103 pci_write_config(dev, PCIR_COMMAND,
2104 adapter->hw.bus.pci_cmd_word, 2);
2107 /* Save off the information about this board */
2108 adapter->hw.vendor_id = pci_get_vendor(dev);
2109 adapter->hw.device_id = pci_get_device(dev);
2110 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2111 adapter->hw.subsystem_vendor_id =
2112 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2113 adapter->hw.subsystem_device_id =
2114 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2116 /* Do Shared Code Init and Setup */
2117 if (e1000_set_mac_type(&adapter->hw)) {
2118 device_printf(dev, "Setup init failure\n");
/*
 * lem_allocate_pci_resources() -- map the device's register BAR and,
 * for the generations that need it, locate and map the I/O-port BAR
 * by scanning the config-space BAR slots (skipping the second dword
 * of 64-bit memory BARs). Saves the bus-space tag/handle pairs the
 * shared code uses for register access.
 */
2124 lem_allocate_pci_resources(struct adapter *adapter)
2126 device_t	dev = adapter->dev;
2127 int		val, rid, error = E1000_SUCCESS;
2130 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2132 if (adapter->memory == NULL) {
2133 device_printf(dev, "Unable to allocate bus resource: memory\n");
2136 adapter->osdep.mem_bus_space_tag =
2137 rman_get_bustag(adapter->memory);
2138 adapter->osdep.mem_bus_space_handle =
2139 rman_get_bushandle(adapter->memory);
2140 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2142 /* Only older adapters use IO mapping */
2143 if (adapter->hw.mac.type > e1000_82543) {
2144 /* Figure our where our IO BAR is ? */
2145 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2146 val = pci_read_config(dev, rid, 4);
2147 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2148 adapter->io_rid = rid;
2152 /* check for 64bit BAR */
/* 64-bit memory BARs consume two dwords; skip the upper half */
2153 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2156 if (rid >= PCIR_CIS) {
2157 device_printf(dev, "Unable to locate IO BAR\n");
2160 adapter->ioport = bus_alloc_resource_any(dev,
2161 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2162 if (adapter->ioport == NULL) {
2163 device_printf(dev, "Unable to allocate bus resource: "
2167 adapter->hw.io_base = 0;
2168 adapter->osdep.io_bus_space_tag =
2169 rman_get_bustag(adapter->ioport);
2170 adapter->osdep.io_bus_space_handle =
2171 rman_get_bushandle(adapter->ioport);
2174 adapter->hw.back = &adapter->osdep;
2179 /*********************************************************************
2181  *  Setup the Legacy or MSI Interrupt handler
2183  **********************************************************************/
/*
 * lem_allocate_irq() -- allocate the single IRQ resource and attach
 * the handler. Two modes: a plain threaded legacy handler
 * (lem_use_legacy_irq), or the preferred fast-filter handler plus
 * rxtx/link taskqueues for deferred work. All device interrupts are
 * masked first so nothing fires before setup completes.
 */
2185 lem_allocate_irq(struct adapter *adapter)
2187 device_t dev = adapter->dev;
2190 /* Manually turn off all interrupts */
2191 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2193 /* We allocate a single interrupt resource */
2194 adapter->res[0] = bus_alloc_resource_any(dev,
2195 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2196 if (adapter->res[0] == NULL) {
2197 device_printf(dev, "Unable to allocate bus resource: "
2202 /* Do Legacy setup? */
2203 if (lem_use_legacy_irq) {
2204 if ((error = bus_setup_intr(dev, adapter->res[0],
2205 INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
2206 &adapter->tag[0])) != 0) {
2208 "Failed to register interrupt handler");
2215 	 * Use a Fast interrupt and the associated
2216 	 * deferred processing contexts.
2218 TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
2219 TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
2220 adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
2221 taskqueue_thread_enqueue, &adapter->tq);
2222 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2223 device_get_nameunit(adapter->dev));
2224 if ((error = bus_setup_intr(dev, adapter->res[0],
2225 INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
2226 &adapter->tag[0])) != 0) {
2227 device_printf(dev, "Failed to register fast interrupt "
2228 "handler: %d\n", error);
/* Filter registration failed: tear down the taskqueue we created */
2229 taskqueue_free(adapter->tq);
/*
 * lem_free_pci_resources() -- release everything the allocate
 * routines acquired, in reverse order: interrupt handler tag, IRQ
 * resource, memory BAR, and (if mapped) the I/O-port BAR. Each
 * release is guarded so partial attach failures unwind safely.
 */
2239 lem_free_pci_resources(struct adapter *adapter)
2241 device_t dev = adapter->dev;
2244 if (adapter->tag[0] != NULL) {
2245 bus_teardown_intr(dev, adapter->res[0],
2247 adapter->tag[0] = NULL;
2250 if (adapter->res[0] != NULL) {
2251 bus_release_resource(dev, SYS_RES_IRQ,
2252 0, adapter->res[0]);
2255 if (adapter->memory != NULL)
2256 bus_release_resource(dev, SYS_RES_MEMORY,
2257 PCIR_BAR(0), adapter->memory);
2259 if (adapter->ioport != NULL)
2260 bus_release_resource(dev, SYS_RES_IOPORT,
2261 adapter->io_rid, adapter->ioport);
2265 /*********************************************************************
2267  *  Initialize the hardware to a configuration
2268  *  as specified by the adapter structure.
2270  **********************************************************************/
/*
 * lem_hardware_init() -- global MAC reset followed by shared-code
 * hardware init. Computes PAUSE flow-control watermarks from the
 * actual RX buffer size reported by the PBA register, applies the
 * lem_fc_setting tunable when in range [0,3], and finishes with a
 * link check. Returns 0 on success, non-zero on failure (error
 * return lines elided in this listing).
 */
2272 lem_hardware_init(struct adapter *adapter)
2274 device_t dev = adapter->dev;
2277 INIT_DEBUGOUT("lem_hardware_init: begin");
2279 /* Issue a global reset */
2280 e1000_reset_hw(&adapter->hw);
2282 /* When hardware is reset, fifo_head is also reset */
2283 adapter->tx_fifo_head = 0;
2286 	 * These parameters control the automatic generation (Tx) and
2287 	 * response (Rx) to Ethernet PAUSE frames.
2288 	 * - High water mark should allow for at least two frames to be
2289 	 *   received after sending an XOFF.
2290 	 * - Low water mark works best when it is very near the high water mark.
2291 	 *   This allows the receiver to restart by sending XON when it has
2292 	 *   drained a bit. Here we use an arbitary value of 1500 which will
2293 	 *   restart after one full frame is pulled from the buffer. There
2294 	 *   could be several smaller frames in the buffer and if so they will
2295 	 *   not trigger the XON until their total number reduces the buffer
2297 	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2299 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
2302 adapter->hw.fc.high_water = rx_buffer_size -
2303 roundup2(adapter->max_frame_size, 1024);
2304 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2306 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2307 adapter->hw.fc.send_xon = TRUE;
2309 /* Set Flow control, use the tunable location if sane */
2310 if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
2311 adapter->hw.fc.requested_mode = lem_fc_setting;
2313 adapter->hw.fc.requested_mode = e1000_fc_none;
2315 if (e1000_init_hw(&adapter->hw) < 0) {
2316 device_printf(dev, "Hardware Initialization Failed\n");
2320 e1000_check_for_link(&adapter->hw);
2325 /*********************************************************************
2327 * Setup networking device structure and register an interface.
2329 **********************************************************************/
/*
 * Allocate and populate the ifnet, attach it to the Ethernet layer,
 * advertise capabilities (checksum offload, VLAN, polling, WOL) and
 * register the supported media types with ifmedia.
 * NOTE(review): extract is missing lines (return type, ifp declaration,
 * braces, #endif markers, some ifmedia_add() continuations) -- comments
 * only, code left byte-identical.
 */
2331 lem_setup_interface(device_t dev, struct adapter *adapter)
2335 INIT_DEBUGOUT("lem_setup_interface: begin");
2337 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2339 device_printf(dev, "can not allocate ifnet structure\n");
2342 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2343 ifp->if_init = lem_init;
2344 ifp->if_softc = adapter;
2345 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2346 ifp->if_ioctl = lem_ioctl;
2347 ifp->if_start = lem_start;
/* Keep one descriptor in reserve so the ring never completely fills. */
2348 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2349 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2350 IFQ_SET_READY(&ifp->if_snd);
2352 ether_ifattach(ifp, adapter->hw.mac.addr);
2354 ifp->if_capabilities = ifp->if_capenable = 0;
/* HW checksum offload only exists on 82543 and newer MACs. */
2356 if (adapter->hw.mac.type >= e1000_82543) {
2357 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2358 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2362 * Tell the upper layer(s) we support long frames.
2364 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2365 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2366 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2369 ** Don't turn this on by default, if vlans are
2370 ** created on another pseudo device (eg. lagg)
2371 ** then vlan events are not passed thru, breaking
2372 ** operation, but with HW FILTER off it works. If
2373 ** using vlans directly on the em driver you can
2374 ** enable this and get full hardware tag filtering.
2376 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2378 #ifdef DEVICE_POLLING
2379 ifp->if_capabilities |= IFCAP_POLLING;
2382 /* Enable only WOL MAGIC by default */
2384 ifp->if_capabilities |= IFCAP_WOL;
2385 ifp->if_capenable |= IFCAP_WOL_MAGIC;
2389 * Specify the media types supported by this adapter and register
2390 * callbacks to update media and link information
2392 ifmedia_init(&adapter->media, IFM_IMASK,
2393 lem_media_change, lem_media_status);
2394 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2395 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2396 u_char fiber_type = IFM_1000_SX; /* default type */
/* 82545 fiber parts are 1000BASE-LX rather than the usual SX. */
2398 if (adapter->hw.mac.type == e1000_82545)
2399 fiber_type = IFM_1000_LX;
2400 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
2402 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2404 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2405 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2407 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2409 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
/* The IFE PHY cannot do gigabit, so skip the 1000_T media entries. */
2411 if (adapter->hw.phy.type != e1000_phy_ife) {
2412 ifmedia_add(&adapter->media,
2413 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2414 ifmedia_add(&adapter->media,
2415 IFM_ETHER | IFM_1000_T, 0, NULL);
2418 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2419 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2424 /*********************************************************************
2426 * Workaround for SmartSpeed on 82541 and 82547 controllers
2428 **********************************************************************/
/*
 * SmartSpeed workaround for 82541/82547 (IGP PHY): when gigabit
 * autonegotiation repeatedly fails with a Master/Slave configuration
 * fault, toggle manual M/S configuration and restart autoneg so a
 * marginal (e.g. 2/3-pair) cable can still link at a lower speed.
 * Runs periodically; adapter->smartspeed counts iterations and resets
 * after EM_SMARTSPEED_MAX.
 * NOTE(review): extract is missing lines (return type, phy_tmp
 * declaration, return statements, closing braces) -- comments only.
 */
2430 lem_smartspeed(struct adapter *adapter)
/* Nothing to do when linked, non-IGP PHY, forced speed, or 1000FD not advertised. */
2434 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2435 adapter->hw.mac.autoneg == 0 ||
2436 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2439 if (adapter->smartspeed == 0) {
2440 /* If Master/Slave config fault is asserted twice,
2441 * we assume back-to-back */
2442 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2443 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2445 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2446 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2447 e1000_read_phy_reg(&adapter->hw,
2448 PHY_1000T_CTRL, &phy_tmp);
/* Drop manual Master/Slave config and let autoneg resolve it. */
2449 if(phy_tmp & CR_1000T_MS_ENABLE) {
2450 phy_tmp &= ~CR_1000T_MS_ENABLE;
2451 e1000_write_phy_reg(&adapter->hw,
2452 PHY_1000T_CTRL, phy_tmp);
2453 adapter->smartspeed++;
2454 if(adapter->hw.mac.autoneg &&
2455 !e1000_copper_link_autoneg(&adapter->hw) &&
2456 !e1000_read_phy_reg(&adapter->hw,
2457 PHY_CONTROL, &phy_tmp)) {
2458 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2459 MII_CR_RESTART_AUTO_NEG);
2460 e1000_write_phy_reg(&adapter->hw,
2461 PHY_CONTROL, phy_tmp);
2466 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2467 /* If still no link, perhaps using 2/3 pair cable */
2468 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2469 phy_tmp |= CR_1000T_MS_ENABLE;
2470 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2471 if(adapter->hw.mac.autoneg &&
2472 !e1000_copper_link_autoneg(&adapter->hw) &&
2473 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2474 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2475 MII_CR_RESTART_AUTO_NEG);
2476 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2479 /* Restart process after EM_SMARTSPEED_MAX iterations */
2480 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2481 adapter->smartspeed = 0;
2486 * Manage DMA'able memory.
/*
 * busdma load callback: store the physical address of the first (and
 * only) DMA segment into the bus_addr_t pointed to by 'arg'.
 * NOTE(review): the usual "if (error) return;" guard and braces are
 * missing from this extract -- comments only, code left byte-identical.
 */
2489 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2493 *(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * Allocate a DMA-safe memory region of 'size' bytes for descriptor
 * rings: create a tag, allocate coherent memory, and load the map,
 * capturing the physical address via lem_dmamap_cb.  On any failure
 * the partially-acquired resources are unwound (tag/map/memory freed
 * and the em_dma_alloc fields cleared).
 * NOTE(review): extract is missing lines (error labels, nsegments
 * argument, return statements, braces) -- comments only.
 */
2497 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2498 struct em_dma_alloc *dma, int mapflags)
2502 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2503 EM_DBA_ALIGN, 0, /* alignment, bounds */
2504 BUS_SPACE_MAXADDR, /* lowaddr */
2505 BUS_SPACE_MAXADDR, /* highaddr */
2506 NULL, NULL, /* filter, filterarg */
2509 size, /* maxsegsize */
2511 NULL, /* lockfunc */
2515 device_printf(adapter->dev,
2516 "%s: bus_dma_tag_create failed: %d\n",
2521 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2522 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2524 device_printf(adapter->dev,
2525 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2526 __func__, (uintmax_t)size, error);
/* The callback writes the segment's physical address into dma_paddr. */
2531 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2532 size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2533 if (error || dma->dma_paddr == 0) {
2534 device_printf(adapter->dev,
2535 "%s: bus_dmamap_load failed: %d\n",
/* Error unwind: release in reverse order of acquisition. */
2543 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2545 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2546 bus_dma_tag_destroy(dma->dma_tag);
2548 dma->dma_map = NULL;
2549 dma->dma_tag = NULL;
/*
 * Free a region allocated by lem_dma_malloc(): sync, unload and free
 * the map/memory, then destroy the tag.  A NULL dma_tag means nothing
 * was allocated, so the call is a no-op.
 * NOTE(review): extract is missing lines (return type, return statement,
 * closing braces) -- comments only, code left byte-identical.
 */
2555 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2557 if (dma->dma_tag == NULL)
2559 if (dma->dma_map != NULL) {
/* Complete any outstanding DMA before releasing the memory. */
2560 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2561 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2562 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2563 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2564 dma->dma_map = NULL;
2566 bus_dma_tag_destroy(dma->dma_tag);
2567 dma->dma_tag = NULL;
2571 /*********************************************************************
2573 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2574 * the information needed to transmit a packet on the wire.
2576 **********************************************************************/
/*
 * Allocate per-ring TX software state: a DMA tag sized for the maximum
 * scatter/gather list (EM_MAX_SCATTER segments of up to MCLBYTES each),
 * the tx_buffer array, and one DMA map per descriptor.  On failure the
 * 'fail' path frees everything via lem_free_transmit_structures().
 * NOTE(review): extract is missing lines (return type, error labels,
 * return statements, braces) -- comments only.
 */
2578 lem_allocate_transmit_structures(struct adapter *adapter)
2580 device_t dev = adapter->dev;
2581 struct em_buffer *tx_buffer;
2585 * Create DMA tags for tx descriptors
2587 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2588 1, 0, /* alignment, bounds */
2589 BUS_SPACE_MAXADDR, /* lowaddr */
2590 BUS_SPACE_MAXADDR, /* highaddr */
2591 NULL, NULL, /* filter, filterarg */
2592 MCLBYTES * EM_MAX_SCATTER, /* maxsize */
2593 EM_MAX_SCATTER, /* nsegments */
2594 MCLBYTES, /* maxsegsize */
2596 NULL, /* lockfunc */
2598 &adapter->txtag)) != 0) {
2599 device_printf(dev, "Unable to allocate TX DMA tag\n");
2603 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2604 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2605 if (adapter->tx_buffer_area == NULL) {
2606 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2611 /* Create the descriptor buffer dma maps */
2612 for (int i = 0; i < adapter->num_tx_desc; i++) {
2613 tx_buffer = &adapter->tx_buffer_area[i];
2614 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2616 device_printf(dev, "Unable to create TX DMA map\n");
/* -1 marks "no packet pending" for the txeof cleanup logic. */
2619 tx_buffer->next_eop = -1;
/* Failure path: undo whatever was allocated above. */
2624 lem_free_transmit_structures(adapter);
2628 /*********************************************************************
2630 * (Re)Initialize transmit structures.
2632 **********************************************************************/
/*
 * (Re)initialize the TX ring for use: zero the descriptor ring, free
 * any mbufs still attached to tx_buffers, reset the ring indices and
 * sync the descriptor memory for the hardware.  Under DEV_NETMAP the
 * descriptors are instead pointed at the netmap slot buffers.
 * Caller holds the TX lock (see "we are already locked" below).
 * NOTE(review): extract is missing lines (return type, #ifdef DEV_NETMAP
 * opener, braces) -- comments only, code left byte-identical.
 */
2634 lem_setup_transmit_structures(struct adapter *adapter)
2636 struct em_buffer *tx_buffer;
2638 /* we are already locked */
2639 struct netmap_adapter *na = NA(adapter->ifp);
2640 struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
2641 #endif /* DEV_NETMAP */
2643 /* Clear the old ring contents */
2644 bzero(adapter->tx_desc_base,
2645 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2647 /* Free any existing TX buffers */
2648 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2649 tx_buffer = &adapter->tx_buffer_area[i];
2650 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2651 BUS_DMASYNC_POSTWRITE);
2652 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
/* m_freem(NULL) is a no-op, so unconditional free is safe here. */
2653 m_freem(tx_buffer->m_head);
2654 tx_buffer->m_head = NULL;
2657 /* the i-th NIC entry goes to slot si */
2658 int si = netmap_idx_n2k(&na->tx_rings[0], i);
2662 addr = PNMB(slot + si, &paddr);
2663 adapter->tx_desc_base[si].buffer_addr = htole64(paddr);
2664 /* reload the map for netmap mode */
2665 netmap_load_map(adapter->txtag, tx_buffer->map, addr);
2667 #endif /* DEV_NETMAP */
2668 tx_buffer->next_eop = -1;
/* Reset ring bookkeeping: ring is now empty and fully available. */
2672 adapter->last_hw_offload = 0;
2673 adapter->next_avail_tx_desc = 0;
2674 adapter->next_tx_to_clean = 0;
2675 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2677 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2678 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2683 /*********************************************************************
2685 * Enable transmit unit.
2687 **********************************************************************/
/*
 * Program the MAC's transmit unit: descriptor ring base/length,
 * head/tail pointers, inter-packet gap (TIPG) per MAC generation,
 * interrupt delay timers (TIDV/TADV), and finally TCTL, whose EN bit
 * turns the transmitter on.  Also seeds adapter->txd_cmd with the
 * per-descriptor command bits used when queueing packets.
 * NOTE(review): extract is missing lines (return type, tipg/tctl/
 * bus_addr declarations, switch case labels, braces) -- comments only.
 */
2689 lem_initialize_transmit_unit(struct adapter *adapter)
2694 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
2695 /* Setup the Base and Length of the Tx Descriptor Ring */
2696 bus_addr = adapter->txdma.dma_paddr;
2697 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2698 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2699 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2700 (u32)(bus_addr >> 32));
2701 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2703 /* Setup the HW Tx Head and Tail descriptor pointers */
2704 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2705 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
2707 HW_DEBUGOUT2("Base = %x, Length = %x\n",
2708 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
2709 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
2711 /* Set the default values for the Tx Inter Packet Gap timer */
2712 switch (adapter->hw.mac.type) {
/* 82542 uses its own TIPG values; fiber/serdes differ from copper below. */
2714 tipg = DEFAULT_82542_TIPG_IPGT;
2715 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2716 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2719 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2720 (adapter->hw.phy.media_type ==
2721 e1000_media_type_internal_serdes))
2722 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2724 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2725 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2726 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2729 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2730 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
/* TADV (absolute interrupt delay) only exists on 82540 and newer. */
2731 if(adapter->hw.mac.type >= e1000_82540)
2732 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
2733 adapter->tx_abs_int_delay.value);
2735 /* Program the Transmit Control Register */
2736 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2737 tctl &= ~E1000_TCTL_CT;
2738 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2739 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2741 /* This write will effectively turn on the transmit unit. */
2742 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2744 /* Setup Transmit Descriptor Base Settings */
2745 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
/* Only request delayed TX interrupts when a delay is configured. */
2747 if (adapter->tx_int_delay.value > 0)
2748 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2751 /*********************************************************************
2753 * Free all transmit related data structures.
2755 **********************************************************************/
/*
 * Tear down all TX software state: per-buffer mbufs and DMA maps, the
 * tx_buffer array, the TX DMA tag and (FreeBSD 8+) the buf_ring.  All
 * steps are NULL-guarded so this is safe from partial-attach failure
 * paths.
 * NOTE(review): extract is missing lines (return type, map argument
 * continuations, braces, #endif) -- comments only, code byte-identical.
 */
2757 lem_free_transmit_structures(struct adapter *adapter)
2759 struct em_buffer *tx_buffer;
2761 INIT_DEBUGOUT("free_transmit_structures: begin");
2763 if (adapter->tx_buffer_area != NULL) {
2764 for (int i = 0; i < adapter->num_tx_desc; i++) {
2765 tx_buffer = &adapter->tx_buffer_area[i];
2766 if (tx_buffer->m_head != NULL) {
2767 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2768 BUS_DMASYNC_POSTWRITE);
2769 bus_dmamap_unload(adapter->txtag,
2771 m_freem(tx_buffer->m_head);
2772 tx_buffer->m_head = NULL;
2773 } else if (tx_buffer->map != NULL)
2774 bus_dmamap_unload(adapter->txtag,
2776 if (tx_buffer->map != NULL) {
2777 bus_dmamap_destroy(adapter->txtag,
2779 tx_buffer->map = NULL;
2783 if (adapter->tx_buffer_area != NULL) {
2784 free(adapter->tx_buffer_area, M_DEVBUF);
2785 adapter->tx_buffer_area = NULL;
2787 if (adapter->txtag != NULL) {
2788 bus_dma_tag_destroy(adapter->txtag);
2789 adapter->txtag = NULL;
2791 #if __FreeBSD_version >= 800000
2792 if (adapter->br != NULL)
2793 buf_ring_free(adapter->br, M_DEVBUF);
2797 /*********************************************************************
2799 * The offload context needs to be set when we transfer the first
2800 * packet of a particular protocol (TCP/UDP). This routine has been
2801 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2803 * Added back the old method of keeping the current context type
2804 * and not setting if unnecessary, as this is reported to be a
2805 * big performance win. -jfv
2806 **********************************************************************/
/*
 * Set up a TX context descriptor for IP/TCP/UDP checksum offload of
 * mbuf 'mp', returning the txd_upper/txd_lower bits the caller must OR
 * into the data descriptors.  Skips writing a new context when the
 * previous packet used the same offload type (last_hw_offload cache),
 * which is a measurable performance win (see banner comment above).
 * Consumes one descriptor from the ring when a context is written.
 * NOTE(review): extract is missing lines (return type, etype/ipproto
 * declarations, switch/case labels, return statements, braces) --
 * comments only, code left byte-identical.
 */
2808 lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2809 u32 *txd_upper, u32 *txd_lower)
2811 struct e1000_context_desc *TXD = NULL;
2812 struct em_buffer *tx_buffer;
2813 struct ether_vlan_header *eh;
2814 struct ip *ip = NULL;
2815 struct ip6_hdr *ip6;
2816 int curr_txd, ehdrlen;
2817 u32 cmd, hdr_len, ip_hlen;
2822 cmd = hdr_len = ipproto = 0;
2823 *txd_upper = *txd_lower = 0;
2824 curr_txd = adapter->next_avail_tx_desc;
2827 * Determine where frame payload starts.
2828 * Jump over vlan headers if already present,
2829 * helpful for QinQ too.
2831 eh = mtod(mp, struct ether_vlan_header *);
2832 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2833 etype = ntohs(eh->evl_proto);
2834 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2836 etype = ntohs(eh->evl_encap_proto);
2837 ehdrlen = ETHER_HDR_LEN;
2841 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
2842 * TODO: Support SCTP too when it hits the tree.
/* IPv4 branch: compute header length and optionally offload the IP csum. */
2846 ip = (struct ip *)(mp->m_data + ehdrlen);
2847 ip_hlen = ip->ip_hl << 2;
2849 /* Setup of IP header checksum. */
2850 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
2852 * Start offset for header checksum calculation.
2853 * End offset for header checksum calculation.
2854 * Offset of place to put the checksum.
2856 TXD = (struct e1000_context_desc *)
2857 &adapter->tx_desc_base[curr_txd];
2858 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2859 TXD->lower_setup.ip_fields.ipcse =
2860 htole16(ehdrlen + ip_hlen);
2861 TXD->lower_setup.ip_fields.ipcso =
2862 ehdrlen + offsetof(struct ip, ip_sum);
2863 cmd |= E1000_TXD_CMD_IP;
2864 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2867 hdr_len = ehdrlen + ip_hlen;
2871 case ETHERTYPE_IPV6:
2872 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2873 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
2875 /* IPv6 doesn't have a header checksum. */
2877 hdr_len = ehdrlen + ip_hlen;
2878 ipproto = ip6->ip6_nxt;
/* TCP payload checksum: tucso points at th_sum; tucse=0 means "to end". */
2887 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2888 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2889 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2890 /* no need for context if already set */
2891 if (adapter->last_hw_offload == CSUM_TCP)
2893 adapter->last_hw_offload = CSUM_TCP;
2895 * Start offset for payload checksum calculation.
2896 * End offset for payload checksum calculation.
2897 * Offset of place to put the checksum.
2899 TXD = (struct e1000_context_desc *)
2900 &adapter->tx_desc_base[curr_txd];
2901 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2902 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2903 TXD->upper_setup.tcp_fields.tucso =
2904 hdr_len + offsetof(struct tcphdr, th_sum);
2905 cmd |= E1000_TXD_CMD_TCP;
/* UDP payload checksum: same layout, tucso points at uh_sum. */
2910 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2911 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2912 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2913 /* no need for context if already set */
2914 if (adapter->last_hw_offload == CSUM_UDP)
2916 adapter->last_hw_offload = CSUM_UDP;
2918 * Start offset for header checksum calculation.
2919 * End offset for header checksum calculation.
2920 * Offset of place to put the checksum.
2922 TXD = (struct e1000_context_desc *)
2923 &adapter->tx_desc_base[curr_txd];
2924 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2925 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2926 TXD->upper_setup.tcp_fields.tucso =
2927 hdr_len + offsetof(struct udphdr, uh_sum);
/* Finalize the context descriptor and consume one ring slot for it. */
2937 TXD->tcp_seg_setup.data = htole32(0);
2938 TXD->cmd_and_length =
2939 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
2940 tx_buffer = &adapter->tx_buffer_area[curr_txd];
2941 tx_buffer->m_head = NULL;
2942 tx_buffer->next_eop = -1;
2944 if (++curr_txd == adapter->num_tx_desc)
2947 adapter->num_tx_desc_avail--;
2948 adapter->next_avail_tx_desc = curr_txd;
2952 /**********************************************************************
2954 * Examine each tx_buffer in the used queue. If the hardware is done
2955 * processing the packet then free associated resources. The
2956 * tx_buffer is put back on the free queue.
2958 **********************************************************************/
/*
 * Reclaim completed TX descriptors.  Walks packets from
 * next_tx_to_clean, and for each whose EOP descriptor has the DD
 * (descriptor done) status bit set, zeroes the descriptors, unloads
 * the DMA maps and frees the mbufs, crediting num_tx_desc_avail.
 * Clears IFF_DRV_OACTIVE once enough room exists and disarms the
 * watchdog when the ring is fully drained.  Caller holds the TX lock.
 * NOTE(review): extract is missing lines (return type, the line that
 * computes 'done' from 'last', num_avail increments, braces) --
 * comments only, code left byte-identical.
 */
2960 lem_txeof(struct adapter *adapter)
2962 int first, last, done, num_avail;
2963 struct em_buffer *tx_buffer;
2964 struct e1000_tx_desc *tx_desc, *eop_desc;
2965 struct ifnet *ifp = adapter->ifp;
2967 EM_TX_LOCK_ASSERT(adapter);
/* In netmap mode the netmap ring owns TX completion; nothing to do here. */
2970 if (netmap_tx_irq(ifp, 0 | (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT)))
2972 #endif /* DEV_NETMAP */
/* Fast path: ring already empty. */
2973 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2976 num_avail = adapter->num_tx_desc_avail;
2977 first = adapter->next_tx_to_clean;
2978 tx_desc = &adapter->tx_desc_base[first];
2979 tx_buffer = &adapter->tx_buffer_area[first];
2980 last = tx_buffer->next_eop;
2981 eop_desc = &adapter->tx_desc_base[last];
2984 * What this does is get the index of the
2985 * first descriptor AFTER the EOP of the
2986 * first packet, that way we can do the
2987 * simple comparison on the inner while loop.
2989 if (++last == adapter->num_tx_desc)
2993 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2994 BUS_DMASYNC_POSTREAD);
2996 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2997 /* We clean the range of the packet */
2998 while (first != done) {
2999 tx_desc->upper.data = 0;
3000 tx_desc->lower.data = 0;
3001 tx_desc->buffer_addr = 0;
3004 if (tx_buffer->m_head) {
3006 bus_dmamap_sync(adapter->txtag,
3008 BUS_DMASYNC_POSTWRITE);
3009 bus_dmamap_unload(adapter->txtag,
3012 m_freem(tx_buffer->m_head);
3013 tx_buffer->m_head = NULL;
3015 tx_buffer->next_eop = -1;
/* Progress was made: pet the watchdog timestamp. */
3016 adapter->watchdog_time = ticks;
3018 if (++first == adapter->num_tx_desc)
3021 tx_buffer = &adapter->tx_buffer_area[first];
3022 tx_desc = &adapter->tx_desc_base[first];
3024 /* See if we can continue to the next packet */
3025 last = tx_buffer->next_eop;
3027 eop_desc = &adapter->tx_desc_base[last];
3028 /* Get new done point */
3029 if (++last == adapter->num_tx_desc) last = 0;
3034 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3035 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3037 adapter->next_tx_to_clean = first;
3038 adapter->num_tx_desc_avail = num_avail;
3041 * If we have enough room, clear IFF_DRV_OACTIVE to
3042 * tell the stack that it is OK to send packets.
3043 * If there are no pending descriptors, clear the watchdog.
3045 if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
3046 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3047 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
3048 adapter->watchdog_check = FALSE;
3054 /*********************************************************************
3056 * When Link is lost sometimes there is work still in the TX ring
3057 * which may result in a watchdog, rather than allow that we do an
3058 * attempted cleanup and then reinit here. Note that this has been
3059 seen mostly with fiber adapters.
3061 **********************************************************************/
/*
 * When the link has dropped with transmits still outstanding, attempt
 * a TX cleanup (under the TX lock) and, if work is still pending
 * afterwards, reinitialize the interface rather than letting the
 * watchdog fire.  See banner comment above for the motivation.
 * NOTE(review): the lem_txeof() call between lock/unlock is missing
 * from this extract -- comments only, code left byte-identical.
 */
3063 lem_tx_purge(struct adapter *adapter)
3065 if ((!adapter->link_active) && (adapter->watchdog_check)) {
3066 EM_TX_LOCK(adapter);
3068 EM_TX_UNLOCK(adapter);
3069 if (adapter->watchdog_check) /* Still outstanding? */
3070 lem_init_locked(adapter);
3074 /*********************************************************************
3076 * Get a buffer from system mbuf buffer pool.
3078 **********************************************************************/
/*
 * Refill RX descriptor slot 'i' with a fresh mbuf cluster: allocate,
 * optionally ETHER_ALIGN-adjust, DMA-load into the spare map, then
 * swap the spare map with the slot's map and write the new physical
 * address into the RX descriptor.  Returns 0 on success (error paths
 * are partially outside this extract).
 * NOTE(review): extract is missing lines (return type, m/map/error
 * declarations, error returns, braces) -- comments only.
 */
3080 lem_get_buf(struct adapter *adapter, int i)
3083 bus_dma_segment_t segs[1];
3085 struct em_buffer *rx_buffer;
3088 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3090 adapter->mbuf_cluster_failed++;
3093 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Align the IP header only when the frame still fits in the cluster. */
3095 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3096 m_adj(m, ETHER_ALIGN);
3099 * Using memory from the mbuf cluster pool, invoke the
3100 * bus_dma machinery to arrange the memory mapping.
3102 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3103 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3109 /* If nsegs is wrong then the stack is corrupt. */
3110 KASSERT(nsegs == 1, ("Too many segments returned!"));
3112 rx_buffer = &adapter->rx_buffer_area[i];
3113 if (rx_buffer->m_head != NULL)
3114 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap: the freshly-loaded spare map becomes this slot's map. */
3116 map = rx_buffer->map;
3117 rx_buffer->map = adapter->rx_sparemap;
3118 adapter->rx_sparemap = map;
3119 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3120 rx_buffer->m_head = m;
3122 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3126 /*********************************************************************
3128 * Allocate memory for rx_buffer structures. Since we use one
3129 * rx_buffer per received packet, the maximum number of rx_buffer's
3130 * that we'll need is equal to the number of receive descriptors
3131 * that we've allocated.
3133 **********************************************************************/
/*
 * Allocate per-ring RX software state: the rx_buffer array, a
 * single-segment MCLBYTES DMA tag, the spare map used by lem_get_buf()
 * for refills, and one DMA map per descriptor.  On failure the 'fail'
 * path frees everything via lem_free_receive_structures().
 * NOTE(review): extract is missing lines (return type, nsegments
 * argument, error labels, returns, braces) -- comments only.
 */
3135 lem_allocate_receive_structures(struct adapter *adapter)
3137 device_t dev = adapter->dev;
3138 struct em_buffer *rx_buffer;
3141 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3142 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3143 if (adapter->rx_buffer_area == NULL) {
3144 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3148 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3149 1, 0, /* alignment, bounds */
3150 BUS_SPACE_MAXADDR, /* lowaddr */
3151 BUS_SPACE_MAXADDR, /* highaddr */
3152 NULL, NULL, /* filter, filterarg */
3153 MCLBYTES, /* maxsize */
3155 MCLBYTES, /* maxsegsize */
3157 NULL, /* lockfunc */
3161 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3166 /* Create the spare map (used by getbuf) */
3167 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3168 &adapter->rx_sparemap);
3170 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3175 rx_buffer = adapter->rx_buffer_area;
3176 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3177 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3180 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* Failure path: undo whatever was allocated above. */
3189 lem_free_receive_structures(adapter);
3193 /*********************************************************************
3195 * (Re)initialize receive structures.
3197 **********************************************************************/
/*
 * (Re)initialize the RX ring: zero the descriptor ring, free any mbufs
 * still attached, then refill every slot via lem_get_buf() (or, under
 * DEV_NETMAP, point the descriptors at netmap slot buffers), reset the
 * scan index and sync descriptor memory.  Caller holds the lock.
 * NOTE(review): extract is missing lines (return type, i/error
 * declarations, #ifdef opener, braces, returns) -- comments only.
 */
3199 lem_setup_receive_structures(struct adapter *adapter)
3201 struct em_buffer *rx_buffer;
3204 /* we are already under lock */
3205 struct netmap_adapter *na = NA(adapter->ifp);
3206 struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
3209 /* Reset descriptor ring */
3210 bzero(adapter->rx_desc_base,
3211 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3213 /* Free current RX buffers. */
3214 rx_buffer = adapter->rx_buffer_area;
3215 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3216 if (rx_buffer->m_head != NULL) {
3217 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3218 BUS_DMASYNC_POSTREAD);
3219 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3220 m_freem(rx_buffer->m_head);
3221 rx_buffer->m_head = NULL;
3225 /* Allocate new ones. */
3226 for (i = 0; i < adapter->num_rx_desc; i++) {
3229 /* the i-th NIC entry goes to slot si */
3230 int si = netmap_idx_n2k(&na->rx_rings[0], i);
3234 addr = PNMB(slot + si, &paddr);
3235 netmap_load_map(adapter->rxtag, rx_buffer->map, addr);
3236 /* Update descriptor */
3237 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
3240 #endif /* DEV_NETMAP */
3241 error = lem_get_buf(adapter, i);
3246 /* Setup our descriptor pointers */
3247 adapter->next_rx_desc_to_check = 0;
3248 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3249 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3254 /*********************************************************************
3256 * Enable receive unit.
3258 **********************************************************************/
/*
 * Program the MAC's receive unit: disable RX while configuring, set
 * interrupt delay/throttle (82540+), descriptor ring base/length,
 * RCTL (buffer size, long-packet enable, broadcast accept), RX
 * checksum offload (82543+), then re-enable RX and set head/tail.
 * Note 'rctl' is reused at the end as the RDT (tail) value.
 * NOTE(review): extract is missing lines (return type, rctl/rxcsum/
 * bus_addr declarations, switch case labels, braces, #ifdef opener) --
 * comments only, code left byte-identical.
 */
3261 lem_initialize_receive_unit(struct adapter *adapter)
3263 struct ifnet *ifp = adapter->ifp;
3267 INIT_DEBUGOUT("lem_initialize_receive_unit: begin");
3270 * Make sure receives are disabled while setting
3271 * up the descriptor ring
3273 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3274 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
/* RADV/ITR registers only exist on 82540 and newer MACs. */
3276 if (adapter->hw.mac.type >= e1000_82540) {
3277 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
3278 adapter->rx_abs_int_delay.value);
3280 * Set the interrupt throttling rate. Value is calculated
3281 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3283 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
3286 /* Setup the Base and Length of the Rx Descriptor Ring */
3287 bus_addr = adapter->rxdma.dma_paddr;
3288 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3289 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3290 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3291 (u32)(bus_addr >> 32));
3292 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3295 /* Setup the Receive Control Register */
3296 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3297 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3298 E1000_RCTL_RDMTS_HALF |
3299 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3301 /* Make sure VLAN Filters are off */
3302 rctl &= ~E1000_RCTL_VFE;
/* 82543 TBI workaround may require accepting bad ("store bad") packets. */
3304 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3305 rctl |= E1000_RCTL_SBP;
3307 rctl &= ~E1000_RCTL_SBP;
/* Select RX buffer size bits; >2K sizes need BSEX and long-packet enable. */
3309 switch (adapter->rx_buffer_len) {
3312 rctl |= E1000_RCTL_SZ_2048;
3315 rctl |= E1000_RCTL_SZ_4096 |
3316 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3319 rctl |= E1000_RCTL_SZ_8192 |
3320 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3323 rctl |= E1000_RCTL_SZ_16384 |
3324 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3328 if (ifp->if_mtu > ETHERMTU)
3329 rctl |= E1000_RCTL_LPE;
3331 rctl &= ~E1000_RCTL_LPE;
3333 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
3334 if ((adapter->hw.mac.type >= e1000_82543) &&
3335 (ifp->if_capenable & IFCAP_RXCSUM)) {
3336 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3337 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3338 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3341 /* Enable Receives */
3342 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3345 * Setup the HW Rx Head and
3346 * Tail Descriptor Pointers
3348 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
/* 'rctl' is recycled here as the tail-pointer value, not a control word. */
3349 rctl = adapter->num_rx_desc - 1; /* default RDT value */
3351 /* preserve buffers already made available to clients */
3352 if (ifp->if_capenable & IFCAP_NETMAP)
3353 rctl -= NA(adapter->ifp)->rx_rings[0].nr_hwavail;
3354 #endif /* DEV_NETMAP */
3355 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rctl);
3360 /*********************************************************************
3362 * Free receive related data structures.
3364 **********************************************************************/
/*
 * Tear down all RX software state: the spare map, per-buffer mbufs and
 * DMA maps, the rx_buffer array, and finally the RX DMA tag.  All
 * steps are NULL-guarded so this is safe from partial-attach failure
 * paths (it is the 'fail' target of lem_allocate_receive_structures).
 * NOTE(review): extract is missing lines (return type, i declaration,
 * map argument continuations, braces) -- comments only.
 */
3366 lem_free_receive_structures(struct adapter *adapter)
3368 struct em_buffer *rx_buffer;
3371 INIT_DEBUGOUT("free_receive_structures: begin");
3373 if (adapter->rx_sparemap) {
3374 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3375 adapter->rx_sparemap = NULL;
3378 /* Cleanup any existing buffers */
3379 if (adapter->rx_buffer_area != NULL) {
3380 rx_buffer = adapter->rx_buffer_area;
3381 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3382 if (rx_buffer->m_head != NULL) {
3383 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3384 BUS_DMASYNC_POSTREAD);
3385 bus_dmamap_unload(adapter->rxtag,
3387 m_freem(rx_buffer->m_head);
3388 rx_buffer->m_head = NULL;
3389 } else if (rx_buffer->map != NULL)
3390 bus_dmamap_unload(adapter->rxtag,
3392 if (rx_buffer->map != NULL) {
3393 bus_dmamap_destroy(adapter->rxtag,
3395 rx_buffer->map = NULL;
3400 if (adapter->rx_buffer_area != NULL) {
3401 free(adapter->rx_buffer_area, M_DEVBUF);
3402 adapter->rx_buffer_area = NULL;
3405 if (adapter->rxtag != NULL) {
3406 bus_dma_tag_destroy(adapter->rxtag);
3407 adapter->rxtag = NULL;
3411 /*********************************************************************
3413 * This routine executes in interrupt context. It replenishes
3414 * the mbufs in the descriptor and sends data which has been
3415 * dma'ed into host memory to upper layer.
3417 * We loop at most count times if count is > 0, or until done if
3420 * For polling we also now return the number of cleaned packets
3421 *********************************************************************/
/*
 * NOTE(review): this extract is missing physical lines (embedded line
 * numbering is non-contiguous), so the comments below describe only the
 * code that is visible here.
 */
3423 lem_rxeof(struct adapter *adapter, int count, int *done)
3425 struct ifnet *ifp = adapter->ifp;
3427 u8 status = 0, accept_frame = 0, eop = 0;
3428 u16 len, desc_len, prev_len_adj;
3430 struct e1000_rx_desc *current_desc;
/* All ring/state manipulation below runs under the RX lock. */
3432 EM_RX_LOCK(adapter);
3433 i = adapter->next_rx_desc_to_check;
3434 current_desc = &adapter->rx_desc_base[i];
/* Make device-written descriptor contents visible to the CPU. */
3435 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3436 BUS_DMASYNC_POSTREAD);
/* Hand the interrupt to netmap when netmap owns the ring. */
3439 if (netmap_rx_irq(ifp, 0 | NETMAP_LOCKED_ENTER, &rx_sent))
3441 #endif /* DEV_NETMAP */
/* Nothing ready: first descriptor's DD (Descriptor Done) bit clear. */
3443 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3446 EM_RX_UNLOCK(adapter);
/* Scan descriptors until DD is clear, count expires, or iface stops. */
3450 while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3451 struct mbuf *m = NULL;
3453 status = current_desc->status;
3454 if ((status & E1000_RXD_STAT_DD) == 0)
3457 mp = adapter->rx_buffer_area[i].m_head;
3459 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3460 * needs to access the last received byte in the mbuf.
3462 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3463 BUS_DMASYNC_POSTREAD);
3467 desc_len = le16toh(current_desc->length);
/* End-of-packet: strip the trailing Ethernet CRC from the length. */
3468 if (status & E1000_RXD_STAT_EOP) {
3471 if (desc_len < ETHER_CRC_LEN) {
/* CRC straddles fragments: shorten the previous mbuf instead. */
3473 prev_len_adj = ETHER_CRC_LEN - desc_len;
3475 len = desc_len - ETHER_CRC_LEN;
/* Hardware flagged an error; TBI workaround may still accept it. */
3481 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3483 u32 pkt_len = desc_len;
3485 if (adapter->fmp != NULL)
3486 pkt_len += adapter->fmp->m_pkthdr.len;
3488 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3489 if (TBI_ACCEPT(&adapter->hw, status,
3490 current_desc->errors, pkt_len, last_byte,
3491 adapter->min_frame_size, adapter->max_frame_size)) {
3492 e1000_tbi_adjust_stats_82543(&adapter->hw,
3493 &adapter->stats, pkt_len,
3494 adapter->hw.mac.addr,
3495 adapter->max_frame_size);
/* Replenish this slot with a fresh mbuf before consuming the old one. */
3503 if (lem_get_buf(adapter, i) != 0) {
3508 /* Assign correct length to the current fragment */
3511 if (adapter->fmp == NULL) {
3512 mp->m_pkthdr.len = len;
3513 adapter->fmp = mp; /* Store the first mbuf */
3516 /* Chain mbuf's together */
3517 mp->m_flags &= ~M_PKTHDR;
3519 * Adjust length of previous mbuf in chain if
3520 * we received less than 4 bytes in the last
3523 if (prev_len_adj > 0) {
3524 adapter->lmp->m_len -= prev_len_adj;
3525 adapter->fmp->m_pkthdr.len -=
3528 adapter->lmp->m_next = mp;
3529 adapter->lmp = adapter->lmp->m_next;
3530 adapter->fmp->m_pkthdr.len += len;
3534 adapter->fmp->m_pkthdr.rcvif = ifp;
3536 lem_receive_checksum(adapter, current_desc,
3538 #ifndef __NO_STRICT_ALIGNMENT
/* Realign jumbo payloads on strict-alignment architectures. */
3539 if (adapter->max_frame_size >
3540 (MCLBYTES - ETHER_ALIGN) &&
3541 lem_fixup_rx(adapter) != 0)
/* Propagate hardware-stripped VLAN tag to the mbuf. */
3544 if (status & E1000_RXD_STAT_VP) {
3545 adapter->fmp->m_pkthdr.ether_vtag =
3546 le16toh(current_desc->special);
3547 adapter->fmp->m_flags |= M_VLANTAG;
3549 #ifndef __NO_STRICT_ALIGNMENT
3553 adapter->fmp = NULL;
3554 adapter->lmp = NULL;
3557 adapter->dropped_pkts++;
3559 /* Reuse loaded DMA map and just update mbuf chain */
3560 mp = adapter->rx_buffer_area[i].m_head;
3561 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3562 mp->m_data = mp->m_ext.ext_buf;
3564 if (adapter->max_frame_size <=
3565 (MCLBYTES - ETHER_ALIGN))
3566 m_adj(mp, ETHER_ALIGN);
/* Error/drop path: discard any partially assembled chain. */
3567 if (adapter->fmp != NULL) {
3568 m_freem(adapter->fmp);
3569 adapter->fmp = NULL;
3570 adapter->lmp = NULL;
3575 /* Zero out the receive descriptors status. */
3576 current_desc->status = 0;
3577 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3578 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3580 /* Advance our pointers to the next descriptor. */
3581 if (++i == adapter->num_rx_desc)
3583 /* Call into the stack */
/* Drop the RX lock across if_input(); the stack may re-enter us. */
3585 adapter->next_rx_desc_to_check = i;
3586 EM_RX_UNLOCK(adapter);
3587 (*ifp->if_input)(ifp, m);
3588 EM_RX_LOCK(adapter);
/* Re-read the index: it may have moved while the lock was dropped. */
3590 i = adapter->next_rx_desc_to_check;
3592 current_desc = &adapter->rx_desc_base[i];
3594 adapter->next_rx_desc_to_check = i;
3596 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
3598 i = adapter->num_rx_desc - 1;
3599 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3602 EM_RX_UNLOCK(adapter);
3603 return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
3606 #ifndef __NO_STRICT_ALIGNMENT
3608 * When jumbo frames are enabled we should realign entire payload on
3609 * architecures with strict alignment. This is serious design mistake of 8254x
3610 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3611 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3612 * payload. On architecures without strict alignment restrictions 8254x still
3613 * performs unaligned memory access which would reduce the performance too.
3614 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3615 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3616 * existing mbuf chain.
3618 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3619 * not used at all on architectures with strict alignment.
/* NOTE(review): extract is truncated; error/return paths are only partly
 * visible below. */
3622 lem_fixup_rx(struct adapter *adapter)
/* If there is room in the cluster, shift the payload in place so the
 * IP header lands on an aligned boundary. */
3629 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3630 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3631 m->m_data += ETHER_HDR_LEN;
/* Otherwise prepend a fresh header mbuf carrying a copy of the
 * Ethernet header; the original mbuf keeps the (now aligned) payload. */
3633 MGETHDR(n, M_NOWAIT, MT_DATA);
3635 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3636 m->m_data += ETHER_HDR_LEN;
3637 m->m_len -= ETHER_HDR_LEN;
3638 n->m_len = ETHER_HDR_LEN;
3639 M_MOVE_PKTHDR(n, m);
/* Allocation failed: drop the whole assembled chain. */
3643 adapter->dropped_pkts++;
3644 m_freem(adapter->fmp);
3645 adapter->fmp = NULL;
3654 /*********************************************************************
3656 * Verify that the hardware indicated that the checksum is valid.
3657 * Inform the stack about the status of checksum so that stack
3658 * doesn't spend time verifying the checksum.
3660 *********************************************************************/
3662 lem_receive_checksum(struct adapter *adapter,
3663 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3665 /* 82543 or newer only */
3666 if ((adapter->hw.mac.type < e1000_82543) ||
3667 /* Ignore Checksum bit is set */
3668 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
/* No usable hardware checksum info: let the stack verify. */
3669 mp->m_pkthdr.csum_flags = 0;
/* IP checksum was computed by hardware. */
3673 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3675 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3676 /* IP Checksum Good */
3677 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3678 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3681 mp->m_pkthdr.csum_flags = 0;
/* TCP/UDP checksum was computed by hardware. */
3685 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3687 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3688 mp->m_pkthdr.csum_flags |=
3689 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
/* 0xffff tells the stack the pseudo-header sum is already folded in. */
3690 mp->m_pkthdr.csum_data = htons(0xffff);
3696 * This routine is run via an vlan
/*
 * VLAN-config event handler: records vtag in the software shadow of the
 * hardware VLAN Filter Table Array (VFTA) and re-inits if HW filtering
 * is enabled. NOTE(review): the computation of 'bit' (presumably
 * vtag & 0x1F) is not visible in this extract — confirm against source.
 */
3700 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3702 struct adapter *adapter = ifp->if_softc;
3705 if (ifp->if_softc != arg) /* Not our event */
3708 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
3711 EM_CORE_LOCK(adapter);
/* VFTA is 128 x 32-bit words; word index is the upper 7 tag bits. */
3712 index = (vtag >> 5) & 0x7F;
3714 adapter->shadow_vfta[index] |= (1 << bit);
3715 ++adapter->num_vlans;
3716 /* Re-init to load the changes */
3717 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3718 lem_init_locked(adapter);
3719 EM_CORE_UNLOCK(adapter);
3723 * This routine is run via an vlan
/*
 * VLAN-unconfig event handler: clears vtag from the shadow VFTA and
 * re-inits if HW filtering is enabled. NOTE(review): as with
 * lem_register_vlan, the 'bit' computation is not visible here.
 */
3727 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3729 struct adapter *adapter = ifp->if_softc;
3732 if (ifp->if_softc != arg)
3735 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3738 EM_CORE_LOCK(adapter);
3739 index = (vtag >> 5) & 0x7F;
3741 adapter->shadow_vfta[index] &= ~(1 << bit);
3742 --adapter->num_vlans;
3743 /* Re-init to load the changes */
3744 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3745 lem_init_locked(adapter);
3746 EM_CORE_UNLOCK(adapter);
/*
 * Restore hardware VLAN state after a soft reset: repopulate the VFTA
 * from the software shadow and enable VLAN tag stripping/filtering.
 */
3750 lem_setup_vlan_hw_support(struct adapter *adapter)
3752 struct e1000_hw *hw = &adapter->hw;
3756 ** We get here thru init_locked, meaning
3757 ** a soft reset, this has already cleared
3758 ** the VFTA and other state, so if there
3759 ** have been no vlan's registered do nothing.
3761 if (adapter->num_vlans == 0)
3765 ** A soft reset zero's out the VFTA, so
3766 ** we need to repopulate it now.
3768 for (int i = 0; i < EM_VFTA_SIZE; i++)
3769 if (adapter->shadow_vfta[i] != 0)
3770 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3771 i, adapter->shadow_vfta[i]);
/* CTRL.VME enables hardware VLAN tag handling. */
3773 reg = E1000_READ_REG(hw, E1000_CTRL);
3774 reg |= E1000_CTRL_VME;
3775 E1000_WRITE_REG(hw, E1000_CTRL, reg);
3777 /* Enable the Filter Table */
3778 reg = E1000_READ_REG(hw, E1000_RCTL);
3779 reg &= ~E1000_RCTL_CFIEN;
3780 reg |= E1000_RCTL_VFE;
3781 E1000_WRITE_REG(hw, E1000_RCTL, reg);
/* Unmask the standard interrupt causes via the IMS register. */
3785 lem_enable_intr(struct adapter *adapter)
3787 struct e1000_hw *hw = &adapter->hw;
3788 u32 ims_mask = IMS_ENABLE_MASK;
3790 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
/* Mask all interrupt causes by writing all-ones to IMC. */
3794 lem_disable_intr(struct adapter *adapter)
3796 struct e1000_hw *hw = &adapter->hw;
3798 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3802 * Bit of a misnomer, what this really means is
3803 * to enable OS management of the system... aka
3804 * to disable special hardware management features
3807 lem_init_manageability(struct adapter *adapter)
3809 /* A shared code workaround */
3810 if (adapter->has_manage) {
3811 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3812 /* disable hardware interception of ARP */
3813 manc &= ~(E1000_MANC_ARP_EN);
3814 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3819 * Give control back to hardware management
3820 * controller if there is one.
3823 lem_release_manageability(struct adapter *adapter)
3825 if (adapter->has_manage) {
3826 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3828 /* re-enable hardware interception of ARP */
3829 manc |= E1000_MANC_ARP_EN;
3830 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3835 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3836 * For ASF and Pass Through versions of f/w this means
3837 * that the driver is loaded. For AMT version type f/w
3838 * this means that the network i/f is open.
3841 lem_get_hw_control(struct adapter *adapter)
/* Read-modify-write CTRL_EXT to set DRV_LOAD. */
3845 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3846 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3847 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3852 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3853 * For ASF and Pass Through versions of f/w this means that
3854 * the driver is no longer loaded. For AMT versions of the
3855 * f/w this means that the network i/f is closed.
3858 lem_release_hw_control(struct adapter *adapter)
/* Nothing to hand back if there is no management firmware. */
3862 if (!adapter->has_manage)
3865 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3866 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3867 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
/*
 * Reject multicast (low bit of first octet set) and all-zero MAC
 * addresses. NOTE(review): the return statements are not visible in
 * this extract.
 */
3872 lem_is_valid_ether_addr(u8 *addr)
3874 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3876 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3884 ** Parse the interface capabilities with regard
3885 ** to both system management and wake-on-lan for
3889 lem_get_wakeup(device_t dev)
3891 struct adapter *adapter = device_get_softc(dev);
3892 u16 eeprom_data = 0, device_id, apme_mask;
3894 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
3895 apme_mask = EM_EEPROM_APME;
/* Per-MAC NVM word/bit holding the APM-enable flag. */
3897 switch (adapter->hw.mac.type) {
3902 e1000_read_nvm(&adapter->hw,
3903 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
3904 apme_mask = EM_82544_APME;
3907 case e1000_82546_rev_3:
/* Dual-port parts keep per-port wake settings in separate NVM words. */
3908 if (adapter->hw.bus.func == 1) {
3909 e1000_read_nvm(&adapter->hw,
3910 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3913 e1000_read_nvm(&adapter->hw,
3914 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3917 e1000_read_nvm(&adapter->hw,
3918 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
/* Default wake events when the EEPROM enables APM wake. */
3921 if (eeprom_data & apme_mask)
3922 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
3924 * We have the eeprom settings, now apply the special cases
3925 * where the eeprom may be wrong or the board won't support
3926 * wake on lan on a particular port
3928 device_id = pci_get_device(dev);
3929 switch (device_id) {
3930 case E1000_DEV_ID_82546GB_PCIE:
3933 case E1000_DEV_ID_82546EB_FIBER:
3934 case E1000_DEV_ID_82546GB_FIBER:
3935 /* Wake events only supported on port A for dual fiber
3936 * regardless of eeprom setting */
3937 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
3938 E1000_STATUS_FUNC_1)
3941 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
3942 /* if quad port adapter, disable WoL on all but port A */
3943 if (global_quad_port_a != 0)
3945 /* Reset for multiple quad port adapters */
3946 if (++global_quad_port_a == 4)
3947 global_quad_port_a = 0;
3955 * Enable PCI Wake On Lan capability
3958 lem_enable_wakeup(device_t dev)
3960 struct adapter *adapter = device_get_softc(dev);
3961 struct ifnet *ifp = adapter->ifp;
3962 u32 pmc, ctrl, ctrl_ext, rctl;
/* No PCI power-management capability: nothing to program. */
3965 if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
3968 /* Advertise the wakeup capability */
3969 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
3970 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
3971 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
3972 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
3974 /* Keep the laser running on Fiber adapters */
3975 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
3976 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
3977 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3978 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
3979 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
3983 ** Determine type of Wakeup: note that wol
3984 ** is set with all bits on by default.
3986 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
3987 adapter->wol &= ~E1000_WUFC_MAG;
3989 if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
3990 adapter->wol &= ~E1000_WUFC_MC;
/* NOTE(review): RCTL.MPE is set unconditionally here — presumably for
 * multicast wake; the guarding condition (if any) is not visible. */
3992 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3993 rctl |= E1000_RCTL_MPE;
3994 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
/* pchlan parts do wakeup through the PHY instead of the MAC. */
3997 if (adapter->hw.mac.type == e1000_pchlan) {
3998 if (lem_enable_phy_wakeup(adapter))
4001 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4002 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
/* Arm (or disarm) PME in PCI config space per IFCAP_WOL. */
4007 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4008 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4009 if (ifp->if_capenable & IFCAP_WOL)
4010 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4011 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
4017 ** WOL in the newer chipset interfaces (pchlan)
4018 ** require thing to be copied into the phy
4021 lem_enable_phy_wakeup(struct adapter *adapter)
4023 struct e1000_hw *hw = &adapter->hw;
4027 /* copy MAC RARs to PHY RARs */
4028 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4029 mreg = E1000_READ_REG(hw, E1000_RAL(i));
4030 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
4031 e1000_write_phy_reg(hw, BM_RAR_M(i),
4032 (u16)((mreg >> 16) & 0xFFFF));
4033 mreg = E1000_READ_REG(hw, E1000_RAH(i));
4034 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
4035 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
4036 (u16)((mreg >> 16) & 0xFFFF));
4039 /* copy MAC MTA to PHY MTA */
4040 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4041 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4042 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4043 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4044 (u16)((mreg >> 16) & 0xFFFF));
4047 /* configure PHY Rx Control register */
/* Mirror the relevant MAC RCTL bits into the PHY's BM_RCTL. */
4048 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
4049 mreg = E1000_READ_REG(hw, E1000_RCTL);
4050 if (mreg & E1000_RCTL_UPE)
4051 preg |= BM_RCTL_UPE;
4052 if (mreg & E1000_RCTL_MPE)
4053 preg |= BM_RCTL_MPE;
4054 preg &= ~(BM_RCTL_MO_MASK);
4055 if (mreg & E1000_RCTL_MO_3)
4056 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4057 << BM_RCTL_MO_SHIFT);
4058 if (mreg & E1000_RCTL_BAM)
4059 preg |= BM_RCTL_BAM;
4060 if (mreg & E1000_RCTL_PMCF)
4061 preg |= BM_RCTL_PMCF;
4062 mreg = E1000_READ_REG(hw, E1000_CTRL);
4063 if (mreg & E1000_CTRL_RFCE)
4064 preg |= BM_RCTL_RFCE;
4065 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
4067 /* enable PHY wakeup in MAC register */
4068 E1000_WRITE_REG(hw, E1000_WUC,
4069 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4070 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
4072 /* configure and enable PHY wakeup in PHY registers */
4073 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
4074 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4076 /* activate PHY wakeup */
/* Direct MDIC access needs exclusive PHY ownership. */
4077 ret = hw->phy.ops.acquire(hw);
4079 printf("Could not acquire PHY\n");
/* Select PHY page 769 (BM_WUC_ENABLE_PAGE) for the wakeup-enable reg. */
4082 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4083 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4084 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
4086 printf("Could not read PHY page 769\n");
4089 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4090 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
4092 printf("Could not set PHY Host Wakeup bit\n");
4094 hw->phy.ops.release(hw);
/*
 * led(4) callback: turn the identify LED on or off under the core lock.
 */
4100 lem_led_func(void *arg, int onoff)
4102 struct adapter *adapter = arg;
4104 EM_CORE_LOCK(adapter);
4106 e1000_setup_led(&adapter->hw);
4107 e1000_led_on(&adapter->hw);
4109 e1000_led_off(&adapter->hw);
4110 e1000_cleanup_led(&adapter->hw);
4112 EM_CORE_UNLOCK(adapter);
4115 /*********************************************************************
4116 * 82544 Coexistence issue workaround.
4117 * There are 2 issues.
4118 * 1. Transmit Hang issue.
4119 * To detect this issue, following equation can be used...
4120 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4121 * If SUM[3:0] is in between 1 to 4, we will have this issue.
4124 * To detect this issue, following equation can be used...
4125 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4126 * If SUM[3:0] is in between 9 to c, we will have this issue.
4130 * Make sure we do not have ending address
4131 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4133 *************************************************************************/
4135 lem_fill_descriptors (bus_addr_t address, u32 length,
4136 PDESC_ARRAY desc_array)
4138 u32 safe_terminator;
4140 /* Since issue is sensitive to length and address.*/
4141 /* Let us first check the address...*/
/* NOTE(review): the guarding condition for this early-out (presumably
 * an address-alignment check) is not visible in this extract. */
4143 desc_array->descriptor[0].address = address;
4144 desc_array->descriptor[0].length = length;
4145 desc_array->elements = 1;
4146 return (desc_array->elements);
/* SUM[3:0] = low 3 address bits + low 4 length bits (see header). */
4148 safe_terminator = (u32)((((u32)address & 0x7) +
4149 (length & 0xF)) & 0xF);
4150 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4151 if (safe_terminator == 0 ||
4152 (safe_terminator > 4 &&
4153 safe_terminator < 9) ||
4154 (safe_terminator > 0xC &&
4155 safe_terminator <= 0xF)) {
4156 desc_array->descriptor[0].address = address;
4157 desc_array->descriptor[0].length = length;
4158 desc_array->elements = 1;
4159 return (desc_array->elements);
/* Problematic terminator: split into a (length-4)-byte descriptor plus
 * a trailing 4-byte descriptor so the ending address is safe. */
4162 desc_array->descriptor[0].address = address;
4163 desc_array->descriptor[0].length = length - 4;
4164 desc_array->descriptor[1].address = address + (length - 4);
4165 desc_array->descriptor[1].length = 4;
4166 desc_array->elements = 2;
4167 return (desc_array->elements);
4170 /**********************************************************************
4172 * Update the board statistics counters.
4174 **********************************************************************/
/* Hardware statistics registers are clear-on-read, hence the "+=". */
4176 lem_update_stats_counters(struct adapter *adapter)
/* Symbol/sequence errors are only meaningful on copper or when link
 * is up. */
4180 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
4181 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4182 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4183 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4185 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4186 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4187 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4188 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4190 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4191 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4192 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4193 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4194 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4195 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4196 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4197 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4198 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4199 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4200 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4201 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4202 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4203 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4204 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4205 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4206 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4207 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4208 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4209 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4211 /* For the 64-bit byte counters the low dword must be read first. */
4212 /* Both registers clear on the read of the high dword */
4214 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
4215 ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
4216 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
4217 ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
4219 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
4220 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
4221 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
4222 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
4223 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
4225 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
4226 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
4228 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
4229 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
4230 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
4231 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
4232 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
4233 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
4234 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
4235 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
4236 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
4237 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* These counters only exist on 82543 and newer MACs. */
4239 if (adapter->hw.mac.type >= e1000_82543) {
4240 adapter->stats.algnerrc +=
4241 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
4242 adapter->stats.rxerrc +=
4243 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
4244 adapter->stats.tncrs +=
4245 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
4246 adapter->stats.cexterr +=
4247 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
4248 adapter->stats.tsctc +=
4249 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
4250 adapter->stats.tsctfc +=
4251 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
/* Fold the hardware counters into the ifnet statistics. */
4255 ifp->if_collisions = adapter->stats.colc;
4258 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
4259 adapter->stats.crcerrs + adapter->stats.algnerrc +
4260 adapter->stats.ruc + adapter->stats.roc +
4261 adapter->stats.mpc + adapter->stats.cexterr;
4264 ifp->if_oerrors = adapter->stats.ecol +
4265 adapter->stats.latecol + adapter->watchdog_events;
4268 /* Export a single 32-bit register via a read-only sysctl. */
/* arg1 = adapter softc, arg2 = register offset to read. */
4270 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4272 struct adapter *adapter;
4275 adapter = oidp->oid_arg1;
4276 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4277 return (sysctl_handle_int(oidp, &val, 0, req));
4281 * Add sysctl variables, one per statistic, to the system.
/*
 * Registers the driver's sysctl tree: driver-level counters and
 * register views under the device node, and MAC hardware counters
 * under a "mac_stats" child node. Purely declarative; sysctl(9)
 * handles teardown via the device sysctl context.
 */
4284 lem_add_hw_stats(struct adapter *adapter)
4286 device_t dev = adapter->dev;
4288 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4289 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4290 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4291 struct e1000_hw_stats *stats = &adapter->stats;
4293 struct sysctl_oid *stat_node;
4294 struct sysctl_oid_list *stat_list;
4296 /* Driver Statistics */
4297 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail",
4298 CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4300 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
4301 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4302 "Std mbuf cluster failed");
4303 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4304 CTLFLAG_RD, &adapter->dropped_pkts,
4305 "Driver dropped packets");
4306 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
4307 CTLFLAG_RD, &adapter->no_tx_dma_setup,
4308 "Driver tx dma failure in xmit");
4309 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4310 CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4311 "Not enough tx descriptors failure in xmit");
4312 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4313 CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4314 "Not enough tx descriptors failure in xmit");
4315 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4316 CTLFLAG_RD, &adapter->rx_overruns,
4318 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4319 CTLFLAG_RD, &adapter->watchdog_events,
4320 "Watchdog timeouts");
/* Live register views via lem_sysctl_reg_handler (arg2 = reg offset). */
4322 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4323 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4324 lem_sysctl_reg_handler, "IU",
4325 "Device Control Register");
4326 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4327 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4328 lem_sysctl_reg_handler, "IU",
4329 "Receiver Control Register");
4330 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4331 CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4332 "Flow Control High Watermark");
4333 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4334 CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4335 "Flow Control Low Watermark");
4336 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4337 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4338 "TX FIFO workaround events");
4339 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4340 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4343 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head",
4344 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4345 lem_sysctl_reg_handler, "IU",
4346 "Transmit Descriptor Head");
4347 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail",
4348 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4349 lem_sysctl_reg_handler, "IU",
4350 "Transmit Descriptor Tail");
4351 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head",
4352 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4353 lem_sysctl_reg_handler, "IU",
4354 "Receive Descriptor Head");
4355 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail",
4356 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4357 lem_sysctl_reg_handler, "IU",
4358 "Receive Descriptor Tail");
4361 /* MAC stats get their own sub node */
4363 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4364 CTLFLAG_RD, NULL, "Statistics");
4365 stat_list = SYSCTL_CHILDREN(stat_node);
4367 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4368 CTLFLAG_RD, &stats->ecol,
4369 "Excessive collisions");
4370 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4371 CTLFLAG_RD, &stats->scc,
4372 "Single collisions");
4373 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4374 CTLFLAG_RD, &stats->mcc,
4375 "Multiple collisions");
4376 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4377 CTLFLAG_RD, &stats->latecol,
4379 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4380 CTLFLAG_RD, &stats->colc,
4382 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4383 CTLFLAG_RD, &adapter->stats.symerrs,
4385 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4386 CTLFLAG_RD, &adapter->stats.sec,
4388 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4389 CTLFLAG_RD, &adapter->stats.dc,
4391 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4392 CTLFLAG_RD, &adapter->stats.mpc,
4394 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4395 CTLFLAG_RD, &adapter->stats.rnbc,
4396 "Receive No Buffers");
4397 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4398 CTLFLAG_RD, &adapter->stats.ruc,
4399 "Receive Undersize");
4400 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4401 CTLFLAG_RD, &adapter->stats.rfc,
4402 "Fragmented Packets Received ");
4403 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4404 CTLFLAG_RD, &adapter->stats.roc,
4405 "Oversized Packets Received");
4406 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4407 CTLFLAG_RD, &adapter->stats.rjc,
4409 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4410 CTLFLAG_RD, &adapter->stats.rxerrc,
4412 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4413 CTLFLAG_RD, &adapter->stats.crcerrs,
4415 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4416 CTLFLAG_RD, &adapter->stats.algnerrc,
4417 "Alignment Errors");
4418 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4419 CTLFLAG_RD, &adapter->stats.cexterr,
4420 "Collision/Carrier extension errors");
4421 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4422 CTLFLAG_RD, &adapter->stats.xonrxc,
4424 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4425 CTLFLAG_RD, &adapter->stats.xontxc,
4427 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4428 CTLFLAG_RD, &adapter->stats.xoffrxc,
4430 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4431 CTLFLAG_RD, &adapter->stats.xofftxc,
4432 "XOFF Transmitted");
4434 /* Packet Reception Stats */
4435 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4436 CTLFLAG_RD, &adapter->stats.tpr,
4437 "Total Packets Received ");
4438 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4439 CTLFLAG_RD, &adapter->stats.gprc,
4440 "Good Packets Received");
4441 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4442 CTLFLAG_RD, &adapter->stats.bprc,
4443 "Broadcast Packets Received");
4444 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4445 CTLFLAG_RD, &adapter->stats.mprc,
4446 "Multicast Packets Received");
4447 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4448 CTLFLAG_RD, &adapter->stats.prc64,
4449 "64 byte frames received ");
4450 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4451 CTLFLAG_RD, &adapter->stats.prc127,
4452 "65-127 byte frames received");
4453 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4454 CTLFLAG_RD, &adapter->stats.prc255,
4455 "128-255 byte frames received");
4456 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4457 CTLFLAG_RD, &adapter->stats.prc511,
4458 "256-511 byte frames received");
4459 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4460 CTLFLAG_RD, &adapter->stats.prc1023,
4461 "512-1023 byte frames received");
4462 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4463 CTLFLAG_RD, &adapter->stats.prc1522,
4464 "1023-1522 byte frames received");
4465 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4466 CTLFLAG_RD, &adapter->stats.gorc,
4467 "Good Octets Received");
4469 /* Packet Transmission Stats */
4470 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4471 CTLFLAG_RD, &adapter->stats.gotc,
4472 "Good Octets Transmitted");
4473 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4474 CTLFLAG_RD, &adapter->stats.tpt,
4475 "Total Packets Transmitted");
4476 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4477 CTLFLAG_RD, &adapter->stats.gptc,
4478 "Good Packets Transmitted");
4479 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4480 CTLFLAG_RD, &adapter->stats.bptc,
4481 "Broadcast Packets Transmitted");
4482 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4483 CTLFLAG_RD, &adapter->stats.mptc,
4484 "Multicast Packets Transmitted");
4485 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4486 CTLFLAG_RD, &adapter->stats.ptc64,
4487 "64 byte frames transmitted ");
4488 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4489 CTLFLAG_RD, &adapter->stats.ptc127,
4490 "65-127 byte frames transmitted");
4491 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4492 CTLFLAG_RD, &adapter->stats.ptc255,
4493 "128-255 byte frames transmitted");
4494 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4495 CTLFLAG_RD, &adapter->stats.ptc511,
4496 "256-511 byte frames transmitted");
4497 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4498 CTLFLAG_RD, &adapter->stats.ptc1023,
4499 "512-1023 byte frames transmitted");
4500 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4501 CTLFLAG_RD, &adapter->stats.ptc1522,
4502 "1024-1522 byte frames transmitted");
4503 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4504 CTLFLAG_RD, &adapter->stats.tsctc,
4505 "TSO Contexts Transmitted");
4506 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4507 CTLFLAG_RD, &adapter->stats.tsctfc,
4508 "TSO Contexts Failed");
4511 /**********************************************************************
4513  * This routine provides a way to dump out the adapter EEPROM,
4514  * which is often a useful debug/service tool. Only the first
4515  * 32 words are dumped; the data that matters lies within that range.
4517  **********************************************************************/
/*
 * Sysctl handler that lets an operator trigger an EEPROM dump:
 * sysctl_handle_int() copies the control value in/out, and a newly
 * written value leads to lem_print_nvm_info() hex-dumping the first
 * 32 16-bit EEPROM words to the console.
 */
4520 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4522 struct adapter *adapter;
/* Copy the current value out to the caller / a new value in. */
4527 error = sysctl_handle_int(oidp, &result, 0, req);
/* Error, or a read-only access (no new value supplied): nothing to do. */
4529 if (error || !req->newptr)
4533 * This value will cause a hex dump of the
4534 * first 32 16-bit words of the EEPROM to
/* arg1 carries the adapter softc registered with this OID. */
4538 adapter = (struct adapter *)arg1;
4539 lem_print_nvm_info(adapter);
/*
 * Hex-dump the first 32 16-bit NVM (EEPROM) words to the console,
 * eight words per row, printing the word offset at the start of
 * each row.
 */
4546 lem_print_nvm_info(struct adapter *adapter)
4551 /* It's a bit crude, but it gets the job done */
4552 printf("\nInterface EEPROM Dump:\n");
4553 printf("Offset\n0x0000 ");
/* i counts words read overall; j counts words within the current row. */
4554 for (i = 0, j = 0; i < 32; i++, j++) {
4555 if (j == 8) { /* Make the offset block */
/* Begin a new row, labelled with its base word offset. */
4557 printf("\n0x00%x0 ",row);
/* Read one 16-bit word at word offset i from the NVM. */
4559 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4560 printf("%04x ", eeprom_data);
/*
 * Sysctl handler for one interrupt-delay tunable.  The user-visible
 * value is in microseconds; it is validated, converted to hardware
 * ticks, and written into the low 16 bits of the device register
 * named by info->offset, under the core lock.
 */
4566 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4568 struct em_int_delay_info *info;
4569 struct adapter *adapter;
/* arg1 is the em_int_delay_info registered with this OID. */
4575 info = (struct em_int_delay_info *)arg1;
4576 usecs = info->value;
/* Copy the current value out / a possible new value in. */
4577 error = sysctl_handle_int(oidp, &usecs, 0, req);
/* Error, or read-only access (no new value): done. */
4578 if (error != 0 || req->newptr == NULL)
/* Reject values that will not fit in the 16-bit register field. */
4580 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4582 info->value = usecs;
4583 ticks = EM_USECS_TO_TICKS(usecs);
4584 if (info->offset == E1000_ITR) /* units are 256ns here */
4587 adapter = info->adapter;
/* Serialize the read-modify-write of the device register. */
4589 EM_CORE_LOCK(adapter);
4590 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
/* Replace only the low 16 bits; preserve the rest of the register. */
4591 regval = (regval & ~0xffff) | (ticks & 0xffff);
4592 /* Handle a few special cases. */
4593 switch (info->offset) {
/* Delay of zero: disable delayed TX interrupts via the IDE bit. */
4598 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4599 /* Don't write 0 into the TIDV register. */
/* Non-zero delay: request delayed interrupts on transmit descriptors. */
4602 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4605 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4606 EM_CORE_UNLOCK(adapter);
/*
 * Register a read/write sysctl for one interrupt-delay value.
 * Records the adapter, register offset, and initial value in *info,
 * which is then passed as arg1 to lem_sysctl_int_delay() whenever
 * the OID is accessed.
 */
4611 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4612 const char *description, struct em_int_delay_info *info,
4613 int offset, int value)
4615 info->adapter = adapter;
4616 info->offset = offset;
4617 info->value = value;
/* Attach the OID under the device's sysctl tree; "I" = integer format. */
4618 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4619 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4620 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4621 info, 0, lem_sysctl_int_delay, "I", description);
/*
 * Register a plain read/write integer sysctl backed directly by
 * *limit, initialized to 'value' (used for the flow-control setting).
 * NOTE(review): SYSCTL_ADD_INT supplies CTLTYPE_INT itself, so passing
 * CTLTYPE_INT in the flags argument looks redundant — confirm against
 * sysctl(9) for this FreeBSD version.
 */
4625 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4626 const char *description, int *limit, int value)
4629 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4630 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4631 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4635 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4636 const char *description, int *limit, int value)
4639 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4640 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4641 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);