1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
43 #include <sys/endian.h>
44 #include <sys/kernel.h>
45 #include <sys/kthread.h>
46 #include <sys/malloc.h>
48 #include <sys/module.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <sys/taskqueue.h>
54 #include <sys/eventhandler.h>
55 #include <machine/bus.h>
56 #include <machine/resource.h>
59 #include <net/ethernet.h>
61 #include <net/if_arp.h>
62 #include <net/if_dl.h>
63 #include <net/if_media.h>
65 #include <net/if_types.h>
66 #include <net/if_vlan_var.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/in.h>
70 #include <netinet/if_ether.h>
71 #include <netinet/ip.h>
72 #include <netinet/ip6.h>
73 #include <netinet/tcp.h>
74 #include <netinet/udp.h>
76 #include <machine/in_cksum.h>
77 #include <dev/led/led.h>
78 #include <dev/pci/pcivar.h>
79 #include <dev/pci/pcireg.h>
81 #include "e1000_api.h"
84 /*********************************************************************
85 * Legacy Em Driver version:
86 *********************************************************************/
/* Version string reported by the legacy em(4) ("lem") driver. */
87 char lem_driver_version[] = "1.0.3";
89 /*********************************************************************
92 * Used by probe to select devices to load on
93 * Last field stores an index into e1000_strings
94 * Last entry must be all 0s
96 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
97 *********************************************************************/
/*
 * PCI ID match table consumed by lem_probe(); the probe loop walks it
 * until it finds an entry whose vendor_id is 0, so the all-zero
 * terminator entry is load-bearing.
 */
99 static em_vendor_info_t lem_vendor_info_array[] =
101 /* Intel(R) PRO/1000 Network Connection */
102 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
103 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
104 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
105 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
106 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
108 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
109 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
110 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
111 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
112 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
113 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
118 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
126 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
128 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
132 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
141 PCI_ANY_ID, PCI_ANY_ID, 0},
143 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
146 /* required last entry */
/*
 * NOTE(review): the opening brace of the initializer, the terminating
 * all-zero entry and the closing "};" are elided in this excerpt (see
 * the gaps in the embedded line numbering) — confirm against the full
 * file before modifying this table.
 */
150 /*********************************************************************
151 * Table of branding strings for all supported NICs.
152 *********************************************************************/
/* Indexed by the last field of lem_vendor_info_array entries. */
154 static char *lem_strings[] = {
155 "Intel(R) PRO/1000 Legacy Network Connection"
/* NOTE(review): closing "};" elided in this excerpt. */
158 /*********************************************************************
159 * Function prototypes
160 *********************************************************************/
/*
 * Forward declarations for all file-local routines: newbus entry
 * points, ifnet methods, DMA/ring management, 82547 TX-FIFO
 * workaround helpers, sysctl plumbing, and management/WOL support.
 * NOTE(review): two prototypes below (lem_receive_checksum,
 * lem_transmit_checksum_setup) have their continuation lines elided
 * in this excerpt, and the "#endif" near the end closes a
 * conditional (presumably "#ifndef EM_LEGACY_IRQ") whose opening
 * directive is also elided — verify against the full file.
 */
161 static int lem_probe(device_t);
162 static int lem_attach(device_t);
163 static int lem_detach(device_t);
164 static int lem_shutdown(device_t);
165 static int lem_suspend(device_t);
166 static int lem_resume(device_t);
167 static void lem_start(struct ifnet *);
168 static void lem_start_locked(struct ifnet *ifp);
169 static int lem_ioctl(struct ifnet *, u_long, caddr_t);
170 static void lem_init(void *);
171 static void lem_init_locked(struct adapter *);
172 static void lem_stop(void *);
173 static void lem_media_status(struct ifnet *, struct ifmediareq *);
174 static int lem_media_change(struct ifnet *);
175 static void lem_identify_hardware(struct adapter *);
176 static int lem_allocate_pci_resources(struct adapter *);
177 static int lem_allocate_irq(struct adapter *adapter);
178 static void lem_free_pci_resources(struct adapter *);
179 static void lem_local_timer(void *);
180 static int lem_hardware_init(struct adapter *);
181 static int lem_setup_interface(device_t, struct adapter *);
182 static void lem_setup_transmit_structures(struct adapter *);
183 static void lem_initialize_transmit_unit(struct adapter *);
184 static int lem_setup_receive_structures(struct adapter *);
185 static void lem_initialize_receive_unit(struct adapter *);
186 static void lem_enable_intr(struct adapter *);
187 static void lem_disable_intr(struct adapter *);
188 static void lem_free_transmit_structures(struct adapter *);
189 static void lem_free_receive_structures(struct adapter *);
190 static void lem_update_stats_counters(struct adapter *);
191 static void lem_add_hw_stats(struct adapter *adapter);
192 static void lem_txeof(struct adapter *);
193 static void lem_tx_purge(struct adapter *);
194 static int lem_allocate_receive_structures(struct adapter *);
195 static int lem_allocate_transmit_structures(struct adapter *);
196 static bool lem_rxeof(struct adapter *, int, int *);
197 #ifndef __NO_STRICT_ALIGNMENT
198 static int lem_fixup_rx(struct adapter *);
200 static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
202 static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
204 static void lem_set_promisc(struct adapter *);
205 static void lem_disable_promisc(struct adapter *);
206 static void lem_set_multi(struct adapter *);
207 static void lem_update_link_status(struct adapter *);
208 static int lem_get_buf(struct adapter *, int);
/* VLAN event handlers + hardware VLAN offload setup */
209 static void lem_register_vlan(void *, struct ifnet *, u16);
210 static void lem_unregister_vlan(void *, struct ifnet *, u16);
211 static void lem_setup_vlan_hw_support(struct adapter *);
212 static int lem_xmit(struct adapter *, struct mbuf **);
213 static void lem_smartspeed(struct adapter *);
/* 82547 shares its packet buffer with the TX FIFO; these implement the
 * published FIFO-hang workaround. */
214 static int lem_82547_fifo_workaround(struct adapter *, int);
215 static void lem_82547_update_fifo_head(struct adapter *, int);
216 static int lem_82547_tx_fifo_reset(struct adapter *);
217 static void lem_82547_move_tail(void *);
218 static int lem_dma_malloc(struct adapter *, bus_size_t,
219 struct em_dma_alloc *, int);
220 static void lem_dma_free(struct adapter *, struct em_dma_alloc *);
221 static int lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
222 static void lem_print_nvm_info(struct adapter *);
223 static int lem_is_valid_ether_addr(u8 *);
224 static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
225 PDESC_ARRAY desc_array);
226 static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
227 static void lem_add_int_delay_sysctl(struct adapter *, const char *,
228 const char *, struct em_int_delay_info *, int, int);
229 static void lem_set_flow_cntrl(struct adapter *, const char *,
230 const char *, int *, int);
231 /* Management and WOL Support */
232 static void lem_init_manageability(struct adapter *);
233 static void lem_release_manageability(struct adapter *);
234 static void lem_get_hw_control(struct adapter *);
235 static void lem_release_hw_control(struct adapter *);
236 static void lem_get_wakeup(device_t);
237 static void lem_enable_wakeup(device_t);
238 static int lem_enable_phy_wakeup(struct adapter *);
239 static void lem_led_func(void *, int);
242 static void lem_intr(void *);
244 static int lem_irq_fast(void *);
245 static void lem_handle_rxtx(void *context, int pending);
246 static void lem_handle_link(void *context, int pending);
247 static void lem_add_rx_process_limit(struct adapter *, const char *,
248 const char *, int *, int);
249 #endif /* ~EM_LEGACY_IRQ */
251 #ifdef DEVICE_POLLING
252 static poll_handler_t lem_poll;
255 /*********************************************************************
256 * FreeBSD Device Interface Entry Points
257 *********************************************************************/
/* newbus method table wiring the lem_* entry points into device_if. */
259 static device_method_t lem_methods[] = {
260 /* Device interface */
261 DEVMETHOD(device_probe, lem_probe),
262 DEVMETHOD(device_attach, lem_attach),
263 DEVMETHOD(device_detach, lem_detach),
264 DEVMETHOD(device_shutdown, lem_shutdown),
265 DEVMETHOD(device_suspend, lem_suspend),
266 DEVMETHOD(device_resume, lem_resume),
/* NOTE(review): the method-table terminator and closing "};" are
 * elided in this excerpt, as is the closing of lem_driver below. */
270 static driver_t lem_driver = {
271 "em", lem_methods, sizeof(struct adapter),
/* Registers on the shared "em" devclass so em/lem unit numbering
 * stays consistent. */
274 extern devclass_t em_devclass;
275 DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
276 MODULE_DEPEND(lem, pci, 1, 1, 1);
277 MODULE_DEPEND(lem, ether, 1, 1, 1);
279 /*********************************************************************
280 * Tunable default values.
281 *********************************************************************/
/* Interrupt-delay registers tick in 1.024 usec units; these macros
 * convert between ticks and microseconds with rounding. */
283 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
284 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
/* Defaults for the per-adapter sysctls created in lem_attach();
 * overridable at boot via the hw.em.* loader tunables below. */
286 static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
287 static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
288 static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
289 static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
290 static int lem_rxd = EM_DEFAULT_RXD;
291 static int lem_txd = EM_DEFAULT_TXD;
292 static int lem_smart_pwr_down = FALSE;
294 /* Controls whether promiscuous also shows bad packets */
295 static int lem_debug_sbp = FALSE;
297 TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
298 TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
299 TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
300 TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
301 TUNABLE_INT("hw.em.rxd", &lem_rxd);
302 TUNABLE_INT("hw.em.txd", &lem_txd);
303 TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
304 TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
306 #ifndef EM_LEGACY_IRQ
307 /* How many packets rxeof tries to clean at a time */
308 static int lem_rx_process_limit = 100;
309 TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
/* NOTE(review): the matching "#endif" for EM_LEGACY_IRQ above is
 * elided in this excerpt. */
312 /* Flow control setting - default to FULL */
313 static int lem_fc_setting = e1000_fc_full;
314 TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
316 /* Global used in WOL setup with multiport cards */
317 static int global_quad_port_a = 0;
319 /*********************************************************************
320 * Device identification routine
322 * em_probe determines if the driver should be loaded on
323 * adapter based on PCI vendor/device id of the adapter.
325 * return BUS_PROBE_DEFAULT on success, positive on failure
326 *********************************************************************/
/*
 * Walk lem_vendor_info_array looking for an exact vendor/device match
 * (sub-IDs may wildcard via PCI_ANY_ID); on a hit, set the device
 * description and claim the device.
 * NOTE(review): the "static int" line, the early non-Intel
 * rejection's return statement, the sprintf's trailing argument, and
 * the loop/function closing braces are elided in this excerpt.
 */
329 lem_probe(device_t dev)
331 char adapter_name[60];
332 u16 pci_vendor_id = 0;
333 u16 pci_device_id = 0;
334 u16 pci_subvendor_id = 0;
335 u16 pci_subdevice_id = 0;
336 em_vendor_info_t *ent;
338 INIT_DEBUGOUT("em_probe: begin");
340 pci_vendor_id = pci_get_vendor(dev);
341 if (pci_vendor_id != EM_VENDOR_ID)
344 pci_device_id = pci_get_device(dev);
345 pci_subvendor_id = pci_get_subvendor(dev);
346 pci_subdevice_id = pci_get_subdevice(dev);
/* Table is terminated by an entry with vendor_id == 0. */
348 ent = lem_vendor_info_array;
349 while (ent->vendor_id != 0) {
350 if ((pci_vendor_id == ent->vendor_id) &&
351 (pci_device_id == ent->device_id) &&
353 ((pci_subvendor_id == ent->subvendor_id) ||
354 (ent->subvendor_id == PCI_ANY_ID)) &&
356 ((pci_subdevice_id == ent->subdevice_id) ||
357 (ent->subdevice_id == PCI_ANY_ID))) {
358 sprintf(adapter_name, "%s %s",
359 lem_strings[ent->index],
361 device_set_desc_copy(dev, adapter_name);
362 return (BUS_PROBE_DEFAULT);
370 /*********************************************************************
371 * Device initialization routine
373 * The attach entry point is called when the driver is being loaded.
374 * This routine identifies the type of hardware, allocates all resources
375 * and initializes the hardware.
377 * return 0 on success, positive on failure
378 *********************************************************************/
/*
 * Attach: identify the MAC, map PCI resources, size and allocate the
 * TX/RX descriptor rings, validate the EEPROM/MAC address, bring up
 * the hardware, wire the interrupt, create the ifnet, and register
 * VLAN/LED/sysctl ancillaries.  On any failure it unwinds through
 * the error labels at the bottom (goto-cleanup pattern).
 * NOTE(review): this excerpt elides the "static int" line, the local
 * declarations of error/tsize/rsize, the goto statements and err_*
 * labels, and most closing braces — the comments below describe only
 * what is visible.
 */
381 lem_attach(device_t dev)
383 struct adapter *adapter;
387 INIT_DEBUGOUT("lem_attach: begin");
389 adapter = device_get_softc(dev);
390 adapter->dev = adapter->osdep.dev = dev;
391 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
392 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
393 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
/* dev.em.N.nvm sysctl: dumps EEPROM contents via lem_sysctl_nvm_info */
396 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
397 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
398 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
399 lem_sysctl_nvm_info, "I", "NVM Information");
/* Callouts bound to their mutexes so they drain/stop safely */
401 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
402 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
404 /* Determine hardware and mac info */
405 lem_identify_hardware(adapter);
407 /* Setup PCI resources */
408 if (lem_allocate_pci_resources(adapter)) {
409 device_printf(dev, "Allocation of PCI resources failed\n");
414 /* Do Shared Code initialization */
415 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
416 device_printf(dev, "Setup of Shared code failed\n");
421 e1000_get_bus_info(&adapter->hw);
423 /* Set up some sysctls for the tunable interrupt delays */
424 lem_add_int_delay_sysctl(adapter, "rx_int_delay",
425 "receive interrupt delay in usecs", &adapter->rx_int_delay,
426 E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
427 lem_add_int_delay_sysctl(adapter, "tx_int_delay",
428 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
429 E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
/* RADV/TADV absolute-delay registers exist only on 82540 and later */
430 if (adapter->hw.mac.type >= e1000_82540) {
431 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
432 "receive interrupt delay limit in usecs",
433 &adapter->rx_abs_int_delay,
434 E1000_REGISTER(&adapter->hw, E1000_RADV),
435 lem_rx_abs_int_delay_dflt);
436 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
437 "transmit interrupt delay limit in usecs",
438 &adapter->tx_abs_int_delay,
439 E1000_REGISTER(&adapter->hw, E1000_TADV),
440 lem_tx_abs_int_delay_dflt);
443 #ifndef EM_LEGACY_IRQ
444 /* Sysctls for limiting the amount of work done in the taskqueue */
445 lem_add_rx_process_limit(adapter, "rx_processing_limit",
446 "max number of rx packets to process", &adapter->rx_process_limit,
447 lem_rx_process_limit);
450 /* Sysctl for setting the interface flow control */
/* NOTE(review): description string duplicates the rx-limit one above;
 * looks like a copy-paste slip in the original — verify upstream. */
451 lem_set_flow_cntrl(adapter, "flow_control",
452 "max number of rx packets to process",
453 &adapter->fc_setting, lem_fc_setting);
456 * Validate number of transmit and receive descriptors. It
457 * must not exceed hardware maximum, and must be multiple
458 * of E1000_DBA_ALIGN.
/* Fall back to EM_DEFAULT_TXD when the tunable is misaligned or out
 * of the per-MAC range (82543 and older have a smaller maximum). */
460 if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
461 (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
462 (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
463 (lem_txd < EM_MIN_TXD)) {
464 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
465 EM_DEFAULT_TXD, lem_txd);
466 adapter->num_tx_desc = EM_DEFAULT_TXD;
468 adapter->num_tx_desc = lem_txd;
/* Same validation for the receive ring */
469 if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
470 (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
471 (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
472 (lem_rxd < EM_MIN_RXD)) {
473 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
474 EM_DEFAULT_RXD, lem_rxd);
475 adapter->num_rx_desc = EM_DEFAULT_RXD;
477 adapter->num_rx_desc = lem_rxd;
479 adapter->hw.mac.autoneg = DO_AUTO_NEG;
480 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
481 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
482 adapter->rx_buffer_len = 2048;
484 e1000_init_script_state_82541(&adapter->hw, TRUE);
485 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
/* Copper-specific PHY defaults */
488 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
489 adapter->hw.phy.mdix = AUTO_ALL_MODES;
490 adapter->hw.phy.disable_polarity_correction = FALSE;
491 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
495 * Set the frame limits assuming
496 * standard ethernet sized frames.
498 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
499 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
502 * This controls when hardware reports transmit completion
505 adapter->hw.mac.report_tx_early = 1;
507 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
510 /* Allocate Transmit Descriptor ring */
511 if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
512 device_printf(dev, "Unable to allocate tx_desc memory\n");
516 adapter->tx_desc_base =
517 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
519 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
522 /* Allocate Receive Descriptor ring */
523 if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
524 device_printf(dev, "Unable to allocate rx_desc memory\n");
528 adapter->rx_desc_base =
529 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
531 /* Allocate multicast array memory. */
532 adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
533 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
534 if (adapter->mta == NULL) {
535 device_printf(dev, "Can not allocate multicast setup array\n");
541 ** Start from a known state, this is
542 ** important in reading the nvm and
545 e1000_reset_hw(&adapter->hw);
547 /* Make sure we have a good EEPROM before we read from it */
548 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
550 ** Some PCI-E parts fail the first check due to
551 ** the link being in sleep state, call it again,
552 ** if it fails a second time its a real issue.
554 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
556 "The EEPROM Checksum Is Not Valid\n");
562 /* Copy the permanent MAC address out of the EEPROM */
563 if (e1000_read_mac_addr(&adapter->hw) < 0) {
564 device_printf(dev, "EEPROM read error while reading MAC"
570 if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
571 device_printf(dev, "Invalid MAC address\n");
576 /* Initialize the hardware */
577 if (lem_hardware_init(adapter)) {
578 device_printf(dev, "Unable to initialize the hardware\n");
583 /* Allocate transmit descriptors and buffers */
584 if (lem_allocate_transmit_structures(adapter)) {
585 device_printf(dev, "Could not setup transmit structures\n");
590 /* Allocate receive descriptors and buffers */
591 if (lem_allocate_receive_structures(adapter)) {
592 device_printf(dev, "Could not setup receive structures\n");
598 ** Do interrupt configuration
600 error = lem_allocate_irq(adapter);
605 * Get Wake-on-Lan and Management info for later use
609 /* Setup OS specific network interface */
610 if (lem_setup_interface(dev, adapter) != 0)
613 /* Initialize statistics */
614 lem_update_stats_counters(adapter);
616 adapter->hw.mac.get_link_status = 1;
617 lem_update_link_status(adapter);
619 /* Indicate SOL/IDER usage */
620 if (e1000_check_reset_block(&adapter->hw))
622 "PHY reset is blocked due to SOL/IDER session.\n");
624 /* Do we need workaround for 82544 PCI-X adapter? */
625 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
626 adapter->hw.mac.type == e1000_82544)
627 adapter->pcix_82544 = TRUE;
629 adapter->pcix_82544 = FALSE;
631 /* Register for VLAN events */
632 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
633 lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
634 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
635 lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
637 lem_add_hw_stats(adapter);
639 /* Non-AMT based hardware can now take control from firmware */
640 if (adapter->has_manage && !adapter->has_amt)
641 lem_get_hw_control(adapter);
643 /* Tell the stack that the interface is not active */
644 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
646 adapter->led_dev = led_create(lem_led_func, adapter,
647 device_get_nameunit(dev));
649 INIT_DEBUGOUT("lem_attach: end");
/* Error unwind (labels elided in this excerpt): frees everything
 * acquired above in reverse order, then destroys the locks. */
654 lem_free_transmit_structures(adapter);
657 lem_release_hw_control(adapter);
658 lem_dma_free(adapter, &adapter->rxdma);
660 lem_dma_free(adapter, &adapter->txdma);
663 if (adapter->ifp != NULL)
664 if_free(adapter->ifp);
665 lem_free_pci_resources(adapter);
666 free(adapter->mta, M_DEVBUF);
667 EM_TX_LOCK_DESTROY(adapter);
668 EM_RX_LOCK_DESTROY(adapter);
669 EM_CORE_LOCK_DESTROY(adapter);
674 /*********************************************************************
675 * Device removal routine
677 * The detach entry point is called when the driver is being removed.
678 * This routine stops the adapter and deallocates all the resources
679 * that were allocated for driver operation.
681 * return 0 on success, positive on failure
682 *********************************************************************/
/*
 * Teardown mirrors lem_attach(): stop the hardware under the locks,
 * deregister event handlers and the ifnet, drain callouts, then free
 * rings, buffers and locks.
 * NOTE(review): the "static int" line, the EBUSY return for the
 * VLAN-in-use case, the EM_TX_LOCK/lem_stop calls implied between
 * lines, the return statement and closing braces are elided in this
 * excerpt.
 */
685 lem_detach(device_t dev)
687 struct adapter *adapter = device_get_softc(dev);
688 struct ifnet *ifp = adapter->ifp;
690 INIT_DEBUGOUT("em_detach: begin");
692 /* Make sure VLANS are not using driver */
693 if (adapter->ifp->if_vlantrunk != NULL) {
694 device_printf(dev,"Vlan in use, detach first\n");
698 #ifdef DEVICE_POLLING
699 if (ifp->if_capenable & IFCAP_POLLING)
700 ether_poll_deregister(ifp);
703 if (adapter->led_dev != NULL)
704 led_destroy(adapter->led_dev);
706 EM_CORE_LOCK(adapter);
/* Flag checked by lem_ioctl() to reject configuration mid-detach */
708 adapter->in_detach = 1;
710 e1000_phy_hw_reset(&adapter->hw);
712 lem_release_manageability(adapter);
714 EM_TX_UNLOCK(adapter);
715 EM_CORE_UNLOCK(adapter);
717 /* Unregister VLAN events */
718 if (adapter->vlan_attach != NULL)
719 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
720 if (adapter->vlan_detach != NULL)
721 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
723 ether_ifdetach(adapter->ifp);
/* callout_drain (not _stop) guarantees no handler is still running */
724 callout_drain(&adapter->timer);
725 callout_drain(&adapter->tx_fifo_timer);
727 lem_free_pci_resources(adapter);
728 bus_generic_detach(dev);
731 lem_free_transmit_structures(adapter);
732 lem_free_receive_structures(adapter);
734 /* Free Transmit Descriptor ring */
735 if (adapter->tx_desc_base) {
736 lem_dma_free(adapter, &adapter->txdma);
737 adapter->tx_desc_base = NULL;
740 /* Free Receive Descriptor ring */
741 if (adapter->rx_desc_base) {
742 lem_dma_free(adapter, &adapter->rxdma);
743 adapter->rx_desc_base = NULL;
746 lem_release_hw_control(adapter);
747 free(adapter->mta, M_DEVBUF);
748 EM_TX_LOCK_DESTROY(adapter);
749 EM_RX_LOCK_DESTROY(adapter);
750 EM_CORE_LOCK_DESTROY(adapter);
755 /*********************************************************************
757 * Shutdown entry point
759 **********************************************************************/
/* System shutdown is handled identically to suspend: quiesce the
 * hardware and arm wakeup.  ("static int" line and braces elided.) */
762 lem_shutdown(device_t dev)
764 return lem_suspend(dev);
768 * Suspend/resume device methods.
/* Quiesce the adapter under the core lock (the lem_stop call between
 * these lines is elided in this excerpt), release firmware
 * management/control, arm Wake-on-LAN, then defer to the bus. */
771 lem_suspend(device_t dev)
773 struct adapter *adapter = device_get_softc(dev);
775 EM_CORE_LOCK(adapter);
777 lem_release_manageability(adapter);
778 lem_release_hw_control(adapter);
779 lem_enable_wakeup(dev);
781 EM_CORE_UNLOCK(adapter);
783 return bus_generic_suspend(dev);
/* Re-initialize the hardware after suspend and restore manageability;
 * the post-unlock restart of the TX path (lem_start around the elided
 * lines) and the "static int" line are not visible in this excerpt. */
787 lem_resume(device_t dev)
789 struct adapter *adapter = device_get_softc(dev);
790 struct ifnet *ifp = adapter->ifp;
792 EM_CORE_LOCK(adapter);
793 lem_init_locked(adapter);
794 lem_init_manageability(adapter);
795 EM_CORE_UNLOCK(adapter);
798 return bus_generic_resume(dev);
/*
 * Dequeue frames from if_snd and hand them to lem_xmit() until the
 * queue empties or TX descriptors run out; caller must hold the TX
 * lock.  Sets OACTIVE to throttle the stack when descriptors are
 * scarce and arms the watchdog once a frame is queued.
 * NOTE(review): the "static void" line, several return/brace lines,
 * the m_head declaration, and the lem_txeof() call implied inside the
 * cleanup branch are elided in this excerpt.
 */
803 lem_start_locked(struct ifnet *ifp)
805 struct adapter *adapter = ifp->if_softc;
808 EM_TX_LOCK_ASSERT(adapter);
810 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
813 if (!adapter->link_active)
817 * Force a cleanup if number of TX descriptors
818 * available hits the threshold
820 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
822 /* Now do we at least have a minimal? */
823 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
824 adapter->no_tx_desc_avail1++;
829 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
831 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
835 * Encapsulation can modify our pointer, and or make it
836 * NULL on failure. In that event, we can't requeue.
838 if (lem_xmit(adapter, &m_head)) {
/* Encap failed but mbuf survived: requeue it and throttle */
841 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
842 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
846 /* Send a copy of the frame to the BPF listener */
847 ETHER_BPF_MTAP(ifp, m_head);
849 /* Set timeout in case hardware has problems transmitting. */
850 adapter->watchdog_check = TRUE;
851 adapter->watchdog_time = ticks;
853 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
854 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/* ifnet if_start hook: take the TX lock (the EM_TX_LOCK call between
 * these lines is elided in this excerpt) and run the locked variant
 * if the interface is up. */
860 lem_start(struct ifnet *ifp)
862 struct adapter *adapter = ifp->if_softc;
865 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
866 lem_start_locked(ifp);
867 EM_TX_UNLOCK(adapter);
870 /*********************************************************************
873 * em_ioctl is called when the user wants to configure the
876 * return 0 on success, positive on failure
877 **********************************************************************/
/*
 * ifnet ioctl handler.  The visible branches correspond to (in order)
 * SIOCSIFADDR, SIOCSIFMTU, SIOCSIFFLAGS, SIOC(ADD|DEL)MULTI,
 * SIOCxIFMEDIA and SIOCSIFCAP, ending in the ether_ioctl() default.
 * NOTE(review): the switch(command), case labels, break statements,
 * default case, error/mask/reinit/max_frame_size declarations and the
 * final return are all elided in this excerpt — only the case bodies
 * remain visible.
 */
880 lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
882 struct adapter *adapter = ifp->if_softc;
883 struct ifreq *ifr = (struct ifreq *)data;
885 struct ifaddr *ifa = (struct ifaddr *)data;
/* Reject configuration while lem_detach() is tearing us down */
889 if (adapter->in_detach)
/* SIOCSIFADDR: only re-init for AF_INET to avoid a needless reset */
895 if (ifa->ifa_addr->sa_family == AF_INET) {
898 * Since resetting hardware takes a very long time
899 * and results in link renegotiation we only
900 * initialize the hardware only when it is absolutely
903 ifp->if_flags |= IFF_UP;
904 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
905 EM_CORE_LOCK(adapter);
906 lem_init_locked(adapter);
907 EM_CORE_UNLOCK(adapter);
909 arp_ifinit(ifp, ifa);
912 error = ether_ioctl(ifp, command, data);
918 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
920 EM_CORE_LOCK(adapter);
/* Per-MAC frame-size ceiling; older parts are standard-frame only */
921 switch (adapter->hw.mac.type) {
923 max_frame_size = ETHER_MAX_LEN;
926 max_frame_size = MAX_JUMBO_FRAME_SIZE;
928 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
930 EM_CORE_UNLOCK(adapter);
935 ifp->if_mtu = ifr->ifr_mtu;
936 adapter->max_frame_size =
937 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
938 lem_init_locked(adapter);
939 EM_CORE_UNLOCK(adapter);
943 IOCTL_DEBUGOUT("ioctl rcv'd:\
944 SIOCSIFFLAGS (Set Interface Flags)");
945 EM_CORE_LOCK(adapter);
946 if (ifp->if_flags & IFF_UP) {
947 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Only PROMISC/ALLMULTI changes can be applied without a re-init */
948 if ((ifp->if_flags ^ adapter->if_flags) &
949 (IFF_PROMISC | IFF_ALLMULTI)) {
950 lem_disable_promisc(adapter);
951 lem_set_promisc(adapter);
954 lem_init_locked(adapter);
956 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
959 EM_TX_UNLOCK(adapter);
961 adapter->if_flags = ifp->if_flags;
962 EM_CORE_UNLOCK(adapter);
966 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
967 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
968 EM_CORE_LOCK(adapter);
969 lem_disable_intr(adapter);
970 lem_set_multi(adapter);
/* 82542 rev 2.0 must reprogram the receive unit after MTA changes */
971 if (adapter->hw.mac.type == e1000_82542 &&
972 adapter->hw.revision_id == E1000_REVISION_2) {
973 lem_initialize_receive_unit(adapter);
975 #ifdef DEVICE_POLLING
976 if (!(ifp->if_capenable & IFCAP_POLLING))
978 lem_enable_intr(adapter);
979 EM_CORE_UNLOCK(adapter);
983 /* Check SOL/IDER usage */
984 EM_CORE_LOCK(adapter);
985 if (e1000_check_reset_block(&adapter->hw)) {
986 EM_CORE_UNLOCK(adapter);
987 device_printf(adapter->dev, "Media change is"
988 " blocked due to SOL/IDER session.\n");
991 EM_CORE_UNLOCK(adapter);
993 IOCTL_DEBUGOUT("ioctl rcv'd: \
994 SIOCxIFMEDIA (Get/Set Interface Media)");
995 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
1001 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1003 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1004 #ifdef DEVICE_POLLING
1005 if (mask & IFCAP_POLLING) {
1006 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1007 error = ether_poll_register(lem_poll, ifp);
1010 EM_CORE_LOCK(adapter);
1011 lem_disable_intr(adapter);
1012 ifp->if_capenable |= IFCAP_POLLING;
1013 EM_CORE_UNLOCK(adapter);
1015 error = ether_poll_deregister(ifp);
1016 /* Enable interrupt even in error case */
1017 EM_CORE_LOCK(adapter);
1018 lem_enable_intr(adapter);
1019 ifp->if_capenable &= ~IFCAP_POLLING;
1020 EM_CORE_UNLOCK(adapter);
1024 if (mask & IFCAP_HWCSUM) {
1025 ifp->if_capenable ^= IFCAP_HWCSUM;
1028 if (mask & IFCAP_VLAN_HWTAGGING) {
1029 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1032 if ((mask & IFCAP_WOL) &&
1033 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1034 if (mask & IFCAP_WOL_MCAST)
1035 ifp->if_capenable ^= IFCAP_WOL_MCAST;
1036 if (mask & IFCAP_WOL_MAGIC)
1037 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1039 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1041 VLAN_CAPABILITIES(ifp);
1046 error = ether_ioctl(ifp, command, data);
1054 /*********************************************************************
1057 * This routine is used in two ways. It is used by the stack as
1058 * init entry point in network interface structure. It is also used
1059 * by the driver as a hw/sw initialization routine to get to a
1062 * return 0 on success, positive on failure
1063 **********************************************************************/
/*
 * lem_init_locked -- hw/sw (re)initialization; caller must hold the CORE
 * lock (asserted below).  Sizes the on-chip packet buffer, programs the
 * MAC address, initializes TX/RX units, VLAN/offload capabilities and
 * interrupts, then marks the interface RUNNING.
 * NOTE(review): this extract is elided (interior lines, braces and else
 * branches are missing), so comments describe only the visible statements.
 */
1066 lem_init_locked(struct adapter *adapter)
1068 struct ifnet *ifp = adapter->ifp;
1069 device_t dev = adapter->dev;
1072 INIT_DEBUGOUT("lem_init: begin");
1074 EM_CORE_LOCK_ASSERT(adapter);
/* Presumably stops the adapter under the TX lock -- the call between
 * lock/unlock is elided from this view; confirm against full source. */
1076 EM_TX_LOCK(adapter);
1078 EM_TX_UNLOCK(adapter);
1081 * Packet Buffer Allocation (PBA)
1082 * Writing PBA sets the receive portion of the buffer
1083 * the remainder is used for the transmit buffer.
1085 * Devices before the 82547 had a Packet Buffer of 64K.
1086 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1087 * After the 82547 the buffer was reduced to 40K.
1088 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1089 * Note: default does not leave enough room for Jumbo Frame >10k.
1091 switch (adapter->hw.mac.type) {
1093 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1094 if (adapter->max_frame_size > 8192)
1095 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1097 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 TX-FIFO workaround state is derived from the chosen PBA split. */
1098 adapter->tx_fifo_head = 0;
1099 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1100 adapter->tx_fifo_size =
1101 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1104 /* Devices before 82547 had a Packet Buffer of 64K. */
1105 if (adapter->max_frame_size > 8192)
1106 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1108 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1111 INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
1112 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1114 /* Get the latest mac address, User can use a LAA */
1115 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1118 /* Put the address into the Receive Address Array */
1119 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1121 /* Initialize the hardware */
1122 if (lem_hardware_init(adapter)) {
1123 device_printf(dev, "Unable to initialize the hardware\n");
1126 lem_update_link_status(adapter);
1128 /* Setup VLAN support, basic and offload if available */
1129 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1131 /* Set hardware offload abilities */
1132 ifp->if_hwassist = 0;
1133 if (adapter->hw.mac.type >= e1000_82543) {
1134 if (ifp->if_capenable & IFCAP_TXCSUM)
1135 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1138 /* Configure for OS presence */
1139 lem_init_manageability(adapter);
1141 /* Prepare transmit descriptors and buffers */
1142 lem_setup_transmit_structures(adapter);
1143 lem_initialize_transmit_unit(adapter);
1145 /* Setup Multicast table */
1146 lem_set_multi(adapter);
1148 /* Prepare receive descriptors and buffers */
1149 if (lem_setup_receive_structures(adapter)) {
1150 device_printf(dev, "Could not setup receive structures\n");
/* Error path appears to stop the adapter under the TX lock; the call
 * between lock/unlock is elided here. */
1151 EM_TX_LOCK(adapter);
1153 EM_TX_UNLOCK(adapter);
1156 lem_initialize_receive_unit(adapter);
1158 /* Use real VLAN Filter support? */
1159 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1160 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
1161 /* Use real VLAN Filter support */
1162 lem_setup_vlan_hw_support(adapter);
/* Otherwise just enable VLAN tag stripping/insertion (CTRL.VME). */
1165 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1166 ctrl |= E1000_CTRL_VME;
1167 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1171 /* Don't lose promiscuous settings */
1172 lem_set_promisc(adapter);
1174 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1175 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1177 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1178 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1180 /* MSI/X configuration for 82574 */
1181 if (adapter->hw.mac.type == e1000_82574) {
1183 tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1184 tmp |= E1000_CTRL_EXT_PBA_CLR;
1185 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1187 ** Set the IVAR - interrupt vector routing.
1188 ** Each nibble represents a vector, high bit
1189 ** is enable, other 3 bits are the MSIX table
1190 ** entry, we map RXQ0 to 0, TXQ0 to 1, and
1191 ** Link (other) to 2, hence the magic number.
1193 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
1196 #ifdef DEVICE_POLLING
1198 * Only enable interrupts if we are not polling, make sure
1199 * they are off otherwise.
1201 if (ifp->if_capenable & IFCAP_POLLING)
1202 lem_disable_intr(adapter);
/* An "else" before the enable below is elided in this view. */
1204 #endif /* DEVICE_POLLING */
1205 lem_enable_intr(adapter);
1207 /* AMT based hardware can now take control from firmware */
1208 if (adapter->has_manage && adapter->has_amt)
1209 lem_get_hw_control(adapter);
1211 /* Don't reset the phy next time init gets called */
1212 adapter->hw.phy.reset_disable = TRUE;
/*
 * lem_init -- ifnet if_init entry point (signature elided from this view;
 * presumably `static void lem_init(void *arg)` -- confirm in full source).
 * Simply wraps lem_init_locked() with the CORE lock.
 */
1218 struct adapter *adapter = arg;
1220 EM_CORE_LOCK(adapter);
1221 lem_init_locked(adapter);
1222 EM_CORE_UNLOCK(adapter);
1226 #ifdef DEVICE_POLLING
1227 /*********************************************************************
1229 * Legacy polling routine
1231 *********************************************************************/
/*
 * lem_poll -- DEVICE_POLLING handler.  Bails out if the interface is not
 * RUNNING; on POLL_AND_CHECK_STATUS additionally reads ICR and refreshes
 * link state.  Then harvests RX up to `count` packets and kicks TX.
 * NOTE(review): elided extract -- return statements/braces are missing
 * from this view.
 */
1233 lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1235 struct adapter *adapter = ifp->if_softc;
1236 u32 reg_icr, rx_done = 0;
1238 EM_CORE_LOCK(adapter);
1239 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1240 EM_CORE_UNLOCK(adapter);
1244 if (cmd == POLL_AND_CHECK_STATUS) {
1245 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* RXSEQ/LSC indicate a possible link transition: re-check link now. */
1246 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1247 callout_stop(&adapter->timer);
1248 adapter->hw.mac.get_link_status = 1;
1249 lem_update_link_status(adapter);
1250 callout_reset(&adapter->timer, hz,
1251 lem_local_timer, adapter);
1254 EM_CORE_UNLOCK(adapter);
1256 lem_rxeof(adapter, count, &rx_done);
1258 EM_TX_LOCK(adapter);
1260 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1261 lem_start_locked(ifp);
1262 EM_TX_UNLOCK(adapter);
1265 #endif /* DEVICE_POLLING */
1267 #ifdef EM_LEGACY_IRQ
1268 /*********************************************************************
1270 * Legacy Interrupt Service routine
1272 *********************************************************************/
/*
 * lem_intr -- legacy (EM_LEGACY_IRQ) interrupt service routine; the
 * signature and reg_icr declaration are elided from this view.
 * Reads/acks ICR, counts RX overruns, handles link transitions, then
 * processes RX and restarts TX under the TX lock.
 */
1276 struct adapter *adapter = arg;
1277 struct ifnet *ifp = adapter->ifp;
/* Under DEVICE_POLLING the poll handler owns the device; ignore IRQs. */
1281 if (ifp->if_capenable & IFCAP_POLLING)
1284 EM_CORE_LOCK(adapter);
1285 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1286 if (reg_icr & E1000_ICR_RXO)
1287 adapter->rx_overruns++;
/* All-ones means the device is gone (hot-unplug); zero means not ours. */
1289 if ((reg_icr == 0xffffffff) || (reg_icr == 0))
1292 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1295 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1296 callout_stop(&adapter->timer);
1297 adapter->hw.mac.get_link_status = 1;
1298 lem_update_link_status(adapter);
1299 /* Deal with TX cruft when link lost */
1300 lem_tx_purge(adapter);
1301 callout_reset(&adapter->timer, hz,
1302 lem_local_timer, adapter);
1306 EM_TX_LOCK(adapter);
1307 lem_rxeof(adapter, -1, NULL);
1309 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1310 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1311 lem_start_locked(ifp);
1312 EM_TX_UNLOCK(adapter);
1315 EM_CORE_UNLOCK(adapter);
1319 #else /* EM_FAST_IRQ, then fast interrupt routines only */
/*
 * lem_handle_link -- deferred (taskqueue) link-change handler queued by
 * lem_irq_fast.  Refreshes link state and purges stale TX work under the
 * CORE lock; `pending` is the standard taskqueue argument (unused here
 * as far as this view shows).
 */
1322 lem_handle_link(void *context, int pending)
1324 struct adapter *adapter = context;
1325 struct ifnet *ifp = adapter->ifp;
1327 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1330 EM_CORE_LOCK(adapter);
1331 callout_stop(&adapter->timer);
1332 lem_update_link_status(adapter);
1333 /* Deal with TX cruft when link lost */
1334 lem_tx_purge(adapter);
1335 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1336 EM_CORE_UNLOCK(adapter);
1340 /* Combined RX/TX handler, used by Legacy and MSI */
/* Combined RX/TX deferred handler, used by Legacy and MSI (fast IRQ path).
 * Harvests RX up to rx_process_limit, restarts TX if the send queue is
 * non-empty, and re-enables interrupts only while still RUNNING. */
1342 lem_handle_rxtx(void *context, int pending)
1344 struct adapter *adapter = context;
1345 struct ifnet *ifp = adapter->ifp;
1348 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1349 lem_rxeof(adapter, adapter->rx_process_limit, NULL);
1350 EM_TX_LOCK(adapter);
1352 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1353 lem_start_locked(ifp);
1354 EM_TX_UNLOCK(adapter);
/* Re-check RUNNING: it may have been cleared while we processed above. */
1357 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1358 lem_enable_intr(adapter);
1361 /*********************************************************************
1363 * Fast Legacy/MSI Combined Interrupt Service routine
1365 *********************************************************************/
/*
 * lem_irq_fast -- fast (filter) interrupt handler for Legacy/MSI.
 * Claims or rejects the interrupt from ICR, masks further interrupts and
 * defers real work to the rxtx/link taskqueues.  Returns FILTER_STRAY or
 * FILTER_HANDLED.  NOTE(review): local declarations and some guard lines
 * are elided from this view.
 */
1367 lem_irq_fast(void *arg)
1369 struct adapter *adapter = arg;
1375 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* All-ones ICR: device likely removed (hot-unplug). */
1378 if (reg_icr == 0xffffffff)
1379 return FILTER_STRAY;
1381 /* Definitely not our interrupt. */
1383 return FILTER_STRAY;
1386 * Mask interrupts until the taskqueue is finished running. This is
1387 * cheap, just assume that it is needed. This also works around the
1388 * MSI message reordering errata on certain systems.
1390 lem_disable_intr(adapter);
1391 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1393 /* Link status change */
1394 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1395 adapter->hw.mac.get_link_status = 1;
1396 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1399 if (reg_icr & E1000_ICR_RXO)
1400 adapter->rx_overruns++;
1401 return FILTER_HANDLED;
1403 #endif /* ~EM_LEGACY_IRQ */
1406 /*********************************************************************
1408 * Media Ioctl callback
1410 * This routine is called whenever the user queries the status of
1411 * the interface using ifconfig.
1413 **********************************************************************/
/*
 * lem_media_status -- ifmedia status callback (ifconfig queries).
 * Refreshes link state, then reports active media: fiber/serdes devices
 * report 1000_SX (or 1000_LX on 82545) full duplex; copper reports
 * speed from adapter->link_speed plus the negotiated duplex.
 */
1415 lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1417 struct adapter *adapter = ifp->if_softc;
1418 u_char fiber_type = IFM_1000_SX;
1420 INIT_DEBUGOUT("lem_media_status: begin");
1422 EM_CORE_LOCK(adapter);
1423 lem_update_link_status(adapter);
1425 ifmr->ifm_status = IFM_AVALID;
1426 ifmr->ifm_active = IFM_ETHER;
/* No link: report only "valid but inactive" and return early. */
1428 if (!adapter->link_active) {
1429 EM_CORE_UNLOCK(adapter);
1433 ifmr->ifm_status |= IFM_ACTIVE;
1435 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1436 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1437 if (adapter->hw.mac.type == e1000_82545)
1438 fiber_type = IFM_1000_LX;
1439 ifmr->ifm_active |= fiber_type | IFM_FDX;
/* Copper path: the case labels (10/100/1000) are elided from this view. */
1441 switch (adapter->link_speed) {
1443 ifmr->ifm_active |= IFM_10_T;
1446 ifmr->ifm_active |= IFM_100_TX;
1449 ifmr->ifm_active |= IFM_1000_T;
1452 if (adapter->link_duplex == FULL_DUPLEX)
1453 ifmr->ifm_active |= IFM_FDX;
1455 ifmr->ifm_active |= IFM_HDX;
1457 EM_CORE_UNLOCK(adapter);
1460 /*********************************************************************
1462 * Media Ioctl callback
1464 * This routine is called when the user changes speed/duplex using
1465 * media/mediopt option with ifconfig.
1467 **********************************************************************/
/*
 * lem_media_change -- ifmedia change callback (ifconfig media/mediaopt).
 * Translates the requested IFM subtype into autoneg/forced speed-duplex
 * settings, then re-inits the adapter so they take effect.
 * NOTE(review): case labels and the EINVAL return are elided here.
 */
1469 lem_media_change(struct ifnet *ifp)
1471 struct adapter *adapter = ifp->if_softc;
1472 struct ifmedia *ifm = &adapter->media;
1474 INIT_DEBUGOUT("lem_media_change: begin");
1476 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1479 EM_CORE_LOCK(adapter);
1480 switch (IFM_SUBTYPE(ifm->ifm_media)) {
/* IFM_AUTO: advertise everything. */
1482 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1483 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
/* Gigabit: still autoneg, but advertise 1000FD only. */
1488 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1489 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
/* 100 Mb: force speed, duplex from the FDX media flag. */
1492 adapter->hw.mac.autoneg = FALSE;
1493 adapter->hw.phy.autoneg_advertised = 0;
1494 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1495 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1497 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
/* 10 Mb: force speed, duplex from the FDX media flag. */
1500 adapter->hw.mac.autoneg = FALSE;
1501 adapter->hw.phy.autoneg_advertised = 0;
1502 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1503 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1505 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1508 device_printf(adapter->dev, "Unsupported media type\n");
1511 /* As the speed/duplex settings may have changed we need to
1514 adapter->hw.phy.reset_disable = FALSE;
1516 lem_init_locked(adapter);
1517 EM_CORE_UNLOCK(adapter);
1522 /*********************************************************************
1524 * This routine maps the mbufs to tx descriptors.
1526 * return 0 on success, positive on failure
1527 **********************************************************************/
/*
 * lem_xmit -- map one mbuf chain onto TX descriptors and hand it to the
 * hardware.  On success the chain is owned by the driver until the EOP
 * descriptor writes back; on fatal mapping errors the chain is dropped.
 * Returns 0 on success, positive errno on failure (per header comment).
 * NOTE(review): elided extract -- error-path frees/returns, loop braces
 * and several assignments (e.g. m_head = *m_headp, last = i) are missing
 * from this view; comments cover only visible statements.
 */
1530 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1532 bus_dma_segment_t segs[EM_MAX_SCATTER];
1534 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1535 struct e1000_tx_desc *ctxd = NULL;
1536 struct mbuf *m_head;
1537 u32 txd_upper, txd_lower, txd_used, txd_saved;
1538 int error, nsegs, i, j, first, last = 0;
1541 txd_upper = txd_lower = txd_used = txd_saved = 0;
1544 ** When doing checksum offload, it is critical to
1545 ** make sure the first mbuf has more than header,
1546 ** because that routine expects data to be present.
1548 if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
1549 (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
1550 m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
1557 * Map the packet for DMA
1559 * Capture the first descriptor index,
1560 * this descriptor will have the index
1561 * of the EOP which is the only one that
1562 * now gets a DONE bit writeback.
1564 first = adapter->next_avail_tx_desc;
1565 tx_buffer = &adapter->tx_buffer_area[first];
1566 tx_buffer_mapped = tx_buffer;
1567 map = tx_buffer->map;
1569 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1570 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1573 * There are two types of errors we can (try) to handle:
1574 * - EFBIG means the mbuf chain was too long and bus_dma ran
1575 * out of segments. Defragment the mbuf chain and try again.
1576 * - ENOMEM means bus_dma could not obtain enough bounce buffers
1577 * at this point in time. Defer sending and try again later.
1578 * All other errors, in particular EINVAL, are fatal and prevent the
1579 * mbuf chain from ever going through. Drop it and report error.
1581 if (error == EFBIG) {
1584 m = m_defrag(*m_headp, M_DONTWAIT);
/* m_defrag failure path: count it (free/return lines elided here). */
1586 adapter->mbuf_alloc_failed++;
1594 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1595 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1598 adapter->no_tx_dma_setup++;
1603 } else if (error != 0) {
1604 adapter->no_tx_dma_setup++;
/* Need nsegs + slack; leave 2 descriptors of headroom. */
1608 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1609 adapter->no_tx_desc_avail2++;
1610 bus_dmamap_unload(adapter->txtag, map);
1615 /* Do hardware assists */
1616 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1617 lem_transmit_checksum_setup(adapter, m_head,
1618 &txd_upper, &txd_lower);
1620 i = adapter->next_avail_tx_desc;
1621 if (adapter->pcix_82544)
1624 /* Set up our transmit descriptors */
1625 for (j = 0; j < nsegs; j++) {
1627 bus_addr_t seg_addr;
1628 /* If adapter is 82544 and on PCIX bus */
1629 if(adapter->pcix_82544) {
1630 DESC_ARRAY desc_array;
1631 u32 array_elements, counter;
1633 * Check the Address and Length combination and
1634 * split the data accordingly
1636 array_elements = lem_fill_descriptors(segs[j].ds_addr,
1637 segs[j].ds_len, &desc_array);
1638 for (counter = 0; counter < array_elements; counter++) {
/* Out of descriptors mid-packet: roll back to the saved index. */
1639 if (txd_used == adapter->num_tx_desc_avail) {
1640 adapter->next_avail_tx_desc = txd_saved;
1641 adapter->no_tx_desc_avail2++;
1642 bus_dmamap_unload(adapter->txtag, map);
1645 tx_buffer = &adapter->tx_buffer_area[i];
1646 ctxd = &adapter->tx_desc_base[i];
1647 ctxd->buffer_addr = htole64(
1648 desc_array.descriptor[counter].address);
1649 ctxd->lower.data = htole32(
1650 (adapter->txd_cmd | txd_lower | (u16)
1651 desc_array.descriptor[counter].length));
1653 htole32((txd_upper));
/* Descriptor ring wrap-around. */
1655 if (++i == adapter->num_tx_desc)
1657 tx_buffer->m_head = NULL;
1658 tx_buffer->next_eop = -1;
/* Non-82544 path: one descriptor per DMA segment. */
1662 tx_buffer = &adapter->tx_buffer_area[i];
1663 ctxd = &adapter->tx_desc_base[i];
1664 seg_addr = segs[j].ds_addr;
1665 seg_len = segs[j].ds_len;
1666 ctxd->buffer_addr = htole64(seg_addr);
1667 ctxd->lower.data = htole32(
1668 adapter->txd_cmd | txd_lower | seg_len);
1672 if (++i == adapter->num_tx_desc)
1674 tx_buffer->m_head = NULL;
1675 tx_buffer->next_eop = -1;
1679 adapter->next_avail_tx_desc = i;
1681 if (adapter->pcix_82544)
1682 adapter->num_tx_desc_avail -= txd_used;
1684 adapter->num_tx_desc_avail -= nsegs;
1686 if (m_head->m_flags & M_VLANTAG) {
1687 /* Set the vlan id. */
1688 ctxd->upper.fields.special =
1689 htole16(m_head->m_pkthdr.ether_vtag);
1690 /* Tell hardware to add tag */
1691 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
1694 tx_buffer->m_head = m_head;
/* Swap DMA maps so the mapped buffer stays with the EOP slot. */
1695 tx_buffer_mapped->map = tx_buffer->map;
1696 tx_buffer->map = map;
1697 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1700 * Last Descriptor of Packet
1701 * needs End Of Packet (EOP)
1702 * and Report Status (RS)
1705 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1707 * Keep track in the first buffer which
1708 * descriptor will be written back
1710 tx_buffer = &adapter->tx_buffer_area[first];
1711 tx_buffer->next_eop = last;
1712 adapter->watchdog_time = ticks;
1715 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1716 * that this frame is available to transmit.
1718 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1719 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* 82547 half-duplex needs the FIFO-hang workaround instead of a plain
 * tail write. */
1720 if (adapter->hw.mac.type == e1000_82547 &&
1721 adapter->link_duplex == HALF_DUPLEX)
1722 lem_82547_move_tail(adapter);
1724 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1725 if (adapter->hw.mac.type == e1000_82547)
1726 lem_82547_update_fifo_head(adapter,
1727 m_head->m_pkthdr.len);
1733 /*********************************************************************
1735 * 82547 workaround to avoid controller hang in half-duplex environment.
1736 * The workaround is to avoid queuing a large packet that would span
1737 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1738 * in this case. We do that only when FIFO is quiescent.
1740 **********************************************************************/
/*
 * lem_82547_move_tail -- 82547 half-duplex TX FIFO hang workaround.
 * Walks descriptors between the hardware tail and the software tail,
 * accumulating the queued byte length; if the FIFO check says the write
 * would span the FIFO boundary, retry via the tx_fifo_timer callout
 * instead of advancing TDT now.  Caller must hold the TX lock.
 * NOTE(review): `eop` declaration and some loop lines are elided here.
 */
1742 lem_82547_move_tail(void *arg)
1744 struct adapter *adapter = arg;
1745 struct e1000_tx_desc *tx_desc;
1746 u16 hw_tdt, sw_tdt, length = 0;
1749 EM_TX_LOCK_ASSERT(adapter);
1751 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
1752 sw_tdt = adapter->next_avail_tx_desc;
1754 while (hw_tdt != sw_tdt) {
1755 tx_desc = &adapter->tx_desc_base[hw_tdt];
1756 length += tx_desc->lower.flags.length;
1757 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1758 if (++hw_tdt == adapter->num_tx_desc)
/* FIFO not safe yet: reschedule ourselves one tick later. */
1762 if (lem_82547_fifo_workaround(adapter, length)) {
1763 adapter->tx_fifo_wrk_cnt++;
1764 callout_reset(&adapter->tx_fifo_timer, 1,
1765 lem_82547_move_tail, adapter);
1768 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
1769 lem_82547_update_fifo_head(adapter, length);
/*
 * lem_82547_fifo_workaround -- decide whether queuing `len` bytes would
 * risk the 82547 half-duplex FIFO hang.  Rounds the packet up to the
 * FIFO header granularity and compares against the space remaining
 * before the FIFO wraps.  Return statements are elided from this view;
 * presumably non-zero means "defer, FIFO not safe" -- confirm in full
 * source.
 */
1776 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1778 int fifo_space, fifo_pkt_len;
1780 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1782 if (adapter->link_duplex == HALF_DUPLEX) {
1783 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1785 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1786 if (lem_82547_tx_fifo_reset(adapter))
/*
 * lem_82547_update_fifo_head -- advance the software model of the 82547
 * TX FIFO head by the (header-rounded) packet length, wrapping modulo
 * the FIFO size.
 */
1797 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1799 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1801 /* tx_fifo_head is always 16 byte aligned */
1802 adapter->tx_fifo_head += fifo_pkt_len;
1803 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1804 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * lem_82547_tx_fifo_reset -- reset the 82547 TX FIFO pointers, but only
 * when the FIFO is provably quiescent: descriptor head == tail, all four
 * FIFO head/tail register pairs match, and the FIFO packet count is 0.
 * Disables the TX unit around the pointer rewrite, then re-enables it.
 * Return values are elided from this view.
 */
1810 lem_82547_tx_fifo_reset(struct adapter *adapter)
1814 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1815 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1816 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1817 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1818 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1819 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1820 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1821 /* Disable TX unit */
1822 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1823 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1824 tctl & ~E1000_TCTL_EN);
1826 /* Reset FIFO pointers */
1827 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1828 adapter->tx_head_addr);
1829 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1830 adapter->tx_head_addr);
1831 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1832 adapter->tx_head_addr);
1833 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1834 adapter->tx_head_addr);
1836 /* Re-enable TX unit */
1837 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
1838 E1000_WRITE_FLUSH(&adapter->hw);
1840 adapter->tx_fifo_head = 0;
1841 adapter->tx_fifo_reset_cnt++;
/*
 * lem_set_promisc -- program RCTL promiscuous bits from the interface
 * flags: IFF_PROMISC sets unicast+multicast promiscuous (the SBP line
 * below appears to be conditionally compiled/commented in the full
 * source); IFF_ALLMULTI sets multicast-promiscuous only.
 */
1851 lem_set_promisc(struct adapter *adapter)
1853 struct ifnet *ifp = adapter->ifp;
1856 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1858 if (ifp->if_flags & IFF_PROMISC) {
1859 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1860 /* Turn this on if you want to see bad packets */
1862 reg_rctl |= E1000_RCTL_SBP;
1863 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1864 } else if (ifp->if_flags & IFF_ALLMULTI) {
1865 reg_rctl |= E1000_RCTL_MPE;
1866 reg_rctl &= ~E1000_RCTL_UPE;
1867 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/* Clear all promiscuous/bad-packet receive bits (UPE, MPE, SBP) in RCTL. */
1872 lem_disable_promisc(struct adapter *adapter)
1876 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1878 reg_rctl &= (~E1000_RCTL_UPE);
1879 reg_rctl &= (~E1000_RCTL_MPE);
1880 reg_rctl &= (~E1000_RCTL_SBP);
1881 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1885 /*********************************************************************
1888 * This routine is called whenever multicast address list is updated.
1890 **********************************************************************/
/*
 * lem_set_multi -- rebuild the hardware multicast filter from the
 * interface's multicast address list.  Falls back to multicast-
 * promiscuous (MPE) when the list overflows the hardware table.
 * The 82542 rev2 erratum requires an RCTL reset (and MWI disable)
 * around the update.  NOTE(review): mta allocation/declaration lines
 * and msec_delay calls are elided from this view.
 */
1893 lem_set_multi(struct adapter *adapter)
1895 struct ifnet *ifp = adapter->ifp;
1896 struct ifmultiaddr *ifma;
1898 u8 *mta; /* Multicast array memory */
1901 IOCTL_DEBUGOUT("lem_set_multi: begin");
1904 bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
/* 82542 rev2 workaround: put the receiver in reset while updating. */
1906 if (adapter->hw.mac.type == e1000_82542 &&
1907 adapter->hw.revision_id == E1000_REVISION_2) {
1908 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1909 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1910 e1000_pci_clear_mwi(&adapter->hw);
1911 reg_rctl |= E1000_RCTL_RST;
1912 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/* Pre-8.0 used IF_ADDR_LOCK; newer kernels use if_maddr_rlock. */
1916 #if __FreeBSD_version < 800000
1919 if_maddr_rlock(ifp);
1921 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1922 if (ifma->ifma_addr->sa_family != AF_LINK)
1925 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1928 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1929 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
1932 #if __FreeBSD_version < 800000
1933 IF_ADDR_UNLOCK(ifp);
1935 if_maddr_runlock(ifp);
/* Too many groups for the hardware table: accept all multicast. */
1937 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1938 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1939 reg_rctl |= E1000_RCTL_MPE;
1940 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1942 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
/* Take the 82542 rev2 receiver back out of reset and restore MWI. */
1944 if (adapter->hw.mac.type == e1000_82542 &&
1945 adapter->hw.revision_id == E1000_REVISION_2) {
1946 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1947 reg_rctl &= ~E1000_RCTL_RST;
1948 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1950 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1951 e1000_pci_set_mwi(&adapter->hw);
1956 /*********************************************************************
1959 * This routine checks for link status and updates statistics.
1961 **********************************************************************/
/*
 * lem_local_timer -- once-per-second callout: refreshes link state and
 * statistics, runs smartspeed, and checks the TX watchdog.  The lines
 * after the callout_reset belong to the watchdog-timeout ("hung") path;
 * its label and return are elided from this view.
 */
1964 lem_local_timer(void *arg)
1966 struct adapter *adapter = arg;
1968 EM_CORE_LOCK_ASSERT(adapter);
1970 lem_update_link_status(adapter);
1971 lem_update_stats_counters(adapter);
1973 lem_smartspeed(adapter);
1976 * We check the watchdog: the time since
1977 * the last TX descriptor was cleaned.
1978 * This implies a functional TX engine.
1980 if ((adapter->watchdog_check == TRUE) &&
1981 (ticks - adapter->watchdog_time > EM_WATCHDOG))
1984 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
/* Watchdog fired: report, mark interface down, and reinitialize. */
1987 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1988 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1989 adapter->watchdog_events++;
1990 lem_init_locked(adapter);
/*
 * lem_update_link_status -- poll/refresh link state per media type and,
 * on a transition, update link_speed/duplex/baudrate, the watchdog, and
 * notify the stack via if_link_state_change().
 * NOTE(review): link_check declaration and some case/brace lines are
 * elided from this view.
 */
1994 lem_update_link_status(struct adapter *adapter)
1996 struct e1000_hw *hw = &adapter->hw;
1997 struct ifnet *ifp = adapter->ifp;
1998 device_t dev = adapter->dev;
2001 /* Get the cached link value or read phy for real */
2002 switch (hw->phy.media_type) {
2003 case e1000_media_type_copper:
2004 if (hw->mac.get_link_status) {
2005 /* Do the work to read phy */
2006 e1000_check_for_link(hw);
2007 link_check = !hw->mac.get_link_status;
2008 if (link_check) /* ESB2 fix */
2009 e1000_cfg_on_link_up(hw);
2013 case e1000_media_type_fiber:
2014 e1000_check_for_link(hw);
/* Fiber link state comes straight from the STATUS register (the LU-bit
 * mask on the continuation line is elided here). */
2015 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2018 case e1000_media_type_internal_serdes:
2019 e1000_check_for_link(hw);
2020 link_check = adapter->hw.mac.serdes_has_link;
2023 case e1000_media_type_unknown:
2027 /* Now check for a transition */
2028 if (link_check && (adapter->link_active == 0)) {
2029 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2030 &adapter->link_duplex);
2032 device_printf(dev, "Link is up %d Mbps %s\n",
2033 adapter->link_speed,
2034 ((adapter->link_duplex == FULL_DUPLEX) ?
2035 "Full Duplex" : "Half Duplex"));
2036 adapter->link_active = 1;
2037 adapter->smartspeed = 0;
2038 ifp->if_baudrate = adapter->link_speed * 1000000;
2039 if_link_state_change(ifp, LINK_STATE_UP);
2040 } else if (!link_check && (adapter->link_active == 1)) {
2041 ifp->if_baudrate = adapter->link_speed = 0;
2042 adapter->link_duplex = 0;
2044 device_printf(dev, "Link is Down\n");
2045 adapter->link_active = 0;
2046 /* Link down, disable watchdog */
2047 adapter->watchdog_check = FALSE;
2048 if_link_state_change(ifp, LINK_STATE_DOWN);
2052 /*********************************************************************
2054 * This routine disables all traffic on the adapter by issuing a
2055 * global reset on the MAC and deallocates TX/RX buffers.
2057 * This routine should always be called with BOTH the CORE
2059 **********************************************************************/
/*
 * lem_stop -- halt all traffic: disable interrupts, stop callouts, clear
 * RUNNING/OACTIVE, and globally reset the MAC.  Per the header comment
 * above, both the CORE and TX locks must be held (asserted below).
 * The signature line is elided from this view (presumably
 * `static void lem_stop(void *arg)` -- confirm in full source).
 */
2064 struct adapter *adapter = arg;
2065 struct ifnet *ifp = adapter->ifp;
2067 EM_CORE_LOCK_ASSERT(adapter);
2068 EM_TX_LOCK_ASSERT(adapter);
2070 INIT_DEBUGOUT("lem_stop: begin");
2072 lem_disable_intr(adapter);
2073 callout_stop(&adapter->timer);
2074 callout_stop(&adapter->tx_fifo_timer);
2076 /* Tell the stack that the interface is no longer active */
2077 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2079 e1000_reset_hw(&adapter->hw);
/* 82544 and newer: clear the Wake-Up Control register after reset. */
2080 if (adapter->hw.mac.type >= e1000_82544)
2081 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2083 e1000_led_off(&adapter->hw);
2084 e1000_cleanup_led(&adapter->hw);
2088 /*********************************************************************
2090 * Determine hardware revision.
2092 **********************************************************************/
/*
 * lem_identify_hardware -- ensure bus-master/memory access are enabled
 * in PCI config space (re-enabling them if the BIOS left them off),
 * record vendor/device/revision/subsystem IDs into the shared-code hw
 * struct, and run e1000_set_mac_type() to classify the MAC.
 */
2094 lem_identify_hardware(struct adapter *adapter)
2096 device_t dev = adapter->dev;
2098 /* Make sure our PCI config space has the necessary stuff set */
2099 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2100 if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2101 (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2102 device_printf(dev, "Memory Access and/or Bus Master bits "
2104 adapter->hw.bus.pci_cmd_word |=
2105 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2106 pci_write_config(dev, PCIR_COMMAND,
2107 adapter->hw.bus.pci_cmd_word, 2);
2110 /* Save off the information about this board */
2111 adapter->hw.vendor_id = pci_get_vendor(dev);
2112 adapter->hw.device_id = pci_get_device(dev);
2113 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2114 adapter->hw.subsystem_vendor_id =
2115 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2116 adapter->hw.subsystem_device_id =
2117 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2119 /* Do Shared Code Init and Setup */
2120 if (e1000_set_mac_type(&adapter->hw)) {
2121 device_printf(dev, "Setup init failure\n");
/*
 * lem_allocate_pci_resources -- map the memory BAR (and, for pre-82544
 * parts, locate and map the I/O BAR) and wire the osdep bus-space
 * tags/handles into the shared-code hw struct.
 * NOTE(review): the memory-BAR rid initializer and several return lines
 * are elided from this view.
 */
2127 lem_allocate_pci_resources(struct adapter *adapter)
2129 device_t dev = adapter->dev;
2130 int val, rid, error = E1000_SUCCESS;
2133 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2135 if (adapter->memory == NULL) {
2136 device_printf(dev, "Unable to allocate bus resource: memory\n");
2139 adapter->osdep.mem_bus_space_tag =
2140 rman_get_bustag(adapter->memory);
2141 adapter->osdep.mem_bus_space_handle =
2142 rman_get_bushandle(adapter->memory);
2143 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2145 /* Only older adapters use IO mapping */
/* NOTE(review): comment vs. condition disagree -- `> e1000_82543`
 * selects NEWER MACs for the IO-BAR scan; the upstream driver uses
 * `> e1000_82543 && < e1000_82571`-style bounds.  Confirm against the
 * full source before assuming either way. */
2146 if (adapter->hw.mac.type > e1000_82543) {
2147 /* Figure our where our IO BAR is ? */
2148 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2149 val = pci_read_config(dev, rid, 4);
2150 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2151 adapter->io_rid = rid;
2155 /* check for 64bit BAR */
2156 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2159 if (rid >= PCIR_CIS) {
2160 device_printf(dev, "Unable to locate IO BAR\n");
2163 adapter->ioport = bus_alloc_resource_any(dev,
2164 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2165 if (adapter->ioport == NULL) {
2166 device_printf(dev, "Unable to allocate bus resource: "
2170 adapter->hw.io_base = 0;
2171 adapter->osdep.io_bus_space_tag =
2172 rman_get_bustag(adapter->ioport);
2173 adapter->osdep.io_bus_space_handle =
2174 rman_get_bushandle(adapter->ioport);
2177 adapter->hw.back = &adapter->osdep;
2182 /*********************************************************************
2184 * Setup the Legacy or MSI Interrupt handler
2186 **********************************************************************/
/*
 * lem_allocate_irq -- allocate the single IRQ resource and attach either
 * the legacy handler (EM_LEGACY_IRQ) or the fast filter handler plus its
 * rxtx/link taskqueues.  NOTE(review): rid/error declarations and return
 * lines are elided from this view.
 */
2188 lem_allocate_irq(struct adapter *adapter)
2190 device_t dev = adapter->dev;
2193 /* Manually turn off all interrupts */
2194 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2196 /* We allocate a single interrupt resource */
2197 adapter->res[0] = bus_alloc_resource_any(dev,
2198 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2199 if (adapter->res[0] == NULL) {
2200 device_printf(dev, "Unable to allocate bus resource: "
2205 #ifdef EM_LEGACY_IRQ
2206 /* We do Legacy setup */
2207 if ((error = bus_setup_intr(dev, adapter->res[0],
2208 INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
2209 &adapter->tag[0])) != 0) {
2210 device_printf(dev, "Failed to register interrupt handler");
2214 #else /* FAST_IRQ */
2216 * Try allocating a fast interrupt and the associated deferred
2217 * processing contexts.
2219 TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
2220 TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
2221 adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
2222 taskqueue_thread_enqueue, &adapter->tq);
2223 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2224 device_get_nameunit(adapter->dev));
/* Filter handler: lem_irq_fast runs in interrupt context, work is
 * deferred to the taskqueues initialized above. */
2225 if ((error = bus_setup_intr(dev, adapter->res[0],
2226 INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
2227 &adapter->tag[0])) != 0) {
2228 device_printf(dev, "Failed to register fast interrupt "
2229 "handler: %d\n", error);
2230 taskqueue_free(adapter->tq);
2234 #endif /* EM_LEGACY_IRQ */
/*
 * lem_free_pci_resources -- release, in order: the interrupt handler
 * tag, the IRQ resource, the memory BAR, and (if mapped) the I/O BAR.
 * Safe against partially-completed attach: every release is guarded by
 * a NULL check.
 */
2241 lem_free_pci_resources(struct adapter *adapter)
2243 device_t dev = adapter->dev;
2246 if (adapter->tag[0] != NULL) {
2247 bus_teardown_intr(dev, adapter->res[0],
2249 adapter->tag[0] = NULL;
2252 if (adapter->res[0] != NULL) {
2253 bus_release_resource(dev, SYS_RES_IRQ,
2254 0, adapter->res[0]);
2257 if (adapter->memory != NULL)
2258 bus_release_resource(dev, SYS_RES_MEMORY,
2259 PCIR_BAR(0), adapter->memory);
2261 if (adapter->ioport != NULL)
2262 bus_release_resource(dev, SYS_RES_IOPORT,
2263 adapter->io_rid, adapter->ioport);
2267 /*********************************************************************
2269 * Initialize the hardware to a configuration
2270 * as specified by the adapter structure.
2272 **********************************************************************/
/*
 * lem_hardware_init -- global MAC reset followed by flow-control
 * watermark setup (derived from the current PBA split) and shared-code
 * e1000_init_hw().  NOTE(review): rx_buffer_size declaration, the PBA
 * mask/shift continuation, and return lines are elided from this view.
 */
2274 lem_hardware_init(struct adapter *adapter)
2276 device_t dev = adapter->dev;
2279 INIT_DEBUGOUT("lem_hardware_init: begin");
2281 /* Issue a global reset */
2282 e1000_reset_hw(&adapter->hw);
2284 /* When hardware is reset, fifo_head is also reset */
2285 adapter->tx_fifo_head = 0;
2288 * These parameters control the automatic generation (Tx) and
2289 * response (Rx) to Ethernet PAUSE frames.
2290 * - High water mark should allow for at least two frames to be
2291 * received after sending an XOFF.
2292 * - Low water mark works best when it is very near the high water mark.
2293 * This allows the receiver to restart by sending XON when it has
2294 * drained a bit. Here we use an arbitrary value of 1500 which will
2295 * restart after one full frame is pulled from the buffer. There
2296 * could be several smaller frames in the buffer and if so they will
2297 * not trigger the XON until their total number reduces the buffer
2299 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2301 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
2304 adapter->hw.fc.high_water = rx_buffer_size -
2305 roundup2(adapter->max_frame_size, 1024);
2306 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2308 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2309 adapter->hw.fc.send_xon = TRUE;
2311 /* Set Flow control, use the tunable location if sane */
/* Valid e1000_fc_mode values are 0..3; out-of-range tunables fall back
 * to "none". */
2312 if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
2313 adapter->hw.fc.requested_mode = lem_fc_setting;
2315 adapter->hw.fc.requested_mode = e1000_fc_none;
2317 if (e1000_init_hw(&adapter->hw) < 0) {
2318 device_printf(dev, "Hardware Initialization Failed\n");
2322 e1000_check_for_link(&adapter->hw);
2327 /*********************************************************************
2329 * Setup networking device structure and register an interface.
2331 **********************************************************************/
/*
 * lem_setup_interface()
 *
 * Allocate and populate the ifnet, attach it to the ethernet layer,
 * advertise capabilities (checksum offload, VLAN, WOL, polling), and
 * register the supported media types with ifmedia.
 */
2333 lem_setup_interface(device_t dev, struct adapter *adapter)
2337 INIT_DEBUGOUT("lem_setup_interface: begin");
2339 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2341 device_printf(dev, "can not allocate ifnet structure\n");
2344 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2345 ifp->if_mtu = ETHERMTU;
2346 ifp->if_init = lem_init;
2347 ifp->if_softc = adapter;
2348 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2349 ifp->if_ioctl = lem_ioctl;
2350 ifp->if_start = lem_start;
/* Size the software send queue to the TX descriptor ring. */
2351 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2352 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2353 IFQ_SET_READY(&ifp->if_snd);
2355 ether_ifattach(ifp, adapter->hw.mac.addr);
2357 ifp->if_capabilities = ifp->if_capenable = 0;
/* HW checksum offload exists only on 82543 and later MACs. */
2359 if (adapter->hw.mac.type >= e1000_82543) {
2360 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2361 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2365 * Tell the upper layer(s) we support long frames.
2367 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2368 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2369 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2372 ** Dont turn this on by default, if vlans are
2373 ** created on another pseudo device (eg. lagg)
2374 ** then vlan events are not passed thru, breaking
2375 ** operation, but with HW FILTER off it works. If
2376 ** using vlans directly on the em driver you can
2377 ** enable this and get full hardware tag filtering.
2379 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2381 #ifdef DEVICE_POLLING
2382 ifp->if_capabilities |= IFCAP_POLLING;
2385 /* Enable only WOL MAGIC by default */
2387 ifp->if_capabilities |= IFCAP_WOL;
2388 ifp->if_capenable |= IFCAP_WOL_MAGIC;
2392 * Specify the media types supported by this adapter and register
2393 * callbacks to update media and link information
2395 ifmedia_init(&adapter->media, IFM_IMASK,
2396 lem_media_change, lem_media_status);
2397 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2398 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2399 u_char fiber_type = IFM_1000_SX; /* default type */
/* 82545 fiber parts are 1000Base-LX rather than -SX. */
2401 if (adapter->hw.mac.type == e1000_82545)
2402 fiber_type = IFM_1000_LX;
2403 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
2405 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
/* Copper: register 10/100 in both duplex modes... */
2407 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2408 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2410 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2412 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
/* ...and 1000Base-T where the PHY supports it (not the IFE PHY). */
2414 if (adapter->hw.phy.type != e1000_phy_ife) {
2415 ifmedia_add(&adapter->media,
2416 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2417 ifmedia_add(&adapter->media,
2418 IFM_ETHER | IFM_1000_T, 0, NULL);
/* Autoselect is always available and is the default. */
2421 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2422 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2427 /*********************************************************************
2429 * Workaround for SmartSpeed on 82541 and 82547 controllers
2431 **********************************************************************/
/*
 * lem_smartspeed()
 *
 * SmartSpeed workaround for 82541/82547 (IGP PHY) parts: when 1000T
 * autonegotiation repeatedly fails due to a Master/Slave configuration
 * fault, toggle manual MS configuration and restart autoneg so the
 * link can come up (possibly downshifted).  Driven periodically; the
 * adapter->smartspeed counter tracks progress through the workaround.
 */
2433 lem_smartspeed(struct adapter *adapter)
/* Only relevant when link is down, PHY is IGP, and 1000FD advertised. */
2437 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2438 adapter->hw.mac.autoneg == 0 ||
2439 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2442 if (adapter->smartspeed == 0) {
2443 /* If Master/Slave config fault is asserted twice,
2444 * we assume back-to-back */
2445 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2446 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2448 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2449 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2450 e1000_read_phy_reg(&adapter->hw,
2451 PHY_1000T_CTRL, &phy_tmp);
/* Fault with manual MS enabled: drop to auto MS config. */
2452 if(phy_tmp & CR_1000T_MS_ENABLE) {
2453 phy_tmp &= ~CR_1000T_MS_ENABLE;
2454 e1000_write_phy_reg(&adapter->hw,
2455 PHY_1000T_CTRL, phy_tmp);
2456 adapter->smartspeed++;
/* Restart autonegotiation with the new MS setting. */
2457 if(adapter->hw.mac.autoneg &&
2458 !e1000_copper_link_autoneg(&adapter->hw) &&
2459 !e1000_read_phy_reg(&adapter->hw,
2460 PHY_CONTROL, &phy_tmp)) {
2461 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2462 MII_CR_RESTART_AUTO_NEG);
2463 e1000_write_phy_reg(&adapter->hw,
2464 PHY_CONTROL, phy_tmp);
2469 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2470 /* If still no link, perhaps using 2/3 pair cable */
2471 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2472 phy_tmp |= CR_1000T_MS_ENABLE;
2473 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2474 if(adapter->hw.mac.autoneg &&
2475 !e1000_copper_link_autoneg(&adapter->hw) &&
2476 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2477 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2478 MII_CR_RESTART_AUTO_NEG);
2479 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2482 /* Restart process after EM_SMARTSPEED_MAX iterations */
2483 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2484 adapter->smartspeed = 0;
2489 * Manage DMA'able memory.
/*
 * lem_dmamap_cb()
 *
 * bus_dmamap_load() callback: stash the physical address of the single
 * DMA segment into the bus_addr_t pointed to by "arg".
 */
2492 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2496 *(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * lem_dma_malloc()
 *
 * Allocate a physically contiguous, DMA-able region of "size" bytes:
 * create a single-segment DMA tag, allocate coherent memory, and load
 * the map to obtain the bus address (via lem_dmamap_cb).  On failure
 * the goto labels below unwind whatever was created so far.
 */
2500 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2501 struct em_dma_alloc *dma, int mapflags)
2505 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2506 EM_DBA_ALIGN, 0, /* alignment, bounds */
2507 BUS_SPACE_MAXADDR, /* lowaddr */
2508 BUS_SPACE_MAXADDR, /* highaddr */
2509 NULL, NULL, /* filter, filterarg */
2512 size, /* maxsegsize */
2514 NULL, /* lockfunc */
2518 device_printf(adapter->dev,
2519 "%s: bus_dma_tag_create failed: %d\n",
2524 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2525 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2527 device_printf(adapter->dev,
2528 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2529 __func__, (uintmax_t)size, error);
/* Load the map; lem_dmamap_cb records the bus address in dma_paddr. */
2534 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2535 size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2536 if (error || dma->dma_paddr == 0) {
2537 device_printf(adapter->dev,
2538 "%s: bus_dmamap_load failed: %d\n",
/* Error unwind: release in reverse order of acquisition. */
2546 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2548 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2549 bus_dma_tag_destroy(dma->dma_tag);
2551 dma->dma_map = NULL;
2552 dma->dma_tag = NULL;
/*
 * lem_dma_free()
 *
 * Release a region allocated by lem_dma_malloc(): sync, unload, free
 * the memory and destroy the tag.  A NULL dma_tag means nothing was
 * allocated and the call is a no-op.
 */
2558 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2560 if (dma->dma_tag == NULL)
2562 if (dma->dma_map != NULL) {
/* Complete any in-flight DMA before tearing the mapping down. */
2563 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2564 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2565 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2566 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2567 dma->dma_map = NULL;
2569 bus_dma_tag_destroy(dma->dma_tag);
2570 dma->dma_tag = NULL;
2574 /*********************************************************************
2576 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2577 * the information needed to transmit a packet on the wire.
2579 **********************************************************************/
/*
 * lem_allocate_transmit_structures()
 *
 * Create the TX mbuf DMA tag, allocate the tx_buffer array (one entry
 * per TX descriptor), and create a DMA map per buffer.  On any failure
 * lem_free_transmit_structures() tears down the partial allocation.
 */
2581 lem_allocate_transmit_structures(struct adapter *adapter)
2583 device_t dev = adapter->dev;
2584 struct em_buffer *tx_buffer;
2588 * Create DMA tags for tx descriptors
2590 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2591 1, 0, /* alignment, bounds */
2592 BUS_SPACE_MAXADDR, /* lowaddr */
2593 BUS_SPACE_MAXADDR, /* highaddr */
2594 NULL, NULL, /* filter, filterarg */
2595 MCLBYTES * EM_MAX_SCATTER, /* maxsize */
2596 EM_MAX_SCATTER, /* nsegments */
2597 MCLBYTES, /* maxsegsize */
2599 NULL, /* lockfunc */
2601 &adapter->txtag)) != 0) {
2602 device_printf(dev, "Unable to allocate TX DMA tag\n");
/* One em_buffer per TX descriptor, zeroed. */
2606 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2607 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2608 if (adapter->tx_buffer_area == NULL) {
2609 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2614 /* Create the descriptor buffer dma maps */
2615 for (int i = 0; i < adapter->num_tx_desc; i++) {
2616 tx_buffer = &adapter->tx_buffer_area[i];
2617 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2619 device_printf(dev, "Unable to create TX DMA map\n");
/* -1 marks "no packet ends at this descriptor". */
2622 tx_buffer->next_eop = -1;
/* Failure path: release whatever was set up above. */
2627 lem_free_transmit_structures(adapter);
2631 /*********************************************************************
2633 * (Re)Initialize transmit structures.
2635 **********************************************************************/
/*
 * lem_setup_transmit_structures()
 *
 * (Re)initialize the TX ring: zero the descriptor memory, free any
 * mbufs still attached to tx_buffers, and reset the ring bookkeeping
 * indices before syncing the descriptor area for the hardware.
 */
2637 lem_setup_transmit_structures(struct adapter *adapter)
2639 struct em_buffer *tx_buffer;
2641 /* Clear the old ring contents */
2642 bzero(adapter->tx_desc_base,
2643 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2645 /* Free any existing TX buffers */
/* NOTE(review): the tx_buffer++ in the loop header is redundant —
 * tx_buffer is reassigned from the index on every iteration. */
2646 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2647 tx_buffer = &adapter->tx_buffer_area[i];
2648 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2649 BUS_DMASYNC_POSTWRITE);
2650 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2651 m_freem(tx_buffer->m_head);
2652 tx_buffer->m_head = NULL;
2653 tx_buffer->next_eop = -1;
/* Reset ring state: everything free, start cleaning from slot 0. */
2657 adapter->last_hw_offload = 0;
2658 adapter->next_avail_tx_desc = 0;
2659 adapter->next_tx_to_clean = 0;
2660 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2662 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2663 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2668 /*********************************************************************
2670 * Enable transmit unit.
2672 **********************************************************************/
/*
 * lem_initialize_transmit_unit()
 *
 * Program the MAC's TX registers: descriptor ring base/length,
 * head/tail pointers, inter-packet gap, interrupt delay timers, and
 * finally TCTL, whose EN bit turns the transmitter on.
 */
2674 lem_initialize_transmit_unit(struct adapter *adapter)
2679 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
2680 /* Setup the Base and Length of the Tx Descriptor Ring */
2681 bus_addr = adapter->txdma.dma_paddr;
2682 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2683 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2684 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2685 (u32)(bus_addr >> 32));
2686 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2688 /* Setup the HW Tx Head and Tail descriptor pointers */
2689 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2690 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
2692 HW_DEBUGOUT2("Base = %x, Length = %x\n",
2693 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
2694 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
2696 /* Set the default values for the Tx Inter Packet Gap timer */
2697 switch (adapter->hw.mac.type) {
2699 tipg = DEFAULT_82542_TIPG_IPGT;
2700 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2701 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
/* Fiber/serdes media uses a different IPG transmit time. */
2704 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2705 (adapter->hw.phy.media_type ==
2706 e1000_media_type_internal_serdes))
2707 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2709 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2710 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2711 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2714 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2715 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
/* TADV (absolute TX int delay) exists on 82540 and newer. */
2716 if(adapter->hw.mac.type >= e1000_82540)
2717 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
2718 adapter->tx_abs_int_delay.value);
2720 /* Program the Transmit Control Register */
2721 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2722 tctl &= ~E1000_TCTL_CT;
2723 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2724 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2726 /* This write will effectively turn on the transmit unit. */
2727 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2729 /* Setup Transmit Descriptor Base Settings */
2730 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
/* Only request delayed interrupts if a delay is configured. */
2732 if (adapter->tx_int_delay.value > 0)
2733 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2736 /*********************************************************************
2738 * Free all transmit related data structures.
2740 **********************************************************************/
/*
 * lem_free_transmit_structures()
 *
 * Tear down everything lem_allocate_transmit_structures() built:
 * unload/destroy each buffer's DMA map, free attached mbufs, free the
 * tx_buffer array, destroy the TX DMA tag, and (on FreeBSD 8+) free
 * the buf_ring.  All steps are NULL-guarded so partial allocations
 * unwind cleanly.
 */
2742 lem_free_transmit_structures(struct adapter *adapter)
2744 struct em_buffer *tx_buffer;
2746 INIT_DEBUGOUT("free_transmit_structures: begin");
2748 if (adapter->tx_buffer_area != NULL) {
2750 for (int i = 0; i < adapter->num_tx_desc; i++) {
2750 tx_buffer = &adapter->tx_buffer_area[i];
2751 if (tx_buffer->m_head != NULL) {
/* Sync + unload before freeing the mbuf the map covers. */
2752 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2753 BUS_DMASYNC_POSTWRITE);
2754 bus_dmamap_unload(adapter->txtag,
2756 m_freem(tx_buffer->m_head);
2757 tx_buffer->m_head = NULL;
2758 } else if (tx_buffer->map != NULL)
2759 bus_dmamap_unload(adapter->txtag,
2761 if (tx_buffer->map != NULL) {
2762 bus_dmamap_destroy(adapter->txtag,
2764 tx_buffer->map = NULL;
2768 if (adapter->tx_buffer_area != NULL) {
2769 free(adapter->tx_buffer_area, M_DEVBUF);
2770 adapter->tx_buffer_area = NULL;
2772 if (adapter->txtag != NULL) {
2773 bus_dma_tag_destroy(adapter->txtag);
2774 adapter->txtag = NULL;
2776 #if __FreeBSD_version >= 800000
2777 if (adapter->br != NULL)
2778 buf_ring_free(adapter->br, M_DEVBUF);
2782 /*********************************************************************
2784 * The offload context needs to be set when we transfer the first
2785 * packet of a particular protocol (TCP/UDP). This routine has been
2786 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2788 * Added back the old method of keeping the current context type
2789 * and not setting if unnecessary, as this is reported to be a
2790 * big performance win. -jfv
2791 **********************************************************************/
/*
 * lem_transmit_checksum_setup()
 *
 * Build (when needed) a TX context descriptor that tells the MAC how
 * to compute IP and TCP/UDP checksums for the following packet, and
 * return via txd_upper/txd_lower the per-packet descriptor bits the
 * caller must set.  A context descriptor is skipped entirely when the
 * previous packet used the same offload type (last_hw_offload cache),
 * which is a significant performance win.
 */
2793 lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2794 u32 *txd_upper, u32 *txd_lower)
2796 struct e1000_context_desc *TXD = NULL;
2797 struct em_buffer *tx_buffer;
2798 struct ether_vlan_header *eh;
2799 struct ip *ip = NULL;
2800 struct ip6_hdr *ip6;
2801 int curr_txd, ehdrlen;
2802 u32 cmd, hdr_len, ip_hlen;
2807 cmd = hdr_len = ipproto = 0;
2808 *txd_upper = *txd_lower = 0;
2809 curr_txd = adapter->next_avail_tx_desc;
2812 * Determine where frame payload starts.
2813 * Jump over vlan headers if already present,
2814 * helpful for QinQ too.
2816 eh = mtod(mp, struct ether_vlan_header *);
2817 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2818 etype = ntohs(eh->evl_proto);
2819 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2821 etype = ntohs(eh->evl_encap_proto);
2822 ehdrlen = ETHER_HDR_LEN;
2826 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
2827 * TODO: Support SCTP too when it hits the tree.
/* IPv4: header length is in 32-bit words. */
2831 ip = (struct ip *)(mp->m_data + ehdrlen);
2832 ip_hlen = ip->ip_hl << 2;
2834 /* Setup of IP header checksum. */
2835 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
2837 * Start offset for header checksum calculation.
2838 * End offset for header checksum calculation.
2839 * Offset of place to put the checksum.
2841 TXD = (struct e1000_context_desc *)
2842 &adapter->tx_desc_base[curr_txd];
2843 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2844 TXD->lower_setup.ip_fields.ipcse =
2845 htole16(ehdrlen + ip_hlen);
2846 TXD->lower_setup.ip_fields.ipcso =
2847 ehdrlen + offsetof(struct ip, ip_sum);
2848 cmd |= E1000_TXD_CMD_IP;
2849 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2852 hdr_len = ehdrlen + ip_hlen;
2856 case ETHERTYPE_IPV6:
/* XXX: fixed-size IPv6 header assumed; extension headers not walked. */
2857 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2858 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
2860 /* IPv6 doesn't have a header checksum. */
2862 hdr_len = ehdrlen + ip_hlen;
2863 ipproto = ip6->ip6_nxt;
2872 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2873 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2874 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2875 /* no need for context if already set */
2876 if (adapter->last_hw_offload == CSUM_TCP)
2878 adapter->last_hw_offload = CSUM_TCP;
2880 * Start offset for payload checksum calculation.
2881 * End offset for payload checksum calculation.
2882 * Offset of place to put the checksum.
2884 TXD = (struct e1000_context_desc *)
2885 &adapter->tx_desc_base[curr_txd];
2886 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2887 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2888 TXD->upper_setup.tcp_fields.tucso =
2889 hdr_len + offsetof(struct tcphdr, th_sum);
2890 cmd |= E1000_TXD_CMD_TCP;
2895 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2896 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2897 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2898 /* no need for context if already set */
2899 if (adapter->last_hw_offload == CSUM_UDP)
2901 adapter->last_hw_offload = CSUM_UDP;
2903 * Start offset for header checksum calculation.
2904 * End offset for header checksum calculation.
2905 * Offset of place to put the checksum.
2907 TXD = (struct e1000_context_desc *)
2908 &adapter->tx_desc_base[curr_txd];
2909 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2910 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2911 TXD->upper_setup.tcp_fields.tucso =
2912 hdr_len + offsetof(struct udphdr, uh_sum);
/* Finish the context descriptor and consume one ring slot for it. */
2922 TXD->tcp_seg_setup.data = htole32(0);
2923 TXD->cmd_and_length =
2924 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
2925 tx_buffer = &adapter->tx_buffer_area[curr_txd];
2926 tx_buffer->m_head = NULL;
2927 tx_buffer->next_eop = -1;
/* Advance with wraparound; the context descriptor used one slot. */
2929 if (++curr_txd == adapter->num_tx_desc)
2932 adapter->num_tx_desc_avail--;
2933 adapter->next_avail_tx_desc = curr_txd;
2937 /**********************************************************************
2939 * Examine each tx_buffer in the used queue. If the hardware is done
2940 * processing the packet then free associated resources. The
2941 * tx_buffer is put back on the free queue.
2943 **********************************************************************/
/*
 * lem_txeof()
 *
 * TX completion: walk the ring from next_tx_to_clean, and for every
 * packet whose EOP descriptor has the DD (descriptor done) bit set,
 * free the mbuf, unload its DMA map and return the descriptors to the
 * free pool.  Clears IFF_DRV_OACTIVE when enough descriptors are free
 * and disarms the watchdog when the ring is fully drained.
 * Caller must hold the TX lock (asserted below).
 */
2945 lem_txeof(struct adapter *adapter)
2947 int first, last, done, num_avail;
2948 struct em_buffer *tx_buffer;
2949 struct e1000_tx_desc *tx_desc, *eop_desc;
2950 struct ifnet *ifp = adapter->ifp;
2952 EM_TX_LOCK_ASSERT(adapter);
/* Nothing outstanding — fast exit. */
2954 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2957 num_avail = adapter->num_tx_desc_avail;
2958 first = adapter->next_tx_to_clean;
2959 tx_desc = &adapter->tx_desc_base[first];
2960 tx_buffer = &adapter->tx_buffer_area[first];
2961 last = tx_buffer->next_eop;
2962 eop_desc = &adapter->tx_desc_base[last];
2965 * What this does is get the index of the
2966 * first descriptor AFTER the EOP of the
2967 * first packet, that way we can do the
2968 * simple comparison on the inner while loop.
2970 if (++last == adapter->num_tx_desc)
2974 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2975 BUS_DMASYNC_POSTREAD);
/* Outer loop: one completed packet per iteration. */
2977 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2978 /* We clean the range of the packet */
2979 while (first != done) {
2980 tx_desc->upper.data = 0;
2981 tx_desc->lower.data = 0;
2982 tx_desc->buffer_addr = 0;
2985 if (tx_buffer->m_head) {
2987 bus_dmamap_sync(adapter->txtag,
2989 BUS_DMASYNC_POSTWRITE);
2990 bus_dmamap_unload(adapter->txtag,
2993 m_freem(tx_buffer->m_head);
2994 tx_buffer->m_head = NULL;
2996 tx_buffer->next_eop = -1;
/* Progress made — pet the watchdog timestamp. */
2997 adapter->watchdog_time = ticks;
2999 if (++first == adapter->num_tx_desc)
3002 tx_buffer = &adapter->tx_buffer_area[first];
3003 tx_desc = &adapter->tx_desc_base[first];
3005 /* See if we can continue to the next packet */
3006 last = tx_buffer->next_eop;
3008 eop_desc = &adapter->tx_desc_base[last];
3009 /* Get new done point */
3010 if (++last == adapter->num_tx_desc) last = 0;
3015 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3016 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3018 adapter->next_tx_to_clean = first;
3019 adapter->num_tx_desc_avail = num_avail;
3022 * If we have enough room, clear IFF_DRV_OACTIVE to
3023 * tell the stack that it is OK to send packets.
3024 * If there are no pending descriptors, clear the watchdog.
3026 if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
3027 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3028 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
3029 adapter->watchdog_check = FALSE;
3035 /*********************************************************************
3037 * When Link is lost sometimes there is work still in the TX ring
3038 * which may result in a watchdog, rather than allow that we do an
3039 * attempted cleanup and then reinit here. Note that this has been
3040 seen mostly with fiber adapters.
3042 **********************************************************************/
/*
 * lem_tx_purge()
 *
 * If link is down while TX work is still outstanding, attempt a
 * cleanup under the TX lock; if work remains after that, reinitialize
 * the adapter rather than let the watchdog fire.
 */
3044 lem_tx_purge(struct adapter *adapter)
3046 if ((!adapter->link_active) && (adapter->watchdog_check)) {
3047 EM_TX_LOCK(adapter);
3049 EM_TX_UNLOCK(adapter);
3050 if (adapter->watchdog_check) /* Still outstanding? */
3051 lem_init_locked(adapter);
3055 /*********************************************************************
3057 * Get a buffer from system mbuf buffer pool.
3059 **********************************************************************/
/*
 * lem_get_buf()
 *
 * Refill RX ring slot "i" with a fresh mbuf cluster: allocate the
 * cluster, DMA-load it via the spare map, then swap the spare map with
 * the slot's map (so a load failure never leaves the slot unmapped)
 * and write the new bus address into the RX descriptor.
 */
3061 lem_get_buf(struct adapter *adapter, int i)
3064 bus_dma_segment_t segs[1];
3066 struct em_buffer *rx_buffer;
3069 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3071 adapter->mbuf_cluster_failed++;
3074 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Align the IP header when the frame fits with the 2-byte offset. */
3076 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3077 m_adj(m, ETHER_ALIGN);
3080 * Using memory from the mbuf cluster pool, invoke the
3081 * bus_dma machinery to arrange the memory mapping.
3083 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3084 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3090 /* If nsegs is wrong then the stack is corrupt. */
3091 KASSERT(nsegs == 1, ("Too many segments returned!"));
3093 rx_buffer = &adapter->rx_buffer_area[i];
3094 if (rx_buffer->m_head != NULL)
3095 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap maps: the freshly-loaded spare becomes the slot's map. */
3097 map = rx_buffer->map;
3098 rx_buffer->map = adapter->rx_sparemap;
3099 adapter->rx_sparemap = map;
3100 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3101 rx_buffer->m_head = m;
3103 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3107 /*********************************************************************
3109 * Allocate memory for rx_buffer structures. Since we use one
3110 * rx_buffer per received packet, the maximum number of rx_buffer's
3111 * that we'll need is equal to the number of receive descriptors
3112 * that we've allocated.
3114 **********************************************************************/
/*
 * lem_allocate_receive_structures()
 *
 * Allocate the rx_buffer array (one per RX descriptor), create the RX
 * mbuf DMA tag (single MCLBYTES segment), the spare map used by
 * lem_get_buf(), and a DMA map per buffer.  On failure the partial
 * allocation is torn down via lem_free_receive_structures().
 */
3116 lem_allocate_receive_structures(struct adapter *adapter)
3118 device_t dev = adapter->dev;
3119 struct em_buffer *rx_buffer;
3122 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3123 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3124 if (adapter->rx_buffer_area == NULL) {
3125 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3129 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3130 1, 0, /* alignment, bounds */
3131 BUS_SPACE_MAXADDR, /* lowaddr */
3132 BUS_SPACE_MAXADDR, /* highaddr */
3133 NULL, NULL, /* filter, filterarg */
3134 MCLBYTES, /* maxsize */
3136 MCLBYTES, /* maxsegsize */
3138 NULL, /* lockfunc */
3142 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3147 /* Create the spare map (used by getbuf) */
3148 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3149 &adapter->rx_sparemap);
3151 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* One DMA map per receive buffer slot. */
3156 rx_buffer = adapter->rx_buffer_area;
3157 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3158 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3161 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* Failure path: unwind everything created so far. */
3170 lem_free_receive_structures(adapter);
3174 /*********************************************************************
3176 * (Re)initialize receive structures.
3178 **********************************************************************/
/*
 * lem_setup_receive_structures()
 *
 * (Re)initialize the RX ring: zero the descriptors, free any mbufs
 * still held in rx_buffers, repopulate every slot via lem_get_buf(),
 * reset the scan index and sync the ring for the hardware.
 */
3180 lem_setup_receive_structures(struct adapter *adapter)
3182 struct em_buffer *rx_buffer;
3185 /* Reset descriptor ring */
3186 bzero(adapter->rx_desc_base,
3187 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3189 /* Free current RX buffers. */
3190 rx_buffer = adapter->rx_buffer_area;
3191 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3192 if (rx_buffer->m_head != NULL) {
3193 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3194 BUS_DMASYNC_POSTREAD);
3195 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3196 m_freem(rx_buffer->m_head);
3197 rx_buffer->m_head = NULL;
3201 /* Allocate new ones. */
3202 for (i = 0; i < adapter->num_rx_desc; i++) {
3203 error = lem_get_buf(adapter, i);
3208 /* Setup our descriptor pointers */
3209 adapter->next_rx_desc_to_check = 0;
3210 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3211 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3216 /*********************************************************************
3218 * Enable receive unit.
3220 **********************************************************************/
/* Interrupt throttling: ITR counts in 256ns units, so the register
 * value is 1e9 / (ints-per-sec * 256).
 * NOTE(review): DEFAULT_ITR's expansion is not fully parenthesized;
 * harmless in the call sites below, but fragile in other contexts. */
3221 #define MAX_INTS_PER_SEC 8000
3222 #define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256)
/*
 * lem_initialize_receive_unit()
 *
 * Program the MAC's RX registers: interrupt delay/throttle timers,
 * descriptor ring base/length, RCTL (buffer size, broadcast accept,
 * long-packet enable), RX checksum offload, and finally the head/tail
 * pointers, which makes all descriptors available to the hardware.
 */
3225 lem_initialize_receive_unit(struct adapter *adapter)
3227 struct ifnet *ifp = adapter->ifp;
3231 INIT_DEBUGOUT("lem_initialize_receive_unit: begin");
3234 * Make sure receives are disabled while setting
3235 * up the descriptor ring
3237 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3238 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
/* RADV/ITR registers exist on 82540 and newer. */
3240 if (adapter->hw.mac.type >= e1000_82540) {
3241 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
3242 adapter->rx_abs_int_delay.value);
3244 * Set the interrupt throttling rate. Value is calculated
3245 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3247 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
3251 ** When using MSIX interrupts we need to throttle
3252 ** using the EITR register (82574 only)
3255 for (int i = 0; i < 4; i++)
3256 E1000_WRITE_REG(&adapter->hw,
3257 E1000_EITR_82574(i), DEFAULT_ITR);
3259 /* Disable accelerated acknowledge */
3260 if (adapter->hw.mac.type == e1000_82574)
3261 E1000_WRITE_REG(&adapter->hw,
3262 E1000_RFCTL, E1000_RFCTL_ACK_DIS);
3264 /* Setup the Base and Length of the Rx Descriptor Ring */
3265 bus_addr = adapter->rxdma.dma_paddr;
3266 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3267 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3268 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3269 (u32)(bus_addr >> 32));
3270 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3273 /* Setup the Receive Control Register */
3274 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3275 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3276 E1000_RCTL_RDMTS_HALF |
3277 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3279 /* Make sure VLAN Filters are off */
3280 rctl &= ~E1000_RCTL_VFE;
/* 82543 TBI workaround may require accepting bad packets (SBP). */
3282 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3283 rctl |= E1000_RCTL_SBP;
3285 rctl &= ~E1000_RCTL_SBP;
/* Translate the configured buffer length into RCTL size bits. */
3287 switch (adapter->rx_buffer_len) {
3290 rctl |= E1000_RCTL_SZ_2048;
3293 rctl |= E1000_RCTL_SZ_4096 |
3294 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3297 rctl |= E1000_RCTL_SZ_8192 |
3298 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3301 rctl |= E1000_RCTL_SZ_16384 |
3302 E1000_RCTL_BSEX | E1000_RCTL_LPE;
/* Long Packet Enable tracks whether the MTU exceeds standard. */
3306 if (ifp->if_mtu > ETHERMTU)
3307 rctl |= E1000_RCTL_LPE;
3309 rctl &= ~E1000_RCTL_LPE;
3311 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
3312 if ((adapter->hw.mac.type >= e1000_82543) &&
3313 (ifp->if_capenable & IFCAP_RXCSUM)) {
3314 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3315 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3316 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3319 /* Enable Receives */
3320 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3323 * Setup the HW Rx Head and
3324 * Tail Descriptor Pointers
3326 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
3327 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
3332 /*********************************************************************
3334 * Free receive related data structures.
3336 **********************************************************************/
/*
 * lem_free_receive_structures()
 *
 * Tear down everything lem_allocate_receive_structures() built:
 * destroy the spare map, unload/destroy each slot's DMA map and free
 * attached mbufs, free the rx_buffer array, and destroy the RX DMA
 * tag.  NULL-guarded throughout so partial allocations unwind cleanly.
 */
3338 lem_free_receive_structures(struct adapter *adapter)
3340 struct em_buffer *rx_buffer;
3343 INIT_DEBUGOUT("free_receive_structures: begin");
3345 if (adapter->rx_sparemap) {
3346 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3347 adapter->rx_sparemap = NULL;
3350 /* Cleanup any existing buffers */
3351 if (adapter->rx_buffer_area != NULL) {
3352 rx_buffer = adapter->rx_buffer_area;
3353 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3354 if (rx_buffer->m_head != NULL) {
/* Sync + unload before freeing the mbuf under the map. */
3355 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3356 BUS_DMASYNC_POSTREAD);
3357 bus_dmamap_unload(adapter->rxtag,
3359 m_freem(rx_buffer->m_head);
3360 rx_buffer->m_head = NULL;
3361 } else if (rx_buffer->map != NULL)
3362 bus_dmamap_unload(adapter->rxtag,
3364 if (rx_buffer->map != NULL) {
3365 bus_dmamap_destroy(adapter->rxtag,
3367 rx_buffer->map = NULL;
3372 if (adapter->rx_buffer_area != NULL) {
3373 free(adapter->rx_buffer_area, M_DEVBUF);
3374 adapter->rx_buffer_area = NULL;
3377 if (adapter->rxtag != NULL) {
3378 bus_dma_tag_destroy(adapter->rxtag);
3379 adapter->rxtag = NULL;
3383 /*********************************************************************
3385 * This routine executes in interrupt context. It replenishes
3386 * the mbufs in the descriptor and sends data which has been
3387 * dma'ed into host memory to upper layer.
3389 * We loop at most count times if count is > 0, or until done if
3392 * For polling we also now return the number of cleaned packets
3393 *********************************************************************/
3395 lem_rxeof(struct adapter *adapter, int count, int *done)
/*
 * Fix: removed the stray second ';' after the declaration below (an empty
 * statement between declarations is invalid in strict C90 and was noise).
 */
3397 struct ifnet *ifp = adapter->ifp;
3399 u8 status = 0, accept_frame = 0, eop = 0;
3400 u16 len, desc_len, prev_len_adj;
3402 struct e1000_rx_desc *current_desc;
/* Pick up where the previous clean left off and make the ring view coherent. */
3404 EM_RX_LOCK(adapter);
3405 i = adapter->next_rx_desc_to_check;
3406 current_desc = &adapter->rx_desc_base[i];
3407 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3408 BUS_DMASYNC_POSTREAD);
/* Nothing ready: first descriptor's DD (descriptor-done) bit is clear. */
3410 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3413 EM_RX_UNLOCK(adapter);
/* Main clean loop; bounded by 'count' and by the interface being up. */
3417 while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3418 struct mbuf *m = NULL;
3420 status = current_desc->status;
3421 if ((status & E1000_RXD_STAT_DD) == 0)
3424 mp = adapter->rx_buffer_area[i].m_head;
3426 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3427 * needs to access the last received byte in the mbuf.
3429 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3430 BUS_DMASYNC_POSTREAD);
3434 desc_len = le16toh(current_desc->length);
/* End-of-packet descriptor: strip the trailing CRC from the length. */
3435 if (status & E1000_RXD_STAT_EOP) {
3438 if (desc_len < ETHER_CRC_LEN) {
3440 prev_len_adj = ETHER_CRC_LEN - desc_len;
3442 len = desc_len - ETHER_CRC_LEN;
/*
 * Hardware flagged an error.  On TBI (fiber) parts a carrier-extend
 * symbol can be a false CRC error, so give TBI_ACCEPT a chance to
 * rescue the frame and fix up the stats for 82543.
 */
3448 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3450 u32 pkt_len = desc_len;
3452 if (adapter->fmp != NULL)
3453 pkt_len += adapter->fmp->m_pkthdr.len;
3455 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3456 if (TBI_ACCEPT(&adapter->hw, status,
3457 current_desc->errors, pkt_len, last_byte,
3458 adapter->min_frame_size, adapter->max_frame_size)) {
3459 e1000_tbi_adjust_stats_82543(&adapter->hw,
3460 &adapter->stats, pkt_len,
3461 adapter->hw.mac.addr,
3462 adapter->max_frame_size);
/* Replenish this slot with a fresh mbuf; on failure the frame is dropped. */
3470 if (lem_get_buf(adapter, i) != 0) {
3475 /* Assign correct length to the current fragment */
/* First fragment starts a new chain; later fragments are appended. */
3478 if (adapter->fmp == NULL) {
3479 mp->m_pkthdr.len = len;
3480 adapter->fmp = mp; /* Store the first mbuf */
3483 /* Chain mbuf's together */
3484 mp->m_flags &= ~M_PKTHDR;
3486 * Adjust length of previous mbuf in chain if
3487 * we received less than 4 bytes in the last
3490 if (prev_len_adj > 0) {
3491 adapter->lmp->m_len -= prev_len_adj;
3492 adapter->fmp->m_pkthdr.len -=
3495 adapter->lmp->m_next = mp;
3496 adapter->lmp = adapter->lmp->m_next;
3497 adapter->fmp->m_pkthdr.len += len;
/* Complete frame: stamp receiving ifp, checksum flags, VLAN tag. */
3501 adapter->fmp->m_pkthdr.rcvif = ifp;
3503 lem_receive_checksum(adapter, current_desc,
3505 #ifndef __NO_STRICT_ALIGNMENT
3506 if (adapter->max_frame_size >
3507 (MCLBYTES - ETHER_ALIGN) &&
3508 lem_fixup_rx(adapter) != 0)
3511 if (status & E1000_RXD_STAT_VP) {
3512 adapter->fmp->m_pkthdr.ether_vtag =
3513 (le16toh(current_desc->special) &
3514 E1000_RXD_SPC_VLAN_MASK);
3515 adapter->fmp->m_flags |= M_VLANTAG;
3517 #ifndef __NO_STRICT_ALIGNMENT
3521 adapter->fmp = NULL;
3522 adapter->lmp = NULL;
/* Error/drop path: recycle the already-loaded DMA map and mbuf in place. */
3527 /* Reuse loaded DMA map and just update mbuf chain */
3528 mp = adapter->rx_buffer_area[i].m_head;
3529 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3530 mp->m_data = mp->m_ext.ext_buf;
3532 if (adapter->max_frame_size <=
3533 (MCLBYTES - ETHER_ALIGN))
3534 m_adj(mp, ETHER_ALIGN);
3535 if (adapter->fmp != NULL) {
3536 m_freem(adapter->fmp);
3537 adapter->fmp = NULL;
3538 adapter->lmp = NULL;
3543 /* Zero out the receive descriptors status. */
3544 current_desc->status = 0;
3545 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3546 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3548 /* Advance our pointers to the next descriptor. */
3549 if (++i == adapter->num_rx_desc)
3551 /* Call into the stack */
/*
 * Drop the RX lock around if_input(): the stack may re-enter the
 * driver, so the ring index is saved and re-read around the call.
 */
3553 adapter->next_rx_desc_to_check = i;
3554 EM_RX_UNLOCK(adapter);
3555 (*ifp->if_input)(ifp, m);
3556 EM_RX_LOCK(adapter);
3558 i = adapter->next_rx_desc_to_check;
3560 current_desc = &adapter->rx_desc_base[i];
3562 adapter->next_rx_desc_to_check = i;
3564 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
3566 i = adapter->num_rx_desc - 1;
3567 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3570 EM_RX_UNLOCK(adapter);
3571 return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
3574 #ifndef __NO_STRICT_ALIGNMENT
3576 * When jumbo frames are enabled we should realign entire payload on
3577 * architecures with strict alignment. This is serious design mistake of 8254x
3578 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3579 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3580 * payload. On architecures without strict alignment restrictions 8254x still
3581 * performs unaligned memory access which would reduce the performance too.
3582 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3583 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3584 * existing mbuf chain.
3586 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3587 * not used at all on architectures with strict alignment.
3590 lem_fixup_rx(struct adapter *adapter)
/*
 * Realign the received frame so the IP header lands on a natural boundary.
 * Small frame (fits after shifting): move payload up by ETHER_HDR_LEN in
 * place; otherwise allocate a header mbuf, copy the Ethernet header into it
 * and prepend it to the chain.  NOTE(review): 'm' and 'n' are declared on
 * lines elided from this view — presumably m = adapter->fmp; confirm.
 */
3597 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3598 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3599 m->m_data += ETHER_HDR_LEN;
/* Slow path: new mbuf for the header; M_DONTWAIT because we may be in intr. */
3601 MGETHDR(n, M_DONTWAIT, MT_DATA);
3603 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3604 m->m_data += ETHER_HDR_LEN;
3605 m->m_len -= ETHER_HDR_LEN;
3606 n->m_len = ETHER_HDR_LEN;
3607 M_MOVE_PKTHDR(n, m);
/* Allocation-failure path: count the drop and discard the whole chain. */
3611 adapter->dropped_pkts++;
3612 m_freem(adapter->fmp);
3613 adapter->fmp = NULL;
3622 /*********************************************************************
3624 * Verify that the hardware indicated that the checksum is valid.
3625 * Inform the stack about the status of checksum so that stack
3626 * doesn't spend time verifying the checksum.
3628 *********************************************************************/
3630 lem_receive_checksum(struct adapter *adapter,
3631 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
/*
 * Translate the RX descriptor's checksum status bits into mbuf
 * csum_flags so the stack can skip software verification.
 */
3633 /* 82543 or newer only */
3634 if ((adapter->hw.mac.type < e1000_82543) ||
3635 /* Ignore Checksum bit is set */
3636 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
/* Too old, or hardware told us to ignore: report nothing to the stack. */
3637 mp->m_pkthdr.csum_flags = 0;
/* IP header checksum was computed by hardware. */
3641 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3643 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3644 /* IP Checksum Good */
3645 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3646 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3649 mp->m_pkthdr.csum_flags = 0;
/* TCP/UDP checksum: mark data valid with a full pseudo-header result. */
3653 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3655 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3656 mp->m_pkthdr.csum_flags |=
3657 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3658 mp->m_pkthdr.csum_data = htons(0xffff);
3664 * This routine is run via an vlan
/* Event-handler: a VLAN id was attached to this interface. */
3668 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3670 struct adapter *adapter = ifp->if_softc;
3673 if (ifp->if_softc != arg) /* Not our event */
3676 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
/* Set the matching bit in the software shadow of the VLAN filter table. */
3679 EM_CORE_LOCK(adapter);
3680 index = (vtag >> 5) & 0x7F;
3682 adapter->shadow_vfta[index] |= (1 << bit);
3683 ++adapter->num_vlans;
3684 /* Re-init to load the changes */
3685 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3686 lem_init_locked(adapter);
3687 EM_CORE_UNLOCK(adapter);
3691 * This routine is run via an vlan
/* Event-handler: a VLAN id was detached; mirror of lem_register_vlan. */
3695 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3697 struct adapter *adapter = ifp->if_softc;
3700 if (ifp->if_softc != arg)
3703 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
/* Clear the bit in the shadow VLAN filter table and drop the count. */
3706 EM_CORE_LOCK(adapter);
3707 index = (vtag >> 5) & 0x7F;
3709 adapter->shadow_vfta[index] &= ~(1 << bit);
3710 --adapter->num_vlans;
3711 /* Re-init to load the changes */
3712 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3713 lem_init_locked(adapter);
3714 EM_CORE_UNLOCK(adapter);
3718 lem_setup_vlan_hw_support(struct adapter *adapter)
/*
 * Reprogram hardware VLAN state after a reset: repopulate the VFTA
 * from the software shadow, enable tag stripping (VME) and the
 * hardware VLAN filter, and widen RLPML for the extra tag bytes.
 */
3720 struct e1000_hw *hw = &adapter->hw;
3724 ** We get here thru init_locked, meaning
3725 ** a soft reset, this has already cleared
3726 ** the VFTA and other state, so if there
3727 ** have been no vlan's registered do nothing.
3729 if (adapter->num_vlans == 0)
3733 ** A soft reset zero's out the VFTA, so
3734 ** we need to repopulate it now.
3736 for (int i = 0; i < EM_VFTA_SIZE; i++)
3737 if (adapter->shadow_vfta[i] != 0)
3738 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3739 i, adapter->shadow_vfta[i]);
/* Turn on VLAN tag stripping/insertion. */
3741 reg = E1000_READ_REG(hw, E1000_CTRL);
3742 reg |= E1000_CTRL_VME;
3743 E1000_WRITE_REG(hw, E1000_CTRL, reg);
3745 /* Enable the Filter Table */
3746 reg = E1000_READ_REG(hw, E1000_RCTL);
3747 reg &= ~E1000_RCTL_CFIEN;
3748 reg |= E1000_RCTL_VFE;
3749 E1000_WRITE_REG(hw, E1000_RCTL, reg);
3751 /* Update the frame size */
3752 E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
3753 adapter->max_frame_size + VLAN_TAG_SIZE);
3757 lem_enable_intr(struct adapter *adapter)
/*
 * Unmask device interrupts.  Under MSI-X the auto-clear register and
 * mask are widened to cover the MSI-X vectors as well.
 */
3759 struct e1000_hw *hw = &adapter->hw;
3760 u32 ims_mask = IMS_ENABLE_MASK;
3762 if (adapter->msix) {
3763 E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
3764 ims_mask |= EM_MSIX_MASK;
3766 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
3770 lem_disable_intr(struct adapter *adapter)
/*
 * Mask all device interrupts via IMC.  NOTE(review): the EM_EIAC clear
 * is presumably guarded by an MSI-X check on a line elided here — confirm.
 */
3772 struct e1000_hw *hw = &adapter->hw;
3775 E1000_WRITE_REG(hw, EM_EIAC, 0);
3776 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
3780 * Bit of a misnomer, what this really means is
3781 * to enable OS management of the system... aka
3782 * to disable special hardware management features
3785 lem_init_manageability(struct adapter *adapter)
3787 /* A shared code workaround */
3788 if (adapter->has_manage) {
3789 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3790 /* disable hardware interception of ARP */
3791 manc &= ~(E1000_MANC_ARP_EN);
3792 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3797 * Give control back to hardware management
3798 * controller if there is one.
3801 lem_release_manageability(struct adapter *adapter)
3803 if (adapter->has_manage) {
3804 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3806 /* re-enable hardware interception of ARP */
3807 manc |= E1000_MANC_ARP_EN;
3808 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3813 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3814 * For ASF and Pass Through versions of f/w this means
3815 * that the driver is loaded. For AMT version type f/w
3816 * this means that the network i/f is open.
3819 lem_get_hw_control(struct adapter *adapter)
/* Read-modify-write CTRL_EXT to assert DRV_LOAD (driver owns the NIC). */
3823 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3824 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3825 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3830 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3831 * For ASF and Pass Through versions of f/w this means that
3832 * the driver is no longer loaded. For AMT versions of the
3833 * f/w this means that the network i/f is closed.
3836 lem_release_hw_control(struct adapter *adapter)
/* No management firmware present: nothing to hand control back to. */
3840 if (!adapter->has_manage)
3843 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3844 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3845 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3850 lem_is_valid_ether_addr(u8 *addr)
/*
 * Reject multicast/broadcast addresses (group bit, bit 0 of octet 0)
 * and the all-zero address; anything else is considered valid.
 */
3852 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3854 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3862 ** Parse the interface capabilities with regard
3863 ** to both system management and wake-on-lan for
3867 lem_get_wakeup(device_t dev)
3869 struct adapter *adapter = device_get_softc(dev);
3870 u16 eeprom_data = 0, device_id, apme_mask;
3872 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
3873 apme_mask = EM_EEPROM_APME;
/* Which NVM word holds the APM-enable bit depends on the MAC generation. */
3875 switch (adapter->hw.mac.type) {
3880 e1000_read_nvm(&adapter->hw,
3881 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
3882 apme_mask = EM_82544_APME;
3885 case e1000_82546_rev_3:
/* Dual-port parts keep per-port init words; pick by PCI function. */
3886 if (adapter->hw.bus.func == 1) {
3887 e1000_read_nvm(&adapter->hw,
3888 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3891 e1000_read_nvm(&adapter->hw,
3892 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3895 e1000_read_nvm(&adapter->hw,
3896 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
/* EEPROM says APM wakeup is on: default to magic-packet + multicast. */
3899 if (eeprom_data & apme_mask)
3900 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
3902 * We have the eeprom settings, now apply the special cases
3903 * where the eeprom may be wrong or the board won't support
3904 * wake on lan on a particular port
3906 device_id = pci_get_device(dev);
3907 switch (device_id) {
3908 case E1000_DEV_ID_82546GB_PCIE:
3911 case E1000_DEV_ID_82546EB_FIBER:
3912 case E1000_DEV_ID_82546GB_FIBER:
3913 /* Wake events only supported on port A for dual fiber
3914 * regardless of eeprom setting */
3915 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
3916 E1000_STATUS_FUNC_1)
3919 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
3920 /* if quad port adapter, disable WoL on all but port A */
3921 if (global_quad_port_a != 0)
3923 /* Reset for multiple quad port adapters */
3924 if (++global_quad_port_a == 4)
3925 global_quad_port_a = 0;
3933 * Enable PCI Wake On Lan capability
3936 lem_enable_wakeup(device_t dev)
3938 struct adapter *adapter = device_get_softc(dev);
3939 struct ifnet *ifp = adapter->ifp;
3940 u32 pmc, ctrl, ctrl_ext, rctl;
/* No PCI power-management capability: WoL cannot be armed. */
3943 if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
3946 /* Advertise the wakeup capability */
3947 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
3948 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
3949 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
3950 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
3952 /* Keep the laser running on Fiber adapters */
3953 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
3954 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
3955 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3956 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
3957 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
3961 ** Determine type of Wakeup: note that wol
3962 ** is set with all bits on by default.
3964 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
3965 adapter->wol &= ~E1000_WUFC_MAG;
3967 if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
3968 adapter->wol &= ~E1000_WUFC_MC;
/* NOTE(review): MPE (promiscuous multicast) appears to be enabled here,
 * presumably under a WOL_MCAST condition elided from this view — confirm. */
3970 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3971 rctl |= E1000_RCTL_MPE;
3972 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
/* pchlan parts arm wakeup through the PHY instead of the MAC. */
3975 if (adapter->hw.mac.type == e1000_pchlan) {
3976 if (lem_enable_phy_wakeup(adapter))
3979 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
3980 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
/* Finally request PME assertion from PCI power management config space. */
3985 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
3986 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3987 if (ifp->if_capenable & IFCAP_WOL)
3988 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3989 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
3995 ** WOL in the newer chipset interfaces (pchlan)
3996 ** require thing to be copied into the phy
3999 lem_enable_phy_wakeup(struct adapter *adapter)
4001 struct e1000_hw *hw = &adapter->hw;
4005 /* copy MAC RARs to PHY RARs */
4006 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4007 mreg = E1000_READ_REG(hw, E1000_RAL(i));
4008 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
4009 e1000_write_phy_reg(hw, BM_RAR_M(i),
4010 (u16)((mreg >> 16) & 0xFFFF));
4011 mreg = E1000_READ_REG(hw, E1000_RAH(i));
4012 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
4013 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
4014 (u16)((mreg >> 16) & 0xFFFF));
4017 /* copy MAC MTA to PHY MTA */
4018 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4019 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4020 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4021 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4022 (u16)((mreg >> 16) & 0xFFFF));
4025 /* configure PHY Rx Control register */
/* Mirror the MAC's RCTL filtering bits into the PHY's BM_RCTL. */
4026 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
4027 mreg = E1000_READ_REG(hw, E1000_RCTL);
4028 if (mreg & E1000_RCTL_UPE)
4029 preg |= BM_RCTL_UPE;
4030 if (mreg & E1000_RCTL_MPE)
4031 preg |= BM_RCTL_MPE;
4032 preg &= ~(BM_RCTL_MO_MASK);
4033 if (mreg & E1000_RCTL_MO_3)
4034 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4035 << BM_RCTL_MO_SHIFT);
4036 if (mreg & E1000_RCTL_BAM)
4037 preg |= BM_RCTL_BAM;
4038 if (mreg & E1000_RCTL_PMCF)
4039 preg |= BM_RCTL_PMCF;
4040 mreg = E1000_READ_REG(hw, E1000_CTRL);
4041 if (mreg & E1000_CTRL_RFCE)
4042 preg |= BM_RCTL_RFCE;
4043 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
4045 /* enable PHY wakeup in MAC register */
4046 E1000_WRITE_REG(hw, E1000_WUC,
4047 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4048 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
4050 /* configure and enable PHY wakeup in PHY registers */
4051 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
4052 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4054 /* activate PHY wakeup */
/* Raw MDIC accesses below require exclusive PHY ownership. */
4055 ret = hw->phy.ops.acquire(hw);
4057 printf("Could not acquire PHY\n");
/* Select wakeup-control page 769 and set the host-wakeup enable bits. */
4060 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4061 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4062 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
4064 printf("Could not read PHY page 769\n");
4067 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4068 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
4070 printf("Could not set PHY Host Wakeup bit\n");
4072 hw->phy.ops.release(hw);
4078 lem_led_func(void *arg, int onoff)
/* led(4) callback: drive the identify LED on or off under the core lock. */
4080 struct adapter *adapter = arg;
4082 EM_CORE_LOCK(adapter);
4084 e1000_setup_led(&adapter->hw);
4085 e1000_led_on(&adapter->hw);
4087 e1000_led_off(&adapter->hw);
4088 e1000_cleanup_led(&adapter->hw);
4090 EM_CORE_UNLOCK(adapter);
4093 /*********************************************************************
4094 * 82544 Coexistence issue workaround.
4095 * There are 2 issues.
4096 * 1. Transmit Hang issue.
4097 * To detect this issue, following equation can be used...
4098 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4099 * If SUM[3:0] is in between 1 to 4, we will have this issue.
4102 * To detect this issue, following equation can be used...
4103 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4104 * If SUM[3:0] is in between 9 to c, we will have this issue.
4108 * Make sure we do not have ending address
4109 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4111 *************************************************************************/
4113 lem_fill_descriptors (bus_addr_t address, u32 length,
4114 PDESC_ARRAY desc_array)
/*
 * Split a TX segment whose low address/length bits would trigger the
 * 82544 hang/DAC errata into two descriptors; otherwise pass it through
 * as a single descriptor.  Returns the number of descriptors filled.
 */
4116 u32 safe_terminator;
4118 /* Since issue is sensitive to length and address.*/
4119 /* Let us first check the address...*/
4121 desc_array->descriptor[0].address = address;
4122 desc_array->descriptor[0].length = length;
4123 desc_array->elements = 1;
4124 return (desc_array->elements);
/* SUM[3:0] = ADDR[2:0] + SIZE[3:0], per the errata formula above. */
4126 safe_terminator = (u32)((((u32)address & 0x7) +
4127 (length & 0xF)) & 0xF);
4128 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4129 if (safe_terminator == 0 ||
4130 (safe_terminator > 4 &&
4131 safe_terminator < 9) ||
4132 (safe_terminator > 0xC &&
4133 safe_terminator <= 0xF)) {
4134 desc_array->descriptor[0].address = address;
4135 desc_array->descriptor[0].length = length;
4136 desc_array->elements = 1;
4137 return (desc_array->elements);
/* Problem range: peel the last 4 bytes off into a second descriptor. */
4140 desc_array->descriptor[0].address = address;
4141 desc_array->descriptor[0].length = length - 4;
4142 desc_array->descriptor[1].address = address + (length - 4);
4143 desc_array->descriptor[1].length = 4;
4144 desc_array->elements = 2;
4145 return (desc_array->elements);
4148 /**********************************************************************
4150 * Update the board statistics counters.
4152 **********************************************************************/
4154 lem_update_stats_counters(struct adapter *adapter)
/*
 * Harvest the hardware's read-to-clear statistics registers into the
 * accumulating soft copy, then derive the ifnet error/collision totals.
 */
4158 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
4159 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4160 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4161 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4163 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4164 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4165 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4166 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4168 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4169 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4170 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4171 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4172 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4173 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4174 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4175 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4176 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4177 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4178 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4179 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4180 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4181 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4182 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4183 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4184 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4185 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4186 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4187 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4189 /* For the 64-bit byte counters the low dword must be read first. */
4190 /* Both registers clear on the read of the high dword */
4192 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
4193 ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
4194 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
4195 ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
4197 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
4198 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
4199 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
4200 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
4201 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
/* NOTE(review): only the high halves (TORH/TOTH) are read here — the low
 * halves are presumably read on lines elided from this view; confirm. */
4203 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
4204 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
4206 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
4207 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
4208 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
4209 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
4210 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
4211 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
4212 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
4213 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
4214 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
4215 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* Counters that only exist on 82543 and newer MACs. */
4217 if (adapter->hw.mac.type >= e1000_82543) {
4218 adapter->stats.algnerrc +=
4219 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
4220 adapter->stats.rxerrc +=
4221 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
4222 adapter->stats.tncrs +=
4223 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
4224 adapter->stats.cexterr +=
4225 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
4226 adapter->stats.tsctc +=
4227 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
4228 adapter->stats.tsctfc +=
4229 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
/* Fold the harvested counters into the ifnet-visible aggregates. */
4233 ifp->if_collisions = adapter->stats.colc;
4236 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
4237 adapter->stats.crcerrs + adapter->stats.algnerrc +
4238 adapter->stats.ruc + adapter->stats.roc +
4239 adapter->stats.mpc + adapter->stats.cexterr;
4242 ifp->if_oerrors = adapter->stats.ecol +
4243 adapter->stats.latecol + adapter->watchdog_events;
4246 /* Export a single 32-bit register via a read-only sysctl. */
4248 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4250 struct adapter *adapter;
/* arg1 = adapter, arg2 = register offset; read it live on each query. */
4253 adapter = oidp->oid_arg1;
4254 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4255 return (sysctl_handle_int(oidp, &val, 0, req));
4259 * Add sysctl variables, one per statistic, to the system.
4262 lem_add_hw_stats(struct adapter *adapter)
/*
 * Publish the driver's soft counters, a few live registers, and the
 * accumulated MAC statistics (under a "mac_stats" child node) as
 * read-only sysctls beneath this device's sysctl tree.
 */
4264 device_t dev = adapter->dev;
4266 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4267 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4268 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4269 struct e1000_hw_stats *stats = &adapter->stats;
4271 struct sysctl_oid *stat_node;
4272 struct sysctl_oid_list *stat_list;
4274 /* Driver Statistics */
4275 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail",
4276 CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4278 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
4279 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4280 "Std mbuf cluster failed");
4281 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4282 CTLFLAG_RD, &adapter->dropped_pkts,
4283 "Driver dropped packets");
4284 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
4285 CTLFLAG_RD, &adapter->no_tx_dma_setup,
4286 "Driver tx dma failure in xmit");
4287 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4288 CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4289 "Not enough tx descriptors failure in xmit");
4290 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4291 CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4292 "Not enough tx descriptors failure in xmit");
4293 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4294 CTLFLAG_RD, &adapter->rx_overruns,
4296 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4297 CTLFLAG_RD, &adapter->watchdog_events,
4298 "Watchdog timeouts");
/* Live register views, fetched on demand via lem_sysctl_reg_handler. */
4300 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4301 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4302 lem_sysctl_reg_handler, "IU",
4303 "Device Control Register");
4304 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4305 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4306 lem_sysctl_reg_handler, "IU",
4307 "Receiver Control Register");
4308 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4309 CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4310 "Flow Control High Watermark");
4311 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4312 CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4313 "Flow Control Low Watermark");
4314 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4315 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4316 "TX FIFO workaround events");
4317 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4318 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4321 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head",
4322 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4323 lem_sysctl_reg_handler, "IU",
4324 "Transmit Descriptor Head");
4325 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail",
4326 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4327 lem_sysctl_reg_handler, "IU",
4328 "Transmit Descriptor Tail");
4329 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head",
4330 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4331 lem_sysctl_reg_handler, "IU",
4332 "Receive Descriptor Head");
4333 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail",
4334 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4335 lem_sysctl_reg_handler, "IU",
4336 "Receive Descriptor Tail");
4339 /* MAC stats get their own sub node */
4341 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4342 CTLFLAG_RD, NULL, "Statistics");
4343 stat_list = SYSCTL_CHILDREN(stat_node);
4345 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4346 CTLFLAG_RD, &stats->ecol,
4347 "Excessive collisions");
4348 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4349 CTLFLAG_RD, &stats->scc,
4350 "Single collisions");
4351 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4352 CTLFLAG_RD, &stats->mcc,
4353 "Multiple collisions");
4354 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4355 CTLFLAG_RD, &stats->latecol,
4357 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4358 CTLFLAG_RD, &stats->colc,
4360 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4361 CTLFLAG_RD, &adapter->stats.symerrs,
4363 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4364 CTLFLAG_RD, &adapter->stats.sec,
4366 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4367 CTLFLAG_RD, &adapter->stats.dc,
4369 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4370 CTLFLAG_RD, &adapter->stats.mpc,
4372 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4373 CTLFLAG_RD, &adapter->stats.rnbc,
4374 "Receive No Buffers");
4375 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4376 CTLFLAG_RD, &adapter->stats.ruc,
4377 "Receive Undersize");
4378 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4379 CTLFLAG_RD, &adapter->stats.rfc,
4380 "Fragmented Packets Received ");
4381 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4382 CTLFLAG_RD, &adapter->stats.roc,
4383 "Oversized Packets Received");
4384 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4385 CTLFLAG_RD, &adapter->stats.rjc,
4387 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4388 CTLFLAG_RD, &adapter->stats.rxerrc,
4390 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4391 CTLFLAG_RD, &adapter->stats.crcerrs,
4393 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4394 CTLFLAG_RD, &adapter->stats.algnerrc,
4395 "Alignment Errors");
4396 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4397 CTLFLAG_RD, &adapter->stats.cexterr,
4398 "Collision/Carrier extension errors");
4399 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4400 CTLFLAG_RD, &adapter->stats.xonrxc,
4402 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4403 CTLFLAG_RD, &adapter->stats.xontxc,
4405 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4406 CTLFLAG_RD, &adapter->stats.xoffrxc,
4408 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4409 CTLFLAG_RD, &adapter->stats.xofftxc,
4410 "XOFF Transmitted");
4412 /* Packet Reception Stats */
4413 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4414 CTLFLAG_RD, &adapter->stats.tpr,
4415 "Total Packets Received ");
4416 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4417 CTLFLAG_RD, &adapter->stats.gprc,
4418 "Good Packets Received");
4419 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4420 CTLFLAG_RD, &adapter->stats.bprc,
4421 "Broadcast Packets Received");
4422 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4423 CTLFLAG_RD, &adapter->stats.mprc,
4424 "Multicast Packets Received");
4425 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4426 CTLFLAG_RD, &adapter->stats.prc64,
4427 "64 byte frames received ");
4428 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4429 CTLFLAG_RD, &adapter->stats.prc127,
4430 "65-127 byte frames received");
4431 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4432 CTLFLAG_RD, &adapter->stats.prc255,
4433 "128-255 byte frames received");
4434 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4435 CTLFLAG_RD, &adapter->stats.prc511,
4436 "256-511 byte frames received");
4437 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4438 CTLFLAG_RD, &adapter->stats.prc1023,
4439 "512-1023 byte frames received");
4440 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4441 CTLFLAG_RD, &adapter->stats.prc1522,
4442 "1023-1522 byte frames received");
4443 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4444 CTLFLAG_RD, &adapter->stats.gorc,
4445 "Good Octets Received");
4447 /* Packet Transmission Stats */
4448 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4449 CTLFLAG_RD, &adapter->stats.gotc,
4450 "Good Octets Transmitted");
4451 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4452 CTLFLAG_RD, &adapter->stats.tpt,
4453 "Total Packets Transmitted");
4454 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4455 CTLFLAG_RD, &adapter->stats.gptc,
4456 "Good Packets Transmitted");
4457 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4458 CTLFLAG_RD, &adapter->stats.bptc,
4459 "Broadcast Packets Transmitted");
4460 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4461 CTLFLAG_RD, &adapter->stats.mptc,
4462 "Multicast Packets Transmitted");
4463 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4464 CTLFLAG_RD, &adapter->stats.ptc64,
4465 "64 byte frames transmitted ");
4466 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4467 CTLFLAG_RD, &adapter->stats.ptc127,
4468 "65-127 byte frames transmitted");
4469 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4470 CTLFLAG_RD, &adapter->stats.ptc255,
4471 "128-255 byte frames transmitted");
4472 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4473 CTLFLAG_RD, &adapter->stats.ptc511,
4474 "256-511 byte frames transmitted");
4475 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4476 CTLFLAG_RD, &adapter->stats.ptc1023,
4477 "512-1023 byte frames transmitted");
4478 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4479 CTLFLAG_RD, &adapter->stats.ptc1522,
4480 "1024-1522 byte frames transmitted");
4481 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4482 CTLFLAG_RD, &adapter->stats.tsctc,
4483 "TSO Contexts Transmitted");
4484 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4485 CTLFLAG_RD, &adapter->stats.tsctfc,
4486 "TSO Contexts Failed");
4489 /**********************************************************************
4491 * This routine provides a way to dump out the adapter EEPROM,
4492 * often a useful debug/service tool. It dumps only the first
4493 * 32 words, since the data that matters lies within that range.
4495 **********************************************************************/
/*
 * lem_sysctl_nvm_info - sysctl handler for the EEPROM-dump knob.
 *
 * Copies the tunable's integer in/out of the request; a write of the
 * trigger value causes a hex dump of the first 32 16-bit EEPROM words
 * via lem_print_nvm_info(). Returns the sysctl_handle_int() status.
 */
4498 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4500 struct adapter *adapter;
/* Export the current value; bail out on error or a read-only access. */
4505 error = sysctl_handle_int(oidp, &result, 0, req);
4507 if (error || !req->newptr)
4511 * This value will cause a hex dump of the
4512 * first 32 16-bit words of the EEPROM to
/* arg1 is the adapter cookie registered with this sysctl. */
4516 adapter = (struct adapter *)arg1;
4517 lem_print_nvm_info(adapter);
/*
 * lem_print_nvm_info - dump the first 32 16-bit EEPROM words to the
 * console as a hex table, eight words per row with an offset label.
 */
4524 lem_print_nvm_info(struct adapter *adapter)
4529 /* It's a bit crude, but it gets the job done */
4530 printf("\nInterface EEPROM Dump:\n");
4531 printf("Offset\n0x0000 ");
/* i counts words read; j counts words in the current row of eight. */
4532 for (i = 0, j = 0; i < 32; i++, j++) {
4533 if (j == 8) { /* Make the offset block */
/* Start a new row and print its offset label. */
4535 printf("\n0x00%x0 ",row);
/* Read one 16-bit word at word-offset i from the NVM. */
4537 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4538 printf("%04x ", eeprom_data);
/*
 * lem_sysctl_int_delay - sysctl handler for an interrupt-delay tunable.
 *
 * Converts the user-supplied microsecond value to device ticks and
 * writes it into the low 16 bits of the delay register identified by
 * info->offset, under the core lock. The TIDV register gets special
 * handling (see the switch and the TXD_CMD_IDE updates below).
 */
4544 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4546 struct em_int_delay_info *info;
4547 struct adapter *adapter;
/* arg1 is the per-tunable cookie seeded by lem_add_int_delay_sysctl(). */
4553 info = (struct em_int_delay_info *)arg1;
4554 usecs = info->value;
/* Export current value; a pure read returns without touching hardware. */
4555 error = sysctl_handle_int(oidp, &usecs, 0, req);
4556 if (error != 0 || req->newptr == NULL)
/* Reject values whose tick conversion would not fit in 16 bits. */
4558 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4560 info->value = usecs;
4561 ticks = EM_USECS_TO_TICKS(usecs);
4563 adapter = info->adapter;
4565 EM_CORE_LOCK(adapter);
/* Only the low 16 bits of the register carry the delay value. */
4566 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4567 regval = (regval & ~0xffff) | (ticks & 0xffff);
4568 /* Handle a few special cases. */
4569 switch (info->offset) {
/* A zero TX delay: clear the IDE bit in the descriptor command... */
4574 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4575 /* Don't write 0 into the TIDV register. */
/* ...otherwise enable interrupt-delay on transmit descriptors. */
4578 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4581 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4582 EM_CORE_UNLOCK(adapter);
/*
 * lem_add_int_delay_sysctl - register one interrupt-delay tunable.
 *
 * Seeds the em_int_delay_info cookie with the adapter, register offset
 * and initial value, then attaches a read-write integer sysctl under
 * the device's tree, serviced by lem_sysctl_int_delay().
 */
4587 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4588 const char *description, struct em_int_delay_info *info,
4589 int offset, int value)
4591 info->adapter = adapter;
4592 info->offset = offset;
4593 info->value = value;
/* The cookie (info) is passed back to the handler as arg1. */
4594 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4595 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4596 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4597 info, 0, lem_sysctl_int_delay, "I", description);
/*
 * lem_set_flow_cntrl - expose a flow-control limit as a sysctl.
 *
 * Attaches a read-write integer sysctl backed directly by *limit under
 * the device's sysctl tree.
 * NOTE(review): CTLTYPE_INT looks redundant here — SYSCTL_ADD_INT
 * already fixes the OID type; confirm against this tree's sysctl(9)
 * conventions before changing it.
 */
4601 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4602 const char *description, int *limit, int value)
4605 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4606 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4607 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4610 #ifndef EM_LEGACY_IRQ
4612 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4613 const char *description, int *limit, int value)
4616 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4617 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4618 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);