1 /******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
38 #include "opt_inet6.h"
41 #include <sys/param.h>
42 #include <sys/systm.h>
44 #include <sys/endian.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/malloc.h>
49 #include <sys/module.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <sys/taskqueue.h>
55 #include <sys/eventhandler.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <net/ethernet.h>
62 #include <net/if_arp.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
66 #include <net/if_types.h>
67 #include <net/if_vlan_var.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/in.h>
71 #include <netinet/if_ether.h>
72 #include <netinet/ip.h>
73 #include <netinet/ip6.h>
74 #include <netinet/tcp.h>
75 #include <netinet/udp.h>
77 #include <machine/in_cksum.h>
78 #include <dev/led/led.h>
79 #include <dev/pci/pcivar.h>
80 #include <dev/pci/pcireg.h>
82 #include "e1000_api.h"
85 /*********************************************************************
86 * Legacy Em Driver version:
87 *********************************************************************/
/* Driver version string exposed to userland; bump on functional change. */
88 char lem_driver_version[] = "1.0.6";
90 /*********************************************************************
93 * Used by probe to select devices to load on
94 * Last field stores an index into e1000_strings
95 * Last entry must be all 0s
97 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
98 *********************************************************************/
/*
 * PCI ID match table consumed by lem_probe(): each row is
 * { vendor, device, subvendor, subdevice, index into lem_strings }.
 * PCI_ANY_ID wildcards the sub-IDs.  lem_probe() walks this table
 * until it hits the all-zeros terminator entry.
 */
100 static em_vendor_info_t lem_vendor_info_array[] =
102 /* Intel(R) PRO/1000 Network Connection */
103 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
104 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
105 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
106 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
107 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
109 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
110 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
111 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
112 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
113 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
128 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
142 PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
147 /* required last entry */
/* NOTE(review): the all-zeros terminator row and closing brace appear to be
 * elided from this excerpt — confirm against the full source file. */
151 /*********************************************************************
152 * Table of branding strings for all supported NICs.
153 *********************************************************************/
/* Branding strings indexed by the last field of lem_vendor_info_array rows. */
155 static char *lem_strings[] = {
156 "Intel(R) PRO/1000 Legacy Network Connection"
/* NOTE(review): closing "};" elided from this excerpt. */
159 /*********************************************************************
160 * Function prototypes
161 *********************************************************************/
/* Device lifecycle (newbus methods). */
162 static int lem_probe(device_t);
163 static int lem_attach(device_t);
164 static int lem_detach(device_t);
165 static int lem_shutdown(device_t);
166 static int lem_suspend(device_t);
167 static int lem_resume(device_t);
/* ifnet entry points: transmit start, ioctl, init/stop, media. */
168 static void lem_start(struct ifnet *);
169 static void lem_start_locked(struct ifnet *ifp);
170 static int lem_ioctl(struct ifnet *, u_long, caddr_t);
171 static void lem_init(void *);
172 static void lem_init_locked(struct adapter *);
173 static void lem_stop(void *);
174 static void lem_media_status(struct ifnet *, struct ifmediareq *);
175 static int lem_media_change(struct ifnet *);
/* Hardware bring-up and resource management. */
176 static void lem_identify_hardware(struct adapter *);
177 static int lem_allocate_pci_resources(struct adapter *);
178 static int lem_allocate_irq(struct adapter *adapter);
179 static void lem_free_pci_resources(struct adapter *);
180 static void lem_local_timer(void *);
181 static int lem_hardware_init(struct adapter *);
182 static int lem_setup_interface(device_t, struct adapter *);
/* TX/RX ring setup, teardown, and completion processing. */
183 static void lem_setup_transmit_structures(struct adapter *);
184 static void lem_initialize_transmit_unit(struct adapter *);
185 static int lem_setup_receive_structures(struct adapter *);
186 static void lem_initialize_receive_unit(struct adapter *);
187 static void lem_enable_intr(struct adapter *);
188 static void lem_disable_intr(struct adapter *);
189 static void lem_free_transmit_structures(struct adapter *);
190 static void lem_free_receive_structures(struct adapter *);
191 static void lem_update_stats_counters(struct adapter *);
192 static void lem_add_hw_stats(struct adapter *adapter);
193 static void lem_txeof(struct adapter *);
194 static void lem_tx_purge(struct adapter *);
195 static int lem_allocate_receive_structures(struct adapter *);
196 static int lem_allocate_transmit_structures(struct adapter *);
197 static bool lem_rxeof(struct adapter *, int, int *);
198 #ifndef __NO_STRICT_ALIGNMENT
199 static int lem_fixup_rx(struct adapter *);
/* NOTE(review): matching #endif and the tail of the next two prototypes
 * are elided from this excerpt. */
201 static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
203 static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
/* Filtering, link, VLAN, and chip-specific workarounds. */
205 static void lem_set_promisc(struct adapter *);
206 static void lem_disable_promisc(struct adapter *);
207 static void lem_set_multi(struct adapter *);
208 static void lem_update_link_status(struct adapter *);
209 static int lem_get_buf(struct adapter *, int);
210 static void lem_register_vlan(void *, struct ifnet *, u16);
211 static void lem_unregister_vlan(void *, struct ifnet *, u16);
212 static void lem_setup_vlan_hw_support(struct adapter *);
213 static int lem_xmit(struct adapter *, struct mbuf **);
214 static void lem_smartspeed(struct adapter *);
215 static int lem_82547_fifo_workaround(struct adapter *, int);
216 static void lem_82547_update_fifo_head(struct adapter *, int);
217 static int lem_82547_tx_fifo_reset(struct adapter *);
218 static void lem_82547_move_tail(void *);
/* DMA helpers, NVM/sysctl plumbing, tunables. */
219 static int lem_dma_malloc(struct adapter *, bus_size_t,
220 struct em_dma_alloc *, int);
221 static void lem_dma_free(struct adapter *, struct em_dma_alloc *);
222 static int lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
223 static void lem_print_nvm_info(struct adapter *);
224 static int lem_is_valid_ether_addr(u8 *);
225 static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
226 PDESC_ARRAY desc_array);
227 static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
228 static void lem_add_int_delay_sysctl(struct adapter *, const char *,
229 const char *, struct em_int_delay_info *, int, int);
230 static void lem_set_flow_cntrl(struct adapter *, const char *,
231 const char *, int *, int);
232 /* Management and WOL Support */
233 static void lem_init_manageability(struct adapter *);
234 static void lem_release_manageability(struct adapter *);
235 static void lem_get_hw_control(struct adapter *);
236 static void lem_release_hw_control(struct adapter *);
237 static void lem_get_wakeup(device_t);
238 static void lem_enable_wakeup(device_t);
239 static int lem_enable_phy_wakeup(struct adapter *);
240 static void lem_led_func(void *, int);
/* Interrupt handlers and deferred (taskqueue) processing. */
242 static void lem_intr(void *);
243 static int lem_irq_fast(void *);
244 static void lem_handle_rxtx(void *context, int pending);
245 static void lem_handle_link(void *context, int pending);
246 static void lem_add_rx_process_limit(struct adapter *, const char *,
247 const char *, int *, int);
249 #ifdef DEVICE_POLLING
250 static poll_handler_t lem_poll;
/* NOTE(review): matching #endif elided from this excerpt. */
253 /*********************************************************************
254 * FreeBSD Device Interface Entry Points
255 *********************************************************************/
/*
 * Newbus glue: method table, driver descriptor, and module registration.
 * The driver registers under the "em" name and shares em_devclass with
 * the modern em(4) driver so both can coexist.
 */
257 static device_method_t lem_methods[] = {
258 /* Device interface */
259 DEVMETHOD(device_probe, lem_probe),
260 DEVMETHOD(device_attach, lem_attach),
261 DEVMETHOD(device_detach, lem_detach),
262 DEVMETHOD(device_shutdown, lem_shutdown),
263 DEVMETHOD(device_suspend, lem_suspend),
264 DEVMETHOD(device_resume, lem_resume),
/* NOTE(review): method-table terminator and closing "};" elided here. */
268 static driver_t lem_driver = {
269 "em", lem_methods, sizeof(struct adapter),
272 extern devclass_t em_devclass;
273 DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
274 MODULE_DEPEND(lem, pci, 1, 1, 1);
275 MODULE_DEPEND(lem, ether, 1, 1, 1);
277 /*********************************************************************
278 * Tunable default values.
279 *********************************************************************/
/* Convert between hardware delay register ticks (1.024 us units) and usecs. */
281 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
282 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
/* ITR default: cap interrupt rate at MAX_INTS_PER_SEC (units of 256 ns). */
284 #define MAX_INTS_PER_SEC 8000
285 #define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256))
/* Loader-tunable defaults for interrupt moderation and ring sizes. */
287 static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
288 static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
289 static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
290 static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
291 static int lem_rxd = EM_DEFAULT_RXD;
292 static int lem_txd = EM_DEFAULT_TXD;
293 static int lem_smart_pwr_down = FALSE;
295 /* Controls whether promiscuous also shows bad packets */
296 static int lem_debug_sbp = FALSE;
/* Note: tunables live under hw.em.*, shared namespace with em(4). */
298 TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
299 TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
300 TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
301 TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
302 TUNABLE_INT("hw.em.rxd", &lem_rxd);
303 TUNABLE_INT("hw.em.txd", &lem_txd);
304 TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
305 TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
307 /* Interrupt style - default to fast */
308 static int lem_use_legacy_irq = 0;
309 TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq);
311 /* How many packets rxeof tries to clean at a time */
312 static int lem_rx_process_limit = 100;
313 TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
315 /* Flow control setting - default to FULL */
316 static int lem_fc_setting = e1000_fc_full;
317 TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
319 /* Global used in WOL setup with multiport cards */
320 static int global_quad_port_a = 0;
322 #ifdef DEV_NETMAP /* see ixgbe.c for details */
323 #include <dev/netmap/if_lem_netmap.h>
324 #endif /* DEV_NETMAP */
326 /*********************************************************************
327 * Device identification routine
329 * em_probe determines if the driver should be loaded on
330 * adapter based on PCI vendor/device id of the adapter.
332 * return BUS_PROBE_DEFAULT on success, positive on failure
333 *********************************************************************/
/*
 * lem_probe - decide whether this driver handles the given PCI device.
 *
 * Reads the device's PCI vendor/device/subvendor/subdevice IDs and walks
 * lem_vendor_info_array (terminated by a zero vendor_id) looking for a
 * match; PCI_ANY_ID wildcards the sub-IDs.  On match, sets the device
 * description from lem_strings and returns BUS_PROBE_DEFAULT.
 *
 * NOTE(review): the "static int" line, braces, early-return for non-Intel
 * vendor, loop-advance, and the final failure return are elided from this
 * excerpt — verify against the full source.
 */
336 lem_probe(device_t dev)
338 char adapter_name[60];
339 u16 pci_vendor_id = 0;
340 u16 pci_device_id = 0;
341 u16 pci_subvendor_id = 0;
342 u16 pci_subdevice_id = 0;
343 em_vendor_info_t *ent;
345 INIT_DEBUGOUT("em_probe: begin");
347 pci_vendor_id = pci_get_vendor(dev);
/* Bail out fast for non-Intel devices. */
348 if (pci_vendor_id != EM_VENDOR_ID)
351 pci_device_id = pci_get_device(dev);
352 pci_subvendor_id = pci_get_subvendor(dev);
353 pci_subdevice_id = pci_get_subdevice(dev);
355 ent = lem_vendor_info_array;
356 while (ent->vendor_id != 0) {
357 if ((pci_vendor_id == ent->vendor_id) &&
358 (pci_device_id == ent->device_id) &&
360 ((pci_subvendor_id == ent->subvendor_id) ||
361 (ent->subvendor_id == PCI_ANY_ID)) &&
363 ((pci_subdevice_id == ent->subdevice_id) ||
364 (ent->subdevice_id == PCI_ANY_ID))) {
365 sprintf(adapter_name, "%s %s",
366 lem_strings[ent->index],
368 device_set_desc_copy(dev, adapter_name);
369 return (BUS_PROBE_DEFAULT);
377 /*********************************************************************
378 * Device initialization routine
380 * The attach entry point is called when the driver is being loaded.
381 * This routine identifies the type of hardware, allocates all resources
382 * and initializes the hardware.
384 * return 0 on success, positive on failure
385 *********************************************************************/
/*
 * lem_attach - device initialization routine.
 *
 * Identifies the hardware, allocates PCI/DMA resources, validates the
 * EEPROM and MAC address, initializes the chip, allocates TX/RX rings,
 * wires up the interrupt, and creates the ifnet.  Returns 0 on success,
 * positive errno on failure; the trailing statements are the unwind
 * (error) path releasing resources in reverse order of acquisition.
 *
 * NOTE(review): error labels (err_pci/err_hw_init/...), goto statements,
 * several locals' declarations, and assorted braces are elided from this
 * excerpt — verify the unwind ordering against the full source.
 */
388 lem_attach(device_t dev)
390 struct adapter *adapter;
394 INIT_DEBUGOUT("lem_attach: begin");
396 adapter = device_get_softc(dev);
397 adapter->dev = adapter->osdep.dev = dev;
/* Core/TX/RX mutexes must exist before the callouts reference them. */
398 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
399 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
400 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
403 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
404 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
405 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
406 lem_sysctl_nvm_info, "I", "NVM Information");
408 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
409 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
411 /* Determine hardware and mac info */
412 lem_identify_hardware(adapter);
414 /* Setup PCI resources */
415 if (lem_allocate_pci_resources(adapter)) {
416 device_printf(dev, "Allocation of PCI resources failed\n");
421 /* Do Shared Code initialization */
422 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
423 device_printf(dev, "Setup of Shared code failed\n");
428 e1000_get_bus_info(&adapter->hw);
430 /* Set up some sysctls for the tunable interrupt delays */
431 lem_add_int_delay_sysctl(adapter, "rx_int_delay",
432 "receive interrupt delay in usecs", &adapter->rx_int_delay,
433 E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
434 lem_add_int_delay_sysctl(adapter, "tx_int_delay",
435 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
436 E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
/* Absolute-delay and ITR registers only exist on 82540 and later. */
437 if (adapter->hw.mac.type >= e1000_82540) {
438 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
439 "receive interrupt delay limit in usecs",
440 &adapter->rx_abs_int_delay,
441 E1000_REGISTER(&adapter->hw, E1000_RADV),
442 lem_rx_abs_int_delay_dflt);
443 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
444 "transmit interrupt delay limit in usecs",
445 &adapter->tx_abs_int_delay,
446 E1000_REGISTER(&adapter->hw, E1000_TADV),
447 lem_tx_abs_int_delay_dflt);
448 lem_add_int_delay_sysctl(adapter, "itr",
449 "interrupt delay limit in usecs/4",
451 E1000_REGISTER(&adapter->hw, E1000_ITR),
455 /* Sysctls for limiting the amount of work done in the taskqueue */
456 lem_add_rx_process_limit(adapter, "rx_processing_limit",
457 "max number of rx packets to process", &adapter->rx_process_limit,
458 lem_rx_process_limit);
460 /* Sysctl for setting the interface flow control */
461 lem_set_flow_cntrl(adapter, "flow_control",
462 "flow control setting",
463 &adapter->fc_setting, lem_fc_setting);
466 * Validate number of transmit and receive descriptors. It
467 * must not exceed hardware maximum, and must be multiple
468 * of E1000_DBA_ALIGN.
470 if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
471 (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
472 (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
473 (lem_txd < EM_MIN_TXD)) {
474 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
475 EM_DEFAULT_TXD, lem_txd);
476 adapter->num_tx_desc = EM_DEFAULT_TXD;
478 adapter->num_tx_desc = lem_txd;
479 if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
480 (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
481 (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
482 (lem_rxd < EM_MIN_RXD)) {
483 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
484 EM_DEFAULT_RXD, lem_rxd);
485 adapter->num_rx_desc = EM_DEFAULT_RXD;
487 adapter->num_rx_desc = lem_rxd;
/* Autonegotiation defaults and fixed 2KB RX buffers. */
489 adapter->hw.mac.autoneg = DO_AUTO_NEG;
490 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
491 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
492 adapter->rx_buffer_len = 2048;
494 e1000_init_script_state_82541(&adapter->hw, TRUE);
495 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
498 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
499 adapter->hw.phy.mdix = AUTO_ALL_MODES;
500 adapter->hw.phy.disable_polarity_correction = FALSE;
501 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
505 * Set the frame limits assuming
506 * standard ethernet sized frames.
508 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
509 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
512 * This controls when hardware reports transmit completion
515 adapter->hw.mac.report_tx_early = 1;
517 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
520 /* Allocate Transmit Descriptor ring */
521 if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
522 device_printf(dev, "Unable to allocate tx_desc memory\n");
526 adapter->tx_desc_base =
527 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
529 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
532 /* Allocate Receive Descriptor ring */
533 if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
534 device_printf(dev, "Unable to allocate rx_desc memory\n");
538 adapter->rx_desc_base =
539 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
541 /* Allocate multicast array memory. */
542 adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
543 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
544 if (adapter->mta == NULL) {
545 device_printf(dev, "Can not allocate multicast setup array\n");
551 ** Start from a known state, this is
552 ** important in reading the nvm and
555 e1000_reset_hw(&adapter->hw);
557 /* Make sure we have a good EEPROM before we read from it */
558 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
560 ** Some PCI-E parts fail the first check due to
561 ** the link being in sleep state, call it again,
562 ** if it fails a second time its a real issue.
564 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
566 "The EEPROM Checksum Is Not Valid\n");
572 /* Copy the permanent MAC address out of the EEPROM */
573 if (e1000_read_mac_addr(&adapter->hw) < 0) {
574 device_printf(dev, "EEPROM read error while reading MAC"
580 if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
581 device_printf(dev, "Invalid MAC address\n");
586 /* Initialize the hardware */
587 if (lem_hardware_init(adapter)) {
588 device_printf(dev, "Unable to initialize the hardware\n");
593 /* Allocate transmit descriptors and buffers */
594 if (lem_allocate_transmit_structures(adapter)) {
595 device_printf(dev, "Could not setup transmit structures\n");
600 /* Allocate receive descriptors and buffers */
601 if (lem_allocate_receive_structures(adapter)) {
602 device_printf(dev, "Could not setup receive structures\n");
608 ** Do interrupt configuration
610 error = lem_allocate_irq(adapter);
615 * Get Wake-on-Lan and Management info for later use
619 /* Setup OS specific network interface */
620 if (lem_setup_interface(dev, adapter) != 0)
623 /* Initialize statistics */
624 lem_update_stats_counters(adapter);
626 adapter->hw.mac.get_link_status = 1;
627 lem_update_link_status(adapter);
629 /* Indicate SOL/IDER usage */
630 if (e1000_check_reset_block(&adapter->hw))
632 "PHY reset is blocked due to SOL/IDER session.\n");
634 /* Do we need workaround for 82544 PCI-X adapter? */
635 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
636 adapter->hw.mac.type == e1000_82544)
637 adapter->pcix_82544 = TRUE;
639 adapter->pcix_82544 = FALSE;
641 /* Register for VLAN events */
642 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
643 lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
644 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
645 lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
647 lem_add_hw_stats(adapter);
649 /* Non-AMT based hardware can now take control from firmware */
650 if (adapter->has_manage && !adapter->has_amt)
651 lem_get_hw_control(adapter);
653 /* Tell the stack that the interface is not active */
654 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
656 adapter->led_dev = led_create(lem_led_func, adapter,
657 device_get_nameunit(dev));
660 lem_netmap_attach(adapter);
661 #endif /* DEV_NETMAP */
662 INIT_DEBUGOUT("lem_attach: end");
/* Error unwind: release in reverse order of acquisition. */
667 lem_free_transmit_structures(adapter);
670 lem_release_hw_control(adapter);
671 lem_dma_free(adapter, &adapter->rxdma);
673 lem_dma_free(adapter, &adapter->txdma);
676 if (adapter->ifp != NULL)
677 if_free(adapter->ifp);
678 lem_free_pci_resources(adapter);
679 free(adapter->mta, M_DEVBUF);
680 EM_TX_LOCK_DESTROY(adapter);
681 EM_RX_LOCK_DESTROY(adapter);
682 EM_CORE_LOCK_DESTROY(adapter);
687 /*********************************************************************
688 * Device removal routine
690 * The detach entry point is called when the driver is being removed.
691 * This routine stops the adapter and deallocates all the resources
692 * that were allocated for driver operation.
694 * return 0 on success, positive on failure
695 *********************************************************************/
/*
 * lem_detach - device removal routine.
 *
 * Refuses to detach while VLANs are configured on the interface; otherwise
 * stops the adapter, deregisters polling/LED/VLAN hooks, detaches the
 * ifnet, drains callouts, and frees rings, DMA memory, PCI resources, and
 * locks.  Returns 0 on success, positive on failure.
 *
 * NOTE(review): several lines (EBUSY return, lem_stop call, EM_TX_LOCK,
 * netmap_detach, if_free, closing braces, final return) are elided from
 * this excerpt — verify ordering against the full source.
 */
698 lem_detach(device_t dev)
700 struct adapter *adapter = device_get_softc(dev);
701 struct ifnet *ifp = adapter->ifp;
703 INIT_DEBUGOUT("em_detach: begin");
705 /* Make sure VLANS are not using driver */
706 if (adapter->ifp->if_vlantrunk != NULL) {
707 device_printf(dev,"Vlan in use, detach first\n");
711 #ifdef DEVICE_POLLING
712 if (ifp->if_capenable & IFCAP_POLLING)
713 ether_poll_deregister(ifp);
716 if (adapter->led_dev != NULL)
717 led_destroy(adapter->led_dev);
719 EM_CORE_LOCK(adapter);
/* Block further ioctl processing while we tear down. */
721 adapter->in_detach = 1;
723 e1000_phy_hw_reset(&adapter->hw);
725 lem_release_manageability(adapter);
727 EM_TX_UNLOCK(adapter);
728 EM_CORE_UNLOCK(adapter);
730 /* Unregister VLAN events */
731 if (adapter->vlan_attach != NULL)
732 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
733 if (adapter->vlan_detach != NULL)
734 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* Detach from the network stack before freeing driver resources. */
736 ether_ifdetach(adapter->ifp);
737 callout_drain(&adapter->timer);
738 callout_drain(&adapter->tx_fifo_timer);
742 #endif /* DEV_NETMAP */
743 lem_free_pci_resources(adapter);
744 bus_generic_detach(dev);
747 lem_free_transmit_structures(adapter);
748 lem_free_receive_structures(adapter);
750 /* Free Transmit Descriptor ring */
751 if (adapter->tx_desc_base) {
752 lem_dma_free(adapter, &adapter->txdma);
753 adapter->tx_desc_base = NULL;
756 /* Free Receive Descriptor ring */
757 if (adapter->rx_desc_base) {
758 lem_dma_free(adapter, &adapter->rxdma);
759 adapter->rx_desc_base = NULL;
762 lem_release_hw_control(adapter);
763 free(adapter->mta, M_DEVBUF);
764 EM_TX_LOCK_DESTROY(adapter);
765 EM_RX_LOCK_DESTROY(adapter);
766 EM_CORE_LOCK_DESTROY(adapter);
771 /*********************************************************************
773 * Shutdown entry point
775 **********************************************************************/
/* Shutdown entry point: identical handling to suspend. */
778 lem_shutdown(device_t dev)
780 return lem_suspend(dev);
784 * Suspend/resume device methods.
/*
 * lem_suspend - stop the adapter, hand control back to firmware, and arm
 * wake-up before delegating to bus_generic_suspend().
 * NOTE(review): lem_stop() call and braces are elided from this excerpt.
 */
787 lem_suspend(device_t dev)
789 struct adapter *adapter = device_get_softc(dev);
791 EM_CORE_LOCK(adapter);
793 lem_release_manageability(adapter);
794 lem_release_hw_control(adapter);
795 lem_enable_wakeup(dev);
797 EM_CORE_UNLOCK(adapter);
799 return bus_generic_suspend(dev);
/*
 * lem_resume - reinitialize the adapter and manageability state after a
 * suspend, then let the bus resume children.
 * NOTE(review): a restart of the transmit path (lem_start call) appears
 * elided between unlock and return — verify against the full source.
 */
803 lem_resume(device_t dev)
805 struct adapter *adapter = device_get_softc(dev);
806 struct ifnet *ifp = adapter->ifp;
808 EM_CORE_LOCK(adapter);
809 lem_init_locked(adapter);
810 lem_init_manageability(adapter);
811 EM_CORE_UNLOCK(adapter);
814 return bus_generic_resume(dev);
/*
 * lem_start_locked - drain the interface send queue onto the TX ring.
 *
 * Caller must hold the TX lock.  Dequeues mbufs and hands them to
 * lem_xmit(); on encapsulation failure the (possibly modified) mbuf is
 * prepended back and OACTIVE is set.  Arms the transmit watchdog after
 * queueing work and sets OACTIVE when descriptors run low.
 *
 * NOTE(review): early returns, the lem_txeof() cleanup call, the m_head
 * declaration, and NULL-check/break lines are elided from this excerpt.
 */
819 lem_start_locked(struct ifnet *ifp)
821 struct adapter *adapter = ifp->if_softc;
824 EM_TX_LOCK_ASSERT(adapter);
826 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
829 if (!adapter->link_active)
833 * Force a cleanup if number of TX descriptors
834 * available hits the threshold
836 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
838 /* Now do we at least have a minimal? */
839 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
840 adapter->no_tx_desc_avail1++;
845 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
847 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
851 * Encapsulation can modify our pointer, and or make it
852 * NULL on failure. In that event, we can't requeue.
854 if (lem_xmit(adapter, &m_head)) {
857 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
858 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
862 /* Send a copy of the frame to the BPF listener */
863 ETHER_BPF_MTAP(ifp, m_head);
865 /* Set timeout in case hardware has problems transmitting. */
866 adapter->watchdog_check = TRUE;
867 adapter->watchdog_time = ticks;
869 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
870 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/*
 * lem_start - unlocked if_start wrapper: take the TX lock and call
 * lem_start_locked() when the interface is running.
 * NOTE(review): the EM_TX_LOCK() acquisition line is elided from this
 * excerpt (EM_TX_UNLOCK below implies it) — verify against full source.
 */
876 lem_start(struct ifnet *ifp)
878 struct adapter *adapter = ifp->if_softc;
881 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
882 lem_start_locked(ifp);
883 EM_TX_UNLOCK(adapter);
886 /*********************************************************************
889 * em_ioctl is called when the user wants to configure the
892 * return 0 on success, positive on failure
893 **********************************************************************/
/*
 * lem_ioctl - interface ioctl handler.
 *
 * Handles address assignment (avoiding a full re-init, and thus link
 * renegotiation, where possible), MTU changes, interface flags
 * (promisc/allmulti deltas vs. full reinit), multicast list updates,
 * media get/set, and capability toggles (polling, checksum, VLAN
 * tagging, WOL).  Unrecognized commands fall through to ether_ioctl().
 * Returns 0 on success, positive errno on failure.
 *
 * NOTE(review): the case labels (SIOCSIFADDR/SIOCSIFMTU/...), several
 * break statements, local declarations (error, mask, max_frame_size,
 * reinit), and closing braces are elided from this excerpt — verify
 * control flow against the full source.
 */
896 lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
898 struct adapter *adapter = ifp->if_softc;
899 struct ifreq *ifr = (struct ifreq *)data;
900 #if defined(INET) || defined(INET6)
901 struct ifaddr *ifa = (struct ifaddr *)data;
903 bool avoid_reset = FALSE;
/* Ignore ioctls that race with detach. */
906 if (adapter->in_detach)
912 if (ifa->ifa_addr->sa_family == AF_INET)
916 if (ifa->ifa_addr->sa_family == AF_INET6)
920 ** Calling init results in link renegotiation,
921 ** so we avoid doing it when possible.
924 ifp->if_flags |= IFF_UP;
925 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
928 if (!(ifp->if_flags & IFF_NOARP))
929 arp_ifinit(ifp, ifa);
932 error = ether_ioctl(ifp, command, data);
938 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
940 EM_CORE_LOCK(adapter);
/* Per-MAC frame size limits: only some chips support jumbo frames. */
941 switch (adapter->hw.mac.type) {
943 max_frame_size = ETHER_MAX_LEN;
946 max_frame_size = MAX_JUMBO_FRAME_SIZE;
948 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
950 EM_CORE_UNLOCK(adapter);
955 ifp->if_mtu = ifr->ifr_mtu;
956 adapter->max_frame_size =
957 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
958 lem_init_locked(adapter);
959 EM_CORE_UNLOCK(adapter);
963 IOCTL_DEBUGOUT("ioctl rcv'd:\
964 SIOCSIFFLAGS (Set Interface Flags)");
965 EM_CORE_LOCK(adapter);
966 if (ifp->if_flags & IFF_UP) {
967 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Only promisc/allmulti changed: update filters, skip full reinit. */
968 if ((ifp->if_flags ^ adapter->if_flags) &
969 (IFF_PROMISC | IFF_ALLMULTI)) {
970 lem_disable_promisc(adapter);
971 lem_set_promisc(adapter);
974 lem_init_locked(adapter);
976 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
979 EM_TX_UNLOCK(adapter);
981 adapter->if_flags = ifp->if_flags;
982 EM_CORE_UNLOCK(adapter);
986 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
987 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
988 EM_CORE_LOCK(adapter);
989 lem_disable_intr(adapter);
990 lem_set_multi(adapter);
/* 82542 rev2 needs its receive unit reprogrammed after filter change. */
991 if (adapter->hw.mac.type == e1000_82542 &&
992 adapter->hw.revision_id == E1000_REVISION_2) {
993 lem_initialize_receive_unit(adapter);
995 #ifdef DEVICE_POLLING
996 if (!(ifp->if_capenable & IFCAP_POLLING))
998 lem_enable_intr(adapter);
999 EM_CORE_UNLOCK(adapter);
1003 /* Check SOL/IDER usage */
1004 EM_CORE_LOCK(adapter);
1005 if (e1000_check_reset_block(&adapter->hw)) {
1006 EM_CORE_UNLOCK(adapter);
1007 device_printf(adapter->dev, "Media change is"
1008 " blocked due to SOL/IDER session.\n");
1011 EM_CORE_UNLOCK(adapter);
1013 IOCTL_DEBUGOUT("ioctl rcv'd: \
1014 SIOCxIFMEDIA (Get/Set Interface Media)");
1015 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
1021 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1023 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1024 #ifdef DEVICE_POLLING
1025 if (mask & IFCAP_POLLING) {
1026 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1027 error = ether_poll_register(lem_poll, ifp);
1030 EM_CORE_LOCK(adapter);
1031 lem_disable_intr(adapter);
1032 ifp->if_capenable |= IFCAP_POLLING;
1033 EM_CORE_UNLOCK(adapter);
1035 error = ether_poll_deregister(ifp);
1036 /* Enable interrupt even in error case */
1037 EM_CORE_LOCK(adapter);
1038 lem_enable_intr(adapter);
1039 ifp->if_capenable &= ~IFCAP_POLLING;
1040 EM_CORE_UNLOCK(adapter);
1044 if (mask & IFCAP_HWCSUM) {
1045 ifp->if_capenable ^= IFCAP_HWCSUM;
1048 if (mask & IFCAP_VLAN_HWTAGGING) {
1049 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1052 if ((mask & IFCAP_WOL) &&
1053 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1054 if (mask & IFCAP_WOL_MCAST)
1055 ifp->if_capenable ^= IFCAP_WOL_MCAST;
1056 if (mask & IFCAP_WOL_MAGIC)
1057 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1059 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1061 VLAN_CAPABILITIES(ifp);
1066 error = ether_ioctl(ifp, command, data);
1074 /*********************************************************************
1077 * This routine is used in two ways. It is used by the stack as
1078 * init entry point in network interface structure. It is also used
1079 * by the driver as a hw/sw initialization routine to get to a
1082 * return 0 on success, positive on failure
1083 **********************************************************************/
/*
 * lem_init_locked - full (re)initialization of the adapter with the core
 * lock already held: programs the packet buffer split, reloads the MAC
 * address, resets the hardware, rebuilds TX/RX rings, restores multicast
 * and promiscuous state, and finally re-enables interrupts (unless the
 * interface is in polling mode).
 */
1086 lem_init_locked(struct adapter *adapter)
1088 struct ifnet *ifp = adapter->ifp;
1089 device_t dev = adapter->dev;
1092 INIT_DEBUGOUT("lem_init: begin");
/* Caller must hold the core lock; the TX lock is taken only briefly. */
1094 EM_CORE_LOCK_ASSERT(adapter);
1096 EM_TX_LOCK(adapter);
1098 EM_TX_UNLOCK(adapter);
1101 * Packet Buffer Allocation (PBA)
1102 * Writing PBA sets the receive portion of the buffer
1103 * the remainder is used for the transmit buffer.
1105 * Devices before the 82547 had a Packet Buffer of 64K.
1106 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1107 * After the 82547 the buffer was reduced to 40K.
1108 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1109 * Note: default does not leave enough room for Jumbo Frame >10k.
1111 switch (adapter->hw.mac.type) {
1113 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1114 if (adapter->max_frame_size > 8192)
1115 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1117 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 also tracks a software TX FIFO head for its hang workaround. */
1118 adapter->tx_fifo_head = 0;
1119 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1120 adapter->tx_fifo_size =
1121 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1124 /* Devices before 82547 had a Packet Buffer of 64K. */
1125 if (adapter->max_frame_size > 8192)
1126 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1128 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1131 INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
1132 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1134 /* Get the latest mac address, User can use a LAA */
1135 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1138 /* Put the address into the Receive Address Array */
1139 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1141 /* Initialize the hardware */
1142 if (lem_hardware_init(adapter)) {
1143 device_printf(dev, "Unable to initialize the hardware\n");
1146 lem_update_link_status(adapter);
1148 /* Setup VLAN support, basic and offload if available */
1149 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1151 /* Set hardware offload abilities */
/* Checksum offload only exists on 82543 and newer MACs. */
1152 ifp->if_hwassist = 0;
1153 if (adapter->hw.mac.type >= e1000_82543) {
1154 if (ifp->if_capenable & IFCAP_TXCSUM)
1155 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1158 /* Configure for OS presence */
1159 lem_init_manageability(adapter);
1161 /* Prepare transmit descriptors and buffers */
1162 lem_setup_transmit_structures(adapter);
1163 lem_initialize_transmit_unit(adapter);
1165 /* Setup Multicast table */
1166 lem_set_multi(adapter);
1168 /* Prepare receive descriptors and buffers */
1169 if (lem_setup_receive_structures(adapter)) {
1170 device_printf(dev, "Could not setup receive structures\n");
1171 EM_TX_LOCK(adapter);
1173 EM_TX_UNLOCK(adapter);
1176 lem_initialize_receive_unit(adapter);
1178 /* Use real VLAN Filter support? */
1179 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1180 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
1181 /* Use real VLAN Filter support */
1182 lem_setup_vlan_hw_support(adapter);
/* Otherwise just enable VLAN tag stripping/insertion (CTRL.VME). */
1185 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1186 ctrl |= E1000_CTRL_VME;
1187 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1191 /* Don't lose promiscuous settings */
1192 lem_set_promisc(adapter);
/* Mark the interface up and kick off the 1-second housekeeping timer. */
1194 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1195 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1197 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1198 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1200 #ifdef DEVICE_POLLING
1202 * Only enable interrupts if we are not polling, make sure
1203 * they are off otherwise.
1205 if (ifp->if_capenable & IFCAP_POLLING)
1206 lem_disable_intr(adapter);
1208 #endif /* DEVICE_POLLING */
1209 lem_enable_intr(adapter);
1211 /* AMT based hardware can now take control from firmware */
1212 if (adapter->has_manage && adapter->has_amt)
1213 lem_get_hw_control(adapter);
/*
 * lem_init - ifnet if_init entry point.  Simply wraps lem_init_locked()
 * with the core lock; `arg` is the adapter softc.
 */
1219 struct adapter *adapter = arg;
1221 EM_CORE_LOCK(adapter);
1222 lem_init_locked(adapter);
1223 EM_CORE_UNLOCK(adapter);
1227 #ifdef DEVICE_POLLING
1228 /*********************************************************************
1230 * Legacy polling routine
1232 *********************************************************************/
/*
 * lem_poll - DEVICE_POLLING handler.  On POLL_AND_CHECK_STATUS it also
 * reads ICR and refreshes link state; then it reaps RX up to `count`
 * packets and restarts TX if the send queue is non-empty.
 */
1234 lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1236 struct adapter *adapter = ifp->if_softc;
1237 u32 reg_icr, rx_done = 0;
/* Bail out quietly if the interface has been stopped. */
1239 EM_CORE_LOCK(adapter);
1240 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1241 EM_CORE_UNLOCK(adapter);
1245 if (cmd == POLL_AND_CHECK_STATUS) {
1246 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* Link state change: pause the timer while we re-check the link. */
1247 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1248 callout_stop(&adapter->timer);
1249 adapter->hw.mac.get_link_status = 1;
1250 lem_update_link_status(adapter);
1251 callout_reset(&adapter->timer, hz,
1252 lem_local_timer, adapter);
1255 EM_CORE_UNLOCK(adapter);
1257 lem_rxeof(adapter, count, &rx_done);
1259 EM_TX_LOCK(adapter);
1261 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1262 lem_start_locked(ifp);
1263 EM_TX_UNLOCK(adapter);
1266 #endif /* DEVICE_POLLING */
1268 /*********************************************************************
1270 * Legacy Interrupt Service routine
1272 *********************************************************************/
/*
 * Legacy (non-fast) interrupt handler.  Ignores the interrupt when
 * polling is enabled or the interface is down, filters spurious ICR
 * values, handles link-change events, then processes RX and restarts TX.
 */
1276 struct adapter *adapter = arg;
1277 struct ifnet *ifp = adapter->ifp;
1281 if ((ifp->if_capenable & IFCAP_POLLING) ||
1282 ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0))
1285 EM_CORE_LOCK(adapter);
/* Reading ICR acknowledges/clears the pending interrupt causes. */
1286 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1287 if (reg_icr & E1000_ICR_RXO)
1288 adapter->rx_overruns++;
/* 0xffffffff typically means the device is gone; 0 means not ours. */
1290 if ((reg_icr == 0xffffffff) || (reg_icr == 0)) {
1291 EM_CORE_UNLOCK(adapter);
1295 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1296 callout_stop(&adapter->timer);
1297 adapter->hw.mac.get_link_status = 1;
1298 lem_update_link_status(adapter);
1299 /* Deal with TX cruft when link lost */
1300 lem_tx_purge(adapter);
1301 callout_reset(&adapter->timer, hz,
1302 lem_local_timer, adapter);
1303 EM_CORE_UNLOCK(adapter);
1307 EM_CORE_UNLOCK(adapter);
/* -1 count == no RX processing limit for the interrupt path. */
1308 lem_rxeof(adapter, -1, NULL);
1310 EM_TX_LOCK(adapter);
1312 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1313 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1314 lem_start_locked(ifp);
1315 EM_TX_UNLOCK(adapter);
/*
 * lem_handle_link - taskqueue deferred link-change handler (queued by
 * lem_irq_fast).  Refreshes link status and purges stale TX state under
 * the core lock; no-op if the interface is not running.
 */
1321 lem_handle_link(void *context, int pending)
1323 struct adapter *adapter = context;
1324 struct ifnet *ifp = adapter->ifp;
1326 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1329 EM_CORE_LOCK(adapter);
1330 callout_stop(&adapter->timer);
1331 lem_update_link_status(adapter);
1332 /* Deal with TX cruft when link lost */
1333 lem_tx_purge(adapter);
1334 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1335 EM_CORE_UNLOCK(adapter);
1339 /* Combined RX/TX handler, used by Legacy and MSI */
/*
 * lem_handle_rxtx - deferred RX/TX processing task.  Drains RX up to
 * rx_process_limit; if more work remains it re-queues itself, otherwise
 * it re-enables interrupts (which lem_irq_fast masked).
 */
1341 lem_handle_rxtx(void *context, int pending)
1343 struct adapter *adapter = context;
1344 struct ifnet *ifp = adapter->ifp;
1347 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* `more` == TRUE means rxeof hit the limit with work left over. */
1348 bool more = lem_rxeof(adapter, adapter->rx_process_limit, NULL);
1349 EM_TX_LOCK(adapter);
1351 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1352 lem_start_locked(ifp);
1353 EM_TX_UNLOCK(adapter);
1355 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1360 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1361 lem_enable_intr(adapter);
1364 /*********************************************************************
1366 * Fast Legacy/MSI Combined Interrupt Service routine
1368 *********************************************************************/
/*
 * lem_irq_fast - filter-level interrupt handler.  Does the minimum in
 * interrupt context: validates ICR, masks further interrupts, and hands
 * the real work to the rxtx/link taskqueues.  Returns FILTER_STRAY for
 * interrupts that are not ours, FILTER_HANDLED otherwise.
 */
1370 lem_irq_fast(void *arg)
1372 struct adapter *adapter = arg;
1378 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* All-ones read: device likely detached/absent. */
1381 if (reg_icr == 0xffffffff)
1382 return FILTER_STRAY;
1384 /* Definitely not our interrupt. */
1386 return FILTER_STRAY;
1389 * Mask interrupts until the taskqueue is finished running. This is
1390 * cheap, just assume that it is needed. This also works around the
1391 * MSI message reordering errata on certain systems.
1393 lem_disable_intr(adapter);
1394 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1396 /* Link status change */
1397 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1398 adapter->hw.mac.get_link_status = 1;
1399 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1402 if (reg_icr & E1000_ICR_RXO)
1403 adapter->rx_overruns++;
1404 return FILTER_HANDLED;
1408 /*********************************************************************
1410 * Media Ioctl callback
1412 * This routine is called whenever the user queries the status of
1413 * the interface using ifconfig.
1415 **********************************************************************/
/*
 * lem_media_status - report current media/link state into *ifmr.
 * Refreshes link status first; when the link is down only IFM_AVALID
 * is reported.  Fiber/serdes report 1000SX (or 1000LX on 82545),
 * copper reports speed from adapter->link_speed plus duplex.
 */
1417 lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1419 struct adapter *adapter = ifp->if_softc;
1420 u_char fiber_type = IFM_1000_SX;
1422 INIT_DEBUGOUT("lem_media_status: begin");
1424 EM_CORE_LOCK(adapter);
1425 lem_update_link_status(adapter);
1427 ifmr->ifm_status = IFM_AVALID;
1428 ifmr->ifm_active = IFM_ETHER;
/* No link: leave ACTIVE unset and return early. */
1430 if (!adapter->link_active) {
1431 EM_CORE_UNLOCK(adapter);
1435 ifmr->ifm_status |= IFM_ACTIVE;
1437 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1438 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
/* 82545 fiber is 1000LX rather than the default 1000SX. */
1439 if (adapter->hw.mac.type == e1000_82545)
1440 fiber_type = IFM_1000_LX;
1441 ifmr->ifm_active |= fiber_type | IFM_FDX;
1443 switch (adapter->link_speed) {
1445 ifmr->ifm_active |= IFM_10_T;
1448 ifmr->ifm_active |= IFM_100_TX;
1451 ifmr->ifm_active |= IFM_1000_T;
1454 if (adapter->link_duplex == FULL_DUPLEX)
1455 ifmr->ifm_active |= IFM_FDX;
1457 ifmr->ifm_active |= IFM_HDX;
1459 EM_CORE_UNLOCK(adapter);
1462 /*********************************************************************
1464 * Media Ioctl callback
1466 * This routine is called when the user changes speed/duplex using
1467 * media/mediopt option with ifconfig.
1469 **********************************************************************/
/*
 * lem_media_change - apply a user-requested media selection.
 * Translates the ifmedia subtype into autoneg/forced speed-duplex
 * settings in the shared-code hw structure, then reinitializes the
 * adapter via lem_init_locked() to make them take effect.
 */
1471 lem_media_change(struct ifnet *ifp)
1473 struct adapter *adapter = ifp->if_softc;
1474 struct ifmedia *ifm = &adapter->media;
1476 INIT_DEBUGOUT("lem_media_change: begin");
1478 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1481 EM_CORE_LOCK(adapter);
1482 switch (IFM_SUBTYPE(ifm->ifm_media)) {
/* Autoselect: advertise the full default set. */
1484 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1485 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
/* 1000Mb must use autoneg per spec, advertising 1000FDX only. */
1490 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1491 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
/* 100Mb: force the speed; duplex from the FDX media flag. */
1494 adapter->hw.mac.autoneg = FALSE;
1495 adapter->hw.phy.autoneg_advertised = 0;
1496 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1497 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1499 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
/* 10Mb: force the speed; duplex from the FDX media flag. */
1502 adapter->hw.mac.autoneg = FALSE;
1503 adapter->hw.phy.autoneg_advertised = 0;
1504 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1505 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1507 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1510 device_printf(adapter->dev, "Unsupported media type\n");
1513 lem_init_locked(adapter);
1514 EM_CORE_UNLOCK(adapter);
1519 /*********************************************************************
1521 * This routine maps the mbufs to tx descriptors.
1523 * return 0 on success, positive on failure
1524 **********************************************************************/
/*
 * lem_xmit - DMA-map one packet (*m_headp) and encode it into the TX
 * descriptor ring, then bump the tail register so the MAC picks it up.
 * Handles EFBIG by defragmenting the chain once; on 82544/PCI-X each
 * segment may be split into several descriptors to work around a
 * hardware erratum (lem_fill_descriptors).  May replace *m_headp.
 */
1527 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1529 bus_dma_segment_t segs[EM_MAX_SCATTER];
1531 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1532 struct e1000_tx_desc *ctxd = NULL;
1533 struct mbuf *m_head;
1534 u32 txd_upper, txd_lower, txd_used, txd_saved;
1535 int error, nsegs, i, j, first, last = 0;
1538 txd_upper = txd_lower = txd_used = txd_saved = 0;
1541 ** When doing checksum offload, it is critical to
1542 ** make sure the first mbuf has more than header,
1543 ** because that routine expects data to be present.
1545 if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
1546 (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
1547 m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
1554 * Map the packet for DMA
1556 * Capture the first descriptor index,
1557 * this descriptor will have the index
1558 * of the EOP which is the only one that
1559 * now gets a DONE bit writeback.
1561 first = adapter->next_avail_tx_desc;
1562 tx_buffer = &adapter->tx_buffer_area[first];
1563 tx_buffer_mapped = tx_buffer;
1564 map = tx_buffer->map;
1566 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1567 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1570 * There are two types of errors we can (try) to handle:
1571 * - EFBIG means the mbuf chain was too long and bus_dma ran
1572 * out of segments. Defragment the mbuf chain and try again.
1573 * - ENOMEM means bus_dma could not obtain enough bounce buffers
1574 * at this point in time. Defer sending and try again later.
1575 * All other errors, in particular EINVAL, are fatal and prevent the
1576 * mbuf chain from ever going through. Drop it and report error.
1578 if (error == EFBIG) {
1581 m = m_defrag(*m_headp, M_NOWAIT);
1583 adapter->mbuf_alloc_failed++;
/* Retry the mapping once with the defragmented chain. */
1591 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1592 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1595 adapter->no_tx_dma_setup++;
1600 } else if (error != 0) {
1601 adapter->no_tx_dma_setup++;
/* Need nsegs descriptors plus slack; otherwise back off. */
1605 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1606 adapter->no_tx_desc_avail2++;
1607 bus_dmamap_unload(adapter->txtag, map);
1612 /* Do hardware assists */
1613 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1614 lem_transmit_checksum_setup(adapter, m_head,
1615 &txd_upper, &txd_lower);
1617 i = adapter->next_avail_tx_desc;
1618 if (adapter->pcix_82544)
1621 /* Set up our transmit descriptors */
1622 for (j = 0; j < nsegs; j++) {
1624 bus_addr_t seg_addr;
1625 /* If adapter is 82544 and on PCIX bus */
1626 if(adapter->pcix_82544) {
1627 DESC_ARRAY desc_array;
1628 u32 array_elements, counter;
1630 * Check the Address and Length combination and
1631 * split the data accordingly
1633 array_elements = lem_fill_descriptors(segs[j].ds_addr,
1634 segs[j].ds_len, &desc_array);
1635 for (counter = 0; counter < array_elements; counter++) {
/* Ring exhausted mid-packet: roll back and unload the map. */
1636 if (txd_used == adapter->num_tx_desc_avail) {
1637 adapter->next_avail_tx_desc = txd_saved;
1638 adapter->no_tx_desc_avail2++;
1639 bus_dmamap_unload(adapter->txtag, map);
1642 tx_buffer = &adapter->tx_buffer_area[i];
1643 ctxd = &adapter->tx_desc_base[i];
1644 ctxd->buffer_addr = htole64(
1645 desc_array.descriptor[counter].address);
1646 ctxd->lower.data = htole32(
1647 (adapter->txd_cmd | txd_lower | (u16)
1648 desc_array.descriptor[counter].length));
1650 htole32((txd_upper));
1652 if (++i == adapter->num_tx_desc)
1654 tx_buffer->m_head = NULL;
1655 tx_buffer->next_eop = -1;
/* Normal (non-82544/PCI-X) path: one descriptor per DMA segment. */
1659 tx_buffer = &adapter->tx_buffer_area[i];
1660 ctxd = &adapter->tx_desc_base[i];
1661 seg_addr = segs[j].ds_addr;
1662 seg_len = segs[j].ds_len;
1663 ctxd->buffer_addr = htole64(seg_addr);
1664 ctxd->lower.data = htole32(
1665 adapter->txd_cmd | txd_lower | seg_len);
1669 if (++i == adapter->num_tx_desc)
1671 tx_buffer->m_head = NULL;
1672 tx_buffer->next_eop = -1;
1676 adapter->next_avail_tx_desc = i;
1678 if (adapter->pcix_82544)
1679 adapter->num_tx_desc_avail -= txd_used;
1681 adapter->num_tx_desc_avail -= nsegs;
1683 if (m_head->m_flags & M_VLANTAG) {
1684 /* Set the vlan id. */
1685 ctxd->upper.fields.special =
1686 htole16(m_head->m_pkthdr.ether_vtag);
1687 /* Tell hardware to add tag */
1688 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
/* Hand the mbuf (and its loaded DMA map) to the last descriptor;
 * the first slot's (unloaded) map is swapped into its place. */
1691 tx_buffer->m_head = m_head;
1692 tx_buffer_mapped->map = tx_buffer->map;
1693 tx_buffer->map = map;
1694 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1697 * Last Descriptor of Packet
1698 * needs End Of Packet (EOP)
1699 * and Report Status (RS)
1702 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1704 * Keep track in the first buffer which
1705 * descriptor will be written back
1707 tx_buffer = &adapter->tx_buffer_area[first];
1708 tx_buffer->next_eop = last;
1709 adapter->watchdog_time = ticks;
1712 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1713 * that this frame is available to transmit.
1715 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1716 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* 82547 half-duplex: defer the tail write to the FIFO workaround. */
1717 if (adapter->hw.mac.type == e1000_82547 &&
1718 adapter->link_duplex == HALF_DUPLEX)
1719 lem_82547_move_tail(adapter);
1721 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1722 if (adapter->hw.mac.type == e1000_82547)
1723 lem_82547_update_fifo_head(adapter,
1724 m_head->m_pkthdr.len);
1730 /*********************************************************************
1732 * 82547 workaround to avoid controller hang in half-duplex environment.
1733 * The workaround is to avoid queuing a large packet that would span
1734 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1735 * in this case. We do that only when FIFO is quiescent.
1736 *
1737 **********************************************************************/
/*
 * lem_82547_move_tail - walk descriptors from the hardware tail to the
 * software tail, summing packet lengths; only advance TDT if the FIFO
 * workaround says it is safe, otherwise retry via a 1-tick callout.
 * Called with the TX lock held (also usable as a callout handler).
 */
1739 lem_82547_move_tail(void *arg)
1741 struct adapter *adapter = arg;
1742 struct e1000_tx_desc *tx_desc;
1743 u16 hw_tdt, sw_tdt, length = 0;
1746 EM_TX_LOCK_ASSERT(adapter);
1748 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
1749 sw_tdt = adapter->next_avail_tx_desc;
1751 while (hw_tdt != sw_tdt) {
1752 tx_desc = &adapter->tx_desc_base[hw_tdt];
1753 length += tx_desc->lower.flags.length;
1754 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
/* Wrap the hardware tail index around the ring. */
1755 if (++hw_tdt == adapter->num_tx_desc)
/* FIFO would be spanned: defer and retry on the next tick. */
1759 if (lem_82547_fifo_workaround(adapter, length)) {
1760 adapter->tx_fifo_wrk_cnt++;
1761 callout_reset(&adapter->tx_fifo_timer, 1,
1762 lem_82547_move_tail, adapter);
1765 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
1766 lem_82547_update_fifo_head(adapter, length);
/*
 * lem_82547_fifo_workaround - decide whether queuing `len` bytes would
 * risk the 82547 half-duplex TX FIFO hang.  Rounds the packet up to a
 * FIFO-header multiple and, if it would overflow the space remaining
 * before the FIFO wrap point, attempts a FIFO reset first.
 */
1773 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1775 int fifo_space, fifo_pkt_len;
1777 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
/* The erratum only applies in half duplex. */
1779 if (adapter->link_duplex == HALF_DUPLEX) {
1780 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1782 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1783 if (lem_82547_tx_fifo_reset(adapter))
/*
 * lem_82547_update_fifo_head - advance the software shadow of the 82547
 * TX FIFO head by the (header-rounded) packet length, wrapping at the
 * FIFO size.
 */
1794 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1796 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1798 /* tx_fifo_head is always 16 byte aligned */
1799 adapter->tx_fifo_head += fifo_pkt_len;
1800 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1801 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * lem_82547_tx_fifo_reset - reset the 82547 internal TX FIFO pointers,
 * but only when the FIFO is fully quiescent (ring head == tail, FIFO
 * head == tail for both data and "saved" pointers, and zero packet
 * count).  TX is disabled around the pointer rewrite.
 */
1807 lem_82547_tx_fifo_reset(struct adapter *adapter)
/* Every head/tail pair must match and TDFPC must be 0 — i.e. nothing
 * is in flight — before it is safe to touch the FIFO registers. */
1811 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1812 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1813 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1814 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1815 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1816 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1817 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1818 /* Disable TX unit */
1819 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1820 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1821 tctl & ~E1000_TCTL_EN);
1823 /* Reset FIFO pointers */
1824 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1825 adapter->tx_head_addr);
1826 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1827 adapter->tx_head_addr);
1828 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1829 adapter->tx_head_addr);
1830 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1831 adapter->tx_head_addr);
1833 /* Re-enable TX unit */
1834 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
1835 E1000_WRITE_FLUSH(&adapter->hw);
1837 adapter->tx_fifo_head = 0;
1838 adapter->tx_fifo_reset_cnt++;
/*
 * lem_set_promisc - program RCTL from the interface flags:
 * IFF_PROMISC enables unicast+multicast promiscuous (and bad-packet
 * reception, per the comment below); IFF_ALLMULTI enables multicast
 * promiscuous only.
 */
1848 lem_set_promisc(struct adapter *adapter)
1850 struct ifnet *ifp = adapter->ifp;
1853 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1855 if (ifp->if_flags & IFF_PROMISC) {
1856 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1857 /* Turn this on if you want to see bad packets */
1859 reg_rctl |= E1000_RCTL_SBP;
1860 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1861 } else if (ifp->if_flags & IFF_ALLMULTI) {
1862 reg_rctl |= E1000_RCTL_MPE;
1863 reg_rctl &= ~E1000_RCTL_UPE;
1864 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/*
 * lem_disable_promisc - clear promiscuous bits in RCTL.  Unicast
 * promiscuous (UPE) and bad-packet (SBP) are always cleared; multicast
 * promiscuous (MPE) is kept if the multicast group count is at the
 * hardware maximum (lem_set_multi relies on MPE in that case).
 */
1869 lem_disable_promisc(struct adapter *adapter)
1871 struct ifnet *ifp = adapter->ifp;
1875 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1876 reg_rctl &= (~E1000_RCTL_UPE);
1877 if (ifp->if_flags & IFF_ALLMULTI)
1878 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
/* Otherwise count the AF_LINK multicast memberships, capped at max. */
1880 struct ifmultiaddr *ifma;
1881 #if __FreeBSD_version < 800000
1884 if_maddr_rlock(ifp);
1886 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1887 if (ifma->ifma_addr->sa_family != AF_LINK)
1889 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1893 #if __FreeBSD_version < 800000
1894 IF_ADDR_UNLOCK(ifp);
1896 if_maddr_runlock(ifp);
1899 /* Don't disable if in MAX groups */
1900 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1901 reg_rctl &= (~E1000_RCTL_MPE);
1902 reg_rctl &= (~E1000_RCTL_SBP);
1903 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1907 /*********************************************************************
1910 * This routine is called whenever multicast address list is updated.
1912 **********************************************************************/
/*
 * lem_set_multi - rebuild the hardware multicast filter from the
 * interface's multicast list.  If the list exceeds the hardware limit,
 * multicast-promiscuous (MPE) is enabled instead.  The 82542 rev 2
 * requires RCTL reset (and MWI disabled) around the update.
 */
1915 lem_set_multi(struct adapter *adapter)
1917 struct ifnet *ifp = adapter->ifp;
1918 struct ifmultiaddr *ifma;
1920 u8 *mta; /* Multicast array memory */
1923 IOCTL_DEBUGOUT("lem_set_multi: begin");
1926 bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
/* 82542 rev 2 workaround: put the receiver in reset while updating. */
1928 if (adapter->hw.mac.type == e1000_82542 &&
1929 adapter->hw.revision_id == E1000_REVISION_2) {
1930 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1931 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1932 e1000_pci_clear_mwi(&adapter->hw);
1933 reg_rctl |= E1000_RCTL_RST;
1934 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/* Collect up to MAX_NUM_MULTICAST_ADDRESSES link-level addresses. */
1938 #if __FreeBSD_version < 800000
1941 if_maddr_rlock(ifp);
1943 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1944 if (ifma->ifma_addr->sa_family != AF_LINK)
1947 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1950 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1951 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
1954 #if __FreeBSD_version < 800000
1955 IF_ADDR_UNLOCK(ifp);
1957 if_maddr_runlock(ifp);
/* Too many groups for the filter table: fall back to MPE. */
1959 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1960 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1961 reg_rctl |= E1000_RCTL_MPE;
1962 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1964 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
/* Undo the 82542 rev 2 reset/MWI workaround. */
1966 if (adapter->hw.mac.type == e1000_82542 &&
1967 adapter->hw.revision_id == E1000_REVISION_2) {
1968 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1969 reg_rctl &= ~E1000_RCTL_RST;
1970 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1972 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1973 e1000_pci_set_mwi(&adapter->hw);
1978 /*********************************************************************
1981 * This routine checks for link status and updates statistics.
1983 **********************************************************************/
/*
 * lem_local_timer - once-per-second housekeeping callout (runs with the
 * core lock held): refreshes link state and stats, runs smartspeed, and
 * checks the TX watchdog.  On a watchdog hit it logs, marks the
 * interface down and reinitializes the adapter.
 */
1986 lem_local_timer(void *arg)
1988 struct adapter *adapter = arg;
1990 EM_CORE_LOCK_ASSERT(adapter);
1992 lem_update_link_status(adapter);
1993 lem_update_stats_counters(adapter);
1995 lem_smartspeed(adapter);
1998 * We check the watchdog: the time since
1999 * the last TX descriptor was cleaned.
2000 * This implies a functional TX engine.
2002 if ((adapter->watchdog_check == TRUE) &&
2003 (ticks - adapter->watchdog_time > EM_WATCHDOG))
2006 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
/* Watchdog expired: reset the interface to recover the TX path. */
2009 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2010 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2011 adapter->watchdog_events++;
2012 lem_init_locked(adapter);
/*
 * lem_update_link_status - refresh adapter->link_* from the PHY/MAC and
 * report up/down transitions to the stack (if_link_state_change).
 * Copper uses the cached get_link_status flag, fiber reads STATUS.LU,
 * serdes uses serdes_has_link.  On link-down the TX watchdog is disarmed.
 */
2016 lem_update_link_status(struct adapter *adapter)
2018 struct e1000_hw *hw = &adapter->hw;
2019 struct ifnet *ifp = adapter->ifp;
2020 device_t dev = adapter->dev;
2023 /* Get the cached link value or read phy for real */
2024 switch (hw->phy.media_type) {
2025 case e1000_media_type_copper:
2026 if (hw->mac.get_link_status) {
2027 /* Do the work to read phy */
2028 e1000_check_for_link(hw);
2029 link_check = !hw->mac.get_link_status;
2030 if (link_check) /* ESB2 fix */
2031 e1000_cfg_on_link_up(hw);
2035 case e1000_media_type_fiber:
2036 e1000_check_for_link(hw);
2037 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2040 case e1000_media_type_internal_serdes:
2041 e1000_check_for_link(hw);
2042 link_check = adapter->hw.mac.serdes_has_link;
2045 case e1000_media_type_unknown:
2049 /* Now check for a transition */
2050 if (link_check && (adapter->link_active == 0)) {
/* Down -> up: latch speed/duplex, notify the stack. */
2051 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2052 &adapter->link_duplex);
2054 device_printf(dev, "Link is up %d Mbps %s\n",
2055 adapter->link_speed,
2056 ((adapter->link_duplex == FULL_DUPLEX) ?
2057 "Full Duplex" : "Half Duplex"));
2058 adapter->link_active = 1;
2059 adapter->smartspeed = 0;
2060 ifp->if_baudrate = adapter->link_speed * 1000000;
2061 if_link_state_change(ifp, LINK_STATE_UP);
2062 } else if (!link_check && (adapter->link_active == 1)) {
/* Up -> down: clear state and disarm the watchdog. */
2063 ifp->if_baudrate = adapter->link_speed = 0;
2064 adapter->link_duplex = 0;
2066 device_printf(dev, "Link is Down\n");
2067 adapter->link_active = 0;
2068 /* Link down, disable watchdog */
2069 adapter->watchdog_check = FALSE;
2070 if_link_state_change(ifp, LINK_STATE_DOWN);
2074 /*********************************************************************
2076 * This routine disables all traffic on the adapter by issuing a
2077 * global reset on the MAC and deallocates TX/RX buffers.
2079 * This routine should always be called with BOTH the CORE
2081 **********************************************************************/
/*
 * lem_stop - quiesce the adapter: mask interrupts, stop the callouts,
 * mark the interface down, and globally reset the MAC.  Requires both
 * the core and TX locks (asserted below).
 */
2086 struct adapter *adapter = arg;
2087 struct ifnet *ifp = adapter->ifp;
2089 EM_CORE_LOCK_ASSERT(adapter);
2090 EM_TX_LOCK_ASSERT(adapter);
2092 INIT_DEBUGOUT("lem_stop: begin");
2094 lem_disable_intr(adapter);
2095 callout_stop(&adapter->timer);
2096 callout_stop(&adapter->tx_fifo_timer);
2098 /* Tell the stack that the interface is no longer active */
2099 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2101 e1000_reset_hw(&adapter->hw);
/* WUC (wake-up control) only exists on 82544 and later. */
2102 if (adapter->hw.mac.type >= e1000_82544)
2103 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2105 e1000_led_off(&adapter->hw);
2106 e1000_cleanup_led(&adapter->hw);
2110 /*********************************************************************
2112 * Determine hardware revision.
2114 **********************************************************************/
/*
 * lem_identify_hardware - read PCI config space into the shared-code
 * hw structure (vendor/device/revision/subsystem IDs) and run
 * e1000_set_mac_type().  Also re-enables bus mastering / memory access
 * in PCI COMMAND if something cleared them.
 */
2116 lem_identify_hardware(struct adapter *adapter)
2118 device_t dev = adapter->dev;
2120 /* Make sure our PCI config space has the necessary stuff set */
2121 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2122 if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2123 (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2124 device_printf(dev, "Memory Access and/or Bus Master bits "
2126 adapter->hw.bus.pci_cmd_word |=
2127 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2128 pci_write_config(dev, PCIR_COMMAND,
2129 adapter->hw.bus.pci_cmd_word, 2);
2132 /* Save off the information about this board */
2133 adapter->hw.vendor_id = pci_get_vendor(dev);
2134 adapter->hw.device_id = pci_get_device(dev);
2135 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2136 adapter->hw.subsystem_vendor_id =
2137 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2138 adapter->hw.subsystem_device_id =
2139 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2141 /* Do Shared Code Init and Setup */
2142 if (e1000_set_mac_type(&adapter->hw)) {
2143 device_printf(dev, "Setup init failure\n");
/*
 * lem_allocate_pci_resources - map the memory BAR (register space) and,
 * for adapters that use it, locate and map the I/O BAR by scanning the
 * BAR region of config space.  Fills in osdep bus-space handles used by
 * the E1000_READ/WRITE_REG macros.
 */
2149 lem_allocate_pci_resources(struct adapter *adapter)
2151 device_t dev = adapter->dev;
2152 int val, rid, error = E1000_SUCCESS;
2155 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2157 if (adapter->memory == NULL) {
2158 device_printf(dev, "Unable to allocate bus resource: memory\n");
2161 adapter->osdep.mem_bus_space_tag =
2162 rman_get_bustag(adapter->memory);
2163 adapter->osdep.mem_bus_space_handle =
2164 rman_get_bushandle(adapter->memory);
2165 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2167 /* Only older adapters use IO mapping */
2168 if (adapter->hw.mac.type > e1000_82543) {
2169 /* Figure our where our IO BAR is ? */
2170 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2171 val = pci_read_config(dev, rid, 4);
2172 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2173 adapter->io_rid = rid;
2177 /* check for 64bit BAR */
/* 64-bit memory BARs occupy two dwords, so skip an extra rid. */
2178 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2181 if (rid >= PCIR_CIS) {
2182 device_printf(dev, "Unable to locate IO BAR\n");
2185 adapter->ioport = bus_alloc_resource_any(dev,
2186 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2187 if (adapter->ioport == NULL) {
2188 device_printf(dev, "Unable to allocate bus resource: "
2192 adapter->hw.io_base = 0;
2193 adapter->osdep.io_bus_space_tag =
2194 rman_get_bustag(adapter->ioport);
2195 adapter->osdep.io_bus_space_handle =
2196 rman_get_bushandle(adapter->ioport);
2199 adapter->hw.back = &adapter->osdep;
2204 /*********************************************************************
2206 * Setup the Legacy or MSI Interrupt handler
2208 **********************************************************************/
/*
 * lem_allocate_irq - allocate the single IRQ resource and wire up
 * either a classic ithread handler (lem_intr, when lem_use_legacy_irq
 * is set) or a fast filter handler (lem_irq_fast) plus the rxtx/link
 * taskqueue contexts it defers work to.
 */
2210 lem_allocate_irq(struct adapter *adapter)
2212 device_t dev = adapter->dev;
2215 /* Manually turn off all interrupts */
2216 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2218 /* We allocate a single interrupt resource */
2219 adapter->res[0] = bus_alloc_resource_any(dev,
2220 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2221 if (adapter->res[0] == NULL) {
2222 device_printf(dev, "Unable to allocate bus resource: "
2227 /* Do Legacy setup? */
2228 if (lem_use_legacy_irq) {
2229 if ((error = bus_setup_intr(dev, adapter->res[0],
2230 INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
2231 &adapter->tag[0])) != 0) {
2233 "Failed to register interrupt handler");
2240 * Use a Fast interrupt and the associated
2241 * deferred processing contexts.
2243 TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
2244 TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
2245 adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
2246 taskqueue_thread_enqueue, &adapter->tq);
2247 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2248 device_get_nameunit(adapter->dev));
2249 if ((error = bus_setup_intr(dev, adapter->res[0],
2250 INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
2251 &adapter->tag[0])) != 0) {
2252 device_printf(dev, "Failed to register fast interrupt "
2253 "handler: %d\n", error);
2254 taskqueue_free(adapter->tq);
/*
 * lem_free_pci_resources - release everything lem_allocate_pci_resources
 * and lem_allocate_irq acquired: interrupt handler + IRQ resource,
 * the memory BAR mapping, and the optional I/O BAR mapping.  Each
 * resource is released only if it was actually allocated.
 */
2264 lem_free_pci_resources(struct adapter *adapter)
2266 device_t dev = adapter->dev;
2269 if (adapter->tag[0] != NULL) {
2270 bus_teardown_intr(dev, adapter->res[0],
2272 adapter->tag[0] = NULL;
2275 if (adapter->res[0] != NULL) {
2276 bus_release_resource(dev, SYS_RES_IRQ,
2277 0, adapter->res[0]);
2280 if (adapter->memory != NULL)
2281 bus_release_resource(dev, SYS_RES_MEMORY,
2282 PCIR_BAR(0), adapter->memory);
2284 if (adapter->ioport != NULL)
2285 bus_release_resource(dev, SYS_RES_IOPORT,
2286 adapter->io_rid, adapter->ioport);
2290 /*********************************************************************
2292 * Initialize the hardware to a configuration
2293 * as specified by the adapter structure.
2295 **********************************************************************/
/*
 * lem_hardware_init - global reset, flow-control watermark setup, and
 * shared-code e1000_init_hw().  High/low water marks are derived from
 * the current PBA (RX packet buffer size); the requested flow-control
 * mode comes from the lem_fc_setting tunable when it is in range 0-3.
 */
2297 lem_hardware_init(struct adapter *adapter)
2299 device_t dev = adapter->dev;
2302 INIT_DEBUGOUT("lem_hardware_init: begin");
2304 /* Issue a global reset */
2305 e1000_reset_hw(&adapter->hw);
2307 /* When hardware is reset, fifo_head is also reset */
2308 adapter->tx_fifo_head = 0;
2311 * These parameters control the automatic generation (Tx) and
2312 * response (Rx) to Ethernet PAUSE frames.
2313 * - High water mark should allow for at least two frames to be
2314 * received after sending an XOFF.
2315 * - Low water mark works best when it is very near the high water mark.
2316 * This allows the receiver to restart by sending XON when it has
2317 * drained a bit. Here we use an arbitary value of 1500 which will
2318 * restart after one full frame is pulled from the buffer. There
2319 * could be several smaller frames in the buffer and if so they will
2320 * not trigger the XON until their total number reduces the buffer
2322 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2324 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
2327 adapter->hw.fc.high_water = rx_buffer_size -
2328 roundup2(adapter->max_frame_size, 1024);
2329 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2331 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2332 adapter->hw.fc.send_xon = TRUE;
2334 /* Set Flow control, use the tunable location if sane */
2335 if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
2336 adapter->hw.fc.requested_mode = lem_fc_setting;
2338 adapter->hw.fc.requested_mode = e1000_fc_none;
2340 if (e1000_init_hw(&adapter->hw) < 0) {
2341 device_printf(dev, "Hardware Initialization Failed\n");
2345 e1000_check_for_link(&adapter->hw);
2350 /*********************************************************************
2352 * Setup networking device structure and register an interface.
2354 **********************************************************************/
/*
 * NOTE(review): truncated listing — return type, braces, the `ifp`
 * declaration, the NULL check after if_alloc(), and several closing
 * braces are not visible; confirm against the full source.
 */
2356 lem_setup_interface(device_t dev, struct adapter *adapter)
2360 INIT_DEBUGOUT("lem_setup_interface: begin");
/* Allocate the ifnet and wire up the driver entry points. */
2362 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2364 device_printf(dev, "can not allocate ifnet structure\n");
2367 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2368 ifp->if_init = lem_init;
2369 ifp->if_softc = adapter;
2370 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2371 ifp->if_ioctl = lem_ioctl;
2372 ifp->if_start = lem_start;
2373 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2374 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2375 IFQ_SET_READY(&ifp->if_snd);
2377 ether_ifattach(ifp, adapter->hw.mac.addr);
2379 ifp->if_capabilities = ifp->if_capenable = 0;
/* Checksum offload only on 82543 and newer MACs. */
2381 if (adapter->hw.mac.type >= e1000_82543) {
2382 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2383 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2387 * Tell the upper layer(s) we support long frames.
2389 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2390 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2391 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2394 ** Dont turn this on by default, if vlans are
2395 ** created on another pseudo device (eg. lagg)
2396 ** then vlan events are not passed thru, breaking
2397 ** operation, but with HW FILTER off it works. If
2398 ** using vlans directly on the em driver you can
2399 ** enable this and get full hardware tag filtering.
2401 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2403 #ifdef DEVICE_POLLING
2404 ifp->if_capabilities |= IFCAP_POLLING;
2407 /* Enable only WOL MAGIC by default */
2409 ifp->if_capabilities |= IFCAP_WOL;
2410 ifp->if_capenable |= IFCAP_WOL_MAGIC;
2414 * Specify the media types supported by this adapter and register
2415 * callbacks to update media and link information
2417 ifmedia_init(&adapter->media, IFM_IMASK,
2418 lem_media_change, lem_media_status);
2419 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2420 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2421 u_char fiber_type = IFM_1000_SX; /* default type */
/* 82545 fiber parts advertise 1000LX instead of 1000SX. */
2423 if (adapter->hw.mac.type == e1000_82545)
2424 fiber_type = IFM_1000_LX;
2425 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
2427 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
/* Copper: register 10/100 and (unless IFE PHY) 1000baseT media. */
2429 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2430 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2432 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2434 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2436 if (adapter->hw.phy.type != e1000_phy_ife) {
2437 ifmedia_add(&adapter->media,
2438 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2439 ifmedia_add(&adapter->media,
2440 IFM_ETHER | IFM_1000_T, 0, NULL);
2443 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2444 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2449 /*********************************************************************
2451 * Workaround for SmartSpeed on 82541 and 82547 controllers
2453 **********************************************************************/
/*
 * Called periodically while the link is down: if 1000T master/slave
 * autonegotiation appears stuck, toggle CR_1000T_MS_ENABLE and restart
 * autonegotiation. adapter->smartspeed counts attempts.
 * NOTE(review): truncated listing — return type, `phy_tmp` declaration,
 * early-return statements and several closing braces are not visible.
 */
2455 lem_smartspeed(struct adapter *adapter)
/* Nothing to do when link is up, PHY is not IGP, autoneg is off,
 * or 1000FDX is not being advertised. */
2459 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2460 adapter->hw.mac.autoneg == 0 ||
2461 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2464 if (adapter->smartspeed == 0) {
2465 /* If Master/Slave config fault is asserted twice,
2466 * we assume back-to-back */
2467 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2468 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2470 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2471 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2472 e1000_read_phy_reg(&adapter->hw,
2473 PHY_1000T_CTRL, &phy_tmp);
/* Fault persisted: drop manual master/slave config and
 * restart autonegotiation. */
2474 if(phy_tmp & CR_1000T_MS_ENABLE) {
2475 phy_tmp &= ~CR_1000T_MS_ENABLE;
2476 e1000_write_phy_reg(&adapter->hw,
2477 PHY_1000T_CTRL, phy_tmp);
2478 adapter->smartspeed++;
2479 if(adapter->hw.mac.autoneg &&
2480 !e1000_copper_link_autoneg(&adapter->hw) &&
2481 !e1000_read_phy_reg(&adapter->hw,
2482 PHY_CONTROL, &phy_tmp)) {
2483 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2484 MII_CR_RESTART_AUTO_NEG);
2485 e1000_write_phy_reg(&adapter->hw,
2486 PHY_CONTROL, phy_tmp);
2491 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2492 /* If still no link, perhaps using 2/3 pair cable */
2493 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2494 phy_tmp |= CR_1000T_MS_ENABLE;
2495 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2496 if(adapter->hw.mac.autoneg &&
2497 !e1000_copper_link_autoneg(&adapter->hw) &&
2498 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2499 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2500 MII_CR_RESTART_AUTO_NEG);
2501 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2504 /* Restart process after EM_SMARTSPEED_MAX iterations */
2505 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2506 adapter->smartspeed = 0;
2511 * Manage DMA'able memory.
/*
 * bus_dmamap_load() callback: store the physical address of the first
 * (and only) DMA segment into the bus_addr_t pointed to by `arg`.
 * NOTE(review): the `if (error) return;` / nseg guard usually present in
 * this callback is not visible in this truncated listing.
 */
2514 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2518 *(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * lem_dma_malloc:
 *   Allocate a DMA-coherent region of `size` bytes for descriptor rings:
 *   create a tag, allocate + map memory, then load the map to obtain
 *   dma->dma_paddr via lem_dmamap_cb. On failure the partially-built
 *   state is unwound (unload/free/destroy) and the fields are cleared.
 * NOTE(review): truncated listing — return type, some tag parameters
 * (nsegments, flags), error labels and return statements are missing.
 */
2522 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2523 struct em_dma_alloc *dma, int mapflags)
2527 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2528 EM_DBA_ALIGN, 0, /* alignment, bounds */
2529 BUS_SPACE_MAXADDR, /* lowaddr */
2530 BUS_SPACE_MAXADDR, /* highaddr */
2531 NULL, NULL, /* filter, filterarg */
2534 size, /* maxsegsize */
2536 NULL, /* lockfunc */
2540 device_printf(adapter->dev,
2541 "%s: bus_dma_tag_create failed: %d\n",
2546 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2547 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2549 device_printf(adapter->dev,
2550 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2551 __func__, (uintmax_t)size, error);
/* Load the map; lem_dmamap_cb fills in dma->dma_paddr. */
2556 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2557 size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2558 if (error || dma->dma_paddr == 0) {
2559 device_printf(adapter->dev,
2560 "%s: bus_dmamap_load failed: %d\n",
/* Error unwind: release everything acquired so far. */
2568 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2570 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2571 bus_dma_tag_destroy(dma->dma_tag);
2573 dma->dma_map = NULL;
2574 dma->dma_tag = NULL;
/*
 * lem_dma_free:
 *   Release a region allocated by lem_dma_malloc(): sync, unload and
 *   free the map/memory, then destroy the tag. Safe to call when the
 *   tag was never created (early return on dma_tag == NULL).
 * NOTE(review): truncated listing — return type, braces and the bare
 * `return` after the NULL check are not visible.
 */
2580 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2582 if (dma->dma_tag == NULL)
2584 if (dma->dma_map != NULL) {
/* Complete any in-flight DMA before freeing the backing memory. */
2585 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2586 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2587 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2588 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2589 dma->dma_map = NULL;
2591 bus_dma_tag_destroy(dma->dma_tag);
2592 dma->dma_tag = NULL;
2596 /*********************************************************************
2598 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2599 * the information needed to transmit a packet on the wire.
2601 **********************************************************************/
/*
 * Creates the TX mbuf DMA tag (up to EM_MAX_SCATTER segments of
 * MCLBYTES each), allocates the tx_buffer_area array, and creates one
 * DMA map per descriptor. On any failure the `fail` path frees what
 * was built via lem_free_transmit_structures().
 * NOTE(review): truncated listing — return type, some tag arguments,
 * error assignments and goto/return statements are not visible.
 */
2603 lem_allocate_transmit_structures(struct adapter *adapter)
2605 device_t dev = adapter->dev;
2606 struct em_buffer *tx_buffer;
2610 * Create DMA tags for tx descriptors
2612 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2613 1, 0, /* alignment, bounds */
2614 BUS_SPACE_MAXADDR, /* lowaddr */
2615 BUS_SPACE_MAXADDR, /* highaddr */
2616 NULL, NULL, /* filter, filterarg */
2617 MCLBYTES * EM_MAX_SCATTER, /* maxsize */
2618 EM_MAX_SCATTER, /* nsegments */
2619 MCLBYTES, /* maxsegsize */
2621 NULL, /* lockfunc */
2623 &adapter->txtag)) != 0) {
2624 device_printf(dev, "Unable to allocate TX DMA tag\n");
2628 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2629 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2630 if (adapter->tx_buffer_area == NULL) {
2631 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2636 /* Create the descriptor buffer dma maps */
2637 for (int i = 0; i < adapter->num_tx_desc; i++) {
2638 tx_buffer = &adapter->tx_buffer_area[i];
2639 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2641 device_printf(dev, "Unable to create TX DMA map\n");
/* -1 marks "no pending packet" for this slot. */
2644 tx_buffer->next_eop = -1;
/* Failure path: undo partial allocation. */
2649 lem_free_transmit_structures(adapter);
2653 /*********************************************************************
2655 * (Re)Initialize transmit structures.
2657 **********************************************************************/
/*
 * Clears the TX descriptor ring, frees any mbufs left from a previous
 * run, (under DEV_NETMAP) points descriptors at netmap slot buffers,
 * and resets the ring bookkeeping indices. Caller holds the TX lock
 * (see "we are already locked" note below).
 * NOTE(review): truncated listing — #ifdef DEV_NETMAP opens, `addr` and
 * `paddr` declarations, and closing braces are not visible.
 */
2659 lem_setup_transmit_structures(struct adapter *adapter)
2661 struct em_buffer *tx_buffer;
2663 /* we are already locked */
2664 struct netmap_adapter *na = NA(adapter->ifp);
2665 struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
2666 #endif /* DEV_NETMAP */
2668 /* Clear the old ring contents */
2669 bzero(adapter->tx_desc_base,
2670 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc)
2672 /* Free any existing TX buffers */
2673 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2674 tx_buffer = &adapter->tx_buffer_area[i];
2675 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2676 BUS_DMASYNC_POSTWRITE);
2677 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2678 m_freem(tx_buffer->m_head);
2679 tx_buffer->m_head = NULL;
2682 /* the i-th NIC entry goes to slot si */
2683 int si = netmap_idx_n2k(&na->tx_rings[0], i);
2687 addr = PNMB(slot + si, &paddr);
2688 adapter->tx_desc_base[si].buffer_addr = htole64(paddr);
2689 /* reload the map for netmap mode */
2690 netmap_load_map(adapter->txtag, tx_buffer->map, addr);
2692 #endif /* DEV_NETMAP */
2693 tx_buffer->next_eop = -1;
/* Reset ring state: full ring available, clean/avail cursors at 0. */
2697 adapter->last_hw_offload = 0;
2698 adapter->next_avail_tx_desc = 0;
2699 adapter->next_tx_to_clean = 0;
2700 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2702 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2703 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2708 /*********************************************************************
2710 * Enable transmit unit.
2712 **********************************************************************/
/*
 * Programs the TX descriptor ring base/length registers, head/tail
 * pointers, inter-packet gap and interrupt-delay timers, then enables
 * the transmitter via TCTL.
 * NOTE(review): truncated listing — return type, `tipg`/`tctl`/`bus_addr`
 * declarations, switch case labels (82542/default) and break statements,
 * and the low-32-bit TDBAL argument are not visible.
 */
2714 lem_initialize_transmit_unit(struct adapter *adapter)
2719 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
2720 /* Setup the Base and Length of the Tx Descriptor Ring */
2721 bus_addr = adapter->txdma.dma_paddr;
2722 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2723 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2724 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2725 (u32)(bus_addr >> 32));
2726 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2728 /* Setup the HW Tx Head and Tail descriptor pointers */
2729 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2730 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
2732 HW_DEBUGOUT2("Base = %x, Length = %x\n",
2733 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
2734 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
2736 /* Set the default values for the Tx Inter Packet Gap timer */
2737 switch (adapter->hw.mac.type) {
2739 tipg = DEFAULT_82542_TIPG_IPGT;
2740 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2741 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
/* Fiber/serdes links use a different IPG than copper. */
2744 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2745 (adapter->hw.phy.media_type ==
2746 e1000_media_type_internal_serdes))
2747 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2749 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2750 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2751 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2754 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2755 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
/* TADV (absolute delay) exists only on 82540 and later. */
2756 if(adapter->hw.mac.type >= e1000_82540)
2757 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
2758 adapter->tx_abs_int_delay.value);
2760 /* Program the Transmit Control Register */
2761 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2762 tctl &= ~E1000_TCTL_CT;
2763 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2764 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2766 /* This write will effectively turn on the transmit unit. */
2767 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2769 /* Setup Transmit Descriptor Base Settings */
2770 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
2772 if (adapter->tx_int_delay.value > 0)
2773 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2776 /*********************************************************************
2778 * Free all transmit related data structures.
2780 **********************************************************************/
/*
 * Unloads/destroys every per-descriptor DMA map, frees any mbufs still
 * held, releases the tx_buffer_area array and the TX DMA tag, and (on
 * FreeBSD >= 8) frees the buf_ring.
 * NOTE(review): truncated listing — return type, braces and the map
 * arguments on the wrapped bus_dmamap_* calls are not visible.
 */
2782 lem_free_transmit_structures(struct adapter *adapter)
2784 struct em_buffer *tx_buffer;
2786 INIT_DEBUGOUT("free_transmit_structures: begin");
2788 if (adapter->tx_buffer_area != NULL) {
2789 for (int i = 0; i < adapter->num_tx_desc; i++) {
2790 tx_buffer = &adapter->tx_buffer_area[i];
2791 if (tx_buffer->m_head != NULL) {
2792 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2793 BUS_DMASYNC_POSTWRITE);
2794 bus_dmamap_unload(adapter->txtag,
2796 m_freem(tx_buffer->m_head);
2797 tx_buffer->m_head = NULL;
2798 } else if (tx_buffer->map != NULL)
2799 bus_dmamap_unload(adapter->txtag,
2801 if (tx_buffer->map != NULL) {
2802 bus_dmamap_destroy(adapter->txtag,
2804 tx_buffer->map = NULL;
2808 if (adapter->tx_buffer_area != NULL) {
2809 free(adapter->tx_buffer_area, M_DEVBUF);
2810 adapter->tx_buffer_area = NULL;
2812 if (adapter->txtag != NULL) {
2813 bus_dma_tag_destroy(adapter->txtag);
2814 adapter->txtag = NULL;
2816 #if __FreeBSD_version >= 800000
2817 if (adapter->br != NULL)
2818 buf_ring_free(adapter->br, M_DEVBUF);
2822 /*********************************************************************
2824 * The offload context needs to be set when we transfer the first
2825 * packet of a particular protocol (TCP/UDP). This routine has been
2826 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2828 * Added back the old method of keeping the current context type
2829 * and not setting if unnecessary, as this is reported to be a
2830 * big performance win. -jfv
2831 **********************************************************************/
/*
 * Builds an e1000 context descriptor describing where the IP/TCP/UDP
 * checksums live in `mp`, sets *txd_upper/*txd_lower for the data
 * descriptors that follow, and consumes one ring slot for the context.
 * Skips emitting a new context when last_hw_offload already matches.
 * NOTE(review): truncated listing — return type, `etype`/`ipproto`
 * declarations, the switch(etype) statement and case labels around line
 * 2871, break/return statements, and closing braces are not visible.
 */
2833 lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2834 u32 *txd_upper, u32 *txd_lower)
2836 struct e1000_context_desc *TXD = NULL;
2837 struct em_buffer *tx_buffer;
2838 struct ether_vlan_header *eh;
2839 struct ip *ip = NULL;
2840 struct ip6_hdr *ip6;
2841 int curr_txd, ehdrlen;
2842 u32 cmd, hdr_len, ip_hlen;
2847 cmd = hdr_len = ipproto = 0;
2848 *txd_upper = *txd_lower = 0;
2849 curr_txd = adapter->next_avail_tx_desc;
2852 * Determine where frame payload starts.
2853 * Jump over vlan headers if already present,
2854 * helpful for QinQ too.
2856 eh = mtod(mp, struct ether_vlan_header *);
2857 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2858 etype = ntohs(eh->evl_proto);
2859 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2861 etype = ntohs(eh->evl_encap_proto);
2862 ehdrlen = ETHER_HDR_LEN;
2866 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
2867 * TODO: Support SCTP too when it hits the tree.
/* (IPv4 case) */
2871 ip = (struct ip *)(mp->m_data + ehdrlen);
2872 ip_hlen = ip->ip_hl << 2;
2874 /* Setup of IP header checksum. */
2875 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
2877 * Start offset for header checksum calculation.
2878 * End offset for header checksum calculation.
2879 * Offset of place to put the checksum.
2881 TXD = (struct e1000_context_desc *)
2882 &adapter->tx_desc_base[curr_txd];
2883 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2884 TXD->lower_setup.ip_fields.ipcse =
2885 htole16(ehdrlen + ip_hlen);
2886 TXD->lower_setup.ip_fields.ipcso =
2887 ehdrlen + offsetof(struct ip, ip_sum);
2888 cmd |= E1000_TXD_CMD_IP;
2889 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2892 hdr_len = ehdrlen + ip_hlen;
2896 case ETHERTYPE_IPV6:
2897 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2898 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
2900 /* IPv6 doesn't have a header checksum. */
2902 hdr_len = ehdrlen + ip_hlen;
2903 ipproto = ip6->ip6_nxt;
/* (TCP case of the ipproto switch) */
2912 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2913 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2914 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2915 /* no need for context if already set */
2916 if (adapter->last_hw_offload == CSUM_TCP)
2918 adapter->last_hw_offload = CSUM_TCP;
2920 * Start offset for payload checksum calculation.
2921 * End offset for payload checksum calculation.
2922 * Offset of place to put the checksum.
2924 TXD = (struct e1000_context_desc *)
2925 &adapter->tx_desc_base[curr_txd];
2926 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2927 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2928 TXD->upper_setup.tcp_fields.tucso =
2929 hdr_len + offsetof(struct tcphdr, th_sum);
2930 cmd |= E1000_TXD_CMD_TCP;
/* (UDP case) */
2935 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2936 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2937 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2938 /* no need for context if already set */
2939 if (adapter->last_hw_offload == CSUM_UDP)
2941 adapter->last_hw_offload = CSUM_UDP;
2943 * Start offset for header checksum calculation.
2944 * End offset for header checksum calculation.
2945 * Offset of place to put the checksum.
2947 TXD = (struct e1000_context_desc *)
2948 &adapter->tx_desc_base[curr_txd];
2949 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2950 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2951 TXD->upper_setup.tcp_fields.tucso =
2952 hdr_len + offsetof(struct udphdr, uh_sum);
/* Finalize the context descriptor and consume one ring slot. */
2962 TXD->tcp_seg_setup.data = htole32(0);
2963 TXD->cmd_and_length =
2964 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
2965 tx_buffer = &adapter->tx_buffer_area[curr_txd];
2966 tx_buffer->m_head = NULL;
2967 tx_buffer->next_eop = -1;
2969 if (++curr_txd == adapter->num_tx_desc)
2972 adapter->num_tx_desc_avail--;
2973 adapter->next_avail_tx_desc = curr_txd;
2977 /**********************************************************************
2979 * Examine each tx_buffer in the used queue. If the hardware is done
2980 * processing the packet then free associated resources. The
2981 * tx_buffer is put back on the free queue.
2983 **********************************************************************/
/*
 * TX completion: walks packets whose EOP descriptor has STAT_DD set,
 * zeroes the cleaned descriptors, frees their mbufs/maps, and advances
 * next_tx_to_clean / num_tx_desc_avail. Clears IFF_DRV_OACTIVE when
 * enough slots free up, and the watchdog when the ring is empty.
 * Must be called with the TX lock held (EM_TX_LOCK_ASSERT).
 * NOTE(review): truncated listing — return type, `done` assignments,
 * `num_avail++`, the `last == -1` handling, the inner-loop wrap
 * statements and several braces are not visible.
 */
2985 lem_txeof(struct adapter *adapter)
2987 int first, last, done, num_avail;
2988 struct em_buffer *tx_buffer;
2989 struct e1000_tx_desc *tx_desc, *eop_desc;
2990 struct ifnet *ifp = adapter->ifp;
2992 EM_TX_LOCK_ASSERT(adapter);
2995 if (netmap_tx_irq(ifp, 0 | (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT)))
2997 #endif /* DEV_NETMAP */
/* Fast path: ring already fully clean. */
2998 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3001 num_avail = adapter->num_tx_desc_avail;
3002 first = adapter->next_tx_to_clean;
3003 tx_desc = &adapter->tx_desc_base[first];
3004 tx_buffer = &adapter->tx_buffer_area[first];
3005 last = tx_buffer->next_eop;
3006 eop_desc = &adapter->tx_desc_base[last];
3009 * What this does is get the index of the
3010 * first descriptor AFTER the EOP of the
3011 * first packet, that way we can do the
3012 * simple comparison on the inner while loop.
3014 if (++last == adapter->num_tx_desc)
3018 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3019 BUS_DMASYNC_POSTREAD);
3021 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3022 /* We clean the range of the packet */
3023 while (first != done) {
3024 tx_desc->upper.data = 0;
3025 tx_desc->lower.data = 0;
3026 tx_desc->buffer_addr = 0;
3029 if (tx_buffer->m_head) {
3031 bus_dmamap_sync(adapter->txtag,
3033 BUS_DMASYNC_POSTWRITE);
3034 bus_dmamap_unload(adapter->txtag,
3037 m_freem(tx_buffer->m_head);
3038 tx_buffer->m_head = NULL;
3040 tx_buffer->next_eop = -1;
/* Progress made: reset the watchdog timestamp. */
3041 adapter->watchdog_time = ticks;
3043 if (++first == adapter->num_tx_desc)
3046 tx_buffer = &adapter->tx_buffer_area[first];
3047 tx_desc = &adapter->tx_desc_base[first];
3049 /* See if we can continue to the next packet */
3050 last = tx_buffer->next_eop;
3052 eop_desc = &adapter->tx_desc_base[last];
3053 /* Get new done point */
3054 if (++last == adapter->num_tx_desc) last = 0;
3059 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3060 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3062 adapter->next_tx_to_clean = first;
3063 adapter->num_tx_desc_avail = num_avail;
3066 * If we have enough room, clear IFF_DRV_OACTIVE to
3067 * tell the stack that it is OK to send packets.
3068 * If there are no pending descriptors, clear the watchdog.
3070 if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
3071 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3072 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
3073 adapter->watchdog_check = FALSE;
3079 /*********************************************************************
3081 * When Link is lost sometimes there is work still in the TX ring
3082 * which may result in a watchdog, rather than allow that we do an
3083 * attempted cleanup and then reinit here. Note that this has been
3084 * seens mostly with fiber adapters.
3086 **********************************************************************/
/*
 * NOTE(review): truncated listing — return type, braces, and the
 * cleanup call made between EM_TX_LOCK and EM_TX_UNLOCK (presumably
 * lem_txeof) are not visible; confirm against the full source.
 */
3088 lem_tx_purge(struct adapter *adapter)
3090 if ((!adapter->link_active) && (adapter->watchdog_check)) {
3091 EM_TX_LOCK(adapter);
3093 EM_TX_UNLOCK(adapter);
/* If cleanup didn't drain the ring, reinitialize the adapter. */
3094 if (adapter->watchdog_check) /* Still outstanding? */
3095 lem_init_locked(adapter);
3099 /*********************************************************************
3101 * Get a buffer from system mbuf buffer pool.
3103 **********************************************************************/
/*
 * Allocates a fresh mbuf cluster for RX slot `i`, DMA-loads it through
 * the spare map, swaps maps with the slot (so the old map becomes the
 * new spare), and writes the new physical address into the descriptor.
 * NOTE(review): truncated listing — return type, `m`/`map`/`error`
 * declarations, error returns (including after bus_dmamap_load_mbuf_sg)
 * and closing braces are not visible.
 */
3105 lem_get_buf(struct adapter *adapter, int i)
3108 bus_dma_segment_t segs[1];
3110 struct em_buffer *rx_buffer;
3113 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3115 adapter->mbuf_cluster_failed++;
3118 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Align the IP header when the frame fits with the 2-byte shim. */
3120 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3121 m_adj(m, ETHER_ALIGN);
3124 * Using memory from the mbuf cluster pool, invoke the
3125 * bus_dma machinery to arrange the memory mapping.
3127 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3128 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3134 /* If nsegs is wrong then the stack is corrupt. */
3135 KASSERT(nsegs == 1, ("Too many segments returned!"));
3137 rx_buffer = &adapter->rx_buffer_area[i];
3138 if (rx_buffer->m_head != NULL)
3139 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap the loaded spare map into the slot; recycle the old map. */
3141 map = rx_buffer->map;
3142 rx_buffer->map = adapter->rx_sparemap;
3143 adapter->rx_sparemap = map;
3144 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3145 rx_buffer->m_head = m;
3147 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3151 /*********************************************************************
3153 * Allocate memory for rx_buffer structures. Since we use one
3154 * rx_buffer per received packet, the maximum number of rx_buffer's
3155 * that we'll need is equal to the number of receive descriptors
3156 * that we've allocated.
3158 **********************************************************************/
/*
 * Allocates the rx_buffer_area array, creates the RX DMA tag (single
 * MCLBYTES segment), the spare map used by lem_get_buf(), and one map
 * per descriptor. On failure falls through to
 * lem_free_receive_structures() to unwind.
 * NOTE(review): truncated listing — return type, some tag arguments
 * (nsegments, flags, the &adapter->rxtag destination), error checks and
 * return statements are not visible.
 */
3160 lem_allocate_receive_structures(struct adapter *adapter)
3162 device_t dev = adapter->dev;
3163 struct em_buffer *rx_buffer;
3166 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3167 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3168 if (adapter->rx_buffer_area == NULL) {
3169 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3173 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3174 1, 0, /* alignment, bounds */
3175 BUS_SPACE_MAXADDR, /* lowaddr */
3176 BUS_SPACE_MAXADDR, /* highaddr */
3177 NULL, NULL, /* filter, filterarg */
3178 MCLBYTES, /* maxsize */
3180 MCLBYTES, /* maxsegsize */
3182 NULL, /* lockfunc */
3186 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3191 /* Create the spare map (used by getbuf) */
3192 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3193 &adapter->rx_sparemap);
3195 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3200 rx_buffer = adapter->rx_buffer_area;
3201 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3202 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3205 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* Failure path: undo partial allocation. */
3214 lem_free_receive_structures(adapter);
3218 /*********************************************************************
3220 * (Re)initialize receive structures.
3222 **********************************************************************/
/*
 * Zeroes the RX descriptor ring, frees any previously-posted mbufs,
 * then repopulates every slot — via netmap slot buffers when built with
 * DEV_NETMAP, otherwise via lem_get_buf(). Resets the RX cursor and
 * syncs the ring for device use. Caller holds the lock (see note).
 * NOTE(review): truncated listing — return type, `i`/`error`
 * declarations, #ifdef DEV_NETMAP opens, `addr`/`paddr` declarations,
 * the error-cleanup path after lem_get_buf failure, and closing braces
 * are not visible.
 */
3224 lem_setup_receive_structures(struct adapter *adapter)
3226 struct em_buffer *rx_buffer;
3229 /* we are already under lock */
3230 struct netmap_adapter *na = NA(adapter->ifp);
3231 struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
3234 /* Reset descriptor ring */
3235 bzero(adapter->rx_desc_base,
3236 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3238 /* Free current RX buffers. */
3239 rx_buffer = adapter->rx_buffer_area;
3240 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3241 if (rx_buffer->m_head != NULL) {
3242 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3243 BUS_DMASYNC_POSTREAD);
3244 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3245 m_freem(rx_buffer->m_head);
3246 rx_buffer->m_head = NULL;
3250 /* Allocate new ones. */
3251 for (i = 0; i < adapter->num_rx_desc; i++) {
3254 /* the i-th NIC entry goes to slot si */
3255 int si = netmap_idx_n2k(&na->rx_rings[0], i);
3259 addr = PNMB(slot + si, &paddr);
3260 netmap_load_map(adapter->rxtag, rx_buffer->map, addr);
3261 /* Update descriptor */
3262 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
3265 #endif /* DEV_NETMAP */
3266 error = lem_get_buf(adapter, i);
3271 /* Setup our descriptor pointers */
3272 adapter->next_rx_desc_to_check = 0;
3273 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3274 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3279 /*********************************************************************
3281 * Enable receive unit.
3283 **********************************************************************/
/*
 * Disables RX, programs interrupt-delay/throttle timers, the ring base
 * and length, the RCTL configuration (buffer size, long-packet enable,
 * store-bad-packets for TBI workaround), optional RX checksum offload,
 * then re-enables RX and writes the head/tail pointers. Note `rctl` is
 * reused near the end to hold the RDT value.
 * NOTE(review): truncated listing — return type, `rctl`/`rxcsum`/
 * `bus_addr` declarations, RDTR write, switch case labels for the
 * rx_buffer_len sizes, the low-32-bit RDBAL argument, the else before
 * line 3356, and #ifdef DEV_NETMAP opens are not visible.
 */
3286 lem_initialize_receive_unit(struct adapter *adapter)
3288 struct ifnet *ifp = adapter->ifp;
3292 INIT_DEBUGOUT("lem_initialize_receive_unit: begin");
3295 * Make sure receives are disabled while setting
3296 * up the descriptor ring
3298 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3299 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3301 if (adapter->hw.mac.type >= e1000_82540) {
3302 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
3303 adapter->rx_abs_int_delay.value);
3305 * Set the interrupt throttling rate. Value is calculated
3306 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3308 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
3311 /* Setup the Base and Length of the Rx Descriptor Ring */
3312 bus_addr = adapter->rxdma.dma_paddr;
3313 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3314 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3315 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3316 (u32)(bus_addr >> 32));
3317 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3320 /* Setup the Receive Control Register */
3321 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3322 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3323 E1000_RCTL_RDMTS_HALF |
3324 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3326 /* Make sure VLAN Filters are off */
3327 rctl &= ~E1000_RCTL_VFE;
/* TBI workaround (82543): accept bad packets when enabled. */
3329 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3330 rctl |= E1000_RCTL_SBP;
3332 rctl &= ~E1000_RCTL_SBP;
/* Select hardware RX buffer size to match our cluster size. */
3334 switch (adapter->rx_buffer_len) {
3337 rctl |= E1000_RCTL_SZ_2048;
3340 rctl |= E1000_RCTL_SZ_4096 |
3341 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3344 rctl |= E1000_RCTL_SZ_8192 |
3345 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3348 rctl |= E1000_RCTL_SZ_16384 |
3349 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3353 if (ifp->if_mtu > ETHERMTU)
3354 rctl |= E1000_RCTL_LPE;
3356 rctl &= ~E1000_RCTL_LPE;
3358 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
3359 if ((adapter->hw.mac.type >= e1000_82543) &&
3360 (ifp->if_capenable & IFCAP_RXCSUM)) {
3361 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3362 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3363 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3366 /* Enable Receives */
3367 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3370 * Setup the HW Rx Head and
3371 * Tail Descriptor Pointers
3373 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
/* `rctl` is repurposed here as the tail (RDT) value. */
3374 rctl = adapter->num_rx_desc - 1; /* default RDT value */
3376 /* preserve buffers already made available to clients */
3377 if (ifp->if_capenable & IFCAP_NETMAP)
3378 rctl -= NA(adapter->ifp)->rx_rings[0].nr_hwavail;
3379 #endif /* DEV_NETMAP */
3380 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rctl);
3385 /*********************************************************************
3387 * Free receive related data structures.
3389 **********************************************************************/
/*
 * Destroys the spare map, then for each RX slot frees any posted mbuf
 * and unloads/destroys its DMA map; finally frees the rx_buffer_area
 * array and the RX DMA tag. Mirrors lem_free_transmit_structures().
 * NOTE(review): truncated listing — return type, `i` declaration,
 * the map arguments on the wrapped bus_dmamap_* calls and closing
 * braces are not visible.
 */
3391 lem_free_receive_structures(struct adapter *adapter)
3393 struct em_buffer *rx_buffer;
3396 INIT_DEBUGOUT("free_receive_structures: begin");
3398 if (adapter->rx_sparemap) {
3399 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3400 adapter->rx_sparemap = NULL;
3403 /* Cleanup any existing buffers */
3404 if (adapter->rx_buffer_area != NULL) {
3405 rx_buffer = adapter->rx_buffer_area;
3406 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3407 if (rx_buffer->m_head != NULL) {
3408 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3409 BUS_DMASYNC_POSTREAD);
3410 bus_dmamap_unload(adapter->rxtag,
3412 m_freem(rx_buffer->m_head);
3413 rx_buffer->m_head = NULL;
3414 } else if (rx_buffer->map != NULL)
3415 bus_dmamap_unload(adapter->rxtag,
3417 if (rx_buffer->map != NULL) {
3418 bus_dmamap_destroy(adapter->rxtag,
3420 rx_buffer->map = NULL;
3425 if (adapter->rx_buffer_area != NULL) {
3426 free(adapter->rx_buffer_area, M_DEVBUF);
3427 adapter->rx_buffer_area = NULL;
3430 if (adapter->rxtag != NULL) {
3431 bus_dma_tag_destroy(adapter->rxtag);
3432 adapter->rxtag = NULL;
3436 /*********************************************************************
3438 * This routine executes in interrupt context. It replenishes
3439 * the mbufs in the descriptor and sends data which has been
3440 * dma'ed into host memory to upper layer.
3442 * We loop at most count times if count is > 0, or until done if
3445 * For polling we also now return the number of cleaned packets
3446 *********************************************************************/
/*
 * RX completion processing.  Walks the receive descriptor ring starting at
 * next_rx_desc_to_check, assembling possibly multi-descriptor frames into
 * an mbuf chain (fmp = first, lmp = last), and hands completed frames to
 * the stack via if_input.  Runs under EM_RX_LOCK, dropping it only around
 * the if_input call.  "count" bounds the number of descriptors processed
 * when > 0; "done" (when non-NULL, presumably) reports packets cleaned for
 * polling mode.  Returns TRUE while more DD-ready descriptors remain.
 */
3448 lem_rxeof(struct adapter *adapter, int count, int *done)
3450 struct ifnet *ifp = adapter->ifp;
3452 u8 status = 0, accept_frame = 0, eop = 0;
3453 u16 len, desc_len, prev_len_adj;
3455 struct e1000_rx_desc *current_desc;
3457 EM_RX_LOCK(adapter);
3458 i = adapter->next_rx_desc_to_check;
3459 current_desc = &adapter->rx_desc_base[i];
/* Make device-written descriptor contents visible to the CPU. */
3460 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3461 BUS_DMASYNC_POSTREAD);
3464 if (netmap_rx_irq(ifp, 0 | NETMAP_LOCKED_ENTER, &rx_sent))
3466 #endif /* DEV_NETMAP */
/* DD (descriptor done) clear means nothing new has arrived. */
3468 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3471 EM_RX_UNLOCK(adapter);
3475 while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3476 struct mbuf *m = NULL;
3478 status = current_desc->status;
3479 if ((status & E1000_RXD_STAT_DD) == 0)
3482 mp = adapter->rx_buffer_area[i].m_head;
3484 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3485 * needs to access the last received byte in the mbuf.
3487 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3488 BUS_DMASYNC_POSTREAD);
3492 desc_len = le16toh(current_desc->length);
3493 if (status & E1000_RXD_STAT_EOP) {
/*
 * End of packet: strip the 4-byte CRC.  If the final fragment is
 * shorter than the CRC, remember how much to trim from the
 * previous mbuf in the chain (prev_len_adj).
 */
3496 if (desc_len < ETHER_CRC_LEN) {
3498 prev_len_adj = ETHER_CRC_LEN - desc_len;
3500 len = desc_len - ETHER_CRC_LEN;
3506 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3508 u32 pkt_len = desc_len;
3510 if (adapter->fmp != NULL)
3511 pkt_len += adapter->fmp->m_pkthdr.len;
/*
 * TBI workaround (82543 fiber): a frame with a carrier-extension
 * error may still be acceptable; adjust stats if so.
 */
3513 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3514 if (TBI_ACCEPT(&adapter->hw, status,
3515 current_desc->errors, pkt_len, last_byte,
3516 adapter->min_frame_size, adapter->max_frame_size)) {
3517 e1000_tbi_adjust_stats_82543(&adapter->hw,
3518 &adapter->stats, pkt_len,
3519 adapter->hw.mac.addr,
3520 adapter->max_frame_size);
/* Refill this slot with a fresh mbuf; on failure the old one is reused below. */
3528 if (lem_get_buf(adapter, i) != 0) {
3533 /* Assign correct length to the current fragment */
3536 if (adapter->fmp == NULL) {
3537 mp->m_pkthdr.len = len;
3538 adapter->fmp = mp; /* Store the first mbuf */
3541 /* Chain mbuf's together */
3542 mp->m_flags &= ~M_PKTHDR;
3544 * Adjust length of previous mbuf in chain if
3545 * we received less than 4 bytes in the last
3548 if (prev_len_adj > 0) {
3549 adapter->lmp->m_len -= prev_len_adj;
3550 adapter->fmp->m_pkthdr.len -=
3553 adapter->lmp->m_next = mp;
3554 adapter->lmp = adapter->lmp->m_next;
3555 adapter->fmp->m_pkthdr.len += len;
3559 adapter->fmp->m_pkthdr.rcvif = ifp;
3561 lem_receive_checksum(adapter, current_desc,
3563 #ifndef __NO_STRICT_ALIGNMENT
/* Jumbo frames on strict-alignment archs need payload realignment. */
3564 if (adapter->max_frame_size >
3565 (MCLBYTES - ETHER_ALIGN) &&
3566 lem_fixup_rx(adapter) != 0)
3569 if (status & E1000_RXD_STAT_VP) {
3570 adapter->fmp->m_pkthdr.ether_vtag =
3571 le16toh(current_desc->special);
3572 adapter->fmp->m_flags |= M_VLANTAG;
3574 #ifndef __NO_STRICT_ALIGNMENT
3578 adapter->fmp = NULL;
3579 adapter->lmp = NULL;
/* Error / drop path: keep the loaded DMA map, just reset the mbuf. */
3582 adapter->dropped_pkts++;
3584 /* Reuse loaded DMA map and just update mbuf chain */
3585 mp = adapter->rx_buffer_area[i].m_head;
3586 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3587 mp->m_data = mp->m_ext.ext_buf;
3589 if (adapter->max_frame_size <=
3590 (MCLBYTES - ETHER_ALIGN))
3591 m_adj(mp, ETHER_ALIGN);
/* Discard any partially-assembled chain belonging to the bad frame. */
3592 if (adapter->fmp != NULL) {
3593 m_freem(adapter->fmp);
3594 adapter->fmp = NULL;
3595 adapter->lmp = NULL;
3600 /* Zero out the receive descriptors status. */
3601 current_desc->status = 0;
3602 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3603 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3605 /* Advance our pointers to the next descriptor. */
3606 if (++i == adapter->num_rx_desc)
3608 /* Call into the stack */
/* Drop the RX lock around if_input; re-read the index afterwards since
 * the stack may re-enter the driver. */
3610 adapter->next_rx_desc_to_check = i;
3611 EM_RX_UNLOCK(adapter);
3612 (*ifp->if_input)(ifp, m);
3613 EM_RX_LOCK(adapter);
3615 i = adapter->next_rx_desc_to_check;
3617 current_desc = &adapter->rx_desc_base[i];
3619 adapter->next_rx_desc_to_check = i;
3621 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
/* Tail trails the next-to-check index by one descriptor. */
3623 i = adapter->num_rx_desc - 1;
3624 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3627 EM_RX_UNLOCK(adapter);
3628 return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
3631 #ifndef __NO_STRICT_ALIGNMENT
3633 * When jumbo frames are enabled we should realign entire payload on
3634 * architecures with strict alignment. This is serious design mistake of 8254x
3635 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3636 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3637 * payload. On architecures without strict alignment restrictions 8254x still
3638 * performs unaligned memory access which would reduce the performance too.
3639 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3640 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3641 * existing mbuf chain.
3643 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3644 * not used at all on architectures with strict alignment.
/*
 * Realign a received jumbo frame on strict-alignment architectures (see
 * the block comment above).  If the leading mbuf has room, the payload is
 * shifted in place; otherwise a small header mbuf is allocated, the
 * Ethernet header copied into it, and it is prepended to the chain.
 * Returns non-zero (and drops the frame) on allocation failure —
 * presumably; the error-path return value is outside this excerpt.
 */
3647 lem_fixup_rx(struct adapter *adapter)
/* In-place shift: enough slack exists in the cluster to move the data up. */
3654 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3655 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3656 m->m_data += ETHER_HDR_LEN;
/* Otherwise prepend a fresh header mbuf carrying just the Ethernet header. */
3658 MGETHDR(n, M_NOWAIT, MT_DATA);
3660 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3661 m->m_data += ETHER_HDR_LEN;
3662 m->m_len -= ETHER_HDR_LEN;
3663 n->m_len = ETHER_HDR_LEN;
3664 M_MOVE_PKTHDR(n, m);
/* Allocation failed: count and drop the whole assembled chain. */
3668 adapter->dropped_pkts++;
3669 m_freem(adapter->fmp);
3670 adapter->fmp = NULL;
3679 /*********************************************************************
3681 * Verify that the hardware indicated that the checksum is valid.
3682 * Inform the stack about the status of checksum so that stack
3683 * doesn't spend time verifying the checksum.
3685 *********************************************************************/
/*
 * Translate hardware RX checksum status bits into mbuf csum_flags so the
 * stack can skip software verification.  No-op (flags cleared) on
 * pre-82543 MACs or when the hardware set the "ignore checksum" bit.
 */
3687 lem_receive_checksum(struct adapter *adapter,
3688 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3690 /* 82543 or newer only */
3691 if ((adapter->hw.mac.type < e1000_82543) ||
3692 /* Ignore Checksum bit is set */
3693 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3694 mp->m_pkthdr.csum_flags = 0;
3698 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3700 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3701 /* IP Checksum Good */
3702 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3703 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3706 mp->m_pkthdr.csum_flags = 0;
3710 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3712 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
/* TCP/UDP checksum verified including pseudo-header. */
3713 mp->m_pkthdr.csum_flags |=
3714 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3715 mp->m_pkthdr.csum_data = htons(0xffff);
3721 * This routine is run via an vlan
/*
 * VLAN-config event handler: record a newly registered VLAN tag in the
 * shadow VFTA (one bit per tag, 32 tags per 32-bit word) and re-init the
 * hardware if VLAN filtering is enabled.  Ignores events for other
 * interfaces and out-of-range tags.
 */
3725 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3727 struct adapter *adapter = ifp->if_softc;
3730 if (ifp->if_softc != arg) /* Not our event */
3733 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
3736 EM_CORE_LOCK(adapter);
/* VFTA index = tag bits 5..11; bit within word presumably tag bits 0..4. */
3737 index = (vtag >> 5) & 0x7F;
3739 adapter->shadow_vfta[index] |= (1 << bit);
3740 ++adapter->num_vlans;
3741 /* Re-init to load the changes */
3742 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3743 lem_init_locked(adapter);
3744 EM_CORE_UNLOCK(adapter);
3748 * This routine is run via an vlan
/*
 * VLAN-unconfig event handler: mirror of lem_register_vlan — clear the
 * tag's bit in the shadow VFTA, decrement the count, and re-init the
 * hardware if VLAN filtering is enabled.
 */
3752 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3754 struct adapter *adapter = ifp->if_softc;
3757 if (ifp->if_softc != arg)
3760 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3763 EM_CORE_LOCK(adapter);
3764 index = (vtag >> 5) & 0x7F;
3766 adapter->shadow_vfta[index] &= ~(1 << bit);
3767 --adapter->num_vlans;
3768 /* Re-init to load the changes */
3769 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3770 lem_init_locked(adapter);
3771 EM_CORE_UNLOCK(adapter);
/*
 * Reprogram VLAN hardware state after a soft reset: repopulate the VFTA
 * from the driver's shadow copy, enable VLAN tag stripping (CTRL.VME),
 * and enable the VLAN filter table (RCTL.VFE).  Does nothing when no
 * VLANs are registered.
 */
3775 lem_setup_vlan_hw_support(struct adapter *adapter)
3777 struct e1000_hw *hw = &adapter->hw;
3781 ** We get here thru init_locked, meaning
3782 ** a soft reset, this has already cleared
3783 ** the VFTA and other state, so if there
3784 ** have been no vlan's registered do nothing.
3786 if (adapter->num_vlans == 0)
3790 ** A soft reset zero's out the VFTA, so
3791 ** we need to repopulate it now.
3793 for (int i = 0; i < EM_VFTA_SIZE; i++)
3794 if (adapter->shadow_vfta[i] != 0)
3795 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3796 i, adapter->shadow_vfta[i]);
/* Enable hardware VLAN tag stripping/insertion. */
3798 reg = E1000_READ_REG(hw, E1000_CTRL);
3799 reg |= E1000_CTRL_VME;
3800 E1000_WRITE_REG(hw, E1000_CTRL, reg);
3802 /* Enable the Filter Table */
3803 reg = E1000_READ_REG(hw, E1000_RCTL);
3804 reg &= ~E1000_RCTL_CFIEN;
3805 reg |= E1000_RCTL_VFE;
3806 E1000_WRITE_REG(hw, E1000_RCTL, reg);
/* Unmask the standard interrupt causes by writing the enable mask to IMS. */
3810 lem_enable_intr(struct adapter *adapter)
3812 struct e1000_hw *hw = &adapter->hw;
3813 u32 ims_mask = IMS_ENABLE_MASK;
3815 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
/* Mask all interrupt causes by writing all-ones to the IMC register. */
3819 lem_disable_intr(struct adapter *adapter)
3821 struct e1000_hw *hw = &adapter->hw;
3823 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3827 * Bit of a misnomer, what this really means is
3828 * to enable OS management of the system... aka
3829 * to disable special hardware management features
/*
 * Let the OS take over: disable the management controller's hardware
 * interception of ARP traffic (MANC.ARP_EN) when manageability exists.
 */
3832 lem_init_manageability(struct adapter *adapter)
3834 /* A shared code workaround */
3835 if (adapter->has_manage) {
3836 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3837 /* disable hardware interception of ARP */
3838 manc &= ~(E1000_MANC_ARP_EN);
3839 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3844 * Give control back to hardware management
3845 * controller if there is one.
/*
 * Hand control back to the management controller: re-enable its hardware
 * interception of ARP traffic.  Inverse of lem_init_manageability().
 */
3848 lem_release_manageability(struct adapter *adapter)
3850 if (adapter->has_manage) {
3851 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3853 /* re-enable hardware interception of ARP */
3854 manc |= E1000_MANC_ARP_EN;
3855 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3860 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3861 * For ASF and Pass Through versions of f/w this means
3862 * that the driver is loaded. For AMT version type f/w
3863 * this means that the network i/f is open.
/* Set CTRL_EXT.DRV_LOAD to tell firmware the driver has loaded (see
 * comment above for ASF/PT vs. AMT semantics). */
3866 lem_get_hw_control(struct adapter *adapter)
3870 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3871 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3872 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3877 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3878 * For ASF and Pass Through versions of f/w this means that
3879 * the driver is no longer loaded. For AMT versions of the
3880 * f/w this means that the network i/f is closed.
/* Clear CTRL_EXT.DRV_LOAD to tell firmware the driver is unloading.
 * No-op when the part has no manageability support. */
3883 lem_release_hw_control(struct adapter *adapter)
3887 if (!adapter->has_manage)
3890 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3891 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3892 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
/*
 * Validate an Ethernet MAC address: reject multicast addresses (low bit
 * of the first octet set) and the all-zeros address.  The success-path
 * return is outside this excerpt.
 */
3897 lem_is_valid_ether_addr(u8 *addr)
3899 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3901 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3909 ** Parse the interface capabilities with regard
3910 ** to both system management and wake-on-lan for
/*
 * Probe manageability and wake-on-LAN capability at attach time.  Reads
 * the APM-enable bit from the appropriate NVM word for the MAC type
 * (port B of dual-port 8254x parts uses a different word), then applies
 * board-specific quirks where the EEPROM is known to be wrong or WoL is
 * only supported on one port.
 */
3914 lem_get_wakeup(device_t dev)
3916 struct adapter *adapter = device_get_softc(dev);
3917 u16 eeprom_data = 0, device_id, apme_mask;
3919 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
3920 apme_mask = EM_EEPROM_APME;
3922 switch (adapter->hw.mac.type) {
3927 e1000_read_nvm(&adapter->hw,
3928 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
3929 apme_mask = EM_82544_APME;
3932 case e1000_82546_rev_3:
/* Dual-port parts keep port B's init word separately. */
3933 if (adapter->hw.bus.func == 1) {
3934 e1000_read_nvm(&adapter->hw,
3935 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3938 e1000_read_nvm(&adapter->hw,
3939 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3942 e1000_read_nvm(&adapter->hw,
3943 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3946 if (eeprom_data & apme_mask)
3947 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
3949 * We have the eeprom settings, now apply the special cases
3950 * where the eeprom may be wrong or the board won't support
3951 * wake on lan on a particular port
3953 device_id = pci_get_device(dev);
3954 switch (device_id) {
3955 case E1000_DEV_ID_82546GB_PCIE:
3958 case E1000_DEV_ID_82546EB_FIBER:
3959 case E1000_DEV_ID_82546GB_FIBER:
3960 /* Wake events only supported on port A for dual fiber
3961 * regardless of eeprom setting */
3962 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
3963 E1000_STATUS_FUNC_1)
3966 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
3967 /* if quad port adapter, disable WoL on all but port A */
3968 if (global_quad_port_a != 0)
3970 /* Reset for multiple quad port adapters */
3971 if (++global_quad_port_a == 4)
3972 global_quad_port_a = 0;
3980 * Enable PCI Wake On Lan capability
/*
 * Arm PCI wake-on-LAN before suspend/shutdown: advertise wake capability,
 * keep the laser on for fiber/serdes media, trim the wol mask to what the
 * interface actually enabled (magic packet / multicast), and finally set
 * PME status/enable in PCI power-management config space.  Returns early
 * if the device has no PM capability.
 */
3983 lem_enable_wakeup(device_t dev)
3985 struct adapter *adapter = device_get_softc(dev);
3986 struct ifnet *ifp = adapter->ifp;
3987 u32 pmc, ctrl, ctrl_ext, rctl;
3990 if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
3993 /* Advertise the wakeup capability */
3994 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
3995 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
3996 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
3997 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
3999 /* Keep the laser running on Fiber adapters */
4000 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
4001 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
4002 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4003 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4004 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
4008 ** Determine type of Wakeup: note that wol
4009 ** is set with all bits on by default.
4011 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
4012 adapter->wol &= ~E1000_WUFC_MAG;
4014 if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
4015 adapter->wol &= ~E1000_WUFC_MC;
/* Multicast wake requires promiscuous-multicast reception. */
4017 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4018 rctl |= E1000_RCTL_MPE;
4019 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
/* pchlan parts wake via the PHY; fall back to MAC WUC/WUFC otherwise. */
4022 if (adapter->hw.mac.type == e1000_pchlan) {
4023 if (lem_enable_phy_wakeup(adapter))
4026 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4027 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
/* Request PME from PCI config space if any WOL capability is enabled. */
4032 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4033 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4034 if (ifp->if_capenable & IFCAP_WOL)
4035 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4036 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
4042 ** WOL in the newer chipset interfaces (pchlan)
4043 ** require thing to be copied into the phy
/*
 * Configure PHY-based wakeup for pchlan parts: copy the MAC's receive
 * address registers (RAR) and multicast table (MTA) into their PHY
 * equivalents, mirror the relevant RCTL bits into the PHY's BM_RCTL,
 * program wakeup filters, then activate host-wakeup via the wakeup
 * control page (769) over raw MDIC accesses.  Returns non-zero
 * (presumably the PHY error code) on failure to acquire or program the
 * PHY.
 */
4046 lem_enable_phy_wakeup(struct adapter *adapter)
4048 struct e1000_hw *hw = &adapter->hw;
4052 /* copy MAC RARs to PHY RARs */
4053 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4054 mreg = E1000_READ_REG(hw, E1000_RAL(i));
4055 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
4056 e1000_write_phy_reg(hw, BM_RAR_M(i),
4057 (u16)((mreg >> 16) & 0xFFFF));
4058 mreg = E1000_READ_REG(hw, E1000_RAH(i));
4059 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
4060 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
4061 (u16)((mreg >> 16) & 0xFFFF));
4064 /* copy MAC MTA to PHY MTA */
4065 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4066 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4067 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4068 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4069 (u16)((mreg >> 16) & 0xFFFF));
4072 /* configure PHY Rx Control register */
4073 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
4074 mreg = E1000_READ_REG(hw, E1000_RCTL);
4075 if (mreg & E1000_RCTL_UPE)
4076 preg |= BM_RCTL_UPE;
4077 if (mreg & E1000_RCTL_MPE)
4078 preg |= BM_RCTL_MPE;
4079 preg &= ~(BM_RCTL_MO_MASK);
4080 if (mreg & E1000_RCTL_MO_3)
4081 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4082 << BM_RCTL_MO_SHIFT);
4083 if (mreg & E1000_RCTL_BAM)
4084 preg |= BM_RCTL_BAM;
4085 if (mreg & E1000_RCTL_PMCF)
4086 preg |= BM_RCTL_PMCF;
4087 mreg = E1000_READ_REG(hw, E1000_CTRL);
4088 if (mreg & E1000_CTRL_RFCE)
4089 preg |= BM_RCTL_RFCE;
4090 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
4092 /* enable PHY wakeup in MAC register */
4093 E1000_WRITE_REG(hw, E1000_WUC,
4094 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4095 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
4097 /* configure and enable PHY wakeup in PHY registers */
4098 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
4099 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4101 /* activate PHY wakeup */
4102 ret = hw->phy.ops.acquire(hw);
4104 printf("Could not acquire PHY\n");
/* Select wakeup-control page 769 and set the enable/host-wakeup bits. */
4107 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4108 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4109 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
4111 printf("Could not read PHY page 769\n");
4114 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4115 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
4117 printf("Could not set PHY Host Wakeup bit\n");
4119 hw->phy.ops.release(hw);
/*
 * led(4) callback: turn the identify LED on or off under the core lock.
 * On the on path the LED is first set up; on the off path it is cleaned
 * up afterwards.
 */
4125 lem_led_func(void *arg, int onoff)
4127 struct adapter *adapter = arg;
4129 EM_CORE_LOCK(adapter);
4131 e1000_setup_led(&adapter->hw);
4132 e1000_led_on(&adapter->hw);
4134 e1000_led_off(&adapter->hw);
4135 e1000_cleanup_led(&adapter->hw);
4137 EM_CORE_UNLOCK(adapter);
4140 /*********************************************************************
4141 * 82544 Coexistence issue workaround.
4142 * There are 2 issues.
4143 * 1. Transmit Hang issue.
4144 * To detect this issue, following equation can be used...
4145 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4146 * If SUM[3:0] is in between 1 to 4, we will have this issue.
4149 * To detect this issue, following equation can be used...
4150 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4151 * If SUM[3:0] is in between 9 to c, we will have this issue.
4155 * Make sure we do not have ending address
4156 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4158 *************************************************************************/
/*
 * 82544 coexistence workaround (see the block comment above): given a
 * segment's bus address and length, decide whether its ending alignment
 * would trigger the hang/DAC errata.  If the "safe terminator" value
 * (low 3 address bits + low 4 length bits, mod 16) falls in the unsafe
 * ranges 1-4 or 9-0xC, the segment is split into a (length-4)-byte
 * descriptor plus a trailing 4-byte descriptor.  Returns the number of
 * descriptors written into desc_array (1 or 2).
 */
4160 lem_fill_descriptors (bus_addr_t address, u32 length,
4161 PDESC_ARRAY desc_array)
4163 u32 safe_terminator;
4165 /* Since issue is sensitive to length and address.*/
4166 /* Let us first check the address...*/
4168 desc_array->descriptor[0].address = address;
4169 desc_array->descriptor[0].length = length;
4170 desc_array->elements = 1;
4171 return (desc_array->elements);
4173 safe_terminator = (u32)((((u32)address & 0x7) +
4174 (length & 0xF)) & 0xF);
4175 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4176 if (safe_terminator == 0 ||
4177 (safe_terminator > 4 &&
4178 safe_terminator < 9) ||
4179 (safe_terminator > 0xC &&
4180 safe_terminator <= 0xF)) {
4181 desc_array->descriptor[0].address = address;
4182 desc_array->descriptor[0].length = length;
4183 desc_array->elements = 1;
4184 return (desc_array->elements);
/* Unsafe ending address: split off the last 4 bytes into a 2nd descriptor. */
4187 desc_array->descriptor[0].address = address;
4188 desc_array->descriptor[0].length = length - 4;
4189 desc_array->descriptor[1].address = address + (length - 4);
4190 desc_array->descriptor[1].length = 4;
4191 desc_array->elements = 2;
4192 return (desc_array->elements);
4195 /**********************************************************************
4197 * Update the board statistics counters.
4199 **********************************************************************/
/*
 * Accumulate the hardware's clear-on-read statistics registers into the
 * driver's 64-bit soft counters, then derive the ifnet error/collision
 * counters from them.  Symbol/sequence error counters are only sampled
 * on copper media or while link is up (they are unreliable otherwise).
 */
4201 lem_update_stats_counters(struct adapter *adapter)
4205 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
4206 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4207 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4208 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4210 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4211 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4212 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4213 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4215 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4216 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4217 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4218 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4219 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4220 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4221 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4222 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4223 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4224 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4225 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4226 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4227 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4228 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4229 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4230 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4231 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4232 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4233 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4234 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4236 /* For the 64-bit byte counters the low dword must be read first. */
4237 /* Both registers clear on the read of the high dword */
4239 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
4240 ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
4241 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
4242 ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
4244 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
4245 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
4246 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
4247 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
4248 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
4250 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
4251 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
4253 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
4254 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
4255 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
4256 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
4257 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
4258 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
4259 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
4260 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
4261 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
4262 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* These counters only exist on 82543 and newer MACs. */
4264 if (adapter->hw.mac.type >= e1000_82543) {
4265 adapter->stats.algnerrc +=
4266 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
4267 adapter->stats.rxerrc +=
4268 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
4269 adapter->stats.tncrs +=
4270 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
4271 adapter->stats.cexterr +=
4272 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
4273 adapter->stats.tsctc +=
4274 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
4275 adapter->stats.tsctfc +=
4276 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
/* Fold the accumulated stats into the ifnet counters the stack reports. */
4280 ifp->if_collisions = adapter->stats.colc;
4283 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
4284 adapter->stats.crcerrs + adapter->stats.algnerrc +
4285 adapter->stats.ruc + adapter->stats.roc +
4286 adapter->stats.mpc + adapter->stats.cexterr;
4289 ifp->if_oerrors = adapter->stats.ecol +
4290 adapter->stats.latecol + adapter->watchdog_events;
4293 /* Export a single 32-bit register via a read-only sysctl. */
/*
 * Generic read-only sysctl handler: arg1 is the adapter, arg2 the
 * register offset; reads the register fresh on each sysctl access.
 */
4295 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4297 struct adapter *adapter;
4300 adapter = oidp->oid_arg1;
4301 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4302 return (sysctl_handle_int(oidp, &val, 0, req));
4306 * Add sysctl variables, one per statistic, to the system.
/*
 * Register the driver's sysctl tree: software/driver counters directly
 * under the device node, live-register PROC nodes (via
 * lem_sysctl_reg_handler), and all MAC hardware statistics under a
 * "mac_stats" child node.  All entries are read-only and point at the
 * soft counters kept current by lem_update_stats_counters().
 */
4309 lem_add_hw_stats(struct adapter *adapter)
4311 device_t dev = adapter->dev;
4313 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4314 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4315 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4316 struct e1000_hw_stats *stats = &adapter->stats;
4318 struct sysctl_oid *stat_node;
4319 struct sysctl_oid_list *stat_list;
4321 /* Driver Statistics */
4322 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail",
4323 CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4325 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
4326 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4327 "Std mbuf cluster failed");
4328 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4329 CTLFLAG_RD, &adapter->dropped_pkts,
4330 "Driver dropped packets");
4331 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
4332 CTLFLAG_RD, &adapter->no_tx_dma_setup,
4333 "Driver tx dma failure in xmit");
4334 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4335 CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4336 "Not enough tx descriptors failure in xmit");
4337 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4338 CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4339 "Not enough tx descriptors failure in xmit");
4340 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4341 CTLFLAG_RD, &adapter->rx_overruns,
4343 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4344 CTLFLAG_RD, &adapter->watchdog_events,
4345 "Watchdog timeouts");
/* Live hardware register views (read on each access, not cached). */
4347 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4348 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4349 lem_sysctl_reg_handler, "IU",
4350 "Device Control Register");
4351 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4352 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4353 lem_sysctl_reg_handler, "IU",
4354 "Receiver Control Register");
4355 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4356 CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4357 "Flow Control High Watermark");
4358 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4359 CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4360 "Flow Control Low Watermark");
4361 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4362 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4363 "TX FIFO workaround events");
4364 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4365 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4368 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head",
4369 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4370 lem_sysctl_reg_handler, "IU",
4371 "Transmit Descriptor Head");
4372 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail",
4373 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4374 lem_sysctl_reg_handler, "IU",
4375 "Transmit Descriptor Tail");
4376 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head",
4377 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4378 lem_sysctl_reg_handler, "IU",
4379 "Receive Descriptor Head");
4380 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail",
4381 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4382 lem_sysctl_reg_handler, "IU",
4383 "Receive Descriptor Tail");
4386 /* MAC stats get their own sub node */
4388 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4389 CTLFLAG_RD, NULL, "Statistics");
4390 stat_list = SYSCTL_CHILDREN(stat_node);
4392 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4393 CTLFLAG_RD, &stats->ecol,
4394 "Excessive collisions");
4395 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4396 CTLFLAG_RD, &stats->scc,
4397 "Single collisions");
4398 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4399 CTLFLAG_RD, &stats->mcc,
4400 "Multiple collisions");
4401 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4402 CTLFLAG_RD, &stats->latecol,
4404 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4405 CTLFLAG_RD, &stats->colc,
4407 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4408 CTLFLAG_RD, &adapter->stats.symerrs,
4410 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4411 CTLFLAG_RD, &adapter->stats.sec,
4413 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4414 CTLFLAG_RD, &adapter->stats.dc,
4416 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4417 CTLFLAG_RD, &adapter->stats.mpc,
4419 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4420 CTLFLAG_RD, &adapter->stats.rnbc,
4421 "Receive No Buffers");
4422 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4423 CTLFLAG_RD, &adapter->stats.ruc,
4424 "Receive Undersize");
4425 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4426 CTLFLAG_RD, &adapter->stats.rfc,
4427 "Fragmented Packets Received ");
4428 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4429 CTLFLAG_RD, &adapter->stats.roc,
4430 "Oversized Packets Received");
4431 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4432 CTLFLAG_RD, &adapter->stats.rjc,
4434 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4435 CTLFLAG_RD, &adapter->stats.rxerrc,
4437 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4438 CTLFLAG_RD, &adapter->stats.crcerrs,
4440 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4441 CTLFLAG_RD, &adapter->stats.algnerrc,
4442 "Alignment Errors");
4443 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4444 CTLFLAG_RD, &adapter->stats.cexterr,
4445 "Collision/Carrier extension errors");
4446 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4447 CTLFLAG_RD, &adapter->stats.xonrxc,
4449 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4450 CTLFLAG_RD, &adapter->stats.xontxc,
4452 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4453 CTLFLAG_RD, &adapter->stats.xoffrxc,
4455 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4456 CTLFLAG_RD, &adapter->stats.xofftxc,
4457 "XOFF Transmitted");
4459 /* Packet Reception Stats */
4460 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4461 CTLFLAG_RD, &adapter->stats.tpr,
4462 "Total Packets Received ");
4463 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4464 CTLFLAG_RD, &adapter->stats.gprc,
4465 "Good Packets Received");
4466 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4467 CTLFLAG_RD, &adapter->stats.bprc,
4468 "Broadcast Packets Received");
4469 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4470 CTLFLAG_RD, &adapter->stats.mprc,
4471 "Multicast Packets Received");
4472 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4473 CTLFLAG_RD, &adapter->stats.prc64,
4474 "64 byte frames received ");
4475 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4476 CTLFLAG_RD, &adapter->stats.prc127,
4477 "65-127 byte frames received");
4478 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4479 CTLFLAG_RD, &adapter->stats.prc255,
4480 "128-255 byte frames received");
4481 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4482 CTLFLAG_RD, &adapter->stats.prc511,
4483 "256-511 byte frames received");
4484 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4485 CTLFLAG_RD, &adapter->stats.prc1023,
4486 "512-1023 byte frames received");
4487 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4488 CTLFLAG_RD, &adapter->stats.prc1522,
4489 "1023-1522 byte frames received");
4490 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4491 CTLFLAG_RD, &adapter->stats.gorc,
4492 "Good Octets Received");
4494 /* Packet Transmission Stats */
4495 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4496 CTLFLAG_RD, &adapter->stats.gotc,
4497 "Good Octets Transmitted");
4498 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4499 CTLFLAG_RD, &adapter->stats.tpt,
4500 "Total Packets Transmitted");
4501 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4502 CTLFLAG_RD, &adapter->stats.gptc,
4503 "Good Packets Transmitted");
4504 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4505 CTLFLAG_RD, &adapter->stats.bptc,
4506 "Broadcast Packets Transmitted");
4507 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4508 CTLFLAG_RD, &adapter->stats.mptc,
4509 "Multicast Packets Transmitted");
4510 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4511 CTLFLAG_RD, &adapter->stats.ptc64,
4512 "64 byte frames transmitted ");
4513 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4514 CTLFLAG_RD, &adapter->stats.ptc127,
4515 "65-127 byte frames transmitted");
4516 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4517 CTLFLAG_RD, &adapter->stats.ptc255,
4518 "128-255 byte frames transmitted");
4519 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4520 CTLFLAG_RD, &adapter->stats.ptc511,
4521 "256-511 byte frames transmitted");
4522 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4523 CTLFLAG_RD, &adapter->stats.ptc1023,
4524 "512-1023 byte frames transmitted");
4525 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4526 CTLFLAG_RD, &adapter->stats.ptc1522,
4527 "1024-1522 byte frames transmitted");
4528 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4529 CTLFLAG_RD, &adapter->stats.tsctc,
4530 "TSO Contexts Transmitted");
4531 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4532 CTLFLAG_RD, &adapter->stats.tsctfc,
4533 "TSO Contexts Failed");
4536 /**********************************************************************
4538 * This routine provides a way to dump out the adapter eeprom,
4539 * often a useful debug/service tool. This only dumps the first
4540 * 32 words, stuff that matters is in that extent.
4542 **********************************************************************/
/*
 * Sysctl handler (SYSCTL_HANDLER_ARGS).  arg1 carries the adapter pointer;
 * any integer written by the user through sysctl(8) triggers a hex dump of
 * the first 32 EEPROM words via lem_print_nvm_info().
 * NOTE(review): several lines (return type, 'error'/'result' declarations,
 * braces, returns) are missing from this extract — do not assume the
 * visible text is the complete function.
 */
4545 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4547 struct adapter *adapter;
/* Copy the value in/out; bail on error or when this is a read (no new
 * value supplied by the caller). */
4552 error = sysctl_handle_int(oidp, &result, 0, req)
4554 if (error || !req->newptr)
4558 * This value will cause a hex dump of the
4559 * first 32 16-bit words of the EEPROM to
/* A value was written: perform the dump. */
4563 adapter = (struct adapter *)arg1;
4564 lem_print_nvm_info(adapter);
/*
 * Dump the first 32 16-bit words of the adapter's NVM (EEPROM) to the
 * console, 8 words per row, each row prefixed with its word offset.
 * NOTE(review): the loop-local declarations (i, j, row, eeprom_data) and
 * closing braces are missing from this extract.
 */
4571 lem_print_nvm_info(struct adapter *adapter)
4576 /* It's a bit crude, but it gets the job done */
4577 printf("\nInterface EEPROM Dump:\n");
4578 printf("Offset\n0x0000 ");
/* Walk words 0..31; j counts words within the current 8-word row. */
4579 for (i = 0, j = 0; i < 32; i++, j++) {
4580 if (j == 8) { /* Make the offset block */
4582 printf("\n0x00%x0 ",row);
/* Read one 16-bit word at index i and print it in hex. */
4584 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4585 printf("%04x ", eeprom_data);
/*
 * Sysctl handler for the interrupt-delay knobs.  arg1 points at a
 * struct em_int_delay_info binding a register offset to its current
 * value in microseconds.  A written value is range-checked, converted
 * to device ticks, and merged into the low 16 bits of the register.
 * NOTE(review): the 'usecs'/'ticks'/'regval'/'error' declarations, the
 * returns, and the switch's case labels are missing from this extract;
 * the visible statements are not the complete function.
 */
4591 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4593 struct em_int_delay_info *info;
4594 struct adapter *adapter;
4600 info = (struct em_int_delay_info *)arg1;
4601 usecs = info->value;
/* Export the current value / import a new one from the user. */
4602 error = sysctl_handle_int(oidp, &usecs, 0, req);
4603 if (error != 0 || req->newptr == NULL)
/* Reject values that would not fit in the 16-bit register field. */
4605 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4607 info->value = usecs;
4608 ticks = EM_USECS_TO_TICKS(usecs);
4609 if (info->offset == E1000_ITR) /* units are 256ns here */
4612 adapter = info->adapter;
/* Read-modify-write the register under the core lock: only the low
 * 16 bits carry the delay value. */
4614 EM_CORE_LOCK(adapter);
4615 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4616 regval = (regval & ~0xffff) | (ticks & 0xffff);
4617 /* Handle a few special cases. */
4618 switch (info->offset) {
/* presumably the TIDV case: a zero delay disables the IDE bit in the
 * transmit descriptor command — TODO confirm against the full source. */
4623 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4624 /* Don't write 0 into the TIDV register. */
4627 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4630 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4631 EM_CORE_UNLOCK(adapter);
/*
 * Register one interrupt-delay sysctl node under the device's tree.
 * Initializes 'info' with the adapter back-pointer, the target register
 * offset, and the starting value, then attaches lem_sysctl_int_delay
 * as the read/write handler ("I" = int format).
 */
4636 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4637 const char *description, struct em_int_delay_info *info,
4638 int offset, int value)
4640 info->adapter = adapter;
4641 info->offset = offset;
4642 info->value = value;
4643 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4644 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4645 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4646 info, 0, lem_sysctl_int_delay, "I", description);
/*
 * Register a plain read/write integer sysctl backed directly by *limit,
 * seeded with 'value', under the device's sysctl tree.  Used for the
 * flow-control tunable.
 */
4650 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4651 const char *description, int *limit, int value)
4654 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4655 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4656 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
/*
 * Register a read/write integer sysctl backed directly by *limit,
 * seeded with 'value', for the RX processing limit.  Structurally
 * identical to lem_set_flow_cntrl.
 */
4660 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4661 const char *description, int *limit, int value)
4664 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4665 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4666 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);