1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 * Uncomment the following extensions for better performance in a VM,
37 * especially if you have support in the hypervisor.
38 * See http://info.iet.unipi.it/~luigi/netmap/
40 // #define BATCH_DISPATCH
41 // #define NIC_SEND_COMBINING
42 // #define NIC_PARAVIRT /* enable virtio-like synchronization */
45 #include "opt_inet6.h"
47 #ifdef HAVE_KERNEL_OPTION_HEADERS
48 #include "opt_device_polling.h"
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/buf_ring.h>
55 #include <sys/endian.h>
56 #include <sys/kernel.h>
57 #include <sys/kthread.h>
58 #include <sys/malloc.h>
60 #include <sys/module.h>
62 #include <sys/socket.h>
63 #include <sys/sockio.h>
64 #include <sys/sysctl.h>
65 #include <sys/taskqueue.h>
66 #include <sys/eventhandler.h>
67 #include <machine/bus.h>
68 #include <machine/resource.h>
71 #include <net/ethernet.h>
73 #include <net/if_var.h>
74 #include <net/if_arp.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
78 #include <net/if_types.h>
79 #include <net/if_vlan_var.h>
81 #include <netinet/in_systm.h>
82 #include <netinet/in.h>
83 #include <netinet/if_ether.h>
84 #include <netinet/ip.h>
85 #include <netinet/ip6.h>
86 #include <netinet/tcp.h>
87 #include <netinet/udp.h>
89 #include <machine/in_cksum.h>
90 #include <dev/led/led.h>
91 #include <dev/pci/pcivar.h>
92 #include <dev/pci/pcireg.h>
94 #include "e1000_api.h"
97 /*********************************************************************
98 * Legacy Em Driver version:
99 *********************************************************************/
100 char lem_driver_version[] = "1.1.0";
102 /*********************************************************************
103 * PCI Device ID Table
105 * Used by probe to select devices to load on
106 * Last field stores an index into e1000_strings
107 * Last entry must be all 0s
109 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
110 *********************************************************************/
112 static em_vendor_info_t lem_vendor_info_array[] =
114 /* Intel(R) PRO/1000 Network Connection */
115 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
118 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
126 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
132 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
142 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
143 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
147 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
148 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
149 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
150 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
151 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
152 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
153 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
154 PCI_ANY_ID, PCI_ANY_ID, 0},
156 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
157 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
158 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
159 /* required last entry */
163 /*********************************************************************
164 * Table of branding strings for all supported NICs.
165 *********************************************************************/
167 static char *lem_strings[] = {
168 "Intel(R) PRO/1000 Legacy Network Connection"
171 /*********************************************************************
172 * Function prototypes
173 *********************************************************************/
174 static int lem_probe(device_t);
175 static int lem_attach(device_t);
176 static int lem_detach(device_t);
177 static int lem_shutdown(device_t);
178 static int lem_suspend(device_t);
179 static int lem_resume(device_t);
180 static void lem_start(if_t);
181 static void lem_start_locked(if_t ifp);
182 static int lem_ioctl(if_t, u_long, caddr_t);
183 static uint64_t lem_get_counter(if_t, ift_counter);
184 static void lem_init(void *);
185 static void lem_init_locked(struct adapter *);
186 static void lem_stop(void *);
187 static void lem_media_status(if_t, struct ifmediareq *);
188 static int lem_media_change(if_t);
189 static void lem_identify_hardware(struct adapter *);
190 static int lem_allocate_pci_resources(struct adapter *);
191 static int lem_allocate_irq(struct adapter *adapter);
192 static void lem_free_pci_resources(struct adapter *);
193 static void lem_local_timer(void *);
194 static int lem_hardware_init(struct adapter *);
195 static int lem_setup_interface(device_t, struct adapter *);
196 static void lem_setup_transmit_structures(struct adapter *);
197 static void lem_initialize_transmit_unit(struct adapter *);
198 static int lem_setup_receive_structures(struct adapter *);
199 static void lem_initialize_receive_unit(struct adapter *);
200 static void lem_enable_intr(struct adapter *);
201 static void lem_disable_intr(struct adapter *);
202 static void lem_free_transmit_structures(struct adapter *);
203 static void lem_free_receive_structures(struct adapter *);
204 static void lem_update_stats_counters(struct adapter *);
205 static void lem_add_hw_stats(struct adapter *adapter);
206 static void lem_txeof(struct adapter *);
207 static void lem_tx_purge(struct adapter *);
208 static int lem_allocate_receive_structures(struct adapter *);
209 static int lem_allocate_transmit_structures(struct adapter *);
210 static bool lem_rxeof(struct adapter *, int, int *);
211 #ifndef __NO_STRICT_ALIGNMENT
212 static int lem_fixup_rx(struct adapter *);
214 static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
216 static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
218 static void lem_set_promisc(struct adapter *);
219 static void lem_disable_promisc(struct adapter *);
220 static void lem_set_multi(struct adapter *);
221 static void lem_update_link_status(struct adapter *);
222 static int lem_get_buf(struct adapter *, int);
223 static void lem_register_vlan(void *, if_t, u16);
224 static void lem_unregister_vlan(void *, if_t, u16);
225 static void lem_setup_vlan_hw_support(struct adapter *);
226 static int lem_xmit(struct adapter *, struct mbuf **);
227 static void lem_smartspeed(struct adapter *);
228 static int lem_82547_fifo_workaround(struct adapter *, int);
229 static void lem_82547_update_fifo_head(struct adapter *, int);
230 static int lem_82547_tx_fifo_reset(struct adapter *);
231 static void lem_82547_move_tail(void *);
232 static int lem_dma_malloc(struct adapter *, bus_size_t,
233 struct em_dma_alloc *, int);
234 static void lem_dma_free(struct adapter *, struct em_dma_alloc *);
235 static int lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
236 static void lem_print_nvm_info(struct adapter *);
237 static int lem_is_valid_ether_addr(u8 *);
238 static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
239 PDESC_ARRAY desc_array);
240 static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
241 static void lem_add_int_delay_sysctl(struct adapter *, const char *,
242 const char *, struct em_int_delay_info *, int, int);
243 static void lem_set_flow_cntrl(struct adapter *, const char *,
244 const char *, int *, int);
245 /* Management and WOL Support */
246 static void lem_init_manageability(struct adapter *);
247 static void lem_release_manageability(struct adapter *);
248 static void lem_get_hw_control(struct adapter *);
249 static void lem_release_hw_control(struct adapter *);
250 static void lem_get_wakeup(device_t);
251 static void lem_enable_wakeup(device_t);
252 static int lem_enable_phy_wakeup(struct adapter *);
253 static void lem_led_func(void *, int);
255 static void lem_intr(void *);
256 static int lem_irq_fast(void *);
257 static void lem_handle_rxtx(void *context, int pending);
258 static void lem_handle_link(void *context, int pending);
259 static void lem_add_rx_process_limit(struct adapter *, const char *,
260 const char *, int *, int);
262 #ifdef DEVICE_POLLING
263 static poll_handler_t lem_poll;
266 /*********************************************************************
267 * FreeBSD Device Interface Entry Points
268 *********************************************************************/
270 static device_method_t lem_methods[] = {
271 /* Device interface */
272 DEVMETHOD(device_probe, lem_probe),
273 DEVMETHOD(device_attach, lem_attach),
274 DEVMETHOD(device_detach, lem_detach),
275 DEVMETHOD(device_shutdown, lem_shutdown),
276 DEVMETHOD(device_suspend, lem_suspend),
277 DEVMETHOD(device_resume, lem_resume),
281 static driver_t lem_driver = {
282 "em", lem_methods, sizeof(struct adapter),
285 extern devclass_t em_devclass;
286 DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
287 MODULE_DEPEND(lem, pci, 1, 1, 1);
288 MODULE_DEPEND(lem, ether, 1, 1, 1);
290 MODULE_DEPEND(lem, netmap, 1, 1, 1);
291 #endif /* DEV_NETMAP */
293 /*********************************************************************
294 * Tunable default values.
295 *********************************************************************/
297 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
298 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
300 #define MAX_INTS_PER_SEC 8000
301 #define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256))
303 static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
304 static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
305 static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
306 static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
308 * increase lem_rxd and lem_txd to at least 2048 in netmap mode
309 * for better performance.
311 static int lem_rxd = EM_DEFAULT_RXD;
312 static int lem_txd = EM_DEFAULT_TXD;
313 static int lem_smart_pwr_down = FALSE;
315 /* Controls whether promiscuous also shows bad packets */
316 static int lem_debug_sbp = FALSE;
318 TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
319 TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
320 TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
321 TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
322 TUNABLE_INT("hw.em.rxd", &lem_rxd);
323 TUNABLE_INT("hw.em.txd", &lem_txd);
324 TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
325 TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
327 /* Interrupt style - default to fast */
328 static int lem_use_legacy_irq = 0;
329 TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq);
331 /* How many packets rxeof tries to clean at a time */
332 static int lem_rx_process_limit = 100;
333 TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
335 /* Flow control setting - default to FULL */
336 static int lem_fc_setting = e1000_fc_full;
337 TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
339 /* Global used in WOL setup with multiport cards */
340 static int global_quad_port_a = 0;
342 #ifdef DEV_NETMAP /* see ixgbe.c for details */
343 #include <dev/netmap/if_lem_netmap.h>
344 #endif /* DEV_NETMAP */
346 /*********************************************************************
347 * Device identification routine
349 * em_probe determines if the driver should be loaded on
350 * adapter based on PCI vendor/device id of the adapter.
352 * return BUS_PROBE_DEFAULT on success, positive on failure
353 *********************************************************************/
356 lem_probe(device_t dev)
358 char adapter_name[60];
359 u16 pci_vendor_id = 0;
360 u16 pci_device_id = 0;
361 u16 pci_subvendor_id = 0;
362 u16 pci_subdevice_id = 0;
363 em_vendor_info_t *ent;
365 INIT_DEBUGOUT("em_probe: begin");
367 pci_vendor_id = pci_get_vendor(dev);
368 if (pci_vendor_id != EM_VENDOR_ID)
371 pci_device_id = pci_get_device(dev);
372 pci_subvendor_id = pci_get_subvendor(dev);
373 pci_subdevice_id = pci_get_subdevice(dev);
375 ent = lem_vendor_info_array;
376 while (ent->vendor_id != 0) {
377 if ((pci_vendor_id == ent->vendor_id) &&
378 (pci_device_id == ent->device_id) &&
380 ((pci_subvendor_id == ent->subvendor_id) ||
381 (ent->subvendor_id == PCI_ANY_ID)) &&
383 ((pci_subdevice_id == ent->subdevice_id) ||
384 (ent->subdevice_id == PCI_ANY_ID))) {
385 sprintf(adapter_name, "%s %s",
386 lem_strings[ent->index],
388 device_set_desc_copy(dev, adapter_name);
389 return (BUS_PROBE_DEFAULT);
397 /*********************************************************************
398 * Device initialization routine
400 * The attach entry point is called when the driver is being loaded.
401 * This routine identifies the type of hardware, allocates all resources
402 * and initializes the hardware.
404 * return 0 on success, positive on failure
405 *********************************************************************/
408 lem_attach(device_t dev)
410 struct adapter *adapter;
414 INIT_DEBUGOUT("lem_attach: begin");
416 adapter = device_get_softc(dev);
417 adapter->dev = adapter->osdep.dev = dev;
418 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
419 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
420 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
423 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
424 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
425 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
426 lem_sysctl_nvm_info, "I", "NVM Information");
428 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
429 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
431 /* Determine hardware and mac info */
432 lem_identify_hardware(adapter);
434 /* Setup PCI resources */
435 if (lem_allocate_pci_resources(adapter)) {
436 device_printf(dev, "Allocation of PCI resources failed\n");
441 /* Do Shared Code initialization */
442 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
443 device_printf(dev, "Setup of Shared code failed\n");
448 e1000_get_bus_info(&adapter->hw);
450 /* Set up some sysctls for the tunable interrupt delays */
451 lem_add_int_delay_sysctl(adapter, "rx_int_delay",
452 "receive interrupt delay in usecs", &adapter->rx_int_delay,
453 E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
454 lem_add_int_delay_sysctl(adapter, "tx_int_delay",
455 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
456 E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
457 if (adapter->hw.mac.type >= e1000_82540) {
458 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
459 "receive interrupt delay limit in usecs",
460 &adapter->rx_abs_int_delay,
461 E1000_REGISTER(&adapter->hw, E1000_RADV),
462 lem_rx_abs_int_delay_dflt);
463 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
464 "transmit interrupt delay limit in usecs",
465 &adapter->tx_abs_int_delay,
466 E1000_REGISTER(&adapter->hw, E1000_TADV),
467 lem_tx_abs_int_delay_dflt);
468 lem_add_int_delay_sysctl(adapter, "itr",
469 "interrupt delay limit in usecs/4",
471 E1000_REGISTER(&adapter->hw, E1000_ITR),
475 /* Sysctls for limiting the amount of work done in the taskqueue */
476 lem_add_rx_process_limit(adapter, "rx_processing_limit",
477 "max number of rx packets to process", &adapter->rx_process_limit,
478 lem_rx_process_limit);
480 #ifdef NIC_SEND_COMBINING
481 /* Sysctls to control mitigation */
482 lem_add_rx_process_limit(adapter, "sc_enable",
483 "driver TDT mitigation", &adapter->sc_enable, 0);
484 #endif /* NIC_SEND_COMBINING */
485 #ifdef BATCH_DISPATCH
486 lem_add_rx_process_limit(adapter, "batch_enable",
487 "driver rx batch", &adapter->batch_enable, 0);
488 #endif /* BATCH_DISPATCH */
490 lem_add_rx_process_limit(adapter, "rx_retries",
491 "driver rx retries", &adapter->rx_retries, 0);
492 #endif /* NIC_PARAVIRT */
494 /* Sysctl for setting the interface flow control */
495 lem_set_flow_cntrl(adapter, "flow_control",
496 "flow control setting",
497 &adapter->fc_setting, lem_fc_setting);
500 * Validate number of transmit and receive descriptors. It
501 * must not exceed hardware maximum, and must be multiple
502 * of E1000_DBA_ALIGN.
504 if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
505 (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
506 (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
507 (lem_txd < EM_MIN_TXD)) {
508 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
509 EM_DEFAULT_TXD, lem_txd);
510 adapter->num_tx_desc = EM_DEFAULT_TXD;
512 adapter->num_tx_desc = lem_txd;
513 if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
514 (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
515 (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
516 (lem_rxd < EM_MIN_RXD)) {
517 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
518 EM_DEFAULT_RXD, lem_rxd);
519 adapter->num_rx_desc = EM_DEFAULT_RXD;
521 adapter->num_rx_desc = lem_rxd;
523 adapter->hw.mac.autoneg = DO_AUTO_NEG;
524 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
525 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
526 adapter->rx_buffer_len = 2048;
528 e1000_init_script_state_82541(&adapter->hw, TRUE);
529 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
532 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
533 adapter->hw.phy.mdix = AUTO_ALL_MODES;
534 adapter->hw.phy.disable_polarity_correction = FALSE;
535 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
539 * Set the frame limits assuming
540 * standard ethernet sized frames.
542 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
543 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
546 * This controls when hardware reports transmit completion
549 adapter->hw.mac.report_tx_early = 1;
552 device_printf(dev, "driver supports paravirt, subdev 0x%x\n",
553 adapter->hw.subsystem_device_id);
554 if (adapter->hw.subsystem_device_id == E1000_PARA_SUBDEV) {
557 device_printf(dev, "paravirt support on dev %p\n", adapter);
558 tsize = 4096; // XXX one page for the csb
559 if (lem_dma_malloc(adapter, tsize, &adapter->csb_mem, BUS_DMA_NOWAIT)) {
560 device_printf(dev, "Unable to allocate csb memory\n");
564 /* Setup the Base of the CSB */
565 adapter->csb = (struct paravirt_csb *)adapter->csb_mem.dma_vaddr;
566 /* force the first kick */
567 adapter->csb->host_need_txkick = 1; /* txring empty */
568 adapter->csb->guest_need_rxkick = 1; /* no rx packets */
569 bus_addr = adapter->csb_mem.dma_paddr;
570 lem_add_rx_process_limit(adapter, "csb_on",
571 "enable paravirt.", &adapter->csb->guest_csb_on, 0);
572 lem_add_rx_process_limit(adapter, "txc_lim",
573 "txc_lim", &adapter->csb->host_txcycles_lim, 1);
576 #define PA_SC(name, var, val) \
577 lem_add_rx_process_limit(adapter, name, name, var, val)
578 PA_SC("host_need_txkick",&adapter->csb->host_need_txkick, 1);
579 PA_SC("host_rxkick_at",&adapter->csb->host_rxkick_at, ~0);
580 PA_SC("guest_need_txkick",&adapter->csb->guest_need_txkick, 0);
581 PA_SC("guest_need_rxkick",&adapter->csb->guest_need_rxkick, 1);
582 PA_SC("tdt_reg_count",&adapter->tdt_reg_count, 0);
583 PA_SC("tdt_csb_count",&adapter->tdt_csb_count, 0);
584 PA_SC("tdt_int_count",&adapter->tdt_int_count, 0);
585 PA_SC("guest_need_kick_count",&adapter->guest_need_kick_count, 0);
586 /* tell the host where the block is */
587 E1000_WRITE_REG(&adapter->hw, E1000_CSBAH,
588 (u32)(bus_addr >> 32));
589 E1000_WRITE_REG(&adapter->hw, E1000_CSBAL,
592 #endif /* NIC_PARAVIRT */
594 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
597 /* Allocate Transmit Descriptor ring */
598 if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
599 device_printf(dev, "Unable to allocate tx_desc memory\n");
603 adapter->tx_desc_base =
604 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
606 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
609 /* Allocate Receive Descriptor ring */
610 if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
611 device_printf(dev, "Unable to allocate rx_desc memory\n");
615 adapter->rx_desc_base =
616 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
618 /* Allocate multicast array memory. */
619 adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
620 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
621 if (adapter->mta == NULL) {
622 device_printf(dev, "Can not allocate multicast setup array\n");
628 ** Start from a known state, this is
629 ** important in reading the nvm and
632 e1000_reset_hw(&adapter->hw);
634 /* Make sure we have a good EEPROM before we read from it */
635 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
637 ** Some PCI-E parts fail the first check due to
638 ** the link being in sleep state, call it again,
639 ** if it fails a second time its a real issue.
641 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
643 "The EEPROM Checksum Is Not Valid\n");
649 /* Copy the permanent MAC address out of the EEPROM */
650 if (e1000_read_mac_addr(&adapter->hw) < 0) {
651 device_printf(dev, "EEPROM read error while reading MAC"
657 if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
658 device_printf(dev, "Invalid MAC address\n");
663 /* Initialize the hardware */
664 if (lem_hardware_init(adapter)) {
665 device_printf(dev, "Unable to initialize the hardware\n");
670 /* Allocate transmit descriptors and buffers */
671 if (lem_allocate_transmit_structures(adapter)) {
672 device_printf(dev, "Could not setup transmit structures\n");
677 /* Allocate receive descriptors and buffers */
678 if (lem_allocate_receive_structures(adapter)) {
679 device_printf(dev, "Could not setup receive structures\n");
685 ** Do interrupt configuration
687 error = lem_allocate_irq(adapter);
692 * Get Wake-on-Lan and Management info for later use
696 /* Setup OS specific network interface */
697 if (lem_setup_interface(dev, adapter) != 0)
700 /* Initialize statistics */
701 lem_update_stats_counters(adapter);
703 adapter->hw.mac.get_link_status = 1;
704 lem_update_link_status(adapter);
706 /* Indicate SOL/IDER usage */
707 if (e1000_check_reset_block(&adapter->hw))
709 "PHY reset is blocked due to SOL/IDER session.\n");
711 /* Do we need workaround for 82544 PCI-X adapter? */
712 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
713 adapter->hw.mac.type == e1000_82544)
714 adapter->pcix_82544 = TRUE;
716 adapter->pcix_82544 = FALSE;
718 /* Register for VLAN events */
719 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
720 lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
721 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
722 lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
724 lem_add_hw_stats(adapter);
726 /* Non-AMT based hardware can now take control from firmware */
727 if (adapter->has_manage && !adapter->has_amt)
728 lem_get_hw_control(adapter);
730 /* Tell the stack that the interface is not active */
731 if_setdrvflagbits(adapter->ifp, 0, IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
733 adapter->led_dev = led_create(lem_led_func, adapter,
734 device_get_nameunit(dev));
737 lem_netmap_attach(adapter);
738 #endif /* DEV_NETMAP */
739 INIT_DEBUGOUT("lem_attach: end");
744 lem_free_transmit_structures(adapter);
747 lem_release_hw_control(adapter);
748 lem_dma_free(adapter, &adapter->rxdma);
750 lem_dma_free(adapter, &adapter->txdma);
753 lem_dma_free(adapter, &adapter->csb_mem);
755 #endif /* NIC_PARAVIRT */
758 if (adapter->ifp != (void *)NULL)
759 if_free(adapter->ifp);
760 lem_free_pci_resources(adapter);
761 free(adapter->mta, M_DEVBUF);
762 EM_TX_LOCK_DESTROY(adapter);
763 EM_RX_LOCK_DESTROY(adapter);
764 EM_CORE_LOCK_DESTROY(adapter);
769 /*********************************************************************
770 * Device removal routine
772 * The detach entry point is called when the driver is being removed.
773 * This routine stops the adapter and deallocates all the resources
774 * that were allocated for driver operation.
776 * return 0 on success, positive on failure
777 *********************************************************************/
/*
 * NOTE(review): this chunk appears extraction-damaged; several interior
 * lines of the function (closing braces, the stop call, final return)
 * are missing here — compare against the full source before relying on
 * the exact sequence below.
 */
780 lem_detach(device_t dev)
782 struct adapter *adapter = device_get_softc(dev);
783 if_t ifp = adapter->ifp;
785 INIT_DEBUGOUT("em_detach: begin");
787 /* Make sure VLANS are not using driver */
788 if (if_vlantrunkinuse(ifp)) {
789 device_printf(dev,"Vlan in use, detach first\n");
793 #ifdef DEVICE_POLLING
794 if (if_getcapenable(ifp) & IFCAP_POLLING)
795 ether_poll_deregister(ifp);
/* Tear down the status LED before stopping the adapter. */
798 if (adapter->led_dev != NULL)
799 led_destroy(adapter->led_dev);
801 EM_CORE_LOCK(adapter);
/* Flag detach-in-progress so concurrent ioctls bail out early. */
803 adapter->in_detach = 1;
805 e1000_phy_hw_reset(&adapter->hw);
807 lem_release_manageability(adapter);
809 EM_TX_UNLOCK(adapter);
810 EM_CORE_UNLOCK(adapter);
812 /* Unregister VLAN events */
813 if (adapter->vlan_attach != NULL)
814 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
815 if (adapter->vlan_detach != NULL)
816 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* Detach from the network stack and stop the timers. */
818 ether_ifdetach(adapter->ifp);
819 callout_drain(&adapter->timer);
820 callout_drain(&adapter->tx_fifo_timer);
824 #endif /* DEV_NETMAP */
825 lem_free_pci_resources(adapter);
826 bus_generic_detach(dev);
/* Release software ring state before freeing the DMA descriptor rings. */
829 lem_free_transmit_structures(adapter);
830 lem_free_receive_structures(adapter);
832 /* Free Transmit Descriptor ring */
833 if (adapter->tx_desc_base) {
834 lem_dma_free(adapter, &adapter->txdma);
835 adapter->tx_desc_base = NULL;
838 /* Free Receive Descriptor ring */
839 if (adapter->rx_desc_base) {
840 lem_dma_free(adapter, &adapter->rxdma);
841 adapter->rx_desc_base = NULL;
846 lem_dma_free(adapter, &adapter->csb_mem);
849 #endif /* NIC_PARAVIRT */
/* Hand control back to firmware, then destroy the softc locks. */
850 lem_release_hw_control(adapter);
851 free(adapter->mta, M_DEVBUF);
852 EM_TX_LOCK_DESTROY(adapter);
853 EM_RX_LOCK_DESTROY(adapter);
854 EM_CORE_LOCK_DESTROY(adapter);
859 /*********************************************************************
861 * Shutdown entry point
863 **********************************************************************/
866 lem_shutdown(device_t dev)
868 return lem_suspend(dev);
872 * Suspend/resume device methods.
875 lem_suspend(device_t dev)
877 struct adapter *adapter = device_get_softc(dev);
879 EM_CORE_LOCK(adapter);
881 lem_release_manageability(adapter);
882 lem_release_hw_control(adapter);
883 lem_enable_wakeup(dev);
885 EM_CORE_UNLOCK(adapter);
887 return bus_generic_suspend(dev);
891 lem_resume(device_t dev)
893 struct adapter *adapter = device_get_softc(dev);
894 if_t ifp = adapter->ifp;
896 EM_CORE_LOCK(adapter);
897 lem_init_locked(adapter);
898 lem_init_manageability(adapter);
899 EM_CORE_UNLOCK(adapter);
902 return bus_generic_resume(dev);
/*
 * Transmit path: drain the interface send queue into the TX descriptor
 * ring.  Caller must hold the TX lock (asserted below).
 *
 * NOTE(review): this chunk appears extraction-damaged; several interior
 * lines (early returns, loop braces, break statements) are missing here —
 * compare against the full source before relying on the exact sequence.
 */
907 lem_start_locked(if_t ifp)
909 struct adapter *adapter = if_getsoftc(ifp);
912 EM_TX_LOCK_ASSERT(adapter);
/* Bail unless the interface is RUNNING and not OACTIVE, with link up. */
914 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
917 if (!adapter->link_active)
921 * Force a cleanup if number of TX descriptors
922 * available hits the threshold
924 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
926 /* Now do we at least have a minimal? */
927 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
/* Account a descriptor-starvation event for the statistics. */
928 adapter->no_tx_desc_avail1++;
/* Dequeue and encapsulate frames until the send queue is empty. */
933 while (!if_sendq_empty(ifp)) {
934 m_head = if_dequeue(ifp);
939 * Encapsulation can modify our pointer, and or make it
940 * NULL on failure. In that event, we can't requeue.
942 if (lem_xmit(adapter, &m_head)) {
/* Ring full: mark OACTIVE and push the frame back on the queue. */
945 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
946 if_sendq_prepend(ifp, m_head);
950 /* Send a copy of the frame to the BPF listener */
951 if_etherbpfmtap(ifp, m_head);
953 /* Set timeout in case hardware has problems transmitting. */
954 adapter->watchdog_check = TRUE;
955 adapter->watchdog_time = ticks;
/* Throttle further queuing when the ring is nearly exhausted. */
957 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
958 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
/* Paravirt path: ask the host for a TX kick and reap completions. */
960 if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE && adapter->csb &&
961 adapter->csb->guest_csb_on &&
962 !(adapter->csb->guest_need_txkick & 1)) {
963 adapter->csb->guest_need_txkick = 1;
964 adapter->guest_need_kick_count++;
965 // XXX memory barrier
966 lem_txeof(adapter); // XXX possibly clear IFF_DRV_OACTIVE
968 #endif /* NIC_PARAVIRT */
976 struct adapter *adapter = if_getsoftc(ifp);
979 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
980 lem_start_locked(ifp);
981 EM_TX_UNLOCK(adapter);
984 /*********************************************************************
987 * em_ioctl is called when the user wants to configure the
990 * return 0 on success, positive on failure
991 **********************************************************************/
/*
 * lem_ioctl - ifnet ioctl entry point: configures the interface from
 * userland requests (MTU, flags, multicast, media, capabilities).
 * Returns 0 on success, positive errno on failure.
 *
 * NOTE(review): this listing is missing interleaved source lines
 * (case labels, breaks, closing braces); code is left byte-identical,
 * comments only.  Locking: each case that touches shared state takes
 * EM_CORE_LOCK, as visible below.
 */
994 lem_ioctl(if_t ifp, u_long command, caddr_t data)
996 struct adapter *adapter = if_getsoftc(ifp);
997 struct ifreq *ifr = (struct ifreq *)data;
998 #if defined(INET) || defined(INET6)
999 struct ifaddr *ifa = (struct ifaddr *)data;
1001 bool avoid_reset = FALSE;
/* Bail out early if the device is mid-detach. */
1004 if (adapter->in_detach)
/* SIOCSIFADDR path: note whether a full reinit can be avoided. */
1010 if (ifa->ifa_addr->sa_family == AF_INET)
1014 if (ifa->ifa_addr->sa_family == AF_INET6)
1018 ** Calling init results in link renegotiation,
1019 ** so we avoid doing it when possible.
1022 if_setflagbits(ifp, IFF_UP, 0);
1023 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
1026 if (!(if_getflags(ifp) & IFF_NOARP))
1027 arp_ifinit(ifp, ifa);
1030 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate against the per-MAC maximum frame size. */
1036 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1038 EM_CORE_LOCK(adapter);
1039 switch (adapter->hw.mac.type) {
1041 max_frame_size = ETHER_MAX_LEN;
1044 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1046 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1048 EM_CORE_UNLOCK(adapter);
1053 if_setmtu(ifp, ifr->ifr_mtu);
1054 adapter->max_frame_size =
1055 if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
/* MTU change requires a reinit of the hardware rings. */
1056 lem_init_locked(adapter);
1057 EM_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: re-init or just re-apply promisc bits as needed. */
1061 IOCTL_DEBUGOUT("ioctl rcv'd:\
1062 SIOCSIFFLAGS (Set Interface Flags)");
1063 EM_CORE_LOCK(adapter);
1064 if (if_getflags(ifp) & IFF_UP) {
1065 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
/* Only PROMISC/ALLMULTI changed: avoid a full reinit. */
1066 if ((if_getflags(ifp) ^ adapter->if_flags) &
1067 (IFF_PROMISC | IFF_ALLMULTI)) {
1068 lem_disable_promisc(adapter);
1069 lem_set_promisc(adapter);
1072 lem_init_locked(adapter);
1074 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1075 EM_TX_LOCK(adapter);
1077 EM_TX_UNLOCK(adapter);
/* Remember the flags so the next delta can be computed. */
1079 adapter->if_flags = if_getflags(ifp);
1080 EM_CORE_UNLOCK(adapter);
/* SIOCADDMULTI/SIOCDELMULTI: reload the multicast filter table. */
1084 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
1085 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1086 EM_CORE_LOCK(adapter);
1087 lem_disable_intr(adapter);
1088 lem_set_multi(adapter);
/* 82542 rev 2 workaround: RX unit must be reprogrammed. */
1089 if (adapter->hw.mac.type == e1000_82542 &&
1090 adapter->hw.revision_id == E1000_REVISION_2) {
1091 lem_initialize_receive_unit(adapter);
1093 #ifdef DEVICE_POLLING
/* Leave interrupts off while polling is active. */
1094 if (!(if_getcapenable(ifp) & IFCAP_POLLING))
1096 lem_enable_intr(adapter);
1097 EM_CORE_UNLOCK(adapter);
/* SIOCxIFMEDIA: refuse media changes during a SOL/IDER session. */
1101 /* Check SOL/IDER usage */
1102 EM_CORE_LOCK(adapter);
1103 if (e1000_check_reset_block(&adapter->hw)) {
1104 EM_CORE_UNLOCK(adapter);
1105 device_printf(adapter->dev, "Media change is"
1106 " blocked due to SOL/IDER session.\n");
1109 EM_CORE_UNLOCK(adapter);
1111 IOCTL_DEBUGOUT("ioctl rcv'd: \
1112 SIOCxIFMEDIA (Get/Set Interface Media)");
1113 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle only the capability bits that changed. */
1119 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1121 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1122 #ifdef DEVICE_POLLING
1123 if (mask & IFCAP_POLLING) {
1124 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1125 error = ether_poll_register(lem_poll, ifp);
1128 EM_CORE_LOCK(adapter);
1129 lem_disable_intr(adapter);
1130 if_setcapenablebit(ifp, IFCAP_POLLING, 0);
1131 EM_CORE_UNLOCK(adapter);
1133 error = ether_poll_deregister(ifp);
1134 /* Enable interrupt even in error case */
1135 EM_CORE_LOCK(adapter);
1136 lem_enable_intr(adapter);
1137 if_setcapenablebit(ifp, 0, IFCAP_POLLING);
1138 EM_CORE_UNLOCK(adapter);
1142 if (mask & IFCAP_HWCSUM) {
1143 if_togglecapenable(ifp, IFCAP_HWCSUM);
1146 if (mask & IFCAP_VLAN_HWTAGGING) {
1147 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
/* WOL sub-capabilities only if WOL is supported at all. */
1150 if ((mask & IFCAP_WOL) &&
1151 (if_getcapabilities(ifp) & IFCAP_WOL) != 0) {
1152 if (mask & IFCAP_WOL_MCAST)
1153 if_togglecapenable(ifp, IFCAP_WOL_MCAST);
1154 if (mask & IFCAP_WOL_MAGIC)
1155 if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
1157 if (reinit && (if_getdrvflags(ifp) & IFF_DRV_RUNNING))
/* Unknown commands fall through to the generic ethernet handler. */
1164 error = ether_ioctl(ifp, command, data);
1172 /*********************************************************************
1175 * This routine is used in two ways. It is used by the stack as
1176 * init entry point in network interface structure. It is also used
1177 * by the driver as a hw/sw initialization routine to get to a
1180 * return 0 on success, positive on failure
1181 **********************************************************************/
/*
 * lem_init_locked - hw/sw (re)initialization: programs the packet
 * buffer split, MAC address, VLAN/checksum offload, TX/RX rings,
 * multicast table and interrupts, then marks the interface RUNNING.
 * Caller must hold EM_CORE_LOCK (asserted below).
 *
 * NOTE(review): listing has gaps (non-contiguous embedded line
 * numbers); code left byte-identical, comments only.
 */
1184 lem_init_locked(struct adapter *adapter)
1186 if_t ifp = adapter->ifp;
1187 device_t dev = adapter->dev;
1190 INIT_DEBUGOUT("lem_init: begin");
1192 EM_CORE_LOCK_ASSERT(adapter);
/* Quiesce the device (lem_stop presumably called in missing lines). */
1194 EM_TX_LOCK(adapter);
1196 EM_TX_UNLOCK(adapter);
1199 * Packet Buffer Allocation (PBA)
1200 * Writing PBA sets the receive portion of the buffer
1201 * the remainder is used for the transmit buffer.
1203 * Devices before the 82547 had a Packet Buffer of 64K.
1204 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1205 * After the 82547 the buffer was reduced to 40K.
1206 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1207 * Note: default does not leave enough room for Jumbo Frame >10k.
1209 switch (adapter->hw.mac.type) {
1211 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1212 if (adapter->max_frame_size > 8192)
1213 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1215 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 half-duplex FIFO workaround bookkeeping (see lem_82547_*). */
1216 adapter->tx_fifo_head = 0;
1217 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1218 adapter->tx_fifo_size =
1219 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1222 /* Devices before 82547 had a Packet Buffer of 64K. */
1223 if (adapter->max_frame_size > 8192)
1224 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1226 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1229 INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
1230 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1232 /* Get the latest mac address, User can use a LAA */
1233 bcopy(if_getlladdr(adapter->ifp), adapter->hw.mac.addr,
1236 /* Put the address into the Receive Address Array */
1237 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1239 /* Initialize the hardware */
1240 if (lem_hardware_init(adapter)) {
1241 device_printf(dev, "Unable to initialize the hardware\n");
1244 lem_update_link_status(adapter);
1246 /* Setup VLAN support, basic and offload if available */
1247 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1249 /* Set hardware offload abilities */
1250 if_clearhwassist(ifp);
/* TX checksum offload exists only on 82543 and newer. */
1251 if (adapter->hw.mac.type >= e1000_82543) {
1252 if (if_getcapenable(ifp) & IFCAP_TXCSUM)
1253 if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
1256 /* Configure for OS presence */
1257 lem_init_manageability(adapter);
1259 /* Prepare transmit descriptors and buffers */
1260 lem_setup_transmit_structures(adapter);
1261 lem_initialize_transmit_unit(adapter);
1263 /* Setup Multicast table */
1264 lem_set_multi(adapter);
1266 /* Prepare receive descriptors and buffers */
1267 if (lem_setup_receive_structures(adapter)) {
1268 device_printf(dev, "Could not setup receive structures\n");
1269 EM_TX_LOCK(adapter);
1271 EM_TX_UNLOCK(adapter);
1274 lem_initialize_receive_unit(adapter);
1276 /* Use real VLAN Filter support? */
1277 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
1278 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
1279 /* Use real VLAN Filter support */
1280 lem_setup_vlan_hw_support(adapter);
/* Otherwise just enable VLAN tag stripping/insertion (VME). */
1283 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1284 ctrl |= E1000_CTRL_VME;
1285 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1289 /* Don't lose promiscuous settings */
1290 lem_set_promisc(adapter);
1292 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
1294 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1295 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1297 #ifdef DEVICE_POLLING
1299 * Only enable interrupts if we are not polling, make sure
1300 * they are off otherwise.
1302 if (if_getcapenable(ifp) & IFCAP_POLLING)
1303 lem_disable_intr(adapter);
1305 #endif /* DEVICE_POLLING */
1306 lem_enable_intr(adapter);
1308 /* AMT based hardware can now take control from firmware */
1309 if (adapter->has_manage && adapter->has_amt)
1310 lem_get_hw_control(adapter);
/*
 * NOTE(review): fragment of lem_init() — the function header is
 * missing from this listing.  Thin locked wrapper: takes the core
 * lock and delegates to lem_init_locked().
 */
1316 struct adapter *adapter = arg;
1318 EM_CORE_LOCK(adapter);
1319 lem_init_locked(adapter);
1320 EM_CORE_UNLOCK(adapter);
1324 #ifdef DEVICE_POLLING
1325 /*********************************************************************
1327 * Legacy polling routine
1329 *********************************************************************/
/*
 * lem_poll - DEVICE_POLLING entry point.  On POLL_AND_CHECK_STATUS it
 * also samples ICR for link events; it then drains RX and kicks TX.
 *
 * NOTE(review): listing has gaps; the returned rx_done handling and
 * lem_txeof() call are presumably in the missing lines — confirm.
 */
1331 lem_poll(if_t ifp, enum poll_cmd cmd, int count)
1333 struct adapter *adapter = if_getsoftc(ifp);
1334 u32 reg_icr, rx_done = 0;
1336 EM_CORE_LOCK(adapter);
/* Nothing to do if the interface went down. */
1337 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
1338 EM_CORE_UNLOCK(adapter);
1342 if (cmd == POLL_AND_CHECK_STATUS) {
1343 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* Link state change: refresh status and restart the timer. */
1344 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1345 callout_stop(&adapter->timer);
1346 adapter->hw.mac.get_link_status = 1;
1347 lem_update_link_status(adapter);
1348 callout_reset(&adapter->timer, hz,
1349 lem_local_timer, adapter);
1352 EM_CORE_UNLOCK(adapter);
1354 lem_rxeof(adapter, count, &rx_done);
1356 EM_TX_LOCK(adapter);
/* Restart transmission if packets are queued. */
1358 if(!if_sendq_empty(ifp))
1359 lem_start_locked(ifp);
1360 EM_TX_UNLOCK(adapter);
1363 #endif /* DEVICE_POLLING */
1365 /*********************************************************************
1367 * Legacy Interrupt Service routine
1369 *********************************************************************/
/*
 * NOTE(review): fragment of lem_intr(), the legacy interrupt service
 * routine — the function header is missing from this listing.
 * Reads/acknowledges ICR, handles link events, then processes RX/TX.
 */
1373 struct adapter *adapter = arg;
1374 if_t ifp = adapter->ifp;
/* Ignore interrupts while polling is active or interface is down. */
1378 if ((if_getcapenable(ifp) & IFCAP_POLLING) ||
1379 ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
1382 EM_CORE_LOCK(adapter);
1383 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1384 if (reg_icr & E1000_ICR_RXO)
1385 adapter->rx_overruns++;
/* All-ones means the device is gone; zero means not our interrupt. */
1387 if ((reg_icr == 0xffffffff) || (reg_icr == 0)) {
1388 EM_CORE_UNLOCK(adapter);
/* Link event: refresh link state and purge stale TX work. */
1392 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1393 callout_stop(&adapter->timer);
1394 adapter->hw.mac.get_link_status = 1;
1395 lem_update_link_status(adapter);
1396 /* Deal with TX cruft when link lost */
1397 lem_tx_purge(adapter);
1398 callout_reset(&adapter->timer, hz,
1399 lem_local_timer, adapter);
1400 EM_CORE_UNLOCK(adapter);
1404 EM_CORE_UNLOCK(adapter);
1405 lem_rxeof(adapter, -1, NULL);
1407 EM_TX_LOCK(adapter);
1409 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) &&
1410 (!if_sendq_empty(ifp)))
1411 lem_start_locked(ifp);
1412 EM_TX_UNLOCK(adapter);
/*
 * lem_handle_link - deferred (taskqueue) link-event handler: updates
 * link status, purges stale TX work, and restarts the local timer.
 */
1418 lem_handle_link(void *context, int pending)
1420 struct adapter *adapter = context;
1421 if_t ifp = adapter->ifp;
/* Nothing to do if the interface is down. */
1423 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
1426 EM_CORE_LOCK(adapter);
1427 callout_stop(&adapter->timer);
1428 lem_update_link_status(adapter);
1429 /* Deal with TX cruft when link lost */
1430 lem_tx_purge(adapter);
1431 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1432 EM_CORE_UNLOCK(adapter);
1436 /* Combined RX/TX handler, used by Legacy and MSI */
/*
 * lem_handle_rxtx - deferred RX/TX processing for the fast interrupt
 * path.  Re-enqueues itself while RX work remains (see 'more' below),
 * and re-enables interrupts once done.
 */
1438 lem_handle_rxtx(void *context, int pending)
1440 struct adapter *adapter = context;
1441 if_t ifp = adapter->ifp;
1444 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1445 bool more = lem_rxeof(adapter, adapter->rx_process_limit, NULL);
1446 EM_TX_LOCK(adapter);
1448 if(!if_sendq_empty(ifp))
1449 lem_start_locked(ifp);
1450 EM_TX_UNLOCK(adapter);
/* More RX pending: run the task again rather than unmask now. */
1452 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1457 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1458 lem_enable_intr(adapter);
1461 /*********************************************************************
1463 * Fast Legacy/MSI Combined Interrupt Service routine
1465 *********************************************************************/
/*
 * lem_irq_fast - fast (filter) interrupt handler for Legacy/MSI.
 * Claims the interrupt, masks further interrupts, and defers all real
 * work to the rxtx/link taskqueues.  Returns FILTER_STRAY when the
 * interrupt is not ours, FILTER_HANDLED otherwise.
 */
1467 lem_irq_fast(void *arg)
1469 struct adapter *adapter = arg;
1475 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* All-ones ICR: device has disappeared (e.g. surprise removal). */
1478 if (reg_icr == 0xffffffff)
1479 return FILTER_STRAY;
1481 /* Definitely not our interrupt. */
1483 return FILTER_STRAY;
1486 * Mask interrupts until the taskqueue is finished running. This is
1487 * cheap, just assume that it is needed. This also works around the
1488 * MSI message reordering errata on certain systems.
1490 lem_disable_intr(adapter);
1491 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1493 /* Link status change */
1494 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1495 adapter->hw.mac.get_link_status = 1;
1496 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1499 if (reg_icr & E1000_ICR_RXO)
1500 adapter->rx_overruns++;
1501 return FILTER_HANDLED;
1505 /*********************************************************************
1507 * Media Ioctl callback
1509 * This routine is called whenever the user queries the status of
1510 * the interface using ifconfig.
1512 **********************************************************************/
/*
 * lem_media_status - ifmedia status callback (ifconfig query): reports
 * link validity/activity and the active media subtype and duplex.
 */
1514 lem_media_status(if_t ifp, struct ifmediareq *ifmr)
1516 struct adapter *adapter = if_getsoftc(ifp);
1517 u_char fiber_type = IFM_1000_SX;
1519 INIT_DEBUGOUT("lem_media_status: begin");
1521 EM_CORE_LOCK(adapter);
1522 lem_update_link_status(adapter);
1524 ifmr->ifm_status = IFM_AVALID;
1525 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive and return early. */
1527 if (!adapter->link_active) {
1528 EM_CORE_UNLOCK(adapter);
1532 ifmr->ifm_status |= IFM_ACTIVE;
1534 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1535 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
/* 82545 fiber reports as 1000base-LX rather than -SX. */
1536 if (adapter->hw.mac.type == e1000_82545)
1537 fiber_type = IFM_1000_LX;
1538 ifmr->ifm_active |= fiber_type | IFM_FDX;
/* Copper: map the negotiated speed onto the ifmedia subtype. */
1540 switch (adapter->link_speed) {
1542 ifmr->ifm_active |= IFM_10_T;
1545 ifmr->ifm_active |= IFM_100_TX;
1548 ifmr->ifm_active |= IFM_1000_T;
1551 if (adapter->link_duplex == FULL_DUPLEX)
1552 ifmr->ifm_active |= IFM_FDX;
1554 ifmr->ifm_active |= IFM_HDX;
1556 EM_CORE_UNLOCK(adapter);
1559 /*********************************************************************
1561 * Media Ioctl callback
1563 * This routine is called when the user changes speed/duplex using
1564 * media/mediaopt option with ifconfig.
1566 **********************************************************************/
/*
 * lem_media_change - ifmedia change callback (ifconfig media/mediaopt):
 * programs autoneg or forced speed/duplex from the requested media
 * word, then reinitializes the adapter.
 */
1568 lem_media_change(if_t ifp)
1570 struct adapter *adapter = if_getsoftc(ifp);
1571 struct ifmedia *ifm = &adapter->media;
1573 INIT_DEBUGOUT("lem_media_change: begin");
1575 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1578 EM_CORE_LOCK(adapter);
1579 switch (IFM_SUBTYPE(ifm->ifm_media)) {
/* Auto: advertise the default set. */
1581 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1582 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
/* 1000baseT: gigabit requires autoneg, full duplex only. */
1587 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1588 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
/* 100baseTX: forced speed, duplex taken from the media word. */
1591 adapter->hw.mac.autoneg = FALSE;
1592 adapter->hw.phy.autoneg_advertised = 0;
1593 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1594 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1596 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
/* 10baseT: forced speed, duplex taken from the media word. */
1599 adapter->hw.mac.autoneg = FALSE;
1600 adapter->hw.phy.autoneg_advertised = 0;
1601 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1602 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1604 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1607 device_printf(adapter->dev, "Unsupported media type\n");
/* Apply the new settings with a full reinit. */
1610 lem_init_locked(adapter);
1611 EM_CORE_UNLOCK(adapter);
1616 /*********************************************************************
1618 * This routine maps the mbufs to tx descriptors.
1620 * return 0 on success, positive on failure
1621 **********************************************************************/
/*
 * lem_xmit - map one mbuf chain onto TX descriptors and hand it to the
 * hardware.  Returns 0 on success, positive errno on failure; on some
 * failure paths *m_headp is freed and NULLed (caller must not requeue
 * then — see lem_start_locked).
 *
 * NOTE(review): listing has gaps (non-contiguous embedded line
 * numbers); code left byte-identical, comments only.
 */
1624 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1626 bus_dma_segment_t segs[EM_MAX_SCATTER];
1628 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1629 struct e1000_tx_desc *ctxd = NULL;
1630 struct mbuf *m_head;
1631 u32 txd_upper, txd_lower, txd_used, txd_saved;
1632 int error, nsegs, i, j, first, last = 0;
1635 txd_upper = txd_lower = txd_used = txd_saved = 0;
1638 ** When doing checksum offload, it is critical to
1639 ** make sure the first mbuf has more than header,
1640 ** because that routine expects data to be present.
1642 if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
1643 (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
1644 m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
1651 * Map the packet for DMA
1653 * Capture the first descriptor index,
1654 * this descriptor will have the index
1655 * of the EOP which is the only one that
1656 * now gets a DONE bit writeback.
1658 first = adapter->next_avail_tx_desc;
1659 tx_buffer = &adapter->tx_buffer_area[first];
1660 tx_buffer_mapped = tx_buffer;
1661 map = tx_buffer->map;
1663 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1664 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1667 * There are two types of errors we can (try) to handle:
1668 * - EFBIG means the mbuf chain was too long and bus_dma ran
1669 * out of segments. Defragment the mbuf chain and try again.
1670 * - ENOMEM means bus_dma could not obtain enough bounce buffers
1671 * at this point in time. Defer sending and try again later.
1672 * All other errors, in particular EINVAL, are fatal and prevent the
1673 * mbuf chain from ever going through. Drop it and report error.
1675 if (error == EFBIG) {
1678 m = m_collapse(*m_headp, M_NOWAIT, EM_MAX_SCATTER);
1680 adapter->mbuf_defrag_failed++;
/* Retry the DMA load with the collapsed chain. */
1688 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1689 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1692 adapter->no_tx_dma_setup++;
1697 } else if (error != 0) {
1698 adapter->no_tx_dma_setup++;
/* Need nsegs + slack; keep 2 descriptors in reserve. */
1702 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1703 adapter->no_tx_desc_avail2++;
1704 bus_dmamap_unload(adapter->txtag, map);
1709 /* Do hardware assists */
1710 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1711 lem_transmit_checksum_setup(adapter, m_head,
1712 &txd_upper, &txd_lower);
1714 i = adapter->next_avail_tx_desc;
1715 if (adapter->pcix_82544)
1718 /* Set up our transmit descriptors */
1719 for (j = 0; j < nsegs; j++) {
1721 bus_addr_t seg_addr;
1722 /* If adapter is 82544 and on PCIX bus */
1723 if(adapter->pcix_82544) {
1724 DESC_ARRAY desc_array;
1725 u32 array_elements, counter;
1727 * Check the Address and Length combination and
1728 * split the data accordingly
1730 array_elements = lem_fill_descriptors(segs[j].ds_addr,
1731 segs[j].ds_len, &desc_array);
1732 for (counter = 0; counter < array_elements; counter++) {
/* Ring exhausted mid-packet: roll back and bail out. */
1733 if (txd_used == adapter->num_tx_desc_avail) {
1734 adapter->next_avail_tx_desc = txd_saved;
1735 adapter->no_tx_desc_avail2++;
1736 bus_dmamap_unload(adapter->txtag, map);
1739 tx_buffer = &adapter->tx_buffer_area[i];
1740 ctxd = &adapter->tx_desc_base[i];
1741 ctxd->buffer_addr = htole64(
1742 desc_array.descriptor[counter].address);
1743 ctxd->lower.data = htole32(
1744 (adapter->txd_cmd | txd_lower | (u16)
1745 desc_array.descriptor[counter].length));
1747 htole32((txd_upper));
/* Wrap the ring index. */
1749 if (++i == adapter->num_tx_desc)
1751 tx_buffer->m_head = NULL;
1752 tx_buffer->next_eop = -1;
/* Normal (non-82544/PCIX) path: one descriptor per segment. */
1756 tx_buffer = &adapter->tx_buffer_area[i];
1757 ctxd = &adapter->tx_desc_base[i];
1758 seg_addr = segs[j].ds_addr;
1759 seg_len = segs[j].ds_len;
1760 ctxd->buffer_addr = htole64(seg_addr);
1761 ctxd->lower.data = htole32(
1762 adapter->txd_cmd | txd_lower | seg_len);
1766 if (++i == adapter->num_tx_desc)
1768 tx_buffer->m_head = NULL;
1769 tx_buffer->next_eop = -1;
1773 adapter->next_avail_tx_desc = i;
1775 if (adapter->pcix_82544)
1776 adapter->num_tx_desc_avail -= txd_used;
1778 adapter->num_tx_desc_avail -= nsegs;
1780 if (m_head->m_flags & M_VLANTAG) {
1781 /* Set the vlan id. */
1782 ctxd->upper.fields.special =
1783 htole16(m_head->m_pkthdr.ether_vtag);
1784 /* Tell hardware to add tag */
1785 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
1788 tx_buffer->m_head = m_head;
/* Swap maps so the loaded map stays with the last buffer. */
1789 tx_buffer_mapped->map = tx_buffer->map;
1790 tx_buffer->map = map;
1791 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1794 * Last Descriptor of Packet
1795 * needs End Of Packet (EOP)
1796 * and Report Status (RS)
1799 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1801 * Keep track in the first buffer which
1802 * descriptor will be written back
1804 tx_buffer = &adapter->tx_buffer_area[first];
1805 tx_buffer->next_eop = last;
1806 adapter->watchdog_time = ticks;
1809 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1810 * that this frame is available to transmit.
1812 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1813 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1817 adapter->csb->guest_tdt = i;
1818 /* XXX memory barrier ? */
1819 if (adapter->csb->guest_csb_on &&
1820 !(adapter->csb->host_need_txkick & 1)) {
1821 /* XXX maybe useless
1822 * clean the ring. maybe do it before ?
1823 * maybe a little bit of hysteresis ?
1825 if (adapter->num_tx_desc_avail <= 64) {// XXX
1831 #endif /* NIC_PARAVIRT */
1833 #ifdef NIC_SEND_COMBINING
1834 if (adapter->sc_enable) {
1835 if (adapter->shadow_tdt & MIT_PENDING_INT) {
1836 /* signal intr and data pending */
1837 adapter->shadow_tdt = MIT_PENDING_TDT | (i & 0xffff);
1840 adapter->shadow_tdt = MIT_PENDING_INT;
1843 #endif /* NIC_SEND_COMBINING */
/* 82547 half-duplex: use the FIFO-workaround tail mover. */
1845 if (adapter->hw.mac.type == e1000_82547 &&
1846 adapter->link_duplex == HALF_DUPLEX)
1847 lem_82547_move_tail(adapter);
1849 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1850 if (adapter->hw.mac.type == e1000_82547)
1851 lem_82547_update_fifo_head(adapter,
1852 m_head->m_pkthdr.len);
1858 /*********************************************************************
1860 * 82547 workaround to avoid controller hang in half-duplex environment.
1861 * The workaround is to avoid queuing a large packet that would span
1862 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1863 * in this case. We do that only when FIFO is quiescent.
1865 **********************************************************************/
/*
 * lem_82547_move_tail - 82547 half-duplex FIFO workaround: advance the
 * hardware tail only when the queued bytes will not span the internal
 * TX FIFO boundary; otherwise retry from a 1-tick callout.
 * Caller must hold the TX lock (asserted below; also used as a
 * callout, hence the void *arg signature).
 */
1867 lem_82547_move_tail(void *arg)
1869 struct adapter *adapter = arg;
1870 struct e1000_tx_desc *tx_desc;
1871 u16 hw_tdt, sw_tdt, length = 0;
1874 EM_TX_LOCK_ASSERT(adapter);
1876 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
1877 sw_tdt = adapter->next_avail_tx_desc;
/* Sum the pending descriptor lengths between hw and sw tails. */
1879 while (hw_tdt != sw_tdt) {
1880 tx_desc = &adapter->tx_desc_base[hw_tdt];
1881 length += tx_desc->lower.flags.length;
1882 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1883 if (++hw_tdt == adapter->num_tx_desc)
/* FIFO not safe yet: reschedule ourselves and try again later. */
1887 if (lem_82547_fifo_workaround(adapter, length)) {
1888 adapter->tx_fifo_wrk_cnt++;
1889 callout_reset(&adapter->tx_fifo_timer, 1,
1890 lem_82547_move_tail, adapter);
1893 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
1894 lem_82547_update_fifo_head(adapter, length);
/*
 * lem_82547_fifo_workaround - decide whether a packet of 'len' bytes
 * may be released to the 82547 TX FIFO.  Non-zero return means "not
 * yet" (caller retries).  Return statements are in lines missing from
 * this listing — confirm against the full source.
 */
1901 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1903 int fifo_space, fifo_pkt_len;
/* FIFO accounting is done in EM_FIFO_HDR-sized units. */
1905 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1907 if (adapter->link_duplex == HALF_DUPLEX) {
1908 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1910 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1911 if (lem_82547_tx_fifo_reset(adapter))
/*
 * lem_82547_update_fifo_head - advance the software shadow of the
 * 82547 TX FIFO head by one rounded-up packet, wrapping at FIFO size.
 */
1922 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1924 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1926 /* tx_fifo_head is always 16 byte aligned */
1927 adapter->tx_fifo_head += fifo_pkt_len;
1928 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1929 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * lem_82547_tx_fifo_reset - reset the 82547 internal TX FIFO pointers,
 * but only when the FIFO is quiescent (head == tail for the descriptor
 * ring and both FIFO pointer pairs, and the FIFO packet count is 0).
 * The TX unit is disabled around the pointer rewrite.
 */
1935 lem_82547_tx_fifo_reset(struct adapter *adapter)
/* Only safe when all hardware queues/pointers agree (quiescent). */
1939 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1940 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1941 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1942 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1943 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1944 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1945 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1946 /* Disable TX unit */
1947 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1948 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1949 tctl & ~E1000_TCTL_EN);
1951 /* Reset FIFO pointers */
1952 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1953 adapter->tx_head_addr);
1954 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1955 adapter->tx_head_addr);
1956 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1957 adapter->tx_head_addr);
1958 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1959 adapter->tx_head_addr);
1961 /* Re-enable TX unit */
1962 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
1963 E1000_WRITE_FLUSH(&adapter->hw);
1965 adapter->tx_fifo_head = 0;
1966 adapter->tx_fifo_reset_cnt++;
/*
 * lem_set_promisc - program RCTL unicast/multicast promiscuous bits
 * from the interface's IFF_PROMISC / IFF_ALLMULTI flags.
 */
1976 lem_set_promisc(struct adapter *adapter)
1978 if_t ifp = adapter->ifp;
1981 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1983 if (if_getflags(ifp) & IFF_PROMISC) {
1984 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1985 /* Turn this on if you want to see bad packets */
1987 reg_rctl |= E1000_RCTL_SBP;
1988 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1989 } else if (if_getflags(ifp) & IFF_ALLMULTI) {
/* ALLMULTI: accept all multicast but not all unicast. */
1990 reg_rctl |= E1000_RCTL_MPE;
1991 reg_rctl &= ~E1000_RCTL_UPE;
1992 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/*
 * lem_disable_promisc - clear unicast promiscuous and bad-packet bits;
 * clear multicast promiscuous only when the multicast filter table is
 * not saturated (otherwise MPE must stay on to receive all groups).
 */
1997 lem_disable_promisc(struct adapter *adapter)
1999 if_t ifp = adapter->ifp;
2003 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2004 reg_rctl &= (~E1000_RCTL_UPE);
2005 if (if_getflags(ifp) & IFF_ALLMULTI)
2006 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2008 mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
2010 /* Don't disable if in MAX groups */
2011 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2012 reg_rctl &= (~E1000_RCTL_MPE);
2013 reg_rctl &= (~E1000_RCTL_SBP);
2014 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2018 /*********************************************************************
2021 * This routine is called whenever multicast address list is updated.
2023 **********************************************************************/
/*
 * lem_set_multi - rebuild the hardware multicast filter from the
 * interface's multicast address list; falls back to multicast-
 * promiscuous (MPE) when the list exceeds the table size.  Includes
 * the 82542 rev 2 RCTL_RST / MWI workaround around the update.
 */
2026 lem_set_multi(struct adapter *adapter)
2028 if_t ifp = adapter->ifp;
2030 u8 *mta; /* Multicast array memory */
2033 IOCTL_DEBUGOUT("lem_set_multi: begin");
2036 bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
/* 82542 rev 2 workaround: reset RX and disable MWI while updating. */
2038 if (adapter->hw.mac.type == e1000_82542 &&
2039 adapter->hw.revision_id == E1000_REVISION_2) {
2040 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2041 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2042 e1000_pci_clear_mwi(&adapter->hw);
2043 reg_rctl |= E1000_RCTL_RST;
2044 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2048 if_multiaddr_array(ifp, mta, &mcnt, MAX_NUM_MULTICAST_ADDRESSES);
2050 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2051 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2052 reg_rctl |= E1000_RCTL_MPE;
2053 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2055 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
/* Undo the 82542 rev 2 workaround. */
2057 if (adapter->hw.mac.type == e1000_82542 &&
2058 adapter->hw.revision_id == E1000_REVISION_2) {
2059 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2060 reg_rctl &= ~E1000_RCTL_RST;
2061 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2063 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2064 e1000_pci_set_mwi(&adapter->hw);
2069 /*********************************************************************
2072 * This routine checks for link status and updates statistics.
2074 **********************************************************************/
/*
 * lem_local_timer - once-per-second callout: refreshes link state and
 * statistics, runs smartspeed, and fires the TX watchdog (reinit) if
 * no descriptor has been cleaned within EM_WATCHDOG ticks.
 *
 * NOTE(review): listing has gaps; the 'return' before the watchdog
 * tail (lines 2112-2113) is presumably among the missing lines.
 */
2077 lem_local_timer(void *arg)
2079 struct adapter *adapter = arg;
2081 EM_CORE_LOCK_ASSERT(adapter);
2083 lem_update_link_status(adapter);
2084 lem_update_stats_counters(adapter);
2086 lem_smartspeed(adapter);
2089 /* recover space if needed */
2090 if (adapter->csb && adapter->csb->guest_csb_on &&
2091 (adapter->watchdog_check == TRUE) &&
2092 (ticks - adapter->watchdog_time > EM_WATCHDOG) &&
2093 (adapter->num_tx_desc_avail != adapter->num_tx_desc) ) {
2096 * lem_txeof() normally (except when space in the queue
2097 * runs low XXX) cleans watchdog_check so that
2101 #endif /* NIC_PARAVIRT */
2103 * We check the watchdog: the time since
2104 * the last TX descriptor was cleaned.
2105 * This implies a functional TX engine.
2107 if ((adapter->watchdog_check == TRUE) &&
2108 (ticks - adapter->watchdog_time > EM_WATCHDOG))
2111 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
/* Watchdog expired: log, mark interface down, and reinitialize. */
2114 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2115 if_setdrvflagbits(adapter->ifp, 0, IFF_DRV_RUNNING);
2116 adapter->watchdog_events++;
2117 lem_init_locked(adapter);
/*
 * lem_update_link_status - poll/read the current link state per media
 * type and, on a transition, update cached speed/duplex/baudrate and
 * notify the stack via if_link_state_change().
 */
2121 lem_update_link_status(struct adapter *adapter)
2123 struct e1000_hw *hw = &adapter->hw;
2124 if_t ifp = adapter->ifp;
2125 device_t dev = adapter->dev;
2128 /* Get the cached link value or read phy for real */
2129 switch (hw->phy.media_type) {
2130 case e1000_media_type_copper:
2131 if (hw->mac.get_link_status) {
2132 /* Do the work to read phy */
2133 e1000_check_for_link(hw);
2134 link_check = !hw->mac.get_link_status;
2135 if (link_check) /* ESB2 fix */
2136 e1000_cfg_on_link_up(hw);
2140 case e1000_media_type_fiber:
2141 e1000_check_for_link(hw);
/* Fiber: link state comes straight from the STATUS register. */
2142 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2145 case e1000_media_type_internal_serdes:
2146 e1000_check_for_link(hw);
2147 link_check = adapter->hw.mac.serdes_has_link;
2150 case e1000_media_type_unknown:
2154 /* Now check for a transition */
2155 if (link_check && (adapter->link_active == 0)) {
2156 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2157 &adapter->link_duplex);
2159 device_printf(dev, "Link is up %d Mbps %s\n",
2160 adapter->link_speed,
2161 ((adapter->link_duplex == FULL_DUPLEX) ?
2162 "Full Duplex" : "Half Duplex"));
2163 adapter->link_active = 1;
2164 adapter->smartspeed = 0;
2165 if_setbaudrate(ifp, adapter->link_speed * 1000000);
2166 if_link_state_change(ifp, LINK_STATE_UP);
2167 } else if (!link_check && (adapter->link_active == 1)) {
2168 if_setbaudrate(ifp, 0);
2169 adapter->link_speed = 0;
2170 adapter->link_duplex = 0;
2172 device_printf(dev, "Link is Down\n");
2173 adapter->link_active = 0;
2174 /* Link down, disable watchdog */
2175 adapter->watchdog_check = FALSE;
2176 if_link_state_change(ifp, LINK_STATE_DOWN);
2180 /*********************************************************************
2182 * This routine disables all traffic on the adapter by issuing a
2183 * global reset on the MAC and deallocates TX/RX buffers.
2185 * This routine should always be called with BOTH the CORE
2187 **********************************************************************/
/*
 * NOTE(review): fragment of lem_stop() — the function header is
 * missing from this listing.  Quiesces the adapter: masks interrupts,
 * stops callouts, clears RUNNING/OACTIVE, resets the MAC and turns
 * the LED off.  Caller must hold BOTH the core and TX locks
 * (asserted below).
 */
2192 struct adapter *adapter = arg;
2193 if_t ifp = adapter->ifp;
2195 EM_CORE_LOCK_ASSERT(adapter);
2196 EM_TX_LOCK_ASSERT(adapter);
2198 INIT_DEBUGOUT("lem_stop: begin");
2200 lem_disable_intr(adapter);
2201 callout_stop(&adapter->timer);
2202 callout_stop(&adapter->tx_fifo_timer);
2204 /* Tell the stack that the interface is no longer active */
2205 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2207 e1000_reset_hw(&adapter->hw);
/* Clear Wake Up Control on parts that have it (82544 and later). */
2208 if (adapter->hw.mac.type >= e1000_82544)
2209 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2211 e1000_led_off(&adapter->hw);
2212 e1000_cleanup_led(&adapter->hw);
2216 /*********************************************************************
2218 * Determine hardware revision.
2220 **********************************************************************/
/*
 * lem_identify_hardware - enable PCI busmastering and capture vendor/
 * device/revision/subsystem IDs from config space, then let the shared
 * code determine the MAC type.
 */
2222 lem_identify_hardware(struct adapter *adapter)
2224 device_t dev = adapter->dev;
2226 /* Make sure our PCI config space has the necessary stuff set */
2227 pci_enable_busmaster(dev);
2228 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2230 /* Save off the information about this board */
2231 adapter->hw.vendor_id = pci_get_vendor(dev);
2232 adapter->hw.device_id = pci_get_device(dev);
2233 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2234 adapter->hw.subsystem_vendor_id =
2235 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2236 adapter->hw.subsystem_device_id =
2237 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2239 /* Do Shared Code Init and Setup */
2240 if (e1000_set_mac_type(&adapter->hw)) {
2241 device_printf(dev, "Setup init failure\n");
/*
 * lem_allocate_pci_resources - map the memory BAR and, for older
 * adapters, locate and map the IO BAR.  Returns E1000_SUCCESS or an
 * errno-style value (error paths partly in lines missing from this
 * listing).
 *
 * NOTE(review): the condition at line 2266 reads "mac.type >
 * e1000_82543" although the comment says only OLDER adapters use IO
 * mapping — cannot verify intent from this fragment; compare with the
 * full source before changing.
 */
2247 lem_allocate_pci_resources(struct adapter *adapter)
2249 device_t dev = adapter->dev;
2250 int val, rid, error = E1000_SUCCESS;
2253 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2255 if (adapter->memory == NULL) {
2256 device_printf(dev, "Unable to allocate bus resource: memory\n");
2259 adapter->osdep.mem_bus_space_tag =
2260 rman_get_bustag(adapter->memory);
2261 adapter->osdep.mem_bus_space_handle =
2262 rman_get_bushandle(adapter->memory);
2263 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2265 /* Only older adapters use IO mapping */
2266 if (adapter->hw.mac.type > e1000_82543) {
2267 /* Figure out where our IO BAR is? */
2268 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2269 val = pci_read_config(dev, rid, 4);
2270 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2271 adapter->io_rid = rid;
2275 /* check for 64bit BAR */
2276 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2279 if (rid >= PCIR_CIS) {
2280 device_printf(dev, "Unable to locate IO BAR\n");
2283 adapter->ioport = bus_alloc_resource_any(dev,
2284 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2285 if (adapter->ioport == NULL) {
2286 device_printf(dev, "Unable to allocate bus resource: "
2290 adapter->hw.io_base = 0;
2291 adapter->osdep.io_bus_space_tag =
2292 rman_get_bustag(adapter->ioport);
2293 adapter->osdep.io_bus_space_handle =
2294 rman_get_bushandle(adapter->ioport);
2297 adapter->hw.back = &adapter->osdep;
2302 /*********************************************************************
2304 * Setup the Legacy or MSI Interrupt handler
2306 **********************************************************************/
/*
 * lem_allocate_irq: mask all interrupts, allocate the single IRQ
 * resource, then wire it up either as a classic ithread handler
 * (lem_use_legacy_irq tunable) or as a filter/fast interrupt with
 * taskqueue-deferred rxtx and link processing.
 * NOTE(review): extraction elided lines — the declaration/initial value
 * of 'rid' and 'error', several closing braces and returns are missing.
 */
2308 lem_allocate_irq(struct adapter *adapter)
2310 device_t dev = adapter->dev;
2313 /* Manually turn off all interrupts */
2314 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2316 /* We allocate a single interrupt resource */
2317 adapter->res[0] = bus_alloc_resource_any(dev,
2318 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2319 if (adapter->res[0] == NULL) {
2320 device_printf(dev, "Unable to allocate bus resource: "
2325 /* Do Legacy setup? */
2326 if (lem_use_legacy_irq) {
2327 if ((error = bus_setup_intr(dev, adapter->res[0],
2328 INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
2329 &adapter->tag[0])) != 0) {
2331 "Failed to register interrupt handler");
2338 * Use a Fast interrupt and the associated
2339 * deferred processing contexts.
2341 TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
2342 TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
2343 adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
2344 taskqueue_thread_enqueue, &adapter->tq);
2345 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2346 device_get_nameunit(adapter->dev));
2347 if ((error = bus_setup_intr(dev, adapter->res[0],
2348 INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
2349 &adapter->tag[0])) != 0) {
2350 device_printf(dev, "Failed to register fast interrupt "
2351 "handler: %d\n", error);
/* On failure, tear down the taskqueue created above. */
2352 taskqueue_free(adapter->tq);
/*
 * lem_free_pci_resources: undo lem_allocate_irq()/lem_allocate_pci_resources()
 * — tear down the interrupt handler, then release the IRQ, memory BAR,
 * and (if mapped) I/O BAR resources.  Each step is guarded so this is
 * safe to call after a partial attach.
 */
2362 lem_free_pci_resources(struct adapter *adapter)
2364 device_t dev = adapter->dev;
2367 if (adapter->tag[0] != NULL) {
2368 bus_teardown_intr(dev, adapter->res[0],
2370 adapter->tag[0] = NULL;
2373 if (adapter->res[0] != NULL) {
2374 bus_release_resource(dev, SYS_RES_IRQ,
2375 0, adapter->res[0]);
2378 if (adapter->memory != NULL)
2379 bus_release_resource(dev, SYS_RES_MEMORY,
2380 PCIR_BAR(0), adapter->memory);
2382 if (adapter->ioport != NULL)
2383 bus_release_resource(dev, SYS_RES_IOPORT,
2384 adapter->io_rid, adapter->ioport);
2388 /*********************************************************************
2390 * Initialize the hardware to a configuration
2391 * as specified by the adapter structure.
2393 **********************************************************************/
/*
 * lem_hardware_init: global reset, compute flow-control watermarks from
 * the packet-buffer allocation (PBA) register, apply the lem_fc_setting
 * tunable, then run the shared-code e1000_init_hw() and an initial link
 * check.
 * NOTE(review): declaration of rx_buffer_size, the PBA shift/scale, and
 * several returns/else branches are elided by extraction.
 */
2395 lem_hardware_init(struct adapter *adapter)
2397 device_t dev = adapter->dev;
2400 INIT_DEBUGOUT("lem_hardware_init: begin");
2402 /* Issue a global reset */
2403 e1000_reset_hw(&adapter->hw);
2405 /* When hardware is reset, fifo_head is also reset */
2406 adapter->tx_fifo_head = 0;
2409 * These parameters control the automatic generation (Tx) and
2410 * response (Rx) to Ethernet PAUSE frames.
2411 * - High water mark should allow for at least two frames to be
2412 * received after sending an XOFF.
2413 * - Low water mark works best when it is very near the high water mark.
2414 * This allows the receiver to restart by sending XON when it has
2415 * drained a bit. Here we use an arbitrary value of 1500 which will
2416 * restart after one full frame is pulled from the buffer. There
2417 * could be several smaller frames in the buffer and if so they will
2418 * not trigger the XON until their total number reduces the buffer
2420 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2422 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
2425 adapter->hw.fc.high_water = rx_buffer_size -
2426 roundup2(adapter->max_frame_size, 1024);
2427 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2429 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2430 adapter->hw.fc.send_xon = TRUE;
2432 /* Set Flow control, use the tunable location if sane */
2433 if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
2434 adapter->hw.fc.requested_mode = lem_fc_setting;
2436 adapter->hw.fc.requested_mode = e1000_fc_none;
2438 if (e1000_init_hw(&adapter->hw) < 0) {
2439 device_printf(dev, "Hardware Initialization Failed\n");
2443 e1000_check_for_link(&adapter->hw);
2448 /*********************************************************************
2450 * Setup networking device structure and register an interface.
2452 **********************************************************************/
/*
 * lem_setup_interface: allocate and populate the ifnet, attach it to
 * the ethernet layer, advertise capabilities (checksum offload, VLAN,
 * optional polling/WOL), and register the supported media types with
 * ifmedia.
 * NOTE(review): the extraction drops lines — the 'ifp' declaration,
 * returns, #ifdef/#endif pairs, and some ifmedia_add data arguments
 * are not visible in this view.
 */
2454 lem_setup_interface(device_t dev, struct adapter *adapter)
2458 INIT_DEBUGOUT("lem_setup_interface: begin");
2460 ifp = adapter->ifp = if_gethandle(IFT_ETHER);
2461 if (ifp == (void *)NULL) {
2462 device_printf(dev, "can not allocate ifnet structure\n");
2465 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2466 if_setinitfn(ifp, lem_init);
2467 if_setsoftc(ifp, adapter);
2468 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2469 if_setioctlfn(ifp, lem_ioctl);
2470 if_setstartfn(ifp, lem_start);
2471 if_setgetcounterfn(ifp, lem_get_counter);
2472 if_setsendqlen(ifp, adapter->num_tx_desc - 1);
2473 if_setsendqready(ifp);
2475 ether_ifattach(ifp, adapter->hw.mac.addr);
2477 if_setcapabilities(ifp, 0);
/* HW checksum offload is only available on 82543 and newer. */
2479 if (adapter->hw.mac.type >= e1000_82543) {
2480 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM, 0);
2481 if_setcapenablebit(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM, 0);
2485 * Tell the upper layer(s) we support long frames.
2487 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2488 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU, 0);
2489 if_setcapenablebit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU, 0);
2492 ** Don't turn this on by default, if vlans are
2493 ** created on another pseudo device (eg. lagg)
2494 ** then vlan events are not passed thru, breaking
2495 ** operation, but with HW FILTER off it works. If
2496 ** using vlans directly on the em driver you can
2497 ** enable this and get full hardware tag filtering.
2499 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
2501 #ifdef DEVICE_POLLING
2502 if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
2505 /* Enable only WOL MAGIC by default */
2507 if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
2508 if_setcapenablebit(ifp, IFCAP_WOL_MAGIC, 0);
2512 * Specify the media types supported by this adapter and register
2513 * callbacks to update media and link information
2515 ifmedia_init(&adapter->media, IFM_IMASK,
2516 lem_media_change, lem_media_status);
2517 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2518 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2519 u_char fiber_type = IFM_1000_SX; /* default type */
2521 if (adapter->hw.mac.type == e1000_82545)
2522 fiber_type = IFM_1000_LX;
2523 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
2525 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2527 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2528 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2530 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2532 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
/* The IFE PHY cannot do 1000BASE-T, so skip those media entries. */
2534 if (adapter->hw.phy.type != e1000_phy_ife) {
2535 ifmedia_add(&adapter->media,
2536 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2537 ifmedia_add(&adapter->media,
2538 IFM_ETHER | IFM_1000_T, 0, NULL);
2541 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2542 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2547 /*********************************************************************
2549 * Workaround for SmartSpeed on 82541 and 82547 controllers
2551 **********************************************************************/
/*
 * lem_smartspeed: periodic workaround for IGP PHYs that fail gigabit
 * master/slave negotiation.  If a config fault is seen twice in a row,
 * temporarily clear manual master/slave mode and restart autonegotiation;
 * after EM_SMARTSPEED_DOWNSHIFT polls re-enable it, and reset the state
 * counter at EM_SMARTSPEED_MAX.  Bails out early when link is up,
 * autoneg is off, or 1000FD is not advertised.
 * NOTE(review): 'phy_tmp' declaration and some braces/returns are elided
 * by extraction.
 */
2553 lem_smartspeed(struct adapter *adapter)
2557 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2558 adapter->hw.mac.autoneg == 0 ||
2559 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2562 if (adapter->smartspeed == 0) {
2563 /* If Master/Slave config fault is asserted twice,
2564 * we assume back-to-back */
2565 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2566 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2568 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2569 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2570 e1000_read_phy_reg(&adapter->hw,
2571 PHY_1000T_CTRL, &phy_tmp);
2572 if(phy_tmp & CR_1000T_MS_ENABLE) {
/* Drop manual master/slave selection and let autoneg decide. */
2573 phy_tmp &= ~CR_1000T_MS_ENABLE;
2574 e1000_write_phy_reg(&adapter->hw,
2575 PHY_1000T_CTRL, phy_tmp);
2576 adapter->smartspeed++;
2577 if(adapter->hw.mac.autoneg &&
2578 !e1000_copper_link_autoneg(&adapter->hw) &&
2579 !e1000_read_phy_reg(&adapter->hw,
2580 PHY_CONTROL, &phy_tmp)) {
2581 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2582 MII_CR_RESTART_AUTO_NEG);
2583 e1000_write_phy_reg(&adapter->hw,
2584 PHY_CONTROL, phy_tmp);
2589 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2590 /* If still no link, perhaps using 2/3 pair cable */
2591 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
/* Re-assert master/slave enable after the downshift interval. */
2592 phy_tmp |= CR_1000T_MS_ENABLE;
2593 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2594 if(adapter->hw.mac.autoneg &&
2595 !e1000_copper_link_autoneg(&adapter->hw) &&
2596 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2597 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2598 MII_CR_RESTART_AUTO_NEG);
2599 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2602 /* Restart process after EM_SMARTSPEED_MAX iterations */
2603 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2604 adapter->smartspeed = 0;
2609 * Manage DMA'able memory.
/*
 * lem_dmamap_cb: bus_dmamap_load() callback — stores the (single)
 * segment's bus address into the bus_addr_t pointed to by 'arg'.
 * NOTE(review): the error/nseg early-out visible in the full driver is
 * elided by extraction.
 */
2612 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2616 *(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * lem_dma_malloc: allocate a coherent DMA area of 'size' bytes for a
 * descriptor ring — create a tag, allocate backing memory, and load the
 * map to obtain dma_paddr.  Uses goto-style unwind on failure (the
 * labels are elided by extraction); returns E1000_SUCCESS or the
 * bus_dma error.
 */
2620 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2621 struct em_dma_alloc *dma, int mapflags)
2625 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2626 EM_DBA_ALIGN, 0, /* alignment, bounds */
2627 BUS_SPACE_MAXADDR, /* lowaddr */
2628 BUS_SPACE_MAXADDR, /* highaddr */
2629 NULL, NULL, /* filter, filterarg */
2632 size, /* maxsegsize */
2634 NULL, /* lockfunc */
2638 device_printf(adapter->dev,
2639 "%s: bus_dma_tag_create failed: %d\n",
2644 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2645 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2647 device_printf(adapter->dev,
2648 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2649 __func__, (uintmax_t)size, error);
2654 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2655 size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2656 if (error || dma->dma_paddr == 0) {
2657 device_printf(adapter->dev,
2658 "%s: bus_dmamap_load failed: %d\n",
/* Unwind path: unload map, free memory, destroy tag (labels elided). */
2666 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2668 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2669 bus_dma_tag_destroy(dma->dma_tag);
2671 dma->dma_tag = NULL;
/*
 * lem_dma_free: release everything lem_dma_malloc() set up — sync and
 * unload the map if loaded, free the backing memory if allocated, and
 * destroy the tag.  A NULL dma_tag means nothing was allocated and the
 * function returns immediately.
 */
2677 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2679 if (dma->dma_tag == NULL)
2681 if (dma->dma_paddr != 0) {
2682 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2683 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2684 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2687 if (dma->dma_vaddr != NULL) {
2688 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2689 dma->dma_vaddr = NULL;
2691 bus_dma_tag_destroy(dma->dma_tag);
2692 dma->dma_tag = NULL;
2696 /*********************************************************************
2698 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2699 * the information needed to transmit a packet on the wire.
2701 **********************************************************************/
/*
 * lem_allocate_transmit_structures: create the TX mbuf DMA tag
 * (EM_MAX_SCATTER segments of up to MCLBYTES each), allocate the
 * tx_buffer array, and create one dmamap per descriptor.  On any
 * failure falls through to lem_free_transmit_structures() for cleanup.
 * NOTE(review): 'error' declaration, returns and the fail label are
 * elided by extraction.
 */
2703 lem_allocate_transmit_structures(struct adapter *adapter)
2705 device_t dev = adapter->dev;
2706 struct em_buffer *tx_buffer;
2710 * Create DMA tags for tx descriptors
2712 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2713 1, 0, /* alignment, bounds */
2714 BUS_SPACE_MAXADDR, /* lowaddr */
2715 BUS_SPACE_MAXADDR, /* highaddr */
2716 NULL, NULL, /* filter, filterarg */
2717 MCLBYTES * EM_MAX_SCATTER, /* maxsize */
2718 EM_MAX_SCATTER, /* nsegments */
2719 MCLBYTES, /* maxsegsize */
2721 NULL, /* lockfunc */
2723 &adapter->txtag)) != 0) {
2724 device_printf(dev, "Unable to allocate TX DMA tag\n");
2728 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2729 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2730 if (adapter->tx_buffer_area == NULL) {
2731 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2736 /* Create the descriptor buffer dma maps */
2737 for (int i = 0; i < adapter->num_tx_desc; i++) {
2738 tx_buffer = &adapter->tx_buffer_area[i];
2739 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2741 device_printf(dev, "Unable to create TX DMA map\n");
/* -1 marks "no end-of-packet descriptor recorded yet". */
2744 tx_buffer->next_eop = -1;
2749 lem_free_transmit_structures(adapter);
2753 /*********************************************************************
2755 * (Re)Initialize transmit structures.
2757 **********************************************************************/
/*
 * lem_setup_transmit_structures: clear the descriptor ring, free any
 * mbufs left from a previous run, (in netmap mode) point descriptors at
 * netmap buffers, then reset the ring indices and sync the ring for the
 * hardware.  Caller must hold the TX lock.
 * NOTE(review): the #ifdef DEV_NETMAP opener, paddr/addr declarations
 * and several braces are elided by extraction.
 */
2759 lem_setup_transmit_structures(struct adapter *adapter)
2761 struct em_buffer *tx_buffer;
2763 /* we are already locked */
2764 struct netmap_adapter *na = netmap_getna(adapter->ifp);
2765 struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
2766 #endif /* DEV_NETMAP */
2768 /* Clear the old ring contents */
2769 bzero(adapter->tx_desc_base,
2770 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2772 /* Free any existing TX buffers */
2773 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2774 tx_buffer = &adapter->tx_buffer_area[i];
2775 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2776 BUS_DMASYNC_POSTWRITE);
2777 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2778 m_freem(tx_buffer->m_head);
2779 tx_buffer->m_head = NULL;
2782 /* the i-th NIC entry goes to slot si */
2783 int si = netmap_idx_n2k(&na->tx_rings[0], i);
2787 addr = PNMB(na, slot + si, &paddr);
2788 adapter->tx_desc_base[i].buffer_addr = htole64(paddr);
2789 /* reload the map for netmap mode */
2790 netmap_load_map(na, adapter->txtag, tx_buffer->map, addr);
2792 #endif /* DEV_NETMAP */
2793 tx_buffer->next_eop = -1;
2797 adapter->last_hw_offload = 0;
2798 adapter->next_avail_tx_desc = 0;
2799 adapter->next_tx_to_clean = 0;
2800 adapter->num_tx_desc_avail = adapter->num_tx_desc;
/* Hand the freshly initialized ring to the device. */
2802 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2803 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2808 /*********************************************************************
2810 * Enable transmit unit.
2812 **********************************************************************/
/*
 * lem_initialize_transmit_unit: program the TX descriptor ring base,
 * length and head/tail registers, the inter-packet-gap timer for the
 * adapter generation, the TX interrupt delay registers, and finally the
 * TCTL register — the TCTL write enables the transmitter.
 * NOTE(review): declarations of tipg/tctl/bus_addr, case labels of the
 * switch, and the low-32-bit TDBAL argument are elided by extraction.
 */
2814 lem_initialize_transmit_unit(struct adapter *adapter)
2819 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
2820 /* Setup the Base and Length of the Tx Descriptor Ring */
2821 bus_addr = adapter->txdma.dma_paddr;
2822 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2823 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2824 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2825 (u32)(bus_addr >> 32));
2826 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2828 /* Setup the HW Tx Head and Tail descriptor pointers */
2829 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2830 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
2832 HW_DEBUGOUT2("Base = %x, Length = %x\n",
2833 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
2834 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
2836 /* Set the default values for the Tx Inter Packet Gap timer */
2837 switch (adapter->hw.mac.type) {
2839 tipg = DEFAULT_82542_TIPG_IPGT;
2840 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2841 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2844 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2845 (adapter->hw.phy.media_type ==
2846 e1000_media_type_internal_serdes))
2847 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2849 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2850 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2851 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2854 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2855 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
2856 if(adapter->hw.mac.type >= e1000_82540)
2857 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
2858 adapter->tx_abs_int_delay.value);
2860 /* Program the Transmit Control Register */
2861 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2862 tctl &= ~E1000_TCTL_CT;
2863 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2864 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2866 /* This write will effectively turn on the transmit unit. */
2867 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2869 /* Setup Transmit Descriptor Base Settings */
2870 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
2872 if (adapter->tx_int_delay.value > 0)
2873 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2876 /*********************************************************************
2878 * Free all transmit related data structures.
2880 **********************************************************************/
/*
 * lem_free_transmit_structures: for each tx_buffer, unload/destroy its
 * dmamap and free any attached mbuf; then free the tx_buffer array and
 * destroy the TX DMA tag.  Guards make this safe after a partial
 * allocation.
 */
2882 lem_free_transmit_structures(struct adapter *adapter)
2884 struct em_buffer *tx_buffer;
2886 INIT_DEBUGOUT("free_transmit_structures: begin");
2888 if (adapter->tx_buffer_area != NULL) {
2889 for (int i = 0; i < adapter->num_tx_desc; i++) {
2890 tx_buffer = &adapter->tx_buffer_area[i];
2891 if (tx_buffer->m_head != NULL) {
2892 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2893 BUS_DMASYNC_POSTWRITE);
2894 bus_dmamap_unload(adapter->txtag,
2896 m_freem(tx_buffer->m_head);
2897 tx_buffer->m_head = NULL;
2898 } else if (tx_buffer->map != NULL)
/* No mbuf attached: still unload before destroying the map. */
2899 bus_dmamap_unload(adapter->txtag,
2901 if (tx_buffer->map != NULL) {
2902 bus_dmamap_destroy(adapter->txtag,
2904 tx_buffer->map = NULL;
2908 if (adapter->tx_buffer_area != NULL) {
2909 free(adapter->tx_buffer_area, M_DEVBUF);
2910 adapter->tx_buffer_area = NULL;
2912 if (adapter->txtag != NULL) {
2913 bus_dma_tag_destroy(adapter->txtag);
2914 adapter->txtag = NULL;
2918 /*********************************************************************
2920 * The offload context needs to be set when we transfer the first
2921 * packet of a particular protocol (TCP/UDP). This routine has been
2922 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2924 * Added back the old method of keeping the current context type
2925 * and not setting if unnecessary, as this is reported to be a
2926 * big performance win. -jfv
2927 **********************************************************************/
/*
 * lem_transmit_checksum_setup: build a checksum-offload context
 * descriptor for the frame in 'mp', filling *txd_upper/*txd_lower with
 * the per-packet offload bits.  Skips writing a new context when the
 * last one programmed (adapter->last_hw_offload) already matches the
 * requested CSUM_TCP/CSUM_UDP type.  Consumes one descriptor slot when
 * a context is written.
 * NOTE(review): 'etype'/'ipproto' declarations, the ETHERTYPE_IP case
 * label, ip->ip_p assignment, switch closing and several returns are
 * elided by extraction.
 */
2929 lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2930 u32 *txd_upper, u32 *txd_lower)
2932 struct e1000_context_desc *TXD = NULL;
2933 struct em_buffer *tx_buffer;
2934 struct ether_vlan_header *eh;
2935 struct ip *ip = NULL;
2936 struct ip6_hdr *ip6;
2937 int curr_txd, ehdrlen;
2938 u32 cmd, hdr_len, ip_hlen;
2943 cmd = hdr_len = ipproto = 0;
2944 *txd_upper = *txd_lower = 0;
2945 curr_txd = adapter->next_avail_tx_desc;
2948 * Determine where frame payload starts.
2949 * Jump over vlan headers if already present,
2950 * helpful for QinQ too.
2952 eh = mtod(mp, struct ether_vlan_header *);
2953 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2954 etype = ntohs(eh->evl_proto);
2955 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2957 etype = ntohs(eh->evl_encap_proto);
2958 ehdrlen = ETHER_HDR_LEN;
2962 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
2963 * TODO: Support SCTP too when it hits the tree.
2967 ip = (struct ip *)(mp->m_data + ehdrlen);
2968 ip_hlen = ip->ip_hl << 2;
2970 /* Setup of IP header checksum. */
2971 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
2973 * Start offset for header checksum calculation.
2974 * End offset for header checksum calculation.
2975 * Offset of place to put the checksum.
2977 TXD = (struct e1000_context_desc *)
2978 &adapter->tx_desc_base[curr_txd];
2979 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2980 TXD->lower_setup.ip_fields.ipcse =
2981 htole16(ehdrlen + ip_hlen);
2982 TXD->lower_setup.ip_fields.ipcso =
2983 ehdrlen + offsetof(struct ip, ip_sum);
2984 cmd |= E1000_TXD_CMD_IP;
2985 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2988 hdr_len = ehdrlen + ip_hlen;
2992 case ETHERTYPE_IPV6:
2993 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2994 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
2996 /* IPv6 doesn't have a header checksum. */
2998 hdr_len = ehdrlen + ip_hlen;
2999 ipproto = ip6->ip6_nxt;
3008 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3009 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3010 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3011 /* no need for context if already set */
3012 if (adapter->last_hw_offload == CSUM_TCP)
3014 adapter->last_hw_offload = CSUM_TCP;
3016 * Start offset for payload checksum calculation.
3017 * End offset for payload checksum calculation.
3018 * Offset of place to put the checksum.
3020 TXD = (struct e1000_context_desc *)
3021 &adapter->tx_desc_base[curr_txd];
3022 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3023 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3024 TXD->upper_setup.tcp_fields.tucso =
3025 hdr_len + offsetof(struct tcphdr, th_sum);
3026 cmd |= E1000_TXD_CMD_TCP;
3031 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3032 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3033 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3034 /* no need for context if already set */
3035 if (adapter->last_hw_offload == CSUM_UDP)
3037 adapter->last_hw_offload = CSUM_UDP;
3039 * Start offset for header checksum calculation.
3040 * End offset for header checksum calculation.
3041 * Offset of place to put the checksum.
3043 TXD = (struct e1000_context_desc *)
3044 &adapter->tx_desc_base[curr_txd];
3045 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3046 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3047 TXD->upper_setup.tcp_fields.tucso =
3048 hdr_len + offsetof(struct udphdr, uh_sum);
/* Finalize and account for the context descriptor just written. */
3058 TXD->tcp_seg_setup.data = htole32(0);
3059 TXD->cmd_and_length =
3060 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3061 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3062 tx_buffer->m_head = NULL;
3063 tx_buffer->next_eop = -1;
3065 if (++curr_txd == adapter->num_tx_desc)
3068 adapter->num_tx_desc_avail--;
3069 adapter->next_avail_tx_desc = curr_txd;
3073 /**********************************************************************
3075 * Examine each tx_buffer in the used queue. If the hardware is done
3076 * processing the packet then free associated resources. The
3077 * tx_buffer is put back on the free queue.
3079 **********************************************************************/
/*
 * lem_txeof: TX completion processing.  Walks descriptors from
 * next_tx_to_clean while the end-of-packet descriptor reports DD
 * (descriptor done), freeing mbufs and unloading maps, then updates the
 * free-descriptor count, clears OACTIVE when enough room exists, and
 * clears the watchdog when the ring is fully drained.  Requires the TX
 * lock.
 * NOTE(review): the 'done' assignment, several loop closings, the
 * num_avail updates and the netmap early return are elided by
 * extraction.
 */
3081 lem_txeof(struct adapter *adapter)
3083 int first, last, done, num_avail;
3084 struct em_buffer *tx_buffer;
3085 struct e1000_tx_desc *tx_desc, *eop_desc;
3086 if_t ifp = adapter->ifp;
3088 EM_TX_LOCK_ASSERT(adapter);
3091 if (netmap_tx_irq(ifp, 0))
3093 #endif /* DEV_NETMAP */
3094 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3097 num_avail = adapter->num_tx_desc_avail;
3098 first = adapter->next_tx_to_clean;
3099 tx_desc = &adapter->tx_desc_base[first];
3100 tx_buffer = &adapter->tx_buffer_area[first];
3101 last = tx_buffer->next_eop;
3102 eop_desc = &adapter->tx_desc_base[last];
3105 * What this does is get the index of the
3106 * first descriptor AFTER the EOP of the
3107 * first packet, that way we can do the
3108 * simple comparison on the inner while loop.
3110 if (++last == adapter->num_tx_desc)
3114 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3115 BUS_DMASYNC_POSTREAD);
3117 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3118 /* We clean the range of the packet */
3119 while (first != done) {
3120 tx_desc->upper.data = 0;
3121 tx_desc->lower.data = 0;
3122 tx_desc->buffer_addr = 0;
3125 if (tx_buffer->m_head) {
3126 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
3127 bus_dmamap_sync(adapter->txtag,
3129 BUS_DMASYNC_POSTWRITE);
3130 bus_dmamap_unload(adapter->txtag,
3133 m_freem(tx_buffer->m_head);
3134 tx_buffer->m_head = NULL;
3136 tx_buffer->next_eop = -1;
/* Progress was made; refresh the watchdog timestamp. */
3137 adapter->watchdog_time = ticks;
3139 if (++first == adapter->num_tx_desc)
3142 tx_buffer = &adapter->tx_buffer_area[first];
3143 tx_desc = &adapter->tx_desc_base[first];
3145 /* See if we can continue to the next packet */
3146 last = tx_buffer->next_eop;
3148 eop_desc = &adapter->tx_desc_base[last];
3149 /* Get new done point */
3150 if (++last == adapter->num_tx_desc) last = 0;
3155 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3156 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3158 adapter->next_tx_to_clean = first;
3159 adapter->num_tx_desc_avail = num_avail;
3161 #ifdef NIC_SEND_COMBINING
3162 if ((adapter->shadow_tdt & MIT_PENDING_TDT) == MIT_PENDING_TDT) {
3163 /* a tdt write is pending, do it */
3164 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0),
3165 0xffff & adapter->shadow_tdt);
3166 adapter->shadow_tdt = MIT_PENDING_INT;
3168 adapter->shadow_tdt = 0; // disable
3170 #endif /* NIC_SEND_COMBINING */
3172 * If we have enough room, clear IFF_DRV_OACTIVE to
3173 * tell the stack that it is OK to send packets.
3174 * If there are no pending descriptors, clear the watchdog.
3176 if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
3177 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3179 if (adapter->csb) { // XXX also csb_on ?
3180 adapter->csb->guest_need_txkick = 2; /* acked */
3181 // XXX memory barrier
3183 #endif /* NIC_PARAVIRT */
3184 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
3185 adapter->watchdog_check = FALSE;
3191 /*********************************************************************
3193 * When Link is lost sometimes there is work still in the TX ring
3194 * which may result in a watchdog, rather than allow that we do an
3195 * attempted cleanup and then reinit here. Note that this has been
3196 * seen mostly with fiber adapters.
3198 **********************************************************************/
/*
 * lem_tx_purge: with link down and the watchdog armed, attempt a TX
 * cleanup (the lem_txeof() call between the lock/unlock is elided by
 * extraction) and reinit the adapter if work is still outstanding.
 */
3200 lem_tx_purge(struct adapter *adapter)
3202 if ((!adapter->link_active) && (adapter->watchdog_check)) {
3203 EM_TX_LOCK(adapter);
3205 EM_TX_UNLOCK(adapter);
3206 if (adapter->watchdog_check) /* Still outstanding? */
3207 lem_init_locked(adapter);
3211 /*********************************************************************
3213 * Get a buffer from system mbuf buffer pool.
3215 **********************************************************************/
/*
 * lem_get_buf: allocate a cluster mbuf for RX slot 'i', DMA-map it via
 * the spare map, swap the spare with the slot's old map, and write the
 * new bus address into the RX descriptor.  Returns an errno on mbuf or
 * mapping failure (returns elided by extraction).
 */
3217 lem_get_buf(struct adapter *adapter, int i)
3220 bus_dma_segment_t segs[1];
3222 struct em_buffer *rx_buffer;
3225 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3227 adapter->mbuf_cluster_failed++;
3230 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Align the IP header on a 4-byte boundary when the cluster has room. */
3232 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3233 m_adj(m, ETHER_ALIGN);
3236 * Using memory from the mbuf cluster pool, invoke the
3237 * bus_dma machinery to arrange the memory mapping.
3239 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3240 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3246 /* If nsegs is wrong then the stack is corrupt. */
3247 KASSERT(nsegs == 1, ("Too many segments returned!"));
3249 rx_buffer = &adapter->rx_buffer_area[i];
3250 if (rx_buffer->m_head != NULL)
3251 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap the freshly-loaded spare map into the slot; old map becomes the spare. */
3253 map = rx_buffer->map;
3254 rx_buffer->map = adapter->rx_sparemap;
3255 adapter->rx_sparemap = map;
3256 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3257 rx_buffer->m_head = m;
3259 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3263 /*********************************************************************
3265 * Allocate memory for rx_buffer structures. Since we use one
3266 * rx_buffer per received packet, the maximum number of rx_buffer's
3267 * that we'll need is equal to the number of receive descriptors
3268 * that we've allocated.
3270 **********************************************************************/
/*
 * lem_allocate_receive_structures: allocate the rx_buffer array, create
 * the single-segment (MCLBYTES) RX DMA tag, the spare map used by
 * lem_get_buf(), and one dmamap per RX descriptor.  Failures fall
 * through to lem_free_receive_structures().
 * NOTE(review): 'error'/'i' declarations, returns and the fail label
 * are elided by extraction.
 */
3272 lem_allocate_receive_structures(struct adapter *adapter)
3274 device_t dev = adapter->dev;
3275 struct em_buffer *rx_buffer;
3278 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3279 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3280 if (adapter->rx_buffer_area == NULL) {
3281 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3285 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3286 1, 0, /* alignment, bounds */
3287 BUS_SPACE_MAXADDR, /* lowaddr */
3288 BUS_SPACE_MAXADDR, /* highaddr */
3289 NULL, NULL, /* filter, filterarg */
3290 MCLBYTES, /* maxsize */
3292 MCLBYTES, /* maxsegsize */
3294 NULL, /* lockfunc */
3298 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3303 /* Create the spare map (used by getbuf) */
3304 error = bus_dmamap_create(adapter->rxtag, 0, &adapter->rx_sparemap);
3306 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3311 rx_buffer = adapter->rx_buffer_area;
3312 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3313 error = bus_dmamap_create(adapter->rxtag, 0, &rx_buffer->map);
3315 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3324 lem_free_receive_structures(adapter);
3328 /*********************************************************************
3330 * (Re)initialize receive structures.
3332 **********************************************************************/
/*
 * lem_setup_receive_structures: zero the RX descriptor ring, release
 * any mbufs still attached from a previous run, then refill every slot
 * — from netmap buffers when a netmap slot array is active, otherwise
 * via lem_get_buf() — and sync the ring for the hardware.  Caller holds
 * the lock.
 * NOTE(review): the #ifdef DEV_NETMAP opener, 'error'/'i' declarations,
 * paddr/addr declarations and the error-unwind after lem_get_buf() are
 * elided by extraction.
 */
3334 lem_setup_receive_structures(struct adapter *adapter)
3336 struct em_buffer *rx_buffer;
3339 /* we are already under lock */
3340 struct netmap_adapter *na = netmap_getna(adapter->ifp);
3341 struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
3344 /* Reset descriptor ring */
3345 bzero(adapter->rx_desc_base,
3346 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3348 /* Free current RX buffers. */
3349 rx_buffer = adapter->rx_buffer_area;
3350 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3351 if (rx_buffer->m_head != NULL) {
3352 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3353 BUS_DMASYNC_POSTREAD);
3354 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3355 m_freem(rx_buffer->m_head);
3356 rx_buffer->m_head = NULL;
3360 /* Allocate new ones. */
3361 for (i = 0; i < adapter->num_rx_desc; i++) {
3364 /* the i-th NIC entry goes to slot si */
3365 int si = netmap_idx_n2k(&na->rx_rings[0], i);
3369 addr = PNMB(na, slot + si, &paddr);
3370 netmap_load_map(na, adapter->rxtag, rx_buffer->map, addr);
3371 /* Update descriptor */
3372 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
3375 #endif /* DEV_NETMAP */
3376 error = lem_get_buf(adapter, i);
3381 /* Setup our descriptor pointers */
3382 adapter->next_rx_desc_to_check = 0;
3383 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3384 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3389 /*********************************************************************
3391 * Enable receive unit.
3393 **********************************************************************/
/*
 * lem_initialize_receive_unit: disable receives, program RX interrupt
 * delay/throttling (82540+), the descriptor ring base/length, then
 * build RCTL (buffer size, long-packet enable, store-bad-packet for
 * 82543 TBI workaround), enable RX checksum offload when requested, and
 * finally re-enable receives and set RDH/RDT.  In netmap mode, RDT is
 * pulled back to leave client-owned buffers alone.
 * NOTE(review): declarations of rctl/rxcsum/bus_addr, RADV's sibling RDTR
 * write, switch case labels, and the low-32-bit RDBAL argument are
 * elided by extraction.
 */
3396 lem_initialize_receive_unit(struct adapter *adapter)
3398 if_t ifp = adapter->ifp;
3402 INIT_DEBUGOUT("lem_initialize_receive_unit: begin");
3405 * Make sure receives are disabled while setting
3406 * up the descriptor ring
3408 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3409 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3411 if (adapter->hw.mac.type >= e1000_82540) {
3412 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
3413 adapter->rx_abs_int_delay.value);
3415 * Set the interrupt throttling rate. Value is calculated
3416 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3418 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
3421 /* Setup the Base and Length of the Rx Descriptor Ring */
3422 bus_addr = adapter->rxdma.dma_paddr;
3423 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3424 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3425 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3426 (u32)(bus_addr >> 32));
3427 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3430 /* Setup the Receive Control Register */
3431 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3432 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3433 E1000_RCTL_RDMTS_HALF |
3434 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3436 /* Make sure VLAN Filters are off */
3437 rctl &= ~E1000_RCTL_VFE;
3439 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3440 rctl |= E1000_RCTL_SBP;
3442 rctl &= ~E1000_RCTL_SBP;
3444 switch (adapter->rx_buffer_len) {
3447 rctl |= E1000_RCTL_SZ_2048;
3450 rctl |= E1000_RCTL_SZ_4096 |
3451 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3454 rctl |= E1000_RCTL_SZ_8192 |
3455 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3458 rctl |= E1000_RCTL_SZ_16384 |
3459 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3463 if (if_getmtu(ifp) > ETHERMTU)
3464 rctl |= E1000_RCTL_LPE;
3466 rctl &= ~E1000_RCTL_LPE;
3468 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
3469 if ((adapter->hw.mac.type >= e1000_82543) &&
3470 (if_getcapenable(ifp) & IFCAP_RXCSUM)) {
3471 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3472 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3473 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3476 /* Enable Receives */
3477 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3480 * Setup the HW Rx Head and
3481 * Tail Descriptor Pointers
3483 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
/* 'rctl' is reused here as a scratch variable holding the RDT value. */
3484 rctl = adapter->num_rx_desc - 1; /* default RDT value */
3486 /* preserve buffers already made available to clients */
3487 if (if_getcapenable(ifp) & IFCAP_NETMAP) {
3488 struct netmap_adapter *na = netmap_getna(adapter->ifp);
3489 rctl -= nm_kr_rxspace(&na->rx_rings[0]);
3491 #endif /* DEV_NETMAP */
3492 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rctl);
3497 /*********************************************************************
3499 * Free receive related data structures.
3501 **********************************************************************/
/*
 * Tear down everything lem_allocate/setup_receive_structures built:
 * the spare DMA map, every per-descriptor mbuf + DMA map, the
 * rx_buffer_area array itself, and finally the RX DMA tag.
 * Safe to call on a partially-initialized adapter (every step is
 * guarded by a NULL check).
 */
3503 lem_free_receive_structures(struct adapter *adapter)
3505 struct em_buffer *rx_buffer;
3508 INIT_DEBUGOUT("free_receive_structures: begin");
3510 if (adapter->rx_sparemap) {
3511 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3512 adapter->rx_sparemap = NULL;
3515 /* Cleanup any existing buffers */
3516 if (adapter->rx_buffer_area != NULL) {
3517 rx_buffer = adapter->rx_buffer_area;
3518 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3519 if (rx_buffer->m_head != NULL) {
/* Sync before unload so the CPU sees any DMA'd data, then free the mbuf. */
3520 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3521 BUS_DMASYNC_POSTREAD);
3522 bus_dmamap_unload(adapter->rxtag,
3524 m_freem(rx_buffer->m_head);
3525 rx_buffer->m_head = NULL;
3526 } else if (rx_buffer->map != NULL)
3527 bus_dmamap_unload(adapter->rxtag,
3529 if (rx_buffer->map != NULL) {
3530 bus_dmamap_destroy(adapter->rxtag,
3532 rx_buffer->map = NULL;
3537 if (adapter->rx_buffer_area != NULL) {
3538 free(adapter->rx_buffer_area, M_DEVBUF);
3539 adapter->rx_buffer_area = NULL;
/* The DMA tag goes last, after all maps created from it are destroyed. */
3542 if (adapter->rxtag != NULL) {
3543 bus_dma_tag_destroy(adapter->rxtag);
3544 adapter->rxtag = NULL;
3548 /*********************************************************************
3550 * This routine executes in interrupt context. It replenishes
3551 * the mbufs in the descriptor and sends data which has been
3552 * dma'ed into host memory to upper layer.
3554 * We loop at most count times if count is > 0, or until done if
3557 * For polling we also now return the number of cleaned packets
3558 *********************************************************************/
/*
 * RX completion path.  Walks the descriptor ring from
 * next_rx_desc_to_check, assembling multi-descriptor frames via the
 * fmp/lmp chain pointers, stripping the CRC, applying the 82543 TBI
 * workaround, and handing completed frames to the stack.  Holds
 * EM_RX_LOCK while touching the ring; returns TRUE while descriptors
 * remain ready (status DD set).  Optional paths: DEV_NETMAP (defer to
 * netmap rings), NIC_PARAVIRT (CSB-mediated kicks), BATCH_DISPATCH.
 * NOTE(review): many interior lines are elided in this excerpt; the
 * comments below describe only what is visible.
 */
3560 lem_rxeof(struct adapter *adapter, int count, int *done)
3562 if_t ifp = adapter->ifp;
3564 u8 status = 0, accept_frame = 0, eop = 0;
3565 u16 len, desc_len, prev_len_adj;
3567 struct e1000_rx_desc *current_desc;
3569 #ifdef BATCH_DISPATCH
3570 struct mbuf *mh = NULL, *mt = NULL;
3571 #endif /* BATCH_DISPATCH */
3574 struct paravirt_csb* csb = adapter->csb;
/* csb_mode is true only when the hypervisor-shared CSB is active. */
3575 int csb_mode = csb && csb->guest_csb_on;
3577 //ND("clear guest_rxkick at %d", adapter->next_rx_desc_to_check);
3578 if (csb_mode && csb->guest_need_rxkick)
3579 csb->guest_need_rxkick = 0;
3580 #endif /* NIC_PARAVIRT */
3581 EM_RX_LOCK(adapter);
3583 #ifdef BATCH_DISPATCH
3585 #endif /* BATCH_DISPATCH */
3586 i = adapter->next_rx_desc_to_check;
3587 current_desc = &adapter->rx_desc_base[i];
/* Make descriptor writes by the NIC visible before reading status. */
3588 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3589 BUS_DMASYNC_POSTREAD);
/* If netmap owns the ring, it consumes the interrupt and we bail out. */
3592 if (netmap_rx_irq(ifp, 0, &rx_sent)) {
3593 EM_RX_UNLOCK(adapter);
3596 #endif /* DEV_NETMAP */
3598 #if 1 // XXX optimization ?
/* Fast exit: first descriptor not done means nothing to process. */
3599 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3602 EM_RX_UNLOCK(adapter);
/* Main loop: bounded by count and by the interface still running. */
3607 while (count != 0 && if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
3608 struct mbuf *m = NULL;
3610 status = current_desc->status;
3611 if ((status & E1000_RXD_STAT_DD) == 0) {
3614 /* buffer not ready yet. Retry a few times before giving up */
3615 if (++retries <= adapter->rx_retries) {
/* Paravirt: ask the host for a kick before the final give-up. */
3618 if (csb->guest_need_rxkick == 0) {
3619 // ND("set guest_rxkick at %d", adapter->next_rx_desc_to_check);
3620 csb->guest_need_rxkick = 1;
3621 // XXX memory barrier, status volatile ?
3622 continue; /* double check */
3625 /* no buffer ready, give up */
3626 #endif /* NIC_PARAVIRT */
3631 if (csb->guest_need_rxkick)
3632 // ND("clear again guest_rxkick at %d", adapter->next_rx_desc_to_check);
3633 csb->guest_need_rxkick = 0;
3636 #endif /* NIC_PARAVIRT */
3638 mp = adapter->rx_buffer_area[i].m_head;
3640 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3641 * needs to access the last received byte in the mbuf.
3643 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3644 BUS_DMASYNC_POSTREAD);
3648 desc_len = le16toh(current_desc->length);
3649 if (status & E1000_RXD_STAT_EOP) {
/*
 * End-of-packet: strip the 4-byte Ethernet CRC.  If this last
 * fragment is shorter than the CRC, the remainder must be
 * trimmed from the previous mbuf (prev_len_adj).
 */
3652 if (desc_len < ETHER_CRC_LEN) {
3654 prev_len_adj = ETHER_CRC_LEN - desc_len;
3656 len = desc_len - ETHER_CRC_LEN;
3662 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3664 u32 pkt_len = desc_len;
3666 if (adapter->fmp != NULL)
3667 pkt_len += adapter->fmp->m_pkthdr.len;
/* 82543 TBI workaround: some "errored" frames are actually acceptable. */
3669 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3670 if (TBI_ACCEPT(&adapter->hw, status,
3671 current_desc->errors, pkt_len, last_byte,
3672 adapter->min_frame_size, adapter->max_frame_size)) {
3673 e1000_tbi_adjust_stats_82543(&adapter->hw,
3674 &adapter->stats, pkt_len,
3675 adapter->hw.mac.addr,
3676 adapter->max_frame_size);
/* Replenish the slot; on failure the frame is dropped, not stalled. */
3684 if (lem_get_buf(adapter, i) != 0) {
3685 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
3689 /* Assign correct length to the current fragment */
3692 if (adapter->fmp == NULL) {
3693 mp->m_pkthdr.len = len;
3694 adapter->fmp = mp; /* Store the first mbuf */
3697 /* Chain mbuf's together */
3698 mp->m_flags &= ~M_PKTHDR;
3700 * Adjust length of previous mbuf in chain if
3701 * we received less than 4 bytes in the last
3704 if (prev_len_adj > 0) {
3705 adapter->lmp->m_len -= prev_len_adj;
3706 adapter->fmp->m_pkthdr.len -=
3709 adapter->lmp->m_next = mp;
3710 adapter->lmp = adapter->lmp->m_next;
3711 adapter->fmp->m_pkthdr.len += len;
/* Completed frame: stamp the receiving ifnet and deliver upward. */
3715 if_setrcvif(adapter->fmp, ifp);
3716 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3717 lem_receive_checksum(adapter, current_desc,
3719 #ifndef __NO_STRICT_ALIGNMENT
/* Strict-alignment platforms must realign jumbo payloads (lem_fixup_rx). */
3720 if (adapter->max_frame_size >
3721 (MCLBYTES - ETHER_ALIGN) &&
3722 lem_fixup_rx(adapter) != 0)
3725 if (status & E1000_RXD_STAT_VP) {
3726 adapter->fmp->m_pkthdr.ether_vtag =
3727 le16toh(current_desc->special);
3728 adapter->fmp->m_flags |= M_VLANTAG;
3730 #ifndef __NO_STRICT_ALIGNMENT
3734 adapter->fmp = NULL;
3735 adapter->lmp = NULL;
3738 adapter->dropped_pkts++;
3740 /* Reuse loaded DMA map and just update mbuf chain */
3741 mp = adapter->rx_buffer_area[i].m_head;
3742 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3743 mp->m_data = mp->m_ext.ext_buf;
3745 if (adapter->max_frame_size <=
3746 (MCLBYTES - ETHER_ALIGN))
3747 m_adj(mp, ETHER_ALIGN);
/* Discard any partially-assembled chain on error. */
3748 if (adapter->fmp != NULL) {
3749 m_freem(adapter->fmp);
3750 adapter->fmp = NULL;
3751 adapter->lmp = NULL;
3756 /* Zero out the receive descriptors status. */
3757 current_desc->status = 0;
3758 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3759 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3763 /* the buffer at i has been already replaced by lem_get_buf()
3764 * so it is safe to set guest_rdt = i and possibly send a kick.
3765 * XXX see if we can optimize it later.
3768 // XXX memory barrier
3769 if (i == csb->host_rxkick_at)
3770 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3772 #endif /* NIC_PARAVIRT */
3773 /* Advance our pointers to the next descriptor. */
3774 if (++i == adapter->num_rx_desc)
3776 /* Call into the stack */
3778 #ifdef BATCH_DISPATCH
3779 if (adapter->batch_enable) {
3785 m->m_nextpkt = NULL;
3787 current_desc = &adapter->rx_desc_base[i];
3790 #endif /* BATCH_DISPATCH */
/* Drop the lock around the stack call; re-read i afterwards since
 * another thread may have advanced the ring meanwhile. */
3791 adapter->next_rx_desc_to_check = i;
3792 EM_RX_UNLOCK(adapter);
3794 EM_RX_LOCK(adapter);
3796 i = adapter->next_rx_desc_to_check;
3798 current_desc = &adapter->rx_desc_base[i];
3800 adapter->next_rx_desc_to_check = i;
3801 #ifdef BATCH_DISPATCH
/* Batched mode: deliver the whole queued chain outside the lock. */
3803 EM_RX_UNLOCK(adapter);
3804 while ( (mt = mh) != NULL) {
3806 mt->m_nextpkt = NULL;
3809 EM_RX_LOCK(adapter);
3810 i = adapter->next_rx_desc_to_check; /* in case of interrupts */
3814 #endif /* BATCH_DISPATCH */
3816 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
3818 i = adapter->num_rx_desc - 1;
3820 if (!csb_mode) /* filter out writes */
3821 #endif /* NIC_PARAVIRT */
3822 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3825 EM_RX_UNLOCK(adapter);
3826 return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
3829 #ifndef __NO_STRICT_ALIGNMENT
3831 * When jumbo frames are enabled we should realign entire payload on
3832 * architecures with strict alignment. This is serious design mistake of 8254x
3833 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3834 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3835 * payload. On architecures without strict alignment restrictions 8254x still
3836 * performs unaligned memory access which would reduce the performance too.
3837 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3838 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3839 * existing mbuf chain.
3841 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3842 * not used at all on architectures with strict alignment.
/*
 * Realign the head of adapter->fmp so the IP payload is aligned.
 * Fast path: shift the header in place when there is room in the
 * cluster; slow path: allocate a header mbuf and move the pkthdr.
 * On allocation failure the whole chain is dropped and counted.
 */
3845 lem_fixup_rx(struct adapter *adapter)
3852 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
/* In-place shift: make ETHER_HDR_LEN bytes of leading alignment slack. */
3853 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3854 m->m_data += ETHER_HDR_LEN;
3856 MGETHDR(n, M_NOWAIT, MT_DATA);
/* Split path: new mbuf n takes the Ethernet header and the pkthdr. */
3858 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3859 m->m_data += ETHER_HDR_LEN;
3860 m->m_len -= ETHER_HDR_LEN;
3861 n->m_len = ETHER_HDR_LEN;
3862 M_MOVE_PKTHDR(n, m);
/* MGETHDR failed: account the drop and discard the chain. */
3866 adapter->dropped_pkts++;
3867 m_freem(adapter->fmp);
3868 adapter->fmp = NULL;
3877 /*********************************************************************
3879 * Verify that the hardware indicated that the checksum is valid.
3880 * Inform the stack about the status of checksum so that stack
3881 * doesn't spend time verifying the checksum.
3883 *********************************************************************/
/*
 * Translate the RX descriptor's checksum status bits into mbuf
 * csum_flags.  Pre-82543 parts and descriptors with IXSM (ignore
 * checksum) set get no offload flags at all.
 */
3885 lem_receive_checksum(struct adapter *adapter,
3886 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3888 /* 82543 or newer only */
3889 if ((adapter->hw.mac.type < e1000_82543) ||
3890 /* Ignore Checksum bit is set */
3891 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3892 mp->m_pkthdr.csum_flags = 0;
/* IPCS set means hardware checked the IP header checksum. */
3896 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3898 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3899 /* IP Checksum Good */
3900 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3901 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3904 mp->m_pkthdr.csum_flags = 0;
/* TCPCS set means hardware checked the TCP/UDP checksum. */
3908 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3910 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3911 mp->m_pkthdr.csum_flags |=
3912 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
/* 0xffff tells the stack the pseudo-header checksum already verifies. */
3913 mp->m_pkthdr.csum_data = htons(0xffff);
3919 * This routine is run via an vlan
/*
 * VLAN-config event handler: record vtag in the shadow VFTA and,
 * when hardware VLAN filtering is enabled, re-init to push it to
 * the chip.  Ignores events for other interfaces and invalid tags.
 */
3923 lem_register_vlan(void *arg, if_t ifp, u16 vtag)
3925 struct adapter *adapter = if_getsoftc(ifp);
3928 if (if_getsoftc(ifp) != arg) /* Not our event */
3931 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
3934 EM_CORE_LOCK(adapter);
/* VFTA is a bit array: word index = vtag >> 5, bit = low 5 bits. */
3935 index = (vtag >> 5) & 0x7F;
3937 adapter->shadow_vfta[index] |= (1 << bit);
3938 ++adapter->num_vlans;
3939 /* Re-init to load the changes */
3940 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
3941 lem_init_locked(adapter);
3942 EM_CORE_UNLOCK(adapter);
3946 * This routine is run via an vlan
/*
 * VLAN-unconfig event handler: mirror of lem_register_vlan —
 * clear the vtag's bit in the shadow VFTA and re-init if hardware
 * filtering is active.
 */
3950 lem_unregister_vlan(void *arg, if_t ifp, u16 vtag)
3952 struct adapter *adapter = if_getsoftc(ifp);
3955 if (if_getsoftc(ifp) != arg)
3958 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3961 EM_CORE_LOCK(adapter);
3962 index = (vtag >> 5) & 0x7F;
3964 adapter->shadow_vfta[index] &= ~(1 << bit);
3965 --adapter->num_vlans;
3966 /* Re-init to load the changes */
3967 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
3968 lem_init_locked(adapter);
3969 EM_CORE_UNLOCK(adapter);
/*
 * Repopulate the hardware VLAN filter table from shadow_vfta after a
 * soft reset, and enable VLAN tag stripping (CTRL_VME) and the filter
 * table (RCTL_VFE).  No-op when no VLANs are registered.
 */
3973 lem_setup_vlan_hw_support(struct adapter *adapter)
3975 struct e1000_hw *hw = &adapter->hw;
3979 ** We get here thru init_locked, meaning
3980 ** a soft reset, this has already cleared
3981 ** the VFTA and other state, so if there
3982 ** have been no vlan's registered do nothing.
3984 if (adapter->num_vlans == 0)
3988 ** A soft reset zero's out the VFTA, so
3989 ** we need to repopulate it now.
3991 for (int i = 0; i < EM_VFTA_SIZE; i++)
3992 if (adapter->shadow_vfta[i] != 0)
3993 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3994 i, adapter->shadow_vfta[i]);
/* CTRL_VME turns on VLAN tag stripping/insertion. */
3996 reg = E1000_READ_REG(hw, E1000_CTRL);
3997 reg |= E1000_CTRL_VME;
3998 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4000 /* Enable the Filter Table */
4001 reg = E1000_READ_REG(hw, E1000_RCTL);
4002 reg &= ~E1000_RCTL_CFIEN;
4003 reg |= E1000_RCTL_VFE;
4004 E1000_WRITE_REG(hw, E1000_RCTL, reg);
/* Unmask the standard interrupt causes by writing IMS_ENABLE_MASK to IMS. */
4008 lem_enable_intr(struct adapter *adapter)
4010 struct e1000_hw *hw = &adapter->hw;
4011 u32 ims_mask = IMS_ENABLE_MASK;
4013 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
/* Mask all interrupt causes by writing all-ones to the IMC register. */
4017 lem_disable_intr(struct adapter *adapter)
4019 struct e1000_hw *hw = &adapter->hw;
4021 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4025 * Bit of a misnomer, what this really means is
4026 * to enable OS management of the system... aka
4027 * to disable special hardware management features
/*
 * When a management controller is present, stop the hardware from
 * intercepting ARP traffic so the OS handles it instead.
 */
4030 lem_init_manageability(struct adapter *adapter)
4032 /* A shared code workaround */
4033 if (adapter->has_manage) {
4034 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4035 /* disable hardware interception of ARP */
4036 manc &= ~(E1000_MANC_ARP_EN);
4037 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4042 * Give control back to hardware management
4043 * controller if there is one.
/* Inverse of lem_init_manageability: re-enable ARP interception in MANC. */
4046 lem_release_manageability(struct adapter *adapter)
4048 if (adapter->has_manage) {
4049 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4051 /* re-enable hardware interception of ARP */
4052 manc |= E1000_MANC_ARP_EN;
4053 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4058 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
4059 * For ASF and Pass Through versions of f/w this means
4060 * that the driver is loaded. For AMT version type f/w
4061 * this means that the network i/f is open.
/* Signal firmware that the driver owns the hardware (DRV_LOAD set). */
4064 lem_get_hw_control(struct adapter *adapter)
4068 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4069 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4070 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4075 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4076 * For ASF and Pass Through versions of f/w this means that
4077 * the driver is no longer loaded. For AMT versions of the
4078 * f/w this means that the network i/f is closed.
/* Hand control back to firmware (DRV_LOAD cleared); no-op without a
 * management controller. */
4081 lem_release_hw_control(struct adapter *adapter)
4085 if (!adapter->has_manage)
4088 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4089 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4090 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
/*
 * Reject multicast addresses (low bit of first octet set) and the
 * all-zero address as invalid station addresses.
 */
4095 lem_is_valid_ether_addr(u8 *addr)
4097 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4099 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4107 ** Parse the interface capabilities with regard
4108 ** to both system management and wake-on-lan for
/*
 * Read the NVM/EEPROM wake-on-LAN configuration appropriate to the
 * MAC type and PCI function, set adapter->wol accordingly, then apply
 * per-device quirks (dual-fiber port B, quad-port adapters) that
 * override the EEPROM setting.
 */
4112 lem_get_wakeup(device_t dev)
4114 struct adapter *adapter = device_get_softc(dev);
4115 u16 eeprom_data = 0, device_id, apme_mask;
4117 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
4118 apme_mask = EM_EEPROM_APME;
4120 switch (adapter->hw.mac.type) {
4125 e1000_read_nvm(&adapter->hw,
4126 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
4127 apme_mask = EM_82544_APME;
4130 case e1000_82546_rev_3:
/* Dual-port parts keep port B's wake config in a separate NVM word. */
4131 if (adapter->hw.bus.func == 1) {
4132 e1000_read_nvm(&adapter->hw,
4133 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
4136 e1000_read_nvm(&adapter->hw,
4137 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4140 e1000_read_nvm(&adapter->hw,
4141 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4144 if (eeprom_data & apme_mask)
4145 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
4147 * We have the eeprom settings, now apply the special cases
4148 * where the eeprom may be wrong or the board won't support
4149 * wake on lan on a particular port
4151 device_id = pci_get_device(dev);
4152 switch (device_id) {
4153 case E1000_DEV_ID_82546GB_PCIE:
4156 case E1000_DEV_ID_82546EB_FIBER:
4157 case E1000_DEV_ID_82546GB_FIBER:
4158 /* Wake events only supported on port A for dual fiber
4159 * regardless of eeprom setting */
4160 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
4161 E1000_STATUS_FUNC_1)
4164 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
4165 /* if quad port adapter, disable WoL on all but port A */
4166 if (global_quad_port_a != 0)
4168 /* Reset for multiple quad port adapters */
4169 if (++global_quad_port_a == 4)
4170 global_quad_port_a = 0;
4178 * Enable PCI Wake On Lan capability
/*
 * Arm the adapter for wake-on-LAN at suspend/shutdown: advertise
 * wakeup in CTRL, keep the laser on for fiber/serdes media, filter
 * adapter->wol by the enabled IFCAP_WOL_* bits, and finally set the
 * PME bits in PCI power-management config space.  Bails out early if
 * the device has no PCI PM capability.
 */
4181 lem_enable_wakeup(device_t dev)
4183 struct adapter *adapter = device_get_softc(dev);
4184 if_t ifp = adapter->ifp;
4185 u32 pmc, ctrl, ctrl_ext, rctl;
4188 if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
4191 /* Advertise the wakeup capability */
4192 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4193 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
4194 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4195 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4197 /* Keep the laser running on Fiber adapters */
4198 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
4199 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
4200 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4201 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4202 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
4206 ** Determine type of Wakeup: note that wol
4207 ** is set with all bits on by default.
4209 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0)
4210 adapter->wol &= ~E1000_WUFC_MAG;
4212 if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0)
4213 adapter->wol &= ~E1000_WUFC_MC;
/* Multicast wake needs promiscuous-multicast receive while asleep. */
4215 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4216 rctl |= E1000_RCTL_MPE;
4217 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
/* pchlan parts wake via the PHY; others program WUC/WUFC directly. */
4220 if (adapter->hw.mac.type == e1000_pchlan) {
4221 if (lem_enable_phy_wakeup(adapter))
4224 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4225 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
/* Request PME from D3 via PCI power-management status register. */
4230 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4231 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4232 if (if_getcapenable(ifp) & IFCAP_WOL)
4233 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4234 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
4240 ** WOL in the newer chipset interfaces (pchlan)
4241 ** require thing to be copied into the phy
/*
 * Mirror the MAC's wakeup-relevant state (RAR address registers, MTA
 * multicast table, RCTL-derived receive control) into the BM PHY
 * registers, then enable wakeup both in the MAC WUC/WUFC registers
 * and in the PHY's wakeup-control page (769).  Returns an error code
 * if the PHY cannot be acquired or programmed.
 */
4244 lem_enable_phy_wakeup(struct adapter *adapter)
4246 struct e1000_hw *hw = &adapter->hw;
4250 /* copy MAC RARs to PHY RARs */
4251 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4252 mreg = E1000_READ_REG(hw, E1000_RAL(i));
4253 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
4254 e1000_write_phy_reg(hw, BM_RAR_M(i),
4255 (u16)((mreg >> 16) & 0xFFFF));
4256 mreg = E1000_READ_REG(hw, E1000_RAH(i));
4257 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
4258 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
4259 (u16)((mreg >> 16) & 0xFFFF));
4262 /* copy MAC MTA to PHY MTA */
4263 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4264 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4265 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4266 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4267 (u16)((mreg >> 16) & 0xFFFF));
4270 /* configure PHY Rx Control register */
/* Translate each relevant RCTL bit into its BM_RCTL equivalent. */
4271 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
4272 mreg = E1000_READ_REG(hw, E1000_RCTL);
4273 if (mreg & E1000_RCTL_UPE)
4274 preg |= BM_RCTL_UPE;
4275 if (mreg & E1000_RCTL_MPE)
4276 preg |= BM_RCTL_MPE;
4277 preg &= ~(BM_RCTL_MO_MASK);
4278 if (mreg & E1000_RCTL_MO_3)
4279 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4280 << BM_RCTL_MO_SHIFT);
4281 if (mreg & E1000_RCTL_BAM)
4282 preg |= BM_RCTL_BAM;
4283 if (mreg & E1000_RCTL_PMCF)
4284 preg |= BM_RCTL_PMCF;
4285 mreg = E1000_READ_REG(hw, E1000_CTRL);
4286 if (mreg & E1000_CTRL_RFCE)
4287 preg |= BM_RCTL_RFCE;
4288 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
4290 /* enable PHY wakeup in MAC register */
4291 E1000_WRITE_REG(hw, E1000_WUC,
4292 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4293 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
4295 /* configure and enable PHY wakeup in PHY registers */
4296 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
4297 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4299 /* activate PHY wakeup */
/* Raw MDIC access below requires exclusive ownership of the PHY. */
4300 ret = hw->phy.ops.acquire(hw);
4302 printf("Could not acquire PHY\n");
4305 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4306 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4307 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
4309 printf("Could not read PHY page 769\n");
4312 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4313 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
4315 printf("Could not set PHY Host Wakeup bit\n");
4317 hw->phy.ops.release(hw);
/*
 * led(4) callback: turn the identify LED on or off under the core
 * lock.  `onoff` selects between setup+on and off+cleanup.
 */
4323 lem_led_func(void *arg, int onoff)
4325 struct adapter *adapter = arg;
4327 EM_CORE_LOCK(adapter);
4329 e1000_setup_led(&adapter->hw);
4330 e1000_led_on(&adapter->hw);
4332 e1000_led_off(&adapter->hw);
4333 e1000_cleanup_led(&adapter->hw);
4335 EM_CORE_UNLOCK(adapter);
4338 /*********************************************************************
4339 * 82544 Coexistence issue workaround.
4340 * There are 2 issues.
4341 * 1. Transmit Hang issue.
4342 * To detect this issue, following equation can be used...
4343 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4344 * If SUM[3:0] is in between 1 to 4, we will have this issue.
4347 * To detect this issue, following equation can be used...
4348 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4349 * If SUM[3:0] is in between 9 to c, we will have this issue.
4353 * Make sure we do not have ending address
4354 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4356 *************************************************************************/
/*
 * Split a (address, length) TX segment into one or two descriptor
 * entries in desc_array so the segment's ending address never lands
 * on a problematic low-nibble value for the 82544 errata above.
 * Returns the number of entries filled (1 when safe, 2 when a 4-byte
 * tail descriptor is split off).
 */
4358 lem_fill_descriptors (bus_addr_t address, u32 length,
4359 PDESC_ARRAY desc_array)
4361 u32 safe_terminator;
4363 /* Since issue is sensitive to length and address.*/
4364 /* Let us first check the address...*/
4366 desc_array->descriptor[0].address = address;
4367 desc_array->descriptor[0].length = length;
4368 desc_array->elements = 1;
4369 return (desc_array->elements);
/* Low nibble of (addr[2:0] + len[3:0]) determines errata exposure. */
4371 safe_terminator = (u32)((((u32)address & 0x7) +
4372 (length & 0xF)) & 0xF);
4373 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4374 if (safe_terminator == 0 ||
4375 (safe_terminator > 4 &&
4376 safe_terminator < 9) ||
4377 (safe_terminator > 0xC &&
4378 safe_terminator <= 0xF)) {
4379 desc_array->descriptor[0].address = address;
4380 desc_array->descriptor[0].length = length;
4381 desc_array->elements = 1;
4382 return (desc_array->elements);
/* Unsafe terminator: split the final 4 bytes into a second descriptor. */
4385 desc_array->descriptor[0].address = address;
4386 desc_array->descriptor[0].length = length - 4;
4387 desc_array->descriptor[1].address = address + (length - 4);
4388 desc_array->descriptor[1].length = 4;
4389 desc_array->elements = 2;
4390 return (desc_array->elements);
4393 /**********************************************************************
4395 * Update the board statistics counters.
4397 **********************************************************************/
/*
 * Accumulate the hardware's clear-on-read statistics registers into
 * adapter->stats.  Symbol/sequence error counters are only read on
 * copper media or when link is up; some counters exist only on
 * 82543 and newer.
 */
4399 lem_update_stats_counters(struct adapter *adapter)
4402 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
4403 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4404 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4405 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4407 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4408 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4409 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4410 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4412 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4413 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4414 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4415 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4416 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4417 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4418 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4419 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4420 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4421 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4422 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4423 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4424 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4425 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4426 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4427 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4428 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4429 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4430 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4431 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4433 /* For the 64-bit byte counters the low dword must be read first. */
4434 /* Both registers clear on the read of the high dword */
4436 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
4437 ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
4438 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
4439 ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
4441 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
4442 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
4443 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
4444 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
4445 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
4447 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
4448 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
4450 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
4451 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
4452 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
4453 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
4454 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
4455 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
4456 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
4457 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
4458 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
4459 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* The following counters were introduced with the 82543. */
4461 if (adapter->hw.mac.type >= e1000_82543) {
4462 adapter->stats.algnerrc +=
4463 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
4464 adapter->stats.rxerrc +=
4465 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
4466 adapter->stats.tncrs +=
4467 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
4468 adapter->stats.cexterr +=
4469 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
4470 adapter->stats.tsctc +=
4471 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
4472 adapter->stats.tsctfc +=
4473 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
/*
 * if_get_counter method: report driver-maintained aggregates for
 * collisions, input errors, and output errors; everything else falls
 * through to the stack's default counters.
 */
4478 lem_get_counter(if_t ifp, ift_counter cnt)
4480 struct adapter *adapter;
4482 adapter = if_getsoftc(ifp);
4485 case IFCOUNTER_COLLISIONS:
4486 return (adapter->stats.colc);
4487 case IFCOUNTER_IERRORS:
/* Input errors = driver drops plus hardware RX error counters. */
4488 return (adapter->dropped_pkts + adapter->stats.rxerrc +
4489 adapter->stats.crcerrs + adapter->stats.algnerrc +
4490 adapter->stats.ruc + adapter->stats.roc +
4491 adapter->stats.mpc + adapter->stats.cexterr);
4492 case IFCOUNTER_OERRORS:
4493 return (adapter->stats.ecol + adapter->stats.latecol +
4494 adapter->watchdog_events);
4496 return (if_get_counter_default(ifp, cnt));
4500 /* Export a single 32-bit register via a read-only sysctl. */
/* oid_arg1 carries the adapter, oid_arg2 the register offset to read. */
4502 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4504 struct adapter *adapter;
4507 adapter = oidp->oid_arg1;
4508 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4509 return (sysctl_handle_int(oidp, &val, 0, req));
4513 * Add sysctl variables, one per statistic, to the system.
4516 lem_add_hw_stats(struct adapter *adapter)
4518 device_t dev = adapter->dev;
4520 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4521 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4522 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4523 struct e1000_hw_stats *stats = &adapter->stats;
4525 struct sysctl_oid *stat_node;
4526 struct sysctl_oid_list *stat_list;
4528 /* Driver Statistics */
4529 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
4530 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4531 "Std mbuf cluster failed");
4532 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_fail",
4533 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4534 "Defragmenting mbuf chain failed");
4535 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4536 CTLFLAG_RD, &adapter->dropped_pkts,
4537 "Driver dropped packets");
4538 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
4539 CTLFLAG_RD, &adapter->no_tx_dma_setup,
4540 "Driver tx dma failure in xmit");
4541 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4542 CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4543 "Not enough tx descriptors failure in xmit");
4544 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4545 CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4546 "Not enough tx descriptors failure in xmit");
4547 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4548 CTLFLAG_RD, &adapter->rx_overruns,
4550 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4551 CTLFLAG_RD, &adapter->watchdog_events,
4552 "Watchdog timeouts");
4554 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4555 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4556 lem_sysctl_reg_handler, "IU",
4557 "Device Control Register");
4558 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4559 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4560 lem_sysctl_reg_handler, "IU",
4561 "Receiver Control Register");
4562 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4563 CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4564 "Flow Control High Watermark");
4565 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4566 CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4567 "Flow Control Low Watermark");
4568 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4569 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4570 "TX FIFO workaround events");
4571 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4572 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4575 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head",
4576 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4577 lem_sysctl_reg_handler, "IU",
4578 "Transmit Descriptor Head");
4579 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail",
4580 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4581 lem_sysctl_reg_handler, "IU",
4582 "Transmit Descriptor Tail");
4583 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head",
4584 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4585 lem_sysctl_reg_handler, "IU",
4586 "Receive Descriptor Head");
4587 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail",
4588 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4589 lem_sysctl_reg_handler, "IU",
4590 "Receive Descriptor Tail");
4593 /* MAC stats get their own sub node */
4595 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4596 CTLFLAG_RD, NULL, "Statistics");
4597 stat_list = SYSCTL_CHILDREN(stat_node);
4599 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4600 CTLFLAG_RD, &stats->ecol,
4601 "Excessive collisions");
4602 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4603 CTLFLAG_RD, &stats->scc,
4604 "Single collisions");
4605 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4606 CTLFLAG_RD, &stats->mcc,
4607 "Multiple collisions");
4608 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4609 CTLFLAG_RD, &stats->latecol,
4611 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4612 CTLFLAG_RD, &stats->colc,
4614 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4615 CTLFLAG_RD, &adapter->stats.symerrs,
4617 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4618 CTLFLAG_RD, &adapter->stats.sec,
4620 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4621 CTLFLAG_RD, &adapter->stats.dc,
4623 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4624 CTLFLAG_RD, &adapter->stats.mpc,
4626 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4627 CTLFLAG_RD, &adapter->stats.rnbc,
4628 "Receive No Buffers");
4629 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4630 CTLFLAG_RD, &adapter->stats.ruc,
4631 "Receive Undersize");
4632 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4633 CTLFLAG_RD, &adapter->stats.rfc,
4634 "Fragmented Packets Received ");
4635 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4636 CTLFLAG_RD, &adapter->stats.roc,
4637 "Oversized Packets Received");
4638 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4639 CTLFLAG_RD, &adapter->stats.rjc,
4641 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4642 CTLFLAG_RD, &adapter->stats.rxerrc,
4644 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4645 CTLFLAG_RD, &adapter->stats.crcerrs,
4647 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4648 CTLFLAG_RD, &adapter->stats.algnerrc,
4649 "Alignment Errors");
4650 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4651 CTLFLAG_RD, &adapter->stats.cexterr,
4652 "Collision/Carrier extension errors");
4653 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4654 CTLFLAG_RD, &adapter->stats.xonrxc,
4656 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4657 CTLFLAG_RD, &adapter->stats.xontxc,
4659 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4660 CTLFLAG_RD, &adapter->stats.xoffrxc,
4662 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4663 CTLFLAG_RD, &adapter->stats.xofftxc,
4664 "XOFF Transmitted");
4666 /* Packet Reception Stats */
4667 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4668 CTLFLAG_RD, &adapter->stats.tpr,
4669 "Total Packets Received ");
4670 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4671 CTLFLAG_RD, &adapter->stats.gprc,
4672 "Good Packets Received");
4673 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4674 CTLFLAG_RD, &adapter->stats.bprc,
4675 "Broadcast Packets Received");
4676 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4677 CTLFLAG_RD, &adapter->stats.mprc,
4678 "Multicast Packets Received");
4679 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4680 CTLFLAG_RD, &adapter->stats.prc64,
4681 "64 byte frames received ");
4682 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4683 CTLFLAG_RD, &adapter->stats.prc127,
4684 "65-127 byte frames received");
4685 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4686 CTLFLAG_RD, &adapter->stats.prc255,
4687 "128-255 byte frames received");
4688 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4689 CTLFLAG_RD, &adapter->stats.prc511,
4690 "256-511 byte frames received");
4691 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4692 CTLFLAG_RD, &adapter->stats.prc1023,
4693 "512-1023 byte frames received");
4694 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4695 CTLFLAG_RD, &adapter->stats.prc1522,
4696 "1023-1522 byte frames received");
4697 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4698 CTLFLAG_RD, &adapter->stats.gorc,
4699 "Good Octets Received");
4701 /* Packet Transmission Stats */
4702 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4703 CTLFLAG_RD, &adapter->stats.gotc,
4704 "Good Octets Transmitted");
4705 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4706 CTLFLAG_RD, &adapter->stats.tpt,
4707 "Total Packets Transmitted");
4708 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4709 CTLFLAG_RD, &adapter->stats.gptc,
4710 "Good Packets Transmitted");
4711 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4712 CTLFLAG_RD, &adapter->stats.bptc,
4713 "Broadcast Packets Transmitted");
4714 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4715 CTLFLAG_RD, &adapter->stats.mptc,
4716 "Multicast Packets Transmitted");
4717 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4718 CTLFLAG_RD, &adapter->stats.ptc64,
4719 "64 byte frames transmitted ");
4720 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4721 CTLFLAG_RD, &adapter->stats.ptc127,
4722 "65-127 byte frames transmitted");
4723 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4724 CTLFLAG_RD, &adapter->stats.ptc255,
4725 "128-255 byte frames transmitted");
4726 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4727 CTLFLAG_RD, &adapter->stats.ptc511,
4728 "256-511 byte frames transmitted");
4729 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4730 CTLFLAG_RD, &adapter->stats.ptc1023,
4731 "512-1023 byte frames transmitted");
4732 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4733 CTLFLAG_RD, &adapter->stats.ptc1522,
4734 "1024-1522 byte frames transmitted");
4735 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4736 CTLFLAG_RD, &adapter->stats.tsctc,
4737 "TSO Contexts Transmitted");
4738 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4739 CTLFLAG_RD, &adapter->stats.tsctfc,
4740 "TSO Contexts Failed");
/**********************************************************************
 *
 *  This routine provides a way to dump out the adapter eeprom,
 *  often a useful debug/service tool. This only dumps the first
 *  32 words, stuff that matters is in that extent.
 *
 **********************************************************************/
4752 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4754 struct adapter *adapter;
4759 error = sysctl_handle_int(oidp, &result, 0, req);
4761 if (error || !req->newptr)
4765 * This value will cause a hex dump of the
4766 * first 32 16-bit words of the EEPROM to
4770 adapter = (struct adapter *)arg1;
4771 lem_print_nvm_info(adapter);
4778 lem_print_nvm_info(struct adapter *adapter)
4783 /* Its a bit crude, but it gets the job done */
4784 printf("\nInterface EEPROM Dump:\n");
4785 printf("Offset\n0x0000 ");
4786 for (i = 0, j = 0; i < 32; i++, j++) {
4787 if (j == 8) { /* Make the offset block */
4789 printf("\n0x00%x0 ",row);
4791 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4792 printf("%04x ", eeprom_data);
4798 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4800 struct em_int_delay_info *info;
4801 struct adapter *adapter;
4807 info = (struct em_int_delay_info *)arg1;
4808 usecs = info->value;
4809 error = sysctl_handle_int(oidp, &usecs, 0, req);
4810 if (error != 0 || req->newptr == NULL)
4812 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4814 info->value = usecs;
4815 ticks = EM_USECS_TO_TICKS(usecs);
4816 if (info->offset == E1000_ITR) /* units are 256ns here */
4819 adapter = info->adapter;
4821 EM_CORE_LOCK(adapter);
4822 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4823 regval = (regval & ~0xffff) | (ticks & 0xffff);
4824 /* Handle a few special cases. */
4825 switch (info->offset) {
4830 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4831 /* Don't write 0 into the TIDV register. */
4834 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4837 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4838 EM_CORE_UNLOCK(adapter);
4843 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4844 const char *description, struct em_int_delay_info *info,
4845 int offset, int value)
4847 info->adapter = adapter;
4848 info->offset = offset;
4849 info->value = value;
4850 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4851 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4852 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4853 info, 0, lem_sysctl_int_delay, "I", description);
4857 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4858 const char *description, int *limit, int value)
4861 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4862 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4863 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
4867 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4868 const char *description, int *limit, int value)
4871 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4872 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4873 OID_AUTO, name, CTLFLAG_RW, limit, value, description);