1 /******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 * Uncomment the following extensions for better performance in a VM,
37 * especially if you have support in the hypervisor.
38 * See http://info.iet.unipi.it/~luigi/netmap/
40 // #define BATCH_DISPATCH
41 // #define NIC_SEND_COMBINING
42 // #define NIC_PARAVIRT /* enable virtio-like synchronization */
45 #include "opt_inet6.h"
47 #ifdef HAVE_KERNEL_OPTION_HEADERS
48 #include "opt_device_polling.h"
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/buf_ring.h>
55 #include <sys/endian.h>
56 #include <sys/kernel.h>
57 #include <sys/kthread.h>
58 #include <sys/malloc.h>
60 #include <sys/module.h>
62 #include <sys/socket.h>
63 #include <sys/sockio.h>
64 #include <sys/sysctl.h>
65 #include <sys/taskqueue.h>
66 #include <sys/eventhandler.h>
67 #include <machine/bus.h>
68 #include <machine/resource.h>
71 #include <net/ethernet.h>
73 #include <net/if_var.h>
74 #include <net/if_arp.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
78 #include <net/if_types.h>
79 #include <net/if_vlan_var.h>
81 #include <netinet/in_systm.h>
82 #include <netinet/in.h>
83 #include <netinet/if_ether.h>
84 #include <netinet/ip.h>
85 #include <netinet/ip6.h>
86 #include <netinet/tcp.h>
87 #include <netinet/udp.h>
89 #include <machine/in_cksum.h>
90 #include <dev/led/led.h>
91 #include <dev/pci/pcivar.h>
92 #include <dev/pci/pcireg.h>
94 #include "e1000_api.h"
97 /*********************************************************************
98 * Legacy Em Driver version:
99 *********************************************************************/
100 char lem_driver_version[] = "1.0.6";
102 /*********************************************************************
103 * PCI Device ID Table
105 * Used by probe to select devices to load on
106 * Last field stores an index into e1000_strings
107 * Last entry must be all 0s
109 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
110 *********************************************************************/
112 static em_vendor_info_t lem_vendor_info_array[] =
114 /* Intel(R) PRO/1000 Network Connection */
115 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
118 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
126 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
132 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
142 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
143 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
147 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
148 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
149 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
150 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
151 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
152 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
153 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
154 PCI_ANY_ID, PCI_ANY_ID, 0},
156 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
157 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
158 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
159 /* required last entry */
163 /*********************************************************************
164 * Table of branding strings for all supported NICs.
165 *********************************************************************/
167 static char *lem_strings[] = {
168 "Intel(R) PRO/1000 Legacy Network Connection"
171 /*********************************************************************
172 * Function prototypes
173 *********************************************************************/
174 static int lem_probe(device_t);
175 static int lem_attach(device_t);
176 static int lem_detach(device_t);
177 static int lem_shutdown(device_t);
178 static int lem_suspend(device_t);
179 static int lem_resume(device_t);
180 static void lem_start(if_t);
181 static void lem_start_locked(if_t ifp);
182 static int lem_ioctl(if_t, u_long, caddr_t);
183 static uint64_t lem_get_counter(if_t, ift_counter);
184 static void lem_init(void *);
185 static void lem_init_locked(struct adapter *);
186 static void lem_stop(void *);
187 static void lem_media_status(if_t, struct ifmediareq *);
188 static int lem_media_change(if_t);
189 static void lem_identify_hardware(struct adapter *);
190 static int lem_allocate_pci_resources(struct adapter *);
191 static int lem_allocate_irq(struct adapter *adapter);
192 static void lem_free_pci_resources(struct adapter *);
193 static void lem_local_timer(void *);
194 static int lem_hardware_init(struct adapter *);
195 static int lem_setup_interface(device_t, struct adapter *);
196 static void lem_setup_transmit_structures(struct adapter *);
197 static void lem_initialize_transmit_unit(struct adapter *);
198 static int lem_setup_receive_structures(struct adapter *);
199 static void lem_initialize_receive_unit(struct adapter *);
200 static void lem_enable_intr(struct adapter *);
201 static void lem_disable_intr(struct adapter *);
202 static void lem_free_transmit_structures(struct adapter *);
203 static void lem_free_receive_structures(struct adapter *);
204 static void lem_update_stats_counters(struct adapter *);
205 static void lem_add_hw_stats(struct adapter *adapter);
206 static void lem_txeof(struct adapter *);
207 static void lem_tx_purge(struct adapter *);
208 static int lem_allocate_receive_structures(struct adapter *);
209 static int lem_allocate_transmit_structures(struct adapter *);
210 static bool lem_rxeof(struct adapter *, int, int *);
211 #ifndef __NO_STRICT_ALIGNMENT
212 static int lem_fixup_rx(struct adapter *);
214 static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
216 static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
218 static void lem_set_promisc(struct adapter *);
219 static void lem_disable_promisc(struct adapter *);
220 static void lem_set_multi(struct adapter *);
221 static void lem_update_link_status(struct adapter *);
222 static int lem_get_buf(struct adapter *, int);
223 static void lem_register_vlan(void *, if_t, u16);
224 static void lem_unregister_vlan(void *, if_t, u16);
225 static void lem_setup_vlan_hw_support(struct adapter *);
226 static int lem_xmit(struct adapter *, struct mbuf **);
227 static void lem_smartspeed(struct adapter *);
228 static int lem_82547_fifo_workaround(struct adapter *, int);
229 static void lem_82547_update_fifo_head(struct adapter *, int);
230 static int lem_82547_tx_fifo_reset(struct adapter *);
231 static void lem_82547_move_tail(void *);
232 static int lem_dma_malloc(struct adapter *, bus_size_t,
233 struct em_dma_alloc *, int);
234 static void lem_dma_free(struct adapter *, struct em_dma_alloc *);
235 static int lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
236 static void lem_print_nvm_info(struct adapter *);
237 static int lem_is_valid_ether_addr(u8 *);
238 static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
239 PDESC_ARRAY desc_array);
240 static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
241 static void lem_add_int_delay_sysctl(struct adapter *, const char *,
242 const char *, struct em_int_delay_info *, int, int);
243 static void lem_set_flow_cntrl(struct adapter *, const char *,
244 const char *, int *, int);
245 /* Management and WOL Support */
246 static void lem_init_manageability(struct adapter *);
247 static void lem_release_manageability(struct adapter *);
248 static void lem_get_hw_control(struct adapter *);
249 static void lem_release_hw_control(struct adapter *);
250 static void lem_get_wakeup(device_t);
251 static void lem_enable_wakeup(device_t);
252 static int lem_enable_phy_wakeup(struct adapter *);
253 static void lem_led_func(void *, int);
255 static void lem_intr(void *);
256 static int lem_irq_fast(void *);
257 static void lem_handle_rxtx(void *context, int pending);
258 static void lem_handle_link(void *context, int pending);
259 static void lem_add_rx_process_limit(struct adapter *, const char *,
260 const char *, int *, int);
262 #ifdef DEVICE_POLLING
263 static poll_handler_drv_t lem_poll;
266 /*********************************************************************
267 * FreeBSD Device Interface Entry Points
268 *********************************************************************/
270 static device_method_t lem_methods[] = {
271 /* Device interface */
272 DEVMETHOD(device_probe, lem_probe),
273 DEVMETHOD(device_attach, lem_attach),
274 DEVMETHOD(device_detach, lem_detach),
275 DEVMETHOD(device_shutdown, lem_shutdown),
276 DEVMETHOD(device_suspend, lem_suspend),
277 DEVMETHOD(device_resume, lem_resume),
281 static driver_t lem_driver = {
282 "em", lem_methods, sizeof(struct adapter),
285 extern devclass_t em_devclass;
286 DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
287 MODULE_DEPEND(lem, pci, 1, 1, 1);
288 MODULE_DEPEND(lem, ether, 1, 1, 1);
290 /*********************************************************************
291 * Tunable default values.
292 *********************************************************************/
294 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
295 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
297 #define MAX_INTS_PER_SEC 8000
298 #define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256))
300 static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
301 static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
302 static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
303 static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
305 * increase lem_rxd and lem_txd to at least 2048 in netmap mode
306 * for better performance.
308 static int lem_rxd = EM_DEFAULT_RXD;
309 static int lem_txd = EM_DEFAULT_TXD;
310 static int lem_smart_pwr_down = FALSE;
312 /* Controls whether promiscuous also shows bad packets */
313 static int lem_debug_sbp = FALSE;
315 TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
316 TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
317 TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
318 TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
319 TUNABLE_INT("hw.em.rxd", &lem_rxd);
320 TUNABLE_INT("hw.em.txd", &lem_txd);
321 TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
322 TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
324 /* Interrupt style - default to fast */
325 static int lem_use_legacy_irq = 0;
326 TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq);
328 /* How many packets rxeof tries to clean at a time */
329 static int lem_rx_process_limit = 100;
330 TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
332 /* Flow control setting - default to FULL */
333 static int lem_fc_setting = e1000_fc_full;
334 TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
336 /* Global used in WOL setup with multiport cards */
337 static int global_quad_port_a = 0;
339 #ifdef DEV_NETMAP /* see ixgbe.c for details */
340 #include <dev/netmap/if_lem_netmap.h>
341 #endif /* DEV_NETMAP */
343 /*********************************************************************
344 * Device identification routine
346 * em_probe determines if the driver should be loaded on
347 * adapter based on PCI vendor/device id of the adapter.
349 * return BUS_PROBE_DEFAULT on success, positive on failure
350 *********************************************************************/
/*
 * lem_probe - device identification entry point.
 *
 * Matches the PCI vendor/device/subvendor/subdevice IDs of @dev against
 * lem_vendor_info_array; on a match, sets the device description from
 * lem_strings and claims the device with BUS_PROBE_DEFAULT.
 * NOTE(review): this extraction is missing interior lines (the non-Intel
 * early return and the final no-match return) — confirm against the
 * original if_lem.c before relying on the exact control flow.
 */
353 lem_probe(device_t dev)
355 	char adapter_name[60];
356 	u16 pci_vendor_id = 0;
357 	u16 pci_device_id = 0;
358 	u16 pci_subvendor_id = 0;
359 	u16 pci_subdevice_id = 0;
360 	em_vendor_info_t *ent;
362 	INIT_DEBUGOUT("em_probe: begin");
364 	pci_vendor_id = pci_get_vendor(dev);
	/* Only Intel devices are considered; non-Intel bails out early. */
365 	if (pci_vendor_id != EM_VENDOR_ID)
368 	pci_device_id = pci_get_device(dev);
369 	pci_subvendor_id = pci_get_subvendor(dev);
370 	pci_subdevice_id = pci_get_subdevice(dev);
	/* Walk the table; the terminating entry has vendor_id == 0. */
372 	ent = lem_vendor_info_array;
373 	while (ent->vendor_id != 0) {
374 		if ((pci_vendor_id == ent->vendor_id) &&
375 		    (pci_device_id == ent->device_id) &&
377 		    ((pci_subvendor_id == ent->subvendor_id) ||
378 		    (ent->subvendor_id == PCI_ANY_ID)) &&
380 		    ((pci_subdevice_id == ent->subdevice_id) ||
381 		    (ent->subdevice_id == PCI_ANY_ID))) {
382 			sprintf(adapter_name, "%s %s",
383 			    lem_strings[ent->index],
385 			device_set_desc_copy(dev, adapter_name);
386 			return (BUS_PROBE_DEFAULT);
394 /*********************************************************************
395 * Device initialization routine
397 * The attach entry point is called when the driver is being loaded.
398 * This routine identifies the type of hardware, allocates all resources
399 * and initializes the hardware.
401 * return 0 on success, positive on failure
402 *********************************************************************/
/*
 * lem_attach - device initialization entry point.
 *
 * Identifies the hardware, allocates PCI/DMA resources, validates the
 * EEPROM/NVM, allocates TX/RX descriptor rings and buffers, configures
 * interrupts, and creates the network interface.  The tail of the
 * function is the goto-cleanup error ladder that unwinds allocations in
 * reverse order.
 * NOTE(review): this extraction is missing interior lines (locals,
 * error-path gotos, labels, return statements) — the visible lines show
 * the overall sequence only; consult the original if_lem.c for the full
 * control flow.
 */
405 lem_attach(device_t dev)
407 	struct adapter *adapter;
411 	INIT_DEBUGOUT("lem_attach: begin");
413 	adapter = device_get_softc(dev);
414 	adapter->dev = adapter->osdep.dev = dev;
	/* Core/TX/RX mutexes; destroyed in the error ladder and in detach. */
415 	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
416 	EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
417 	EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
420 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
421 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
422 	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
423 	    lem_sysctl_nvm_info, "I", "NVM Information");
	/* Callouts tied to their mutexes so they drain safely on detach. */
425 	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
426 	callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
428 	/* Determine hardware and mac info */
429 	lem_identify_hardware(adapter);
431 	/* Setup PCI resources */
432 	if (lem_allocate_pci_resources(adapter)) {
433 		device_printf(dev, "Allocation of PCI resources failed\n");
438 	/* Do Shared Code initialization */
439 	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
440 		device_printf(dev, "Setup of Shared code failed\n");
445 	e1000_get_bus_info(&adapter->hw);
447 	/* Set up some sysctls for the tunable interrupt delays */
448 	lem_add_int_delay_sysctl(adapter, "rx_int_delay",
449 	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
450 	    E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
451 	lem_add_int_delay_sysctl(adapter, "tx_int_delay",
452 	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
453 	    E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
	/* Absolute-delay and ITR sysctls exist only on 82540 and newer. */
454 	if (adapter->hw.mac.type >= e1000_82540) {
455 		lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
456 		    "receive interrupt delay limit in usecs",
457 		    &adapter->rx_abs_int_delay,
458 		    E1000_REGISTER(&adapter->hw, E1000_RADV),
459 		    lem_rx_abs_int_delay_dflt);
460 		lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
461 		    "transmit interrupt delay limit in usecs",
462 		    &adapter->tx_abs_int_delay,
463 		    E1000_REGISTER(&adapter->hw, E1000_TADV),
464 		    lem_tx_abs_int_delay_dflt);
465 		lem_add_int_delay_sysctl(adapter, "itr",
466 		    "interrupt delay limit in usecs/4",
468 		    E1000_REGISTER(&adapter->hw, E1000_ITR),
472 	/* Sysctls for limiting the amount of work done in the taskqueue */
473 	lem_add_rx_process_limit(adapter, "rx_processing_limit",
474 	    "max number of rx packets to process", &adapter->rx_process_limit,
475 	    lem_rx_process_limit);
477 #ifdef NIC_SEND_COMBINING
478 	/* Sysctls to control mitigation */
479 	lem_add_rx_process_limit(adapter, "sc_enable",
480 	    "driver TDT mitigation", &adapter->sc_enable, 0);
481 #endif /* NIC_SEND_COMBINING */
482 #ifdef BATCH_DISPATCH
483 	lem_add_rx_process_limit(adapter, "batch_enable",
484 	    "driver rx batch", &adapter->batch_enable, 0);
485 #endif /* BATCH_DISPATCH */
487 	lem_add_rx_process_limit(adapter, "rx_retries",
488 	    "driver rx retries", &adapter->rx_retries, 0);
489 #endif /* NIC_PARAVIRT */
491 	/* Sysctl for setting the interface flow control */
492 	lem_set_flow_cntrl(adapter, "flow_control",
493 	    "flow control setting",
494 	    &adapter->fc_setting, lem_fc_setting);
497 	 * Validate number of transmit and receive descriptors. It
498 	 * must not exceed hardware maximum, and must be multiple
499 	 * of E1000_DBA_ALIGN.
	/* Invalid tunables fall back to the compile-time defaults. */
501 	if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
502 	    (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
503 	    (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
504 	    (lem_txd < EM_MIN_TXD)) {
505 		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
506 		    EM_DEFAULT_TXD, lem_txd);
507 		adapter->num_tx_desc = EM_DEFAULT_TXD;
509 		adapter->num_tx_desc = lem_txd;
510 	if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
511 	    (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
512 	    (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
513 	    (lem_rxd < EM_MIN_RXD)) {
514 		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
515 		    EM_DEFAULT_RXD, lem_rxd);
516 		adapter->num_rx_desc = EM_DEFAULT_RXD;
518 		adapter->num_rx_desc = lem_rxd;
520 	adapter->hw.mac.autoneg = DO_AUTO_NEG;
521 	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
522 	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
523 	adapter->rx_buffer_len = 2048;
525 	e1000_init_script_state_82541(&adapter->hw, TRUE);
526 	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
	/* Copper-specific PHY defaults (auto MDI-X, master/slave mode). */
529 	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
530 		adapter->hw.phy.mdix = AUTO_ALL_MODES;
531 		adapter->hw.phy.disable_polarity_correction = FALSE;
532 		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
536 	 * Set the frame limits assuming
537 	 * standard ethernet sized frames.
539 	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
540 	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
543 	 * This controls when hardware reports transmit completion
546 	adapter->hw.mac.report_tx_early = 1;
	/* Paravirt (NIC_PARAVIRT) CSB shared-memory setup for VM guests. */
549 	device_printf(dev, "driver supports paravirt, subdev 0x%x\n",
550 	    adapter->hw.subsystem_device_id);
551 	if (adapter->hw.subsystem_device_id == E1000_PARA_SUBDEV) {
554 		device_printf(dev, "paravirt support on dev %p\n", adapter);
555 		tsize = 4096; // XXX one page for the csb
556 		if (lem_dma_malloc(adapter, tsize, &adapter->csb_mem, BUS_DMA_NOWAIT)) {
557 			device_printf(dev, "Unable to allocate csb memory\n");
561 		/* Setup the Base of the CSB */
562 		adapter->csb = (struct paravirt_csb *)adapter->csb_mem.dma_vaddr;
563 		/* force the first kick */
564 		adapter->csb->host_need_txkick = 1; /* txring empty */
565 		adapter->csb->guest_need_rxkick = 1; /* no rx packets */
566 		bus_addr = adapter->csb_mem.dma_paddr;
567 		lem_add_rx_process_limit(adapter, "csb_on",
568 		    "enable paravirt.", &adapter->csb->guest_csb_on, 0);
569 		lem_add_rx_process_limit(adapter, "txc_lim",
570 		    "txc_lim", &adapter->csb->host_txcycles_lim, 1);
		/* Convenience macro: expose a CSB field as a sysctl with a default. */
573 #define PA_SC(name, var, val)	\
574 	lem_add_rx_process_limit(adapter, name, name, var, val)
575 		PA_SC("host_need_txkick",&adapter->csb->host_need_txkick, 1);
576 		PA_SC("host_rxkick_at",&adapter->csb->host_rxkick_at, ~0);
577 		PA_SC("guest_need_txkick",&adapter->csb->guest_need_txkick, 0);
578 		PA_SC("guest_need_rxkick",&adapter->csb->guest_need_rxkick, 1);
579 		PA_SC("tdt_reg_count",&adapter->tdt_reg_count, 0);
580 		PA_SC("tdt_csb_count",&adapter->tdt_csb_count, 0);
581 		PA_SC("tdt_int_count",&adapter->tdt_int_count, 0);
582 		PA_SC("guest_need_kick_count",&adapter->guest_need_kick_count, 0);
583 		/* tell the host where the block is */
584 		E1000_WRITE_REG(&adapter->hw, E1000_CSBAH,
585 			(u32)(bus_addr >> 32));
586 		E1000_WRITE_REG(&adapter->hw, E1000_CSBAL,
589 #endif /* NIC_PARAVIRT */
591 	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
594 	/* Allocate Transmit Descriptor ring */
595 	if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
596 		device_printf(dev, "Unable to allocate tx_desc memory\n");
600 	adapter->tx_desc_base =
601 	    (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
603 	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
606 	/* Allocate Receive Descriptor ring */
607 	if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
608 		device_printf(dev, "Unable to allocate rx_desc memory\n");
612 	adapter->rx_desc_base =
613 	    (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
615 	/* Allocate multicast array memory. */
616 	adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
617 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
618 	if (adapter->mta == NULL) {
619 		device_printf(dev, "Can not allocate multicast setup array\n");
625 	** Start from a known state, this is
626 	** important in reading the nvm and
629 	e1000_reset_hw(&adapter->hw);
631 	/* Make sure we have a good EEPROM before we read from it */
632 	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
634 		** Some PCI-E parts fail the first check due to
635 		** the link being in sleep state, call it again,
636 		** if it fails a second time it's a real issue.
638 		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
640 			    "The EEPROM Checksum Is Not Valid\n");
646 	/* Copy the permanent MAC address out of the EEPROM */
647 	if (e1000_read_mac_addr(&adapter->hw) < 0) {
648 		device_printf(dev, "EEPROM read error while reading MAC"
654 	if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
655 		device_printf(dev, "Invalid MAC address\n");
660 	/* Initialize the hardware */
661 	if (lem_hardware_init(adapter)) {
662 		device_printf(dev, "Unable to initialize the hardware\n");
667 	/* Allocate transmit descriptors and buffers */
668 	if (lem_allocate_transmit_structures(adapter)) {
669 		device_printf(dev, "Could not setup transmit structures\n");
674 	/* Allocate receive descriptors and buffers */
675 	if (lem_allocate_receive_structures(adapter)) {
676 		device_printf(dev, "Could not setup receive structures\n");
682 	** Do interrupt configuration
684 	error = lem_allocate_irq(adapter);
689 	 * Get Wake-on-Lan and Management info for later use
693 	/* Setup OS specific network interface */
694 	if (lem_setup_interface(dev, adapter) != 0)
697 	/* Initialize statistics */
698 	lem_update_stats_counters(adapter);
700 	adapter->hw.mac.get_link_status = 1;
701 	lem_update_link_status(adapter);
703 	/* Indicate SOL/IDER usage */
704 	if (e1000_check_reset_block(&adapter->hw))
706 		    "PHY reset is blocked due to SOL/IDER session.\n");
708 	/* Do we need workaround for 82544 PCI-X adapter? */
709 	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
710 	    adapter->hw.mac.type == e1000_82544)
711 		adapter->pcix_82544 = TRUE;
713 		adapter->pcix_82544 = FALSE;
715 	/* Register for VLAN events */
716 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
717 	    lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
718 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
719 	    lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
721 	lem_add_hw_stats(adapter);
723 	/* Non-AMT based hardware can now take control from firmware */
724 	if (adapter->has_manage && !adapter->has_amt)
725 		lem_get_hw_control(adapter);
727 	/* Tell the stack that the interface is not active */
728 	if_setdrvflagbits(adapter->ifp, 0, IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
730 	adapter->led_dev = led_create(lem_led_func, adapter,
731 	    device_get_nameunit(dev));
734 	lem_netmap_attach(adapter);
735 #endif /* DEV_NETMAP */
736 	INIT_DEBUGOUT("lem_attach: end");
	/*
	 * Error-unwind ladder: each failure above jumps to a label below
	 * (labels not visible in this extraction) so resources are freed
	 * in reverse order of acquisition.
	 */
741 	lem_free_transmit_structures(adapter);
744 	lem_release_hw_control(adapter);
745 	lem_dma_free(adapter, &adapter->rxdma);
747 	lem_dma_free(adapter, &adapter->txdma);
750 	lem_dma_free(adapter, &adapter->csb_mem);
752 #endif /* NIC_PARAVIRT */
755 	if (adapter->ifp != (void *)NULL)
756 		if_free(adapter->ifp);
757 	lem_free_pci_resources(adapter);
758 	free(adapter->mta, M_DEVBUF);
759 	EM_TX_LOCK_DESTROY(adapter);
760 	EM_RX_LOCK_DESTROY(adapter);
761 	EM_CORE_LOCK_DESTROY(adapter);
766 /*********************************************************************
767 * Device removal routine
769 * The detach entry point is called when the driver is being removed.
770 * This routine stops the adapter and deallocates all the resources
771 * that were allocated for driver operation.
773 * return 0 on success, positive on failure
774 *********************************************************************/
/*
 * lem_detach - device removal entry point.
 *
 * Refuses to detach while a VLAN trunk is in use, then stops the
 * adapter, unregisters event handlers, drains callouts, and releases
 * every resource acquired in lem_attach() in reverse order.
 * NOTE(review): interior lines (returns, stop call, netmap detach) are
 * missing from this extraction; consult the original if_lem.c.
 */
777 lem_detach(device_t dev)
779 	struct adapter *adapter = device_get_softc(dev);
780 	if_t ifp = adapter->ifp;
782 	INIT_DEBUGOUT("em_detach: begin");
784 	/* Make sure VLANS are not using driver */
785 	if (if_vlantrunkinuse(ifp)) {
786 		device_printf(dev,"Vlan in use, detach first\n");
790 #ifdef DEVICE_POLLING
791 	if (if_getcapenable(ifp) & IFCAP_POLLING)
792 		ether_poll_deregister_drv(ifp);
795 	if (adapter->led_dev != NULL)
796 		led_destroy(adapter->led_dev);
798 	EM_CORE_LOCK(adapter);
	/* Flag checked by the ioctl path to reject requests during teardown. */
800 	adapter->in_detach = 1;
802 	e1000_phy_hw_reset(&adapter->hw);
804 	lem_release_manageability(adapter);
806 	EM_TX_UNLOCK(adapter);
807 	EM_CORE_UNLOCK(adapter);
809 	/* Unregister VLAN events */
810 	if (adapter->vlan_attach != NULL)
811 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
812 	if (adapter->vlan_detach != NULL)
813 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
815 	ether_ifdetach(adapter->ifp);
	/* Callouts are mutex-backed (see attach), so drain is race-free. */
816 	callout_drain(&adapter->timer);
817 	callout_drain(&adapter->tx_fifo_timer);
821 #endif /* DEV_NETMAP */
822 	lem_free_pci_resources(adapter);
823 	bus_generic_detach(dev);
826 	lem_free_transmit_structures(adapter);
827 	lem_free_receive_structures(adapter);
829 	/* Free Transmit Descriptor ring */
830 	if (adapter->tx_desc_base) {
831 		lem_dma_free(adapter, &adapter->txdma);
832 		adapter->tx_desc_base = NULL;
835 	/* Free Receive Descriptor ring */
836 	if (adapter->rx_desc_base) {
837 		lem_dma_free(adapter, &adapter->rxdma);
838 		adapter->rx_desc_base = NULL;
843 	lem_dma_free(adapter, &adapter->csb_mem);
846 #endif /* NIC_PARAVIRT */
847 	lem_release_hw_control(adapter);
848 	free(adapter->mta, M_DEVBUF);
849 	EM_TX_LOCK_DESTROY(adapter);
850 	EM_RX_LOCK_DESTROY(adapter);
851 	EM_CORE_LOCK_DESTROY(adapter);
856 /*********************************************************************
858 * Shutdown entry point
860 **********************************************************************/
/*
 * lem_shutdown - system shutdown entry point.
 * Shutdown is handled identically to suspend: quiesce the adapter and
 * arm wake-up via lem_suspend().
 */
863 lem_shutdown(device_t dev)
865 	return lem_suspend(dev);
869 * Suspend/resume device methods.
/*
 * lem_suspend - device suspend entry point.
 * Under the core lock: releases manageability and hardware control back
 * to firmware and enables Wake-on-LAN, then defers to the generic bus
 * suspend handler.
 */
872 lem_suspend(device_t dev)
874 	struct adapter *adapter = device_get_softc(dev);
876 	EM_CORE_LOCK(adapter);
878 	lem_release_manageability(adapter);
879 	lem_release_hw_control(adapter);
880 	lem_enable_wakeup(dev);
882 	EM_CORE_UNLOCK(adapter);
884 	return bus_generic_suspend(dev);
/*
 * lem_resume - device resume entry point.
 * Re-initializes the adapter and manageability under the core lock,
 * then defers to the generic bus resume handler.
 * NOTE(review): an interior line (likely a restart of TX via ifp) is
 * missing from this extraction — ifp is otherwise unused here.
 */
888 lem_resume(device_t dev)
890 	struct adapter *adapter = device_get_softc(dev);
891 	if_t ifp = adapter->ifp;
893 	EM_CORE_LOCK(adapter);
894 	lem_init_locked(adapter);
895 	lem_init_manageability(adapter);
896 	EM_CORE_UNLOCK(adapter);
899 	return bus_generic_resume(dev);
/*
 * lem_start_locked - transmit kick-off; caller must hold the TX lock
 * (asserted below).
 *
 * Dequeues frames from the interface send queue and hands each to
 * lem_xmit().  On encapsulation failure the frame is re-queued (when
 * still valid) and IFF_DRV_OACTIVE is set to back-pressure the stack.
 * Also arms the TX watchdog and, under NIC_PARAVIRT, requests a host
 * txkick through the CSB when descriptors run out.
 */
904 lem_start_locked(if_t ifp)
906 	struct adapter *adapter = if_getsoftc(ifp);
909 	EM_TX_LOCK_ASSERT(adapter);
	/* Bail unless the interface is RUNNING and not OACTIVE, with link up. */
911 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
914 	if (!adapter->link_active)
918 	 * Force a cleanup if number of TX descriptors
919 	 * available hits the threshold
921 	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
923 		/* Now do we at least have a minimal? */
924 		if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
925 			adapter->no_tx_desc_avail1++;
930 	while (!if_sendq_empty(ifp)) {
931 		m_head = if_dequeue(ifp);
936 		 * Encapsulation can modify our pointer, and or make it
937 		 * NULL on failure. In that event, we can't requeue.
939 		if (lem_xmit(adapter, &m_head)) {
942 			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
943 			if_sendq_prepend(ifp, m_head);
947 		/* Send a copy of the frame to the BPF listener */
948 		if_etherbpfmtap(ifp, m_head);
950 		/* Set timeout in case hardware has problems transmitting. */
951 		adapter->watchdog_check = TRUE;
952 		adapter->watchdog_time = ticks;
954 	if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
955 		if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
	/* Paravirt: ask the host for a TX completion kick and reap now. */
957 	if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE && adapter->csb &&
958 	    adapter->csb->guest_csb_on &&
959 	    !(adapter->csb->guest_need_txkick & 1)) {
960 		adapter->csb->guest_need_txkick = 1;
961 		adapter->guest_need_kick_count++;
962 		// XXX memory barrier
963 		lem_txeof(adapter); // XXX possibly clear IFF_DRV_OACTIVE
965 #endif /* NIC_PARAVIRT */
973 struct adapter *adapter = if_getsoftc(ifp);
976 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
977 lem_start_locked(ifp);
978 EM_TX_UNLOCK(adapter);
981 /*********************************************************************
984 * em_ioctl is called when the user wants to configure the
987 * return 0 on success, positive on failure
988 **********************************************************************/
991 lem_ioctl(if_t ifp, u_long command, caddr_t data)
993 struct adapter *adapter = if_getsoftc(ifp);
994 struct ifreq *ifr = (struct ifreq *)data;
995 #if defined(INET) || defined(INET6)
996 struct ifaddr *ifa = (struct ifaddr *)data;
998 bool avoid_reset = FALSE;
1001 if (adapter->in_detach)
1007 if (ifa->ifa_addr->sa_family == AF_INET)
1011 if (ifa->ifa_addr->sa_family == AF_INET6)
1015 ** Calling init results in link renegotiation,
1016 ** so we avoid doing it when possible.
1019 if_setflagbits(ifp, IFF_UP, 0);
1020 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
1023 if (!(if_getflags(ifp) & IFF_NOARP))
1024 arp_ifinit(ifp, ifa);
1027 error = ether_ioctl(ifp, command, data);
1033 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1035 EM_CORE_LOCK(adapter);
1036 switch (adapter->hw.mac.type) {
1038 max_frame_size = ETHER_MAX_LEN;
1041 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1043 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1045 EM_CORE_UNLOCK(adapter);
1050 if_setmtu(ifp, ifr->ifr_mtu);
1051 adapter->max_frame_size =
1052 if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
1053 lem_init_locked(adapter);
1054 EM_CORE_UNLOCK(adapter);
1058 IOCTL_DEBUGOUT("ioctl rcv'd:\
1059 SIOCSIFFLAGS (Set Interface Flags)");
1060 EM_CORE_LOCK(adapter);
1061 if (if_getflags(ifp) & IFF_UP) {
1062 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
1063 if ((if_getflags(ifp) ^ adapter->if_flags) &
1064 (IFF_PROMISC | IFF_ALLMULTI)) {
1065 lem_disable_promisc(adapter);
1066 lem_set_promisc(adapter);
1069 lem_init_locked(adapter);
1071 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1072 EM_TX_LOCK(adapter);
1074 EM_TX_UNLOCK(adapter);
1076 adapter->if_flags = if_getflags(ifp);
1077 EM_CORE_UNLOCK(adapter);
1081 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
1082 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1083 EM_CORE_LOCK(adapter);
1084 lem_disable_intr(adapter);
1085 lem_set_multi(adapter);
1086 if (adapter->hw.mac.type == e1000_82542 &&
1087 adapter->hw.revision_id == E1000_REVISION_2) {
1088 lem_initialize_receive_unit(adapter);
1090 #ifdef DEVICE_POLLING
1091 if (!(if_getcapenable(ifp) & IFCAP_POLLING))
1093 lem_enable_intr(adapter);
1094 EM_CORE_UNLOCK(adapter);
1098 /* Check SOL/IDER usage */
1099 EM_CORE_LOCK(adapter);
1100 if (e1000_check_reset_block(&adapter->hw)) {
1101 EM_CORE_UNLOCK(adapter);
1102 device_printf(adapter->dev, "Media change is"
1103 " blocked due to SOL/IDER session.\n");
1106 EM_CORE_UNLOCK(adapter);
1108 IOCTL_DEBUGOUT("ioctl rcv'd: \
1109 SIOCxIFMEDIA (Get/Set Interface Media)");
1110 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
1116 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1118 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1119 #ifdef DEVICE_POLLING
1120 if (mask & IFCAP_POLLING) {
1121 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1122 error = ether_poll_register_drv(lem_poll, ifp);
1125 EM_CORE_LOCK(adapter);
1126 lem_disable_intr(adapter);
1127 if_setcapenablebit(ifp, IFCAP_POLLING, 0);
1128 EM_CORE_UNLOCK(adapter);
1130 error = ether_poll_deregister_drv(ifp);
1131 /* Enable interrupt even in error case */
1132 EM_CORE_LOCK(adapter);
1133 lem_enable_intr(adapter);
1134 if_setcapenablebit(ifp, 0, IFCAP_POLLING);
1135 EM_CORE_UNLOCK(adapter);
1139 if (mask & IFCAP_HWCSUM) {
1140 if_togglecapenable(ifp, IFCAP_HWCSUM);
1143 if (mask & IFCAP_VLAN_HWTAGGING) {
1144 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
1147 if ((mask & IFCAP_WOL) &&
1148 (if_getcapabilities(ifp) & IFCAP_WOL) != 0) {
1149 if (mask & IFCAP_WOL_MCAST)
1150 if_togglecapenable(ifp, IFCAP_WOL_MCAST);
1151 if (mask & IFCAP_WOL_MAGIC)
1152 if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
1154 if (reinit && (if_getdrvflags(ifp) & IFF_DRV_RUNNING))
1161 error = ether_ioctl(ifp, command, data);
1169 /*********************************************************************
1172 * This routine is used in two ways. It is used by the stack as
1173 * init entry point in network interface structure. It is also used
1174 * by the driver as a hw/sw initialization routine to get to a
1177 * return 0 on success, positive on failure
1178 **********************************************************************/
1181 lem_init_locked(struct adapter *adapter)
1183 if_t ifp = adapter->ifp;
1184 device_t dev = adapter->dev;
1187 INIT_DEBUGOUT("lem_init: begin");
1189 EM_CORE_LOCK_ASSERT(adapter);
1191 EM_TX_LOCK(adapter);
1193 EM_TX_UNLOCK(adapter);
1196 * Packet Buffer Allocation (PBA)
1197 * Writing PBA sets the receive portion of the buffer
1198 * the remainder is used for the transmit buffer.
1200 * Devices before the 82547 had a Packet Buffer of 64K.
1201 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1202 * After the 82547 the buffer was reduced to 40K.
1203 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1204 * Note: default does not leave enough room for Jumbo Frame >10k.
1206 switch (adapter->hw.mac.type) {
1208 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1209 if (adapter->max_frame_size > 8192)
1210 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1212 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
1213 adapter->tx_fifo_head = 0;
1214 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1215 adapter->tx_fifo_size =
1216 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1219 /* Devices before 82547 had a Packet Buffer of 64K. */
1220 if (adapter->max_frame_size > 8192)
1221 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1223 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1226 INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
1227 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1229 /* Get the latest mac address, User can use a LAA */
1230 bcopy(if_getlladdr(adapter->ifp), adapter->hw.mac.addr,
1233 /* Put the address into the Receive Address Array */
1234 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1236 /* Initialize the hardware */
1237 if (lem_hardware_init(adapter)) {
1238 device_printf(dev, "Unable to initialize the hardware\n");
1241 lem_update_link_status(adapter);
1243 /* Setup VLAN support, basic and offload if available */
1244 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1246 /* Set hardware offload abilities */
1247 if_clearhwassist(ifp);
1248 if (adapter->hw.mac.type >= e1000_82543) {
1249 if (if_getcapenable(ifp) & IFCAP_TXCSUM)
1250 if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
1253 /* Configure for OS presence */
1254 lem_init_manageability(adapter);
1256 /* Prepare transmit descriptors and buffers */
1257 lem_setup_transmit_structures(adapter);
1258 lem_initialize_transmit_unit(adapter);
1260 /* Setup Multicast table */
1261 lem_set_multi(adapter);
1263 /* Prepare receive descriptors and buffers */
1264 if (lem_setup_receive_structures(adapter)) {
1265 device_printf(dev, "Could not setup receive structures\n");
1266 EM_TX_LOCK(adapter);
1268 EM_TX_UNLOCK(adapter);
1271 lem_initialize_receive_unit(adapter);
1273 /* Use real VLAN Filter support? */
1274 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
1275 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
1276 /* Use real VLAN Filter support */
1277 lem_setup_vlan_hw_support(adapter);
1280 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1281 ctrl |= E1000_CTRL_VME;
1282 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1286 /* Don't lose promiscuous settings */
1287 lem_set_promisc(adapter);
1289 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
1291 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1292 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1294 #ifdef DEVICE_POLLING
1296 * Only enable interrupts if we are not polling, make sure
1297 * they are off otherwise.
1299 if (if_getcapenable(ifp) & IFCAP_POLLING)
1300 lem_disable_intr(adapter);
1302 #endif /* DEVICE_POLLING */
1303 lem_enable_intr(adapter);
1305 /* AMT based hardware can now take control from firmware */
1306 if (adapter->has_manage && adapter->has_amt)
1307 lem_get_hw_control(adapter);
1313 struct adapter *adapter = arg;
1315 EM_CORE_LOCK(adapter);
1316 lem_init_locked(adapter);
1317 EM_CORE_UNLOCK(adapter);
1321 #ifdef DEVICE_POLLING
1322 /*********************************************************************
1324 * Legacy polling routine
1326 *********************************************************************/
1328 lem_poll(if_t ifp, enum poll_cmd cmd, int count)
1330 struct adapter *adapter = if_getsoftc(ifp);
1331 u32 reg_icr, rx_done = 0;
1333 EM_CORE_LOCK(adapter);
1334 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
1335 EM_CORE_UNLOCK(adapter);
1339 if (cmd == POLL_AND_CHECK_STATUS) {
1340 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1341 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1342 callout_stop(&adapter->timer);
1343 adapter->hw.mac.get_link_status = 1;
1344 lem_update_link_status(adapter);
1345 callout_reset(&adapter->timer, hz,
1346 lem_local_timer, adapter);
1349 EM_CORE_UNLOCK(adapter);
1351 lem_rxeof(adapter, count, &rx_done);
1353 EM_TX_LOCK(adapter);
1355 if(!if_sendq_empty(ifp))
1356 lem_start_locked(ifp);
1357 EM_TX_UNLOCK(adapter);
1360 #endif /* DEVICE_POLLING */
1362 /*********************************************************************
1364 * Legacy Interrupt Service routine
1366 *********************************************************************/
1370 struct adapter *adapter = arg;
1371 if_t ifp = adapter->ifp;
1375 if ((if_getcapenable(ifp) & IFCAP_POLLING) ||
1376 ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
1379 EM_CORE_LOCK(adapter);
1380 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1381 if (reg_icr & E1000_ICR_RXO)
1382 adapter->rx_overruns++;
1384 if ((reg_icr == 0xffffffff) || (reg_icr == 0)) {
1385 EM_CORE_UNLOCK(adapter);
1389 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1390 callout_stop(&adapter->timer);
1391 adapter->hw.mac.get_link_status = 1;
1392 lem_update_link_status(adapter);
1393 /* Deal with TX cruft when link lost */
1394 lem_tx_purge(adapter);
1395 callout_reset(&adapter->timer, hz,
1396 lem_local_timer, adapter);
1397 EM_CORE_UNLOCK(adapter);
1401 EM_CORE_UNLOCK(adapter);
1402 lem_rxeof(adapter, -1, NULL);
1404 EM_TX_LOCK(adapter);
1406 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) &&
1407 (!if_sendq_empty(ifp)))
1408 lem_start_locked(ifp);
1409 EM_TX_UNLOCK(adapter);
1415 lem_handle_link(void *context, int pending)
1417 struct adapter *adapter = context;
1418 if_t ifp = adapter->ifp;
1420 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
1423 EM_CORE_LOCK(adapter);
1424 callout_stop(&adapter->timer);
1425 lem_update_link_status(adapter);
1426 /* Deal with TX cruft when link lost */
1427 lem_tx_purge(adapter);
1428 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1429 EM_CORE_UNLOCK(adapter);
1433 /* Combined RX/TX handler, used by Legacy and MSI */
1435 lem_handle_rxtx(void *context, int pending)
1437 struct adapter *adapter = context;
1438 if_t ifp = adapter->ifp;
1441 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1442 bool more = lem_rxeof(adapter, adapter->rx_process_limit, NULL);
1443 EM_TX_LOCK(adapter);
1445 if(!if_sendq_empty(ifp))
1446 lem_start_locked(ifp);
1447 EM_TX_UNLOCK(adapter);
1449 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1454 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1455 lem_enable_intr(adapter);
1458 /*********************************************************************
1460 * Fast Legacy/MSI Combined Interrupt Service routine
1462 *********************************************************************/
1464 lem_irq_fast(void *arg)
1466 struct adapter *adapter = arg;
1472 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1475 if (reg_icr == 0xffffffff)
1476 return FILTER_STRAY;
1478 /* Definitely not our interrupt. */
1480 return FILTER_STRAY;
1483 * Mask interrupts until the taskqueue is finished running. This is
1484 * cheap, just assume that it is needed. This also works around the
1485 * MSI message reordering errata on certain systems.
1487 lem_disable_intr(adapter);
1488 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1490 /* Link status change */
1491 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1492 adapter->hw.mac.get_link_status = 1;
1493 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1496 if (reg_icr & E1000_ICR_RXO)
1497 adapter->rx_overruns++;
1498 return FILTER_HANDLED;
1502 /*********************************************************************
1504 * Media Ioctl callback
1506 * This routine is called whenever the user queries the status of
1507 * the interface using ifconfig.
1509 **********************************************************************/
1511 lem_media_status(if_t ifp, struct ifmediareq *ifmr)
1513 struct adapter *adapter = if_getsoftc(ifp);
1514 u_char fiber_type = IFM_1000_SX;
1516 INIT_DEBUGOUT("lem_media_status: begin");
1518 EM_CORE_LOCK(adapter);
1519 lem_update_link_status(adapter);
1521 ifmr->ifm_status = IFM_AVALID;
1522 ifmr->ifm_active = IFM_ETHER;
1524 if (!adapter->link_active) {
1525 EM_CORE_UNLOCK(adapter);
1529 ifmr->ifm_status |= IFM_ACTIVE;
1531 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1532 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1533 if (adapter->hw.mac.type == e1000_82545)
1534 fiber_type = IFM_1000_LX;
1535 ifmr->ifm_active |= fiber_type | IFM_FDX;
1537 switch (adapter->link_speed) {
1539 ifmr->ifm_active |= IFM_10_T;
1542 ifmr->ifm_active |= IFM_100_TX;
1545 ifmr->ifm_active |= IFM_1000_T;
1548 if (adapter->link_duplex == FULL_DUPLEX)
1549 ifmr->ifm_active |= IFM_FDX;
1551 ifmr->ifm_active |= IFM_HDX;
1553 EM_CORE_UNLOCK(adapter);
1556 /*********************************************************************
1558 * Media Ioctl callback
1560 * This routine is called when the user changes speed/duplex using
1561 * media/mediaopt option with ifconfig.
1563 **********************************************************************/
1565 lem_media_change(if_t ifp)
1567 struct adapter *adapter = if_getsoftc(ifp);
1568 struct ifmedia *ifm = &adapter->media;
1570 INIT_DEBUGOUT("lem_media_change: begin");
1572 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1575 EM_CORE_LOCK(adapter);
1576 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1578 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1579 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1584 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1585 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1588 adapter->hw.mac.autoneg = FALSE;
1589 adapter->hw.phy.autoneg_advertised = 0;
1590 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1591 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1593 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1596 adapter->hw.mac.autoneg = FALSE;
1597 adapter->hw.phy.autoneg_advertised = 0;
1598 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1599 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1601 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1604 device_printf(adapter->dev, "Unsupported media type\n");
1607 lem_init_locked(adapter);
1608 EM_CORE_UNLOCK(adapter);
1613 /*********************************************************************
1615 * This routine maps the mbufs to tx descriptors.
1617 * return 0 on success, positive on failure
1618 **********************************************************************/
1621 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1623 bus_dma_segment_t segs[EM_MAX_SCATTER];
1625 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1626 struct e1000_tx_desc *ctxd = NULL;
1627 struct mbuf *m_head;
1628 u32 txd_upper, txd_lower, txd_used, txd_saved;
1629 int error, nsegs, i, j, first, last = 0;
1632 txd_upper = txd_lower = txd_used = txd_saved = 0;
1635 ** When doing checksum offload, it is critical to
1636 ** make sure the first mbuf has more than header,
1637 ** because that routine expects data to be present.
1639 if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
1640 (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
1641 m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
1648 * Map the packet for DMA
1650 * Capture the first descriptor index,
1651 * this descriptor will have the index
1652 * of the EOP which is the only one that
1653 * now gets a DONE bit writeback.
1655 first = adapter->next_avail_tx_desc;
1656 tx_buffer = &adapter->tx_buffer_area[first];
1657 tx_buffer_mapped = tx_buffer;
1658 map = tx_buffer->map;
1660 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1661 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1664 * There are two types of errors we can (try) to handle:
1665 * - EFBIG means the mbuf chain was too long and bus_dma ran
1666 * out of segments. Defragment the mbuf chain and try again.
1667 * - ENOMEM means bus_dma could not obtain enough bounce buffers
1668 * at this point in time. Defer sending and try again later.
1669 * All other errors, in particular EINVAL, are fatal and prevent the
1670 * mbuf chain from ever going through. Drop it and report error.
1672 if (error == EFBIG) {
1675 m = m_defrag(*m_headp, M_NOWAIT);
1677 adapter->mbuf_alloc_failed++;
1685 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1686 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1689 adapter->no_tx_dma_setup++;
1694 } else if (error != 0) {
1695 adapter->no_tx_dma_setup++;
1699 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1700 adapter->no_tx_desc_avail2++;
1701 bus_dmamap_unload(adapter->txtag, map);
1706 /* Do hardware assists */
1707 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1708 lem_transmit_checksum_setup(adapter, m_head,
1709 &txd_upper, &txd_lower);
1711 i = adapter->next_avail_tx_desc;
1712 if (adapter->pcix_82544)
1715 /* Set up our transmit descriptors */
1716 for (j = 0; j < nsegs; j++) {
1718 bus_addr_t seg_addr;
1719 /* If adapter is 82544 and on PCIX bus */
1720 if(adapter->pcix_82544) {
1721 DESC_ARRAY desc_array;
1722 u32 array_elements, counter;
1724 * Check the Address and Length combination and
1725 * split the data accordingly
1727 array_elements = lem_fill_descriptors(segs[j].ds_addr,
1728 segs[j].ds_len, &desc_array);
1729 for (counter = 0; counter < array_elements; counter++) {
1730 if (txd_used == adapter->num_tx_desc_avail) {
1731 adapter->next_avail_tx_desc = txd_saved;
1732 adapter->no_tx_desc_avail2++;
1733 bus_dmamap_unload(adapter->txtag, map);
1736 tx_buffer = &adapter->tx_buffer_area[i];
1737 ctxd = &adapter->tx_desc_base[i];
1738 ctxd->buffer_addr = htole64(
1739 desc_array.descriptor[counter].address);
1740 ctxd->lower.data = htole32(
1741 (adapter->txd_cmd | txd_lower | (u16)
1742 desc_array.descriptor[counter].length));
1744 htole32((txd_upper));
1746 if (++i == adapter->num_tx_desc)
1748 tx_buffer->m_head = NULL;
1749 tx_buffer->next_eop = -1;
1753 tx_buffer = &adapter->tx_buffer_area[i];
1754 ctxd = &adapter->tx_desc_base[i];
1755 seg_addr = segs[j].ds_addr;
1756 seg_len = segs[j].ds_len;
1757 ctxd->buffer_addr = htole64(seg_addr);
1758 ctxd->lower.data = htole32(
1759 adapter->txd_cmd | txd_lower | seg_len);
1763 if (++i == adapter->num_tx_desc)
1765 tx_buffer->m_head = NULL;
1766 tx_buffer->next_eop = -1;
1770 adapter->next_avail_tx_desc = i;
1772 if (adapter->pcix_82544)
1773 adapter->num_tx_desc_avail -= txd_used;
1775 adapter->num_tx_desc_avail -= nsegs;
1777 if (m_head->m_flags & M_VLANTAG) {
1778 /* Set the vlan id. */
1779 ctxd->upper.fields.special =
1780 htole16(m_head->m_pkthdr.ether_vtag);
1781 /* Tell hardware to add tag */
1782 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
1785 tx_buffer->m_head = m_head;
1786 tx_buffer_mapped->map = tx_buffer->map;
1787 tx_buffer->map = map;
1788 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1791 * Last Descriptor of Packet
1792 * needs End Of Packet (EOP)
1793 * and Report Status (RS)
1796 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1798 * Keep track in the first buffer which
1799 * descriptor will be written back
1801 tx_buffer = &adapter->tx_buffer_area[first];
1802 tx_buffer->next_eop = last;
1803 adapter->watchdog_time = ticks;
1806 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1807 * that this frame is available to transmit.
1809 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1810 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1814 adapter->csb->guest_tdt = i;
1815 /* XXX memory barrier ? */
1816 if (adapter->csb->guest_csb_on &&
1817 !(adapter->csb->host_need_txkick & 1)) {
1818 /* XXX maybe useless
1819 * clean the ring. maybe do it before ?
1820 * maybe a little bit of histeresys ?
1822 if (adapter->num_tx_desc_avail <= 64) {// XXX
1828 #endif /* NIC_PARAVIRT */
1830 #ifdef NIC_SEND_COMBINING
1831 if (adapter->sc_enable) {
1832 if (adapter->shadow_tdt & MIT_PENDING_INT) {
1833 /* signal intr and data pending */
1834 adapter->shadow_tdt = MIT_PENDING_TDT | (i & 0xffff);
1837 adapter->shadow_tdt = MIT_PENDING_INT;
1840 #endif /* NIC_SEND_COMBINING */
1842 if (adapter->hw.mac.type == e1000_82547 &&
1843 adapter->link_duplex == HALF_DUPLEX)
1844 lem_82547_move_tail(adapter);
1846 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1847 if (adapter->hw.mac.type == e1000_82547)
1848 lem_82547_update_fifo_head(adapter,
1849 m_head->m_pkthdr.len);
1855 /*********************************************************************
1857 * 82547 workaround to avoid controller hang in half-duplex environment.
1858 * The workaround is to avoid queuing a large packet that would span
1859 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1860 * in this case. We do that only when FIFO is quiescent.
1862 **********************************************************************/
1864 lem_82547_move_tail(void *arg)
1866 struct adapter *adapter = arg;
1867 struct e1000_tx_desc *tx_desc;
1868 u16 hw_tdt, sw_tdt, length = 0;
1871 EM_TX_LOCK_ASSERT(adapter);
1873 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
1874 sw_tdt = adapter->next_avail_tx_desc;
1876 while (hw_tdt != sw_tdt) {
1877 tx_desc = &adapter->tx_desc_base[hw_tdt];
1878 length += tx_desc->lower.flags.length;
1879 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1880 if (++hw_tdt == adapter->num_tx_desc)
1884 if (lem_82547_fifo_workaround(adapter, length)) {
1885 adapter->tx_fifo_wrk_cnt++;
1886 callout_reset(&adapter->tx_fifo_timer, 1,
1887 lem_82547_move_tail, adapter);
1890 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
1891 lem_82547_update_fifo_head(adapter, length);
1898 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1900 int fifo_space, fifo_pkt_len;
1902 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1904 if (adapter->link_duplex == HALF_DUPLEX) {
1905 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1907 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1908 if (lem_82547_tx_fifo_reset(adapter))
1919 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1921 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1923 /* tx_fifo_head is always 16 byte aligned */
1924 adapter->tx_fifo_head += fifo_pkt_len;
1925 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1926 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1932 lem_82547_tx_fifo_reset(struct adapter *adapter)
1936 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1937 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1938 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1939 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1940 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1941 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1942 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1943 /* Disable TX unit */
1944 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1945 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1946 tctl & ~E1000_TCTL_EN);
1948 /* Reset FIFO pointers */
1949 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1950 adapter->tx_head_addr);
1951 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1952 adapter->tx_head_addr);
1953 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1954 adapter->tx_head_addr);
1955 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1956 adapter->tx_head_addr);
1958 /* Re-enable TX unit */
1959 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
1960 E1000_WRITE_FLUSH(&adapter->hw);
1962 adapter->tx_fifo_head = 0;
1963 adapter->tx_fifo_reset_cnt++;
1973 lem_set_promisc(struct adapter *adapter)
1975 if_t ifp = adapter->ifp;
1978 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1980 if (if_getflags(ifp) & IFF_PROMISC) {
1981 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1982 /* Turn this on if you want to see bad packets */
1984 reg_rctl |= E1000_RCTL_SBP;
1985 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1986 } else if (if_getflags(ifp) & IFF_ALLMULTI) {
1987 reg_rctl |= E1000_RCTL_MPE;
1988 reg_rctl &= ~E1000_RCTL_UPE;
1989 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1994 lem_disable_promisc(struct adapter *adapter)
1996 if_t ifp = adapter->ifp;
2000 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2001 reg_rctl &= (~E1000_RCTL_UPE);
2002 if (if_getflags(ifp) & IFF_ALLMULTI)
2003 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2005 mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES);
2007 /* Don't disable if in MAX groups */
2008 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2009 reg_rctl &= (~E1000_RCTL_MPE);
2010 reg_rctl &= (~E1000_RCTL_SBP);
2011 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2015 /*********************************************************************
2018 * This routine is called whenever multicast address list is updated.
2020 **********************************************************************/
2023 lem_set_multi(struct adapter *adapter)
2025 if_t ifp = adapter->ifp;
2027 u8 *mta; /* Multicast array memory */
2030 IOCTL_DEBUGOUT("lem_set_multi: begin");
2033 bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
2035 if (adapter->hw.mac.type == e1000_82542 &&
2036 adapter->hw.revision_id == E1000_REVISION_2) {
2037 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2038 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2039 e1000_pci_clear_mwi(&adapter->hw);
2040 reg_rctl |= E1000_RCTL_RST;
2041 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2045 if_multiaddr_array(ifp, mta, &mcnt, MAX_NUM_MULTICAST_ADDRESSES);
2047 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2048 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2049 reg_rctl |= E1000_RCTL_MPE;
2050 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2052 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
2054 if (adapter->hw.mac.type == e1000_82542 &&
2055 adapter->hw.revision_id == E1000_REVISION_2) {
2056 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2057 reg_rctl &= ~E1000_RCTL_RST;
2058 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2060 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2061 e1000_pci_set_mwi(&adapter->hw);
2066 /*********************************************************************
2069 * This routine checks for link status and updates statistics.
2071 **********************************************************************/
2074 lem_local_timer(void *arg)
2076 struct adapter *adapter = arg;
2078 EM_CORE_LOCK_ASSERT(adapter);
2080 lem_update_link_status(adapter);
2081 lem_update_stats_counters(adapter);
2083 lem_smartspeed(adapter);
2086 /* recover space if needed */
2087 if (adapter->csb && adapter->csb->guest_csb_on &&
2088 (adapter->watchdog_check == TRUE) &&
2089 (ticks - adapter->watchdog_time > EM_WATCHDOG) &&
2090 (adapter->num_tx_desc_avail != adapter->num_tx_desc) ) {
2093 * lem_txeof() normally (except when space in the queue
2094 * runs low XXX) cleans watchdog_check so that
2098 #endif /* NIC_PARAVIRT */
2100 * We check the watchdog: the time since
2101 * the last TX descriptor was cleaned.
2102 * This implies a functional TX engine.
2104 if ((adapter->watchdog_check == TRUE) &&
2105 (ticks - adapter->watchdog_time > EM_WATCHDOG))
2108 callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
2111 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2112 if_setdrvflagbits(adapter->ifp, 0, IFF_DRV_RUNNING);
2113 adapter->watchdog_events++;
2114 lem_init_locked(adapter);
2118 lem_update_link_status(struct adapter *adapter)
2120 struct e1000_hw *hw = &adapter->hw;
2121 if_t ifp = adapter->ifp;
2122 device_t dev = adapter->dev;
2125 /* Get the cached link value or read phy for real */
2126 switch (hw->phy.media_type) {
2127 case e1000_media_type_copper:
2128 if (hw->mac.get_link_status) {
2129 /* Do the work to read phy */
2130 e1000_check_for_link(hw);
2131 link_check = !hw->mac.get_link_status;
2132 if (link_check) /* ESB2 fix */
2133 e1000_cfg_on_link_up(hw);
2137 case e1000_media_type_fiber:
2138 e1000_check_for_link(hw);
2139 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2142 case e1000_media_type_internal_serdes:
2143 e1000_check_for_link(hw);
2144 link_check = adapter->hw.mac.serdes_has_link;
2147 case e1000_media_type_unknown:
2151 /* Now check for a transition */
2152 if (link_check && (adapter->link_active == 0)) {
2153 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2154 &adapter->link_duplex);
2156 device_printf(dev, "Link is up %d Mbps %s\n",
2157 adapter->link_speed,
2158 ((adapter->link_duplex == FULL_DUPLEX) ?
2159 "Full Duplex" : "Half Duplex"));
2160 adapter->link_active = 1;
2161 adapter->smartspeed = 0;
2162 if_setbaudrate(ifp, adapter->link_speed * 1000000);
2163 if_link_state_change(ifp, LINK_STATE_UP);
2164 } else if (!link_check && (adapter->link_active == 1)) {
2165 if_setbaudrate(ifp, 0);
2166 adapter->link_speed = 0;
2167 adapter->link_duplex = 0;
2169 device_printf(dev, "Link is Down\n");
2170 adapter->link_active = 0;
2171 /* Link down, disable watchdog */
2172 adapter->watchdog_check = FALSE;
2173 if_link_state_change(ifp, LINK_STATE_DOWN);
2177 /*********************************************************************
2179 * This routine disables all traffic on the adapter by issuing a
2180 * global reset on the MAC and deallocates TX/RX buffers.
2182 * This routine should always be called with BOTH the CORE
2184 **********************************************************************/
2189 struct adapter *adapter = arg;
2190 if_t ifp = adapter->ifp;
2192 EM_CORE_LOCK_ASSERT(adapter);
2193 EM_TX_LOCK_ASSERT(adapter);
2195 INIT_DEBUGOUT("lem_stop: begin");
2197 lem_disable_intr(adapter);
2198 callout_stop(&adapter->timer);
2199 callout_stop(&adapter->tx_fifo_timer);
2201 /* Tell the stack that the interface is no longer active */
2202 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2204 e1000_reset_hw(&adapter->hw);
2205 if (adapter->hw.mac.type >= e1000_82544)
2206 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2208 e1000_led_off(&adapter->hw);
2209 e1000_cleanup_led(&adapter->hw);
2213 /*********************************************************************
2215 * Determine hardware revision.
2217 **********************************************************************/
2219 lem_identify_hardware(struct adapter *adapter)
2221 device_t dev = adapter->dev;
2223 /* Make sure our PCI config space has the necessary stuff set */
2224 pci_enable_busmaster(dev);
2225 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2227 /* Save off the information about this board */
2228 adapter->hw.vendor_id = pci_get_vendor(dev);
2229 adapter->hw.device_id = pci_get_device(dev);
2230 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2231 adapter->hw.subsystem_vendor_id =
2232 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2233 adapter->hw.subsystem_device_id =
2234 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2236 /* Do Shared Code Init and Setup */
2237 if (e1000_set_mac_type(&adapter->hw)) {
2238 device_printf(dev, "Setup init failure\n");
/*
 * lem_allocate_pci_resources: map the register BAR (memory space) and,
 * for adapters newer than the 82543, locate and map the I/O BAR as well.
 * On success adapter->hw.back points at the osdep bus-space bundle used
 * by the shared-code register accessors.
 * NOTE(review): sampled extract — allocation arguments, early returns and
 * braces are missing between the numbered lines below.
 */
2244 lem_allocate_pci_resources(struct adapter *adapter)
2246 device_t dev = adapter->dev;
2247 int val, rid, error = E1000_SUCCESS;
2250 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2252 if (adapter->memory == NULL) {
2253 device_printf(dev, "Unable to allocate bus resource: memory\n");
/* Stash the bus-space tag/handle so E1000_READ/WRITE_REG can reach the BAR. */
2256 adapter->osdep.mem_bus_space_tag =
2257 rman_get_bustag(adapter->memory);
2258 adapter->osdep.mem_bus_space_handle =
2259 rman_get_bushandle(adapter->memory);
2260 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2262 /* Only older adapters use IO mapping */
2263 if (adapter->hw.mac.type > e1000_82543) {
2264 /* Figure out where our IO BAR is */
2265 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2266 val = pci_read_config(dev, rid, 4);
2267 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2268 adapter->io_rid = rid;
/* 64-bit memory BARs consume two config dwords, so skip an extra rid. */
2272 /* check for 64bit BAR */
2273 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2276 if (rid >= PCIR_CIS) {
2277 device_printf(dev, "Unable to locate IO BAR\n");
2280 adapter->ioport = bus_alloc_resource_any(dev,
2281 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2282 if (adapter->ioport == NULL) {
2283 device_printf(dev, "Unable to allocate bus resource: "
2287 adapter->hw.io_base = 0;
2288 adapter->osdep.io_bus_space_tag =
2289 rman_get_bustag(adapter->ioport);
2290 adapter->osdep.io_bus_space_handle =
2291 rman_get_bushandle(adapter->ioport);
/* Shared code reaches the OS-specific mapping state through hw.back. */
2294 adapter->hw.back = &adapter->osdep;
2299 /*********************************************************************
2301 * Setup the Legacy or MSI Interrupt handler
2303 **********************************************************************/
/*
 * lem_allocate_irq: allocate the single legacy/MSI IRQ resource and wire
 * up either a classic (locking) interrupt handler or a fast filter with
 * deferred taskqueue processing, depending on the lem_use_legacy_irq
 * tunable.
 * NOTE(review): sampled extract — the rid declaration, error returns and
 * closing braces are missing between the numbered lines below.
 */
2305 lem_allocate_irq(struct adapter *adapter)
2307 device_t dev = adapter->dev;
/* Mask everything first so nothing fires before the handler is installed. */
2310 /* Manually turn off all interrupts */
2311 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2313 /* We allocate a single interrupt resource */
2314 adapter->res[0] = bus_alloc_resource_any(dev,
2315 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2316 if (adapter->res[0] == NULL) {
2317 device_printf(dev, "Unable to allocate bus resource: "
2322 /* Do Legacy setup? */
2323 if (lem_use_legacy_irq) {
2324 if ((error = bus_setup_intr(dev, adapter->res[0],
2325 INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
2326 &adapter->tag[0])) != 0) {
2328 "Failed to register interrupt handler");
/*
 * Fast-interrupt path: the filter (lem_irq_fast) only acknowledges the
 * hardware; real RX/TX and link work runs in these taskqueue contexts.
 */
2335 * Use a Fast interrupt and the associated
2336 * deferred processing contexts.
2338 TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
2339 TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
2340 adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
2341 taskqueue_thread_enqueue, &adapter->tq);
2342 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2343 device_get_nameunit(adapter->dev));
2344 if ((error = bus_setup_intr(dev, adapter->res[0],
2345 INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
2346 &adapter->tag[0])) != 0) {
2347 device_printf(dev, "Failed to register fast interrupt "
2348 "handler: %d\n", error);
2349 taskqueue_free(adapter->tq);
/*
 * lem_free_pci_resources: undo lem_allocate_irq/lem_allocate_pci_resources —
 * tear down the interrupt handler, then release the IRQ, memory BAR and
 * (if mapped) I/O BAR resources. Each release is guarded so this is safe
 * on partially attached devices.
 * NOTE(review): sampled extract — some arguments/braces are missing
 * between the numbered lines below.
 */
2359 lem_free_pci_resources(struct adapter *adapter)
2361 device_t dev = adapter->dev;
2364 if (adapter->tag[0] != NULL) {
2365 bus_teardown_intr(dev, adapter->res[0],
2367 adapter->tag[0] = NULL;
2370 if (adapter->res[0] != NULL) {
2371 bus_release_resource(dev, SYS_RES_IRQ,
2372 0, adapter->res[0]);
2375 if (adapter->memory != NULL)
2376 bus_release_resource(dev, SYS_RES_MEMORY,
2377 PCIR_BAR(0), adapter->memory);
2379 if (adapter->ioport != NULL)
2380 bus_release_resource(dev, SYS_RES_IOPORT,
2381 adapter->io_rid, adapter->ioport);
2385 /*********************************************************************
2387 * Initialize the hardware to a configuration
2388 * as specified by the adapter structure.
2390 **********************************************************************/
/*
 * lem_hardware_init: reset the MAC, derive the flow-control watermarks
 * from the on-chip packet buffer size, program the requested flow-control
 * mode, then run the shared-code hardware init and an initial link check.
 * NOTE(review): sampled extract — variable declarations (rx_buffer_size),
 * the PBA mask/shift, and return statements are missing between the
 * numbered lines below.
 */
2392 lem_hardware_init(struct adapter *adapter)
2394 device_t dev = adapter->dev;
2397 INIT_DEBUGOUT("lem_hardware_init: begin");
2399 /* Issue a global reset */
2400 e1000_reset_hw(&adapter->hw);
2402 /* When hardware is reset, fifo_head is also reset */
2403 adapter->tx_fifo_head = 0;
2406 * These parameters control the automatic generation (Tx) and
2407 * response (Rx) to Ethernet PAUSE frames.
2408 * - High water mark should allow for at least two frames to be
2409 * received after sending an XOFF.
2410 * - Low water mark works best when it is very near the high water mark.
2411 * This allows the receiver to restart by sending XON when it has
2412 * drained a bit. Here we use an arbitrary value of 1500 which will
2413 * restart after one full frame is pulled from the buffer. There
2414 * could be several smaller frames in the buffer and if so they will
2415 * not trigger the XON until their total number reduces the buffer
2417 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2419 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
2422 adapter->hw.fc.high_water = rx_buffer_size -
2423 roundup2(adapter->max_frame_size, 1024);
2424 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2426 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2427 adapter->hw.fc.send_xon = TRUE;
/* Tunable values 0..3 map directly onto the e1000_fc_* enum; else disable. */
2429 /* Set Flow control, use the tunable location if sane */
2430 if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
2431 adapter->hw.fc.requested_mode = lem_fc_setting;
2433 adapter->hw.fc.requested_mode = e1000_fc_none;
2435 if (e1000_init_hw(&adapter->hw) < 0) {
2436 device_printf(dev, "Hardware Initialization Failed\n");
2440 e1000_check_for_link(&adapter->hw);
2445 /*********************************************************************
2447 * Setup networking device structure and register an interface.
2449 **********************************************************************/
/*
 * lem_setup_interface: allocate and configure the ifnet, attach it to the
 * ethernet layer, advertise capabilities (checksum offload, VLAN, WOL,
 * polling), and register the supported media types with ifmedia.
 * NOTE(review): sampled extract — the ifp declaration, some return
 * statements and ifmedia_add() trailing arguments are missing between the
 * numbered lines below.
 */
2451 lem_setup_interface(device_t dev, struct adapter *adapter)
2455 INIT_DEBUGOUT("lem_setup_interface: begin");
2457 ifp = adapter->ifp = if_gethandle(IFT_ETHER);
2458 if (ifp == (void *)NULL) {
2459 device_printf(dev, "can not allocate ifnet structure\n");
2462 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2463 if_setinitfn(ifp, lem_init);
2464 if_setsoftc(ifp, adapter);
2465 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2466 if_setioctlfn(ifp, lem_ioctl);
2467 if_setstartfn(ifp, lem_start);
2468 if_setgetcounterfn(ifp, lem_get_counter);
2469 if_setsendqlen(ifp, adapter->num_tx_desc - 1);
2470 if_setsendqready(ifp);
2472 ether_ifattach(ifp, adapter->hw.mac.addr);
2474 if_setcapabilities(ifp, 0);
/* HW checksum offload first appeared on the 82543 generation. */
2476 if (adapter->hw.mac.type >= e1000_82543) {
2477 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM, 0);
2478 if_setcapenablebit(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM, 0);
2482 * Tell the upper layer(s) we support long frames.
2484 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2485 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU, 0);
2486 if_setcapenablebit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU, 0);
2489 ** Don't turn this on by default, if vlans are
2490 ** created on another pseudo device (eg. lagg)
2491 ** then vlan events are not passed thru, breaking
2492 ** operation, but with HW FILTER off it works. If
2493 ** using vlans directly on the em driver you can
2494 ** enable this and get full hardware tag filtering.
2496 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
2498 #ifdef DEVICE_POLLING
2499 if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
2502 /* Enable only WOL MAGIC by default */
2504 if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
2505 if_setcapenablebit(ifp, IFCAP_WOL_MAGIC, 0);
2509 * Specify the media types supported by this adapter and register
2510 * callbacks to update media and link information
2512 ifmedia_init(&adapter->media, IFM_IMASK,
2513 lem_media_change, lem_media_status);
2514 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2515 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2516 u_char fiber_type = IFM_1000_SX; /* default type */
/* The 82545 fiber variant uses LX optics rather than SX. */
2518 if (adapter->hw.mac.type == e1000_82545)
2519 fiber_type = IFM_1000_LX;
2520 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
2522 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2524 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2525 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2527 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2529 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
/* The IFE PHY cannot do 1000BASE-T, so skip the gigabit entries for it. */
2531 if (adapter->hw.phy.type != e1000_phy_ife) {
2532 ifmedia_add(&adapter->media,
2533 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2534 ifmedia_add(&adapter->media,
2535 IFM_ETHER | IFM_1000_T, 0, NULL);
2538 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2539 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2544 /*********************************************************************
2546 * Workaround for SmartSpeed on 82541 and 82547 controllers
2548 **********************************************************************/
/*
 * lem_smartspeed: SmartSpeed workaround for 82541/82547 (IGP PHY) parts.
 * If gigabit autonegotiation repeatedly fails with a Master/Slave
 * configuration fault, temporarily drop manual M/S configuration and
 * restart autoneg; after EM_SMARTSPEED_DOWNSHIFT attempts, re-enable it
 * (the link may only have 2/3 usable pairs), and reset the counter at
 * EM_SMARTSPEED_MAX. Runs periodically only while the link is down.
 * NOTE(review): sampled extract — the phy_tmp declaration, early returns
 * and closing braces are missing between the numbered lines below.
 */
2550 lem_smartspeed(struct adapter *adapter)
/* Nothing to do with link up, non-IGP PHY, forced speed, or no 1000FD adv. */
2554 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2555 adapter->hw.mac.autoneg == 0 ||
2556 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2559 if (adapter->smartspeed == 0) {
2560 /* If Master/Slave config fault is asserted twice,
2561 * we assume back-to-back */
2562 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2563 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2565 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2566 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2567 e1000_read_phy_reg(&adapter->hw,
2568 PHY_1000T_CTRL, &phy_tmp);
2569 if(phy_tmp & CR_1000T_MS_ENABLE) {
/* Clear manual master/slave config and restart autonegotiation. */
2570 phy_tmp &= ~CR_1000T_MS_ENABLE;
2571 e1000_write_phy_reg(&adapter->hw,
2572 PHY_1000T_CTRL, phy_tmp);
2573 adapter->smartspeed++;
2574 if(adapter->hw.mac.autoneg &&
2575 !e1000_copper_link_autoneg(&adapter->hw) &&
2576 !e1000_read_phy_reg(&adapter->hw,
2577 PHY_CONTROL, &phy_tmp)) {
2578 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2579 MII_CR_RESTART_AUTO_NEG);
2580 e1000_write_phy_reg(&adapter->hw,
2581 PHY_CONTROL, phy_tmp);
2586 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2587 /* If still no link, perhaps using 2/3 pair cable */
2588 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2589 phy_tmp |= CR_1000T_MS_ENABLE;
2590 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2591 if(adapter->hw.mac.autoneg &&
2592 !e1000_copper_link_autoneg(&adapter->hw) &&
2593 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2594 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2595 MII_CR_RESTART_AUTO_NEG);
2596 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2599 /* Restart process after EM_SMARTSPEED_MAX iterations */
2600 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2601 adapter->smartspeed = 0;
2606 * Manage DMA'able memory.
/*
 * lem_dmamap_cb: bus_dmamap_load() callback — copies the physical address
 * of the single mapped segment out through the caller-supplied bus_addr_t
 * pointer (arg). NOTE(review): sampled extract — the error-check and
 * braces are missing between the lines below.
 */
2609 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2613 *(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * lem_dma_malloc: allocate 'size' bytes of DMA-able, coherent memory for a
 * descriptor ring: create a dedicated tag, allocate the memory, and load
 * the map so dma->dma_paddr holds the bus address (via lem_dmamap_cb).
 * On any failure, previously acquired tag/memory/map are unwound through
 * the labelled cleanup path at the bottom.
 * NOTE(review): sampled extract — some tag parameters, goto statements
 * and labels are missing between the numbered lines below.
 */
2617 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2618 struct em_dma_alloc *dma, int mapflags)
2622 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2623 EM_DBA_ALIGN, 0, /* alignment, bounds */
2624 BUS_SPACE_MAXADDR, /* lowaddr */
2625 BUS_SPACE_MAXADDR, /* highaddr */
2626 NULL, NULL, /* filter, filterarg */
2629 size, /* maxsegsize */
2631 NULL, /* lockfunc */
2635 device_printf(adapter->dev,
2636 "%s: bus_dma_tag_create failed: %d\n",
2641 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2642 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2644 device_printf(adapter->dev,
2645 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2646 __func__, (uintmax_t)size, error);
2651 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2652 size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2653 if (error || dma->dma_paddr == 0) {
2654 device_printf(adapter->dev,
2655 "%s: bus_dmamap_load failed: %d\n",
/* Error unwind: release in reverse order of acquisition. */
2663 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2665 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2666 bus_dma_tag_destroy(dma->dma_tag);
2668 dma->dma_tag = NULL;
/*
 * lem_dma_free: release a lem_dma_malloc() allocation — sync and unload
 * the map if it was loaded, free the memory, and destroy the tag. Safe to
 * call on a partially initialized em_dma_alloc (each step is guarded).
 * NOTE(review): sampled extract — return statements/braces are missing
 * between the numbered lines below.
 */
2674 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2676 if (dma->dma_tag == NULL)
2678 if (dma->dma_paddr != 0) {
2679 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2680 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2681 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2684 if (dma->dma_vaddr != NULL) {
2685 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2686 dma->dma_vaddr = NULL;
2688 bus_dma_tag_destroy(dma->dma_tag);
2689 dma->dma_tag = NULL;
2693 /*********************************************************************
2695 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2696 * the information needed to transmit a packet on the wire.
2698 **********************************************************************/
/*
 * lem_allocate_transmit_structures: create the TX mbuf DMA tag (up to
 * EM_MAX_SCATTER segments of MCLBYTES each), allocate the tx_buffer
 * array, and create one DMA map per descriptor. On failure everything is
 * unwound via lem_free_transmit_structures().
 * NOTE(review): sampled extract — some tag parameters, goto fail paths
 * and return statements are missing between the numbered lines below.
 */
2700 lem_allocate_transmit_structures(struct adapter *adapter)
2702 device_t dev = adapter->dev;
2703 struct em_buffer *tx_buffer;
2707 * Create DMA tags for tx descriptors
2709 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2710 1, 0, /* alignment, bounds */
2711 BUS_SPACE_MAXADDR, /* lowaddr */
2712 BUS_SPACE_MAXADDR, /* highaddr */
2713 NULL, NULL, /* filter, filterarg */
2714 MCLBYTES * EM_MAX_SCATTER, /* maxsize */
2715 EM_MAX_SCATTER, /* nsegments */
2716 MCLBYTES, /* maxsegsize */
2718 NULL, /* lockfunc */
2720 &adapter->txtag)) != 0) {
2721 device_printf(dev, "Unable to allocate TX DMA tag\n");
2725 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2726 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2727 if (adapter->tx_buffer_area == NULL) {
2728 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2733 /* Create the descriptor buffer dma maps */
2734 for (int i = 0; i < adapter->num_tx_desc; i++) {
2735 tx_buffer = &adapter->tx_buffer_area[i];
2736 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2738 device_printf(dev, "Unable to create TX DMA map\n");
/* next_eop == -1 marks "no packet ends here" for lem_txeof(). */
2741 tx_buffer->next_eop = -1;
/* Failure path: release whatever was set up above. */
2746 lem_free_transmit_structures(adapter);
2750 /*********************************************************************
2752 * (Re)Initialize transmit structures.
2754 **********************************************************************/
/*
 * lem_setup_transmit_structures: (re)initialize the TX ring — zero the
 * descriptors, free any mbufs still attached to tx_buffers, reset the
 * head/tail/avail bookkeeping, and sync the descriptor DMA map. Under
 * DEV_NETMAP, descriptors are instead pointed at netmap slot buffers.
 * Caller holds the TX lock (see "we are already locked").
 * NOTE(review): sampled extract — #ifdef DEV_NETMAP openers, addr/paddr
 * declarations and closing braces are missing between the lines below.
 */
2756 lem_setup_transmit_structures(struct adapter *adapter)
2758 struct em_buffer *tx_buffer;
2760 /* we are already locked */
2761 struct netmap_adapter *na = netmap_getna(adapter->ifp);
2762 struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
2763 #endif /* DEV_NETMAP */
2765 /* Clear the old ring contents */
2766 bzero(adapter->tx_desc_base,
2767 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2769 /* Free any existing TX buffers */
2770 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2771 tx_buffer = &adapter->tx_buffer_area[i];
2772 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2773 BUS_DMASYNC_POSTWRITE);
2774 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2775 m_freem(tx_buffer->m_head);
2776 tx_buffer->m_head = NULL;
2779 /* the i-th NIC entry goes to slot si */
2780 int si = netmap_idx_n2k(&na->tx_rings[0], i);
2784 addr = PNMB(na, slot + si, &paddr);
2785 adapter->tx_desc_base[i].buffer_addr = htole64(paddr);
2786 /* reload the map for netmap mode */
2787 netmap_load_map(na, adapter->txtag, tx_buffer->map, addr);
2789 #endif /* DEV_NETMAP */
2790 tx_buffer->next_eop = -1;
/* Fresh ring: all descriptors available, clean/avail indices at zero. */
2794 adapter->last_hw_offload = 0;
2795 adapter->next_avail_tx_desc = 0;
2796 adapter->next_tx_to_clean = 0;
2797 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2799 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2800 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2805 /*********************************************************************
2807 * Enable transmit unit.
2809 **********************************************************************/
/*
 * lem_initialize_transmit_unit: program the hardware TX engine — ring
 * base/length registers (TDBAL/TDBAH/TDLEN), head/tail pointers, the
 * inter-packet-gap timer per MAC generation, interrupt delay values, and
 * finally TCTL, which enables transmission.
 * NOTE(review): sampled extract — local declarations (tctl, tipg,
 * bus_addr), switch case labels and break statements are missing between
 * the numbered lines below.
 */
2811 lem_initialize_transmit_unit(struct adapter *adapter)
2816 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
2817 /* Setup the Base and Length of the Tx Descriptor Ring */
2818 bus_addr = adapter->txdma.dma_paddr;
2819 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2820 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2821 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2822 (u32)(bus_addr >> 32));
2823 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2825 /* Setup the HW Tx Head and Tail descriptor pointers */
2826 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2827 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
2829 HW_DEBUGOUT2("Base = %x, Length = %x\n",
2830 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
2831 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
2833 /* Set the default values for the Tx Inter Packet Gap timer */
2834 switch (adapter->hw.mac.type) {
2835 tipg = DEFAULT_82542_TIPG_IPGT;
2837 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2838 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
/* Fiber/SerDes links use a different IPG transmit time than copper. */
2841 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2842 (adapter->hw.phy.media_type ==
2843 e1000_media_type_internal_serdes))
2844 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2846 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2847 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2848 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2851 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2852 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
/* TADV (absolute TX int delay) only exists from the 82540 onward. */
2853 if(adapter->hw.mac.type >= e1000_82540)
2854 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
2855 adapter->tx_abs_int_delay.value);
2857 /* Program the Transmit Control Register */
2858 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2859 tctl &= ~E1000_TCTL_CT;
2860 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2861 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2863 /* This write will effectively turn on the transmit unit. */
2864 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2866 /* Setup Transmit Descriptor Base Settings */
2867 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
/* If an interrupt delay is configured, request delayed TX interrupts. */
2869 if (adapter->tx_int_delay.value > 0)
2870 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2873 /*********************************************************************
2875 * Free all transmit related data structures.
2877 **********************************************************************/
/*
 * lem_free_transmit_structures: release everything TX-side — per-buffer
 * mbufs and DMA maps, the tx_buffer array, the TX DMA tag, and (on
 * FreeBSD 8+) the buf_ring. Each stage is NULL-guarded so this is safe
 * after a partial allocation failure.
 * NOTE(review): sampled extract — some map arguments and closing braces
 * are missing between the numbered lines below.
 */
2879 lem_free_transmit_structures(struct adapter *adapter)
2881 struct em_buffer *tx_buffer;
2883 INIT_DEBUGOUT("free_transmit_structures: begin");
2885 if (adapter->tx_buffer_area != NULL) {
2886 for (int i = 0; i < adapter->num_tx_desc; i++) {
2887 tx_buffer = &adapter->tx_buffer_area[i];
/* Buffers with an mbuf attached must be synced before unload/free. */
2888 if (tx_buffer->m_head != NULL) {
2889 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2890 BUS_DMASYNC_POSTWRITE);
2891 bus_dmamap_unload(adapter->txtag,
2893 m_freem(tx_buffer->m_head);
2894 tx_buffer->m_head = NULL;
2895 } else if (tx_buffer->map != NULL)
2896 bus_dmamap_unload(adapter->txtag,
2898 if (tx_buffer->map != NULL) {
2899 bus_dmamap_destroy(adapter->txtag,
2901 tx_buffer->map = NULL;
2905 if (adapter->tx_buffer_area != NULL) {
2906 free(adapter->tx_buffer_area, M_DEVBUF);
2907 adapter->tx_buffer_area = NULL;
2909 if (adapter->txtag != NULL) {
2910 bus_dma_tag_destroy(adapter->txtag);
2911 adapter->txtag = NULL;
2913 #if __FreeBSD_version >= 800000
2914 if (adapter->br != NULL)
2915 buf_ring_free(adapter->br, M_DEVBUF);
2919 /*********************************************************************
2921 * The offload context needs to be set when we transfer the first
2922 * packet of a particular protocol (TCP/UDP). This routine has been
2923 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2925 * Added back the old method of keeping the current context type
2926 * and not setting if unnecessary, as this is reported to be a
2927 * big performance win. -jfv
2928 **********************************************************************/
/*
 * lem_transmit_checksum_setup: build (if needed) a TX context descriptor
 * that tells the hardware where to insert IP and TCP/UDP checksums for
 * this mbuf, and return the txd_upper/txd_lower bits the caller must put
 * on the data descriptors. Skips emitting a new context when the last one
 * programmed the same offload type (adapter->last_hw_offload) — noted in
 * the header above as a significant performance win.
 * NOTE(review): sampled extract — some declarations (etype, ipproto),
 * switch/case labels, break/return statements and the final consumed-
 * descriptor accounting lines are missing between the lines below.
 */
2930 lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2931 u32 *txd_upper, u32 *txd_lower)
2933 struct e1000_context_desc *TXD = NULL;
2934 struct em_buffer *tx_buffer;
2935 struct ether_vlan_header *eh;
2936 struct ip *ip = NULL;
2937 struct ip6_hdr *ip6;
2938 int curr_txd, ehdrlen;
2939 u32 cmd, hdr_len, ip_hlen;
2944 cmd = hdr_len = ipproto = 0;
2945 *txd_upper = *txd_lower = 0;
2946 curr_txd = adapter->next_avail_tx_desc;
2949 * Determine where frame payload starts.
2950 * Jump over vlan headers if already present,
2951 * helpful for QinQ too.
2953 eh = mtod(mp, struct ether_vlan_header *);
2954 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2955 etype = ntohs(eh->evl_proto);
2956 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2958 etype = ntohs(eh->evl_encap_proto);
2959 ehdrlen = ETHER_HDR_LEN;
2963 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
2964 * TODO: Support SCTP too when it hits the tree.
/* IPv4 case: optionally program IP header checksum insertion. */
2968 ip = (struct ip *)(mp->m_data + ehdrlen);
2969 ip_hlen = ip->ip_hl << 2;
2971 /* Setup of IP header checksum. */
2972 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
2974 * Start offset for header checksum calculation.
2975 * End offset for header checksum calculation.
2976 * Offset of place to put the checksum.
2978 TXD = (struct e1000_context_desc *)
2979 &adapter->tx_desc_base[curr_txd];
2980 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2981 TXD->lower_setup.ip_fields.ipcse =
2982 htole16(ehdrlen + ip_hlen);
2983 TXD->lower_setup.ip_fields.ipcso =
2984 ehdrlen + offsetof(struct ip, ip_sum);
2985 cmd |= E1000_TXD_CMD_IP;
2986 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2989 hdr_len = ehdrlen + ip_hlen;
2993 case ETHERTYPE_IPV6:
2994 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2995 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
2997 /* IPv6 doesn't have a header checksum. */
2999 hdr_len = ehdrlen + ip_hlen;
3000 ipproto = ip6->ip6_nxt;
/* Transport layer: program TCP or UDP payload checksum insertion. */
3009 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3010 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3011 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3012 /* no need for context if already set */
3013 if (adapter->last_hw_offload == CSUM_TCP)
3015 adapter->last_hw_offload = CSUM_TCP;
3017 * Start offset for payload checksum calculation.
3018 * End offset for payload checksum calculation.
3019 * Offset of place to put the checksum.
3021 TXD = (struct e1000_context_desc *)
3022 &adapter->tx_desc_base[curr_txd];
3023 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3024 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3025 TXD->upper_setup.tcp_fields.tucso =
3026 hdr_len + offsetof(struct tcphdr, th_sum);
3027 cmd |= E1000_TXD_CMD_TCP;
3032 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3033 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3034 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3035 /* no need for context if already set */
3036 if (adapter->last_hw_offload == CSUM_UDP)
3038 adapter->last_hw_offload = CSUM_UDP;
3040 * Start offset for header checksum calculation.
3041 * End offset for header checksum calculation.
3042 * Offset of place to put the checksum.
3044 TXD = (struct e1000_context_desc *)
3045 &adapter->tx_desc_base[curr_txd];
3046 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3047 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3048 TXD->upper_setup.tcp_fields.tucso =
3049 hdr_len + offsetof(struct udphdr, uh_sum);
/* Finalize the context descriptor and consume one ring slot for it. */
3059 TXD->tcp_seg_setup.data = htole32(0);
3060 TXD->cmd_and_length =
3061 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3062 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3063 tx_buffer->m_head = NULL;
3064 tx_buffer->next_eop = -1;
3066 if (++curr_txd == adapter->num_tx_desc)
3069 adapter->num_tx_desc_avail--;
3070 adapter->next_avail_tx_desc = curr_txd;
3074 /**********************************************************************
3076 * Examine each tx_buffer in the used queue. If the hardware is done
3077 * processing the packet then free associated resources. The
3078 * tx_buffer is put back on the free queue.
3080 **********************************************************************/
/*
 * lem_txeof: reclaim completed TX descriptors. Starting at
 * next_tx_to_clean, walk packet-by-packet: while the packet's EOP
 * descriptor has the DD (descriptor done) status bit set, zero the
 * descriptors of that packet, unmap and free its mbuf, and advance. On
 * exit, update the avail/clean bookkeeping, clear IFF_DRV_OACTIVE when
 * enough descriptors freed up, and clear the watchdog when the ring is
 * fully drained. Must be called with the TX lock held.
 * NOTE(review): sampled extract — early returns, some increments and the
 * enclosing #ifdef bodies are missing between the numbered lines below.
 */
3082 lem_txeof(struct adapter *adapter)
3084 int first, last, done, num_avail;
3085 struct em_buffer *tx_buffer;
3086 struct e1000_tx_desc *tx_desc, *eop_desc;
3087 if_t ifp = adapter->ifp;
3089 EM_TX_LOCK_ASSERT(adapter);
/* In netmap mode the ring is owned by netmap; nothing for us to reclaim. */
3092 if (netmap_tx_irq(ifp, 0))
3094 #endif /* DEV_NETMAP */
3095 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3098 num_avail = adapter->num_tx_desc_avail;
3099 first = adapter->next_tx_to_clean;
3100 tx_desc = &adapter->tx_desc_base[first];
3101 tx_buffer = &adapter->tx_buffer_area[first];
3102 last = tx_buffer->next_eop;
3103 eop_desc = &adapter->tx_desc_base[last];
3106 * What this does is get the index of the
3107 * first descriptor AFTER the EOP of the
3108 * first packet, that way we can do the
3109 * simple comparison on the inner while loop.
3111 if (++last == adapter->num_tx_desc)
/* Pull the latest status written back by the hardware before reading DD. */
3115 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3116 BUS_DMASYNC_POSTREAD);
3118 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3119 /* We clean the range of the packet */
3120 while (first != done) {
3121 tx_desc->upper.data = 0;
3122 tx_desc->lower.data = 0;
3123 tx_desc->buffer_addr = 0;
3126 if (tx_buffer->m_head) {
3127 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
3128 bus_dmamap_sync(adapter->txtag,
3130 BUS_DMASYNC_POSTWRITE);
3131 bus_dmamap_unload(adapter->txtag,
3134 m_freem(tx_buffer->m_head);
3135 tx_buffer->m_head = NULL;
3137 tx_buffer->next_eop = -1;
/* Progress made: pet the watchdog timestamp. */
3138 adapter->watchdog_time = ticks;
3140 if (++first == adapter->num_tx_desc)
3143 tx_buffer = &adapter->tx_buffer_area[first];
3144 tx_desc = &adapter->tx_desc_base[first];
3146 /* See if we can continue to the next packet */
3147 last = tx_buffer->next_eop;
3149 eop_desc = &adapter->tx_desc_base[last];
3150 /* Get new done point */
3151 if (++last == adapter->num_tx_desc) last = 0;
3156 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3157 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3159 adapter->next_tx_to_clean = first;
3160 adapter->num_tx_desc_avail = num_avail;
3162 #ifdef NIC_SEND_COMBINING
3163 if ((adapter->shadow_tdt & MIT_PENDING_TDT) == MIT_PENDING_TDT) {
3164 /* a tdt write is pending, do it */
3165 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0),
3166 0xffff & adapter->shadow_tdt);
3167 adapter->shadow_tdt = MIT_PENDING_INT;
3169 adapter->shadow_tdt = 0; // disable
3171 #endif /* NIC_SEND_COMBINING */
3173 * If we have enough room, clear IFF_DRV_OACTIVE to
3174 * tell the stack that it is OK to send packets.
3175 * If there are no pending descriptors, clear the watchdog.
3177 if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
3178 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3180 if (adapter->csb) { // XXX also csb_on ?
3181 adapter->csb->guest_need_txkick = 2; /* acked */
3182 // XXX memory barrier
3184 #endif /* NIC_PARAVIRT */
3185 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
3186 adapter->watchdog_check = FALSE;
3192 /*********************************************************************
3194 * When Link is lost sometimes there is work still in the TX ring
3195 * which may result in a watchdog, rather than allow that we do an
3196 * attempted cleanup and then reinit here. Note that this has been
3197 seen mostly with fiber adapters.
3199 **********************************************************************/
/*
 * lem_tx_purge: when the link is down but TX work is still outstanding,
 * attempt a reclaim (under the TX lock) and, if descriptors remain
 * pending afterward, reinitialize the adapter instead of letting the
 * watchdog fire. NOTE(review): sampled extract — the cleanup call between
 * lock/unlock (line 3205) is missing from this listing.
 */
3201 lem_tx_purge(struct adapter *adapter)
3203 if ((!adapter->link_active) && (adapter->watchdog_check)) {
3204 EM_TX_LOCK(adapter);
3206 EM_TX_UNLOCK(adapter);
3207 if (adapter->watchdog_check) /* Still outstanding? */
3208 lem_init_locked(adapter);
3212 /*********************************************************************
3214 * Get a buffer from system mbuf buffer pool.
3216 **********************************************************************/
/*
 * lem_get_buf: allocate a fresh mbuf cluster for RX slot i, DMA-map it
 * via the spare map (then swap spare and slot maps so the old map becomes
 * the new spare), and write the segment's bus address into the RX
 * descriptor. The ETHER_ALIGN adjustment keeps the IP header 4-byte
 * aligned when the frame fits in a cluster.
 * NOTE(review): sampled extract — local declarations (m, map, nsegs,
 * error), error returns and braces are missing between the lines below.
 */
3218 lem_get_buf(struct adapter *adapter, int i)
3221 bus_dma_segment_t segs[1];
3223 struct em_buffer *rx_buffer;
3226 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3228 adapter->mbuf_cluster_failed++;
3231 m->m_len = m->m_pkthdr.len = MCLBYTES;
3233 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3234 m_adj(m, ETHER_ALIGN);
3237 * Using memory from the mbuf cluster pool, invoke the
3238 * bus_dma machinery to arrange the memory mapping.
3240 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3241 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3247 /* If nsegs is wrong then the stack is corrupt. */
3248 KASSERT(nsegs == 1, ("Too many segments returned!"));
3250 rx_buffer = &adapter->rx_buffer_area[i];
3251 if (rx_buffer->m_head != NULL)
3252 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap: the freshly loaded spare map becomes this slot's map. */
3254 map = rx_buffer->map;
3255 rx_buffer->map = adapter->rx_sparemap;
3256 adapter->rx_sparemap = map;
3257 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3258 rx_buffer->m_head = m;
3260 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3264 /*********************************************************************
3266 * Allocate memory for rx_buffer structures. Since we use one
3267 * rx_buffer per received packet, the maximum number of rx_buffer's
3268 * that we'll need is equal to the number of receive descriptors
3269 * that we've allocated.
3271 **********************************************************************/
/*
 * lem_allocate_receive_structures: allocate the rx_buffer array (one per
 * RX descriptor), create the single-segment MCLBYTES RX DMA tag, the
 * spare map used by lem_get_buf(), and a per-buffer DMA map. Failures
 * unwind through lem_free_receive_structures().
 * NOTE(review): sampled extract — some tag parameters, goto fail paths
 * and return statements are missing between the numbered lines below.
 */
3273 lem_allocate_receive_structures(struct adapter *adapter)
3275 device_t dev = adapter->dev;
3276 struct em_buffer *rx_buffer;
3279 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3280 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3281 if (adapter->rx_buffer_area == NULL) {
3282 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3286 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3287 1, 0, /* alignment, bounds */
3288 BUS_SPACE_MAXADDR, /* lowaddr */
3289 BUS_SPACE_MAXADDR, /* highaddr */
3290 NULL, NULL, /* filter, filterarg */
3291 MCLBYTES, /* maxsize */
3293 MCLBYTES, /* maxsegsize */
3295 NULL, /* lockfunc */
3299 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3304 /* Create the spare map (used by getbuf) */
3305 error = bus_dmamap_create(adapter->rxtag, 0, &adapter->rx_sparemap);
3307 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3312 rx_buffer = adapter->rx_buffer_area;
3313 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3314 error = bus_dmamap_create(adapter->rxtag, 0, &rx_buffer->map);
3316 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* Failure path: release whatever was set up above. */
3325 lem_free_receive_structures(adapter);
3329 /*********************************************************************
3331 * (Re)initialize receive structures.
3333 **********************************************************************/
/*
 * lem_setup_receive_structures: (re)initialize the RX ring — zero the
 * descriptors, free any mbufs still attached, then repopulate every slot
 * via lem_get_buf() (or, under DEV_NETMAP, point descriptors at netmap
 * slot buffers) and reset next_rx_desc_to_check. Caller holds the lock
 * (see "we are already under lock").
 * NOTE(review): sampled extract — #ifdef DEV_NETMAP openers, local
 * declarations and error handling are missing between the lines below.
 */
3335 lem_setup_receive_structures(struct adapter *adapter)
3337 struct em_buffer *rx_buffer;
3340 /* we are already under lock */
3341 struct netmap_adapter *na = netmap_getna(adapter->ifp);
3342 struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
3345 /* Reset descriptor ring */
3346 bzero(adapter->rx_desc_base,
3347 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3349 /* Free current RX buffers. */
3350 rx_buffer = adapter->rx_buffer_area;
3351 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3352 if (rx_buffer->m_head != NULL) {
3353 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3354 BUS_DMASYNC_POSTREAD);
3355 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3356 m_freem(rx_buffer->m_head);
3357 rx_buffer->m_head = NULL;
3361 /* Allocate new ones. */
3362 for (i = 0; i < adapter->num_rx_desc; i++) {
3365 /* the i-th NIC entry goes to slot si */
3366 int si = netmap_idx_n2k(&na->rx_rings[0], i);
3370 addr = PNMB(na, slot + si, &paddr);
3371 netmap_load_map(na, adapter->rxtag, rx_buffer->map, addr);
3372 /* Update descriptor */
3373 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
3376 #endif /* DEV_NETMAP */
3377 error = lem_get_buf(adapter, i);
3382 /* Setup our descriptor pointers */
3383 adapter->next_rx_desc_to_check = 0;
3384 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3385 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3390 /*********************************************************************
3392 * Enable receive unit.
3394 **********************************************************************/
/*
 * lem_initialize_receive_unit: program the hardware RX engine — disable
 * receives while configuring, set interrupt throttling/delays (82540+),
 * write the ring base/length registers (RDBAL/RDBAH/RDLEN), build RCTL
 * (buffer size, long-packet enable, broadcast accept, store-bad-packet
 * for the 82543 TBI workaround), enable RX checksum offload where
 * supported, then re-enable receives and set the head/tail pointers.
 * NOTE(review): sampled extract — local declarations (rctl, rxcsum,
 * bus_addr), RDV write, switch case labels and breaks are missing between
 * the numbered lines below. Note rctl is reused at the end as the RDT
 * (tail) value.
 */
3397 lem_initialize_receive_unit(struct adapter *adapter)
3399 if_t ifp = adapter->ifp;
3403 INIT_DEBUGOUT("lem_initialize_receive_unit: begin");
3406 * Make sure receives are disabled while setting
3407 * up the descriptor ring
3409 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3410 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3412 if (adapter->hw.mac.type >= e1000_82540) {
3413 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
3414 adapter->rx_abs_int_delay.value);
3416 * Set the interrupt throttling rate. Value is calculated
3417 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3419 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
3422 /* Setup the Base and Length of the Rx Descriptor Ring */
3423 bus_addr = adapter->rxdma.dma_paddr;
3424 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3425 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3426 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3427 (u32)(bus_addr >> 32));
3428 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3431 /* Setup the Receive Control Register */
3432 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3433 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3434 E1000_RCTL_RDMTS_HALF |
3435 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3437 /* Make sure VLAN Filters are off */
3438 rctl &= ~E1000_RCTL_VFE;
/* 82543 TBI workaround may require storing bad packets (SBP). */
3440 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3441 rctl |= E1000_RCTL_SBP;
3443 rctl &= ~E1000_RCTL_SBP;
/* Select RX buffer size; sizes above 2K need the BSEX extension bit. */
3445 switch (adapter->rx_buffer_len) {
3448 rctl |= E1000_RCTL_SZ_2048;
3451 rctl |= E1000_RCTL_SZ_4096 |
3452 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3455 rctl |= E1000_RCTL_SZ_8192 |
3456 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3459 rctl |= E1000_RCTL_SZ_16384 |
3460 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3464 if (if_getmtu(ifp) > ETHERMTU)
3465 rctl |= E1000_RCTL_LPE;
3467 rctl &= ~E1000_RCTL_LPE;
3469 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
3470 if ((adapter->hw.mac.type >= e1000_82543) &&
3471 (if_getcapenable(ifp) & IFCAP_RXCSUM)) {
3472 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3473 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3474 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3477 /* Enable Receives */
3478 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3481 * Setup the HW Rx Head and
3482 * Tail Descriptor Pointers
3484 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
/* rctl is repurposed here: it now holds the RDT (tail) index to program. */
3485 rctl = adapter->num_rx_desc - 1; /* default RDT value */
3487 /* preserve buffers already made available to clients */
3488 if (if_getcapenable(ifp) & IFCAP_NETMAP) {
3489 struct netmap_adapter *na = netmap_getna(adapter->ifp);
3490 rctl -= nm_kr_rxspace(&na->rx_rings[0]);
3492 #endif /* DEV_NETMAP */
3493 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rctl);
3498 /*********************************************************************
3500 * Free receive related data structures.
3502 **********************************************************************/
/*
 * Tear down all receive-side resources: the spare DMA map, each
 * per-descriptor mbuf and its DMA map, the rx_buffer_area array,
 * and finally the receive DMA tag itself.
 */
3504 lem_free_receive_structures(struct adapter *adapter)
3506 struct em_buffer *rx_buffer;
3509 INIT_DEBUGOUT("free_receive_structures: begin");
3511 if (adapter->rx_sparemap) {
3512 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3513 adapter->rx_sparemap = NULL;
3516 /* Cleanup any existing buffers */
3517 if (adapter->rx_buffer_area != NULL) {
3518 rx_buffer = adapter->rx_buffer_area;
3519 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3520 if (rx_buffer->m_head != NULL) {
/* Sync before unload so the CPU sees any DMA'd data, then free. */
3521 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3522 BUS_DMASYNC_POSTREAD);
3523 bus_dmamap_unload(adapter->rxtag,
3525 m_freem(rx_buffer->m_head);
3526 rx_buffer->m_head = NULL;
3527 } else if (rx_buffer->map != NULL)
3528 bus_dmamap_unload(adapter->rxtag,
3530 if (rx_buffer->map != NULL) {
3531 bus_dmamap_destroy(adapter->rxtag,
3533 rx_buffer->map = NULL;
3538 if (adapter->rx_buffer_area != NULL) {
3539 free(adapter->rx_buffer_area, M_DEVBUF);
3540 adapter->rx_buffer_area = NULL;
3543 if (adapter->rxtag != NULL) {
3544 bus_dma_tag_destroy(adapter->rxtag);
3545 adapter->rxtag = NULL;
3549 /*********************************************************************
3551 * This routine executes in interrupt context. It replenishes
3552 * the mbufs in the descriptor and sends data which has been
3553 * dma'ed into host memory to upper layer.
3555 * We loop at most count times if count is > 0, or until done if
3558 * For polling we also now return the number of cleaned packets
3559 *********************************************************************/
/*
 * Receive-completion processing, run in interrupt context. Walks the
 * descriptor ring starting at next_rx_desc_to_check, hands completed
 * frames to the network stack, replenishes mbufs via lem_get_buf(),
 * and advances the hardware RDT tail pointer. Processes at most
 * 'count' descriptors when count > 0; *done (if non-NULL, per the
 * banner above) reports cleaned packets for polling.
 * Returns TRUE while the next descriptor still has DD set (more work
 * pending), FALSE otherwise.
 * NOTE(review): source view is elided; many interior lines (variable
 * declarations, braces, else-arms) are not visible here.
 */
3561 lem_rxeof(struct adapter *adapter, int count, int *done)
3563 if_t ifp = adapter->ifp;
3565 u8 status = 0, accept_frame = 0, eop = 0;
3566 u16 len, desc_len, prev_len_adj;
3568 struct e1000_rx_desc *current_desc;
3570 #ifdef BATCH_DISPATCH
3571 struct mbuf *mh = NULL, *mt = NULL;
3572 #endif /* BATCH_DISPATCH */
3575 struct paravirt_csb* csb = adapter->csb;
3576 int csb_mode = csb && csb->guest_csb_on;
3578 //ND("clear guest_rxkick at %d", adapter->next_rx_desc_to_check);
3579 if (csb_mode && csb->guest_need_rxkick)
3580 csb->guest_need_rxkick = 0;
3581 #endif /* NIC_PARAVIRT */
3582 EM_RX_LOCK(adapter);
3584 #ifdef BATCH_DISPATCH
3586 #endif /* BATCH_DISPATCH */
3587 i = adapter->next_rx_desc_to_check;
3588 current_desc = &adapter->rx_desc_base[i];
/* Make DMA-written descriptor status visible to the CPU. */
3589 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3590 BUS_DMASYNC_POSTREAD);
/* If netmap owns the ring, it consumes the interrupt and we bail. */
3593 if (netmap_rx_irq(ifp, 0, &rx_sent)) {
3594 EM_RX_UNLOCK(adapter);
3597 #endif /* DEV_NETMAP */
3599 #if 1 // XXX optimization ?
/* Fast path out: first descriptor not done, nothing to process. */
3600 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3603 EM_RX_UNLOCK(adapter);
3608 while (count != 0 && if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
3609 struct mbuf *m = NULL;
3611 status = current_desc->status;
3612 if ((status & E1000_RXD_STAT_DD) == 0) {
3615 /* buffer not ready yet. Retry a few times before giving up */
3616 if (++retries <= adapter->rx_retries) {
/* Paravirt: ask the host for a kick, then re-check the descriptor. */
3619 if (csb->guest_need_rxkick == 0) {
3620 // ND("set guest_rxkick at %d", adapter->next_rx_desc_to_check);
3621 csb->guest_need_rxkick = 1;
3622 // XXX memory barrier, status volatile ?
3623 continue; /* double check */
3626 /* no buffer ready, give up */
3627 #endif /* NIC_PARAVIRT */
3632 if (csb->guest_need_rxkick)
3633 // ND("clear again guest_rxkick at %d", adapter->next_rx_desc_to_check);
3634 csb->guest_need_rxkick = 0;
3637 #endif /* NIC_PARAVIRT */
3639 mp = adapter->rx_buffer_area[i].m_head;
3641 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3642 * needs to access the last received byte in the mbuf.
3644 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3645 BUS_DMASYNC_POSTREAD);
3649 desc_len = le16toh(current_desc->length);
3650 if (status & E1000_RXD_STAT_EOP) {
/* End of packet: strip the trailing Ethernet CRC from the length. */
3653 if (desc_len < ETHER_CRC_LEN) {
3655 prev_len_adj = ETHER_CRC_LEN - desc_len;
3657 len = desc_len - ETHER_CRC_LEN;
3663 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3665 u32 pkt_len = desc_len;
3667 if (adapter->fmp != NULL)
3668 pkt_len += adapter->fmp->m_pkthdr.len;
/* TBI workaround (82543 fiber): some "errored" frames are acceptable. */
3670 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3671 if (TBI_ACCEPT(&adapter->hw, status,
3672 current_desc->errors, pkt_len, last_byte,
3673 adapter->min_frame_size, adapter->max_frame_size)) {
3674 e1000_tbi_adjust_stats_82543(&adapter->hw,
3675 &adapter->stats, pkt_len,
3676 adapter->hw.mac.addr,
3677 adapter->max_frame_size);
/* Replenish the slot; on failure drop the frame and reuse the mbuf. */
3685 if (lem_get_buf(adapter, i) != 0) {
3686 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
3690 /* Assign correct length to the current fragment */
3693 if (adapter->fmp == NULL) {
3694 mp->m_pkthdr.len = len;
3695 adapter->fmp = mp; /* Store the first mbuf */
3698 /* Chain mbuf's together */
3699 mp->m_flags &= ~M_PKTHDR;
3701 * Adjust length of previous mbuf in chain if
3702 * we received less than 4 bytes in the last
3705 if (prev_len_adj > 0) {
3706 adapter->lmp->m_len -= prev_len_adj;
3707 adapter->fmp->m_pkthdr.len -=
3710 adapter->lmp->m_next = mp;
3711 adapter->lmp = adapter->lmp->m_next;
3712 adapter->fmp->m_pkthdr.len += len;
3716 if_setrcvif(adapter->fmp, ifp);
3717 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3718 lem_receive_checksum(adapter, current_desc,
3720 #ifndef __NO_STRICT_ALIGNMENT
/* Strict-alignment archs: realign jumbo payloads before delivery. */
3721 if (adapter->max_frame_size >
3722 (MCLBYTES - ETHER_ALIGN) &&
3723 lem_fixup_rx(adapter) != 0)
3726 if (status & E1000_RXD_STAT_VP) {
3727 adapter->fmp->m_pkthdr.ether_vtag =
3728 le16toh(current_desc->special);
3729 adapter->fmp->m_flags |= M_VLANTAG;
3731 #ifndef __NO_STRICT_ALIGNMENT
3735 adapter->fmp = NULL;
3736 adapter->lmp = NULL;
3739 adapter->dropped_pkts++;
3741 /* Reuse loaded DMA map and just update mbuf chain */
3742 mp = adapter->rx_buffer_area[i].m_head;
3743 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3744 mp->m_data = mp->m_ext.ext_buf;
3746 if (adapter->max_frame_size <=
3747 (MCLBYTES - ETHER_ALIGN))
3748 m_adj(mp, ETHER_ALIGN);
/* Error/drop path: discard any partially assembled chain. */
3749 if (adapter->fmp != NULL) {
3750 m_freem(adapter->fmp);
3751 adapter->fmp = NULL;
3752 adapter->lmp = NULL;
3757 /* Zero out the receive descriptors status. */
3758 current_desc->status = 0;
3759 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3760 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3764 /* the buffer at i has been already replaced by lem_get_buf()
3765 * so it is safe to set guest_rdt = i and possibly send a kick.
3766 * XXX see if we can optimize it later.
3769 // XXX memory barrier
3770 if (i == csb->host_rxkick_at)
3771 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3773 #endif /* NIC_PARAVIRT */
3774 /* Advance our pointers to the next descriptor. */
3775 if (++i == adapter->num_rx_desc)
3777 /* Call into the stack */
3779 #ifdef BATCH_DISPATCH
3780 if (adapter->batch_enable) {
3786 m->m_nextpkt = NULL;
3788 current_desc = &adapter->rx_desc_base[i];
3791 #endif /* BATCH_DISPATCH */
/* Drop the lock around the stack call; re-read i afterwards since
 * another thread may have advanced the ring in the meantime. */
3792 adapter->next_rx_desc_to_check = i;
3793 EM_RX_UNLOCK(adapter);
3795 EM_RX_LOCK(adapter);
3797 i = adapter->next_rx_desc_to_check;
3799 current_desc = &adapter->rx_desc_base[i];
3801 adapter->next_rx_desc_to_check = i;
3802 #ifdef BATCH_DISPATCH
3804 EM_RX_UNLOCK(adapter);
3805 while ( (mt = mh) != NULL) {
3807 mt->m_nextpkt = NULL;
3810 EM_RX_LOCK(adapter);
3811 i = adapter->next_rx_desc_to_check; /* in case of interrupts */
3815 #endif /* BATCH_DISPATCH */
3817 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
3819 i = adapter->num_rx_desc - 1;
3821 if (!csb_mode) /* filter out writes */
3822 #endif /* NIC_PARAVIRT */
3823 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3826 EM_RX_UNLOCK(adapter);
3827 return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
3830 #ifndef __NO_STRICT_ALIGNMENT
3832 * When jumbo frames are enabled we should realign entire payload on
3833 * architecures with strict alignment. This is serious design mistake of 8254x
3834 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3835 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3836 * payload. On architecures without strict alignment restrictions 8254x still
3837 * performs unaligned memory access which would reduce the performance too.
3838 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3839 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3840 * existing mbuf chain.
3842 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3843 * not used at all on architectures with strict alignment.
/*
 * Realign a received frame on strict-alignment architectures (see the
 * banner above): either shift the data within the existing mbuf, or
 * allocate a new header mbuf, copy the Ethernet header into it, and
 * prepend it to the chain. On allocation failure the whole frame is
 * dropped and dropped_pkts is bumped.
 * NOTE(review): the return statements are not visible in this elided
 * view; presumably 0 on success, non-zero on failure (caller treats
 * non-zero as drop) — confirm against the full source.
 */
3846 lem_fixup_rx(struct adapter *adapter)
3853 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
/* Room in the cluster: slide the payload up by ETHER_HDR_LEN. */
3854 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3855 m->m_data += ETHER_HDR_LEN;
3857 MGETHDR(n, M_NOWAIT, MT_DATA);
/* Split path: new mbuf carries the header, original keeps payload. */
3859 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3860 m->m_data += ETHER_HDR_LEN;
3861 m->m_len -= ETHER_HDR_LEN;
3862 n->m_len = ETHER_HDR_LEN;
3863 M_MOVE_PKTHDR(n, m);
3867 adapter->dropped_pkts++;
3868 m_freem(adapter->fmp);
3869 adapter->fmp = NULL;
3878 /*********************************************************************
3880 * Verify that the hardware indicated that the checksum is valid.
3881 * Inform the stack about the status of checksum so that stack
3882 * doesn't spend time verifying the checksum.
3884 *********************************************************************/
/*
 * Translate hardware RX checksum status bits from the descriptor into
 * mbuf csum_flags so the stack can skip software verification.
 * Pre-82543 parts and descriptors with the Ignore-Checksum bit set
 * get no offload information at all.
 */
3886 lem_receive_checksum(struct adapter *adapter,
3887 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3889 /* 82543 or newer only */
3890 if ((adapter->hw.mac.type < e1000_82543) ||
3891 /* Ignore Checksum bit is set */
3892 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3893 mp->m_pkthdr.csum_flags = 0;
3897 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3899 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3900 /* IP Checksum Good */
3901 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3902 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3905 mp->m_pkthdr.csum_flags = 0;
3909 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3911 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
/* Valid TCP/UDP checksum: mark data valid with pseudo-header done. */
3912 mp->m_pkthdr.csum_flags |=
3913 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3914 mp->m_pkthdr.csum_data = htons(0xffff);
3920 * This routine is run via an vlan
/*
 * VLAN-registration event handler: record the new VLAN tag in the
 * shadow VLAN filter table (shadow_vfta) and re-init the hardware if
 * VLAN hardware filtering is enabled. Ignores events for other
 * interfaces and out-of-range tags.
 */
3924 lem_register_vlan(void *arg, if_t ifp, u16 vtag)
3926 struct adapter *adapter = if_getsoftc(ifp);
3929 if (if_getsoftc(ifp) != arg) /* Not our event */
3932 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
3935 EM_CORE_LOCK(adapter);
/* VFTA is 128 x 32-bit words: word index from tag bits 5..11. */
3936 index = (vtag >> 5) & 0x7F;
3938 adapter->shadow_vfta[index] |= (1 << bit);
3939 ++adapter->num_vlans;
3940 /* Re-init to load the changes */
3941 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
3942 lem_init_locked(adapter);
3943 EM_CORE_UNLOCK(adapter);
3947 * This routine is run via an vlan
/*
 * VLAN-unregistration event handler: clear the VLAN tag's bit in the
 * shadow VLAN filter table and re-init the hardware if VLAN hardware
 * filtering is enabled. Mirror image of lem_register_vlan().
 */
3951 lem_unregister_vlan(void *arg, if_t ifp, u16 vtag)
3953 struct adapter *adapter = if_getsoftc(ifp);
3956 if (if_getsoftc(ifp) != arg)
3959 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3962 EM_CORE_LOCK(adapter);
3963 index = (vtag >> 5) & 0x7F;
3965 adapter->shadow_vfta[index] &= ~(1 << bit);
3966 --adapter->num_vlans;
3967 /* Re-init to load the changes */
3968 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
3969 lem_init_locked(adapter);
3970 EM_CORE_UNLOCK(adapter);
/*
 * Re-program VLAN hardware state after a soft reset: repopulate the
 * on-chip VFTA from the shadow copy, enable VLAN tag processing
 * (CTRL.VME) and the VLAN filter table (RCTL.VFE). No-op when no
 * VLANs are registered.
 */
3974 lem_setup_vlan_hw_support(struct adapter *adapter)
3976 struct e1000_hw *hw = &adapter->hw;
3980 ** We get here thru init_locked, meaning
3981 ** a soft reset, this has already cleared
3982 ** the VFTA and other state, so if there
3983 ** have been no vlan's registered do nothing.
3985 if (adapter->num_vlans == 0)
3989 ** A soft reset zero's out the VFTA, so
3990 ** we need to repopulate it now.
3992 for (int i = 0; i < EM_VFTA_SIZE; i++)
3993 if (adapter->shadow_vfta[i] != 0)
3994 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3995 i, adapter->shadow_vfta[i]);
/* Turn on VLAN tag stripping/insertion in the device control reg. */
3997 reg = E1000_READ_REG(hw, E1000_CTRL);
3998 reg |= E1000_CTRL_VME;
3999 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4001 /* Enable the Filter Table */
4002 reg = E1000_READ_REG(hw, E1000_RCTL);
4003 reg &= ~E1000_RCTL_CFIEN;
4004 reg |= E1000_RCTL_VFE;
4005 E1000_WRITE_REG(hw, E1000_RCTL, reg);
/* Unmask the standard interrupt causes by writing IMS_ENABLE_MASK
 * to the Interrupt Mask Set register. */
4009 lem_enable_intr(struct adapter *adapter)
4011 struct e1000_hw *hw = &adapter->hw;
4012 u32 ims_mask = IMS_ENABLE_MASK;
4014 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
/* Mask all interrupt causes by writing all-ones to the Interrupt
 * Mask Clear register. */
4018 lem_disable_intr(struct adapter *adapter)
4020 struct e1000_hw *hw = &adapter->hw;
4022 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4026 * Bit of a misnomer, what this really means is
4027 * to enable OS management of the system... aka
4028 * to disable special hardware management features
/*
 * Hand network control to the OS (see the note above): when a
 * management controller is present, stop the hardware from
 * intercepting ARP traffic so the host stack sees it.
 */
4031 lem_init_manageability(struct adapter *adapter)
4033 /* A shared code workaround */
4034 if (adapter->has_manage) {
4035 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4036 /* disable hardware interception of ARP */
4037 manc &= ~(E1000_MANC_ARP_EN);
4038 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4043 * Give control back to hardware management
4044 * controller if there is one.
/*
 * Give network control back to the management controller (if any):
 * re-enable hardware interception of ARP in MANC. Inverse of
 * lem_init_manageability().
 */
4047 lem_release_manageability(struct adapter *adapter)
4049 if (adapter->has_manage) {
4050 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4052 /* re-enable hardware interception of ARP */
4053 manc |= E1000_MANC_ARP_EN;
4054 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4059 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
4060 * For ASF and Pass Through versions of f/w this means
4061 * that the driver is loaded. For AMT version type f/w
4062 * this means that the network i/f is open.
/* Set CTRL_EXT.DRV_LOAD to tell firmware the driver has taken
 * control (see the comment block above for ASF/AMT semantics). */
4065 lem_get_hw_control(struct adapter *adapter)
4069 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4070 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4071 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4076 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4077 * For ASF and Pass Through versions of f/w this means that
4078 * the driver is no longer loaded. For AMT versions of the
4079 * f/w this means that the network i/f is closed.
/* Clear CTRL_EXT.DRV_LOAD to hand control back to firmware; no-op
 * when the part has no management capability. */
4082 lem_release_hw_control(struct adapter *adapter)
4086 if (!adapter->has_manage)
4089 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4090 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4091 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
/*
 * Validate an Ethernet MAC address: reject multicast/broadcast
 * (low bit of first octet set) and the all-zero address.
 * NOTE(review): return statements are elided from this view.
 */
4096 lem_is_valid_ether_addr(u8 *addr)
4098 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4100 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4108 ** Parse the interface capabilities with regard
4109 ** to both system management and wake-on-lan for
/*
 * Determine management and wake-on-LAN capabilities at attach time:
 * read the APME bit from the proper NVM word for this MAC/port, then
 * apply per-device quirks where the EEPROM is wrong or WoL is only
 * supported on port A.
 */
4113 lem_get_wakeup(device_t dev)
4115 struct adapter *adapter = device_get_softc(dev);
4116 u16 eeprom_data = 0, device_id, apme_mask;
4118 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
4119 apme_mask = EM_EEPROM_APME;
4121 switch (adapter->hw.mac.type) {
4126 e1000_read_nvm(&adapter->hw,
4127 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
4128 apme_mask = EM_82544_APME;
4131 case e1000_82546_rev_3:
/* Dual-port parts: port B keeps its APME bit in a separate word. */
4132 if (adapter->hw.bus.func == 1) {
4133 e1000_read_nvm(&adapter->hw,
4134 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
4137 e1000_read_nvm(&adapter->hw,
4138 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4141 e1000_read_nvm(&adapter->hw,
4142 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4145 if (eeprom_data & apme_mask)
4146 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
4148 * We have the eeprom settings, now apply the special cases
4149 * where the eeprom may be wrong or the board won't support
4150 * wake on lan on a particular port
4152 device_id = pci_get_device(dev);
4153 switch (device_id) {
4154 case E1000_DEV_ID_82546GB_PCIE:
4157 case E1000_DEV_ID_82546EB_FIBER:
4158 case E1000_DEV_ID_82546GB_FIBER:
4159 /* Wake events only supported on port A for dual fiber
4160 * regardless of eeprom setting */
4161 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
4162 E1000_STATUS_FUNC_1)
4165 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
4166 /* if quad port adapter, disable WoL on all but port A */
4167 if (global_quad_port_a != 0)
4169 /* Reset for multiple quad port adapters */
4170 if (++global_quad_port_a == 4)
4171 global_quad_port_a = 0;
4179 * Enable PCI Wake On Lan capability
/*
 * Arm wake-on-LAN before suspend/shutdown: advertise the wakeup
 * capability in CTRL/WUC, keep the laser on for fiber media, trim
 * adapter->wol down to the interface's enabled WOL capabilities,
 * program the wake-up filter, and finally set the PCI PME bits so
 * the bus can wake the system. Bails early if the device has no
 * PCI power-management capability.
 */
4182 lem_enable_wakeup(device_t dev)
4184 struct adapter *adapter = device_get_softc(dev);
4185 if_t ifp = adapter->ifp;
4186 u32 pmc, ctrl, ctrl_ext, rctl;
4189 if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
4192 /* Advertise the wakeup capability */
4193 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4194 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
4195 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4196 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4198 /* Keep the laser running on Fiber adapters */
4199 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
4200 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
4201 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4202 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4203 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
4207 ** Determine type of Wakeup: note that wol
4208 ** is set with all bits on by default.
4210 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0)
4211 adapter->wol &= ~E1000_WUFC_MAG;
4213 if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0)
4214 adapter->wol &= ~E1000_WUFC_MC;
/* Multicast wake needs promiscuous-multicast reception enabled. */
4216 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4217 rctl |= E1000_RCTL_MPE;
4218 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4221 if (adapter->hw.mac.type == e1000_pchlan) {
/* pchlan parts route wake-up through the PHY instead of the MAC. */
4222 if (lem_enable_phy_wakeup(adapter))
4225 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4226 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
4231 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4232 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4233 if (if_getcapenable(ifp) & IFCAP_WOL)
4234 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4235 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
4241 ** WOL in the newer chipset interfaces (pchlan)
4242 ** require thing to be copied into the phy
/*
 * Mirror MAC wake-up state into the PHY for newer (pchlan) parts:
 * copy the receive address registers (RAR) and multicast table (MTA)
 * into the corresponding BM_* PHY registers, translate RCTL/CTRL
 * bits into the PHY receive-control register, enable PHY wake in
 * both MAC and PHY, then set the host-wakeup bit on PHY page 769
 * under the PHY semaphore. Returns the last e1000 status code.
 */
4245 lem_enable_phy_wakeup(struct adapter *adapter)
4247 struct e1000_hw *hw = &adapter->hw;
4251 /* copy MAC RARs to PHY RARs */
4252 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4253 mreg = E1000_READ_REG(hw, E1000_RAL(i));
4254 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
4255 e1000_write_phy_reg(hw, BM_RAR_M(i),
4256 (u16)((mreg >> 16) & 0xFFFF));
4257 mreg = E1000_READ_REG(hw, E1000_RAH(i));
4258 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
4259 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
4260 (u16)((mreg >> 16) & 0xFFFF));
4263 /* copy MAC MTA to PHY MTA */
4264 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4265 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4266 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4267 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4268 (u16)((mreg >> 16) & 0xFFFF));
4271 /* configure PHY Rx Control register */
4272 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
4273 mreg = E1000_READ_REG(hw, E1000_RCTL);
4274 if (mreg & E1000_RCTL_UPE)
4275 preg |= BM_RCTL_UPE;
4276 if (mreg & E1000_RCTL_MPE)
4277 preg |= BM_RCTL_MPE;
4278 preg &= ~(BM_RCTL_MO_MASK);
4279 if (mreg & E1000_RCTL_MO_3)
4280 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4281 << BM_RCTL_MO_SHIFT);
4282 if (mreg & E1000_RCTL_BAM)
4283 preg |= BM_RCTL_BAM;
4284 if (mreg & E1000_RCTL_PMCF)
4285 preg |= BM_RCTL_PMCF;
4286 mreg = E1000_READ_REG(hw, E1000_CTRL);
4287 if (mreg & E1000_CTRL_RFCE)
4288 preg |= BM_RCTL_RFCE;
4289 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
4291 /* enable PHY wakeup in MAC register */
4292 E1000_WRITE_REG(hw, E1000_WUC,
4293 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4294 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
4296 /* configure and enable PHY wakeup in PHY registers */
4297 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
4298 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4300 /* activate PHY wakeup */
/* The raw MDIC accesses below require exclusive PHY ownership. */
4301 ret = hw->phy.ops.acquire(hw);
4303 printf("Could not acquire PHY\n");
4306 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4307 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4308 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
4310 printf("Could not read PHY page 769\n");
4313 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4314 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
4316 printf("Could not set PHY Host Wakeup bit\n");
4318 hw->phy.ops.release(hw);
/*
 * led(4)-style callback: turn the adapter's identification LED on or
 * off under the core lock, using the shared-code LED helpers.
 */
4324 lem_led_func(void *arg, int onoff)
4326 struct adapter *adapter = arg;
4328 EM_CORE_LOCK(adapter);
4330 e1000_setup_led(&adapter->hw);
4331 e1000_led_on(&adapter->hw);
4333 e1000_led_off(&adapter->hw);
4334 e1000_cleanup_led(&adapter->hw);
4336 EM_CORE_UNLOCK(adapter);
4339 /*********************************************************************
4340 * 82544 Coexistence issue workaround.
4341 * There are 2 issues.
4342 * 1. Transmit Hang issue.
4343 * To detect this issue, following equation can be used...
4344 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4345 * If SUM[3:0] is in between 1 to 4, we will have this issue.
4348 * To detect this issue, following equation can be used...
4349 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4350 * If SUM[3:0] is in between 9 to c, we will have this issue.
4354 * Make sure we do not have ending address
4355 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4357 *************************************************************************/
/*
 * 82544 coexistence workaround (see banner above): compute the
 * "safe terminator" SIZE[3:0] + ADDR[2:0]; if it lands in the unsafe
 * ranges 0x1-0x4 or 0x9-0xC, split the buffer into two descriptors
 * (length-4 bytes, then the last 4 bytes) so neither ends on an
 * unsafe boundary. Fills desc_array and returns the element count
 * (1 or 2).
 */
4359 lem_fill_descriptors (bus_addr_t address, u32 length,
4360 PDESC_ARRAY desc_array)
4362 u32 safe_terminator;
4364 /* Since issue is sensitive to length and address.*/
4365 /* Let us first check the address...*/
4367 desc_array->descriptor[0].address = address;
4368 desc_array->descriptor[0].length = length;
4369 desc_array->elements = 1;
4370 return (desc_array->elements);
4372 safe_terminator = (u32)((((u32)address & 0x7) +
4373 (length & 0xF)) & 0xF);
4374 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4375 if (safe_terminator == 0 ||
4376 (safe_terminator > 4 &&
4377 safe_terminator < 9) ||
4378 (safe_terminator > 0xC &&
4379 safe_terminator <= 0xF)) {
4380 desc_array->descriptor[0].address = address;
4381 desc_array->descriptor[0].length = length;
4382 desc_array->elements = 1;
4383 return (desc_array->elements);
/* Unsafe ending address: split off the last 4 bytes. */
4386 desc_array->descriptor[0].address = address;
4387 desc_array->descriptor[0].length = length - 4;
4388 desc_array->descriptor[1].address = address + (length - 4);
4389 desc_array->descriptor[1].length = 4;
4390 desc_array->elements = 2;
4391 return (desc_array->elements);
4394 /**********************************************************************
4396 * Update the board statistics counters.
4398 **********************************************************************/
/*
 * Accumulate the hardware statistics registers into the software
 * counters in adapter->stats. Most e1000 stat registers are
 * clear-on-read, hence the += accumulation pattern. Symbol/sequence
 * errors are only meaningful on copper media or with link up.
 */
4400 lem_update_stats_counters(struct adapter *adapter)
4403 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
4404 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4405 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4406 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4408 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4409 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4410 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4411 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4413 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4414 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4415 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4416 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4417 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4418 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4419 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4420 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4421 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4422 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4423 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4424 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4425 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4426 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4427 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4428 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4429 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4430 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4431 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4432 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4434 /* For the 64-bit byte counters the low dword must be read first. */
4435 /* Both registers clear on the read of the high dword */
4437 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
4438 ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
4439 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
4440 ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
4442 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
4443 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
4444 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
4445 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
4446 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
4448 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
4449 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
4451 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
4452 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
4453 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
4454 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
4455 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
4456 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
4457 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
4458 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
4459 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
4460 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* These counters only exist on 82543 and newer parts. */
4462 if (adapter->hw.mac.type >= e1000_82543) {
4463 adapter->stats.algnerrc +=
4464 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
4465 adapter->stats.rxerrc +=
4466 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
4467 adapter->stats.tncrs +=
4468 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
4469 adapter->stats.cexterr +=
4470 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
4471 adapter->stats.tsctc +=
4472 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
4473 adapter->stats.tsctfc +=
4474 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
/*
 * if_get_counter backend: map generic interface counters onto the
 * driver's accumulated statistics. Input errors aggregate several
 * hardware error counters plus driver-level drops; everything not
 * handled here falls through to the stack's default counters.
 */
4479 lem_get_counter(if_t ifp, ift_counter cnt)
4481 struct adapter *adapter;
4483 adapter = if_getsoftc(ifp);
4486 case IFCOUNTER_COLLISIONS:
4487 return (adapter->stats.colc);
4488 case IFCOUNTER_IERRORS:
4489 return (adapter->dropped_pkts + adapter->stats.rxerrc +
4490 adapter->stats.crcerrs + adapter->stats.algnerrc +
4491 adapter->stats.ruc + adapter->stats.roc +
4492 adapter->stats.mpc + adapter->stats.cexterr);
4493 case IFCOUNTER_OERRORS:
4494 return (adapter->stats.ecol + adapter->stats.latecol +
4495 adapter->watchdog_events);
4497 return (if_get_counter_default(ifp, cnt));
4501 /* Export a single 32-bit register via a read-only sysctl. */
/* Export a single 32-bit register via a read-only sysctl: oid_arg1
 * is the adapter, oid_arg2 the register offset to read. */
4503 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4505 struct adapter *adapter;
4508 adapter = oidp->oid_arg1;
4509 val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4510 return (sysctl_handle_int(oidp, &val, 0, req));
4514 * Add sysctl variables, one per statistic, to the system.
4517 lem_add_hw_stats(struct adapter *adapter)
4519 device_t dev = adapter->dev;
4521 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4522 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4523 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4524 struct e1000_hw_stats *stats = &adapter->stats;
/*
 * NOTE(review): interior of the driver's sysctl/statistics registration
 * routine — the function header and the "ctx"/"child" declarations sit
 * above this chunk; confirm names against the full file.
 */
4526 struct sysctl_oid *stat_node;
4527 struct sysctl_oid_list *stat_list;
4529 /* Driver Statistics */
4530 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail",
4531 CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4533 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
4534 CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4535 "Std mbuf cluster failed");
4536 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4537 CTLFLAG_RD, &adapter->dropped_pkts,
4538 "Driver dropped packets");
4539 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
4540 CTLFLAG_RD, &adapter->no_tx_dma_setup,
4541 "Driver tx dma failure in xmit");
4542 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4543 CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4544 "Not enough tx descriptors failure in xmit");
4545 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4546 CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4547 "Not enough tx descriptors failure in xmit");
4548 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4549 CTLFLAG_RD, &adapter->rx_overruns,
4551 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4552 CTLFLAG_RD, &adapter->watchdog_events,
4553 "Watchdog timeouts");
/* Selected hardware registers, read live via lem_sysctl_reg_handler. */
4555 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4556 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4557 lem_sysctl_reg_handler, "IU",
4558 "Device Control Register");
4559 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4560 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4561 lem_sysctl_reg_handler, "IU",
4562 "Receiver Control Register");
4563 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4564 CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4565 "Flow Control High Watermark");
4566 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
4567 CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4568 "Flow Control Low Watermark");
4569 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4570 CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4571 "TX FIFO workaround events");
4572 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4573 CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
/* Descriptor ring head/tail registers for queue 0 (live reads). */
4576 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head",
4577 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4578 lem_sysctl_reg_handler, "IU",
4579 "Transmit Descriptor Head");
4580 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail",
4581 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4582 lem_sysctl_reg_handler, "IU",
4583 "Transmit Descriptor Tail");
4584 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head",
4585 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4586 lem_sysctl_reg_handler, "IU",
4587 "Receive Descriptor Head");
4588 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail",
4589 CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4590 lem_sysctl_reg_handler, "IU",
4591 "Receive Descriptor Tail");
4594 /* MAC stats get their own sub node */
4596 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4597 CTLFLAG_RD, NULL, "Statistics");
4598 stat_list = SYSCTL_CHILDREN(stat_node);
/* Hardware MAC counters mirrored into adapter->stats. */
4600 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4601 CTLFLAG_RD, &stats->ecol,
4602 "Excessive collisions");
4603 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4604 CTLFLAG_RD, &stats->scc,
4605 "Single collisions");
4606 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4607 CTLFLAG_RD, &stats->mcc,
4608 "Multiple collisions");
4609 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4610 CTLFLAG_RD, &stats->latecol,
4612 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4613 CTLFLAG_RD, &stats->colc,
4615 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4616 CTLFLAG_RD, &adapter->stats.symerrs,
4618 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4619 CTLFLAG_RD, &adapter->stats.sec,
4621 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4622 CTLFLAG_RD, &adapter->stats.dc,
4624 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4625 CTLFLAG_RD, &adapter->stats.mpc,
4627 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4628 CTLFLAG_RD, &adapter->stats.rnbc,
4629 "Receive No Buffers");
4630 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4631 CTLFLAG_RD, &adapter->stats.ruc,
4632 "Receive Undersize");
4633 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4634 CTLFLAG_RD, &adapter->stats.rfc,
4635 "Fragmented Packets Received ");
4636 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4637 CTLFLAG_RD, &adapter->stats.roc,
4638 "Oversized Packets Received");
4639 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4640 CTLFLAG_RD, &adapter->stats.rjc,
4642 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4643 CTLFLAG_RD, &adapter->stats.rxerrc,
4645 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4646 CTLFLAG_RD, &adapter->stats.crcerrs,
4648 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4649 CTLFLAG_RD, &adapter->stats.algnerrc,
4650 "Alignment Errors");
4651 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4652 CTLFLAG_RD, &adapter->stats.cexterr,
4653 "Collision/Carrier extension errors");
4654 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4655 CTLFLAG_RD, &adapter->stats.xonrxc,
4657 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4658 CTLFLAG_RD, &adapter->stats.xontxc,
4660 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4661 CTLFLAG_RD, &adapter->stats.xoffrxc,
4663 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4664 CTLFLAG_RD, &adapter->stats.xofftxc,
4665 "XOFF Transmitted");
4667 /* Packet Reception Stats */
4668 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4669 CTLFLAG_RD, &adapter->stats.tpr,
4670 "Total Packets Received ");
4671 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4672 CTLFLAG_RD, &adapter->stats.gprc,
4673 "Good Packets Received");
4674 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4675 CTLFLAG_RD, &adapter->stats.bprc,
4676 "Broadcast Packets Received");
4677 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4678 CTLFLAG_RD, &adapter->stats.mprc,
4679 "Multicast Packets Received");
4680 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4681 CTLFLAG_RD, &adapter->stats.prc64,
4682 "64 byte frames received ");
4683 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4684 CTLFLAG_RD, &adapter->stats.prc127,
4685 "65-127 byte frames received");
4686 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4687 CTLFLAG_RD, &adapter->stats.prc255,
4688 "128-255 byte frames received");
4689 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4690 CTLFLAG_RD, &adapter->stats.prc511,
4691 "256-511 byte frames received");
4692 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4693 CTLFLAG_RD, &adapter->stats.prc1023,
4694 "512-1023 byte frames received");
4695 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4696 CTLFLAG_RD, &adapter->stats.prc1522,
/* Fixed: was "1023-1522"; prc1522 counts 1024-1522 byte frames, matching
 * the node name and the tx counterpart below. */
4697 "1024-1522 byte frames received");
4698 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4699 CTLFLAG_RD, &adapter->stats.gorc,
4700 "Good Octets Received");
4702 /* Packet Transmission Stats */
4703 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4704 CTLFLAG_RD, &adapter->stats.gotc,
4705 "Good Octets Transmitted");
4706 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4707 CTLFLAG_RD, &adapter->stats.tpt,
4708 "Total Packets Transmitted");
4709 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4710 CTLFLAG_RD, &adapter->stats.gptc,
4711 "Good Packets Transmitted");
4712 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4713 CTLFLAG_RD, &adapter->stats.bptc,
4714 "Broadcast Packets Transmitted");
4715 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4716 CTLFLAG_RD, &adapter->stats.mptc,
4717 "Multicast Packets Transmitted");
4718 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4719 CTLFLAG_RD, &adapter->stats.ptc64,
4720 "64 byte frames transmitted ");
4721 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4722 CTLFLAG_RD, &adapter->stats.ptc127,
4723 "65-127 byte frames transmitted");
4724 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4725 CTLFLAG_RD, &adapter->stats.ptc255,
4726 "128-255 byte frames transmitted");
4727 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4728 CTLFLAG_RD, &adapter->stats.ptc511,
4729 "256-511 byte frames transmitted");
4730 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4731 CTLFLAG_RD, &adapter->stats.ptc1023,
4732 "512-1023 byte frames transmitted");
4733 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4734 CTLFLAG_RD, &adapter->stats.ptc1522,
4735 "1024-1522 byte frames transmitted");
4736 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4737 CTLFLAG_RD, &adapter->stats.tsctc,
4738 "TSO Contexts Transmitted");
4739 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4740 CTLFLAG_RD, &adapter->stats.tsctfc,
4741 "TSO Contexts Failed");
4744 /**********************************************************************
4746 * This routine provides a way to dump out the adapter eeprom,
4747 * often a useful debug/service tool. It only dumps the first
4748 * 32 words, since the data that matters resides in that range.
4750 **********************************************************************/
/*
 * Sysctl handler: writing a trigger value to this node (the exact check is
 * elided in this view — confirm in the full file) hex-dumps the first 32
 * 16-bit EEPROM words via lem_print_nvm_info().  A plain read returns the
 * handled integer without dumping anything.
 */
4753 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4755 struct adapter *adapter;
/* Copy the integer in/out of userland; "result" receives any new value. */
4760 error = sysctl_handle_int(oidp, &result, 0, req);
/* Bail out on error or when this was a read (no new value supplied). */
4762 if (error || !req->newptr)
4766 * This value will cause a hex dump of the
4767 * first 32 16-bit words of the EEPROM to
/* arg1 is the adapter softc registered when the sysctl was created. */
4771 adapter = (struct adapter *)arg1;
4772 lem_print_nvm_info(adapter);
/*
 * Dump the first 32 16-bit EEPROM/NVM words to the console as a hex table,
 * eight words per row, each row prefixed with its word offset.
 */
4779 lem_print_nvm_info(struct adapter *adapter)
4784 /* It's a bit crude, but it gets the job done */
4785 printf("\nInterface EEPROM Dump:\n");
4786 printf("Offset\n0x0000 ");
/* i walks all 32 words; j counts words within the current output row. */
4787 for (i = 0, j = 0; i < 32; i++, j++) {
4788 if (j == 8) { /* Make the offset block */
/* NOTE(review): "row" update is elided in this view — presumably
 * incremented alongside resetting j; confirm in the full file. */
4790 printf("\n0x00%x0 ",row);
/* Fetch one 16-bit word at word-offset i from the NVM. */
4792 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4793 printf("%04x ", eeprom_data);
/*
 * Sysctl handler backing an interrupt-delay register described by a
 * struct em_int_delay_info (adapter, register offset, cached value).
 * Converts the user-supplied microsecond value to device ticks and
 * read-modify-writes it into the low 16 bits of the register.
 */
4799 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4801 struct em_int_delay_info *info;
4802 struct adapter *adapter;
4808 info = (struct em_int_delay_info *)arg1;
4809 usecs = info->value;
/* Export the current value; pick up a replacement if one was written. */
4810 error = sysctl_handle_int(oidp, &usecs, 0, req);
4811 if (error != 0 || req->newptr == NULL)
/* Reject values whose tick conversion would not fit in 16 bits. */
4813 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4815 info->value = usecs;
4816 ticks = EM_USECS_TO_TICKS(usecs);
4817 if (info->offset == E1000_ITR) /* units are 256ns here */
4820 adapter = info->adapter;
4822 EM_CORE_LOCK(adapter);
/* RMW under the core lock: only the low 16 bits carry the delay. */
4823 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4824 regval = (regval & ~0xffff) | (ticks & 0xffff);
4825 /* Handle a few special cases. */
4826 switch (info->offset) {
/* NOTE(review): the case labels are elided in this view; these two
 * statements toggle the IDE bit in the cached tx descriptor command. */
4831 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4832 /* Don't write 0 into the TIDV register. */
4835 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4838 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4839 EM_CORE_UNLOCK(adapter);
/*
 * Register one read/write interrupt-delay sysctl under the device's tree.
 * Records the adapter, register offset and initial value in "info", which
 * is then passed as arg1 to lem_sysctl_int_delay on every access.
 */
4844 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4845 const char *description, struct em_int_delay_info *info,
4846 int offset, int value)
4848 info->adapter = adapter;
4849 info->offset = offset;
4850 info->value = value;
4851 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4852 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4853 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4854 info, 0, lem_sysctl_int_delay, "I", description);
/*
 * Expose the integer pointed to by "limit" as a read/write sysctl under the
 * device's tree.  NOTE(review): CTLTYPE_INT is redundant here —
 * SYSCTL_ADD_INT already supplies the type flag; harmless, but see sysctl(9).
 * Any assignment of "value" into *limit is elided in this view — confirm.
 */
4858 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4859 const char *description, int *limit, int value)
4862 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4863 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4864 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
/*
 * Expose the rx processing limit (*limit) as a read/write sysctl under the
 * device's tree; structurally identical to lem_set_flow_cntrl above
 * (including the redundant CTLTYPE_INT flag — see sysctl(9)).
 */
4868 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4869 const char *description, int *limit, int value)
4872 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4873 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4874 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);