1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
43 #include <sys/endian.h>
44 #include <sys/kernel.h>
45 #include <sys/kthread.h>
46 #include <sys/malloc.h>
48 #include <sys/module.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <sys/taskqueue.h>
54 #if __FreeBSD_version >= 700029
55 #include <sys/eventhandler.h>
57 #include <machine/bus.h>
58 #include <machine/resource.h>
61 #include <net/ethernet.h>
63 #include <net/if_arp.h>
64 #include <net/if_dl.h>
65 #include <net/if_media.h>
67 #include <net/if_types.h>
68 #include <net/if_vlan_var.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/in.h>
72 #include <netinet/if_ether.h>
73 #include <netinet/ip.h>
74 #include <netinet/ip6.h>
75 #include <netinet/tcp.h>
76 #include <netinet/udp.h>
78 #include <machine/in_cksum.h>
79 #include <dev/led/led.h>
80 #include <dev/pci/pcivar.h>
81 #include <dev/pci/pcireg.h>
83 #include "e1000_api.h"
86 /*********************************************************************
87 * Set this to one to display debug statistics
88 *********************************************************************/
/* Module-scope debug switch; presumably consulted where hardware stats
 * are printed (see lem_print_hw_stats prototype below) -- confirm in
 * the full source. */
89 int lem_display_debug_stats = 0;
91 /*********************************************************************
92 * Legacy Em Driver version:
93 *********************************************************************/
/* Human-readable version string for this legacy em driver. */
94 char lem_driver_version[] = "1.0.1";
97 /*********************************************************************
100 * Used by probe to select devices to load on
101 * Last field stores an index into e1000_strings
102 * Last entry must be all 0s
104 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
105 *********************************************************************/
/* PCI ID match table walked by lem_probe(); 0x8086 is Intel's PCI
 * vendor ID and PCI_ANY_ID wildcards the subvendor/subdevice match. */
107 static em_vendor_info_t lem_vendor_info_array[] =
109 /* Intel(R) PRO/1000 Network Connection */
110 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
111 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
112 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
113 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
118 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
126 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
132 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
142 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
143 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
147 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
148 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
149 PCI_ANY_ID, PCI_ANY_ID, 0},
151 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
152 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
153 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
154 /* required last entry */
/* NOTE(review): the all-zero terminator entry itself is not visible in
 * this excerpt; per the banner comment the table must end with an
 * all-0s entry -- confirm against the full source. */
158 /*********************************************************************
159 * Table of branding strings for all supported NICs.
160 *********************************************************************/
/* Indexed by the String Index field of lem_vendor_info_array entries;
 * lem_probe() uses it to build the device description. */
162 static char *lem_strings[] = {
163 "Intel(R) PRO/1000 Legacy Network Connection"
166 /*********************************************************************
167 * Function prototypes
168 *********************************************************************/
/* Newbus device interface entry points (wired up in lem_methods). */
169 static int lem_probe(device_t);
170 static int lem_attach(device_t);
171 static int lem_detach(device_t);
172 static int lem_shutdown(device_t);
173 static int lem_suspend(device_t);
174 static int lem_resume(device_t);
/* ifnet entry points and driver-internal setup/teardown helpers. */
175 static void lem_start(struct ifnet *);
176 static void lem_start_locked(struct ifnet *ifp);
177 static int lem_ioctl(struct ifnet *, u_long, caddr_t);
178 static void lem_init(void *);
179 static void lem_init_locked(struct adapter *);
180 static void lem_stop(void *);
181 static void lem_media_status(struct ifnet *, struct ifmediareq *);
182 static int lem_media_change(struct ifnet *);
183 static void lem_identify_hardware(struct adapter *);
184 static int lem_allocate_pci_resources(struct adapter *);
185 static int lem_allocate_irq(struct adapter *adapter);
186 static void lem_free_pci_resources(struct adapter *);
187 static void lem_local_timer(void *);
188 static int lem_hardware_init(struct adapter *);
189 static void lem_setup_interface(device_t, struct adapter *);
190 static void lem_setup_transmit_structures(struct adapter *);
191 static void lem_initialize_transmit_unit(struct adapter *);
192 static int lem_setup_receive_structures(struct adapter *);
193 static void lem_initialize_receive_unit(struct adapter *);
194 static void lem_enable_intr(struct adapter *);
195 static void lem_disable_intr(struct adapter *);
196 static void lem_free_transmit_structures(struct adapter *);
197 static void lem_free_receive_structures(struct adapter *);
198 static void lem_update_stats_counters(struct adapter *);
199 static void lem_txeof(struct adapter *);
200 static void lem_tx_purge(struct adapter *);
201 static int lem_allocate_receive_structures(struct adapter *);
202 static int lem_allocate_transmit_structures(struct adapter *);
203 static int lem_rxeof(struct adapter *, int);
204 #ifndef __NO_STRICT_ALIGNMENT
205 static int lem_fixup_rx(struct adapter *);
207 static void lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
209 static void lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
211 static void lem_set_promisc(struct adapter *);
212 static void lem_disable_promisc(struct adapter *);
213 static void lem_set_multi(struct adapter *);
214 static void lem_print_hw_stats(struct adapter *);
215 static void lem_update_link_status(struct adapter *);
216 static int lem_get_buf(struct adapter *, int);
/* VLAN hardware-filter support exists only on FreeBSD >= 7.0.029. */
217 #if __FreeBSD_version >= 700029
218 static void lem_register_vlan(void *, struct ifnet *, u16);
219 static void lem_unregister_vlan(void *, struct ifnet *, u16);
220 static void lem_setup_vlan_hw_support(struct adapter *);
222 static int lem_xmit(struct adapter *, struct mbuf **);
223 static void lem_smartspeed(struct adapter *);
/* 82547-specific TX FIFO workaround helpers. */
224 static int lem_82547_fifo_workaround(struct adapter *, int);
225 static void lem_82547_update_fifo_head(struct adapter *, int);
226 static int lem_82547_tx_fifo_reset(struct adapter *);
227 static void lem_82547_move_tail(void *);
228 static int lem_dma_malloc(struct adapter *, bus_size_t,
229 struct em_dma_alloc *, int);
230 static void lem_dma_free(struct adapter *, struct em_dma_alloc *);
231 static void lem_print_debug_info(struct adapter *);
232 static void lem_print_nvm_info(struct adapter *);
233 static int lem_is_valid_ether_addr(u8 *);
234 static int lem_sysctl_stats(SYSCTL_HANDLER_ARGS);
235 static int lem_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
236 static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
237 PDESC_ARRAY desc_array);
238 static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
239 static void lem_add_int_delay_sysctl(struct adapter *, const char *,
240 const char *, struct em_int_delay_info *, int, int);
241 /* Management and WOL Support */
242 static void lem_init_manageability(struct adapter *);
243 static void lem_release_manageability(struct adapter *);
244 static void lem_get_hw_control(struct adapter *);
245 static void lem_release_hw_control(struct adapter *);
246 static void lem_get_wakeup(device_t);
247 static void lem_enable_wakeup(device_t);
248 static int lem_enable_phy_wakeup(struct adapter *);
249 static void lem_led_func(void *, int);
252 static void lem_intr(void *);
/* Fast-interrupt handler: declared void before FreeBSD 7.0, int
 * (filter status) afterwards; the taskqueue deferred handlers below
 * belong to the non-EM_LEGACY_IRQ path (see #endif tag). */
254 #if __FreeBSD_version < 700000
255 static void lem_irq_fast(void *);
257 static int lem_irq_fast(void *);
259 static void lem_handle_rxtx(void *context, int pending);
260 static void lem_handle_link(void *context, int pending);
261 static void lem_add_rx_process_limit(struct adapter *, const char *,
262 const char *, int *, int);
263 #endif /* ~EM_LEGACY_IRQ */
265 #ifdef DEVICE_POLLING
266 static poll_handler_t lem_poll;
269 /*********************************************************************
270 * FreeBSD Device Interface Entry Points
271 *********************************************************************/
/* Newbus method table; the terminating entry is not visible in this
 * excerpt -- confirm in the full source. */
273 static device_method_t lem_methods[] = {
274 /* Device interface */
275 DEVMETHOD(device_probe, lem_probe),
276 DEVMETHOD(device_attach, lem_attach),
277 DEVMETHOD(device_detach, lem_detach),
278 DEVMETHOD(device_shutdown, lem_shutdown),
279 DEVMETHOD(device_suspend, lem_suspend),
280 DEVMETHOD(device_resume, lem_resume),
/* Driver name "em" and softc size; the devclass is shared with the
 * newer em(4) driver, hence the extern declaration below. */
284 static driver_t lem_driver = {
285 "em", lem_methods, sizeof(struct adapter),
288 extern devclass_t em_devclass;
289 DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
290 MODULE_DEPEND(lem, pci, 1, 1, 1);
291 MODULE_DEPEND(lem, ether, 1, 1, 1);
293 /*********************************************************************
294 * Tunable default values.
295 *********************************************************************/
/* Convert between hardware interrupt-delay ticks (1.024 us units, per
 * the 1024/1000 ratio) and microseconds, rounding to nearest. */
297 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
298 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
/* Defaults for the interrupt-delay sysctls registered in lem_attach(),
 * overridable via the hw.em.* loader tunables below. */
300 static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
301 static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
302 static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
303 static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
304 static int lem_rxd = EM_DEFAULT_RXD;
305 static int lem_txd = EM_DEFAULT_TXD;
306 static int lem_smart_pwr_down = FALSE;
308 /* Controls whether promiscuous also shows bad packets */
309 static int lem_debug_sbp = FALSE;
311 TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
312 TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
313 TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
314 TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
315 TUNABLE_INT("hw.em.rxd", &lem_rxd);
316 TUNABLE_INT("hw.em.txd", &lem_txd);
317 TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
318 TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
320 #ifndef EM_LEGACY_IRQ
321 /* How many packets rxeof tries to clean at a time */
322 static int lem_rx_process_limit = 100;
323 TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
326 /* Flow control setting - default to FULL */
327 static int lem_fc_setting = e1000_fc_full;
328 TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
331 ** Shadow VFTA table, this is needed because
332 ** the real vlan filter table gets cleared during
333 ** a soft reset and the driver needs to be able
336 static u32 lem_shadow_vfta[EM_VFTA_SIZE];
338 /* Global used in WOL setup with multiport cards */
339 static int global_quad_port_a = 0;
341 /*********************************************************************
342 * Device identification routine
344 * em_probe determines if the driver should be loaded on
345 * adapter based on PCI vendor/device id of the adapter.
347 * return BUS_PROBE_DEFAULT on success, positive on failure
348 *********************************************************************/
351 lem_probe(device_t dev)
353 char adapter_name[60];
354 u16 pci_vendor_id = 0;
355 u16 pci_device_id = 0;
356 u16 pci_subvendor_id = 0;
357 u16 pci_subdevice_id = 0;
358 em_vendor_info_t *ent;
360 INIT_DEBUGOUT("em_probe: begin");
/* Reject non-Intel devices up front; the failure return path is not
 * visible in this excerpt (presumably ENXIO -- confirm). */
362 pci_vendor_id = pci_get_vendor(dev);
363 if (pci_vendor_id != EM_VENDOR_ID)
366 pci_device_id = pci_get_device(dev);
367 pci_subvendor_id = pci_get_subvendor(dev);
368 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the match table until the all-zero terminator entry;
 * PCI_ANY_ID entries wildcard the subvendor/subdevice comparison. */
370 ent = lem_vendor_info_array;
371 while (ent->vendor_id != 0) {
372 if ((pci_vendor_id == ent->vendor_id) &&
373 (pci_device_id == ent->device_id) &&
375 ((pci_subvendor_id == ent->subvendor_id) ||
376 (ent->subvendor_id == PCI_ANY_ID)) &&
378 ((pci_subdevice_id == ent->subdevice_id) ||
379 (ent->subdevice_id == PCI_ANY_ID))) {
/* On a match, brand the device with the table's string plus the
 * driver version and claim it at default probe priority. */
380 sprintf(adapter_name, "%s %s",
381 lem_strings[ent->index],
383 device_set_desc_copy(dev, adapter_name);
384 return (BUS_PROBE_DEFAULT);
392 /*********************************************************************
393 * Device initialization routine
395 * The attach entry point is called when the driver is being loaded.
396 * This routine identifies the type of hardware, allocates all resources
397 * and initializes the hardware.
399 * return 0 on success, positive on failure
400 *********************************************************************/
403 lem_attach(device_t dev)
405 struct adapter *adapter;
409 INIT_DEBUGOUT("lem_attach: begin");
/* Bind the softc to the device and initialize the three driver
 * mutexes (core/TX/RX) before anything can take them. */
411 adapter = device_get_softc(dev);
412 adapter->dev = adapter->osdep.dev = dev;
413 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
414 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
415 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Per-device sysctl nodes: "debug" and "stats" procedure handlers. */
418 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
419 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
420 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
421 lem_sysctl_debug_info, "I", "Debug Information");
423 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
424 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
425 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
426 lem_sysctl_stats, "I", "Statistics");
/* Callouts are tied to their mutexes so callout_drain is safe. */
428 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
429 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
431 /* Determine hardware and mac info */
432 lem_identify_hardware(adapter);
434 /* Setup PCI resources */
435 if (lem_allocate_pci_resources(adapter)) {
436 device_printf(dev, "Allocation of PCI resources failed\n");
441 /* Do Shared Code initialization */
442 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
443 device_printf(dev, "Setup of Shared code failed\n");
448 e1000_get_bus_info(&adapter->hw);
450 /* Set up some sysctls for the tunable interrupt delays */
451 lem_add_int_delay_sysctl(adapter, "rx_int_delay",
452 "receive interrupt delay in usecs", &adapter->rx_int_delay,
453 E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
454 lem_add_int_delay_sysctl(adapter, "tx_int_delay",
455 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
456 E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
/* Absolute-delay limits (RADV/TADV) exist only on 82540 and later. */
457 if (adapter->hw.mac.type >= e1000_82540) {
458 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
459 "receive interrupt delay limit in usecs",
460 &adapter->rx_abs_int_delay,
461 E1000_REGISTER(&adapter->hw, E1000_RADV),
462 lem_rx_abs_int_delay_dflt);
463 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
464 "transmit interrupt delay limit in usecs",
465 &adapter->tx_abs_int_delay,
466 E1000_REGISTER(&adapter->hw, E1000_TADV),
467 lem_tx_abs_int_delay_dflt);
470 #ifndef EM_LEGACY_IRQ
471 /* Sysctls for limiting the amount of work done in the taskqueue */
472 lem_add_rx_process_limit(adapter, "rx_processing_limit",
473 "max number of rx packets to process", &adapter->rx_process_limit,
474 lem_rx_process_limit);
478 * Validate number of transmit and receive descriptors. It
479 * must not exceed hardware maximum, and must be multiple
480 * of E1000_DBA_ALIGN.
/* Out-of-range tunables fall back to the compile-time defaults; note
 * the pre-82544 parts have smaller descriptor-count maxima. */
482 if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
483 (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
484 (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
485 (lem_txd < EM_MIN_TXD)) {
486 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
487 EM_DEFAULT_TXD, lem_txd);
488 adapter->num_tx_desc = EM_DEFAULT_TXD;
490 adapter->num_tx_desc = lem_txd;
491 if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
492 (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
493 (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
494 (lem_rxd < EM_MIN_RXD)) {
495 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
496 EM_DEFAULT_RXD, lem_rxd);
497 adapter->num_rx_desc = EM_DEFAULT_RXD;
499 adapter->num_rx_desc = lem_rxd;
/* Autonegotiation defaults and a fixed 2KB receive buffer size. */
501 adapter->hw.mac.autoneg = DO_AUTO_NEG;
502 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
503 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
504 adapter->rx_buffer_len = 2048;
506 e1000_init_script_state_82541(&adapter->hw, TRUE);
507 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
510 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
511 adapter->hw.phy.mdix = AUTO_ALL_MODES;
512 adapter->hw.phy.disable_polarity_correction = FALSE;
513 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
517 * Set the frame limits assuming
518 * standard ethernet sized frames.
520 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
521 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
524 * This controls when hardware reports transmit completion
527 adapter->hw.mac.report_tx_early = 1;
529 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
532 /* Allocate Transmit Descriptor ring */
533 if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
534 device_printf(dev, "Unable to allocate tx_desc memory\n");
538 adapter->tx_desc_base =
539 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
541 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
544 /* Allocate Receive Descriptor ring */
545 if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
546 device_printf(dev, "Unable to allocate rx_desc memory\n");
550 adapter->rx_desc_base =
551 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
554 ** Start from a known state, this is
555 ** important in reading the nvm and
558 e1000_reset_hw(&adapter->hw);
560 /* Make sure we have a good EEPROM before we read from it */
561 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
563 ** Some PCI-E parts fail the first check due to
564 ** the link being in sleep state, call it again,
565 ** if it fails a second time its a real issue.
567 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
569 "The EEPROM Checksum Is Not Valid\n");
575 /* Copy the permanent MAC address out of the EEPROM */
576 if (e1000_read_mac_addr(&adapter->hw) < 0) {
577 device_printf(dev, "EEPROM read error while reading MAC"
583 if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
584 device_printf(dev, "Invalid MAC address\n");
589 /* Initialize the hardware */
590 if (lem_hardware_init(adapter)) {
591 device_printf(dev, "Unable to initialize the hardware\n");
596 /* Allocate transmit descriptors and buffers */
597 if (lem_allocate_transmit_structures(adapter)) {
598 device_printf(dev, "Could not setup transmit structures\n");
603 /* Allocate receive descriptors and buffers */
604 if (lem_allocate_receive_structures(adapter)) {
605 device_printf(dev, "Could not setup receive structures\n");
611 ** Do interrupt configuration
613 error = lem_allocate_irq(adapter);
618 * Get Wake-on-Lan and Management info for later use
622 /* Setup OS specific network interface */
623 lem_setup_interface(dev, adapter);
625 /* Initialize statistics */
626 lem_update_stats_counters(adapter);
628 adapter->hw.mac.get_link_status = 1;
629 lem_update_link_status(adapter);
631 /* Indicate SOL/IDER usage */
632 if (e1000_check_reset_block(&adapter->hw))
634 "PHY reset is blocked due to SOL/IDER session.\n");
636 /* Do we need workaround for 82544 PCI-X adapter? */
637 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
638 adapter->hw.mac.type == e1000_82544)
639 adapter->pcix_82544 = TRUE;
641 adapter->pcix_82544 = FALSE;
643 #if __FreeBSD_version >= 700029
644 /* Register for VLAN events */
645 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
646 lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
647 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
648 lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
651 /* Non-AMT based hardware can now take control from firmware */
652 if (adapter->has_manage && !adapter->has_amt)
653 lem_get_hw_control(adapter);
655 /* Tell the stack that the interface is not active */
656 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
658 adapter->led_dev = led_create(lem_led_func, adapter,
659 device_get_nameunit(dev));
661 INIT_DEBUGOUT("lem_attach: end");
/* Error-unwind tail: releases resources in reverse order of
 * acquisition. NOTE(review): the goto labels and the success/error
 * return statements are not visible in this excerpt -- confirm the
 * unwind ordering against the full source. */
666 lem_free_transmit_structures(adapter);
669 lem_release_hw_control(adapter);
670 lem_dma_free(adapter, &adapter->rxdma);
672 lem_dma_free(adapter, &adapter->txdma);
675 lem_free_pci_resources(adapter);
676 EM_TX_LOCK_DESTROY(adapter);
677 EM_RX_LOCK_DESTROY(adapter);
678 EM_CORE_LOCK_DESTROY(adapter);
683 /*********************************************************************
684 * Device removal routine
686 * The detach entry point is called when the driver is being removed.
687 * This routine stops the adapter and deallocates all the resources
688 * that were allocated for driver operation.
690 * return 0 on success, positive on failure
691 *********************************************************************/
694 lem_detach(device_t dev)
696 struct adapter *adapter = device_get_softc(dev);
697 struct ifnet *ifp = adapter->ifp;
699 INIT_DEBUGOUT("em_detach: begin");
701 /* Make sure VLANS are not using driver */
/* The vlan-in-use check reads if_vlantrunk on >= 7.0 and if_nvlans on
 * older releases; the refusal return path is not visible here. */
702 #if __FreeBSD_version >= 700000
703 if (adapter->ifp->if_vlantrunk != NULL) {
705 if (adapter->ifp->if_nvlans != 0) {
707 device_printf(dev,"Vlan in use, detach first\n");
711 #ifdef DEVICE_POLLING
712 if (ifp->if_capenable & IFCAP_POLLING)
713 ether_poll_deregister(ifp);
716 if (adapter->led_dev != NULL)
717 led_destroy(adapter->led_dev);
/* Mark in_detach under the core lock so lem_ioctl() starts refusing
 * requests, then quiesce the PHY and management interface. */
719 EM_CORE_LOCK(adapter);
721 adapter->in_detach = 1;
723 e1000_phy_hw_reset(&adapter->hw);
725 lem_release_manageability(adapter);
727 EM_TX_UNLOCK(adapter);
728 EM_CORE_UNLOCK(adapter);
730 #if __FreeBSD_version >= 700029
731 /* Unregister VLAN events */
732 if (adapter->vlan_attach != NULL)
733 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
734 if (adapter->vlan_detach != NULL)
735 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* Detach from the network stack and drain timers before freeing the
 * resources they might still reference. */
738 ether_ifdetach(adapter->ifp);
739 callout_drain(&adapter->timer);
740 callout_drain(&adapter->tx_fifo_timer);
742 lem_free_pci_resources(adapter);
743 bus_generic_detach(dev);
746 lem_free_transmit_structures(adapter);
747 lem_free_receive_structures(adapter);
749 /* Free Transmit Descriptor ring */
750 if (adapter->tx_desc_base) {
751 lem_dma_free(adapter, &adapter->txdma);
752 adapter->tx_desc_base = NULL;
755 /* Free Receive Descriptor ring */
756 if (adapter->rx_desc_base) {
757 lem_dma_free(adapter, &adapter->rxdma);
758 adapter->rx_desc_base = NULL;
761 lem_release_hw_control(adapter);
762 EM_TX_LOCK_DESTROY(adapter);
763 EM_RX_LOCK_DESTROY(adapter);
764 EM_CORE_LOCK_DESTROY(adapter);
769 /*********************************************************************
771 * Shutdown entry point
773 **********************************************************************/
/* Shutdown simply reuses the suspend path (release manageability,
 * release HW control, arm wakeup). */
776 lem_shutdown(device_t dev)
778 return lem_suspend(dev);
782 * Suspend/resume device methods.
/* Under the core lock: release management/firmware control and enable
 * the configured wakeup sources, then defer to the generic bus code. */
785 lem_suspend(device_t dev)
787 struct adapter *adapter = device_get_softc(dev);
789 EM_CORE_LOCK(adapter);
791 lem_release_manageability(adapter);
792 lem_release_hw_control(adapter);
793 lem_enable_wakeup(dev);
795 EM_CORE_UNLOCK(adapter);
797 return bus_generic_suspend(dev);
/* Re-initialize the adapter and manageability state after a suspend,
 * then defer to the generic bus resume. NOTE(review): ifp is declared
 * but its use (presumably a restart of transmission) is not visible in
 * this excerpt -- confirm against the full source. */
801 lem_resume(device_t dev)
803 struct adapter *adapter = device_get_softc(dev);
804 struct ifnet *ifp = adapter->ifp;
806 EM_CORE_LOCK(adapter);
807 lem_init_locked(adapter);
808 lem_init_manageability(adapter);
809 EM_CORE_UNLOCK(adapter);
812 return bus_generic_resume(dev);
/* Drain the interface send queue into the hardware; caller must hold
 * the TX lock (asserted below). Bails out early when the interface is
 * not in the running/not-OACTIVE state or the link is down. */
817 lem_start_locked(struct ifnet *ifp)
819 struct adapter *adapter = ifp->if_softc;
822 EM_TX_LOCK_ASSERT(adapter);
824 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
827 if (!adapter->link_active)
830 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
832 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
836 * Encapsulation can modify our pointer, and or make it
837 * NULL on failure. In that event, we can't requeue.
/* On encap failure with a surviving mbuf: mark OACTIVE and requeue
 * the packet at the head so ordering is preserved. */
839 if (lem_xmit(adapter, &m_head)) {
842 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
843 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
847 /* Send a copy of the frame to the BPF listener */
848 ETHER_BPF_MTAP(ifp, m_head);
850 /* Set timeout in case hardware has problems transmitting. */
851 adapter->watchdog_check = TRUE;
852 adapter->watchdog_time = ticks;
/* Throttle: stop accepting packets once the free-descriptor count
 * falls to the operating threshold. */
854 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
855 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/* ifnet if_start entry point: takes the TX lock around the locked
 * variant. NOTE(review): the EM_TX_LOCK acquisition line is not
 * visible in this excerpt (only the unlock is) -- confirm. */
861 lem_start(struct ifnet *ifp)
863 struct adapter *adapter = ifp->if_softc;
866 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
867 lem_start_locked(ifp);
868 EM_TX_UNLOCK(adapter);
871 /*********************************************************************
874 * em_ioctl is called when the user wants to configure the
877 * return 0 on success, positive on failure
878 **********************************************************************/
/* ifnet ioctl handler. NOTE(review): the switch statement and its
 * case labels (SIOCSIFADDR/SIOCSIFMTU/SIOCSIFFLAGS/etc.), the break
 * statements, and the final return are not visible in this excerpt;
 * the per-command bodies below are annotated from their debug strings. */
881 lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
883 struct adapter *adapter = ifp->if_softc;
884 struct ifreq *ifr = (struct ifreq *)data;
886 struct ifaddr *ifa = (struct ifaddr *)data;
/* Refuse configuration once detach has started (see lem_detach). */
890 if (adapter->in_detach)
/* Set-address: for AF_INET avoid the full reinit unless the interface
 * is not yet running, then let ARP take care of the rest. */
896 if (ifa->ifa_addr->sa_family == AF_INET) {
899 * Since resetting hardware takes a very long time
900 * and results in link renegotiation we only
901 * initialize the hardware only when it is absolutely
904 ifp->if_flags |= IFF_UP;
905 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
906 EM_CORE_LOCK(adapter);
907 lem_init_locked(adapter);
908 EM_CORE_UNLOCK(adapter);
910 arp_ifinit(ifp, ifa);
913 error = ether_ioctl(ifp, command, data);
919 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
/* MTU change: maximum frame size depends on MAC type, then the
 * adapter is reinitialized with the new max_frame_size. */
921 EM_CORE_LOCK(adapter);
922 switch (adapter->hw.mac.type) {
924 max_frame_size = ETHER_MAX_LEN;
927 max_frame_size = MAX_JUMBO_FRAME_SIZE;
929 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
931 EM_CORE_UNLOCK(adapter);
936 ifp->if_mtu = ifr->ifr_mtu;
937 adapter->max_frame_size =
938 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
939 lem_init_locked(adapter);
940 EM_CORE_UNLOCK(adapter);
944 IOCTL_DEBUGOUT("ioctl rcv'd:\
945 SIOCSIFFLAGS (Set Interface Flags)");
/* Flags change: only re-program promiscuous/allmulti when those bits
 * actually toggled; otherwise (re)initialize or stop as needed. */
946 EM_CORE_LOCK(adapter);
947 if (ifp->if_flags & IFF_UP) {
948 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
949 if ((ifp->if_flags ^ adapter->if_flags) &
950 (IFF_PROMISC | IFF_ALLMULTI)) {
951 lem_disable_promisc(adapter);
952 lem_set_promisc(adapter);
955 lem_init_locked(adapter);
957 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
960 EM_TX_UNLOCK(adapter);
962 adapter->if_flags = ifp->if_flags;
963 EM_CORE_UNLOCK(adapter);
967 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
/* Multicast list change: reload the filter with interrupts masked;
 * 82542 rev 2 additionally needs its receive unit reprogrammed. */
968 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
969 EM_CORE_LOCK(adapter);
970 lem_disable_intr(adapter);
971 lem_set_multi(adapter);
972 if (adapter->hw.mac.type == e1000_82542 &&
973 adapter->hw.revision_id == E1000_REVISION_2) {
974 lem_initialize_receive_unit(adapter);
976 #ifdef DEVICE_POLLING
977 if (!(ifp->if_capenable & IFCAP_POLLING))
979 lem_enable_intr(adapter);
980 EM_CORE_UNLOCK(adapter);
984 /* Check SOL/IDER usage */
985 EM_CORE_LOCK(adapter);
986 if (e1000_check_reset_block(&adapter->hw)) {
987 EM_CORE_UNLOCK(adapter);
988 device_printf(adapter->dev, "Media change is"
989 " blocked due to SOL/IDER session.\n");
992 EM_CORE_UNLOCK(adapter);
994 IOCTL_DEBUGOUT("ioctl rcv'd: \
995 SIOCxIFMEDIA (Get/Set Interface Media)");
996 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
1002 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
/* Capability change: mask holds the toggled capability bits. */
1004 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1005 #ifdef DEVICE_POLLING
1006 if (mask & IFCAP_POLLING) {
1007 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1008 error = ether_poll_register(lem_poll, ifp);
1011 EM_CORE_LOCK(adapter);
1012 lem_disable_intr(adapter);
1013 ifp->if_capenable |= IFCAP_POLLING;
1014 EM_CORE_UNLOCK(adapter);
1016 error = ether_poll_deregister(ifp);
1017 /* Enable interrupt even in error case */
1018 EM_CORE_LOCK(adapter);
1019 lem_enable_intr(adapter);
1020 ifp->if_capenable &= ~IFCAP_POLLING;
1021 EM_CORE_UNLOCK(adapter);
1025 if (mask & IFCAP_HWCSUM) {
1026 ifp->if_capenable ^= IFCAP_HWCSUM;
1029 if (mask & IFCAP_VLAN_HWTAGGING) {
1030 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
/* WOL sub-capabilities may only be toggled when the hardware
 * advertises IFCAP_WOL at all. */
1033 if ((mask & IFCAP_WOL) &&
1034 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1035 if (mask & IFCAP_WOL_MCAST)
1036 ifp->if_capenable ^= IFCAP_WOL_MCAST;
1037 if (mask & IFCAP_WOL_MAGIC)
1038 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1040 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1042 #if __FreeBSD_version >= 700000
1043 VLAN_CAPABILITIES(ifp);
/* Unknown commands fall through to the generic Ethernet handler. */
1049 error = ether_ioctl(ifp, command, data);
1057 /*********************************************************************
1060 * This routine is used in two ways. It is used by the stack as
1061 * init entry point in network interface structure. It is also used
1062 * by the driver as a hw/sw initialization routine to get to a
1065 * return 0 on success, positive on failure
1066 **********************************************************************/
/*
 * Core (re)initialization path, called with the core lock held:
 * sizes the packet buffer (PBA), programs the MAC address, resets
 * the hardware, sets up VLAN/checksum offload, TX/RX rings,
 * multicast, promiscuous mode and finally enables interrupts.
 * NOTE(review): this listing is fragmented — some original lines
 * (braces, else branches, error returns) are elided.
 */
1069 lem_init_locked(struct adapter *adapter)
1071 	struct ifnet *ifp = adapter->ifp;
1072 	device_t dev = adapter->dev;
1075 	INIT_DEBUGOUT("lem_init: begin");
1077 	EM_CORE_LOCK_ASSERT(adapter);
	/* Quiesce the transmitter before reprogramming the chip. */
1079 	EM_TX_LOCK(adapter);
1081 	EM_TX_UNLOCK(adapter);
1084 	 * Packet Buffer Allocation (PBA)
1085 	 * Writing PBA sets the receive portion of the buffer
1086 	 * the remainder is used for the transmit buffer.
1088 	 * Devices before the 82547 had a Packet Buffer of 64K.
1089 	 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1090 	 * After the 82547 the buffer was reduced to 40K.
1091 	 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1092 	 * Note: default does not leave enough room for Jumbo Frame >10k.
1094 	switch (adapter->hw.mac.type) {
1096 	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1097 		if (adapter->max_frame_size > 8192)
1098 			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1100 			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* 82547 also tracks a software TX FIFO head for the
		 * half-duplex hang workaround (see lem_82547_move_tail). */
1101 		adapter->tx_fifo_head = 0;
1102 		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1103 		adapter->tx_fifo_size =
1104 		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1107 		/* Devices before 82547 had a Packet Buffer of 64K. */
1108 		if (adapter->max_frame_size > 8192)
1109 			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1111 			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1114 	INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
1115 	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1117 	/* Get the latest mac address, User can use a LAA */
1118 	bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1121 	/* Put the address into the Receive Address Array */
1122 	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1124 	/* Initialize the hardware */
1125 	if (lem_hardware_init(adapter)) {
1126 		device_printf(dev, "Unable to initialize the hardware\n");
1129 	lem_update_link_status(adapter);
1131 	/* Setup VLAN support, basic and offload if available */
1132 	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1134 #if __FreeBSD_version < 700029
	/* Pre-7.0 stacks: just turn on VLAN tag stripping via CTRL.VME. */
1135 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1137 		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1138 		ctrl |= E1000_CTRL_VME;
1139 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1142 	/* Use real VLAN Filter support */
1143 	lem_setup_vlan_hw_support(adapter);
1146 	/* Set hardware offload abilities */
1147 	ifp->if_hwassist = 0;
1148 	if (adapter->hw.mac.type >= e1000_82543) {
1149 		if (ifp->if_capenable & IFCAP_TXCSUM)
1150 			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1153 	/* Configure for OS presence */
1154 	lem_init_manageability(adapter);
1156 	/* Prepare transmit descriptors and buffers */
1157 	lem_setup_transmit_structures(adapter);
1158 	lem_initialize_transmit_unit(adapter);
1160 	/* Setup Multicast table */
1161 	lem_set_multi(adapter);
1163 	/* Prepare receive descriptors and buffers */
1164 	if (lem_setup_receive_structures(adapter)) {
1165 		device_printf(dev, "Could not setup receive structures\n");
1166 		EM_TX_LOCK(adapter);
1168 		EM_TX_UNLOCK(adapter);
1171 	lem_initialize_receive_unit(adapter);
1173 	/* Don't lose promiscuous settings */
1174 	lem_set_promisc(adapter);
1176 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1177 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1179 	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1180 	e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1182 	/* MSI/X configuration for 82574 */
1183 	if (adapter->hw.mac.type == e1000_82574) {
1185 		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1186 		tmp |= E1000_CTRL_EXT_PBA_CLR;
1187 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1189 		** Set the IVAR - interrupt vector routing.
1190 		** Each nibble represents a vector, high bit
1191 		** is enable, other 3 bits are the MSIX table
1192 		** entry, we map RXQ0 to 0, TXQ0 to 1, and
1193 		** Link (other) to 2, hence the magic number.
1195 		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
1198 #ifdef DEVICE_POLLING
1200 	 * Only enable interrupts if we are not polling, make sure
1201 	 * they are off otherwise.
1203 	if (ifp->if_capenable & IFCAP_POLLING)
1204 		lem_disable_intr(adapter);
1206 #endif /* DEVICE_POLLING */
1207 	lem_enable_intr(adapter);
1209 	/* AMT based hardware can now take control from firmware */
1210 	if (adapter->has_manage && adapter->has_amt)
1211 		lem_get_hw_control(adapter);
1213 	/* Don't reset the phy next time init gets called */
1214 	adapter->hw.phy.reset_disable = TRUE;
	/* lem_init: if_init entry point — take the core lock and run
	 * the locked initializer.  (Signature line elided in listing.) */
1220 	struct adapter *adapter = arg;
1222 	EM_CORE_LOCK(adapter);
1223 	lem_init_locked(adapter);
1224 	EM_CORE_UNLOCK(adapter);
1228 #ifdef DEVICE_POLLING
1229 /*********************************************************************
1231 * Legacy polling routine
1233 *********************************************************************/
/*
 * DEVICE_POLLING handler: on POLL_AND_CHECK_STATUS re-check link
 * via ICR, then reap up to 'count' RX descriptors and restart TX.
 * Returns early (under the core lock release) if the interface is
 * not running.
 */
1235 lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1237 	struct adapter *adapter = ifp->if_softc;
1238 	u32 reg_icr, rx_done = 0;
1240 	EM_CORE_LOCK(adapter);
1241 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1242 		EM_CORE_UNLOCK(adapter);
1246 	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Reading ICR clears pending causes; look for link events. */
1247 		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1248 		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1249 			callout_stop(&adapter->timer);
1250 			adapter->hw.mac.get_link_status = 1;
1251 			lem_update_link_status(adapter);
1252 			callout_reset(&adapter->timer, hz,
1253 			    lem_local_timer, adapter);
1256 	EM_CORE_UNLOCK(adapter);
1258 	rx_done = lem_rxeof(adapter, count);
1260 	EM_TX_LOCK(adapter);
1262 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1263 		lem_start_locked(ifp);
1264 	EM_TX_UNLOCK(adapter);
1267 #endif /* DEVICE_POLLING */
1269 #ifdef EM_LEGACY_IRQ
1270 /*********************************************************************
1272 * Legacy Interrupt Service routine
1274 *********************************************************************/
	/* lem_intr: legacy (EM_LEGACY_IRQ) interrupt handler — bail out
	 * when polling owns the device; otherwise read/clear ICR, handle
	 * link changes, reap RX and restart TX.
	 * (Signature line elided in listing.) */
1279 	struct adapter *adapter = arg;
1280 	struct ifnet *ifp = adapter->ifp;
1284 	if (ifp->if_capenable & IFCAP_POLLING)
1287 	EM_CORE_LOCK(adapter);
1288 	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1289 	if (reg_icr & E1000_ICR_RXO)
1290 		adapter->rx_overruns++;
	/* 0xffffffff means the device is gone; 0 means not our interrupt. */
1292 	if ((reg_icr == 0xffffffff) || (reg_icr == 0))
1295 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1298 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1299 		callout_stop(&adapter->timer);
1300 		adapter->hw.mac.get_link_status = 1;
1301 		lem_update_link_status(adapter);
1302 		/* Deal with TX cruft when link lost */
1303 		lem_tx_purge(adapter);
1304 		callout_reset(&adapter->timer, hz,
1305 		    lem_local_timer, adapter);
1309 	EM_TX_LOCK(adapter);
1311 	lem_rxeof(adapter, -1);
1313 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1314 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1315 		lem_start_locked(ifp);
1316 	EM_TX_UNLOCK(adapter);
1319 	EM_CORE_UNLOCK(adapter);
1323 #else /* EM_FAST_IRQ, then fast interrupt routines only */
/*
 * Deferred link-change task (fast-interrupt path): refresh link
 * state, purge stale TX work and re-arm the watchdog timer.
 * No-op when the interface is not running.
 */
1326 lem_handle_link(void *context, int pending)
1328 	struct adapter *adapter = context;
1329 	struct ifnet *ifp = adapter->ifp;
1331 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1334 	EM_CORE_LOCK(adapter);
1335 	callout_stop(&adapter->timer);
1336 	lem_update_link_status(adapter);
1337 	/* Deal with TX cruft when link lost */
1338 	lem_tx_purge(adapter);
1339 	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1340 	EM_CORE_UNLOCK(adapter);
1344 /* Combined RX/TX handler, used by Legacy and MSI */
/* Combined RX/TX handler, used by Legacy and MSI */
/*
 * Deferred RX/TX task: reap up to rx_process_limit RX descriptors,
 * re-queueing itself if more work remains, then restart TX and
 * finally re-enable interrupts (masked by lem_irq_fast).
 */
1346 lem_handle_rxtx(void *context, int pending)
1348 	struct adapter *adapter = context;
1349 	struct ifnet *ifp = adapter->ifp;
1352 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Non-zero return means the RX limit was hit: reschedule. */
1353 		if (lem_rxeof(adapter, adapter->rx_process_limit) != 0)
1354 			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1355 		EM_TX_LOCK(adapter);
1357 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1358 			lem_start_locked(ifp);
1359 		EM_TX_UNLOCK(adapter);
1362 	lem_enable_intr(adapter);
1365 /*********************************************************************
1367 * Fast Legacy/MSI Combined Interrupt Service routine
1369 *********************************************************************/
1370 #if __FreeBSD_version < 700000
1371 #define FILTER_STRAY
1372 #define FILTER_HANDLED
/*
 * Fast interrupt filter: claims or rejects the interrupt, masks
 * further interrupts and defers all real work to taskqueues.
 * Runs in filter context — must not sleep or take sleepable locks.
 */
1377 lem_irq_fast(void *arg)
1379 	struct adapter *adapter = arg;
1385 	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
	/* All-ones read: device removed / bus error — not ours. */
1388 	if (reg_icr == 0xffffffff)
1389 		return FILTER_STRAY;
1391 	/* Definitely not our interrupt. */
1393 		return FILTER_STRAY;
1396 	 * Mask interrupts until the taskqueue is finished running. This is
1397 	 * cheap, just assume that it is needed. This also works around the
1398 	 * MSI message reordering errata on certain systems.
1400 	lem_disable_intr(adapter);
1401 	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1403 	/* Link status change */
1404 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1405 		adapter->hw.mac.get_link_status = 1;
1406 		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1409 	if (reg_icr & E1000_ICR_RXO)
1410 		adapter->rx_overruns++;
1411 	return FILTER_HANDLED;
1413 #endif /* ~EM_LEGACY_IRQ */
1416 /*********************************************************************
1418 * Media Ioctl callback
1420 * This routine is called whenever the user queries the status of
1421 * the interface using ifconfig.
1423 **********************************************************************/
/*
 * ifmedia status callback: report link validity/activity and the
 * active media word (fiber/serdes vs copper speed+duplex) back to
 * the stack for ifconfig.
 */
1425 lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1427 	struct adapter *adapter = ifp->if_softc;
1428 	u_char fiber_type = IFM_1000_SX;
1430 	INIT_DEBUGOUT("lem_media_status: begin");
1432 	EM_CORE_LOCK(adapter);
1433 	lem_update_link_status(adapter);
1435 	ifmr->ifm_status = IFM_AVALID;
1436 	ifmr->ifm_active = IFM_ETHER;
1438 	if (!adapter->link_active) {
1439 		EM_CORE_UNLOCK(adapter);
1443 	ifmr->ifm_status |= IFM_ACTIVE;
1445 	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1446 	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		/* 82545 fiber reports LX rather than the default SX. */
1447 		if (adapter->hw.mac.type == e1000_82545)
1448 			fiber_type = IFM_1000_LX;
1449 		ifmr->ifm_active |= fiber_type | IFM_FDX;
1451 		switch (adapter->link_speed) {
1453 			ifmr->ifm_active |= IFM_10_T;
1456 			ifmr->ifm_active |= IFM_100_TX;
1459 			ifmr->ifm_active |= IFM_1000_T;
1462 		if (adapter->link_duplex == FULL_DUPLEX)
1463 			ifmr->ifm_active |= IFM_FDX;
1465 			ifmr->ifm_active |= IFM_HDX;
1467 	EM_CORE_UNLOCK(adapter);
1470 /*********************************************************************
1472 * Media Ioctl callback
1474 * This routine is called when the user changes speed/duplex using
1475 * media/mediopt option with ifconfig.
1477 **********************************************************************/
/*
 * ifmedia change callback: translate the requested media word into
 * autoneg/forced speed+duplex settings, then re-run the locked init
 * (with PHY reset re-enabled) so the new settings take effect.
 */
1479 lem_media_change(struct ifnet *ifp)
1481 	struct adapter *adapter = ifp->if_softc;
1482 	struct ifmedia *ifm = &adapter->media;
1484 	INIT_DEBUGOUT("lem_media_change: begin");
1486 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1489 	EM_CORE_LOCK(adapter);
1490 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1492 		adapter->hw.mac.autoneg = DO_AUTO_NEG;
1493 		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1498 		adapter->hw.mac.autoneg = DO_AUTO_NEG;
1499 		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1502 		adapter->hw.mac.autoneg = FALSE;
1503 		adapter->hw.phy.autoneg_advertised = 0;
1504 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1505 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1507 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1510 		adapter->hw.mac.autoneg = FALSE;
1511 		adapter->hw.phy.autoneg_advertised = 0;
1512 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1513 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1515 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1518 		device_printf(adapter->dev, "Unsupported media type\n");
1521 	/* As the speed/duplex settings my have changed we need to
	 * allow the PHY to be reset on the next init. */
1524 	adapter->hw.phy.reset_disable = FALSE;
1526 	lem_init_locked(adapter);
1527 	EM_CORE_UNLOCK(adapter);
1532 /*********************************************************************
1534 * This routine maps the mbufs to tx descriptors.
1536 * return 0 on success, positive on failure
1537 **********************************************************************/
/*
 * Map one mbuf chain onto TX descriptors and hand it to the NIC.
 * Handles EFBIG by defragmenting and retrying once, splits segments
 * for the 82544-on-PCIX erratum, applies checksum offload and VLAN
 * tagging, then advances TDT.  On 82547 half-duplex the tail move
 * is deferred to the FIFO-hang workaround instead.
 * NOTE(review): listing is fragmented; some branches/returns elided.
 */
1540 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1542 	bus_dma_segment_t segs[EM_MAX_SCATTER];
1544 	struct em_buffer *tx_buffer, *tx_buffer_mapped;
1545 	struct e1000_tx_desc *ctxd = NULL;
1546 	struct mbuf *m_head;
1547 	u32 txd_upper, txd_lower, txd_used, txd_saved;
1548 	int error, nsegs, i, j, first, last = 0;
1549 #if __FreeBSD_version < 700000
1553 	txd_upper = txd_lower = txd_used = txd_saved = 0;
1556 	 * Force a cleanup if number of TX descriptors
1557 	 * available hits the threshold
1559 	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1561 		/* Now do we at least have a minimal? */
1562 		if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
1563 			adapter->no_tx_desc_avail1++;
1569 	 * Map the packet for DMA
1571 	 * Capture the first descriptor index,
1572 	 * this descriptor will have the index
1573 	 * of the EOP which is the only one that
1574 	 * now gets a DONE bit writeback.
1576 	first = adapter->next_avail_tx_desc;
1577 	tx_buffer = &adapter->tx_buffer_area[first];
1578 	tx_buffer_mapped = tx_buffer;
1579 	map = tx_buffer->map;
1581 	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1582 	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1585 	 * There are two types of errors we can (try) to handle:
1586 	 * - EFBIG means the mbuf chain was too long and bus_dma ran
1587 	 *   out of segments.  Defragment the mbuf chain and try again.
1588 	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
1589 	 *   at this point in time.  Defer sending and try again later.
1590 	 * All other errors, in particular EINVAL, are fatal and prevent the
1591 	 * mbuf chain from ever going through.  Drop it and report error.
1593 	if (error == EFBIG) {
1596 		m = m_defrag(*m_headp, M_DONTWAIT);
1598 			adapter->mbuf_alloc_failed++;
		/* Retry the DMA load once with the defragmented chain. */
1606 		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1607 		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1610 			adapter->no_tx_dma_setup++;
1615 	} else if (error != 0) {
1616 		adapter->no_tx_dma_setup++;
	/* Keep 2 descriptors spare so head never catches tail. */
1620 	if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1621 		adapter->no_tx_desc_avail2++;
1622 		bus_dmamap_unload(adapter->txtag, map);
1627 	/* Do hardware assists */
1628 	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1629 		lem_transmit_checksum_setup(adapter, m_head,
1630 		    &txd_upper, &txd_lower);
1632 	i = adapter->next_avail_tx_desc;
1633 	if (adapter->pcix_82544)
1636 	/* Set up our transmit descriptors */
1637 	for (j = 0; j < nsegs; j++) {
1639 		bus_addr_t seg_addr;
1640 		/* If adapter is 82544 and on PCIX bus */
1641 		if(adapter->pcix_82544) {
1642 			DESC_ARRAY desc_array;
1643 			u32 array_elements, counter;
1645 			 * Check the Address and Length combination and
1646 			 * split the data accordingly
1648 			array_elements = lem_fill_descriptors(segs[j].ds_addr,
1649 			    segs[j].ds_len, &desc_array);
1650 			for (counter = 0; counter < array_elements; counter++) {
				/* Ring exhausted mid-packet: roll back and bail. */
1651 				if (txd_used == adapter->num_tx_desc_avail) {
1652 					adapter->next_avail_tx_desc = txd_saved;
1653 					adapter->no_tx_desc_avail2++;
1654 					bus_dmamap_unload(adapter->txtag, map);
1657 				tx_buffer = &adapter->tx_buffer_area[i];
1658 				ctxd = &adapter->tx_desc_base[i];
1659 				ctxd->buffer_addr = htole64(
1660 				    desc_array.descriptor[counter].address);
1661 				ctxd->lower.data = htole32(
1662 				    (adapter->txd_cmd | txd_lower | (u16)
1663 				    desc_array.descriptor[counter].length));
1665 				    htole32((txd_upper));
1667 				if (++i == adapter->num_tx_desc)
1669 				tx_buffer->m_head = NULL;
1670 				tx_buffer->next_eop = -1;
1674 			tx_buffer = &adapter->tx_buffer_area[i];
1675 			ctxd = &adapter->tx_desc_base[i];
1676 			seg_addr = segs[j].ds_addr;
1677 			seg_len  = segs[j].ds_len;
1678 			ctxd->buffer_addr = htole64(seg_addr);
1679 			ctxd->lower.data = htole32(
1680 			    adapter->txd_cmd | txd_lower | seg_len);
1684 			if (++i == adapter->num_tx_desc)
1686 			tx_buffer->m_head = NULL;
1687 			tx_buffer->next_eop = -1;
1691 	adapter->next_avail_tx_desc = i;
1693 	if (adapter->pcix_82544)
1694 		adapter->num_tx_desc_avail -= txd_used;
1696 		adapter->num_tx_desc_avail -= nsegs;
1699 	** Handle VLAN tag, this is the
1700 	** biggest difference between
1703 #if __FreeBSD_version < 700000
1704 	/* Find out if we are in vlan mode. */
1705 	mtag = VLAN_OUTPUT_TAG(ifp, m_head);
1707 		ctxd->upper.fields.special =
1708 		    htole16(VLAN_TAG_VALUE(mtag));
1709 		ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
1711 #else /* FreeBSD 7 */
1712 	if (m_head->m_flags & M_VLANTAG) {
1713 		/* Set the vlan id. */
1714 		ctxd->upper.fields.special =
1715 		    htole16(m_head->m_pkthdr.ether_vtag);
1716 		/* Tell hardware to add tag */
1717 		ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
	/* Swap the dmamaps so the mapped buffer is freed with the EOP. */
1721 	tx_buffer->m_head = m_head;
1722 	tx_buffer_mapped->map = tx_buffer->map;
1723 	tx_buffer->map = map;
1724 	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1727 	 * Last Descriptor of Packet
1728 	 * needs End Of Packet (EOP)
1729 	 * and Report Status (RS)
1732 	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1734 	 * Keep track in the first buffer which
1735 	 * descriptor will be written back
1737 	tx_buffer = &adapter->tx_buffer_area[first];
1738 	tx_buffer->next_eop = last;
1741 	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1742 	 * that this frame is available to transmit.
1744 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1745 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1746 	if (adapter->hw.mac.type == e1000_82547 &&
1747 	    adapter->link_duplex == HALF_DUPLEX)
1748 		lem_82547_move_tail(adapter);
1750 		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1751 		if (adapter->hw.mac.type == e1000_82547)
1752 			lem_82547_update_fifo_head(adapter,
1753 			    m_head->m_pkthdr.len);
1759 /*********************************************************************
1761 * 82547 workaround to avoid controller hang in half-duplex environment.
1762 * The workaround is to avoid queuing a large packet that would span
1763 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1764 * in this case. We do that only when FIFO is quiescent.
1766 **********************************************************************/
/*
 * 82547 half-duplex hang workaround: walk the descriptors between
 * hardware and software TDT, summing packet lengths; if queuing now
 * would span the internal TX FIFO boundary, retry on the next tick
 * instead of advancing TDT.  Called with the TX lock held.
 */
1768 lem_82547_move_tail(void *arg)
1770 	struct adapter *adapter = arg;
1771 	struct e1000_tx_desc *tx_desc;
1772 	u16 hw_tdt, sw_tdt, length = 0;
1775 	EM_TX_LOCK_ASSERT(adapter);
1777 	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
1778 	sw_tdt = adapter->next_avail_tx_desc;
1780 	while (hw_tdt != sw_tdt) {
1781 		tx_desc = &adapter->tx_desc_base[hw_tdt];
1782 		length += tx_desc->lower.flags.length;
1783 		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1784 		if (++hw_tdt == adapter->num_tx_desc)
		/* FIFO would wrap mid-packet: defer via 1-tick callout. */
1788 		if (lem_82547_fifo_workaround(adapter, length)) {
1789 			adapter->tx_fifo_wrk_cnt++;
1790 			callout_reset(&adapter->tx_fifo_timer, 1,
1791 			    lem_82547_move_tail, adapter);
1794 		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
1795 		lem_82547_update_fifo_head(adapter, length);
/*
 * Decide whether queuing a packet of 'len' bytes risks the 82547
 * half-duplex FIFO hang: returns non-zero when the padded packet
 * does not fit before the FIFO boundary and a reset was not possible.
 */
1802 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1804 	int fifo_space, fifo_pkt_len;
	/* Pad length to the FIFO header granularity. */
1806 	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1808 	if (adapter->link_duplex == HALF_DUPLEX) {
1809 		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1811 		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1812 			if (lem_82547_tx_fifo_reset(adapter))
/*
 * Advance the software copy of the 82547 TX FIFO head by the padded
 * packet length, wrapping at the FIFO size.
 */
1823 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1825 	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1827 	/* tx_fifo_head is always 16 byte aligned */
1828 	adapter->tx_fifo_head += fifo_pkt_len;
1829 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1830 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * Reset the 82547 internal TX FIFO pointers — only safe when the
 * FIFO is fully quiescent (ring head == tail, FIFO head == tail,
 * and no packets counted in the FIFO).  Temporarily disables the
 * TX unit while rewriting the pointers.
 */
1836 lem_82547_tx_fifo_reset(struct adapter *adapter)
1840 	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1841 	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1842 	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1843 	    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1844 	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1845 	    E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1846 	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1847 		/* Disable TX unit */
1848 		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1849 		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1850 		    tctl & ~E1000_TCTL_EN);
1852 		/* Reset FIFO pointers */
1853 		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1854 		    adapter->tx_head_addr);
1855 		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1856 		    adapter->tx_head_addr);
1857 		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1858 		    adapter->tx_head_addr);
1859 		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1860 		    adapter->tx_head_addr);
1862 		/* Re-enable TX unit */
1863 		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
1864 		E1000_WRITE_FLUSH(&adapter->hw);
1866 		adapter->tx_fifo_head = 0;
1867 		adapter->tx_fifo_reset_cnt++;
/*
 * Program RCTL promiscuous bits from the interface flags:
 * IFF_PROMISC enables unicast+multicast promiscuous mode,
 * IFF_ALLMULTI enables multicast promiscuous only.
 */
1877 lem_set_promisc(struct adapter *adapter)
1879 	struct ifnet *ifp = adapter->ifp;
1882 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1884 	if (ifp->if_flags & IFF_PROMISC) {
1885 		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1886 		/* Turn this on if you want to see bad packets */
1888 			reg_rctl |= E1000_RCTL_SBP;
1889 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1890 	} else if (ifp->if_flags & IFF_ALLMULTI) {
1891 		reg_rctl |= E1000_RCTL_MPE;
1892 		reg_rctl &= ~E1000_RCTL_UPE;
1893 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/* Clear all promiscuous / bad-packet RCTL bits (UPE, MPE, SBP). */
1898 lem_disable_promisc(struct adapter *adapter)
1902 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1904 	reg_rctl &=  (~E1000_RCTL_UPE);
1905 	reg_rctl &=  (~E1000_RCTL_MPE);
1906 	reg_rctl &=  (~E1000_RCTL_SBP);
1907 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1911 /*********************************************************************
1914 * This routine is called whenever multicast address list is updated.
1916 **********************************************************************/
/*
 * Rebuild the hardware multicast filter from the interface's
 * multicast list; falls back to multicast-promiscuous when the
 * list exceeds MAX_NUM_MULTICAST_ADDRESSES.  The 82542 rev2 needs
 * RCTL_RST bracketing (and MWI disabled) around the update.
 */
1919 lem_set_multi(struct adapter *adapter)
1921 	struct ifnet *ifp = adapter->ifp;
1922 	struct ifmultiaddr *ifma;
1924 	u8  *mta; /* Multicast array memory */
1927 	IOCTL_DEBUGOUT("lem_set_multi: begin");
1929 	if (adapter->hw.mac.type == e1000_82542 &&
1930 	    adapter->hw.revision_id == E1000_REVISION_2) {
1931 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1932 		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1933 			e1000_pci_clear_mwi(&adapter->hw);
1934 		reg_rctl |= E1000_RCTL_RST;
1935 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1939 	/* Allocate temporary memory to setup array */
1940 	mta = malloc(sizeof(u8) *
1941 	    (ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES),
1942 	    M_DEVBUF, M_NOWAIT | M_ZERO);
	/* NOTE(review): panicking on a transient M_NOWAIT allocation
	 * failure is heavy-handed; a deferred retry would be gentler. */
1944 		panic("lem_set_multi memory failure\n");
1946 #if __FreeBSD_version < 800000
1949 	if_maddr_rlock(ifp);
1951 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1952 		if (ifma->ifma_addr->sa_family != AF_LINK)
1955 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1958 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1959 		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
1962 #if __FreeBSD_version < 800000
1963 	IF_ADDR_UNLOCK(ifp);
1965 	if_maddr_runlock(ifp);
1967 	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1968 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1969 		reg_rctl |= E1000_RCTL_MPE;
1970 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1972 		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
1974 	if (adapter->hw.mac.type == e1000_82542 &&
1975 	    adapter->hw.revision_id == E1000_REVISION_2) {
1976 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1977 		reg_rctl &= ~E1000_RCTL_RST;
1978 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1980 		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1981 			e1000_pci_set_mwi(&adapter->hw);
1983 	free(mta, M_DEVBUF);
1987 /*********************************************************************
1990 * This routine checks for link status and updates statistics.
1992 **********************************************************************/
/*
 * Once-per-second housekeeping callout (core lock held by callout):
 * kick the RX/TX task, refresh link state and statistics, run the
 * smartspeed workaround, and fire the TX watchdog when no descriptor
 * has been cleaned within EM_WATCHDOG ticks.
 */
1995 lem_local_timer(void *arg)
1997 	struct adapter *adapter = arg;
1998 	struct ifnet *ifp = adapter->ifp;
2000 	EM_CORE_LOCK_ASSERT(adapter);
2002 	taskqueue_enqueue(adapter->tq,
2003 	    &adapter->rxtx_task);
2004 	lem_update_link_status(adapter);
2005 	lem_update_stats_counters(adapter);
2007 	if (lem_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
2008 		lem_print_hw_stats(adapter);
2010 	lem_smartspeed(adapter);
2013 	 * We check the watchdog: the time since
2014 	 * the last TX descriptor was cleaned.
2015 	 * This implies a functional TX engine.
2017 	if ((adapter->watchdog_check == TRUE) &&
2018 	    (ticks - adapter->watchdog_time > EM_WATCHDOG))
2021 	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	/* Watchdog fired: mark not-running and reinitialize the adapter. */
2024 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2025 	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2026 	adapter->watchdog_events++;
2027 	lem_init_locked(adapter);
/*
 * Refresh cached link state from the PHY/MAC per media type and
 * notify the stack (if_link_state_change) plus print a message on
 * up/down transitions.  Disables the TX watchdog when link drops.
 */
2031 lem_update_link_status(struct adapter *adapter)
2033 	struct e1000_hw *hw = &adapter->hw;
2034 	struct ifnet *ifp = adapter->ifp;
2035 	device_t dev = adapter->dev;
2038 	/* Get the cached link value or read phy for real */
2039 	switch (hw->phy.media_type) {
2040 	case e1000_media_type_copper:
2041 		if (hw->mac.get_link_status) {
2042 			/* Do the work to read phy */
2043 			e1000_check_for_link(hw);
2044 			link_check = !hw->mac.get_link_status;
2045 			if (link_check) /* ESB2 fix */
2046 				e1000_cfg_on_link_up(hw);
2050 	case e1000_media_type_fiber:
2051 		e1000_check_for_link(hw);
2052 		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2055 	case e1000_media_type_internal_serdes:
2056 		e1000_check_for_link(hw);
2057 		link_check = adapter->hw.mac.serdes_has_link;
2060 	case e1000_media_type_unknown:
2064 	/* Now check for a transition */
2065 	if (link_check && (adapter->link_active == 0)) {
2066 		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2067 		    &adapter->link_duplex);
2069 			device_printf(dev, "Link is up %d Mbps %s\n",
2070 			    adapter->link_speed,
2071 			    ((adapter->link_duplex == FULL_DUPLEX) ?
2072 			    "Full Duplex" : "Half Duplex"));
2073 		adapter->link_active = 1;
2074 		adapter->smartspeed = 0;
2075 		ifp->if_baudrate = adapter->link_speed * 1000000;
2076 		if_link_state_change(ifp, LINK_STATE_UP);
2077 	} else if (!link_check && (adapter->link_active == 1)) {
2078 		ifp->if_baudrate = adapter->link_speed = 0;
2079 		adapter->link_duplex = 0;
2081 			device_printf(dev, "Link is Down\n");
2082 		adapter->link_active = 0;
2083 		/* Link down, disable watchdog */
2084 		adapter->watchdog_check = FALSE;
2085 		if_link_state_change(ifp, LINK_STATE_DOWN);
2089 /*********************************************************************
2091 * This routine disables all traffic on the adapter by issuing a
2092 * global reset on the MAC and deallocates TX/RX buffers.
2094 * This routine should always be called with BOTH the CORE
2096 **********************************************************************/
	/* lem_stop: quiesce the adapter — mask interrupts, stop callouts,
	 * clear RUNNING/OACTIVE, issue a global MAC reset and turn the
	 * LED off.  Requires BOTH core and TX locks (asserted below).
	 * (Signature line elided in listing.) */
2101 	struct adapter *adapter = arg;
2102 	struct ifnet *ifp = adapter->ifp;
2104 	EM_CORE_LOCK_ASSERT(adapter);
2105 	EM_TX_LOCK_ASSERT(adapter);
2107 	INIT_DEBUGOUT("lem_stop: begin");
2109 	lem_disable_intr(adapter);
2110 	callout_stop(&adapter->timer);
2111 	callout_stop(&adapter->tx_fifo_timer);
2113 	/* Tell the stack that the interface is no longer active */
2114 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2116 	e1000_reset_hw(&adapter->hw);
	/* Clear Wake-Up Control on parts that have it (82544+). */
2117 	if (adapter->hw.mac.type >= e1000_82544)
2118 		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2120 	e1000_led_off(&adapter->hw);
2121 	e1000_cleanup_led(&adapter->hw);
2125 /*********************************************************************
2127 * Determine hardware revision.
2129 **********************************************************************/
/*
 * Read PCI config space to identify the board: ensures bus-master
 * and memory-access are enabled, caches vendor/device/revision and
 * subsystem IDs, then lets shared code derive the MAC type.
 */
2131 lem_identify_hardware(struct adapter *adapter)
2133 	device_t dev = adapter->dev;
2135 	/* Make sure our PCI config space has the necessary stuff set */
2136 	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2137 	if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2138 	    (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2139 		device_printf(dev, "Memory Access and/or Bus Master bits "
		/* Re-enable the bits ourselves rather than failing attach. */
2141 		adapter->hw.bus.pci_cmd_word |=
2142 		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2143 		pci_write_config(dev, PCIR_COMMAND,
2144 		    adapter->hw.bus.pci_cmd_word, 2);
2147 	/* Save off the information about this board */
2148 	adapter->hw.vendor_id = pci_get_vendor(dev);
2149 	adapter->hw.device_id = pci_get_device(dev);
2150 	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2151 	adapter->hw.subsystem_vendor_id =
2152 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2153 	adapter->hw.subsystem_device_id =
2154 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2156 	/* Do Shared Code Init and Setup */
2157 	if (e1000_set_mac_type(&adapter->hw)) {
2158 		device_printf(dev, "Setup init failure\n");
/*
 * Map the memory BAR (register access) and, when required, locate
 * and map the IO BAR by scanning config space; wires the osdep
 * bus-space handles used by the register macros.
 * Returns E1000_SUCCESS or an errno on allocation failure.
 */
2164 lem_allocate_pci_resources(struct adapter *adapter)
2166 	device_t dev = adapter->dev;
2167 	int val, rid, error = E1000_SUCCESS;
2170 	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2172 	if (adapter->memory == NULL) {
2173 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2176 	adapter->osdep.mem_bus_space_tag =
2177 	    rman_get_bustag(adapter->memory);
2178 	adapter->osdep.mem_bus_space_handle =
2179 	    rman_get_bushandle(adapter->memory);
2180 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2182 	/* Only older adapters use IO mapping */
2183 	if (adapter->hw.mac.type > e1000_82543) {
2184 		/* Figure our where our IO BAR is ? */
2185 		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2186 			val = pci_read_config(dev, rid, 4);
2187 			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2188 				adapter->io_rid = rid;
2192 			/* check for 64bit BAR */
2193 			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
		/* Reached CIS without finding an IO BAR: fatal. */
2196 		if (rid >= PCIR_CIS) {
2197 			device_printf(dev, "Unable to locate IO BAR\n");
2200 		adapter->ioport = bus_alloc_resource_any(dev,
2201 		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2202 		if (adapter->ioport == NULL) {
2203 			device_printf(dev, "Unable to allocate bus resource: "
2207 		adapter->hw.io_base = 0;
2208 		adapter->osdep.io_bus_space_tag =
2209 		    rman_get_bustag(adapter->ioport);
2210 		adapter->osdep.io_bus_space_handle =
2211 		    rman_get_bushandle(adapter->ioport);
2214 	adapter->hw.back = &adapter->osdep;
2219 /*********************************************************************
2221 * Setup the Legacy or MSI Interrupt handler
2223 **********************************************************************/
/*
 * Allocate the single IRQ resource and wire either the legacy
 * handler (EM_LEGACY_IRQ) or the fast filter + taskqueue pair,
 * accounting for the pre/post-7.0 bus_setup_intr() signatures.
 */
2225 lem_allocate_irq(struct adapter *adapter)
2227 	device_t dev = adapter->dev;
2230 	/* Manually turn off all interrupts */
2231 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2233 	/* We allocate a single interrupt resource */
2234 	adapter->res[0] = bus_alloc_resource_any(dev,
2235 	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2236 	if (adapter->res[0] == NULL) {
2237 		device_printf(dev, "Unable to allocate bus resource: "
2242 #ifdef EM_LEGACY_IRQ
2243 	/* We do Legacy setup */
2244 	if ((error = bus_setup_intr(dev, adapter->res[0],
2245 #if __FreeBSD_version > 700000
2246 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
2248 	    INTR_TYPE_NET | INTR_MPSAFE, lem_intr, adapter,
2250 	    &adapter->tag[0])) != 0) {
2251 		device_printf(dev, "Failed to register interrupt handler");
2255 #else /* FAST_IRQ */
2257 	 * Try allocating a fast interrupt and the associated deferred
2258 	 * processing contexts.
2260 	TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
2261 	TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
2262 	adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
2263 	    taskqueue_thread_enqueue, &adapter->tq);
2264 	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2265 	    device_get_nameunit(adapter->dev));
2266 #if __FreeBSD_version < 700000
2267 	if ((error = bus_setup_intr(dev, adapter->res[0],
2268 	    INTR_TYPE_NET | INTR_FAST, lem_irq_fast, adapter,
2270 	if ((error = bus_setup_intr(dev, adapter->res[0],
2271 	    INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
2273 	    &adapter->tag[0])) != 0) {
2274 		device_printf(dev, "Failed to register fast interrupt "
2275 		    "handler: %d\n", error);
		/* Undo the taskqueue on handler-registration failure. */
2276 		taskqueue_free(adapter->tq);
2280 #endif /* EM_LEGACY_IRQ */
/*
 * Release everything lem_allocate_pci_resources()/lem_allocate_irq()
 * acquired: interrupt handler+resource, memory BAR, IO BAR.
 * Each teardown is guarded so partial attach failures are safe.
 */
2287 lem_free_pci_resources(struct adapter *adapter)
2289 	device_t dev = adapter->dev;
2292 	if (adapter->tag[0] != NULL) {
2293 		bus_teardown_intr(dev, adapter->res[0],
2295 		adapter->tag[0] = NULL;
2298 	if (adapter->res[0] != NULL) {
2299 		bus_release_resource(dev, SYS_RES_IRQ,
2300 		    0, adapter->res[0]);
2303 	if (adapter->memory != NULL)
2304 		bus_release_resource(dev, SYS_RES_MEMORY,
2305 		    PCIR_BAR(0), adapter->memory);
2307 	if (adapter->ioport != NULL)
2308 		bus_release_resource(dev, SYS_RES_IOPORT,
2309 		    adapter->io_rid, adapter->ioport);
2313 /*********************************************************************
2315  *  Initialize the hardware to a configuration
2316  *  as specified by the adapter structure.
2318  **********************************************************************/
/*
 * lem_hardware_init - global reset, flow-control watermark computation
 * from the on-chip packet buffer size, then e1000_init_hw() and a link
 * check.  NOTE(review): sampled excerpt — declarations, braces and the
 * return statements are not visible in this listing.
 */
2320 lem_hardware_init(struct adapter *adapter)
2322 device_t dev = adapter->dev;
2325 INIT_DEBUGOUT("lem_hardware_init: begin");
2327 /* Issue a global reset */
2328 e1000_reset_hw(&adapter->hw);
2330 /* When hardware is reset, fifo_head is also reset */
2331 adapter->tx_fifo_head = 0;
2334  * These parameters control the automatic generation (Tx) and
2335  * response (Rx) to Ethernet PAUSE frames.
2336  * - High water mark should allow for at least two frames to be
2337  *   received after sending an XOFF.
2338  * - Low water mark works best when it is very near the high water mark.
2339  *   This allows the receiver to restart by sending XON when it has
2340  *   drained a bit. Here we use an arbitary value of 1500 which will
2341  *   restart after one full frame is pulled from the buffer. There
2342  *   could be several smaller frames in the buffer and if so they will
2343  *   not trigger the XON until their total number reduces the buffer
2345  * - The pause time is fairly large at 1000 x 512ns = 512 usec.
/* PBA register holds the RX packet-buffer allocation in KB units. */
2347 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
2350 adapter->hw.fc.high_water = rx_buffer_size -
2351 roundup2(adapter->max_frame_size, 1024);
2352 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2354 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2355 adapter->hw.fc.send_xon = TRUE;
2357 /* Set Flow control, use the tunable location if sane */
/*
 * NOTE(review): with '||' this condition is ALWAYS true, so the
 * out-of-range fallback to e1000_fc_none below can never be taken.
 * The range check (0 <= lem_fc_setting < 4) was almost certainly
 * intended to use '&&' — confirm against the tunable's valid values.
 */
2358 if ((lem_fc_setting >= 0) || (lem_fc_setting < 4))
2359 adapter->hw.fc.requested_mode = lem_fc_setting;
2361 adapter->hw.fc.requested_mode = e1000_fc_none;
2363 if (e1000_init_hw(&adapter->hw) < 0) {
2364 device_printf(dev, "Hardware Initialization Failed\n");
/* Refresh link state after the reset/init sequence. */
2368 e1000_check_for_link(&adapter->hw);
2373 /*********************************************************************
2375  *  Setup networking device structure and register an interface.
2377  **********************************************************************/
/*
 * lem_setup_interface - allocate and populate the ifnet, attach it to
 * the ethernet layer, advertise capabilities (checksum offload, VLAN,
 * polling, WOL), and register the supported media types.
 * NOTE(review): sampled excerpt — local declarations and some closing
 * braces are not visible in this listing.
 */
2379 lem_setup_interface(device_t dev, struct adapter *adapter)
2383 INIT_DEBUGOUT("lem_setup_interface: begin");
2385 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2387 panic("%s: can not if_alloc()", device_get_nameunit(dev));
2388 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2389 ifp->if_mtu = ETHERMTU;
2390 ifp->if_init =  lem_init;
2391 ifp->if_softc = adapter;
2392 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2393 ifp->if_ioctl = lem_ioctl;
2394 ifp->if_start = lem_start;
/* Size the software send queue to the TX descriptor ring. */
2395 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2396 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2397 IFQ_SET_READY(&ifp->if_snd);
2399 ether_ifattach(ifp, adapter->hw.mac.addr);
2401 ifp->if_capabilities = ifp->if_capenable = 0;
/* HW checksum offload exists on 82543 and later MACs. */
2403 if (adapter->hw.mac.type >= e1000_82543) {
2405 #if __FreeBSD_version < 700000
2406 version_cap = IFCAP_HWCSUM;
2408 version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2410 ifp->if_capabilities |= version_cap;
2411 ifp->if_capenable |= version_cap;
2415  * Tell the upper layer(s) we support long frames.
2417 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2418 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2419 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2421 #ifdef DEVICE_POLLING
2422 ifp->if_capabilities |= IFCAP_POLLING;
2425 /* Enable only WOL MAGIC by default */
2427 ifp->if_capabilities |= IFCAP_WOL;
2428 ifp->if_capenable |= IFCAP_WOL_MAGIC;
2432  * Specify the media types supported by this adapter and register
2433  * callbacks to update media and link information
2435 ifmedia_init(&adapter->media, IFM_IMASK,
2436 lem_media_change, lem_media_status);
2437 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2438 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2439 u_char fiber_type = IFM_1000_SX;	/* default type */
/* 82545 fiber parts report 1000baseLX rather than SX. */
2441 if (adapter->hw.mac.type == e1000_82545)
2442 fiber_type = IFM_1000_LX;
2443 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
2445 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
/* Copper: advertise 10/100 half+full, and 1000T unless IFE PHY. */
2447 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2448 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2450 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2452 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2454 if (adapter->hw.phy.type != e1000_phy_ife) {
2455 ifmedia_add(&adapter->media,
2456 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2457 ifmedia_add(&adapter->media,
2458 IFM_ETHER | IFM_1000_T, 0, NULL);
/* Default to autoselect. */
2461 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2462 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2466 /*********************************************************************
2468  *  Workaround for SmartSpeed on 82541 and 82547 controllers
2470  **********************************************************************/
/*
 * lem_smartspeed - PHY workaround: if gigabit autonegotiation repeatedly
 * fails with a Master/Slave configuration fault, temporarily drop the
 * manual MS config and restart autoneg; after EM_SMARTSPEED_DOWNSHIFT
 * tries, re-enable it (2/3-pair cable case); reset the counter after
 * EM_SMARTSPEED_MAX iterations.  Called periodically while link is down.
 * NOTE(review): sampled excerpt — the phy_tmp declaration and several
 * closing braces are not visible in this listing.
 */
2472 lem_smartspeed(struct adapter *adapter)
/* Only applies to IGP PHYs autonegotiating 1000FULL with no link yet. */
2476 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2477 adapter->hw.mac.autoneg == 0 ||
2478 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2481 if (adapter->smartspeed == 0) {
2482 /* If Master/Slave config fault is asserted twice,
2483  * we assume back-to-back */
2484 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2485 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2487 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2488 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2489 e1000_read_phy_reg(&adapter->hw,
2490 PHY_1000T_CTRL, &phy_tmp);
/* Drop manual master/slave config and restart autoneg. */
2491 if(phy_tmp & CR_1000T_MS_ENABLE) {
2492 phy_tmp &= ~CR_1000T_MS_ENABLE;
2493 e1000_write_phy_reg(&adapter->hw,
2494 PHY_1000T_CTRL, phy_tmp);
2495 adapter->smartspeed++;
2496 if(adapter->hw.mac.autoneg &&
2497 !e1000_copper_link_autoneg(&adapter->hw) &&
2498 !e1000_read_phy_reg(&adapter->hw,
2499 PHY_CONTROL, &phy_tmp)) {
2500 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2501 MII_CR_RESTART_AUTO_NEG);
2502 e1000_write_phy_reg(&adapter->hw,
2503 PHY_CONTROL, phy_tmp);
2508 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2509 /* If still no link, perhaps using 2/3 pair cable */
2510 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2511 phy_tmp |= CR_1000T_MS_ENABLE;
2512 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2513 if(adapter->hw.mac.autoneg &&
2514 !e1000_copper_link_autoneg(&adapter->hw) &&
2515 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2516 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2517 MII_CR_RESTART_AUTO_NEG);
2518 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2521 /* Restart process after EM_SMARTSPEED_MAX iterations */
2522 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2523 adapter->smartspeed = 0;
2528  * Manage DMA'able memory.
/*
 * lem_dmamap_cb - bus_dmamap_load() callback: stores the bus address of
 * the single mapped segment into the caller-supplied bus_addr_t.
 * NOTE(review): the error-check line of this callback is missing from
 * this sampled listing.
 */
2531 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2535 *(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * lem_dma_malloc - create a DMA tag, allocate coherent DMA-able memory
 * of 'size' bytes, and load the map, returning the bus address via
 * lem_dmamap_cb into dma->dma_paddr.  On failure the goto-cleanup tail
 * unwinds whatever was created.  NOTE(review): sampled excerpt — the
 * goto labels and some tag-create arguments are missing here.
 */
2539 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2540 struct em_dma_alloc *dma, int mapflags)
/* Newer FreeBSD derives the tag from the device's parent tag. */
2544 #if __FreeBSD_version >= 700000
2545 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2547 error = bus_dma_tag_create(NULL,		 /* parent */
2549 EM_DBA_ALIGN, 0,	/* alignment, bounds */
2550 BUS_SPACE_MAXADDR,	/* lowaddr */
2551 BUS_SPACE_MAXADDR,	/* highaddr */
2552 NULL, NULL,		/* filter, filterarg */
2555 size,			/* maxsegsize */
2557 NULL,			/* lockfunc */
2561 device_printf(adapter->dev,
2562 "%s: bus_dma_tag_create failed: %d\n",
2567 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2568 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2570 device_printf(adapter->dev,
2571 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2572 __func__, (uintmax_t)size, error);
/* Load the map; lem_dmamap_cb records segs[0] into dma_paddr. */
2577 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2578 size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2579 if (error || dma->dma_paddr == 0) {
2580 device_printf(adapter->dev,
2581 "%s: bus_dmamap_load failed: %d\n",
/* Error unwind: undo in reverse order of acquisition. */
2589 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2591 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2592 bus_dma_tag_destroy(dma->dma_tag);
2594 dma->dma_map = NULL;
2595 dma->dma_tag = NULL;
/*
 * lem_dma_free - release a DMA area created by lem_dma_malloc: sync,
 * unload and free the map/memory, then destroy the tag.  Safe to call
 * when nothing was allocated (tag NULL check).
 */
2601 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2603 if (dma->dma_tag == NULL)
2605 if (dma->dma_map != NULL) {
2606 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2607 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2608 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2609 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2610 dma->dma_map = NULL;
2612 bus_dma_tag_destroy(dma->dma_tag);
2613 dma->dma_tag = NULL;
2617 /*********************************************************************
2619  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2620  *  the information needed to transmit a packet on the wire.
2622  **********************************************************************/
/*
 * lem_allocate_transmit_structures - create the TX DMA tag, allocate
 * the tx_buffer array (one per TX descriptor), and create a DMA map per
 * buffer.  On failure everything is released via
 * lem_free_transmit_structures().  NOTE(review): sampled excerpt —
 * goto labels and return statements are not visible here.
 */
2624 lem_allocate_transmit_structures(struct adapter *adapter)
2626 device_t dev = adapter->dev;
2627 struct em_buffer *tx_buffer;
2631  * Create DMA tags for tx descriptors
2633 #if __FreeBSD_version >= 700000
2634 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2636 if ((error = bus_dma_tag_create(NULL,		 /* parent */
2638 1, 0,			/* alignment, bounds */
2639 BUS_SPACE_MAXADDR,	/* lowaddr */
2640 BUS_SPACE_MAXADDR,	/* highaddr */
2641 NULL, NULL,		/* filter, filterarg */
2642 EM_TSO_SIZE,		/* maxsize */
2643 EM_MAX_SCATTER,		/* nsegments */
2644 EM_TSO_SEG_SIZE,	/* maxsegsize */
2646 NULL,			/* lockfunc */
2648 &adapter->txtag)) != 0) {
2649 device_printf(dev, "Unable to allocate TX DMA tag\n");
2653 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2654 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2655 if (adapter->tx_buffer_area == NULL) {
2656 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2661 /* Create the descriptor buffer dma maps */
2662 for (int i = 0; i < adapter->num_tx_desc; i++) {
2663 tx_buffer = &adapter->tx_buffer_area[i];
2664 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2666 device_printf(dev, "Unable to create TX DMA map\n");
/* -1 marks "no end-of-packet descriptor" for lem_txeof(). */
2669 tx_buffer->next_eop = -1;
/* Failure path: undo all of the above. */
2674 lem_free_transmit_structures(adapter);
2678 /*********************************************************************
2680  *  (Re)Initialize transmit structures.
2682  **********************************************************************/
/*
 * lem_setup_transmit_structures - reset the TX ring to a clean state:
 * zero the descriptor ring, free any mbufs still attached to tx_buffers,
 * reset the ring indices, and sync the descriptor DMA map.
 */
2684 lem_setup_transmit_structures(struct adapter *adapter)
2686 struct em_buffer *tx_buffer;
2688 /* Clear the old ring contents */
2689 bzero(adapter->tx_desc_base,
2690 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2692 /* Free any existing TX buffers */
/*
 * NOTE(review): the 'tx_buffer++' in the increment clause is redundant —
 * tx_buffer is reassigned from the array on the next line every
 * iteration — but it is harmless.
 */
2693 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2694 tx_buffer = &adapter->tx_buffer_area[i];
2695 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2696 BUS_DMASYNC_POSTWRITE);
2697 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2698 m_freem(tx_buffer->m_head);
2699 tx_buffer->m_head = NULL;
2700 tx_buffer->next_eop = -1;
/* Ring starts empty: all descriptors available, indices at zero. */
2704 adapter->next_avail_tx_desc = 0;
2705 adapter->next_tx_to_clean = 0;
2706 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2708 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2709 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2714 /*********************************************************************
2716  *  Enable transmit unit.
2718  **********************************************************************/
/*
 * lem_initialize_transmit_unit - program the TX ring base/length and
 * head/tail registers, the inter-packet-gap timer for the MAC type,
 * the TX interrupt delay values, and finally TCTL (which enables the
 * transmit unit).  NOTE(review): sampled excerpt — local declarations
 * (tctl, tipg, bus_addr), case labels and some braces are missing here.
 */
2720 lem_initialize_transmit_unit(struct adapter *adapter)
2725 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
2726 /* Setup the Base and Length of the Tx Descriptor Ring */
2727 bus_addr = adapter->txdma.dma_paddr;
2728 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2729 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2730 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2731 (u32)(bus_addr >> 32));
2732 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2734 /* Setup the HW Tx Head and Tail descriptor pointers */
2735 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2736 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
2738 HW_DEBUGOUT2("Base = %x, Length = %x\n",
2739 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
2740 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
2742 /* Set the default values for the Tx Inter Packet Gap timer */
2743 switch (adapter->hw.mac.type) {
/* 82542 uses its own IPG defaults; fiber/serdes differ from copper. */
2745 tipg = DEFAULT_82542_TIPG_IPGT;
2746 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2747 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2750 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2751 (adapter->hw.phy.media_type ==
2752 e1000_media_type_internal_serdes))
2753 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2755 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2756 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2757 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2760 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2761 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
/* TADV (absolute TX interrupt delay) only exists on 82540 and later. */
2762 if(adapter->hw.mac.type >= e1000_82540)
2763 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
2764 adapter->tx_abs_int_delay.value);
2766 /* Program the Transmit Control Register */
2767 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2768 tctl &= ~E1000_TCTL_CT;
2769 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2770 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2772 /* This write will effectively turn on the transmit unit. */
2773 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2775 /* Setup Transmit Descriptor Base Settings */
2776 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
/* If a TX interrupt delay is configured, request delayed interrupts. */
2778 if (adapter->tx_int_delay.value > 0)
2779 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2782 /*********************************************************************
2784  *  Free all transmit related data structures.
2786  **********************************************************************/
/*
 * lem_free_transmit_structures - release every TX-side resource: per-
 * descriptor mbufs and DMA maps, the tx_buffer array, the TX DMA tag,
 * and (FreeBSD 8+) the buf_ring.  All checks are NULL-safe so this can
 * run from any partial-allocation failure path.
 */
2788 lem_free_transmit_structures(struct adapter *adapter)
2790 struct em_buffer *tx_buffer;
2792 INIT_DEBUGOUT("free_transmit_structures: begin");
2794 if (adapter->tx_buffer_area != NULL) {
2795 for (int i = 0; i < adapter->num_tx_desc; i++) {
2796 tx_buffer = &adapter->tx_buffer_area[i];
2797 if (tx_buffer->m_head != NULL) {
2798 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2799 BUS_DMASYNC_POSTWRITE);
2800 bus_dmamap_unload(adapter->txtag,
2802 m_freem(tx_buffer->m_head);
2803 tx_buffer->m_head = NULL;
2804 } else if (tx_buffer->map != NULL)
2805 bus_dmamap_unload(adapter->txtag,
/* Maps are destroyed whether or not an mbuf was attached. */
2807 if (tx_buffer->map != NULL) {
2808 bus_dmamap_destroy(adapter->txtag,
2810 tx_buffer->map = NULL;
2814 if (adapter->tx_buffer_area != NULL) {
2815 free(adapter->tx_buffer_area, M_DEVBUF);
2816 adapter->tx_buffer_area = NULL;
2818 if (adapter->txtag != NULL) {
2819 bus_dma_tag_destroy(adapter->txtag);
2820 adapter->txtag = NULL;
2822 #if __FreeBSD_version >= 800000
2823 if (adapter->br != NULL)
2824 buf_ring_free(adapter->br, M_DEVBUF);
2828 /*********************************************************************
2830  *  The offload context needs to be set when we transfer the first
2831  *  packet of a particular protocol (TCP/UDP). This routine has been
2832  *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2834  *  Added back the old method of keeping the current context type
2835  *  and not setting if unnecessary, as this is reported to be a
2836  *  big performance win. -jfv
2837  **********************************************************************/
/*
 * lem_transmit_checksum_setup - build (when needed) a context descriptor
 * that tells the hardware where to insert IP/TCP/UDP checksums for the
 * outgoing mbuf, setting *txd_upper/*txd_lower for the subsequent data
 * descriptors.  Skips writing a new context when the last one already
 * matches (last_hw_offload cache).  Consumes one TX descriptor when a
 * context is written.  NOTE(review): sampled excerpt — switch labels,
 * returns and several braces are missing from this listing.
 */
2839 lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2840 u32 *txd_upper, u32 *txd_lower)
2842 struct e1000_context_desc *TXD = NULL;
2843 struct em_buffer *tx_buffer;
2844 struct ether_vlan_header *eh;
2845 struct ip *ip = NULL;
2846 struct ip6_hdr *ip6;
2847 int curr_txd, ehdrlen;
2848 u32 cmd, hdr_len, ip_hlen;
2853 cmd = hdr_len = ipproto = 0;
2854 curr_txd = adapter->next_avail_tx_desc;
2857  * Determine where frame payload starts.
2858  * Jump over vlan headers if already present,
2859  * helpful for QinQ too.
2861 eh = mtod(mp, struct ether_vlan_header *);
2862 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2863 etype = ntohs(eh->evl_proto);
2864 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2866 etype = ntohs(eh->evl_encap_proto);
2867 ehdrlen = ETHER_HDR_LEN;
2871  * We only support TCP/UDP for IPv4 and IPv6 for the moment.
2872  * TODO: Support SCTP too when it hits the tree.
/* IPv4 case: parse header length and optionally set up IP csum. */
2876 ip = (struct ip *)(mp->m_data + ehdrlen);
2877 ip_hlen = ip->ip_hl << 2;
2879 /* Setup of IP header checksum. */
2880 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
2882  * Start offset for header checksum calculation.
2883  * End offset for header checksum calculation.
2884  * Offset of place to put the checksum.
2886 TXD = (struct e1000_context_desc *)
2887 &adapter->tx_desc_base[curr_txd];
2888 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2889 TXD->lower_setup.ip_fields.ipcse =
2890 htole16(ehdrlen + ip_hlen);
2891 TXD->lower_setup.ip_fields.ipcso =
2892 ehdrlen + offsetof(struct ip, ip_sum);
2893 cmd |= E1000_TXD_CMD_IP;
2894 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
/* Headers must be contiguous in the first mbuf; bail if not. */
2897 if (mp->m_len < ehdrlen + ip_hlen)
2898 return;	/* failure */
2900 hdr_len = ehdrlen + ip_hlen;
2904 case ETHERTYPE_IPV6:
2905 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2906 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
2908 if (mp->m_len < ehdrlen + ip_hlen)
2909 return;	/* failure */
2911 /* IPv6 doesn't have a header checksum. */
2913 hdr_len = ehdrlen + ip_hlen;
2914 ipproto = ip6->ip6_nxt;
/* TCP payload checksum context. */
2925 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2926 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2927 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2928 /* no need for context if already set */
2929 if (adapter->last_hw_offload == CSUM_TCP)
2931 adapter->last_hw_offload = CSUM_TCP;
2933  * Start offset for payload checksum calculation.
2934  * End offset for payload checksum calculation.
2935  * Offset of place to put the checksum.
2937 TXD = (struct e1000_context_desc *)
2938 &adapter->tx_desc_base[curr_txd];
2939 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2940 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2941 TXD->upper_setup.tcp_fields.tucso =
2942 hdr_len + offsetof(struct tcphdr, th_sum);
2943 cmd |= E1000_TXD_CMD_TCP;
/* UDP payload checksum context. */
2948 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2949 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2950 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2951 /* no need for context if already set */
2952 if (adapter->last_hw_offload == CSUM_UDP)
2954 adapter->last_hw_offload = CSUM_UDP;
2956  * Start offset for header checksum calculation.
2957  * End offset for header checksum calculation.
2958  * Offset of place to put the checksum.
2960 TXD = (struct e1000_context_desc *)
2961 &adapter->tx_desc_base[curr_txd];
2962 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2963 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2964 TXD->upper_setup.tcp_fields.tucso =
2965 hdr_len + offsetof(struct udphdr, uh_sum);
/* Finalize the context descriptor and consume one ring slot. */
2973 TXD->tcp_seg_setup.data = htole32(0);
2974 TXD->cmd_and_length =
2975 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
2976 tx_buffer = &adapter->tx_buffer_area[curr_txd];
2977 tx_buffer->m_head = NULL;
2978 tx_buffer->next_eop = -1;
2980 if (++curr_txd == adapter->num_tx_desc)
2983 adapter->num_tx_desc_avail--;
2984 adapter->next_avail_tx_desc = curr_txd;
2988 /**********************************************************************
2990  *  Examine each tx_buffer in the used queue. If the hardware is done
2991  *  processing the packet then free associated resources. The
2992  *  tx_buffer is put back on the free queue.
2994  **********************************************************************/
/*
 * lem_txeof - TX completion: walk the ring from next_tx_to_clean,
 * packet by packet (bounded by each packet's recorded next_eop), free
 * completed mbufs/maps, and return descriptors to the available pool.
 * Clears IFF_DRV_OACTIVE once enough room exists and disarms the
 * watchdog when the ring drains completely.  Caller must hold the TX
 * lock (asserted below).  NOTE(review): sampled excerpt — the early
 * return, 'done' assignments and some braces are missing here.
 */
2996 lem_txeof(struct adapter *adapter)
2998 int first, last, done, num_avail;
2999 struct em_buffer *tx_buffer;
3000 struct e1000_tx_desc   *tx_desc, *eop_desc;
3001 struct ifnet   *ifp = adapter->ifp;
3003 EM_TX_LOCK_ASSERT(adapter);
/* Nothing to clean if every descriptor is already free. */
3005 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3008 num_avail = adapter->num_tx_desc_avail;
3009 first = adapter->next_tx_to_clean;
3010 tx_desc = &adapter->tx_desc_base[first];
3011 tx_buffer = &adapter->tx_buffer_area[first];
3012 last = tx_buffer->next_eop;
3013 eop_desc = &adapter->tx_desc_base[last];
3016  * What this does is get the index of the
3017  * first descriptor AFTER the EOP of the
3018  * first packet, that way we can do the
3019  * simple comparison on the inner while loop.
3021 if (++last == adapter->num_tx_desc)
3025 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3026 BUS_DMASYNC_POSTREAD);
/* Outer loop: one completed packet (DD set on its EOP) per pass. */
3028 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3029 /* We clean the range of the packet */
3030 while (first != done) {
3031 tx_desc->upper.data = 0;
3032 tx_desc->lower.data = 0;
3033 tx_desc->buffer_addr = 0;
3036 if (tx_buffer->m_head) {
3038 bus_dmamap_sync(adapter->txtag,
3040 BUS_DMASYNC_POSTWRITE);
3041 bus_dmamap_unload(adapter->txtag,
3044 m_freem(tx_buffer->m_head);
3045 tx_buffer->m_head = NULL;
3047 tx_buffer->next_eop = -1;
/* Progress made: pet the watchdog timestamp. */
3048 adapter->watchdog_time = ticks;
3050 if (++first == adapter->num_tx_desc)
3053 tx_buffer = &adapter->tx_buffer_area[first];
3054 tx_desc = &adapter->tx_desc_base[first];
3056 /* See if we can continue to the next packet */
3057 last = tx_buffer->next_eop;
3059 eop_desc = &adapter->tx_desc_base[last];
3060 /* Get new done point */
3061 if (++last == adapter->num_tx_desc) last = 0;
3066 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3067 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3069 adapter->next_tx_to_clean = first;
3072  * If we have enough room, clear IFF_DRV_OACTIVE to
3073  * tell the stack that it is OK to send packets.
3074  * If there are no pending descriptors, clear the watchdog.
3076 if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
3077 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3078 if (num_avail == adapter->num_tx_desc) {
3079 adapter->watchdog_check = FALSE;
3080 adapter->num_tx_desc_avail = num_avail;
3085 adapter->num_tx_desc_avail = num_avail;
3089 /*********************************************************************
3091  *  When Link is lost sometimes there is work still in the TX ring
3092  *  which may result in a watchdog, rather than allow that we do an
3093  *  attempted cleanup and then reinit here. Note that this has been
3094  *  seens mostly with fiber adapters.
3096  **********************************************************************/
/*
 * lem_tx_purge - with link down and the watchdog armed, try a TX clean
 * under the TX lock; if work is still outstanding afterwards, fall back
 * to a full reinit.  NOTE(review): the lem_txeof() call between
 * EM_TX_LOCK/UNLOCK is missing from this sampled listing.
 */
3098 lem_tx_purge(struct adapter *adapter)
3100 if ((!adapter->link_active) && (adapter->watchdog_check)) {
3101 EM_TX_LOCK(adapter);
3103 EM_TX_UNLOCK(adapter);
3104 if (adapter->watchdog_check) /* Still outstanding? */
3105 lem_init_locked(adapter);
3109 /*********************************************************************
3111  *  Get a buffer from system mbuf buffer pool.
3113  **********************************************************************/
/*
 * lem_get_buf - allocate a 2K cluster mbuf for RX slot i, DMA-map it
 * via the spare map (then swap spare and slot maps so the old map
 * becomes the new spare), and write the bus address into the RX
 * descriptor.  NOTE(review): sampled excerpt — error returns and some
 * declarations are not visible here.
 */
3115 lem_get_buf(struct adapter *adapter, int i)
3118 bus_dma_segment_t	segs[1];
3120 struct em_buffer	*rx_buffer;
3123 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3125 adapter->mbuf_cluster_failed++;
3128 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Align the IP header when the full frame still fits in the cluster. */
3130 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3131 m_adj(m, ETHER_ALIGN);
3134  * Using memory from the mbuf cluster pool, invoke the
3135  * bus_dma machinery to arrange the memory mapping.
3137 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3138 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3144 /* If nsegs is wrong then the stack is corrupt. */
3145 KASSERT(nsegs == 1, ("Too many segments returned!"));
3147 rx_buffer = &adapter->rx_buffer_area[i];
3148 if (rx_buffer->m_head != NULL)
3149 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap: the freshly-loaded spare map becomes this slot's map. */
3151 map = rx_buffer->map;
3152 rx_buffer->map = adapter->rx_sparemap;
3153 adapter->rx_sparemap = map;
3154 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3155 rx_buffer->m_head = m;
3157 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3161 /*********************************************************************
3163  *  Allocate memory for rx_buffer structures. Since we use one
3164  *  rx_buffer per received packet, the maximum number of rx_buffer's
3165  *  that we'll need is equal to the number of receive descriptors
3166  *  that we've allocated.
3168  **********************************************************************/
/*
 * lem_allocate_receive_structures - allocate the rx_buffer array,
 * create the RX DMA tag (single MCLBYTES segment), the spare map used
 * by lem_get_buf(), and one DMA map per RX descriptor.  Failure paths
 * funnel to lem_free_receive_structures().  NOTE(review): sampled
 * excerpt — goto labels, returns and some braces are missing here.
 */
3170 lem_allocate_receive_structures(struct adapter *adapter)
3172 device_t dev = adapter->dev;
3173 struct em_buffer *rx_buffer;
3176 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3177 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3178 if (adapter->rx_buffer_area == NULL) {
3179 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3183 #if __FreeBSD_version >= 700000
3184 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3186 error = bus_dma_tag_create(NULL,		 /* parent */
3188 1, 0,			/* alignment, bounds */
3189 BUS_SPACE_MAXADDR,	/* lowaddr */
3190 BUS_SPACE_MAXADDR,	/* highaddr */
3191 NULL, NULL,		/* filter, filterarg */
3192 MCLBYTES,		/* maxsize */
3194 MCLBYTES,		/* maxsegsize */
3196 NULL,			/* lockfunc */
3200 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3205 /* Create the spare map (used by getbuf) */
3206 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3207 &adapter->rx_sparemap);
3209 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* One DMA map per RX descriptor slot. */
3214 rx_buffer = adapter->rx_buffer_area;
3215 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3216 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3219 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3228 lem_free_receive_structures(adapter);
3232 /*********************************************************************
3234  *  (Re)initialize receive structures.
3236  **********************************************************************/
/*
 * lem_setup_receive_structures - reset the RX ring: zero the descriptor
 * ring, free any mbufs still attached, repopulate every slot through
 * lem_get_buf(), then reset the scan index and sync the ring's DMA map.
 * NOTE(review): sampled excerpt — error handling after lem_get_buf()
 * and the return are not visible here.
 */
3238 lem_setup_receive_structures(struct adapter *adapter)
3240 struct em_buffer *rx_buffer;
3243 /* Reset descriptor ring */
3244 bzero(adapter->rx_desc_base,
3245 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3247 /* Free current RX buffers. */
3248 rx_buffer = adapter->rx_buffer_area;
3249 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3250 if (rx_buffer->m_head != NULL) {
3251 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3252 BUS_DMASYNC_POSTREAD);
3253 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3254 m_freem(rx_buffer->m_head);
3255 rx_buffer->m_head = NULL;
3259 /* Allocate new ones. */
3260 for (i = 0; i < adapter->num_rx_desc; i++) {
3261 error = lem_get_buf(adapter, i);
3266 /* Setup our descriptor pointers */
3267 adapter->next_rx_desc_to_check = 0;
3268 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3269 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3274 /*********************************************************************
3276  *  Enable receive unit.
3278  **********************************************************************/
/* Interrupt throttling: ITR counts in 256ns units, hence the /256. */
3279 #define MAX_INTS_PER_SEC	8000
3280 #define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)
/*
 * lem_initialize_receive_unit - disable receives, program interrupt
 * throttling/delays, the RX ring base/length registers, RCTL (buffer
 * size, long-packet enable, multicast filter offset), RX checksum
 * offload, then re-enable receives and set head/tail.  NOTE(review):
 * sampled excerpt — declarations (rctl, rxcsum, bus_addr), case labels
 * and some braces are missing from this listing.
 */
3283 lem_initialize_receive_unit(struct adapter *adapter)
3285 struct ifnet	*ifp = adapter->ifp;
3289 INIT_DEBUGOUT("lem_initialize_receive_unit: begin");
3292  * Make sure receives are disabled while setting
3293  * up the descriptor ring
3295 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3296 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3298 if (adapter->hw.mac.type >= e1000_82540) {
3299 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
3300 adapter->rx_abs_int_delay.value);
3302  * Set the interrupt throttling rate. Value is calculated
3303  * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3305 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
3309 ** When using MSIX interrupts we need to throttle
3310 ** using the EITR register (82574 only)
3313 for (int i = 0; i < 4; i++)
3314 E1000_WRITE_REG(&adapter->hw,
3315 E1000_EITR_82574(i), DEFAULT_ITR);
3317 /* Disable accelerated ackknowledge */
3318 if (adapter->hw.mac.type == e1000_82574)
3319 E1000_WRITE_REG(&adapter->hw,
3320 E1000_RFCTL, E1000_RFCTL_ACK_DIS);
3322 /* Setup the Base and Length of the Rx Descriptor Ring */
3323 bus_addr = adapter->rxdma.dma_paddr;
3324 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3325 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3326 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3327 (u32)(bus_addr >> 32));
3328 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3331 /* Setup the Receive Control Register */
3332 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3333 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3334 E1000_RCTL_RDMTS_HALF |
3335 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3337 /* Make sure VLAN Filters are off */
3338 rctl &= ~E1000_RCTL_VFE;
/* 82543 TBI workaround may require storing bad packets. */
3340 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3341 rctl |= E1000_RCTL_SBP;
3343 rctl &= ~E1000_RCTL_SBP;
/* Map configured RX buffer length to the RCTL size encoding. */
3345 switch (adapter->rx_buffer_len) {
3348 rctl |= E1000_RCTL_SZ_2048;
3351 rctl |= E1000_RCTL_SZ_4096 |
3352 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3355 rctl |= E1000_RCTL_SZ_8192 |
3356 E1000_RCTL_BSEX | E1000_RCTL_LPE;
3359 rctl |= E1000_RCTL_SZ_16384 |
3360 E1000_RCTL_BSEX | E1000_RCTL_LPE;
/* Long-packet enable tracks whether the MTU exceeds standard. */
3364 if (ifp->if_mtu > ETHERMTU)
3365 rctl |= E1000_RCTL_LPE;
3367 rctl &= ~E1000_RCTL_LPE;
3369 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
3370 if ((adapter->hw.mac.type >= e1000_82543) &&
3371 (ifp->if_capenable & IFCAP_RXCSUM)) {
3372 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3373 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3374 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3377 /* Enable Receives */
3378 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3381  * Setup the HW Rx Head and
3382  * Tail Descriptor Pointers
3384 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
3385 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
3390 /*********************************************************************
3392  *  Free receive related data structures.
3394  **********************************************************************/
/*
 * lem_free_receive_structures - release every RX-side resource: the
 * spare map, per-descriptor mbufs and DMA maps, the rx_buffer array,
 * and the RX DMA tag.  NULL-safe throughout, so usable from partial-
 * allocation failure paths.
 */
3396 lem_free_receive_structures(struct adapter *adapter)
3398 struct em_buffer *rx_buffer;
3401 INIT_DEBUGOUT("free_receive_structures: begin");
3403 if (adapter->rx_sparemap) {
3404 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3405 adapter->rx_sparemap = NULL;
3408 /* Cleanup any existing buffers */
3409 if (adapter->rx_buffer_area != NULL) {
3410 rx_buffer = adapter->rx_buffer_area;
3411 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3412 if (rx_buffer->m_head != NULL) {
3413 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3414 BUS_DMASYNC_POSTREAD);
3415 bus_dmamap_unload(adapter->rxtag,
3417 m_freem(rx_buffer->m_head);
3418 rx_buffer->m_head = NULL;
3419 } else if (rx_buffer->map != NULL)
3420 bus_dmamap_unload(adapter->rxtag,
/* Maps are destroyed whether or not an mbuf was attached. */
3422 if (rx_buffer->map != NULL) {
3423 bus_dmamap_destroy(adapter->rxtag,
3425 rx_buffer->map = NULL;
3430 if (adapter->rx_buffer_area != NULL) {
3431 free(adapter->rx_buffer_area, M_DEVBUF);
3432 adapter->rx_buffer_area = NULL;
3435 if (adapter->rxtag != NULL) {
3436 bus_dma_tag_destroy(adapter->rxtag);
3437 adapter->rxtag = NULL;
3441 /*********************************************************************
3443 * This routine executes in interrupt context. It replenishes
3444 * the mbufs in the descriptor and sends data which has been
3445 * dma'ed into host memory to upper layer.
3447 * We loop at most count times if count is > 0, or until done if
3450 * For polling we also now return the number of cleaned packets
3451 *********************************************************************/
3453 lem_rxeof(struct adapter *adapter, int count)
/* Fix: removed stray second semicolon after the initializer below. */
3455 struct ifnet *ifp = adapter->ifp;
3457 u8 status, accept_frame = 0, eop = 0;
3458 u16 len, desc_len, prev_len_adj;
3460 struct e1000_rx_desc *current_desc;
3462 EM_RX_LOCK(adapter);
3463 i = adapter->next_rx_desc_to_check;
3464 current_desc = &adapter->rx_desc_base[i];
/* Make the descriptor ring visible to the CPU before inspecting it. */
3465 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3466 BUS_DMASYNC_POSTREAD);
/* Nothing ready: descriptor-done (DD) bit not set on next descriptor. */
3468 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3469 EM_RX_UNLOCK(adapter);
3473 while ((current_desc->status & E1000_RXD_STAT_DD) &&
3475 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3476 struct mbuf *m = NULL;
3478 mp = adapter->rx_buffer_area[i].m_head;
3480 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3481 * needs to access the last received byte in the mbuf.
3483 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3484 BUS_DMASYNC_POSTREAD);
3488 desc_len = le16toh(current_desc->length);
3489 status = current_desc->status;
3490 if (status & E1000_RXD_STAT_EOP) {
/* Strip the Ethernet CRC; if this fragment is shorter than the CRC,
 * the remainder must be trimmed from the previous fragment. */
3493 if (desc_len < ETHER_CRC_LEN) {
3495 prev_len_adj = ETHER_CRC_LEN - desc_len;
3497 len = desc_len - ETHER_CRC_LEN;
/* TBI workaround: a frame flagged in error may still be acceptable
 * (carrier-extension false error on 82543 fiber). */
3503 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3505 u32 pkt_len = desc_len;
3507 if (adapter->fmp != NULL)
3508 pkt_len += adapter->fmp->m_pkthdr.len;
3510 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3511 if (TBI_ACCEPT(&adapter->hw, status,
3512 current_desc->errors, pkt_len, last_byte,
3513 adapter->min_frame_size, adapter->max_frame_size)) {
3514 e1000_tbi_adjust_stats_82543(&adapter->hw,
3515 &adapter->stats, pkt_len,
3516 adapter->hw.mac.addr,
3517 adapter->max_frame_size);
/* Replenish this slot with a fresh mbuf; on failure the frame is dropped. */
3525 if (lem_get_buf(adapter, i) != 0) {
3530 /* Assign correct length to the current fragment */
3533 if (adapter->fmp == NULL) {
3534 mp->m_pkthdr.len = len;
3535 adapter->fmp = mp; /* Store the first mbuf */
3538 /* Chain mbuf's together */
3539 mp->m_flags &= ~M_PKTHDR;
3541 * Adjust length of previous mbuf in chain if
3542 * we received less than 4 bytes in the last
3545 if (prev_len_adj > 0) {
3546 adapter->lmp->m_len -= prev_len_adj;
3547 adapter->fmp->m_pkthdr.len -=
3550 adapter->lmp->m_next = mp;
3551 adapter->lmp = adapter->lmp->m_next;
3552 adapter->fmp->m_pkthdr.len += len;
/* End of packet: hand the assembled chain to the stack. */
3556 adapter->fmp->m_pkthdr.rcvif = ifp;
3558 lem_receive_checksum(adapter, current_desc,
3560 #ifndef __NO_STRICT_ALIGNMENT
3561 if (adapter->max_frame_size >
3562 (MCLBYTES - ETHER_ALIGN) &&
3563 lem_fixup_rx(adapter) != 0)
/* Propagate the hardware-stripped VLAN tag, API differs by OS version. */
3566 if (status & E1000_RXD_STAT_VP) {
3567 #if __FreeBSD_version < 700000
3568 VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
3569 (le16toh(current_desc->special) &
3570 E1000_RXD_SPC_VLAN_MASK));
3572 adapter->fmp->m_pkthdr.ether_vtag =
3573 (le16toh(current_desc->special) &
3574 E1000_RXD_SPC_VLAN_MASK);
3575 adapter->fmp->m_flags |= M_VLANTAG;
3578 #ifndef __NO_STRICT_ALIGNMENT
3582 adapter->fmp = NULL;
3583 adapter->lmp = NULL;
/* accept_frame == 0 path: recycle the loaded mbuf in place. */
3588 /* Reuse loaded DMA map and just update mbuf chain */
3589 mp = adapter->rx_buffer_area[i].m_head;
3590 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3591 mp->m_data = mp->m_ext.ext_buf;
3593 if (adapter->max_frame_size <=
3594 (MCLBYTES - ETHER_ALIGN))
3595 m_adj(mp, ETHER_ALIGN);
3596 if (adapter->fmp != NULL) {
3597 m_freem(adapter->fmp);
3598 adapter->fmp = NULL;
3599 adapter->lmp = NULL;
3604 /* Zero out the receive descriptors status. */
3605 current_desc->status = 0;
3606 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3607 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3609 /* Advance our pointers to the next descriptor. */
3610 if (++i == adapter->num_rx_desc)
3612 /* Call into the stack */
/* Drop the RX lock around if_input() to avoid recursion into the driver. */
3614 adapter->next_rx_desc_to_check = i;
3615 EM_RX_UNLOCK(adapter);
3616 (*ifp->if_input)(ifp, m);
3617 EM_RX_LOCK(adapter);
/* Re-read the ring position; it may have moved while unlocked. */
3619 i = adapter->next_rx_desc_to_check;
3621 current_desc = &adapter->rx_desc_base[i];
3623 adapter->next_rx_desc_to_check = i;
3625 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
3627 i = adapter->num_rx_desc - 1;
3628 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3629 EM_RX_UNLOCK(adapter);
3633 #ifndef __NO_STRICT_ALIGNMENT
3635 * When jumbo frames are enabled we should realign entire payload on
3636 * architecures with strict alignment. This is serious design mistake of 8254x
3637 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3638 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3639 * payload. On architecures without strict alignment restrictions 8254x still
3640 * performs unaligned memory access which would reduce the performance too.
3641 * To avoid copying over an entire frame to align, we allocate a new mbuf and
3642 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3643 * existing mbuf chain.
3645 * Be aware, best performance of the 8254x is achived only when jumbo frame is
3646 * not used at all on architectures with strict alignment.
3649 lem_fixup_rx(struct adapter *adapter)
/* If there is room in the cluster, shift the data in place instead of
 * allocating a new header mbuf. */
3656 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3657 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3658 m->m_data += ETHER_HDR_LEN;
/* Otherwise prepend a fresh header mbuf carrying the Ethernet header. */
3660 MGETHDR(n, M_DONTWAIT, MT_DATA);
3662 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3663 m->m_data += ETHER_HDR_LEN;
3664 m->m_len -= ETHER_HDR_LEN;
3665 n->m_len = ETHER_HDR_LEN;
3666 M_MOVE_PKTHDR(n, m);
/* Allocation failed: count and drop the partially assembled frame. */
3670 adapter->dropped_pkts++;
3671 m_freem(adapter->fmp);
3672 adapter->fmp = NULL;
3681 /*********************************************************************
3683 * Verify that the hardware indicated that the checksum is valid.
3684 * Inform the stack about the status of checksum so that stack
3685 * doesn't spend time verifying the checksum.
3687 *********************************************************************/
3689 lem_receive_checksum(struct adapter *adapter,
3690 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3692 /* 82543 or newer only */
3693 if ((adapter->hw.mac.type < e1000_82543) ||
3694 /* Ignore Checksum bit is set */
3695 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3696 mp->m_pkthdr.csum_flags = 0;
/* IP checksum was offloaded; report good/bad to the stack. */
3700 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3702 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3703 /* IP Checksum Good */
3704 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3705 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3708 mp->m_pkthdr.csum_flags = 0;
/* TCP/UDP checksum verified by hardware: mark data valid so the
 * stack skips its own verification. */
3712 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3714 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3715 mp->m_pkthdr.csum_flags |=
3716 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3717 mp->m_pkthdr.csum_data = htons(0xffff);
3722 #if __FreeBSD_version >= 700029
3724 * This routine is run via an vlan
3728 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3730 struct adapter *adapter = ifp->if_softc;
3733 if (ifp->if_softc != arg) /* Not our event */
3736 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
/* Set the tag's bit in the shadow VLAN filter table (32 tags per word). */
3739 index = (vtag >> 5) & 0x7F;
3741 lem_shadow_vfta[index] |= (1 << bit);
3742 ++adapter->num_vlans;
3743 /* Re-init to load the changes */
3748 * This routine is run via an vlan
3752 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3754 struct adapter *adapter = ifp->if_softc;
3757 if (ifp->if_softc != arg)
3760 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
/* Clear the tag's bit in the shadow VLAN filter table. */
3763 index = (vtag >> 5) & 0x7F;
3765 lem_shadow_vfta[index] &= ~(1 << bit);
3766 --adapter->num_vlans;
3767 /* Re-init to load the changes */
3772 lem_setup_vlan_hw_support(struct adapter *adapter)
3774 struct e1000_hw *hw = &adapter->hw;
3778 ** We get here thru init_locked, meaning
3779 ** a soft reset, this has already cleared
3780 ** the VFTA and other state, so if there
3781 ** have been no vlan's registered do nothing.
3783 if (adapter->num_vlans == 0)
3787 ** A soft reset zero's out the VFTA, so
3788 ** we need to repopulate it now.
3790 for (int i = 0; i < EM_VFTA_SIZE; i++)
3791 if (lem_shadow_vfta[i] != 0)
3792 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3793 i, lem_shadow_vfta[i]);
/* Enable VLAN tag stripping/insertion (VME) in the device control reg. */
3795 reg = E1000_READ_REG(hw, E1000_CTRL);
3796 reg |= E1000_CTRL_VME;
3797 E1000_WRITE_REG(hw, E1000_CTRL, reg);
3799 /* Enable the Filter Table */
3800 reg = E1000_READ_REG(hw, E1000_RCTL);
3801 reg &= ~E1000_RCTL_CFIEN;
3802 reg |= E1000_RCTL_VFE;
3803 E1000_WRITE_REG(hw, E1000_RCTL, reg);
3805 /* Update the frame size */
3806 E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
3807 adapter->max_frame_size + VLAN_TAG_SIZE);
3812 lem_enable_intr(struct adapter *adapter)
3814 struct e1000_hw *hw = &adapter->hw;
3815 u32 ims_mask = IMS_ENABLE_MASK;
/* When MSI-X is active, also enable auto-clear and the MSI-X causes. */
3817 if (adapter->msix) {
3818 E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
3819 ims_mask |= EM_MSIX_MASK;
/* Unmask the selected interrupt causes. */
3821 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
3825 lem_disable_intr(struct adapter *adapter)
3827 struct e1000_hw *hw = &adapter->hw;
3830 E1000_WRITE_REG(hw, EM_EIAC, 0);
/* Mask all interrupt causes; use the local hw alias consistently
 * (was a mix of hw and &adapter->hw, which alias the same object). */
3831 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3835 * Bit of a misnomer, what this really means is
3836 * to enable OS management of the system... aka
3837 * to disable special hardware management features
3840 lem_init_manageability(struct adapter *adapter)
3842 /* A shared code workaround */
3843 if (adapter->has_manage) {
3844 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3845 /* disable hardware interception of ARP */
3846 manc &= ~(E1000_MANC_ARP_EN);
3847 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3852 * Give control back to hardware management
3853 * controller if there is one.
3856 lem_release_manageability(struct adapter *adapter)
3858 if (adapter->has_manage) {
3859 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3861 /* re-enable hardware interception of ARP */
3862 manc |= E1000_MANC_ARP_EN;
3863 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3868 * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3869 * For ASF and Pass Through versions of f/w this means
3870 * that the driver is loaded. For AMT version type f/w
3871 * this means that the network i/f is open.
3874 lem_get_hw_control(struct adapter *adapter)
/* Read-modify-write CTRL_EXT to assert DRV_LOAD. */
3878 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3879 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3880 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3885 * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3886 * For ASF and Pass Through versions of f/w this means that
3887 * the driver is no longer loaded. For AMT versions of the
3888 * f/w this means that the network i/f is closed.
3891 lem_release_hw_control(struct adapter *adapter)
/* Nothing to release when there is no management firmware. */
3895 if (!adapter->has_manage)
3898 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3899 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3900 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3905 lem_is_valid_ether_addr(u8 *addr)
3907 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
/* Reject multicast (low bit of first octet set) and all-zero addresses. */
3909 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3917 ** Parse the interface capabilities with regard
3918 ** to both system management and wake-on-lan for
3922 lem_get_wakeup(device_t dev)
3924 struct adapter *adapter = device_get_softc(dev);
3925 u16 eeprom_data = 0, device_id, apme_mask;
3927 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
3928 apme_mask = EM_EEPROM_APME;
/* Pick the NVM word (and APME bit) that matches this MAC/port. */
3930 switch (adapter->hw.mac.type) {
3935 e1000_read_nvm(&adapter->hw,
3936 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
3937 apme_mask = EM_82544_APME;
3940 case e1000_82546_rev_3:
3941 if (adapter->hw.bus.func == 1) {
3942 e1000_read_nvm(&adapter->hw,
3943 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3946 e1000_read_nvm(&adapter->hw,
3947 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3950 e1000_read_nvm(&adapter->hw,
3951 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
/* EEPROM says APM wake is enabled: default to magic + multicast wake. */
3954 if (eeprom_data & apme_mask)
3955 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
3957 * We have the eeprom settings, now apply the special cases
3958 * where the eeprom may be wrong or the board won't support
3959 * wake on lan on a particular port
3961 device_id = pci_get_device(dev);
3962 switch (device_id) {
3963 case E1000_DEV_ID_82546GB_PCIE:
3966 case E1000_DEV_ID_82546EB_FIBER:
3967 case E1000_DEV_ID_82546GB_FIBER:
3968 /* Wake events only supported on port A for dual fiber
3969 * regardless of eeprom setting */
3970 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
3971 E1000_STATUS_FUNC_1)
3974 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
3975 /* if quad port adapter, disable WoL on all but port A */
3976 if (global_quad_port_a != 0)
3978 /* Reset for multiple quad port adapters */
3979 if (++global_quad_port_a == 4)
3980 global_quad_port_a = 0;
3988 * Enable PCI Wake On Lan capability
3991 lem_enable_wakeup(device_t dev)
3993 struct adapter *adapter = device_get_softc(dev);
3994 struct ifnet *ifp = adapter->ifp;
3995 u32 pmc, ctrl, ctrl_ext, rctl;
/* Bail if the device has no PCI power-management capability. */
3998 if ((pci_find_extcap(dev, PCIY_PMG, &pmc) != 0))
4001 /* Advertise the wakeup capability */
4002 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4003 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
4004 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4005 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4007 /* Keep the laser running on Fiber adapters */
4008 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
4009 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
4010 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4011 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4012 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
4016 ** Determine type of Wakeup: note that wol
4017 ** is set with all bits on by default.
4019 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
4020 adapter->wol &= ~E1000_WUFC_MAG;
4022 if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
4023 adapter->wol &= ~E1000_WUFC_MC;
/* NOTE(review): MPE (promiscuous multicast) appears to be enabled
 * unconditionally here -- the guarding condition is not visible in
 * this fragment; confirm against the full source. */
4025 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4026 rctl |= E1000_RCTL_MPE;
4027 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
/* pchlan parts require the wake configuration to live in the PHY. */
4030 if (adapter->hw.mac.type == e1000_pchlan) {
4031 if (lem_enable_phy_wakeup(adapter))
4034 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4035 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
/* Arm PME in PCI power-management config space. */
4040 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4041 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4042 if (ifp->if_capenable & IFCAP_WOL)
4043 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4044 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
4050 ** WOL in the newer chipset interfaces (pchlan)
4051 ** require thing to be copied into the phy
4054 lem_enable_phy_wakeup(struct adapter *adapter)
4056 struct e1000_hw *hw = &adapter->hw;
4060 /* copy MAC RARs to PHY RARs */
4061 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4062 mreg = E1000_READ_REG(hw, E1000_RAL(i));
4063 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
4064 e1000_write_phy_reg(hw, BM_RAR_M(i),
4065 (u16)((mreg >> 16) & 0xFFFF));
4066 mreg = E1000_READ_REG(hw, E1000_RAH(i));
4067 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
4068 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
4069 (u16)((mreg >> 16) & 0xFFFF));
4072 /* copy MAC MTA to PHY MTA */
4073 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4074 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4075 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4076 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4077 (u16)((mreg >> 16) & 0xFFFF));
4080 /* configure PHY Rx Control register */
/* Mirror the relevant MAC RCTL/CTRL bits into the BM_RCTL PHY register. */
4081 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
4082 mreg = E1000_READ_REG(hw, E1000_RCTL);
4083 if (mreg & E1000_RCTL_UPE)
4084 preg |= BM_RCTL_UPE;
4085 if (mreg & E1000_RCTL_MPE)
4086 preg |= BM_RCTL_MPE;
4087 preg &= ~(BM_RCTL_MO_MASK);
4088 if (mreg & E1000_RCTL_MO_3)
4089 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4090 << BM_RCTL_MO_SHIFT);
4091 if (mreg & E1000_RCTL_BAM)
4092 preg |= BM_RCTL_BAM;
4093 if (mreg & E1000_RCTL_PMCF)
4094 preg |= BM_RCTL_PMCF;
4095 mreg = E1000_READ_REG(hw, E1000_CTRL);
4096 if (mreg & E1000_CTRL_RFCE)
4097 preg |= BM_RCTL_RFCE;
4098 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
4100 /* enable PHY wakeup in MAC register */
4101 E1000_WRITE_REG(hw, E1000_WUC,
4102 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4103 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
4105 /* configure and enable PHY wakeup in PHY registers */
4106 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
4107 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4109 /* activate PHY wakeup */
/* Take the PHY semaphore before raw MDIC accesses to page 769. */
4110 ret = hw->phy.ops.acquire(hw);
4112 printf("Could not acquire PHY\n");
4115 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4116 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4117 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
4119 printf("Could not read PHY page 769\n");
4122 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4123 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
4125 printf("Could not set PHY Host Wakeup bit\n");
4127 hw->phy.ops.release(hw);
4133 lem_led_func(void *arg, int onoff)
4135 struct adapter *adapter = arg;
/* Serialize LED register access with the core lock. */
4137 EM_CORE_LOCK(adapter)
4139 e1000_setup_led(&adapter->hw);
4140 e1000_led_on(&adapter->hw);
4142 e1000_led_off(&adapter->hw);
4143 e1000_cleanup_led(&adapter->hw);
4145 EM_CORE_UNLOCK(adapter);
4148 /*********************************************************************
4149 * 82544 Coexistence issue workaround.
4150 * There are 2 issues.
4151 * 1. Transmit Hang issue.
4152 * To detect this issue, following equation can be used...
4153 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4154 * If SUM[3:0] is in between 1 to 4, we will have this issue.
4157 * To detect this issue, following equation can be used...
4158 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4159 * If SUM[3:0] is in between 9 to c, we will have this issue.
4163 * Make sure we do not have ending address
4164 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4166 *************************************************************************/
4168 lem_fill_descriptors (bus_addr_t address, u32 length,
4169 PDESC_ARRAY desc_array)
4171 u32 safe_terminator;
4173 /* Since issue is sensitive to length and address.*/
4174 /* Let us first check the address...*/
/* Single descriptor is fine for this case; just pass through. */
4176 desc_array->descriptor[0].address = address;
4177 desc_array->descriptor[0].length = length;
4178 desc_array->elements = 1;
4179 return (desc_array->elements);
/* SUM[3:0] = (ADDR[2:0] + SIZE[3:0]) & 0xF, per the errata equations. */
4181 safe_terminator = (u32)((((u32)address & 0x7) +
4182 (length & 0xF)) & 0xF);
4183 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4184 if (safe_terminator == 0 ||
4185 (safe_terminator > 4 &&
4186 safe_terminator < 9) ||
4187 (safe_terminator > 0xC &&
4188 safe_terminator <= 0xF)) {
4189 desc_array->descriptor[0].address = address;
4190 desc_array->descriptor[0].length = length;
4191 desc_array->elements = 1;
4192 return (desc_array->elements);
/* Unsafe terminator: split off the final 4 bytes into a second
 * descriptor so the first one ends on a safe boundary. */
4195 desc_array->descriptor[0].address = address;
4196 desc_array->descriptor[0].length = length - 4;
4197 desc_array->descriptor[1].address = address + (length - 4);
4198 desc_array->descriptor[1].length = 4;
4199 desc_array->elements = 2;
4200 return (desc_array->elements);
4203 /**********************************************************************
4205 * Update the board statistics counters.
4207 **********************************************************************/
4209 lem_update_stats_counters(struct adapter *adapter)
/* Symbol/sequence errors are only meaningful on copper or with link up. */
4213 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
4214 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4215 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4216 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4218 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4219 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4220 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4221 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4223 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4224 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4225 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4226 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4227 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4228 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4229 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4230 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4231 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4232 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4233 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4234 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4235 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4236 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4237 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4238 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4239 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4240 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4241 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4242 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4244 /* For the 64-bit byte counters the low dword must be read first. */
4245 /* Both registers clear on the read of the high dword */
4247 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
4248 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);
4250 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
4251 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
4252 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
4253 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
4254 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
4256 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
4257 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
4259 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
4260 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
4261 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
4262 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
4263 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
4264 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
4265 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
4266 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
4267 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
4268 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* The following counters only exist on 82543 and later MACs. */
4270 if (adapter->hw.mac.type >= e1000_82543) {
4271 adapter->stats.algnerrc +=
4272 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
4273 adapter->stats.rxerrc +=
4274 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
4275 adapter->stats.tncrs +=
4276 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
4277 adapter->stats.cexterr +=
4278 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
4279 adapter->stats.tsctc +=
4280 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
4281 adapter->stats.tsctfc +=
4282 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
/* Export aggregate error/collision counts to the ifnet. */
4286 ifp->if_collisions = adapter->stats.colc;
4289 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
4290 adapter->stats.crcerrs + adapter->stats.algnerrc +
4291 adapter->stats.ruc + adapter->stats.roc +
4292 adapter->stats.mpc + adapter->stats.cexterr;
4295 ifp->if_oerrors = adapter->stats.ecol +
4296 adapter->stats.latecol + adapter->watchdog_events;
4300 /**********************************************************************
4302 * This routine is called only when lem_display_debug_stats is enabled.
4303 * This routine provides a way to take a look at important statistics
4304 * maintained by the driver and hardware.
4306 **********************************************************************/
4308 lem_print_debug_info(struct adapter *adapter)
4310 device_t dev = adapter->dev;
4311 u8 *hw_addr = adapter->hw.hw_addr;
4313 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
4314 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
4315 E1000_READ_REG(&adapter->hw, E1000_CTRL),
4316 E1000_READ_REG(&adapter->hw, E1000_RCTL));
/* PBA: upper 16 bits = TX packet buffer KB, lower 16 = RX KB. */
4317 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
4318 ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
4319 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
4320 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
4321 adapter->hw.fc.high_water,
4322 adapter->hw.fc.low_water);
4323 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
4324 E1000_READ_REG(&adapter->hw, E1000_TIDV),
4325 E1000_READ_REG(&adapter->hw, E1000_TADV));
4326 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
4327 E1000_READ_REG(&adapter->hw, E1000_RDTR),
4328 E1000_READ_REG(&adapter->hw, E1000_RADV));
4329 device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
4330 (long long)adapter->tx_fifo_wrk_cnt,
4331 (long long)adapter->tx_fifo_reset_cnt);
/* Hardware head/tail pointers for the first TX and RX rings. */
4332 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
4333 E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
4334 E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
4335 device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
4336 E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
4337 E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
4338 device_printf(dev, "Num Tx descriptors avail = %d\n",
4339 adapter->num_tx_desc_avail);
4340 device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
4341 adapter->no_tx_desc_avail1);
4342 device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
4343 adapter->no_tx_desc_avail2);
4344 device_printf(dev, "Std mbuf failed = %ld\n",
4345 adapter->mbuf_alloc_failed);
4346 device_printf(dev, "Std mbuf cluster failed = %ld\n",
4347 adapter->mbuf_cluster_failed);
4348 device_printf(dev, "Driver dropped packets = %ld\n",
4349 adapter->dropped_pkts);
4350 device_printf(dev, "Driver tx dma failure in encap = %ld\n",
4351 adapter->no_tx_dma_setup);
4355 lem_print_hw_stats(struct adapter *adapter)
4357 device_t dev = adapter->dev;
/* Dump the accumulated MAC statistics gathered by
 * lem_update_stats_counters(). */
4359 device_printf(dev, "Excessive collisions = %lld\n",
4360 (long long)adapter->stats.ecol);
4361 #if (DEBUG_HW > 0) /* Dont output these errors normally */
4362 device_printf(dev, "Symbol errors = %lld\n",
4363 (long long)adapter->stats.symerrs);
4365 device_printf(dev, "Sequence errors = %lld\n",
4366 (long long)adapter->stats.sec);
4367 device_printf(dev, "Defer count = %lld\n",
4368 (long long)adapter->stats.dc);
4369 device_printf(dev, "Missed Packets = %lld\n",
4370 (long long)adapter->stats.mpc);
4371 device_printf(dev, "Receive No Buffers = %lld\n",
4372 (long long)adapter->stats.rnbc);
4373 /* RLEC is inaccurate on some hardware, calculate our own. */
4374 device_printf(dev, "Receive Length Errors = %lld\n",
4375 ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
4376 device_printf(dev, "Receive errors = %lld\n",
4377 (long long)adapter->stats.rxerrc);
4378 device_printf(dev, "Crc errors = %lld\n",
4379 (long long)adapter->stats.crcerrs);
4380 device_printf(dev, "Alignment errors = %lld\n",
4381 (long long)adapter->stats.algnerrc);
4382 device_printf(dev, "Collision/Carrier extension errors = %lld\n",
4383 (long long)adapter->stats.cexterr);
4384 device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
4385 device_printf(dev, "watchdog timeouts = %ld\n",
4386 adapter->watchdog_events);
4387 device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
4388 " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
4389 adapter->tx_irq , adapter->link_irq);
4390 device_printf(dev, "XON Rcvd = %lld\n",
4391 (long long)adapter->stats.xonrxc);
4392 device_printf(dev, "XON Xmtd = %lld\n",
4393 (long long)adapter->stats.xontxc);
4394 device_printf(dev, "XOFF Rcvd = %lld\n",
4395 (long long)adapter->stats.xoffrxc);
4396 device_printf(dev, "XOFF Xmtd = %lld\n",
4397 (long long)adapter->stats.xofftxc);
4398 device_printf(dev, "Good Packets Rcvd = %lld\n",
4399 (long long)adapter->stats.gprc);
4400 device_printf(dev, "Good Packets Xmtd = %lld\n",
4401 (long long)adapter->stats.gptc);
4404 /**********************************************************************
4406 * This routine provides a way to dump out the adapter eeprom,
4407 * often a useful debug/service tool. This only dumps the first
4408 * 32 words, stuff that matters is in that extent.
4410 **********************************************************************/
4412 lem_print_nvm_info(struct adapter *adapter)
4417 /* Its a bit crude, but it gets the job done */
4418 printf("\nInterface EEPROM Dump:\n");
4419 printf("Offset\n0x0000 ");
/* 8 words per row; j tracks the column, row the printed offset. */
4420 for (i = 0, j = 0; i < 32; i++, j++) {
4421 if (j == 8) { /* Make the offset block */
4423 printf("\n0x00%x0 ",row);
4425 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4426 printf("%04x ", eeprom_data);
4432 lem_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
4434 struct adapter *adapter;
/* Sysctl handler: writing a trigger value dumps driver debug state. */
4439 error = sysctl_handle_int(oidp, &result, 0, req);
4441 if (error || !req->newptr)
4445 adapter = (struct adapter *)arg1;
4446 lem_print_debug_info(adapter);
4449 * This value will cause a hex dump of the
4450 * first 32 16-bit words of the EEPROM to
4454 adapter = (struct adapter *)arg1;
4455 lem_print_nvm_info(adapter);
4463 lem_sysctl_stats(SYSCTL_HANDLER_ARGS)
4465 struct adapter *adapter;
/* Sysctl handler: writing a trigger value prints the hardware stats. */
4470 error = sysctl_handle_int(oidp, &result, 0, req);
4472 if (error || !req->newptr)
4476 adapter = (struct adapter *)arg1;
4477 lem_print_hw_stats(adapter);
4484 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4486 struct em_int_delay_info *info;
4487 struct adapter *adapter;
/* Sysctl handler: read/update one interrupt-delay register in usecs. */
4493 info = (struct em_int_delay_info *)arg1;
4494 usecs = info->value;
4495 error = sysctl_handle_int(oidp, &usecs, 0, req);
4496 if (error != 0 || req->newptr == NULL)
/* Reject values outside what the 16-bit ticks field can encode. */
4498 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4500 info->value = usecs;
4501 ticks = EM_USECS_TO_TICKS(usecs);
4503 adapter = info->adapter;
4505 EM_CORE_LOCK(adapter);
/* Preserve the upper register bits; only the low 16 hold the delay. */
4506 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4507 regval = (regval & ~0xffff) | (ticks & 0xffff);
4508 /* Handle a few special cases. */
4509 switch (info->offset) {
4514 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4515 /* Don't write 0 into the TIDV register. */
4518 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4521 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4522 EM_CORE_UNLOCK(adapter);
4527 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4528 const char *description, struct em_int_delay_info *info,
4529 int offset, int value)
/* Bind an interrupt-delay register to a read/write sysctl node backed
 * by lem_sysctl_int_delay(). */
4531 info->adapter = adapter;
4532 info->offset = offset;
4533 info->value = value;
4534 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4535 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4536 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4537 info, 0, lem_sysctl_int_delay, "I", description);
4540 #ifndef EM_LEGACY_IRQ
4542 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4543 const char *description, int *limit, int value)
4546 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4547 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4548 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);