1 /******************************************************************************
3 Copyright (c) 2001-2008, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
39 #include <sys/param.h>
40 #include <sys/systm.h>
42 #include <sys/endian.h>
43 #include <sys/kernel.h>
44 #include <sys/kthread.h>
45 #include <sys/malloc.h>
47 #include <sys/module.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/sysctl.h>
52 #include <sys/taskqueue.h>
53 #include <sys/eventhandler.h>
55 #include <sys/ioccom.h>
58 #include <machine/bus.h>
59 #include <machine/resource.h>
62 #include <net/ethernet.h>
64 #include <net/if_arp.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
68 #include <net/if_types.h>
69 #include <net/if_vlan_var.h>
71 #include <netinet/in_systm.h>
72 #include <netinet/in.h>
73 #include <netinet/if_ether.h>
74 #include <netinet/ip.h>
75 #include <netinet/ip6.h>
76 #include <netinet/tcp.h>
77 #include <netinet/udp.h>
79 #include <machine/in_cksum.h>
80 #include <dev/pci/pcivar.h>
81 #include <dev/pci/pcireg.h>
83 #include "e1000_api.h"
84 #include "e1000_82571.h"
87 /*********************************************************************
88 * Set this to one to display debug statistics
89 *********************************************************************/
90 int em_display_debug_stats = 0;
92 /*********************************************************************
/* NOTE(review): the banner's title line (orig. line 93, "Driver version")
 * appears elided from this listing. */
94 *********************************************************************/
/* Driver version string; exposed by the driver's debug/identification output. */
95 char em_driver_version[] = "6.9.6";
98 /*********************************************************************
101 * Used by probe to select devices to load on
102 * Last field stores an index into e1000_strings
103 * Last entry must be all 0s
105 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
106 *********************************************************************/
108 static em_vendor_info_t em_vendor_info_array[] =
110 /* Intel(R) PRO/1000 Network Connection */
111 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
112 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
113 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
118 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
128 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
132 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
142 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
143 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
147 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
148 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
149 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
150 PCI_ANY_ID, PCI_ANY_ID, 0},
152 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
153 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
154 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
156 { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
157 { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
158 { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
159 { 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL,
160 PCI_ANY_ID, PCI_ANY_ID, 0},
161 { 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD,
162 PCI_ANY_ID, PCI_ANY_ID, 0},
163 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
164 PCI_ANY_ID, PCI_ANY_ID, 0},
165 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
166 PCI_ANY_ID, PCI_ANY_ID, 0},
167 { 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
168 PCI_ANY_ID, PCI_ANY_ID, 0},
169 { 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
170 PCI_ANY_ID, PCI_ANY_ID, 0},
171 { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
172 { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
173 { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
174 { 0x8086, E1000_DEV_ID_82572EI, PCI_ANY_ID, PCI_ANY_ID, 0},
176 { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0},
177 { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0},
178 { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0},
179 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
180 PCI_ANY_ID, PCI_ANY_ID, 0},
181 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
182 PCI_ANY_ID, PCI_ANY_ID, 0},
183 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
184 PCI_ANY_ID, PCI_ANY_ID, 0},
185 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
186 PCI_ANY_ID, PCI_ANY_ID, 0},
187 { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
188 { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
189 { 0x8086, E1000_DEV_ID_ICH8_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
190 { 0x8086, E1000_DEV_ID_ICH8_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
191 { 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
192 { 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
193 { 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
195 { 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
196 { 0x8086, E1000_DEV_ID_ICH9_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
197 { 0x8086, E1000_DEV_ID_ICH9_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
198 { 0x8086, E1000_DEV_ID_ICH9_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
199 { 0x8086, E1000_DEV_ID_ICH9_IGP_M_V, PCI_ANY_ID, PCI_ANY_ID, 0},
200 { 0x8086, E1000_DEV_ID_ICH9_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
201 { 0x8086, E1000_DEV_ID_ICH9_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
202 { 0x8086, E1000_DEV_ID_ICH9_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
203 { 0x8086, E1000_DEV_ID_ICH9_BM, PCI_ANY_ID, PCI_ANY_ID, 0},
204 { 0x8086, E1000_DEV_ID_82574L, PCI_ANY_ID, PCI_ANY_ID, 0},
205 { 0x8086, E1000_DEV_ID_ICH10_R_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
206 { 0x8086, E1000_DEV_ID_ICH10_R_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
207 { 0x8086, E1000_DEV_ID_ICH10_R_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0},
208 { 0x8086, E1000_DEV_ID_ICH10_D_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
209 { 0x8086, E1000_DEV_ID_ICH10_D_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
210 /* required last entry */
/* NOTE(review): the all-zero terminator entry and the closing "};" are not
 * visible in this listing.  em_probe() iterates this table until it finds a
 * zero vendor_id, so the terminator is load-bearing -- confirm it is present
 * in the full source.  Also note the opening "{" after the "=" (orig. line
 * 109) appears elided. */
214 /*********************************************************************
215 * Table of branding strings for all supported NICs.
216 *********************************************************************/
/* Indexed by the last field ("String Index") of em_vendor_info_array
 * entries; em_probe() uses it to build the device description. */
218 static char *em_strings[] = {
219 "Intel(R) PRO/1000 Network Connection"
/* NOTE(review): closing "};" elided from this listing. */
222 /*********************************************************************
223 * Function prototypes
224 *********************************************************************/
/* NOTE(review): several continuation lines and preprocessor #else/#endif
 * lines in this prototype list appear elided from this listing (e.g. the
 * #endif matching __NO_STRICT_ALIGNMENT, the #else in the
 * __FreeBSD_version < 700000 pair, and the #ifndef EM_LEGACY_IRQ opener);
 * confirm against the full source before editing. */
225 static int em_probe(device_t);
226 static int em_attach(device_t);
227 static int em_detach(device_t);
228 static int em_shutdown(device_t);
229 static int em_suspend(device_t);
230 static int em_resume(device_t);
231 static void em_start(struct ifnet *);
232 static void em_start_locked(struct ifnet *ifp);
233 static int em_ioctl(struct ifnet *, u_long, caddr_t);
234 static void em_watchdog(struct adapter *);
235 static void em_init(void *);
236 static void em_init_locked(struct adapter *);
237 static void em_stop(void *);
238 static void em_media_status(struct ifnet *, struct ifmediareq *);
239 static int em_media_change(struct ifnet *);
240 static void em_identify_hardware(struct adapter *);
241 static int em_allocate_pci_resources(struct adapter *);
242 static int em_allocate_legacy(struct adapter *adapter);
243 static int em_allocate_msix(struct adapter *adapter);
244 static int em_setup_msix(struct adapter *);
245 static void em_free_pci_resources(struct adapter *);
246 static void em_local_timer(void *);
247 static int em_hardware_init(struct adapter *);
248 static void em_setup_interface(device_t, struct adapter *);
249 static void em_setup_transmit_structures(struct adapter *);
250 static void em_initialize_transmit_unit(struct adapter *);
251 static int em_setup_receive_structures(struct adapter *);
252 static void em_initialize_receive_unit(struct adapter *);
253 static void em_enable_intr(struct adapter *);
254 static void em_disable_intr(struct adapter *);
255 static void em_free_transmit_structures(struct adapter *);
256 static void em_free_receive_structures(struct adapter *);
257 static void em_update_stats_counters(struct adapter *);
258 static void em_txeof(struct adapter *);
259 static void em_tx_purge(struct adapter *);
260 static int em_allocate_receive_structures(struct adapter *);
261 static int em_allocate_transmit_structures(struct adapter *);
262 static int em_rxeof(struct adapter *, int);
263 #ifndef __NO_STRICT_ALIGNMENT
264 static int em_fixup_rx(struct adapter *);
266 static void em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
268 static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
270 #if __FreeBSD_version >= 700000
271 static bool em_tso_setup(struct adapter *, struct mbuf *,
273 #endif /* FreeBSD_version >= 700000 */
274 static void em_set_promisc(struct adapter *);
275 static void em_disable_promisc(struct adapter *);
276 static void em_set_multi(struct adapter *);
277 static void em_print_hw_stats(struct adapter *);
278 static void em_update_link_status(struct adapter *);
279 static int em_get_buf(struct adapter *, int);
281 #ifdef EM_HW_VLAN_SUPPORT
282 static void em_register_vlan(void *, struct ifnet *, u16);
283 static void em_unregister_vlan(void *, struct ifnet *, u16);
286 static int em_xmit(struct adapter *, struct mbuf **);
287 static void em_smartspeed(struct adapter *);
288 static int em_82547_fifo_workaround(struct adapter *, int);
289 static void em_82547_update_fifo_head(struct adapter *, int);
290 static int em_82547_tx_fifo_reset(struct adapter *);
291 static void em_82547_move_tail(void *);
292 static int em_dma_malloc(struct adapter *, bus_size_t,
293 struct em_dma_alloc *, int);
294 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
295 static void em_print_debug_info(struct adapter *);
296 static void em_print_nvm_info(struct adapter *);
297 static int em_is_valid_ether_addr(u8 *);
298 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
299 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
300 static u32 em_fill_descriptors (bus_addr_t address, u32 length,
301 PDESC_ARRAY desc_array);
302 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
303 static void em_add_int_delay_sysctl(struct adapter *, const char *,
304 const char *, struct em_int_delay_info *, int, int);
305 /* Management and WOL Support */
306 static void em_init_manageability(struct adapter *);
307 static void em_release_manageability(struct adapter *);
308 static void em_get_hw_control(struct adapter *);
309 static void em_release_hw_control(struct adapter *);
310 static void em_enable_wakeup(device_t);
313 /* Precision Time sync support */
314 static int em_tsync_init(struct adapter *);
315 static void em_tsync_disable(struct adapter *);
319 static void em_intr(void *);
321 #if __FreeBSD_version < 700000
322 static void em_irq_fast(void *);
324 static int em_irq_fast(void *);
327 static void em_msix_tx(void *);
328 static void em_msix_rx(void *);
329 static void em_msix_link(void *);
330 static void em_add_rx_process_limit(struct adapter *, const char *,
331 const char *, int *, int);
332 static void em_handle_rxtx(void *context, int pending);
333 static void em_handle_rx(void *context, int pending);
334 static void em_handle_tx(void *context, int pending);
335 static void em_handle_link(void *context, int pending);
336 #endif /* EM_LEGACY_IRQ */
338 #ifdef DEVICE_POLLING
339 static poll_handler_t em_poll;
342 /*********************************************************************
343 * FreeBSD Device Interface Entry Points
344 *********************************************************************/
/* newbus method table binding the generic device interface to this
 * driver's probe/attach/detach/shutdown/suspend/resume handlers. */
346 static device_method_t em_methods[] = {
347 /* Device interface */
348 DEVMETHOD(device_probe, em_probe),
349 DEVMETHOD(device_attach, em_attach),
350 DEVMETHOD(device_detach, em_detach),
351 DEVMETHOD(device_shutdown, em_shutdown),
352 DEVMETHOD(device_suspend, em_suspend),
353 DEVMETHOD(device_resume, em_resume),
/* NOTE(review): the terminating {0, 0} method entry and the closing "};"
 * appear elided from this listing, as does the closing "};" of em_driver
 * below -- confirm against the full source. */
357 static driver_t em_driver = {
358 "em", em_methods, sizeof(struct adapter),
361 static devclass_t em_devclass;
362 DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
363 MODULE_DEPEND(em, pci, 1, 1, 1);
364 MODULE_DEPEND(em, ether, 1, 1, 1);
366 /*********************************************************************
367 * Tunable default values.
368 *********************************************************************/
/* Convert between the hardware's interrupt-delay timer ticks and
 * microseconds; the 1024/1000 factors show the timer counts in units of
 * 1.024 us, with rounding to nearest. */
370 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
371 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
374 /* Allow common code without TSO */
/* NOTE(review): the #define that the comment above refers to appears
 * elided from this listing. */
379 static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
380 static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
381 static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
382 static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
383 static int em_rxd = EM_DEFAULT_RXD;
384 static int em_txd = EM_DEFAULT_TXD;
385 static int em_smart_pwr_down = FALSE;
386 /* Controls whether promiscuous also shows bad packets */
387 static int em_debug_sbp = FALSE;
388 /* Local switch for MSI/MSIX */
389 static int em_enable_msi = TRUE;
/* Loader tunables (hw.em.*) overriding the defaults above at boot time. */
391 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
392 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
393 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
394 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
395 TUNABLE_INT("hw.em.rxd", &em_rxd);
396 TUNABLE_INT("hw.em.txd", &em_txd);
397 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
398 TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
399 TUNABLE_INT("hw.em.enable_msi", &em_enable_msi);
401 #ifndef EM_LEGACY_IRQ
402 /* How many packets rxeof tries to clean at a time */
403 static int em_rx_process_limit = 100;
404 TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
/* NOTE(review): the #endif matching the #ifndef EM_LEGACY_IRQ above
 * appears elided from this listing. */
407 /* Global used in WOL setup with multiport cards */
408 static int global_quad_port_a = 0;
410 /*********************************************************************
411 * Device identification routine
413 * em_probe determines if the driver should be loaded on
414 * adapter based on PCI vendor/device id of the adapter.
416 * return BUS_PROBE_DEFAULT on success, positive on failure
417 *********************************************************************/
/* NOTE(review): the "static int" return-type line, opening brace, the
 * early-return on vendor mismatch, the loop's "ent++" advance, and the
 * trailing failure return/closing braces appear elided from this listing. */
420 em_probe(device_t dev)
422 char adapter_name[60];
423 u16 pci_vendor_id = 0;
424 u16 pci_device_id = 0;
425 u16 pci_subvendor_id = 0;
426 u16 pci_subdevice_id = 0;
427 em_vendor_info_t *ent;
429 INIT_DEBUGOUT("em_probe: begin");
/* Reject anything that is not an Intel (EM_VENDOR_ID) device up front. */
431 pci_vendor_id = pci_get_vendor(dev);
432 if (pci_vendor_id != EM_VENDOR_ID)
435 pci_device_id = pci_get_device(dev);
436 pci_subvendor_id = pci_get_subvendor(dev);
437 pci_subdevice_id = pci_get_subdevice(dev);
/* Scan the vendor table (terminated by a zero vendor_id); subvendor and
 * subdevice entries of PCI_ANY_ID act as wildcards. */
439 ent = em_vendor_info_array;
440 while (ent->vendor_id != 0) {
441 if ((pci_vendor_id == ent->vendor_id) &&
442 (pci_device_id == ent->device_id) &&
444 ((pci_subvendor_id == ent->subvendor_id) ||
445 (ent->subvendor_id == PCI_ANY_ID)) &&
447 ((pci_subdevice_id == ent->subdevice_id) ||
448 (ent->subdevice_id == PCI_ANY_ID))) {
/* On a match, publish "<branding string> <version>" as the device
 * description and accept the probe. */
449 sprintf(adapter_name, "%s %s",
450 em_strings[ent->index],
452 device_set_desc_copy(dev, adapter_name);
453 return (BUS_PROBE_DEFAULT);
461 /*********************************************************************
462 * Device initialization routine
464 * The attach entry point is called when the driver is being loaded.
465 * This routine identifies the type of hardware, allocates all resources
466 * and initializes the hardware.
468 * return 0 on success, positive on failure
469 *********************************************************************/
/* NOTE(review): this listing elides many lines of em_attach -- the return
 * type line, local declarations (error, tsize, rsize), closing braces,
 * "goto err_*" statements and the error-label names near the end, and the
 * "return (0)" / "return (error)" statements.  Comments below describe only
 * what is visible. */
472 em_attach(device_t dev)
474 struct adapter *adapter;
477 u16 eeprom_data, device_id;
479 INIT_DEBUGOUT("em_attach: begin");
/* Wire the softc to the device and create the core/TX/RX locks. */
481 adapter = device_get_softc(dev);
482 adapter->dev = adapter->osdep.dev = dev;
483 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
484 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
485 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Per-device sysctl nodes for debug info and statistics dumps. */
488 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
489 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
490 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
491 em_sysctl_debug_info, "I", "Debug Information");
493 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
494 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
495 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
496 em_sysctl_stats, "I", "Statistics");
/* Callouts bound to their mutexes: periodic timer on the core lock,
 * 82547 TX FIFO workaround timer on the TX lock. */
498 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
499 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
501 /* Determine hardware and mac info */
502 em_identify_hardware(adapter);
504 /* Setup PCI resources */
505 if (em_allocate_pci_resources(adapter)) {
506 device_printf(dev, "Allocation of PCI resources failed\n");
512 ** For ICH8 and family we need to
513 ** map the flash memory, and this
514 ** must happen after the MAC is
517 if ((adapter->hw.mac.type == e1000_ich8lan) ||
518 (adapter->hw.mac.type == e1000_ich10lan) ||
519 (adapter->hw.mac.type == e1000_ich9lan)) {
520 int rid = EM_BAR_TYPE_FLASH;
521 adapter->flash = bus_alloc_resource_any(dev,
522 SYS_RES_MEMORY, &rid, RF_ACTIVE);
523 if (adapter->flash == NULL) {
524 device_printf(dev, "Mapping of Flash failed\n");
528 /* This is used in the shared code */
529 adapter->hw.flash_address = (u8 *)adapter->flash;
530 adapter->osdep.flash_bus_space_tag =
531 rman_get_bustag(adapter->flash);
532 adapter->osdep.flash_bus_space_handle =
533 rman_get_bushandle(adapter->flash);
536 /* Do Shared Code initialization */
537 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
538 device_printf(dev, "Setup of Shared code failed\n");
543 e1000_get_bus_info(&adapter->hw);
545 /* Set up some sysctls for the tunable interrupt delays */
546 em_add_int_delay_sysctl(adapter, "rx_int_delay",
547 "receive interrupt delay in usecs", &adapter->rx_int_delay,
548 E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
549 em_add_int_delay_sysctl(adapter, "tx_int_delay",
550 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
551 E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
/* Absolute-delay limits exist only on 82540 and newer MACs. */
552 if (adapter->hw.mac.type >= e1000_82540) {
553 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
554 "receive interrupt delay limit in usecs",
555 &adapter->rx_abs_int_delay,
556 E1000_REGISTER(&adapter->hw, E1000_RADV),
557 em_rx_abs_int_delay_dflt);
558 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
559 "transmit interrupt delay limit in usecs",
560 &adapter->tx_abs_int_delay,
561 E1000_REGISTER(&adapter->hw, E1000_TADV),
562 em_tx_abs_int_delay_dflt);
565 #ifndef EM_LEGACY_IRQ
566 /* Sysctls for limiting the amount of work done in the taskqueue */
567 em_add_rx_process_limit(adapter, "rx_processing_limit",
568 "max number of rx packets to process", &adapter->rx_process_limit,
569 em_rx_process_limit);
573 * Validate number of transmit and receive descriptors. It
574 * must not exceed hardware maximum, and must be multiple
575 * of E1000_DBA_ALIGN.
/* Fall back to EM_DEFAULT_TXD/RXD when the tunables are misaligned,
 * out of range, or exceed the per-MAC maxima. */
577 if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
578 (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
579 (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
580 (em_txd < EM_MIN_TXD)) {
581 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
582 EM_DEFAULT_TXD, em_txd);
583 adapter->num_tx_desc = EM_DEFAULT_TXD;
585 adapter->num_tx_desc = em_txd;
586 if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
587 (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
588 (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
589 (em_rxd < EM_MIN_RXD)) {
590 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
591 EM_DEFAULT_RXD, em_rxd);
592 adapter->num_rx_desc = EM_DEFAULT_RXD;
594 adapter->num_rx_desc = em_rxd;
/* Default link/PHY policy: autonegotiate, don't busy-wait for it. */
596 adapter->hw.mac.autoneg = DO_AUTO_NEG;
597 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
598 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
599 adapter->rx_buffer_len = 2048;
601 e1000_init_script_state_82541(&adapter->hw, TRUE);
602 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
605 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
606 adapter->hw.phy.mdix = AUTO_ALL_MODES;
607 adapter->hw.phy.disable_polarity_correction = FALSE;
608 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
612 * Set the frame limits assuming
613 * standard ethernet sized frames.
615 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
616 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
619 * This controls when hardware reports transmit completion
622 adapter->hw.mac.report_tx_early = 1;
624 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
627 /* Allocate Transmit Descriptor ring */
628 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
629 device_printf(dev, "Unable to allocate tx_desc memory\n");
633 adapter->tx_desc_base =
634 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
636 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
639 /* Allocate Receive Descriptor ring */
640 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
641 device_printf(dev, "Unable to allocate rx_desc memory\n");
645 adapter->rx_desc_base =
646 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
648 /* Make sure we have a good EEPROM before we read from it */
649 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
651 ** Some PCI-E parts fail the first check due to
652 ** the link being in sleep state, call it again,
653 ** if it fails a second time its a real issue.
655 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
657 "The EEPROM Checksum Is Not Valid\n");
663 /* Initialize the hardware */
664 if (em_hardware_init(adapter)) {
665 device_printf(dev, "Unable to initialize the hardware\n");
670 /* Copy the permanent MAC address out of the EEPROM */
671 if (e1000_read_mac_addr(&adapter->hw) < 0) {
672 device_printf(dev, "EEPROM read error while reading MAC"
678 if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
679 device_printf(dev, "Invalid MAC address\n");
684 /* Allocate transmit descriptors and buffers */
685 if (em_allocate_transmit_structures(adapter)) {
686 device_printf(dev, "Could not setup transmit structures\n");
691 /* Allocate receive descriptors and buffers */
692 if (em_allocate_receive_structures(adapter)) {
693 device_printf(dev, "Could not setup receive structures\n");
699 ** Do interrupt configuration
/* NOTE(review): "adapter->msi > 1" selecting MSI-X looks like an encoding
 * where msi holds the vector count -- confirm against the full source. */
701 if (adapter->msi > 1) /* Do MSI/X */
702 error = em_allocate_msix(adapter);
703 else /* MSI or Legacy */
704 error = em_allocate_legacy(adapter);
708 /* Setup OS specific network interface */
709 em_setup_interface(dev, adapter);
711 /* Initialize statistics */
712 em_update_stats_counters(adapter);
714 adapter->hw.mac.get_link_status = 1;
715 em_update_link_status(adapter);
717 /* Indicate SOL/IDER usage */
718 if (e1000_check_reset_block(&adapter->hw))
720 "PHY reset is blocked due to SOL/IDER session.\n");
722 /* Determine if we have to control management hardware */
723 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
/* Read the APME (wake-on-LAN enable) setting: from the NVM control word
 * for the listed MAC types (port B uses a different NVM word), from the
 * WUC register otherwise. */
728 switch (adapter->hw.mac.type) {
734 case e1000_82546_rev_3:
736 case e1000_80003es2lan:
737 if (adapter->hw.bus.func == 1)
738 e1000_read_nvm(&adapter->hw,
739 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
741 e1000_read_nvm(&adapter->hw,
742 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
743 eeprom_data &= EM_EEPROM_APME;
746 /* APME bit in EEPROM is mapped to WUC.APME */
747 eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) &
752 adapter->wol = E1000_WUFC_MAG;
754 * We have the eeprom settings, now apply the special cases
755 * where the eeprom may be wrong or the board won't support
756 * wake on lan on a particular port
758 device_id = pci_get_device(dev);
760 case E1000_DEV_ID_82546GB_PCIE:
763 case E1000_DEV_ID_82546EB_FIBER:
764 case E1000_DEV_ID_82546GB_FIBER:
765 case E1000_DEV_ID_82571EB_FIBER:
766 /* Wake events only supported on port A for dual fiber
767 * regardless of eeprom setting */
768 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
772 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
773 case E1000_DEV_ID_82571EB_QUAD_COPPER:
774 case E1000_DEV_ID_82571EB_QUAD_FIBER:
775 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
776 /* if quad port adapter, disable WoL on all but port A */
777 if (global_quad_port_a != 0)
779 /* Reset for multiple quad port adapters */
780 if (++global_quad_port_a == 4)
781 global_quad_port_a = 0;
785 /* Do we need workaround for 82544 PCI-X adapter? */
786 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
787 adapter->hw.mac.type == e1000_82544)
788 adapter->pcix_82544 = TRUE;
790 adapter->pcix_82544 = FALSE;
792 #ifdef EM_HW_VLAN_SUPPORT
793 /* Register for VLAN events */
794 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
795 em_register_vlan, 0, EVENTHANDLER_PRI_FIRST);
796 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
797 em_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST);
800 /* Tell the stack that the interface is not active */
801 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
803 INIT_DEBUGOUT("em_attach: end");
/* Error-unwind path: frees resources in reverse order of acquisition.
 * NOTE(review): the "err_*:" goto labels between these calls are elided
 * from this listing. */
808 em_free_transmit_structures(adapter);
811 em_release_hw_control(adapter);
812 em_dma_free(adapter, &adapter->rxdma);
814 em_dma_free(adapter, &adapter->txdma);
817 em_free_pci_resources(adapter);
818 EM_TX_LOCK_DESTROY(adapter);
819 EM_RX_LOCK_DESTROY(adapter);
820 EM_CORE_LOCK_DESTROY(adapter);
825 /*********************************************************************
826 * Device removal routine
828 * The detach entry point is called when the driver is being removed.
829 * This routine stops the adapter and deallocates all the resources
830 * that were allocated for driver operation.
832 * return 0 on success, positive on failure
833 *********************************************************************/
/* NOTE(review): the return-type line, several closing braces, the #else
 * between the two if_vlantrunk/if_nvlans checks, the em_stop()/EM_TX_LOCK
 * calls implied by the later EM_TX_UNLOCK, and the final return appear
 * elided from this listing. */
836 em_detach(device_t dev)
838 struct adapter *adapter = device_get_softc(dev);
839 struct ifnet *ifp = adapter->ifp;
841 INIT_DEBUGOUT("em_detach: begin");
843 /* Make sure VLANS are not using driver */
844 #if __FreeBSD_version >= 700000
845 if (adapter->ifp->if_vlantrunk != NULL) {
847 if (adapter->ifp->if_nvlans != 0) {
849 device_printf(dev,"Vlan in use, detach first\n");
853 #ifdef DEVICE_POLLING
854 if (ifp->if_capenable & IFCAP_POLLING)
855 ether_poll_deregister(ifp);
858 EM_CORE_LOCK(adapter);
/* Flag in-progress detach so other paths (ioctl, timers) back off. */
860 adapter->in_detach = 1;
862 e1000_phy_hw_reset(&adapter->hw);
864 em_release_manageability(adapter);
/* Hand hardware control back to firmware on manageability-capable parts. */
866 if (((adapter->hw.mac.type == e1000_82573) ||
867 (adapter->hw.mac.type == e1000_ich8lan) ||
868 (adapter->hw.mac.type == e1000_ich10lan) ||
869 (adapter->hw.mac.type == e1000_ich9lan)) &&
870 e1000_check_mng_mode(&adapter->hw))
871 em_release_hw_control(adapter);
/* Arm wake-on-LAN (PME enable + configured wake filters) before detach. */
874 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
875 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
876 em_enable_wakeup(dev);
879 EM_TX_UNLOCK(adapter);
880 EM_CORE_UNLOCK(adapter);
882 #ifdef EM_HW_VLAN_SUPPORT
883 /* Unregister VLAN events */
884 if (adapter->vlan_attach != NULL)
885 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
886 if (adapter->vlan_detach != NULL)
887 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
890 ether_ifdetach(adapter->ifp);
/* Callouts must be fully drained (not just stopped) before freeing. */
891 callout_drain(&adapter->timer);
892 callout_drain(&adapter->tx_fifo_timer);
894 em_free_pci_resources(adapter);
895 bus_generic_detach(dev);
898 em_free_transmit_structures(adapter);
899 em_free_receive_structures(adapter);
901 /* Free Transmit Descriptor ring */
902 if (adapter->tx_desc_base) {
903 em_dma_free(adapter, &adapter->txdma);
904 adapter->tx_desc_base = NULL;
907 /* Free Receive Descriptor ring */
908 if (adapter->rx_desc_base) {
909 em_dma_free(adapter, &adapter->rxdma);
910 adapter->rx_desc_base = NULL;
913 EM_TX_LOCK_DESTROY(adapter);
914 EM_RX_LOCK_DESTROY(adapter);
915 EM_CORE_LOCK_DESTROY(adapter);
920 /*********************************************************************
922 * Shutdown entry point
924 **********************************************************************/
/* System shutdown is handled identically to suspend: quiesce the adapter
 * and arm wake-up state.  NOTE(review): return-type line, opening brace,
 * and closing brace are elided from this listing. */
927 em_shutdown(device_t dev)
929 return em_suspend(dev);
933 * Suspend/resume device methods.
/* Quiesce the adapter, release manageability/hw control, and arm
 * wake-on-LAN before handing off to the generic bus suspend.
 * NOTE(review): return-type line, the em_stop()/EM_TX_LOCK calls implied
 * by EM_TX_UNLOCK, and closing braces are elided from this listing. */
936 em_suspend(device_t dev)
938 struct adapter *adapter = device_get_softc(dev);
940 EM_CORE_LOCK(adapter);
944 EM_TX_UNLOCK(adapter);
946 em_release_manageability(adapter);
/* Same manageability hand-back as em_detach for 82573/ICH8/9/10 parts. */
948 if (((adapter->hw.mac.type == e1000_82573) ||
949 (adapter->hw.mac.type == e1000_ich8lan) ||
950 (adapter->hw.mac.type == e1000_ich10lan) ||
951 (adapter->hw.mac.type == e1000_ich9lan)) &&
952 e1000_check_mng_mode(&adapter->hw))
953 em_release_hw_control(adapter);
/* Arm wake-on-LAN: PME enable plus the configured wake filters. */
956 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
957 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
958 em_enable_wakeup(dev);
961 EM_CORE_UNLOCK(adapter);
963 return bus_generic_suspend(dev);
/* Re-initialize the adapter and manageability state after a suspend, then
 * defer to the generic bus resume.  NOTE(review): return-type line, the
 * opening/closing braces, and any use of the local "ifp" (e.g. a deferred
 * em_start call) are elided from this listing. */
967 em_resume(device_t dev)
969 struct adapter *adapter = device_get_softc(dev);
970 struct ifnet *ifp = adapter->ifp;
972 EM_CORE_LOCK(adapter);
973 em_init_locked(adapter);
974 em_init_manageability(adapter);
975 EM_CORE_UNLOCK(adapter);
978 return bus_generic_resume(dev);
982 /*********************************************************************
983 * Transmit entry point
985 * em_start is called by the stack to initiate a transmit.
986 * The driver will remain in this routine as long as there are
987 * packets to transmit and transmit resources are available.
988 * In case resources are not available stack is notified and
989 * the packet is requeued.
990 **********************************************************************/
/*
 * Drains the interface send queue while descriptors are available.
 * Caller must hold the TX lock (asserted below).
 * NOTE(review): the listing omits early-return lines after the
 * RUNNING/OACTIVE and link_active checks, and the NULL check after
 * the dequeue.
 */
993 em_start_locked(struct ifnet *ifp)
995 struct adapter *adapter = ifp->if_softc;
998 EM_TX_LOCK_ASSERT(adapter);
1000 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1003 if (!adapter->link_active)
1006 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
1008 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1012 * Encapsulation can modify our pointer, and or make it
1013 * NULL on failure. In that event, we can't requeue.
/*
 * em_xmit() failed: mark the queue active-blocked and, if the mbuf
 * survived, push it back to the head of the send queue.
 */
1015 if (em_xmit(adapter, &m_head)) {
1018 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1019 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1023 /* Send a copy of the frame to the BPF listener */
1024 ETHER_BPF_MTAP(ifp, m_head);
1026 /* Set timeout in case hardware has problems transmitting. */
1027 adapter->watchdog_timer = EM_TX_TIMEOUT;
/*
 * if_start entry point: take the TX lock and run the locked
 * transmit path if the interface is up and running.
 */
1032 em_start(struct ifnet *ifp)
1034 struct adapter *adapter = ifp->if_softc;
1036 EM_TX_LOCK(adapter);
1037 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1038 em_start_locked(ifp);
1039 EM_TX_UNLOCK(adapter);
1042 /*********************************************************************
1045 * em_ioctl is called when the user wants to configure the
1048 * return 0 on success, positive on failure
1049 **********************************************************************/
/*
 * Interface ioctl handler. Dispatches on the ioctl command: address
 * assignment, MTU changes (with per-MAC jumbo-frame limits), interface
 * flags, multicast list updates, media get/set, capability toggles
 * (polling, checksum offload, TSO, VLAN tagging) and — when built with
 * timesync support — IEEE 1588 timestamp reads.
 * NOTE(review): the listing drops the switch(command) line, most case
 * labels, break statements and closing braces, so the case boundaries
 * below are inferred from the debug strings.
 */
1052 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1054 struct adapter *adapter = ifp->if_softc;
1055 struct ifreq *ifr = (struct ifreq *)data;
1056 struct ifaddr *ifa = (struct ifaddr *)data;
/* Refuse configuration while the device is detaching. */
1059 if (adapter->in_detach)
/* SIOCSIFADDR: bring the interface up only if not already running. */
1064 if (ifa->ifa_addr->sa_family == AF_INET) {
1067 * Since resetting hardware takes a very long time
1068 * and results in link renegotiation we only
1069 * initialize the hardware only when it is absolutely
1072 ifp->if_flags |= IFF_UP;
1073 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1074 EM_CORE_LOCK(adapter);
1075 em_init_locked(adapter);
1076 EM_CORE_UNLOCK(adapter);
1078 arp_ifinit(ifp, ifa);
1080 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: pick the per-MAC maximum frame size, then re-init. */
1085 u16 eeprom_data = 0;
1087 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1089 EM_CORE_LOCK(adapter);
1090 switch (adapter->hw.mac.type) {
1093 * 82573 only supports jumbo frames
1094 * if ASPM is disabled.
1096 e1000_read_nvm(&adapter->hw,
1097 NVM_INIT_3GIO_3, 1, &eeprom_data);
1098 if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
1099 max_frame_size = ETHER_MAX_LEN;
1102 /* Allow Jumbo frames - fall thru */
1106 case e1000_ich10lan:
1108 case e1000_80003es2lan: /* Limit Jumbo Frame size */
1109 max_frame_size = 9234;
1111 /* Adapters that do not support jumbo frames */
1114 max_frame_size = ETHER_MAX_LEN;
1117 max_frame_size = MAX_JUMBO_FRAME_SIZE;
/* Reject an MTU that would exceed the frame-size limit. */
1119 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1121 EM_CORE_UNLOCK(adapter);
1126 ifp->if_mtu = ifr->ifr_mtu;
1127 adapter->max_frame_size =
1128 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1129 em_init_locked(adapter);
1130 EM_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: fast-path promisc/allmulti changes, else re-init. */
1134 IOCTL_DEBUGOUT("ioctl rcv'd:\
1135 SIOCSIFFLAGS (Set Interface Flags)");
1136 EM_CORE_LOCK(adapter);
1137 if (ifp->if_flags & IFF_UP) {
1138 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1139 if ((ifp->if_flags ^ adapter->if_flags) &
1140 (IFF_PROMISC | IFF_ALLMULTI)) {
1141 em_disable_promisc(adapter);
1142 em_set_promisc(adapter);
1145 em_init_locked(adapter);
1147 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1148 EM_TX_LOCK(adapter);
1150 EM_TX_UNLOCK(adapter);
1152 adapter->if_flags = ifp->if_flags;
1153 EM_CORE_UNLOCK(adapter);
/* SIOCADDMULTI / SIOCDELMULTI: reprogram the multicast filter. */
1157 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
1158 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1159 EM_CORE_LOCK(adapter);
1160 em_disable_intr(adapter);
1161 em_set_multi(adapter);
/* 82542 rev 2 requires a receive-unit reset after filter changes. */
1162 if (adapter->hw.mac.type == e1000_82542 &&
1163 adapter->hw.revision_id == E1000_REVISION_2) {
1164 em_initialize_receive_unit(adapter);
1166 #ifdef DEVICE_POLLING
1167 if (!(ifp->if_capenable & IFCAP_POLLING))
1169 em_enable_intr(adapter);
1170 EM_CORE_UNLOCK(adapter);
/* SIOCSIFMEDIA/SIOCGIFMEDIA: blocked while a SOL/IDER session holds the PHY. */
1174 /* Check SOL/IDER usage */
1175 EM_CORE_LOCK(adapter);
1176 if (e1000_check_reset_block(&adapter->hw)) {
1177 EM_CORE_UNLOCK(adapter);
1178 device_printf(adapter->dev, "Media change is"
1179 " blocked due to SOL/IDER session.\n");
1182 EM_CORE_UNLOCK(adapter);
1184 IOCTL_DEBUGOUT("ioctl rcv'd: \
1185 SIOCxIFMEDIA (Get/Set Interface Media)");
1186 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle capabilities; re-init if a running config changed. */
1192 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1194 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1195 #ifdef DEVICE_POLLING
1196 if (mask & IFCAP_POLLING) {
1197 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1198 error = ether_poll_register(em_poll, ifp);
1201 EM_CORE_LOCK(adapter);
1202 em_disable_intr(adapter);
1203 ifp->if_capenable |= IFCAP_POLLING;
1204 EM_CORE_UNLOCK(adapter);
1206 error = ether_poll_deregister(ifp);
1207 /* Enable interrupt even in error case */
1208 EM_CORE_LOCK(adapter);
1209 em_enable_intr(adapter);
1210 ifp->if_capenable &= ~IFCAP_POLLING;
1211 EM_CORE_UNLOCK(adapter);
1215 if (mask & IFCAP_HWCSUM) {
1216 ifp->if_capenable ^= IFCAP_HWCSUM;
1219 #if __FreeBSD_version >= 700000
1220 if (mask & IFCAP_TSO4) {
1221 ifp->if_capenable ^= IFCAP_TSO4;
1226 if (mask & IFCAP_VLAN_HWTAGGING) {
1227 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1230 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1232 #if __FreeBSD_version >= 700000
1233 VLAN_CAPABILITIES(ifp);
1240 ** IOCTL support for Precision Time (IEEE 1588) Support
/* EM_TIMESYNC_READTS: copy hardware time/timestamps to user structure. */
1242 case EM_TIMESYNC_READTS:
1245 struct em_tsync_read *tdata;
1247 tdata = (struct em_tsync_read *) ifr->ifr_data;
1249 IOCTL_DEBUGOUT("Reading Timestamp\n");
1251 if (tdata->read_current_time) {
1252 getnanotime(&tdata->system_time);
1253 tdata->network_time = E1000_READ_REG(&adapter->hw, E1000_SYSTIML);
1254 tdata->network_time |=
1255 (u64)E1000_READ_REG(&adapter->hw, E1000_SYSTIMH ) << 32;
1258 rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
1259 tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
1261 IOCTL_DEBUGOUT1("RX_CTL value = %u\n", rx_ctl);
1262 IOCTL_DEBUGOUT1("TX_CTL value = %u\n", tx_ctl);
1265 IOCTL_DEBUGOUT("RX timestamp is valid\n");
1267 unsigned char *tmp_cp;
1269 tdata->rx_valid = 1;
1270 tdata->rx_stamp = E1000_READ_REG(&adapter->hw, E1000_RXSTMPL);
1271 tdata->rx_stamp |= (u64)E1000_READ_REG(&adapter->hw,
1272 E1000_RXSTMPH) << 32;
/* Extract the 6-byte source ID and sequence ID from RXSATRL/RXSATRH. */
1274 tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRL);
1275 tmp_cp = (unsigned char *) &tmp;
1276 tdata->srcid[0] = tmp_cp[0];
1277 tdata->srcid[1] = tmp_cp[1];
1278 tdata->srcid[2] = tmp_cp[2];
1279 tdata->srcid[3] = tmp_cp[3];
1280 tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRH);
1281 tmp_cp = (unsigned char *) &tmp;
1282 tdata->srcid[4] = tmp_cp[0];
1283 tdata->srcid[5] = tmp_cp[1];
1284 tdata->seqid = tmp >> 16;
1285 tdata->seqid = htons(tdata->seqid);
1287 tdata->rx_valid = 0;
1290 IOCTL_DEBUGOUT("TX timestamp is valid\n");
1291 tdata->tx_valid = 1;
1292 tdata->tx_stamp = E1000_READ_REG(&adapter->hw, E1000_TXSTMPL);
1293 tdata->tx_stamp |= (u64) E1000_READ_REG(&adapter->hw,
1294 E1000_TXSTMPH) << 32;
1296 tdata->tx_valid = 0;
1300 #endif /* EM_TIMESYNC */
/* default: hand anything unrecognized to the generic ethernet layer. */
1303 error = ether_ioctl(ifp, command, data);
1310 /*********************************************************************
1313 * This routine is called from the local timer every second.
1314 * As long as transmit descriptors are being cleaned the value
1315 * is non-zero and we do nothing. Reaching 0 indicates a tx hang
1316 * and we then reset the device.
1318 **********************************************************************/
/*
 * TX watchdog, invoked with the core lock held. Decrements the
 * per-adapter watchdog counter; when it hits zero and the hang is
 * not explained by flow-control pause frames, the interface is
 * marked down and reinitialized.
 */
1321 em_watchdog(struct adapter *adapter)
1324 EM_CORE_LOCK_ASSERT(adapter);
1327 ** The timer is set to 5 every time start queues a packet.
1328 ** Then txeof keeps resetting it as long as it cleans at
1329 ** least one descriptor.
1330 ** Finally, anytime all descriptors are clean the timer is
1333 EM_TX_LOCK(adapter);
/* Timer disarmed, or still counting down: nothing to do this tick. */
1334 if ((adapter->watchdog_timer == 0) || (--adapter->watchdog_timer)) {
1335 EM_TX_UNLOCK(adapter);
1339 /* If we are in this routine because of pause frames, then
1340 * don't reset the hardware.
/* Hardware reports TX paused by flow control — rearm and bail. */
1342 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
1343 E1000_STATUS_TXOFF) {
1344 adapter->watchdog_timer = EM_TX_TIMEOUT;
1345 EM_TX_UNLOCK(adapter);
/* Genuine hang: log (when link is up), mark down, count, re-init. */
1349 if (e1000_check_for_link(&adapter->hw) == 0)
1350 device_printf(adapter->dev, "watchdog timeout -- resetting\n");
1351 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1352 adapter->watchdog_events++;
1353 EM_TX_UNLOCK(adapter);
1355 em_init_locked(adapter);
1358 /*********************************************************************
1361 * This routine is used in two ways. It is used by the stack as
1362 * init entry point in network interface structure. It is also used
1363 * by the driver as a hw/sw initialization routine to get to a
1366 * return 0 on success, positive on failure
1367 **********************************************************************/
/*
 * Core (re)initialization, called with the core lock held: sizes the
 * packet buffer per MAC type, programs the MAC address, resets the
 * hardware, configures VLAN/offload capabilities, sets up TX/RX
 * structures, and re-enables interrupts (or leaves them off when
 * polling is active).
 * NOTE(review): the listing drops a number of lines (em_stop call,
 * several case labels, break statements, closing braces).
 */
1370 em_init_locked(struct adapter *adapter)
1372 struct ifnet *ifp = adapter->ifp;
1373 device_t dev = adapter->dev;
1376 INIT_DEBUGOUT("em_init: begin");
1378 EM_CORE_LOCK_ASSERT(adapter);
1380 EM_TX_LOCK(adapter);
1382 EM_TX_UNLOCK(adapter);
1385 * Packet Buffer Allocation (PBA)
1386 * Writing PBA sets the receive portion of the buffer
1387 * the remainder is used for the transmit buffer.
1389 * Devices before the 82547 had a Packet Buffer of 64K.
1390 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1391 * After the 82547 the buffer was reduced to 40K.
1392 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1393 * Note: default does not leave enough room for Jumbo Frame >10k.
1395 switch (adapter->hw.mac.type) {
1397 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1398 if (adapter->max_frame_size > 8192)
1399 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1401 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 also tracks a software TX FIFO for its half-duplex workaround. */
1402 adapter->tx_fifo_head = 0;
1403 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1404 adapter->tx_fifo_size =
1405 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1407 /* Total Packet Buffer on these is 48K */
1410 case e1000_80003es2lan:
1411 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1413 case e1000_82573: /* 82573: Total Packet Buffer is 32K */
1414 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1417 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
1420 case e1000_ich10lan:
1421 #define E1000_PBA_10K 0x000A
1422 pba = E1000_PBA_10K;
1428 /* Devices before 82547 had a Packet Buffer of 64K. */
1429 if (adapter->max_frame_size > 8192)
1430 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1432 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1435 INIT_DEBUGOUT1("em_init: pba=%dK",pba);
1436 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1438 /* Get the latest mac address, User can use a LAA */
1439 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1442 /* Put the address into the Receive Address Array */
1443 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1446 * With the 82571 adapter, RAR[0] may be overwritten
1447 * when the other port is reset, we make a duplicate
1448 * in RAR[14] for that eventuality, this assures
1449 * the interface continues to function.
1451 if (adapter->hw.mac.type == e1000_82571) {
1452 e1000_set_laa_state_82571(&adapter->hw, TRUE);
1453 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1454 E1000_RAR_ENTRIES - 1);
1457 /* Initialize the hardware */
1458 if (em_hardware_init(adapter)) {
1459 device_printf(dev, "Unable to initialize the hardware\n");
1462 em_update_link_status(adapter);
1464 /* Setup VLAN support, basic and offload if available */
1465 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1467 #ifndef EM_HW_VLAN_SUPPORT
/* Legacy VLAN tagging path: turn on VME in CTRL when enabled. */
1468 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1470 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1471 ctrl |= E1000_CTRL_VME;
1472 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1475 /* Set hardware offload abilities */
1476 ifp->if_hwassist = 0;
1477 if (adapter->hw.mac.type >= e1000_82543) {
1478 if (ifp->if_capenable & IFCAP_TXCSUM)
1479 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1480 #if __FreeBSD_version >= 700000
1481 if (ifp->if_capenable & IFCAP_TSO4)
1482 ifp->if_hwassist |= CSUM_TSO;
1486 /* Configure for OS presence */
1487 em_init_manageability(adapter);
1489 /* Prepare transmit descriptors and buffers */
1490 em_setup_transmit_structures(adapter);
1491 em_initialize_transmit_unit(adapter);
1493 /* Setup Multicast table */
1494 em_set_multi(adapter);
1496 /* Prepare receive descriptors and buffers */
1497 if (em_setup_receive_structures(adapter)) {
1498 device_printf(dev, "Could not setup receive structures\n");
1499 EM_TX_LOCK(adapter);
1501 EM_TX_UNLOCK(adapter);
1504 em_initialize_receive_unit(adapter);
1506 /* Don't lose promiscuous settings */
1507 em_set_promisc(adapter);
1509 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1510 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1512 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1513 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1515 /* MSI/X configuration for 82574 */
1516 if (adapter->hw.mac.type == e1000_82574) {
1518 tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1519 tmp |= E1000_CTRL_EXT_PBA_CLR;
1520 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1522 ** Set the IVAR - interrupt vector routing.
1523 ** Each nibble represents a vector, high bit
1524 ** is enable, other 3 bits are the MSIX table
1525 ** entry, we map RXQ0 to 0, TXQ0 to 1, and
1526 ** Link (other) to 2, hence the magic number.
1528 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
1531 #ifdef DEVICE_POLLING
1533 * Only enable interrupts if we are not polling, make sure
1534 * they are off otherwise.
1536 if (ifp->if_capenable & IFCAP_POLLING)
1537 em_disable_intr(adapter);
1539 #endif /* DEVICE_POLLING */
1540 em_enable_intr(adapter);
1543 /* Initializae IEEE 1588 Precision Time hardware */
1544 if ((adapter->hw.mac.type == e1000_82574) ||
1545 (adapter->hw.mac.type == e1000_ich10lan))
1546 em_tsync_init(adapter);
1549 /* Don't reset the phy next time init gets called */
1550 adapter->hw.phy.reset_disable = TRUE;
/*
 * if_init entry point: takes the core lock and delegates to
 * em_init_locked(). NOTE(review): the function signature line is
 * missing from this excerpt — presumably "static void em_init(void *arg)".
 */
1556 struct adapter *adapter = arg;
1558 EM_CORE_LOCK(adapter);
1559 em_init_locked(adapter);
1560 EM_CORE_UNLOCK(adapter);
1564 #ifdef DEVICE_POLLING
1565 /*********************************************************************
1567 * Legacy polling routine
1569 *********************************************************************/
/*
 * DEVICE_POLLING handler: on POLL_AND_CHECK_STATUS also reads ICR and
 * refreshes link state; then processes up to 'count' RX packets,
 * reaps TX descriptors, and restarts transmission if work is queued.
 * NOTE(review): lines dropped by the listing likely include the
 * em_txeof() call under the TX lock.
 */
1571 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1573 struct adapter *adapter = ifp->if_softc;
1576 EM_CORE_LOCK(adapter);
1577 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1578 EM_CORE_UNLOCK(adapter);
1582 if (cmd == POLL_AND_CHECK_STATUS) {
1583 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* Receive-sequence error or link-status change: re-check the link. */
1584 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1585 callout_stop(&adapter->timer);
1586 adapter->hw.mac.get_link_status = 1;
1587 em_update_link_status(adapter);
1588 callout_reset(&adapter->timer, hz,
1589 em_local_timer, adapter);
1592 EM_CORE_UNLOCK(adapter);
1594 em_rxeof(adapter, count);
1596 EM_TX_LOCK(adapter);
1599 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1600 em_start_locked(ifp);
1601 EM_TX_UNLOCK(adapter);
1603 #endif /* DEVICE_POLLING */
1605 #ifdef EM_LEGACY_IRQ
1606 /*********************************************************************
1608 * Legacy Interrupt Service routine
1610 *********************************************************************/
/*
 * Legacy (non-fast) interrupt handler. Reads and thereby acknowledges
 * ICR, filters spurious/shared interrupts, then does RX/TX cleanup and
 * link-status handling inline under the driver locks.
 * NOTE(review): the function signature line is missing from this
 * excerpt — presumably "static void em_intr(void *arg)".
 */
1615 struct adapter *adapter = arg;
1616 struct ifnet *ifp = adapter->ifp;
/* When polling owns the device, the interrupt path does nothing. */
1620 if (ifp->if_capenable & IFCAP_POLLING)
1623 EM_CORE_LOCK(adapter);
1625 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* 82571+: bit 31 (INT_ASSERTED) clear means this isn't our interrupt. */
1627 if (adapter->hw.mac.type >= e1000_82571 &&
1628 (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1630 else if (reg_icr == 0)
1634 * XXX: some laptops trigger several spurious interrupts
1635 * on em(4) when in the resume cycle. The ICR register
1636 * reports all-ones value in this case. Processing such
1637 * interrupts would lead to a freeze. I don't know why.
1639 if (reg_icr == 0xffffffff)
1642 EM_CORE_UNLOCK(adapter);
1643 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1644 em_rxeof(adapter, -1);
1645 EM_TX_LOCK(adapter);
1647 EM_TX_UNLOCK(adapter);
1649 EM_CORE_LOCK(adapter);
1651 /* Link status change */
1652 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1653 callout_stop(&adapter->timer);
1654 adapter->hw.mac.get_link_status = 1;
1655 em_update_link_status(adapter);
1656 /* Deal with TX cruft when link lost */
1657 em_tx_purge(adapter);
1658 callout_reset(&adapter->timer, hz,
1659 em_local_timer, adapter);
1662 if (reg_icr & E1000_ICR_RXO)
1663 adapter->rx_overruns++;
1665 EM_CORE_UNLOCK(adapter);
/* Kick the transmit path if packets queued up during servicing. */
1667 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1668 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1675 em_handle_link(void *context, int pending)
1677 struct adapter *adapter = context;
1678 struct ifnet *ifp = adapter->ifp;
1680 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1683 EM_CORE_LOCK(adapter);
1684 callout_stop(&adapter->timer);
1685 em_update_link_status(adapter);
1686 /* Deal with TX cruft when link lost */
1687 em_tx_purge(adapter);
1688 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1689 EM_CORE_UNLOCK(adapter);
1693 /* Combined RX/TX handler, used by Legacy and MSI */
/*
 * Taskqueue handler driving both directions: processes a bounded
 * batch of RX packets (re-queuing itself if more remain), reaps TX,
 * restarts transmission, then re-enables the interrupt that
 * em_irq_fast() masked.
 */
1695 em_handle_rxtx(void *context, int pending)
1697 struct adapter *adapter = context;
1698 struct ifnet *ifp = adapter->ifp;
1701 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Nonzero return means more RX work pending — reschedule ourselves. */
1702 if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
1703 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1704 EM_TX_LOCK(adapter);
1707 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1708 em_start_locked(ifp);
1709 EM_TX_UNLOCK(adapter);
1712 em_enable_intr(adapter);
/*
 * Taskqueue handler for the RX-only (MSIX) path: processes a batch
 * of received packets and re-queues itself while work remains.
 */
1716 em_handle_rx(void *context, int pending)
1718 struct adapter *adapter = context;
1719 struct ifnet *ifp = adapter->ifp;
1721 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1722 (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1723 taskqueue_enqueue(adapter->tq, &adapter->rx_task);
/*
 * Taskqueue handler for the TX-only (MSIX) path: under the TX lock,
 * restarts transmission if packets are queued.
 * NOTE(review): the em_txeof() cleanup call appears to have been
 * dropped from this excerpt (gap between listing lines 1734 and 1736).
 */
1730 em_handle_tx(void *context, int pending)
1732 struct adapter *adapter = context;
1733 struct ifnet *ifp = adapter->ifp;
1735 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1736 EM_TX_LOCK(adapter);
1738 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1739 em_start_locked(ifp);
1740 EM_TX_UNLOCK(adapter);
1742 /*********************************************************************
1744 * Fast Legacy/MSI Combined Interrupt Service routine
1746 *********************************************************************/
1747 #if __FreeBSD_version < 700000
1748 #define FILTER_STRAY
1749 #define FILTER_HANDLED
/*
 * Interrupt filter: does minimal work in interrupt context — reject
 * stray interrupts, mask further interrupts, then defer RX/TX and
 * link work to taskqueues. Returns FILTER_HANDLED/FILTER_STRAY.
 */
1754 em_irq_fast(void *arg)
1756 struct adapter *adapter = arg;
1762 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* All-ones ICR: spurious (e.g. during resume) — not ours. */
1765 if (reg_icr == 0xffffffff)
1766 return FILTER_STRAY;
1768 /* Definitely not our interrupt. */
1770 return FILTER_STRAY;
1773 * Starting with the 82571 chip, bit 31 should be used to
1774 * determine whether the interrupt belongs to us.
1776 if (adapter->hw.mac.type >= e1000_82571 &&
1777 (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1778 return FILTER_STRAY;
1781 * Mask interrupts until the taskqueue is finished running. This is
1782 * cheap, just assume that it is needed. This also works around the
1783 * MSI message reordering errata on certain systems.
1785 em_disable_intr(adapter);
1786 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1788 /* Link status change */
1789 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1790 adapter->hw.mac.get_link_status = 1;
1791 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1794 if (reg_icr & E1000_ICR_RXO)
1795 adapter->rx_overruns++;
1796 return FILTER_HANDLED;
1799 /*********************************************************************
1801 * MSIX Interrupt Service Routines
1803 **********************************************************************/
/* IMS bits used to re-arm each MSIX vector after servicing. */
1804 #define EM_MSIX_TX 0x00040000
1805 #define EM_MSIX_RX 0x00010000
1806 #define EM_MSIX_LINK 0x00100000
/*
 * MSIX TX vector handler: defers TX cleanup/start to the tx taskqueue
 * and re-enables this vector's interrupt via IMS.
 * NOTE(review): a line is missing between the TX lock/unlock pair
 * (listing line 1817) — likely the em_txeof() call.
 */
1809 em_msix_tx(void *arg)
1811 struct adapter *adapter = arg;
1812 struct ifnet *ifp = adapter->ifp;
1815 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1816 EM_TX_LOCK(adapter);
1818 EM_TX_UNLOCK(adapter);
1819 taskqueue_enqueue(adapter->tq, &adapter->tx_task);
1821 /* Reenable this interrupt */
1822 E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_TX);
1826 /*********************************************************************
1828 * MSIX RX Interrupt Service routine
1830 **********************************************************************/
/*
 * MSIX RX vector handler: processes one RX batch; if more work
 * remains, hands off to the rx taskqueue, then re-arms the vector.
 */
1833 em_msix_rx(void *arg)
1835 struct adapter *adapter = arg;
1836 struct ifnet *ifp = adapter->ifp;
1839 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1840 (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1841 taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1842 /* Reenable this interrupt */
1843 E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_RX);
1847 /*********************************************************************
1849 * MSIX Link Fast Interrupt Service routine
1851 **********************************************************************/
/*
 * MSIX link vector handler: counts the IRQ, reads ICR, schedules the
 * link task on RXSEQ/LSC, and re-arms the link vector plus LSC.
 */
1854 em_msix_link(void *arg)
1856 struct adapter *adapter = arg;
1859 ++adapter->link_irq;
1860 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1862 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1863 adapter->hw.mac.get_link_status = 1;
1864 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1866 E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1867 EM_MSIX_LINK | E1000_IMS_LSC);
1872 /*********************************************************************
1874 * Media Ioctl callback
1876 * This routine is called whenever the user queries the status of
1877 * the interface using ifconfig.
1879 **********************************************************************/
/*
 * Reports link validity/activity and the active media word:
 * fiber/serdes adapters report 1000_SX (or 1000_LX on 82545);
 * copper reports speed from adapter->link_speed plus duplex.
 */
1881 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1883 struct adapter *adapter = ifp->if_softc;
1884 u_char fiber_type = IFM_1000_SX;
1886 INIT_DEBUGOUT("em_media_status: begin");
1888 EM_CORE_LOCK(adapter);
1889 em_update_link_status(adapter);
1891 ifmr->ifm_status = IFM_AVALID;
1892 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive and return early. */
1894 if (!adapter->link_active) {
1895 EM_CORE_UNLOCK(adapter);
1899 ifmr->ifm_status |= IFM_ACTIVE;
1901 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1902 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1903 if (adapter->hw.mac.type == e1000_82545)
1904 fiber_type = IFM_1000_LX;
1905 ifmr->ifm_active |= fiber_type | IFM_FDX;
/* Copper: translate the negotiated speed and duplex. */
1907 switch (adapter->link_speed) {
1909 ifmr->ifm_active |= IFM_10_T;
1912 ifmr->ifm_active |= IFM_100_TX;
1915 ifmr->ifm_active |= IFM_1000_T;
1918 if (adapter->link_duplex == FULL_DUPLEX)
1919 ifmr->ifm_active |= IFM_FDX;
1921 ifmr->ifm_active |= IFM_HDX;
1923 EM_CORE_UNLOCK(adapter);
1926 /*********************************************************************
1928 * Media Ioctl callback
1930 * This routine is called when the user changes speed/duplex using
1931 * media/mediopt option with ifconfig.
1933 **********************************************************************/
/*
 * Applies a user-requested media selection: autoselect and 1000baseT
 * use autonegotiation; 100/10baseT force speed and duplex. Clears
 * phy.reset_disable so the following em_init_locked() renegotiates.
 * NOTE(review): the listing drops the case labels and break lines;
 * cases inferred from the ADVERTISE_* constants.
 */
1935 em_media_change(struct ifnet *ifp)
1937 struct adapter *adapter = ifp->if_softc;
1938 struct ifmedia *ifm = &adapter->media;
1940 INIT_DEBUGOUT("em_media_change: begin");
1942 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1945 EM_CORE_LOCK(adapter);
1946 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1948 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1949 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1954 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1955 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
/* Forced 100 Mb/s: duplex taken from the media word's GMASK bits. */
1958 adapter->hw.mac.autoneg = FALSE;
1959 adapter->hw.phy.autoneg_advertised = 0;
1960 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1961 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1963 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
/* Forced 10 Mb/s, same duplex selection scheme. */
1966 adapter->hw.mac.autoneg = FALSE;
1967 adapter->hw.phy.autoneg_advertised = 0;
1968 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1969 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1971 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1974 device_printf(adapter->dev, "Unsupported media type\n");
1977 /* As the speed/duplex settings my have changed we need to
1980 adapter->hw.phy.reset_disable = FALSE;
1982 em_init_locked(adapter);
1983 EM_CORE_UNLOCK(adapter);
1988 /*********************************************************************
1990 * This routine maps the mbufs to tx descriptors.
1992 * return 0 on success, positive on failure
1993 **********************************************************************/
/*
 * Encapsulate one packet: DMA-map the mbuf chain (defragmenting on
 * EFBIG), apply TSO/checksum offload context, fill TX descriptors
 * (with the 82544-PCIX split workaround and a TSO sentinel descriptor
 * where required), attach VLAN tags, and advance the tail register.
 * On failure the mbuf may be freed and *m_headp set to NULL — the
 * caller must not requeue in that case.
 * NOTE(review): descriptor bookkeeping here is strictly
 * order-dependent; the listing also drops many structural lines
 * (returns, braces, some assignments such as last/tso_desc updates).
 */
1996 em_xmit(struct adapter *adapter, struct mbuf **m_headp)
1998 bus_dma_segment_t segs[EM_MAX_SCATTER];
2000 struct em_buffer *tx_buffer, *tx_buffer_mapped;
2001 struct e1000_tx_desc *ctxd = NULL;
2002 struct mbuf *m_head;
2003 u32 txd_upper, txd_lower, txd_used, txd_saved;
2004 int nsegs, i, j, first, last = 0;
2005 int error, do_tso, tso_desc = 0;
2006 #if __FreeBSD_version < 700000
2010 txd_upper = txd_lower = txd_used = txd_saved = 0;
2012 #if __FreeBSD_version >= 700000
2013 do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
2019 * Force a cleanup if number of TX descriptors
2020 * available hits the threshold
2022 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
2024 /* Now do we at least have a minimal? */
2025 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
2026 adapter->no_tx_desc_avail1++;
2034 * If an mbuf is only header we need
2035 * to pull 4 bytes of data into it.
2037 if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
2038 m_head = m_pullup(m_head, M_TSO_LEN + 4);
2045 * Map the packet for DMA
2047 * Capture the first descriptor index,
2048 * this descriptor will have the index
2049 * of the EOP which is the only one that
2050 * now gets a DONE bit writeback.
2052 first = adapter->next_avail_tx_desc;
2053 tx_buffer = &adapter->tx_buffer_area[first];
2054 tx_buffer_mapped = tx_buffer;
2055 map = tx_buffer->map;
2057 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2058 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2061 * There are two types of errors we can (try) to handle:
2062 * - EFBIG means the mbuf chain was too long and bus_dma ran
2063 * out of segments. Defragment the mbuf chain and try again.
2064 * - ENOMEM means bus_dma could not obtain enough bounce buffers
2065 * at this point in time. Defer sending and try again later.
2066 * All other errors, in particular EINVAL, are fatal and prevent the
2067 * mbuf chain from ever going through. Drop it and report error.
2069 if (error == EFBIG) {
2072 m = m_defrag(*m_headp, M_DONTWAIT);
2074 adapter->mbuf_alloc_failed++;
/* Retry the DMA load once with the defragmented chain. */
2082 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2083 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2085 if (error == ENOMEM) {
2086 adapter->no_tx_dma_setup++;
2088 } else if (error != 0) {
2089 adapter->no_tx_dma_setup++;
2094 } else if (error == ENOMEM) {
2095 adapter->no_tx_dma_setup++;
2097 } else if (error != 0) {
2098 adapter->no_tx_dma_setup++;
2105 * TSO Hardware workaround, if this packet is not
2106 * TSO, and is only a single descriptor long, and
2107 * it follows a TSO burst, then we need to add a
2108 * sentinel descriptor to prevent premature writeback.
2110 if ((do_tso == 0) && (adapter->tx_tso == TRUE)) {
2113 adapter->tx_tso = FALSE;
/* Keep two descriptors in reserve; otherwise fail and unload the map. */
2116 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
2117 adapter->no_tx_desc_avail2++;
2118 bus_dmamap_unload(adapter->txtag, map);
2123 /* Do hardware assists */
2124 #if __FreeBSD_version >= 700000
2125 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2126 error = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
2128 return (ENXIO); /* something foobar */
2129 /* we need to make a final sentinel transmit desc */
2135 ** Timesync needs to check the packet header
2136 ** so call checksum code to do so, but don't
2137 ** penalize the code if not defined.
2139 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
2141 em_transmit_checksum_setup(adapter, m_head,
2142 &txd_upper, &txd_lower);
2144 i = adapter->next_avail_tx_desc;
2145 if (adapter->pcix_82544)
2148 /* Set up our transmit descriptors */
2149 for (j = 0; j < nsegs; j++) {
2151 bus_addr_t seg_addr;
2152 /* If adapter is 82544 and on PCIX bus */
2153 if(adapter->pcix_82544) {
2154 DESC_ARRAY desc_array;
2155 u32 array_elements, counter;
2157 * Check the Address and Length combination and
2158 * split the data accordingly
2160 array_elements = em_fill_descriptors(segs[j].ds_addr,
2161 segs[j].ds_len, &desc_array);
2162 for (counter = 0; counter < array_elements; counter++) {
/* Out of descriptors mid-packet: roll back and unload the map. */
2163 if (txd_used == adapter->num_tx_desc_avail) {
2164 adapter->next_avail_tx_desc = txd_saved;
2165 adapter->no_tx_desc_avail2++;
2166 bus_dmamap_unload(adapter->txtag, map);
2169 tx_buffer = &adapter->tx_buffer_area[i];
2170 ctxd = &adapter->tx_desc_base[i];
2171 ctxd->buffer_addr = htole64(
2172 desc_array.descriptor[counter].address);
2173 ctxd->lower.data = htole32(
2174 (adapter->txd_cmd | txd_lower | (u16)
2175 desc_array.descriptor[counter].length));
2177 htole32((txd_upper));
/* Descriptor ring index wraps at num_tx_desc. */
2179 if (++i == adapter->num_tx_desc)
2181 tx_buffer->m_head = NULL;
2182 tx_buffer->next_eop = -1;
2186 tx_buffer = &adapter->tx_buffer_area[i];
2187 ctxd = &adapter->tx_desc_base[i];
2188 seg_addr = segs[j].ds_addr;
2189 seg_len = segs[j].ds_len;
2192 ** If this is the last descriptor, we want to
2193 ** split it so we have a small final sentinel
2195 if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
2197 ctxd->buffer_addr = htole64(seg_addr);
2198 ctxd->lower.data = htole32(
2199 adapter->txd_cmd | txd_lower | seg_len);
2202 if (++i == adapter->num_tx_desc)
2204 /* Now make the sentinel */
2205 ++txd_used; /* using an extra txd */
2206 ctxd = &adapter->tx_desc_base[i];
2207 tx_buffer = &adapter->tx_buffer_area[i];
2209 htole64(seg_addr + seg_len);
2210 ctxd->lower.data = htole32(
2211 adapter->txd_cmd | txd_lower | 4);
2215 if (++i == adapter->num_tx_desc)
2218 ctxd->buffer_addr = htole64(seg_addr);
2219 ctxd->lower.data = htole32(
2220 adapter->txd_cmd | txd_lower | seg_len);
2224 if (++i == adapter->num_tx_desc)
2227 tx_buffer->m_head = NULL;
2228 tx_buffer->next_eop = -1;
/* Account for the descriptors just consumed. */
2232 adapter->next_avail_tx_desc = i;
2233 if (adapter->pcix_82544)
2234 adapter->num_tx_desc_avail -= txd_used;
2236 adapter->num_tx_desc_avail -= nsegs;
2237 if (tso_desc) /* TSO used an extra for sentinel */
2238 adapter->num_tx_desc_avail -= txd_used;
2242 ** Handle VLAN tag, this is the
2243 ** biggest difference between
2246 #if __FreeBSD_version < 700000
2247 /* Find out if we are in vlan mode. */
2248 mtag = VLAN_OUTPUT_TAG(ifp, m_head);
2250 ctxd->upper.fields.special =
2251 htole16(VLAN_TAG_VALUE(mtag));
2252 #else /* FreeBSD 7 */
2253 if (m_head->m_flags & M_VLANTAG) {
2254 /* Set the vlan id. */
2255 ctxd->upper.fields.special =
2256 htole16(m_head->m_pkthdr.ether_vtag);
2258 /* Tell hardware to add tag */
2259 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
/* Swap maps so the EOP buffer owns the loaded DMA map. */
2262 tx_buffer->m_head = m_head;
2263 tx_buffer_mapped->map = tx_buffer->map;
2264 tx_buffer->map = map;
2265 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
2268 * Last Descriptor of Packet
2269 * needs End Of Packet (EOP)
2270 * and Report Status (RS)
2273 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
2275 * Keep track in the first buffer which
2276 * descriptor will be written back
2278 tx_buffer = &adapter->tx_buffer_area[first];
2279 tx_buffer->next_eop = last;
2282 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
2283 * that this frame is available to transmit.
2285 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2286 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* 82547 in half duplex uses the deferred-tail FIFO workaround. */
2287 if (adapter->hw.mac.type == e1000_82547 &&
2288 adapter->link_duplex == HALF_DUPLEX)
2289 em_82547_move_tail(adapter);
2291 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
2292 if (adapter->hw.mac.type == e1000_82547)
2293 em_82547_update_fifo_head(adapter,
2294 m_head->m_pkthdr.len);
2298 if (ctxd->upper.data & E1000_TXD_EXTCMD_TSTAMP) {
2299 HW_DEBUGOUT( "@@@ Timestamp bit is set in transmit descriptor\n" );
2305 /*********************************************************************
2307 * 82547 workaround to avoid controller hang in half-duplex environment.
2308 * The workaround is to avoid queuing a large packet that would span
2309 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
2310 * in this case. We do that only when FIFO is quiescent.
2312 **********************************************************************/
/* Also used as a callout handler (see the callout_reset below), hence void *arg. */
2314 em_82547_move_tail(void *arg)
2316 struct adapter *adapter = arg;
2317 struct e1000_tx_desc *tx_desc;
2318 u16 hw_tdt, sw_tdt, length = 0;
/* NOTE(review): the declaration of 'eop' (used below) is on a line elided
 * from this listing — confirm against the full file. */
/* Must hold the TX lock: we walk and rewrite the hardware tail pointer. */
2321 EM_TX_LOCK_ASSERT(adapter);
2323 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
2324 sw_tdt = adapter->next_avail_tx_desc;
/* Walk descriptors between the hardware tail and our software tail,
 * accumulating the byte length of the pending packet(s). */
2326 while (hw_tdt != sw_tdt) {
2327 tx_desc = &adapter->tx_desc_base[hw_tdt];
2328 length += tx_desc->lower.flags.length;
2329 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
/* Wrap the ring index (wrap statement itself elided from listing). */
2330 if (++hw_tdt == adapter->num_tx_desc)
/* If the packet would straddle the FIFO boundary, defer: retry this
 * same routine one tick later instead of advancing the tail now. */
2334 if (em_82547_fifo_workaround(adapter, length)) {
2335 adapter->tx_fifo_wrk_cnt++;
2336 callout_reset(&adapter->tx_fifo_timer, 1,
2337 em_82547_move_tail, adapter);
/* Safe to transmit: publish the new tail and account the FIFO head. */
2340 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
2341 em_82547_update_fifo_head(adapter, length);
/* Decide whether a packet of 'len' bytes may be queued without spanning
 * the 82547's internal Tx FIFO boundary (half-duplex hang workaround).
 * Returns nonzero when transmission must be deferred; the return
 * statements are on lines elided from this listing. */
2348 em_82547_fifo_workaround(struct adapter *adapter, int len)
2350 int fifo_space, fifo_pkt_len;
/* FIFO usage is accounted in EM_FIFO_HDR-sized units. */
2352 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
/* The hang only occurs in half-duplex; full-duplex needs no check. */
2354 if (adapter->link_duplex == HALF_DUPLEX) {
2355 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
/* Packet too large for the remaining FIFO space (plus threshold):
 * try to reset the FIFO pointers; only possible when quiescent. */
2357 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
2358 if (em_82547_tx_fifo_reset(adapter))
/* Advance the software-tracked Tx FIFO head by the (rounded-up) packet
 * length, wrapping modulo the FIFO size. Companion to the 82547
 * FIFO-boundary workaround above. */
2369 em_82547_update_fifo_head(struct adapter *adapter, int len)
2371 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2373 /* tx_fifo_head is always 16 byte aligned */
2374 adapter->tx_fifo_head += fifo_pkt_len;
/* Wrap around the circular FIFO. */
2375 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
2376 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/* Reset the 82547 Tx FIFO head/tail pointers, but only when the FIFO is
 * completely quiescent (descriptor ring drained and FIFO empty).
 * Return value/statements are on lines elided from this listing —
 * presumably nonzero on success; confirm against the full file. */
2382 em_82547_tx_fifo_reset(struct adapter *adapter)
/* Quiescence test: TDT==TDH (ring drained), FIFO tail==head and
 * saved tail==saved head, and zero bytes pending in the FIFO (TDFPC). */
2386 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
2387 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
2388 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
2389 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
2390 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
2391 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
2392 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
2393 /* Disable TX unit */
2394 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2395 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
2396 tctl & ~E1000_TCTL_EN);
2398 /* Reset FIFO pointers */
2399 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
2400 adapter->tx_head_addr);
2401 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
2402 adapter->tx_head_addr);
2403 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
2404 adapter->tx_head_addr);
2405 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
2406 adapter->tx_head_addr);
2408 /* Re-enable TX unit */
2409 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2410 E1000_WRITE_FLUSH(&adapter->hw);
/* Restart software FIFO accounting and bump the reset statistic. */
2412 adapter->tx_fifo_head = 0;
2413 adapter->tx_fifo_reset_cnt++;
/* Program RCTL promiscuous/all-multicast bits from the interface flags
 * (IFF_PROMISC / IFF_ALLMULTI). */
2423 em_set_promisc(struct adapter *adapter)
2425 struct ifnet *ifp = adapter->ifp;
2428 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2430 if (ifp->if_flags & IFF_PROMISC) {
/* UPE = unicast promiscuous, MPE = multicast promiscuous. */
2431 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2432 /* Turn this on if you want to see bad packets */
/* NOTE(review): the condition guarding SBP (store bad packets) is on a
 * line elided from this listing — likely a debug tunable; confirm. */
2434 reg_rctl |= E1000_RCTL_SBP;
2435 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2436 } else if (ifp->if_flags & IFF_ALLMULTI) {
/* All-multicast only: accept all multicast, but not all unicast. */
2437 reg_rctl |= E1000_RCTL_MPE;
2438 reg_rctl &= ~E1000_RCTL_UPE;
2439 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/* Clear all promiscuous-mode related RCTL bits: unicast promiscuous,
 * multicast promiscuous, and store-bad-packets. */
2444 em_disable_promisc(struct adapter *adapter)
2448 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2450 reg_rctl &= (~E1000_RCTL_UPE);
2451 reg_rctl &= (~E1000_RCTL_MPE);
2452 reg_rctl &= (~E1000_RCTL_SBP);
2453 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2457 /*********************************************************************
2460 * This routine is called whenever multicast address list is updated.
2462 **********************************************************************/
2465 em_set_multi(struct adapter *adapter)
2467 struct ifnet *ifp = adapter->ifp;
2468 struct ifmultiaddr *ifma;
2470 u8 mta[512]; /* Largest MTS is 4096 bits */
2473 IOCTL_DEBUGOUT("em_set_multi: begin");
/* 82542 rev2 errata: the receiver must be held in reset (RCTL_RST) and
 * MWI disabled while the multicast table array is updated. */
2475 if (adapter->hw.mac.type == e1000_82542 &&
2476 adapter->hw.revision_id == E1000_REVISION_2) {
2477 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2478 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2479 e1000_pci_clear_mwi(&adapter->hw);
2480 reg_rctl |= E1000_RCTL_RST;
2481 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/* Collect the link-layer multicast addresses into mta[].
 * NOTE(review): the matching IF_ADDR_LOCK() appears to be on a line
 * elided from this listing (only the UNLOCK is visible) — confirm. */
2486 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2487 if (ifma->ifma_addr->sa_family != AF_LINK)
2490 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2493 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2494 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2497 IF_ADDR_UNLOCK(ifp);
/* Too many groups for the hardware table: fall back to accepting
 * all multicast traffic. */
2499 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2500 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2501 reg_rctl |= E1000_RCTL_MPE;
2502 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2504 e1000_update_mc_addr_list(&adapter->hw, mta,
2505 mcnt, 1, adapter->hw.mac.rar_entry_count);
/* Undo the 82542 rev2 workaround: release reset, restore MWI. */
2507 if (adapter->hw.mac.type == e1000_82542 &&
2508 adapter->hw.revision_id == E1000_REVISION_2) {
2509 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2510 reg_rctl &= ~E1000_RCTL_RST;
2511 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2513 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2514 e1000_pci_set_mwi(&adapter->hw);
2519 /*********************************************************************
2522 * This routine checks for link status and updates statistics.
2524 **********************************************************************/
/* Periodic (once per second) callout; reschedules itself at the end. */
2527 em_local_timer(void *arg)
2529 struct adapter *adapter = arg;
2530 struct ifnet *ifp = adapter->ifp;
2532 EM_CORE_LOCK_ASSERT(adapter);
2534 em_update_link_status(adapter);
2535 em_update_stats_counters(adapter);
2537 /* Reset LAA into RAR[0] on 82571 */
2538 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
2539 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
/* Optional debug dump, only while the interface is running. */
2541 if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
2542 em_print_hw_stats(adapter);
2544 em_smartspeed(adapter);
2547 * Each second we check the watchdog to
2548 * protect against hardware hangs.
2550 em_watchdog(adapter);
/* Re-arm ourselves for one second (hz ticks) from now. */
2552 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
/* Determine current link state per media type, and on a transition
 * (up->down or down->up) update cached speed/duplex, ifnet state, and
 * log the change. */
2557 em_update_link_status(struct adapter *adapter)
2559 struct e1000_hw *hw = &adapter->hw;
2560 struct ifnet *ifp = adapter->ifp;
2561 device_t dev = adapter->dev;
2564 /* Get the cached link value or read phy for real */
2565 switch (hw->phy.media_type) {
2566 case e1000_media_type_copper:
2567 if (hw->mac.get_link_status) {
2568 /* Do the work to read phy */
2569 e1000_check_for_link(hw);
/* get_link_status is cleared by the shared code once link is up. */
2570 link_check = !hw->mac.get_link_status;
2571 if (link_check) /* ESB2 fix */
2572 e1000_cfg_on_link_up(hw);
2576 case e1000_media_type_fiber:
2577 e1000_check_for_link(hw);
/* Fiber: link bit read straight from the STATUS register
 * (the mask constant is on a line elided from this listing). */
2578 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2581 case e1000_media_type_internal_serdes:
2582 e1000_check_for_link(hw);
2583 link_check = adapter->hw.mac.serdes_has_link;
2586 case e1000_media_type_unknown:
2590 /* Now check for a transition */
2591 if (link_check && (adapter->link_active == 0)) {
/* Link came up: read negotiated speed/duplex from the hardware. */
2592 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2593 &adapter->link_duplex);
2594 /* Check if we must disable SPEED_MODE bit on PCI-E */
2595 if ((adapter->link_speed != SPEED_1000) &&
2596 ((hw->mac.type == e1000_82571) ||
2597 (hw->mac.type == e1000_82572))) {
2599 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
2600 tarc0 &= ~SPEED_MODE_BIT;
2601 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
2604 device_printf(dev, "Link is up %d Mbps %s\n",
2605 adapter->link_speed,
2606 ((adapter->link_duplex == FULL_DUPLEX) ?
2607 "Full Duplex" : "Half Duplex"));
2608 adapter->link_active = 1;
2609 adapter->smartspeed = 0;
/* link_speed is in Mbps; if_baudrate wants bits per second. */
2610 ifp->if_baudrate = adapter->link_speed * 1000000;
2611 if_link_state_change(ifp, LINK_STATE_UP);
2612 } else if (!link_check && (adapter->link_active == 1)) {
/* Link went down: zero the cached state and notify the stack. */
2613 ifp->if_baudrate = adapter->link_speed = 0;
2614 adapter->link_duplex = 0;
2616 device_printf(dev, "Link is Down\n");
2617 adapter->link_active = 0;
2618 /* Link down, disable watchdog */
2619 adapter->watchdog_timer = FALSE;
2620 if_link_state_change(ifp, LINK_STATE_DOWN);
2624 /*********************************************************************
2626 * This routine disables all traffic on the adapter by issuing a
2627 * global reset on the MAC and deallocates TX/RX buffers.
2629 * This routine should always be called with BOTH the CORE
2631 **********************************************************************/
/* NOTE(review): the function signature (presumably em_stop(void *arg))
 * is on a line elided from this listing — confirm against the full file. */
2636 struct adapter *adapter = arg;
2637 struct ifnet *ifp = adapter->ifp;
/* Caller must hold both the CORE and TX locks (per banner above). */
2639 EM_CORE_LOCK_ASSERT(adapter);
2640 EM_TX_LOCK_ASSERT(adapter);
2642 INIT_DEBUGOUT("em_stop: begin");
/* Quiesce: mask interrupts and stop the periodic/FIFO callouts. */
2644 em_disable_intr(adapter);
2645 callout_stop(&adapter->timer);
2646 callout_stop(&adapter->tx_fifo_timer);
2648 /* Tell the stack that the interface is no longer active */
2649 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2652 /* Disable IEEE 1588 Time hardware */
2653 if ((adapter->hw.mac.type == e1000_82574) ||
2654 (adapter->hw.mac.type == e1000_ich10lan))
2655 em_tsync_disable(adapter);
/* Global MAC reset; clear Wake-Up Control on 82544 and newer. */
2658 e1000_reset_hw(&adapter->hw);
2659 if (adapter->hw.mac.type >= e1000_82544)
2660 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2664 /*********************************************************************
2666 * Determine hardware revision.
2668 **********************************************************************/
/* Read PCI config identification into adapter->hw and let the shared
 * code derive the MAC type; also force-enables bus mastering / memory
 * access if the BIOS left them off. */
2670 em_identify_hardware(struct adapter *adapter)
2672 device_t dev = adapter->dev;
2674 /* Make sure our PCI config space has the necessary stuff set */
2675 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2676 if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2677 (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2678 device_printf(dev, "Memory Access and/or Bus Master bits "
2680 adapter->hw.bus.pci_cmd_word |=
2681 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2682 pci_write_config(dev, PCIR_COMMAND,
2683 adapter->hw.bus.pci_cmd_word, 2);
2686 /* Save off the information about this board */
2687 adapter->hw.vendor_id = pci_get_vendor(dev);
2688 adapter->hw.device_id = pci_get_device(dev);
2689 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2690 adapter->hw.subsystem_vendor_id =
2691 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2692 adapter->hw.subsystem_device_id =
2693 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2695 /* Do Shared Code Init and Setup */
2696 if (e1000_set_mac_type(&adapter->hw)) {
2697 device_printf(dev, "Setup init failure\n");
/* Map the memory BAR (and, on pre-82571 parts, the IO BAR), initialize
 * the MSI/X resource arrays, and probe for MSI/MSI-X support. */
2703 em_allocate_pci_resources(struct adapter *adapter)
2705 device_t dev = adapter->dev;
2706 int val, rid, error = E1000_SUCCESS;
/* Map the register (memory) BAR; the rid initialization for it is on
 * a line elided from this listing. */
2709 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2711 if (adapter->memory == NULL) {
2712 device_printf(dev, "Unable to allocate bus resource: memory\n");
2715 adapter->osdep.mem_bus_space_tag =
2716 rman_get_bustag(adapter->memory);
2717 adapter->osdep.mem_bus_space_handle =
2718 rman_get_bushandle(adapter->memory);
2719 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2721 /* Only older adapters use IO mapping */
2722 if ((adapter->hw.mac.type > e1000_82543) &&
2723 (adapter->hw.mac.type < e1000_82571)) {
2724 /* Figure our where our IO BAR is ? */
/* Scan BARs in config space until an IO-type BAR is found. */
2725 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2726 val = pci_read_config(dev, rid, 4);
2727 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2728 adapter->io_rid = rid;
2732 /* check for 64bit BAR */
/* A 64-bit memory BAR occupies two dwords; skip accordingly
 * (the rid increments themselves are on elided lines). */
2733 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2736 if (rid >= PCIR_CIS) {
2737 device_printf(dev, "Unable to locate IO BAR\n");
2740 adapter->ioport = bus_alloc_resource_any(dev,
2741 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2742 if (adapter->ioport == NULL) {
2743 device_printf(dev, "Unable to allocate bus resource: "
2747 adapter->hw.io_base = 0;
2748 adapter->osdep.io_bus_space_tag =
2749 rman_get_bustag(adapter->ioport);
2750 adapter->osdep.io_bus_space_handle =
2751 rman_get_bushandle(adapter->ioport);
2755 ** Init the resource arrays
2756 ** used by MSIX setup
2758 for (int i = 0; i < 3; i++) {
2759 adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
2760 adapter->tag[i] = NULL;
2761 adapter->res[i] = NULL;
2765 * Setup MSI/X or MSI if PCI Express
/* Returns the number of vectors obtained (0 = legacy interrupt). */
2768 adapter->msi = em_setup_msix(adapter);
2770 adapter->hw.back = &adapter->osdep;
2775 /*********************************************************************
2777 * Setup the Legacy or MSI Interrupt handler
2779 **********************************************************************/
2781 em_allocate_legacy(struct adapter *adapter)
2783 device_t dev = adapter->dev;
2786 /* Manually turn off all interrupts */
2787 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2789 /* Legacy RID is 0 */
/* adapter->msi == 0 means no MSI vector was obtained; INTx uses rid 0. */
2790 if (adapter->msi == 0)
2791 adapter->rid[0] = 0;
2793 /* We allocate a single interrupt resource */
2794 adapter->res[0] = bus_alloc_resource_any(dev,
2795 SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
2796 if (adapter->res[0] == NULL) {
2797 device_printf(dev, "Unable to allocate bus resource: "
2802 #ifdef EM_LEGACY_IRQ
2803 /* We do Legacy setup */
/* ithread (non-filter) handler; argument order differs pre/post
 * FreeBSD 7 bus_setup_intr() API change. */
2804 if ((error = bus_setup_intr(dev, adapter->res[0],
2805 #if __FreeBSD_version > 700000
2806 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr, adapter,
2808 INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter,
2810 &adapter->tag[0])) != 0) {
2811 device_printf(dev, "Failed to register interrupt handler");
2815 #else /* FAST_IRQ */
2817 * Try allocating a fast interrupt and the associated deferred
2818 * processing contexts.
2820 TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
2821 TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2822 adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2823 taskqueue_thread_enqueue, &adapter->tq);
2824 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2825 device_get_nameunit(adapter->dev));
2826 #if __FreeBSD_version < 700000
2827 if ((error = bus_setup_intr(dev, adapter->res[0],
2828 INTR_TYPE_NET | INTR_FAST, em_irq_fast, adapter,
2830 if ((error = bus_setup_intr(dev, adapter->res[0],
2831 INTR_TYPE_NET, em_irq_fast, NULL, adapter,
2833 &adapter->tag[0])) != 0) {
2834 device_printf(dev, "Failed to register fast interrupt "
2835 "handler: %d\n", error);
/* Fast-handler registration failed: tear down the taskqueue. */
2836 taskqueue_free(adapter->tq);
2840 #endif /* EM_LEGACY_IRQ */
2845 /*********************************************************************
2847 * Setup the MSIX Interrupt handlers
2848 * This is not really Multiqueue, rather
2849 * its just multiple interrupt vectors.
2851 **********************************************************************/
/* Allocate one IRQ resource per MSI-X vector (RX, TX, LINK), create the
 * deferred-processing taskqueue, and register the three handlers. */
2853 em_allocate_msix(struct adapter *adapter)
2855 device_t dev = adapter->dev;
2858 /* Make sure all interrupts are disabled */
2859 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2861 /* First get the resources */
2862 for (int i = 0; i < adapter->msi; i++) {
2863 adapter->res[i] = bus_alloc_resource_any(dev,
2864 SYS_RES_IRQ, &adapter->rid[i], RF_ACTIVE);
2865 if (adapter->res[i] == NULL) {
2867 "Unable to allocate bus resource: "
2868 "MSIX Interrupt\n");
2874 * Now allocate deferred processing contexts.
2876 TASK_INIT(&adapter->rx_task, 0, em_handle_rx, adapter);
2877 TASK_INIT(&adapter->tx_task, 0, em_handle_tx, adapter);
2878 TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2879 adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2880 taskqueue_thread_enqueue, &adapter->tq);
2881 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2882 device_get_nameunit(adapter->dev));
2885 * And setup the interrupt handlers
2888 /* First slot to RX */
/* bus_setup_intr() argument order differs pre/post FreeBSD 7. */
2889 if ((error = bus_setup_intr(dev, adapter->res[0],
2890 #if __FreeBSD_version > 700000
2891 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, adapter,
2893 INTR_TYPE_NET | INTR_MPSAFE, em_msix_rx, adapter,
2895 &adapter->tag[0])) != 0) {
2896 device_printf(dev, "Failed to register RX handler");
/* Second slot to TX */
2901 if ((error = bus_setup_intr(dev, adapter->res[1],
2902 #if __FreeBSD_version > 700000
2903 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, adapter,
2905 INTR_TYPE_NET | INTR_MPSAFE, em_msix_tx, adapter,
2907 &adapter->tag[1])) != 0) {
2908 device_printf(dev, "Failed to register TX handler");
/* Third slot to LINK */
2913 if ((error = bus_setup_intr(dev, adapter->res[2],
2914 #if __FreeBSD_version > 700000
2915 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_link, adapter,
2917 INTR_TYPE_NET | INTR_MPSAFE, em_msix_link, adapter,
2919 &adapter->tag[2])) != 0) {
/* BUGFIX: this branch registers the LINK vector (em_msix_link on
 * res[2]); the message previously said "TX handler" — a copy/paste
 * error from the branch above. */
2920 device_printf(dev, "Failed to register LINK handler");
/* Release every PCI resource acquired by em_allocate_pci_resources()
 * and the interrupt setup: IRQ handlers/resources, MSI vectors, the
 * MSIX BAR, the memory BAR, the flash mapping, and the IO port. */
2928 em_free_pci_resources(struct adapter *adapter)
2930 device_t dev = adapter->dev;
2932 /* Make sure the for loop below runs once */
/* Legacy case (msi == 0) still has one IRQ in slot 0; the statement
 * bumping the loop bound is on a line elided from this listing. */
2933 if (adapter->msi == 0)
2937 * First release all the interrupt resources:
2938 * notice that since these are just kept
2939 * in an array we can do the same logic
2940 * whether its MSIX or just legacy.
2942 for (int i = 0; i < adapter->msi; i++) {
2943 if (adapter->tag[i] != NULL) {
2944 bus_teardown_intr(dev, adapter->res[i],
2946 adapter->tag[i] = NULL;
2948 if (adapter->res[i] != NULL) {
2949 bus_release_resource(dev, SYS_RES_IRQ,
2950 adapter->rid[i], adapter->res[i]);
/* NOTE(review): the condition guarding pci_release_msi() is on a line
 * elided from this listing — confirm against the full file. */
2955 pci_release_msi(dev);
2957 if (adapter->msix != NULL)
2958 bus_release_resource(dev, SYS_RES_MEMORY,
2959 PCIR_BAR(EM_MSIX_BAR), adapter->msix);
2961 if (adapter->memory != NULL)
2962 bus_release_resource(dev, SYS_RES_MEMORY,
2963 PCIR_BAR(0), adapter->memory);
2965 if (adapter->flash != NULL)
2966 bus_release_resource(dev, SYS_RES_MEMORY,
2967 EM_FLASH, adapter->flash);
2969 if (adapter->ioport != NULL)
2970 bus_release_resource(dev, SYS_RES_IOPORT,
2971 adapter->io_rid, adapter->ioport);
/* Probe for MSI-X (82574/Hartwell only, capped at 3 vectors) or plain
 * MSI. Return values (vector counts) are on lines elided from this
 * listing; 0 means fall back to a legacy interrupt. */
2978 em_setup_msix(struct adapter *adapter)
2980 device_t dev = adapter->dev;
/* Pre-82571 parts are not PCI Express: no MSI/MSI-X. */
2983 if (adapter->hw.mac.type < e1000_82571)
2986 /* Setup MSI/X for Hartwell */
2987 if (adapter->hw.mac.type == e1000_82574) {
2988 /* Map the MSIX BAR */
2989 int rid = PCIR_BAR(EM_MSIX_BAR);
2990 adapter->msix = bus_alloc_resource_any(dev,
2991 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2992 if (!adapter->msix) {
2993 /* May not be enabled */
2994 device_printf(adapter->dev,
2995 "Unable to map MSIX table \n");
2998 val = pci_msix_count(dev);
3000 ** 82574 can be configured for 5 but
3001 ** we limit use to 3.
3003 if (val > 3) val = 3;
3004 if ((val) && pci_alloc_msix(dev, &val) == 0) {
3005 device_printf(adapter->dev,"Using MSIX interrupts\n");
/* MSI fallback: single message interrupt. */
3010 val = pci_msi_count(dev);
3011 if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
3013 device_printf(adapter->dev,"Using MSI interrupt\n");
3019 /*********************************************************************
3021 * Initialize the hardware to a configuration
3022 * as specified by the adapter structure.
3024 **********************************************************************/
3026 em_hardware_init(struct adapter *adapter)
3028 device_t dev = adapter->dev;
3031 INIT_DEBUGOUT("em_hardware_init: begin");
3033 /* Issue a global reset */
3034 e1000_reset_hw(&adapter->hw);
3036 /* Get control from any management/hw control */
3037 if (((adapter->hw.mac.type == e1000_82573) ||
3038 (adapter->hw.mac.type == e1000_ich8lan) ||
3039 (adapter->hw.mac.type == e1000_ich10lan) ||
3040 (adapter->hw.mac.type == e1000_ich9lan)) &&
3041 e1000_check_mng_mode(&adapter->hw))
3042 em_get_hw_control(adapter);
3044 /* When hardware is reset, fifo_head is also reset */
3045 adapter->tx_fifo_head = 0;
3047 /* Set up smart power down as default off on newer adapters. */
3048 if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
3049 adapter->hw.mac.type == e1000_82572)) {
3052 /* Speed up time to link by disabling smart power down. */
3053 e1000_read_phy_reg(&adapter->hw,
3054 IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
3055 phy_tmp &= ~IGP02E1000_PM_SPD;
3056 e1000_write_phy_reg(&adapter->hw,
3057 IGP02E1000_PHY_POWER_MGMT, phy_tmp);
3061 * These parameters control the automatic generation (Tx) and
3062 * response (Rx) to Ethernet PAUSE frames.
3063 * - High water mark should allow for at least two frames to be
3064 * received after sending an XOFF.
3065 * - Low water mark works best when it is very near the high water mark.
3066 * This allows the receiver to restart by sending XON when it has
3067 * drained a bit. Here we use an arbitary value of 1500 which will
3068 * restart after one full frame is pulled from the buffer. There
3069 * could be several smaller frames in the buffer and if so they will
3070 * not trigger the XON until their total number reduces the buffer
3072 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
/* PBA holds the RX packet-buffer size; the mask/scale applied to the
 * register value is on a line elided from this listing. */
3074 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
3077 adapter->hw.fc.high_water = rx_buffer_size -
3078 roundup2(adapter->max_frame_size, 1024);
3079 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
3081 if (adapter->hw.mac.type == e1000_80003es2lan)
3082 adapter->hw.fc.pause_time = 0xFFFF;
3084 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
3085 adapter->hw.fc.send_xon = TRUE;
3086 adapter->hw.fc.requested_mode = e1000_fc_full;
3088 if (e1000_init_hw(&adapter->hw) < 0) {
3089 device_printf(dev, "Hardware Initialization Failed\n");
/* Prime the link-state machinery after init. */
3093 e1000_check_for_link(&adapter->hw);
3098 /*********************************************************************
3100 * Setup networking device structure and register an interface.
3102 **********************************************************************/
3104 em_setup_interface(device_t dev, struct adapter *adapter)
3108 INIT_DEBUGOUT("em_setup_interface: begin");
3110 ifp = adapter->ifp = if_alloc(IFT_ETHER);
/* NOTE(review): the NULL check guarding this panic is on a line elided
 * from this listing. */
3112 panic("%s: can not if_alloc()", device_get_nameunit(dev));
3113 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3114 ifp->if_mtu = ETHERMTU;
3115 ifp->if_init = em_init;
3116 ifp->if_softc = adapter;
3117 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3118 ifp->if_ioctl = em_ioctl;
3119 ifp->if_start = em_start;
/* Send queue sized to the TX ring, less one (ring full != empty). */
3120 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
3121 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
3122 IFQ_SET_READY(&ifp->if_snd);
3124 ether_ifattach(ifp, adapter->hw.mac.addr);
3126 ifp->if_capabilities = ifp->if_capenable = 0;
/* Checksum offload: 82543 and newer; VLAN HW csum only on FreeBSD 7+. */
3128 if (adapter->hw.mac.type >= e1000_82543) {
3130 #if __FreeBSD_version < 700000
3131 version_cap = IFCAP_HWCSUM;
3133 version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
3135 ifp->if_capabilities |= version_cap;
3136 ifp->if_capenable |= version_cap;
3139 #if __FreeBSD_version >= 700000
3140 /* Identify TSO capable adapters */
3141 if ((adapter->hw.mac.type > e1000_82544) &&
3142 (adapter->hw.mac.type != e1000_82547))
3143 ifp->if_capabilities |= IFCAP_TSO4;
3145 * By default only enable on PCI-E, this
3146 * can be overriden by ifconfig.
3148 if (adapter->hw.mac.type >= e1000_82571)
3149 ifp->if_capenable |= IFCAP_TSO4;
3153 * Tell the upper layer(s) we support long frames.
3155 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3156 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
3157 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
3159 #ifdef DEVICE_POLLING
3160 ifp->if_capabilities |= IFCAP_POLLING;
3164 * Specify the media types supported by this adapter and register
3165 * callbacks to update media and link information
3167 ifmedia_init(&adapter->media, IFM_IMASK,
3168 em_media_change, em_media_status);
3169 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3170 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
3171 u_char fiber_type = IFM_1000_SX; /* default type */
/* 82545 fiber parts are 1000BASE-LX. */
3173 if (adapter->hw.mac.type == e1000_82545)
3174 fiber_type = IFM_1000_LX;
3175 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
3177 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
/* Copper: advertise 10/100 half+full, and 1000 where the PHY allows. */
3179 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
3180 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
3182 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
3184 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
/* The IFE (10/100-only) PHY cannot do gigabit. */
3186 if (adapter->hw.phy.type != e1000_phy_ife) {
3187 ifmedia_add(&adapter->media,
3188 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
3189 ifmedia_add(&adapter->media,
3190 IFM_ETHER | IFM_1000_T, 0, NULL);
3193 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3194 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3198 /*********************************************************************
3200 * Workaround for SmartSpeed on 82541 and 82547 controllers
3202 **********************************************************************/
/* Called from the 1-second timer while link is down: if repeated
 * master/slave config faults are seen, toggle 1000T master/slave
 * enable and restart autonegotiation to coax the link up. */
3204 em_smartspeed(struct adapter *adapter)
/* Only relevant while link is down, on IGP PHYs, with autoneg
 * advertising 1000FD. */
3208 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
3209 adapter->hw.mac.autoneg == 0 ||
3210 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
3213 if (adapter->smartspeed == 0) {
3214 /* If Master/Slave config fault is asserted twice,
3215 * we assume back-to-back */
3216 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
3217 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
3219 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
3220 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
3221 e1000_read_phy_reg(&adapter->hw,
3222 PHY_1000T_CTRL, &phy_tmp);
3223 if(phy_tmp & CR_1000T_MS_ENABLE) {
/* Forced master/slave: clear it and restart autoneg. */
3224 phy_tmp &= ~CR_1000T_MS_ENABLE;
3225 e1000_write_phy_reg(&adapter->hw,
3226 PHY_1000T_CTRL, phy_tmp);
3227 adapter->smartspeed++;
3228 if(adapter->hw.mac.autoneg &&
3229 !e1000_phy_setup_autoneg(&adapter->hw) &&
3230 !e1000_read_phy_reg(&adapter->hw,
3231 PHY_CONTROL, &phy_tmp)) {
3232 phy_tmp |= (MII_CR_AUTO_NEG_EN |
3233 MII_CR_RESTART_AUTO_NEG);
3234 e1000_write_phy_reg(&adapter->hw,
3235 PHY_CONTROL, phy_tmp);
3240 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
3241 /* If still no link, perhaps using 2/3 pair cable */
3242 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
/* Re-enable master/slave and try again. */
3243 phy_tmp |= CR_1000T_MS_ENABLE;
3244 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
3245 if(adapter->hw.mac.autoneg &&
3246 !e1000_phy_setup_autoneg(&adapter->hw) &&
3247 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
3248 phy_tmp |= (MII_CR_AUTO_NEG_EN |
3249 MII_CR_RESTART_AUTO_NEG);
3250 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
3253 /* Restart process after EM_SMARTSPEED_MAX iterations */
3254 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
3255 adapter->smartspeed = 0;
3260 * Manage DMA'able memory.
/* busdma load callback: stash the single segment's bus address into the
 * bus_addr_t the caller passed as 'arg'. Error/nseg handling lines are
 * elided from this listing. */
3263 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3267 *(bus_addr_t *) arg = segs[0].ds_addr;
/* Allocate 'size' bytes of DMA-safe, contiguous (single-segment) memory:
 * create a tag, allocate+map the memory, and load it to obtain the bus
 * address in dma->dma_paddr. On failure, unwinds whatever was created
 * (goto-cleanup labels are on lines elided from this listing). */
3271 em_dma_malloc(struct adapter *adapter, bus_size_t size,
3272 struct em_dma_alloc *dma, int mapflags)
/* bus_get_dma_tag() (inherit parent restrictions) only exists on 7.x+. */
3276 #if __FreeBSD_version >= 700000
3277 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
3279 error = bus_dma_tag_create(NULL, /* parent */
3281 EM_DBA_ALIGN, 0, /* alignment, bounds */
3282 BUS_SPACE_MAXADDR, /* lowaddr */
3283 BUS_SPACE_MAXADDR, /* highaddr */
3284 NULL, NULL, /* filter, filterarg */
3287 size, /* maxsegsize */
3289 NULL, /* lockfunc */
3293 device_printf(adapter->dev,
3294 "%s: bus_dma_tag_create failed: %d\n",
3299 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3300 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
3302 device_printf(adapter->dev,
3303 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3304 __func__, (uintmax_t)size, error);
/* Load the map; em_dmamap_cb() writes the bus address into dma_paddr. */
3309 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3310 size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
3311 if (error || dma->dma_paddr == 0) {
3312 device_printf(adapter->dev,
3313 "%s: bus_dmamap_load failed: %d\n",
/* Failure unwind: unload, free, destroy, and NULL the handles. */
3321 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3323 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3324 bus_dma_tag_destroy(dma->dma_tag);
3326 dma->dma_map = NULL;
3327 dma->dma_tag = NULL;
/* Release DMA memory allocated by em_dma_malloc(): sync, unload, free,
 * destroy the tag. Safe to call on a never-allocated struct (tag NULL). */
3333 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
3335 if (dma->dma_tag == NULL)
3337 if (dma->dma_map != NULL) {
/* Sync before unload so any pending DMA results are visible. */
3338 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3339 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3340 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3341 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3342 dma->dma_map = NULL;
3344 bus_dma_tag_destroy(dma->dma_tag);
3345 dma->dma_tag = NULL;
3349 /*********************************************************************
3351 * Allocate memory for tx_buffer structures. The tx_buffer stores all
3352 * the information needed to transmit a packet on the wire.
3354 **********************************************************************/
3356 em_allocate_transmit_structures(struct adapter *adapter)
3358 device_t dev = adapter->dev;
3359 struct em_buffer *tx_buffer;
3363 * Create DMA tags for tx descriptors
3365 #if __FreeBSD_version >= 700000
3366 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3368 if ((error = bus_dma_tag_create(NULL, /* parent */
3370 1, 0, /* alignment, bounds */
3371 BUS_SPACE_MAXADDR, /* lowaddr */
3372 BUS_SPACE_MAXADDR, /* highaddr */
3373 NULL, NULL, /* filter, filterarg */
3374 EM_TSO_SIZE, /* maxsize */
3375 EM_MAX_SCATTER, /* nsegments */
3376 EM_TSO_SEG_SIZE, /* maxsegsize */
3378 NULL, /* lockfunc */
3380 &adapter->txtag)) != 0) {
3381 device_printf(dev, "Unable to allocate TX DMA tag\n");
/* One em_buffer per TX descriptor, zeroed. */
3385 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
3386 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3387 if (adapter->tx_buffer_area == NULL) {
3388 device_printf(dev, "Unable to allocate tx_buffer memory\n");
3393 /* Create the descriptor buffer dma maps */
3394 for (int i = 0; i < adapter->num_tx_desc; i++) {
3395 tx_buffer = &adapter->tx_buffer_area[i];
3396 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
3398 device_printf(dev, "Unable to create TX DMA map\n");
/* -1 marks "no end-of-packet descriptor pending" for this slot. */
3401 tx_buffer->next_eop = -1;
/* Failure path: tear down everything allocated above. */
3406 em_free_transmit_structures(adapter);
3410 /*********************************************************************
3412 * (Re)Initialize transmit structures.
3414 **********************************************************************/
3416 em_setup_transmit_structures(struct adapter *adapter)
3418 struct em_buffer *tx_buffer;
3420 /* Clear the old ring contents */
3421 bzero(adapter->tx_desc_base,
3422 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc)
3424 /* Free any existing TX buffers */
/* NOTE(review): the 'tx_buffer++' increment is redundant — tx_buffer is
 * reassigned from the array at the top of every iteration. Harmless,
 * but worth cleaning up. */
3425 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3426 tx_buffer = &adapter->tx_buffer_area[i];
3427 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3428 BUS_DMASYNC_POSTWRITE);
3429 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
/* m_freem(NULL) is a no-op, so unconditional free is safe. */
3430 m_freem(tx_buffer->m_head);
3431 tx_buffer->m_head = NULL;
3432 tx_buffer->next_eop = -1;
/* Reset ring bookkeeping: whole ring is free again. */
3436 adapter->next_avail_tx_desc = 0;
3437 adapter->next_tx_to_clean = 0;
3438 adapter->num_tx_desc_avail = adapter->num_tx_desc;
/* Push the zeroed ring to the device before (re)enabling TX. */
3440 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3441 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3446 /*********************************************************************
3448 * Enable transmit unit.
3450 **********************************************************************/
/*
 * Program the transmitter: ring base/length (TDBAL/TDBAH/TDLEN), head
 * and tail pointers, the inter-packet gap (TIPG) chosen per MAC type
 * and media, interrupt delays (TIDV/TADV), per-chip TARC tweaks, and
 * finally TCTL, which turns the TX unit on.
 */
3452 em_initialize_transmit_unit(struct adapter *adapter)
3454 u32 tctl, tarc, tipg = 0;
3457 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
3458 /* Setup the Base and Length of the Tx Descriptor Ring */
3459 bus_addr = adapter->txdma.dma_paddr;
3460 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
3461 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
3462 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
3463 (u32)(bus_addr >> 32));
3464 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
3466 /* Setup the HW Tx Head and Tail descriptor pointers */
3467 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
3468 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
3470 HW_DEBUGOUT2("Base = %x, Length = %x\n",
3471 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
3472 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
3474 /* Set the default values for the Tx Inter Packet Gap timer */
3475 switch (adapter->hw.mac.type) {
3477 tipg = DEFAULT_82542_TIPG_IPGT;
3478 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3479 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3481 case e1000_80003es2lan:
3482 tipg = DEFAULT_82543_TIPG_IPGR1;
3483 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
3484 E1000_TIPG_IPGR2_SHIFT;
/* Fiber/serdes media uses a different default transmit IPG. */
3487 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3488 (adapter->hw.phy.media_type ==
3489 e1000_media_type_internal_serdes))
3490 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
3492 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
3493 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3494 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3497 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
3498 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
3499 if(adapter->hw.mac.type >= e1000_82540)
3500 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
3501 adapter->tx_abs_int_delay.value);
/* 82571/82572 want the SPEED_MODE bit set in TARC0. */
3503 if ((adapter->hw.mac.type == e1000_82571) ||
3504 (adapter->hw.mac.type == e1000_82572)) {
3505 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3506 tarc |= SPEED_MODE_BIT;
3507 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3508 } else if (adapter->hw.mac.type == e1000_80003es2lan) {
3509 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3511 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3512 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
3514 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
3517 /* Program the Transmit Control Register */
3518 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
3519 tctl &= ~E1000_TCTL_CT;
3520 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
3521 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
3523 if (adapter->hw.mac.type >= e1000_82571)
3524 tctl |= E1000_TCTL_MULR;
3526 /* This write will effectively turn on the transmit unit. */
3527 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
3529 /* Setup Transmit Descriptor Base Settings */
3530 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
/* Only request descriptor-done interrupt delay when a delay is configured. */
3532 if (adapter->tx_int_delay.value > 0)
3533 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3536 /*********************************************************************
3538 * Free all transmit related data structures.
3540 **********************************************************************/
/*
 * Teardown counterpart of the TX allocation path: unload/destroy each
 * buffer's DMA map (freeing any mbuf still attached), then release the
 * tx_buffer array and finally the TX DMA tag.  Safe to call with a
 * partially-initialized adapter: every step is NULL-guarded.
 */
3542 em_free_transmit_structures(struct adapter *adapter)
3544 struct em_buffer *tx_buffer;
3546 INIT_DEBUGOUT("free_transmit_structures: begin");
3548 if (adapter->tx_buffer_area != NULL) {
3549 for (int i = 0; i < adapter->num_tx_desc; i++) {
3550 tx_buffer = &adapter->tx_buffer_area[i];
/* A held mbuf implies a loaded map: sync, unload, then free it. */
3551 if (tx_buffer->m_head != NULL) {
3552 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3553 BUS_DMASYNC_POSTWRITE);
3554 bus_dmamap_unload(adapter->txtag,
3556 m_freem(tx_buffer->m_head);
3557 tx_buffer->m_head = NULL;
3558 } else if (tx_buffer->map != NULL)
3559 bus_dmamap_unload(adapter->txtag,
3561 if (tx_buffer->map != NULL) {
3562 bus_dmamap_destroy(adapter->txtag,
3564 tx_buffer->map = NULL;
3568 if (adapter->tx_buffer_area != NULL) {
3569 free(adapter->tx_buffer_area, M_DEVBUF);
3570 adapter->tx_buffer_area = NULL;
3572 if (adapter->txtag != NULL) {
3573 bus_dma_tag_destroy(adapter->txtag);
3574 adapter->txtag = NULL;
3578 /*********************************************************************
3580 * The offload context needs to be set when we transfer the first
3581 * packet of a particular protocol (TCP/UDP). This routine has been
3582 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
3584 **********************************************************************/
/*
 * Build a checksum-offload context descriptor for the packet in 'mp'
 * and report the per-packet txd_upper/txd_lower bits the caller must
 * place in the data descriptors.  Consumes one TX descriptor.
 */
3586 em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
3587 u32 *txd_upper, u32 *txd_lower)
3589 struct e1000_context_desc *TXD;
3590 struct em_buffer *tx_buffer;
3591 struct ether_vlan_header *eh;
3592 struct ip *ip = NULL;
3593 struct ip6_hdr *ip6;
3595 int curr_txd, ehdrlen;
3596 u32 cmd, hdr_len, ip_hlen;
3600 cmd = hdr_len = ipproto = 0;
3601 /* Setup checksum offload context. */
3602 curr_txd = adapter->next_avail_tx_desc;
3603 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3604 TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
3607 * Determine where frame payload starts.
3608 * Jump over vlan headers if already present,
3609 * helpful for QinQ too.
3611 eh = mtod(mp, struct ether_vlan_header *);
3612 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3613 etype = ntohs(eh->evl_proto);
3614 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3616 etype = ntohs(eh->evl_encap_proto);
3617 ehdrlen = ETHER_HDR_LEN;
3621 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
3622 * TODO: Support SCTP too when it hits the tree.
3626 ip = (struct ip *)(mp->m_data + ehdrlen);
3627 ip_hlen = ip->ip_hl << 2;
3629 /* Setup of IP header checksum. */
3630 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3632 * Start offset for header checksum calculation.
3633 * End offset for header checksum calculation.
3634 * Offset of place to put the checksum.
3636 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3637 TXD->lower_setup.ip_fields.ipcse =
3638 htole16(ehdrlen + ip_hlen);
3639 TXD->lower_setup.ip_fields.ipcso =
3640 ehdrlen + offsetof(struct ip, ip_sum);
3641 cmd |= E1000_TXD_CMD_IP;
3642 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
/* Bail if the L3 header is not contiguous in the first mbuf. */
3645 if (mp->m_len < ehdrlen + ip_hlen)
3646 return; /* failure */
3648 hdr_len = ehdrlen + ip_hlen;
3652 case ETHERTYPE_IPV6:
3653 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3654 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3656 if (mp->m_len < ehdrlen + ip_hlen)
3657 return; /* failure */
3659 /* IPv6 doesn't have a header checksum. */
3661 hdr_len = ehdrlen + ip_hlen;
3662 ipproto = ip6->ip6_nxt;
3666 case ETHERTYPE_IEEE1588:
3667 *txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
3678 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3680 * Start offset for payload checksum calculation.
3681 * End offset for payload checksum calculation.
3682 * Offset of place to put the checksum.
/* NOTE(review): cast uses "struct tcp_hdr" while the offsetof two
 * lines below uses "struct tcphdr" — confirm which type 'th' is
 * declared as; the FreeBSD TCP header type is struct tcphdr. */
3684 th = (struct tcp_hdr *)(mp->m_data + hdr_len);
3685 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3686 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3687 TXD->upper_setup.tcp_fields.tucso =
3688 hdr_len + offsetof(struct tcphdr, th_sum);
3689 cmd |= E1000_TXD_CMD_TCP;
3690 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3696 void *hdr = (caddr_t) ip + ip_hlen;
3697 struct udphdr *uh = (struct udphdr *)hdr;
/* IEEE 1588 event packets go to TSYNC_PORT; ask HW to timestamp. */
3699 if (uh->uh_dport == htons(TSYNC_PORT)) {
3700 *txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
3701 IOCTL_DEBUGOUT("@@@ Sending Event Packet\n");
3704 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3706 * Start offset for header checksum calculation.
3707 * End offset for header checksum calculation.
3708 * Offset of place to put the checksum.
3710 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3711 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3712 TXD->upper_setup.tcp_fields.tucso =
3713 hdr_len + offsetof(struct udphdr, uh_sum);
3714 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3724 ** We might be here just for TIMESYNC
3725 ** which means we don't need the context
/*
 * FIX: precedence bug.  The original read
 *     if (!mp->m_pkthdr.csum_flags & CSUM_OFFLOAD)
 * but '!' binds tighter than '&', so that evaluated
 * (!csum_flags) & CSUM_OFFLOAD — i.e. it only tested csum_flags == 0,
 * not "no offload bits requested".  Parenthesize the mask test.
 */
3728 if (!(mp->m_pkthdr.csum_flags & CSUM_OFFLOAD))
3731 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
3732 E1000_TXD_DTYP_D; /* Data descr */
3733 TXD->tcp_seg_setup.data = htole32(0);
3734 TXD->cmd_and_length =
3735 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
/* The context descriptor carries no mbuf of its own. */
3736 tx_buffer->m_head = NULL;
3737 tx_buffer->next_eop = -1;
3739 if (++curr_txd == adapter->num_tx_desc)
3742 adapter->num_tx_desc_avail--;
3743 adapter->next_avail_tx_desc = curr_txd;
3747 #if __FreeBSD_version >= 700000
3748 /**********************************************************************
3750 * Setup work for hardware segmentation offload (TSO)
3752 **********************************************************************/
/*
 * Validate the frame for TSO, prime th_sum with the pseudo-header
 * checksum the hardware expects, and emit a TSO context descriptor
 * (MSS, header length, payload length).  Returns FALSE when the frame
 * cannot be TSO'd; consumes one TX descriptor on success.
 */
3754 em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
3757 struct e1000_context_desc *TXD;
3758 struct em_buffer *tx_buffer;
3759 struct ether_vlan_header *eh;
3761 struct ip6_hdr *ip6;
3763 int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
3767 * This function could/should be extended to support IP/IPv6
3768 * fragmentation as well. But as they say, one step at a time.
3772 * Determine where frame payload starts.
3773 * Jump over vlan headers if already present,
3774 * helpful for QinQ too.
3776 eh = mtod(mp, struct ether_vlan_header *);
3777 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3778 etype = ntohs(eh->evl_proto);
3779 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3781 etype = ntohs(eh->evl_encap_proto);
3782 ehdrlen = ETHER_HDR_LEN;
3785 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3786 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3787 return FALSE; /* -1 */
3790 * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
3791 * TODO: Support SCTP too when it hits the tree.
3796 ip = (struct ip *)(mp->m_data + ehdrlen);
3797 if (ip->ip_p != IPPROTO_TCP)
3798 return FALSE; /* 0 */
3801 ip_hlen = ip->ip_hl << 2;
3802 if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
3803 return FALSE; /* -1 */
3804 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
/* Seed th_sum with the pseudo-header sum; HW folds in the payload. */
3806 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3807 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3809 th->th_sum = mp->m_pkthdr.csum_data;
3812 case ETHERTYPE_IPV6:
/* IPv6 TSO is not wired up; everything below the return is dormant. */
3814 return FALSE; /* Not supported yet. */
3815 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3816 if (ip6->ip6_nxt != IPPROTO_TCP)
3817 return FALSE; /* 0 */
3819 ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
3820 if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
3821 return FALSE; /* -1 */
3822 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
/* FIX: destination address must come from the IPv6 header (ip6),
 * not the IPv4 pointer 'ip' — struct ip has no ip6_dst member. */
3824 th->th_sum = in6_pseudo(ip6->ip6_src, ip6->ip6_dst,
3825 htons(IPPROTO_TCP)); /* XXX: function notyet. */
3827 th->th_sum = mp->m_pkthdr.csum_data;
3833 hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);
3835 *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
3836 E1000_TXD_DTYP_D | /* Data descr type */
3837 E1000_TXD_CMD_TSE); /* Do TSE on this packet */
3839 /* IP and/or TCP header checksum calculation and insertion. */
3840 *txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
3841 E1000_TXD_POPTS_TXSM) << 8;
3843 curr_txd = adapter->next_avail_tx_desc;
3844 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3845 TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
3847 /* IPv6 doesn't have a header checksum. */
3850 * Start offset for header checksum calculation.
3851 * End offset for header checksum calculation.
3852 * Offset of place put the checksum.
3854 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3855 TXD->lower_setup.ip_fields.ipcse =
3856 htole16(ehdrlen + ip_hlen - 1);
3857 TXD->lower_setup.ip_fields.ipcso =
3858 ehdrlen + offsetof(struct ip, ip_sum);
3861 * Start offset for payload checksum calculation.
3862 * End offset for payload checksum calculation.
3863 * Offset of place to put the checksum.
3865 TXD->upper_setup.tcp_fields.tucss =
3867 TXD->upper_setup.tcp_fields.tucse = 0;
3868 TXD->upper_setup.tcp_fields.tucso =
3869 ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
3871 * Payload size per packet w/o any headers.
3872 * Length of all headers up to payload.
3874 TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
3875 TXD->tcp_seg_setup.fields.hdr_len = hdr_len;
3877 TXD->cmd_and_length = htole32(adapter->txd_cmd |
3878 E1000_TXD_CMD_DEXT | /* Extended descr */
3879 E1000_TXD_CMD_TSE | /* TSE context */
3880 (isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
3881 E1000_TXD_CMD_TCP | /* Do TCP checksum */
3882 (mp->m_pkthdr.len - (hdr_len))); /* Total len */
3884 tx_buffer->m_head = NULL;
3885 tx_buffer->next_eop = -1;
3887 if (++curr_txd == adapter->num_tx_desc)
3890 adapter->num_tx_desc_avail--;
3891 adapter->next_avail_tx_desc = curr_txd;
3892 adapter->tx_tso = TRUE;
3897 #endif /* __FreeBSD_version >= 700000 */
3899 /**********************************************************************
3901 * Examine each tx_buffer in the used queue. If the hardware is done
3902 * processing the packet then free associated resources. The
3903 * tx_buffer is put back on the free queue.
3905 **********************************************************************/
/*
 * TX completion: walk packets whose EOP descriptor has DD set, freeing
 * descriptors and mbufs, then update ring state and the watchdog.
 * Caller must hold the TX lock (asserted below).
 */
3907 em_txeof(struct adapter *adapter)
3909 int first, last, done, num_avail;
3910 struct em_buffer *tx_buffer;
3911 struct e1000_tx_desc *tx_desc, *eop_desc;
3912 struct ifnet *ifp = adapter->ifp;
3914 EM_TX_LOCK_ASSERT(adapter);
/* Nothing outstanding — fast exit. */
3916 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3919 num_avail = adapter->num_tx_desc_avail;
3920 first = adapter->next_tx_to_clean;
3921 tx_desc = &adapter->tx_desc_base[first];
3922 tx_buffer = &adapter->tx_buffer_area[first];
3923 last = tx_buffer->next_eop;
3924 eop_desc = &adapter->tx_desc_base[last];
3927 * What this does is get the index of the
3928 * first descriptor AFTER the EOP of the
3929 * first packet, that way we can do the
3930 * simple comparison on the inner while loop.
3932 if (++last == adapter->num_tx_desc)
3936 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3937 BUS_DMASYNC_POSTREAD);
/* Outer loop: one completed packet per iteration (EOP has DD set). */
3939 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3940 /* We clean the range of the packet */
3941 while (first != done) {
3942 tx_desc->upper.data = 0;
3943 tx_desc->lower.data = 0;
3944 tx_desc->buffer_addr = 0;
3947 if (tx_buffer->m_head) {
3949 bus_dmamap_sync(adapter->txtag,
3951 BUS_DMASYNC_POSTWRITE);
3952 bus_dmamap_unload(adapter->txtag,
3955 m_freem(tx_buffer->m_head);
3956 tx_buffer->m_head = NULL;
3958 tx_buffer->next_eop = -1;
3960 if (++first == adapter->num_tx_desc)
3963 tx_buffer = &adapter->tx_buffer_area[first];
3964 tx_desc = &adapter->tx_desc_base[first];
3966 /* See if we can continue to the next packet */
3967 last = tx_buffer->next_eop;
3969 eop_desc = &adapter->tx_desc_base[last];
3970 /* Get new done point */
3971 if (++last == adapter->num_tx_desc) last = 0;
3976 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3977 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3979 adapter->next_tx_to_clean = first;
3982 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
3983 * that it is OK to send packets.
3984 * If there are no pending descriptors, clear the timeout. Otherwise,
3985 * if some descriptors have been freed, restart the timeout.
3987 if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
3988 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3989 /* All clean, turn off the timer */
3990 if (num_avail == adapter->num_tx_desc) {
3991 adapter->watchdog_timer = 0;
3993 /* Some cleaned, reset the timer */
3994 if (num_avail != adapter->num_tx_desc_avail)
3995 adapter->watchdog_timer = EM_TX_TIMEOUT;
3997 adapter->num_tx_desc_avail = num_avail;
4001 /*********************************************************************
4003 * When Link is lost sometimes there is work still in the TX ring
4004 * which will result in a watchdog, rather than allow that do an
4005 * attempted cleanup and then reinit here. Note that this has been
4006 * seen mostly with fiber adapters.
4008 **********************************************************************/
/* Best-effort drain of the TX ring after link loss; reinit if it
 * still isn't clean so the watchdog doesn't fire spuriously. */
4010 em_tx_purge(struct adapter *adapter)
4012 if ((!adapter->link_active) && (adapter->watchdog_timer)) {
4013 EM_TX_LOCK(adapter);
4015 EM_TX_UNLOCK(adapter);
4016 if (adapter->watchdog_timer) { /* Still not clean? */
4017 adapter->watchdog_timer = 0;
4018 em_init_locked(adapter);
4023 /*********************************************************************
4025 * Get a buffer from system mbuf buffer pool.
4027 **********************************************************************/
/*
 * Refresh RX slot 'i' with a fresh mbuf cluster.  Loads the cluster
 * into the spare DMA map, then swaps the spare with the slot's map so
 * the old map becomes the new spare — avoids an unload/load pair.
 */
4029 em_get_buf(struct adapter *adapter, int i)
4032 bus_dma_segment_t segs[1];
4034 struct em_buffer *rx_buffer;
4037 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
4039 adapter->mbuf_cluster_failed++;
4042 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Shift payload so the IP header lands aligned, when the frame fits. */
4044 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
4045 m_adj(m, ETHER_ALIGN);
4048 * Using memory from the mbuf cluster pool, invoke the
4049 * bus_dma machinery to arrange the memory mapping.
4051 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
4052 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
4058 /* If nsegs is wrong then the stack is corrupt. */
4059 KASSERT(nsegs == 1, ("Too many segments returned!"));
4061 rx_buffer = &adapter->rx_buffer_area[i];
4062 if (rx_buffer->m_head != NULL)
4063 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap: loaded spare map into the slot, slot's old map becomes spare. */
4065 map = rx_buffer->map;
4066 rx_buffer->map = adapter->rx_sparemap;
4067 adapter->rx_sparemap = map;
4068 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
4069 rx_buffer->m_head = m;
4071 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
4075 /*********************************************************************
4077 * Allocate memory for rx_buffer structures. Since we use one
4078 * rx_buffer per received packet, the maximum number of rx_buffer's
4079 * that we'll need is equal to the number of receive descriptors
4080 * that we've allocated.
4082 **********************************************************************/
/*
 * One-time RX allocation: rx_buffer array, RX DMA tag, the spare map
 * used by em_get_buf()'s map-swap trick, and one DMA map per slot.
 * On failure, falls through to em_free_receive_structures() cleanup.
 */
4084 em_allocate_receive_structures(struct adapter *adapter)
4086 device_t dev = adapter->dev;
4087 struct em_buffer *rx_buffer;
4090 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
4091 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
4092 if (adapter->rx_buffer_area == NULL) {
4093 device_printf(dev, "Unable to allocate rx_buffer memory\n");
/* Pre-7.0 FreeBSD lacked bus_get_dma_tag(); use a NULL parent there. */
4097 #if __FreeBSD_version >= 700000
4098 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
4100 error = bus_dma_tag_create(NULL, /* parent */
4102 1, 0, /* alignment, bounds */
4103 BUS_SPACE_MAXADDR, /* lowaddr */
4104 BUS_SPACE_MAXADDR, /* highaddr */
4105 NULL, NULL, /* filter, filterarg */
4106 MCLBYTES, /* maxsize */
4108 MCLBYTES, /* maxsegsize */
4110 NULL, /* lockfunc */
4114 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
4119 /* Create the spare map (used by getbuf) */
4120 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4121 &adapter->rx_sparemap);
4123 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
4128 rx_buffer = adapter->rx_buffer_area;
4129 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4130 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4133 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
4142 em_free_receive_structures(adapter);
4146 /*********************************************************************
4148 * (Re)initialize receive structures.
4150 **********************************************************************/
/*
 * Reset the RX ring: zero the descriptors, free any mbufs still held
 * in the rx_buffer slots, repopulate every slot via em_get_buf(), and
 * sync the descriptor memory for the hardware.
 */
4152 em_setup_receive_structures(struct adapter *adapter)
4154 struct em_buffer *rx_buffer;
4157 /* Reset descriptor ring */
4158 bzero(adapter->rx_desc_base,
4159 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
4161 /* Free current RX buffers. */
4162 rx_buffer = adapter->rx_buffer_area;
4163 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4164 if (rx_buffer->m_head != NULL) {
4165 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4166 BUS_DMASYNC_POSTREAD);
4167 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
4168 m_freem(rx_buffer->m_head);
4169 rx_buffer->m_head = NULL;
4173 /* Allocate new ones. */
4174 for (i = 0; i < adapter->num_rx_desc; i++) {
4175 error = em_get_buf(adapter, i);
4180 /* Setup our descriptor pointers */
4181 adapter->next_rx_desc_to_check = 0;
4182 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4183 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4188 /*********************************************************************
4190 * Enable receive unit.
4192 **********************************************************************/
4193 #define MAX_INTS_PER_SEC 8000
4194 #define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256)
/*
 * Program the receiver: disable RX while configuring, set interrupt
 * throttling (ITR/EITR), ring base/length (RDBAL/RDBAH/RDLEN), RCTL
 * buffer-size and filtering bits, RX checksum offload, then re-enable
 * and set head/tail pointers.
 */
4197 em_initialize_receive_unit(struct adapter *adapter)
4199 struct ifnet *ifp = adapter->ifp;
4203 INIT_DEBUGOUT("em_initialize_receive_unit: begin");
4206 * Make sure receives are disabled while setting
4207 * up the descriptor ring
4209 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4210 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
4212 if (adapter->hw.mac.type >= e1000_82540) {
4213 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
4214 adapter->rx_abs_int_delay.value);
4216 * Set the interrupt throttling rate. Value is calculated
4217 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
4219 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
4223 ** When using MSIX interrupts we need to throttle
4224 ** using the EITR register (82574 only)
4227 for (int i = 0; i < 4; i++)
4228 E1000_WRITE_REG(&adapter->hw,
4229 E1000_EITR_82574(i), DEFAULT_ITR);
4231 /* Disable accelerated acknowledge */
4232 if (adapter->hw.mac.type == e1000_82574)
4233 E1000_WRITE_REG(&adapter->hw,
4234 E1000_RFCTL, E1000_RFCTL_ACK_DIS);
4236 /* Setup the Base and Length of the Rx Descriptor Ring */
4237 bus_addr = adapter->rxdma.dma_paddr;
4238 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
4239 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
4240 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
4241 (u32)(bus_addr >> 32));
4242 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
4245 /* Setup the Receive Control Register */
4246 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4247 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
4248 E1000_RCTL_RDMTS_HALF |
4249 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4251 /* Make sure VLAN Filters are off */
4252 rctl &= ~E1000_RCTL_VFE;
/* TBI workaround on 82543 may require accepting bad frames. */
4254 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
4255 rctl |= E1000_RCTL_SBP;
4257 rctl &= ~E1000_RCTL_SBP;
/* Select RX buffer size; sizes above 2048 need the BSEX multiplier. */
4259 switch (adapter->rx_buffer_len) {
4262 rctl |= E1000_RCTL_SZ_2048;
4265 rctl |= E1000_RCTL_SZ_4096 |
4266 E1000_RCTL_BSEX | E1000_RCTL_LPE;
4269 rctl |= E1000_RCTL_SZ_8192 |
4270 E1000_RCTL_BSEX | E1000_RCTL_LPE;
4273 rctl |= E1000_RCTL_SZ_16384 |
4274 E1000_RCTL_BSEX | E1000_RCTL_LPE;
4278 if (ifp->if_mtu > ETHERMTU)
4279 rctl |= E1000_RCTL_LPE;
4281 rctl &= ~E1000_RCTL_LPE;
4283 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
4284 if ((adapter->hw.mac.type >= e1000_82543) &&
4285 (ifp->if_capenable & IFCAP_RXCSUM)) {
4286 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
4287 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
4288 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
4292 ** XXX TEMPORARY WORKAROUND: on some systems with 82573
4293 ** long latencies are observed, like Lenovo X60. This
4294 ** change eliminates the problem, but since having positive
4295 ** values in RDTR is a known source of problems on other
4296 ** platforms another solution is being sought.
4298 if (adapter->hw.mac.type == e1000_82573)
4299 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);
4301 /* Enable Receives */
4302 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4305 * Setup the HW Rx Head and
4306 * Tail Descriptor Pointers
4308 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
4309 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
4314 /*********************************************************************
4316 * Free receive related data structures.
4318 **********************************************************************/
/*
 * Teardown counterpart of the RX allocation path: destroy the spare
 * map, unload/destroy each slot's map (freeing any held mbuf), then
 * release the rx_buffer array and the RX DMA tag.  NULL-guarded, so
 * it is safe after a partial allocation failure.
 */
4320 em_free_receive_structures(struct adapter *adapter)
4322 struct em_buffer *rx_buffer;
4325 INIT_DEBUGOUT("free_receive_structures: begin");
4327 if (adapter->rx_sparemap) {
4328 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
4329 adapter->rx_sparemap = NULL;
4332 /* Cleanup any existing buffers */
4333 if (adapter->rx_buffer_area != NULL) {
4334 rx_buffer = adapter->rx_buffer_area;
4335 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4336 if (rx_buffer->m_head != NULL) {
4337 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4338 BUS_DMASYNC_POSTREAD);
4339 bus_dmamap_unload(adapter->rxtag,
4341 m_freem(rx_buffer->m_head);
4342 rx_buffer->m_head = NULL;
4343 } else if (rx_buffer->map != NULL)
4344 bus_dmamap_unload(adapter->rxtag,
4346 if (rx_buffer->map != NULL) {
4347 bus_dmamap_destroy(adapter->rxtag,
4349 rx_buffer->map = NULL;
4354 if (adapter->rx_buffer_area != NULL) {
4355 free(adapter->rx_buffer_area, M_DEVBUF);
4356 adapter->rx_buffer_area = NULL;
4359 if (adapter->rxtag != NULL) {
4360 bus_dma_tag_destroy(adapter->rxtag);
4361 adapter->rxtag = NULL;
4365 /*********************************************************************
4367 * This routine executes in interrupt context. It replenishes
4368 * the mbufs in the descriptor and sends data which has been
4369 * dma'ed into host memory to upper layer.
4371 * We loop at most count times if count is > 0, or until done if
4374 *********************************************************************/
4376 em_rxeof(struct adapter *adapter, int count)
/* FIX: removed stray second semicolon (empty statement) after the
 * ifp initializer. */
4378 struct ifnet *ifp = adapter->ifp;
4380 u8 status, accept_frame = 0, eop = 0;
4381 u16 len, desc_len, prev_len_adj;
4383 struct e1000_rx_desc *current_desc;
4385 EM_RX_LOCK(adapter);
4386 i = adapter->next_rx_desc_to_check;
4387 current_desc = &adapter->rx_desc_base[i];
4388 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4389 BUS_DMASYNC_POSTREAD);
/* Nothing received — DD not set on the next descriptor to check. */
4391 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
4392 EM_RX_UNLOCK(adapter);
4396 while ((current_desc->status & E1000_RXD_STAT_DD) &&
4398 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4399 struct mbuf *m = NULL;
4401 mp = adapter->rx_buffer_area[i].m_head;
4403 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
4404 * needs to access the last received byte in the mbuf.
4406 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
4407 BUS_DMASYNC_POSTREAD);
4411 desc_len = le16toh(current_desc->length);
4412 status = current_desc->status;
/* On the EOP fragment, strip the trailing Ethernet CRC from len. */
4413 if (status & E1000_RXD_STAT_EOP) {
4416 if (desc_len < ETHER_CRC_LEN) {
4418 prev_len_adj = ETHER_CRC_LEN - desc_len;
4420 len = desc_len - ETHER_CRC_LEN;
/* Errored frame: TBI_ACCEPT may still admit it (82543 TBI quirk). */
4426 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
4428 u32 pkt_len = desc_len;
4430 if (adapter->fmp != NULL)
4431 pkt_len += adapter->fmp->m_pkthdr.len;
4433 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
4434 if (TBI_ACCEPT(&adapter->hw, status,
4435 current_desc->errors, pkt_len, last_byte,
4436 adapter->min_frame_size, adapter->max_frame_size)) {
4437 e1000_tbi_adjust_stats_82543(&adapter->hw,
4438 &adapter->stats, pkt_len,
4439 adapter->hw.mac.addr,
4440 adapter->max_frame_size);
/* Replenish the slot; on failure the old mbuf is reused below. */
4448 if (em_get_buf(adapter, i) != 0) {
4453 /* Assign correct length to the current fragment */
4456 if (adapter->fmp == NULL) {
4457 mp->m_pkthdr.len = len;
4458 adapter->fmp = mp; /* Store the first mbuf */
4461 /* Chain mbuf's together */
4462 mp->m_flags &= ~M_PKTHDR;
4464 * Adjust length of previous mbuf in chain if
4465 * we received less than 4 bytes in the last
4468 if (prev_len_adj > 0) {
4469 adapter->lmp->m_len -= prev_len_adj;
4470 adapter->fmp->m_pkthdr.len -=
4473 adapter->lmp->m_next = mp;
4474 adapter->lmp = adapter->lmp->m_next;
4475 adapter->fmp->m_pkthdr.len += len;
4479 adapter->fmp->m_pkthdr.rcvif = ifp;
4481 em_receive_checksum(adapter, current_desc,
4483 #ifndef __NO_STRICT_ALIGNMENT
4484 if (adapter->max_frame_size >
4485 (MCLBYTES - ETHER_ALIGN) &&
4486 em_fixup_rx(adapter) != 0)
4489 if (status & E1000_RXD_STAT_VP) {
4490 #if __FreeBSD_version < 700000
4491 VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
4492 (le16toh(current_desc->special) &
4493 E1000_RXD_SPC_VLAN_MASK));
4495 adapter->fmp->m_pkthdr.ether_vtag =
4496 (le16toh(current_desc->special) &
4497 E1000_RXD_SPC_VLAN_MASK);
4498 adapter->fmp->m_flags |= M_VLANTAG;
4501 #ifndef __NO_STRICT_ALIGNMENT
4505 adapter->fmp = NULL;
4506 adapter->lmp = NULL;
/* Rejected frame: reset and reuse the already-loaded DMA map/mbuf. */
4511 /* Reuse loaded DMA map and just update mbuf chain */
4512 mp = adapter->rx_buffer_area[i].m_head;
4513 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
4514 mp->m_data = mp->m_ext.ext_buf;
4516 if (adapter->max_frame_size <=
4517 (MCLBYTES - ETHER_ALIGN))
4518 m_adj(mp, ETHER_ALIGN);
4519 if (adapter->fmp != NULL) {
4520 m_freem(adapter->fmp);
4521 adapter->fmp = NULL;
4522 adapter->lmp = NULL;
4527 /* Zero out the receive descriptors status. */
4528 current_desc->status = 0;
4529 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4530 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4532 /* Advance our pointers to the next descriptor. */
4533 if (++i == adapter->num_rx_desc)
4536 adapter->next_rx_desc_to_check = i;
4537 /* Unlock for call into stack */
4538 EM_RX_UNLOCK(adapter);
4539 (*ifp->if_input)(ifp, m);
4540 EM_RX_LOCK(adapter);
/* Re-read the index: it may have moved while the lock was dropped. */
4541 i = adapter->next_rx_desc_to_check;
4543 current_desc = &adapter->rx_desc_base[i];
4545 adapter->next_rx_desc_to_check = i;
4547 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
4549 i = adapter->num_rx_desc - 1;
4550 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
4551 EM_RX_UNLOCK(adapter);
4552 if (!((current_desc->status) & E1000_RXD_STAT_DD))
4558 #ifndef __NO_STRICT_ALIGNMENT
4560 * When jumbo frames are enabled we should realign entire payload on
4561 * architectures with strict alignment. This is serious design mistake of 8254x
4562 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
4563 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
4564 * payload. On architectures without strict alignment restrictions 8254x still
4565 * performs unaligned memory access which would reduce the performance too.
4566 * To avoid copying over an entire frame to align, we allocate a new mbuf and
4567 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
4568 * existing mbuf chain.
4570 * Be aware, best performance of the 8254x is achieved only when jumbo frame is
4571 * not used at all on architectures with strict alignment.
/* Realign a received jumbo frame by sliding (or prepending) the
 * Ethernet header so the IP payload is aligned; drops the frame on
 * mbuf-allocation failure. */
4574 em_fixup_rx(struct adapter *adapter)
/* Cheap path: room in the cluster, slide the data up in place. */
4581 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
4582 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
4583 m->m_data += ETHER_HDR_LEN;
/* Otherwise: move the Ethernet header into a new mbuf and prepend it. */
4585 MGETHDR(n, M_DONTWAIT, MT_DATA);
4587 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
4588 m->m_data += ETHER_HDR_LEN;
4589 m->m_len -= ETHER_HDR_LEN;
4590 n->m_len = ETHER_HDR_LEN;
4591 M_MOVE_PKTHDR(n, m);
/* Allocation failed: count and drop the whole chain. */
4595 adapter->dropped_pkts++;
4596 m_freem(adapter->fmp);
4597 adapter->fmp = NULL;
4606 /*********************************************************************
4608 * Verify that the hardware indicated that the checksum is valid.
4609 * Inform the stack about the status of checksum so that stack
4610 * doesn't spend time verifying the checksum.
4612 *********************************************************************/
/* Translate RX descriptor checksum status bits into mbuf csum_flags
 * so the network stack can skip software verification. */
4614 em_receive_checksum(struct adapter *adapter,
4615 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
4617 /* 82543 or newer only */
4618 if ((adapter->hw.mac.type < e1000_82543) ||
4619 /* Ignore Checksum bit is set */
4620 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
4621 mp->m_pkthdr.csum_flags = 0;
/* Hardware checked the IP header checksum. */
4625 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
4627 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
4628 /* IP Checksum Good */
4629 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4630 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4633 mp->m_pkthdr.csum_flags = 0;
/* Hardware checked the TCP/UDP checksum. */
4637 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
4639 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
4640 mp->m_pkthdr.csum_flags |=
4641 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
/* 0xffff tells the stack the pseudo-header sum already verifies. */
4642 mp->m_pkthdr.csum_data = htons(0xffff);
4648 #ifdef EM_HW_VLAN_SUPPORT
4650 * This routine is run via a vlan
/* VLAN registration event handler: enable HW VLAN stripping and
 * filtering, add 'vtag' to the VLAN filter table (VFTA), and raise
 * the long-packet limit to make room for the tag. */
4654 em_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
4656 struct adapter *adapter = ifp->if_softc;
4657 u32 ctrl, rctl, index, vfta;
4659 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4660 ctrl |= E1000_CTRL_VME;
4661 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4663 /* Setup for Hardware Filter */
4664 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4665 rctl |= E1000_RCTL_VFE;
4666 rctl &= ~E1000_RCTL_CFIEN;
4667 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4669 /* Make entry in the hardware filter table */
/* VFTA is a bitmap: word index = vtag[11:5], bit = vtag[4:0]. */
4670 index = ((vtag >> 5) & 0x7F);
4671 vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
4672 vfta |= (1 << (vtag & 0x1F));
4673 E1000_WRITE_REG_ARRAY(&adapter->hw, E1000_VFTA, index, vfta);
4675 /* Update the frame size */
4676 E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
4677 adapter->max_frame_size + VLAN_TAG_SIZE);
/*
 * em_unregister_vlan -- vlan_unconfig event handler: clear this vtag's
 * VFTA bit; when the last VLAN is gone, disable hw filtering entirely.
 * NOTE(review): elided listing (braces, local declarations missing);
 * code kept verbatim, comments only.
 */
4682  * This routine is run via an vlan
4686 em_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
4688 	struct adapter	*adapter = ifp->if_softc;
/* Mirror of em_register_vlan's VFTA indexing: word = vtag[11:5]. */
4691 	/* Remove entry in the hardware filter table */
4692 	index = ((vtag >> 5) & 0x7F);
4693 	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
4694 	vfta &= ~(1 << (vtag & 0x1F));
4695 	E1000_WRITE_REG_ARRAY(&adapter->hw, E1000_VFTA, index, vfta);
4696 	/* Have all vlans unregistered? */
4697 	if (adapter->ifp->if_vlantrunk == NULL) {
4699 		/* Turn off the filter table */
4700 		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4701 		rctl &= ~E1000_RCTL_VFE;
4702 		rctl |= E1000_RCTL_CFIEN;
4703 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4704 		/* Reset the frame size */
4705 		E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
4706 		    adapter->max_frame_size);
4709 #endif /* EM_HW_VLAN_SUPPORT */
/*
 * em_enable_intr -- unmask device interrupts; for MSI-X also arm the
 * auto-clear (EIAC) vectors before setting IMS.
 * NOTE(review): elided listing; code kept verbatim, comments only.
 */
4712 em_enable_intr(struct adapter *adapter)
4714 	struct e1000_hw *hw = &adapter->hw;
4715 	u32 ims_mask = IMS_ENABLE_MASK;
4717 	if (adapter->msix) {
4718 		E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
4719 		ims_mask |= EM_MSIX_MASK;
4721 	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
/*
 * em_disable_intr -- mask all device interrupts (IMC = all ones) and
 * clear the MSI-X auto-clear register.
 * NOTE(review): elided listing; code kept verbatim, comments only.
 */
4725 em_disable_intr(struct adapter *adapter)
4727 	struct e1000_hw *hw = &adapter->hw;
4730 	E1000_WRITE_REG(hw, EM_EIAC, 0);
4731 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
/*
 * em_init_manageability -- hand ARP handling to the OS and, on 82571+,
 * forward management packets (ports 623/664) to the host.
 * NOTE(review): elided listing (braces missing); code kept verbatim,
 * comments only.
 */
4735  * Bit of a misnomer, what this really means is
4736  * to enable OS management of the system... aka
4737  * to disable special hardware management features
4740 em_init_manageability(struct adapter *adapter)
4742 	/* A shared code workaround */
4743 #define E1000_82542_MANC2H E1000_MANC2H
4744 	if (adapter->has_manage) {
4745 		int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
4746 		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4748 		/* disable hardware interception of ARP */
4749 		manc &= ~(E1000_MANC_ARP_EN);
4751 		/* enable receiving management packets to the host */
4752 		if (adapter->hw.mac.type >= e1000_82571) {
4753 			manc |= E1000_MANC_EN_MNG2HOST;
/* 623 = ASF-RMCP, 664 = secure RMCP; bits per MANC2H register layout. */
4754 #define E1000_MNG2HOST_PORT_623 (1 << 5)
4755 #define E1000_MNG2HOST_PORT_664 (1 << 6)
4756 			manc2h |= E1000_MNG2HOST_PORT_623;
4757 			manc2h |= E1000_MNG2HOST_PORT_664;
4758 			E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
4761 		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
/*
 * em_release_manageability -- inverse of em_init_manageability: give ARP
 * interception back to the firmware on detach/suspend.
 * NOTE(review): elided listing; code kept verbatim, comments only.
 */
4766  * Give control back to hardware management
4767  * controller if there is one.
4770 em_release_manageability(struct adapter *adapter)
4772 	if (adapter->has_manage) {
4773 		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4775 		/* re-enable hardware interception of ARP */
4776 		manc |= E1000_MANC_ARP_EN;
4778 		if (adapter->hw.mac.type >= e1000_82571)
4779 			manc &= ~E1000_MANC_EN_MNG2HOST;
4781 		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
/*
 * em_get_hw_control -- tell firmware the driver owns the device by
 * setting DRV_LOAD (via SWSM on 82573, CTRL_EXT on ICH/82571-class).
 * NOTE(review): elided listing -- the case labels before L2820 (e.g.
 * e1000_82573) and several others are missing from view.  Code kept
 * verbatim, comments only.
 */
4786  * em_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4787  * For ASF and Pass Through versions of f/w this means that
4788  * the driver is loaded. For AMT version (only with 82573)
4789  * of the f/w this means that the network i/f is open.
4793 em_get_hw_control(struct adapter *adapter)
4797 	/* Let firmware know the driver has taken over */
4798 	switch (adapter->hw.mac.type) {
4800 		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4801 		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4802 		    swsm | E1000_SWSM_DRV_LOAD);
4806 	case e1000_80003es2lan:
4809 	case e1000_ich10lan:
4810 		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4811 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4812 		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
/*
 * em_release_hw_control -- inverse of em_get_hw_control: clear DRV_LOAD
 * so firmware resumes control of the device.
 * NOTE(review): elided listing -- several case labels are missing from
 * view.  Code kept verbatim, comments only.
 */
4820  * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4821  * For ASF and Pass Through versions of f/w this means that the
4822  * driver is no longer loaded. For AMT version (only with 82573) i
4823  * of the f/w this means that the network i/f is closed.
4827 em_release_hw_control(struct adapter *adapter)
4831 	/* Let firmware taken over control of h/w */
4832 	switch (adapter->hw.mac.type) {
4834 		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4835 		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4836 		    swsm & ~E1000_SWSM_DRV_LOAD);
4840 	case e1000_80003es2lan:
4843 	case e1000_ich10lan:
4844 		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4845 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4846 		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
/*
 * em_is_valid_ether_addr -- reject multicast (low bit of first octet
 * set) and all-zero MAC addresses.  Return statements are elided from
 * this listing; code kept verbatim, comments only.
 */
4855 em_is_valid_ether_addr(u8 *addr)
4857 	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4859 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
/*
 * em_enable_wakeup -- walk PCI config space to the power-management
 * capability and set PME status + enable so the NIC can wake the host.
 * NOTE(review): elided listing (locals cap/id/status and the early
 * return body are missing from view); code kept verbatim, comments only.
 */
4867  * Enable PCI Wake On Lan capability
4870 em_enable_wakeup(device_t dev)
4875 	/* First find the capabilities pointer*/
4876 	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
4877 	/* Read the PM Capabilities */
4878 	id = pci_read_config(dev, cap, 1);
4879 	if (id != PCIY_PMG)     /* Something wrong */
4881 	/* OK, we have the power capabilities, so
4882 	   now get the status register */
4883 	cap += PCIR_POWER_STATUS;
4884 	status = pci_read_config(dev, cap, 2);
4885 	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4886 	pci_write_config(dev, cap, status, 2);
/*
 * em_fill_descriptors -- 82544 errata workaround: if a DMA segment's
 * terminator nibble ((addr & 7) + (len & 0xF)) lands in 1..4 (hang) or
 * 9..0xC (DAC), split the segment into (len - 4) + 4-byte descriptors.
 * NOTE(review): elided listing -- the short-length guard before L2877
 * (original line 4918) is missing from view.  Code kept verbatim,
 * comments only.
 */
4891 /*********************************************************************
4892  * 82544 Coexistence issue workaround.
4893  *    There are 2 issues.
4894  *       1. Transmit Hang issue.
4895  *    To detect this issue, following equation can be used...
4896  *	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4897  *	  If SUM[3:0] is in between 1 to 4, we will have this issue.
4900  *    To detect this issue, following equation can be used...
4901  *	  SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4902  *	  If SUM[3:0] is in between 9 to c, we will have this issue.
4906  *	  Make sure we do not have ending address
4907  *	  as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4909  *************************************************************************/
4911 em_fill_descriptors (bus_addr_t address, u32 length,
4912 	PDESC_ARRAY desc_array)
4914 	u32 safe_terminator;
4916 	/* Since issue is sensitive to length and address.*/
4917 	/* Let us first check the address...*/
/* Elided guard: segments too short to split pass through unchanged. */
4919 		desc_array->descriptor[0].address = address;
4920 		desc_array->descriptor[0].length = length;
4921 		desc_array->elements = 1;
4922 		return (desc_array->elements);
4924 	safe_terminator = (u32)((((u32)address & 0x7) +
4925 	    (length & 0xF)) & 0xF);
4926 	/* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4927 	if (safe_terminator == 0 ||
4928 	    (safe_terminator > 4 &&
4929 	    safe_terminator < 9) ||
4930 	    (safe_terminator > 0xC &&
4931 	    safe_terminator <= 0xF)) {
4932 		desc_array->descriptor[0].address = address;
4933 		desc_array->descriptor[0].length = length;
4934 		desc_array->elements = 1;
4935 		return (desc_array->elements);
/* Problem terminator: split off the final 4 bytes into a second descriptor. */
4938 	desc_array->descriptor[0].address = address;
4939 	desc_array->descriptor[0].length = length - 4;
4940 	desc_array->descriptor[1].address = address + (length - 4);
4941 	desc_array->descriptor[1].length = 4;
4942 	desc_array->elements = 2;
4943 	return (desc_array->elements);
/*
 * em_update_stats_counters -- accumulate the device's read-clear MAC
 * statistics registers into the softc, then publish error/collision
 * totals into the ifnet counters.
 * NOTE(review): elided listing (braces, ifp local, and the low-dword
 * reads for the 64-bit counters are missing from view).  Code kept
 * verbatim, comments only.
 */
4946 /**********************************************************************
4948  *  Update the board statistics counters.
4950  **********************************************************************/
4952 em_update_stats_counters(struct adapter *adapter)
/* Symbol/sequence errors are only meaningful on copper or with link up. */
4956 	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
4957 	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4958 		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4959 		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4961 	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4962 	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4963 	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4964 	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4966 	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4967 	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4968 	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4969 	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4970 	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4971 	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4972 	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4973 	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4974 	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4975 	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4976 	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4977 	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4978 	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4979 	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4980 	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4981 	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4982 	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4983 	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4984 	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4985 	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4987 	/* For the 64-bit byte counters the low dword must be read first. */
4988 	/* Both registers clear on the read of the high dword */
/* NOTE(review): the GORCL/GOTCL low-dword reads are elided from this view. */
4990 	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
4991 	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);
4993 	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
4994 	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
4995 	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
4996 	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
4997 	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
4999 	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
5000 	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
5002 	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
5003 	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
5004 	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
5005 	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
5006 	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
5007 	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
5008 	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
5009 	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
5010 	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
5011 	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* These counters only exist on 82543 and newer MACs. */
5013 	if (adapter->hw.mac.type >= e1000_82543) {
5014 		adapter->stats.algnerrc += 
5015 		E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
5016 		adapter->stats.rxerrc += 
5017 		E1000_READ_REG(&adapter->hw, E1000_RXERRC);
5018 		adapter->stats.tncrs += 
5019 		E1000_READ_REG(&adapter->hw, E1000_TNCRS);
5020 		adapter->stats.cexterr += 
5021 		E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
5022 		adapter->stats.tsctc += 
5023 		E1000_READ_REG(&adapter->hw, E1000_TSCTC);
5024 		adapter->stats.tsctfc += 
5025 		E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
/* Publish aggregate counters to the ifnet (ifp declaration elided). */
5029 	ifp->if_collisions = adapter->stats.colc;
5032 	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
5033 	    adapter->stats.crcerrs + adapter->stats.algnerrc +
5034 	    adapter->stats.ruc + adapter->stats.roc +
5035 	    adapter->stats.mpc + adapter->stats.cexterr;
5038 	ifp->if_oerrors = adapter->stats.ecol +
5039 	    adapter->stats.latecol + adapter->watchdog_events;
/*
 * em_print_debug_info -- dump register state and driver soft counters
 * to the console; invoked from the debug_info sysctl handler.
 * NOTE(review): elided listing; code kept verbatim, comments only.
 */
5043 /**********************************************************************
5045  *  This routine is called only when em_display_debug_stats is enabled.
5046  *  This routine provides a way to take a look at important statistics
5047  *  maintained by the driver and hardware.
5049  **********************************************************************/
5051 em_print_debug_info(struct adapter *adapter)
5053 	device_t dev = adapter->dev;
5054 	u8 *hw_addr = adapter->hw.hw_addr;
5056 	device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
5057 	device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
5058 	    E1000_READ_REG(&adapter->hw, E1000_CTRL),
5059 	    E1000_READ_REG(&adapter->hw, E1000_RCTL));
/* PBA register: high word = Tx packet-buffer KB, low word = Rx KB. */
5060 	device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
5061 	    ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
5062 	    (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
5063 	device_printf(dev, "Flow control watermarks high = %d low = %d\n",
5064 	    adapter->hw.fc.high_water,
5065 	    adapter->hw.fc.low_water);
5066 	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
5067 	    E1000_READ_REG(&adapter->hw, E1000_TIDV),
5068 	    E1000_READ_REG(&adapter->hw, E1000_TADV));
5069 	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
5070 	    E1000_READ_REG(&adapter->hw, E1000_RDTR),
5071 	    E1000_READ_REG(&adapter->hw, E1000_RADV));
5072 	device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
5073 	    (long long)adapter->tx_fifo_wrk_cnt,
5074 	    (long long)adapter->tx_fifo_reset_cnt);
5075 	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
5076 	    E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
5077 	    E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
5078 	device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
5079 	    E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
5080 	    E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
5081 	device_printf(dev, "Num Tx descriptors avail = %d\n",
5082 	    adapter->num_tx_desc_avail);
5083 	device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
5084 	    adapter->no_tx_desc_avail1);
5085 	device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
5086 	    adapter->no_tx_desc_avail2);
5087 	device_printf(dev, "Std mbuf failed = %ld\n",
5088 	    adapter->mbuf_alloc_failed);
5089 	device_printf(dev, "Std mbuf cluster failed = %ld\n",
5090 	    adapter->mbuf_cluster_failed);
5091 	device_printf(dev, "Driver dropped packets = %ld\n",
5092 	    adapter->dropped_pkts);
5093 	device_printf(dev, "Driver tx dma failure in encap = %ld\n",
5094 	    adapter->no_tx_dma_setup);
/*
 * em_print_hw_stats -- dump the accumulated MAC statistics (softc copy,
 * not live registers) to the console; invoked from the stats sysctl.
 * NOTE(review): elided listing (the #endif matching the DEBUG_HW #if is
 * missing from view); code kept verbatim, comments only.
 */
5098 em_print_hw_stats(struct adapter *adapter)
5100 	device_t dev = adapter->dev;
5102 	device_printf(dev, "Excessive collisions = %lld\n",
5103 	    (long long)adapter->stats.ecol);
5104 #if	(DEBUG_HW > 0)  /* Dont output these errors normally */
5105 	device_printf(dev, "Symbol errors = %lld\n",
5106 	    (long long)adapter->stats.symerrs);
5108 	device_printf(dev, "Sequence errors = %lld\n",
5109 	    (long long)adapter->stats.sec);
5110 	device_printf(dev, "Defer count = %lld\n",
5111 	    (long long)adapter->stats.dc);
5112 	device_printf(dev, "Missed Packets = %lld\n",
5113 	    (long long)adapter->stats.mpc);
5114 	device_printf(dev, "Receive No Buffers = %lld\n",
5115 	    (long long)adapter->stats.rnbc);
5116 	/* RLEC is inaccurate on some hardware, calculate our own. */
5117 	device_printf(dev, "Receive Length Errors = %lld\n",
5118 	    ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
5119 	device_printf(dev, "Receive errors = %lld\n",
5120 	    (long long)adapter->stats.rxerrc);
5121 	device_printf(dev, "Crc errors = %lld\n",
5122 	    (long long)adapter->stats.crcerrs);
5123 	device_printf(dev, "Alignment errors = %lld\n",
5124 	    (long long)adapter->stats.algnerrc);
5125 	device_printf(dev, "Collision/Carrier extension errors = %lld\n",
5126 	    (long long)adapter->stats.cexterr);
5127 	device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
5128 	device_printf(dev, "watchdog timeouts = %ld\n",
5129 	    adapter->watchdog_events);
5130 	device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
5131 	    " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
5132 	    adapter->tx_irq , adapter->link_irq);
5133 	device_printf(dev, "XON Rcvd = %lld\n",
5134 	    (long long)adapter->stats.xonrxc);
5135 	device_printf(dev, "XON Xmtd = %lld\n",
5136 	    (long long)adapter->stats.xontxc);
5137 	device_printf(dev, "XOFF Rcvd = %lld\n",
5138 	    (long long)adapter->stats.xoffrxc);
5139 	device_printf(dev, "XOFF Xmtd = %lld\n",
5140 	    (long long)adapter->stats.xofftxc);
5141 	device_printf(dev, "Good Packets Rcvd = %lld\n",
5142 	    (long long)adapter->stats.gprc);
5143 	device_printf(dev, "Good Packets Xmtd = %lld\n",
5144 	    (long long)adapter->stats.gptc);
5145 	device_printf(dev, "TSO Contexts Xmtd = %lld\n",
5146 	    (long long)adapter->stats.tsctc);
5147 	device_printf(dev, "TSO Contexts Failed = %lld\n",
5148 	    (long long)adapter->stats.tsctfc);
/*
 * em_print_nvm_info -- hex-dump the first 32 16-bit words of the NVM
 * (EEPROM), 8 words per row.
 * NOTE(review): elided listing (locals i/j/row/eeprom_data and the row
 * bookkeeping inside the j == 8 branch are missing from view).  Code
 * kept verbatim, comments only.
 */
5151 /**********************************************************************
5153  * This routine provides a way to dump out the adapter eeprom,
5154  * often a useful debug/service tool. This only dumps the first
5155  * 32 words, stuff that matters is in that extent.
5157  **********************************************************************/
5159 em_print_nvm_info(struct adapter *adapter)
5164 	/* Its a bit crude, but it gets the job done */
5165 	printf("\nInterface EEPROM Dump:\n");
5166 	printf("Offset\n0x0000  ");
5167 	for (i = 0, j = 0; i < 32; i++, j++) {
5168 		if (j == 8) { /* Make the offset block */
5170 			printf("\n0x00%x0  ",row);
5172 		e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
5173 		printf("%04x ", eeprom_data);
/*
 * em_sysctl_debug_info -- sysctl handler: value 1 dumps debug info,
 * another value (elided) dumps the NVM.  Returns a sysctl errno.
 * NOTE(review): elided listing (result local, the switch/if selecting
 * between the two dumps, and returns are missing from view).  Code kept
 * verbatim, comments only.
 */
5179 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5181 	struct adapter *adapter;
5186 	error = sysctl_handle_int(oidp, &result, 0, req);
5188 	if (error || !req->newptr)
5192 		adapter = (struct adapter *)arg1;
5193 		em_print_debug_info(adapter);
5196 	 * This value will cause a hex dump of the
5197 	 * first 32 16-bit words of the EEPROM to
5201 		adapter = (struct adapter *)arg1;
5202 		em_print_nvm_info(adapter);
/*
 * em_sysctl_stats -- sysctl handler that triggers a hardware-statistics
 * dump when written to.  Returns a sysctl errno.
 * NOTE(review): elided listing (result local, value check, returns
 * missing from view); code kept verbatim, comments only.
 */
5210 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
5212 	struct adapter *adapter;
5217 	error = sysctl_handle_int(oidp, &result, 0, req);
5219 	if (error || !req->newptr)
5223 		adapter = (struct adapter *)arg1;
5224 		em_print_hw_stats(adapter);
/*
 * em_sysctl_int_delay -- sysctl handler for interrupt-delay tunables:
 * validates the microsecond value, converts to device ticks, and writes
 * it into the low 16 bits of the associated delay register under the
 * core lock.
 * NOTE(review): elided listing (case labels of the special-case switch
 * and several returns are missing from view).  Code kept verbatim,
 * comments only.
 */
5231 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
5233 	struct em_int_delay_info *info;
5234 	struct adapter *adapter;
5240 	info = (struct em_int_delay_info *)arg1;
5241 	usecs = info->value;
5242 	error = sysctl_handle_int(oidp, &usecs, 0, req);
5243 	if (error != 0 || req->newptr == NULL)
/* 65535 ticks is the register field's maximum; reject out-of-range input. */
5245 	if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
5247 	info->value = usecs;
5248 	ticks = EM_USECS_TO_TICKS(usecs);
5250 	adapter = info->adapter;
5252 	EM_CORE_LOCK(adapter);
5253 	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
5254 	regval = (regval & ~0xffff) | (ticks & 0xffff);
5255 	/* Handle a few special cases. */
5256 	switch (info->offset) {
/* TIDV special case (labels elided): toggle IDE in the tx command word. */
5261 			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
5262 			/* Don't write 0 into the TIDV register. */
5265 			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
5268 	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
5269 	EM_CORE_UNLOCK(adapter);
/*
 * em_add_int_delay_sysctl -- bind an em_int_delay_info record to a
 * register offset/initial value and expose it as a read-write integer
 * sysctl served by em_sysctl_int_delay.
 * NOTE(review): elided listing; code kept verbatim, comments only.
 */
5274 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
5275 	const char *description, struct em_int_delay_info *info,
5276 	int offset, int value)
5278 	info->adapter = adapter;
5279 	info->offset = offset;
5280 	info->value = value;
5281 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
5282 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5283 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5284 	    info, 0, em_sysctl_int_delay, "I", description);
/*
 * em_add_rx_process_limit -- expose the per-interrupt rx processing
 * limit as a read-write integer sysctl (fast-interrupt path only).
 * NOTE(review): elided listing; code kept verbatim, comments only.
 */
5287 #ifndef EM_LEGACY_IRQ
5289 em_add_rx_process_limit(struct adapter *adapter, const char *name,
5290 	const char *description, int *limit, int value)
5293 	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
5294 	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5295 	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
/*
 * em_tsync_init -- bring up IEEE 1588 (PTP) hardware timestamping:
 * program the SYSTIM increment, enable TX and RX timestamp capture,
 * and set the PTP ethertype/UDP-port filters.
 * NOTE(review): elided listing (locals tx_ctl/rx_ctl, the tx_ctl enable
 * bits before L3144, error returns, and the TSYNCRXCFG write around
 * L3158 are missing from view).  Code kept verbatim, comments only.
 */
5301  * Initialize the Time Sync Feature
5304 em_tsync_init(struct adapter *adapter)
5306 	device_t dev = adapter->dev;
/* TIMINCA: incperiod in bits 31:24, incvalue derived from tick rate. */
5310 	E1000_WRITE_REG(&adapter->hw, E1000_TIMINCA, (1<<24) |
5311 	    20833/PICOSECS_PER_TICK);
5313 	adapter->last_stamp = E1000_READ_REG(&adapter->hw, E1000_SYSTIML);
5314 	adapter->last_stamp |= (u64)E1000_READ_REG(&adapter->hw,
5315 	    E1000_SYSTIMH) << 32ULL;
5317 	/* Enable the TX side */
5318 	tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
5320 	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl);
5321 	E1000_WRITE_FLUSH(&adapter->hw);
/* Read back to verify the enable bit (0x10) actually stuck. */
5323 	tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
5324 	if ((tx_ctl & 0x10) == 0) {
5325 		device_printf(dev, "Failed to enable TX timestamping\n");
5330 	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
5331 	rx_ctl |= 0x10; /* Enable the feature */
5332 	rx_ctl |= 0x0a; /* This value turns on Ver 1 and 2 */
5333 	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl);
/* 0x88F7 is the IEEE 1588 PTP ethertype. */
5336 	 * Ethertype Stamping (Ethertype = 0x88F7)
5338 	E1000_WRITE_REG(&adapter->hw, E1000_RXMTRL, htonl(0x440088f7));
5341 	 * Source Port Queue Filter Setup:
5342 	 *  this is for UDP port filtering
5344 	E1000_WRITE_REG(&adapter->hw, E1000_RXUDP, htons(TSYNC_PORT));
5345 	/* Protocol = UDP, enable Timestamp, and filter on source/protocol */
5347 	E1000_WRITE_FLUSH(&adapter->hw);
/* Same read-back verification for the RX side. */
5349 	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
5350 	if ((rx_ctl & 0x10) == 0) {
5351 		device_printf(dev, "Failed to enable RX timestamping\n");
5355 	device_printf(dev, "IEEE 1588 Precision Time Protocol enabled\n");
/*
 * em_tsync_disable -- tear down IEEE 1588 timestamping: clear the TX
 * and RX enable bits and read the timestamp-high registers to unlatch
 * any pending timestamps.
 * NOTE(review): elided listing (the bit-clear operations on tx_ctl and
 * rx_ctl, and the read-back verification conditions, are missing from
 * view).  Code kept verbatim, comments only.
 */
5361  * Disable the Time Sync Feature
5364 em_tsync_disable(struct adapter *adapter)
5368 	tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
5370 	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl);
5371 	E1000_WRITE_FLUSH(&adapter->hw);
/* Reading TXSTMPH releases the latched timestamp so new ones can land. */
5373 	/* Invalidate TX Timestamp */
5374 	E1000_READ_REG(&adapter->hw, E1000_TXSTMPH);
5376 	tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
5378 		HW_DEBUGOUT("Failed to disable TX timestamping\n");
5380 	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
5383 	E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl);
5384 	E1000_WRITE_FLUSH(&adapter->hw);
/* Likewise unlatch the RX timestamp. */
5386 	/* Invalidate RX Timestamp */
5387 	E1000_READ_REG(&adapter->hw, E1000_RXSATRH);
5389 	rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
5391 		HW_DEBUGOUT("Failed to disable RX timestamping\n");
5395 #endif /* EM_TIMESYNC */