1 /******************************************************************************
3 Copyright (c) 2001-2009, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #if __FreeBSD_version >= 800000
43 #include <sys/buf_ring.h>
46 #include <sys/endian.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
49 #include <sys/malloc.h>
51 #include <sys/module.h>
53 #include <sys/socket.h>
54 #include <sys/sockio.h>
55 #include <sys/sysctl.h>
56 #include <sys/taskqueue.h>
57 #if __FreeBSD_version >= 700029
58 #include <sys/eventhandler.h>
60 #include <machine/bus.h>
61 #include <machine/resource.h>
64 #include <net/ethernet.h>
66 #include <net/if_arp.h>
67 #include <net/if_dl.h>
68 #include <net/if_media.h>
70 #include <net/if_types.h>
71 #include <net/if_vlan_var.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/in.h>
75 #include <netinet/if_ether.h>
76 #include <netinet/ip.h>
77 #include <netinet/ip6.h>
78 #include <netinet/tcp.h>
79 #include <netinet/udp.h>
81 #include <machine/in_cksum.h>
82 #include <dev/pci/pcivar.h>
83 #include <dev/pci/pcireg.h>
85 #include "e1000_api.h"
86 #include "e1000_82571.h"
89 /*********************************************************************
90 * Set this to one to display debug statistics
91 *********************************************************************/
/* Runtime switch; NOTE(review): presumably read by the stats/debug
 * print paths elsewhere in this file — the consumers are not visible
 * in this extract. */
92 int em_display_debug_stats = 0;
94 /*********************************************************************
 * Driver version string reported by the driver (original line ~95,
 * the "Driver version" banner, is elided in this extract).
96 *********************************************************************/
97 char em_driver_version[] = "6.9.14";
100 /*********************************************************************
101 * PCI Device ID Table
103 * Used by probe to select devices to load on
104 * Last field stores an index into e1000_strings
105 * Last entry must be all 0s
107 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
108 *********************************************************************/
110 static em_vendor_info_t em_vendor_info_array[] =
112 /* Intel(R) PRO/1000 Network Connection */
113 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
132 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
143 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
147 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
148 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
149 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
150 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
151 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
152 PCI_ANY_ID, PCI_ANY_ID, 0},
154 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
155 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
156 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
158 { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
159 { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
160 { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
161 { 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL,
162 PCI_ANY_ID, PCI_ANY_ID, 0},
163 { 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD,
164 PCI_ANY_ID, PCI_ANY_ID, 0},
165 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
166 PCI_ANY_ID, PCI_ANY_ID, 0},
167 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
168 PCI_ANY_ID, PCI_ANY_ID, 0},
169 { 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
170 PCI_ANY_ID, PCI_ANY_ID, 0},
171 { 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
172 PCI_ANY_ID, PCI_ANY_ID, 0},
173 { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
174 { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
175 { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
176 { 0x8086, E1000_DEV_ID_82572EI, PCI_ANY_ID, PCI_ANY_ID, 0},
178 { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0},
179 { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0},
180 { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0},
181 { 0x8086, E1000_DEV_ID_82583V, PCI_ANY_ID, PCI_ANY_ID, 0},
182 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
183 PCI_ANY_ID, PCI_ANY_ID, 0},
184 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
185 PCI_ANY_ID, PCI_ANY_ID, 0},
186 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
187 PCI_ANY_ID, PCI_ANY_ID, 0},
188 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
189 PCI_ANY_ID, PCI_ANY_ID, 0},
190 { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
191 { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
192 { 0x8086, E1000_DEV_ID_ICH8_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
193 { 0x8086, E1000_DEV_ID_ICH8_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
194 { 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
195 { 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
196 { 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
198 { 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
199 { 0x8086, E1000_DEV_ID_ICH9_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
200 { 0x8086, E1000_DEV_ID_ICH9_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
201 { 0x8086, E1000_DEV_ID_ICH9_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
202 { 0x8086, E1000_DEV_ID_ICH9_IGP_M_V, PCI_ANY_ID, PCI_ANY_ID, 0},
203 { 0x8086, E1000_DEV_ID_ICH9_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
204 { 0x8086, E1000_DEV_ID_ICH9_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
205 { 0x8086, E1000_DEV_ID_ICH9_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
206 { 0x8086, E1000_DEV_ID_ICH9_BM, PCI_ANY_ID, PCI_ANY_ID, 0},
207 { 0x8086, E1000_DEV_ID_82574L, PCI_ANY_ID, PCI_ANY_ID, 0},
208 { 0x8086, E1000_DEV_ID_82574LA, PCI_ANY_ID, PCI_ANY_ID, 0},
209 { 0x8086, E1000_DEV_ID_ICH10_R_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
210 { 0x8086, E1000_DEV_ID_ICH10_R_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
211 { 0x8086, E1000_DEV_ID_ICH10_R_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0},
212 { 0x8086, E1000_DEV_ID_ICH10_D_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
213 { 0x8086, E1000_DEV_ID_ICH10_D_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
214 /* required last entry */
218 /*********************************************************************
219 * Table of branding strings for all supported NICs.
220 *********************************************************************/
222 static char *em_strings[] = {
223 "Intel(R) PRO/1000 Network Connection"
226 /*********************************************************************
227 * Function prototypes
228 *********************************************************************/
229 static int em_probe(device_t);
230 static int em_attach(device_t);
231 static int em_detach(device_t);
232 static int em_shutdown(device_t);
233 static int em_suspend(device_t);
234 static int em_resume(device_t);
235 static void em_start(struct ifnet *);
236 static void em_start_locked(struct ifnet *ifp);
237 #if __FreeBSD_version >= 800000
238 static int em_mq_start(struct ifnet *, struct mbuf *);
239 static int em_mq_start_locked(struct ifnet *, struct mbuf *);
240 static void em_qflush(struct ifnet *);
242 static int em_ioctl(struct ifnet *, u_long, caddr_t);
243 static void em_watchdog(struct adapter *);
244 static void em_init(void *);
245 static void em_init_locked(struct adapter *);
246 static void em_stop(void *);
247 static void em_media_status(struct ifnet *, struct ifmediareq *);
248 static int em_media_change(struct ifnet *);
249 static void em_identify_hardware(struct adapter *);
250 static int em_allocate_pci_resources(struct adapter *);
251 static int em_allocate_legacy(struct adapter *adapter);
252 static int em_allocate_msix(struct adapter *adapter);
253 static int em_setup_msix(struct adapter *);
254 static void em_free_pci_resources(struct adapter *);
255 static void em_local_timer(void *);
256 static int em_hardware_init(struct adapter *);
257 static void em_setup_interface(device_t, struct adapter *);
258 static void em_setup_transmit_structures(struct adapter *);
259 static void em_initialize_transmit_unit(struct adapter *);
260 static int em_setup_receive_structures(struct adapter *);
261 static void em_initialize_receive_unit(struct adapter *);
262 static void em_enable_intr(struct adapter *);
263 static void em_disable_intr(struct adapter *);
264 static void em_free_transmit_structures(struct adapter *);
265 static void em_free_receive_structures(struct adapter *);
266 static void em_update_stats_counters(struct adapter *);
267 static void em_txeof(struct adapter *);
268 static void em_tx_purge(struct adapter *);
269 static int em_allocate_receive_structures(struct adapter *);
270 static int em_allocate_transmit_structures(struct adapter *);
271 static int em_rxeof(struct adapter *, int);
272 #ifndef __NO_STRICT_ALIGNMENT
273 static int em_fixup_rx(struct adapter *);
275 static void em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
277 static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
279 #if __FreeBSD_version >= 700000
280 static bool em_tso_setup(struct adapter *, struct mbuf *,
282 #endif /* FreeBSD_version >= 700000 */
283 static void em_set_promisc(struct adapter *);
284 static void em_disable_promisc(struct adapter *);
285 static void em_set_multi(struct adapter *);
286 static void em_print_hw_stats(struct adapter *);
287 static void em_update_link_status(struct adapter *);
288 static int em_get_buf(struct adapter *, int);
289 #if __FreeBSD_version >= 700029
290 static void em_register_vlan(void *, struct ifnet *, u16);
291 static void em_unregister_vlan(void *, struct ifnet *, u16);
292 static void em_setup_vlan_hw_support(struct adapter *);
294 static int em_xmit(struct adapter *, struct mbuf **);
295 static void em_smartspeed(struct adapter *);
296 static int em_82547_fifo_workaround(struct adapter *, int);
297 static void em_82547_update_fifo_head(struct adapter *, int);
298 static int em_82547_tx_fifo_reset(struct adapter *);
299 static void em_82547_move_tail(void *);
300 static int em_dma_malloc(struct adapter *, bus_size_t,
301 struct em_dma_alloc *, int);
302 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
303 static void em_print_debug_info(struct adapter *);
304 static void em_print_nvm_info(struct adapter *);
305 static int em_is_valid_ether_addr(u8 *);
306 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
307 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
308 static u32 em_fill_descriptors (bus_addr_t address, u32 length,
309 PDESC_ARRAY desc_array);
310 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
311 static void em_add_int_delay_sysctl(struct adapter *, const char *,
312 const char *, struct em_int_delay_info *, int, int);
313 /* Management and WOL Support */
314 static void em_init_manageability(struct adapter *);
315 static void em_release_manageability(struct adapter *);
316 static void em_get_hw_control(struct adapter *);
317 static void em_release_hw_control(struct adapter *);
318 static void em_enable_wakeup(device_t);
321 static void em_intr(void *);
323 #if __FreeBSD_version < 700000
324 static void em_irq_fast(void *);
326 static int em_irq_fast(void *);
330 static void em_msix_tx(void *);
331 static void em_msix_rx(void *);
332 static void em_msix_link(void *);
333 static void em_handle_rx(void *context, int pending);
334 static void em_handle_tx(void *context, int pending);
336 static void em_handle_rxtx(void *context, int pending);
337 static void em_handle_link(void *context, int pending);
338 static void em_add_rx_process_limit(struct adapter *, const char *,
339 const char *, int *, int);
340 #endif /* ~EM_LEGACY_IRQ */
342 #ifdef DEVICE_POLLING
343 static poll_handler_t em_poll;
346 /*********************************************************************
347 * FreeBSD Device Interface Entry Points
348 *********************************************************************/
350 static device_method_t em_methods[] = {
351 /* Device interface */
352 DEVMETHOD(device_probe, em_probe),
353 DEVMETHOD(device_attach, em_attach),
354 DEVMETHOD(device_detach, em_detach),
355 DEVMETHOD(device_shutdown, em_shutdown),
356 DEVMETHOD(device_suspend, em_suspend),
357 DEVMETHOD(device_resume, em_resume),
361 static driver_t em_driver = {
362 "em", em_methods, sizeof(struct adapter),
365 static devclass_t em_devclass;
366 DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
367 MODULE_DEPEND(em, pci, 1, 1, 1);
368 MODULE_DEPEND(em, ether, 1, 1, 1);
370 /*********************************************************************
371 * Tunable default values.
372 *********************************************************************/
/* The interrupt-delay registers tick in 1.024 us units; the +500 and
 * +512 terms round the conversion to the nearest unit in each
 * direction. */
374 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
375 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
378 /* Allow common code without TSO */
/* Defaults for the four interrupt-moderation sysctls, converted from
 * the EM_TIDV/EM_RDTR/EM_TADV/EM_RADV tick constants to usecs. */
383 static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
384 static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
385 static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
386 static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
/* Default RX/TX descriptor ring sizes (validated against hardware
 * limits in em_attach before use). */
387 static int em_rxd = EM_DEFAULT_RXD;
388 static int em_txd = EM_DEFAULT_TXD;
389 static int em_smart_pwr_down = FALSE;
390 /* Controls whether promiscuous also shows bad packets */
391 static int em_debug_sbp = FALSE;
392 /* Local switch for MSI/MSIX */
393 static int em_enable_msi = TRUE;
/* Loader tunables (hw.em.*) allow each default above to be
 * overridden at boot time. */
395 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
396 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
397 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
398 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
399 TUNABLE_INT("hw.em.rxd", &em_rxd);
400 TUNABLE_INT("hw.em.txd", &em_txd);
401 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
402 TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
403 TUNABLE_INT("hw.em.enable_msi", &em_enable_msi);
405 #ifndef EM_LEGACY_IRQ
406 /* How many packets rxeof tries to clean at a time */
407 static int em_rx_process_limit = 100;
408 TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
/* NOTE(review): the #endif matching the #ifndef EM_LEGACY_IRQ above
 * (original line ~409) is elided from this extract. */
411 /* Flow control setting - default to FULL */
412 static int em_fc_setting = e1000_fc_full;
413 TUNABLE_INT("hw.em.fc_setting", &em_fc_setting);
416 ** Shadow VFTA table, this is needed because
417 ** the real vlan filter table gets cleared during
418 ** a soft reset and the driver needs to be able
421 static u32 em_shadow_vfta[EM_VFTA_SIZE];
423 /* Global used in WOL setup with multiport cards */
424 static int global_quad_port_a = 0;
426 /*********************************************************************
427 * Device identification routine
429 * em_probe determines if the driver should be loaded on
430 * adapter based on PCI vendor/device id of the adapter.
432 * return BUS_PROBE_DEFAULT on success, positive on failure
433 *********************************************************************/
/*
 * Device probe: match this PCI device against em_vendor_info_array
 * and, on a hit, set the bus description from em_strings.
 * Returns BUS_PROBE_DEFAULT on a match; the non-Intel early-return
 * value (original lines ~449-450) and the fall-through failure
 * return (~470-475) are elided from this extract.
 */
436 em_probe(device_t dev)
438 char adapter_name[60];
439 u16 pci_vendor_id = 0;
440 u16 pci_device_id = 0;
441 u16 pci_subvendor_id = 0;
442 u16 pci_subdevice_id = 0;
443 em_vendor_info_t *ent;
445 INIT_DEBUGOUT("em_probe: begin");
/* Reject non-Intel devices immediately (return body elided here). */
447 pci_vendor_id = pci_get_vendor(dev);
448 if (pci_vendor_id != EM_VENDOR_ID)
451 pci_device_id = pci_get_device(dev);
452 pci_subvendor_id = pci_get_subvendor(dev);
453 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the table; the all-zeros sentinel entry terminates it.
 * Subvendor/subdevice match either exactly or via PCI_ANY_ID. */
455 ent = em_vendor_info_array;
456 while (ent->vendor_id != 0) {
457 if ((pci_vendor_id == ent->vendor_id) &&
458 (pci_device_id == ent->device_id) &&
460 ((pci_subvendor_id == ent->subvendor_id) ||
461 (ent->subvendor_id == PCI_ANY_ID)) &&
463 ((pci_subdevice_id == ent->subdevice_id) ||
464 (ent->subdevice_id == PCI_ANY_ID))) {
/* adapter_name is 60 bytes; sprintf arguments continue on an elided
 * line (~467, the version string) before the desc is copied. */
465 sprintf(adapter_name, "%s %s",
466 em_strings[ent->index],
468 device_set_desc_copy(dev, adapter_name);
469 return (BUS_PROBE_DEFAULT);
477 /*********************************************************************
478 * Device initialization routine
480 * The attach entry point is called when the driver is being loaded.
481 * This routine identifies the type of hardware, allocates all resources
482 * and initializes the hardware.
484 * return 0 on success, positive on failure
485 *********************************************************************/
488 em_attach(device_t dev)
490 struct adapter *adapter;
493 u16 eeprom_data, device_id;
495 INIT_DEBUGOUT("em_attach: begin");
497 adapter = device_get_softc(dev);
498 adapter->dev = adapter->osdep.dev = dev;
499 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
500 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
501 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
504 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
505 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
506 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
507 em_sysctl_debug_info, "I", "Debug Information");
509 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
510 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
511 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
512 em_sysctl_stats, "I", "Statistics");
514 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
515 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
517 /* Determine hardware and mac info */
518 em_identify_hardware(adapter);
520 /* Setup PCI resources */
521 if (em_allocate_pci_resources(adapter)) {
522 device_printf(dev, "Allocation of PCI resources failed\n");
528 ** For ICH8 and family we need to
529 ** map the flash memory, and this
530 ** must happen after the MAC is
533 if ((adapter->hw.mac.type == e1000_ich8lan) ||
534 (adapter->hw.mac.type == e1000_ich9lan) ||
535 (adapter->hw.mac.type == e1000_ich10lan)) {
536 int rid = EM_BAR_TYPE_FLASH;
537 adapter->flash = bus_alloc_resource_any(dev,
538 SYS_RES_MEMORY, &rid, RF_ACTIVE);
539 if (adapter->flash == NULL) {
540 device_printf(dev, "Mapping of Flash failed\n");
544 /* This is used in the shared code */
545 adapter->hw.flash_address = (u8 *)adapter->flash;
546 adapter->osdep.flash_bus_space_tag =
547 rman_get_bustag(adapter->flash);
548 adapter->osdep.flash_bus_space_handle =
549 rman_get_bushandle(adapter->flash);
552 /* Do Shared Code initialization */
553 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
554 device_printf(dev, "Setup of Shared code failed\n");
559 e1000_get_bus_info(&adapter->hw);
561 /* Set up some sysctls for the tunable interrupt delays */
562 em_add_int_delay_sysctl(adapter, "rx_int_delay",
563 "receive interrupt delay in usecs", &adapter->rx_int_delay,
564 E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
565 em_add_int_delay_sysctl(adapter, "tx_int_delay",
566 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
567 E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
568 if (adapter->hw.mac.type >= e1000_82540) {
569 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
570 "receive interrupt delay limit in usecs",
571 &adapter->rx_abs_int_delay,
572 E1000_REGISTER(&adapter->hw, E1000_RADV),
573 em_rx_abs_int_delay_dflt);
574 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
575 "transmit interrupt delay limit in usecs",
576 &adapter->tx_abs_int_delay,
577 E1000_REGISTER(&adapter->hw, E1000_TADV),
578 em_tx_abs_int_delay_dflt);
581 #ifndef EM_LEGACY_IRQ
582 /* Sysctls for limiting the amount of work done in the taskqueue */
583 em_add_rx_process_limit(adapter, "rx_processing_limit",
584 "max number of rx packets to process", &adapter->rx_process_limit,
585 em_rx_process_limit);
589 * Validate number of transmit and receive descriptors. It
590 * must not exceed hardware maximum, and must be multiple
591 * of E1000_DBA_ALIGN.
593 if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
594 (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
595 (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
596 (em_txd < EM_MIN_TXD)) {
597 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
598 EM_DEFAULT_TXD, em_txd);
599 adapter->num_tx_desc = EM_DEFAULT_TXD;
601 adapter->num_tx_desc = em_txd;
602 if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
603 (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
604 (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
605 (em_rxd < EM_MIN_RXD)) {
606 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
607 EM_DEFAULT_RXD, em_rxd);
608 adapter->num_rx_desc = EM_DEFAULT_RXD;
610 adapter->num_rx_desc = em_rxd;
612 adapter->hw.mac.autoneg = DO_AUTO_NEG;
613 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
614 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
615 adapter->rx_buffer_len = 2048;
617 e1000_init_script_state_82541(&adapter->hw, TRUE);
618 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
621 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
622 adapter->hw.phy.mdix = AUTO_ALL_MODES;
623 adapter->hw.phy.disable_polarity_correction = FALSE;
624 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
628 * Set the frame limits assuming
629 * standard ethernet sized frames.
631 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
632 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
635 * This controls when hardware reports transmit completion
638 adapter->hw.mac.report_tx_early = 1;
640 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
643 /* Allocate Transmit Descriptor ring */
644 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
645 device_printf(dev, "Unable to allocate tx_desc memory\n");
649 adapter->tx_desc_base =
650 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
652 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
655 /* Allocate Receive Descriptor ring */
656 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
657 device_printf(dev, "Unable to allocate rx_desc memory\n");
661 adapter->rx_desc_base =
662 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
665 ** Start from a known state, this is
666 ** important in reading the nvm and
669 e1000_reset_hw(&adapter->hw);
671 /* Make sure we have a good EEPROM before we read from it */
672 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
674 ** Some PCI-E parts fail the first check due to
675 ** the link being in sleep state, call it again,
676 ** if it fails a second time its a real issue.
678 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
680 "The EEPROM Checksum Is Not Valid\n");
686 /* Copy the permanent MAC address out of the EEPROM */
687 if (e1000_read_mac_addr(&adapter->hw) < 0) {
688 device_printf(dev, "EEPROM read error while reading MAC"
694 if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
695 device_printf(dev, "Invalid MAC address\n");
700 /* Initialize the hardware */
701 if (em_hardware_init(adapter)) {
702 device_printf(dev, "Unable to initialize the hardware\n");
707 /* Allocate transmit descriptors and buffers */
708 if (em_allocate_transmit_structures(adapter)) {
709 device_printf(dev, "Could not setup transmit structures\n");
714 /* Allocate receive descriptors and buffers */
715 if (em_allocate_receive_structures(adapter)) {
716 device_printf(dev, "Could not setup receive structures\n");
722 ** Do interrupt configuration
724 if (adapter->msi > 1) /* Do MSI/X */
725 error = em_allocate_msix(adapter);
726 else /* MSI or Legacy */
727 error = em_allocate_legacy(adapter);
731 /* Setup OS specific network interface */
732 em_setup_interface(dev, adapter);
734 /* Initialize statistics */
735 em_update_stats_counters(adapter);
737 adapter->hw.mac.get_link_status = 1;
738 em_update_link_status(adapter);
740 /* Indicate SOL/IDER usage */
741 if (e1000_check_reset_block(&adapter->hw))
743 "PHY reset is blocked due to SOL/IDER session.\n");
745 /* Determine if we have to control management hardware */
746 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
751 switch (adapter->hw.mac.type) {
757 case e1000_82546_rev_3:
759 case e1000_80003es2lan:
760 if (adapter->hw.bus.func == 1)
761 e1000_read_nvm(&adapter->hw,
762 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
764 e1000_read_nvm(&adapter->hw,
765 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
766 eeprom_data &= EM_EEPROM_APME;
769 /* APME bit in EEPROM is mapped to WUC.APME */
770 eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) &
775 adapter->wol = E1000_WUFC_MAG;
777 * We have the eeprom settings, now apply the special cases
778 * where the eeprom may be wrong or the board won't support
779 * wake on lan on a particular port
781 device_id = pci_get_device(dev);
783 case E1000_DEV_ID_82546GB_PCIE:
786 case E1000_DEV_ID_82546EB_FIBER:
787 case E1000_DEV_ID_82546GB_FIBER:
788 case E1000_DEV_ID_82571EB_FIBER:
789 /* Wake events only supported on port A for dual fiber
790 * regardless of eeprom setting */
791 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
795 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
796 case E1000_DEV_ID_82571EB_QUAD_COPPER:
797 case E1000_DEV_ID_82571EB_QUAD_FIBER:
798 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
799 /* if quad port adapter, disable WoL on all but port A */
800 if (global_quad_port_a != 0)
802 /* Reset for multiple quad port adapters */
803 if (++global_quad_port_a == 4)
804 global_quad_port_a = 0;
808 /* Do we need workaround for 82544 PCI-X adapter? */
809 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
810 adapter->hw.mac.type == e1000_82544)
811 adapter->pcix_82544 = TRUE;
813 adapter->pcix_82544 = FALSE;
815 #if __FreeBSD_version >= 700029
816 /* Register for VLAN events */
817 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
818 em_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
819 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
820 em_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
823 /* Tell the stack that the interface is not active */
824 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
826 INIT_DEBUGOUT("em_attach: end");
831 em_free_transmit_structures(adapter);
834 em_release_hw_control(adapter);
835 em_dma_free(adapter, &adapter->rxdma);
837 em_dma_free(adapter, &adapter->txdma);
840 em_free_pci_resources(adapter);
841 EM_TX_LOCK_DESTROY(adapter);
842 EM_RX_LOCK_DESTROY(adapter);
843 EM_CORE_LOCK_DESTROY(adapter);
848 /*********************************************************************
849 * Device removal routine
851 * The detach entry point is called when the driver is being removed.
852 * This routine stops the adapter and deallocates all the resources
853 * that were allocated for driver operation.
855 * return 0 on success, positive on failure
856 *********************************************************************/
859 em_detach(device_t dev)
861 struct adapter *adapter = device_get_softc(dev);
862 struct ifnet *ifp = adapter->ifp;
864 INIT_DEBUGOUT("em_detach: begin");
866 /* Make sure VLANS are not using driver */
867 #if __FreeBSD_version >= 700000
868 if (adapter->ifp->if_vlantrunk != NULL) {
870 if (adapter->ifp->if_nvlans != 0) {
872 device_printf(dev,"Vlan in use, detach first\n");
876 #ifdef DEVICE_POLLING
877 if (ifp->if_capenable & IFCAP_POLLING)
878 ether_poll_deregister(ifp);
881 EM_CORE_LOCK(adapter);
883 adapter->in_detach = 1;
885 e1000_phy_hw_reset(&adapter->hw);
887 em_release_manageability(adapter);
889 if (((adapter->hw.mac.type == e1000_82573) ||
890 (adapter->hw.mac.type == e1000_82583) ||
891 (adapter->hw.mac.type == e1000_ich8lan) ||
892 (adapter->hw.mac.type == e1000_ich10lan) ||
893 (adapter->hw.mac.type == e1000_ich9lan)) &&
894 e1000_check_mng_mode(&adapter->hw))
895 em_release_hw_control(adapter);
898 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
899 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
900 em_enable_wakeup(dev);
903 EM_TX_UNLOCK(adapter);
904 EM_CORE_UNLOCK(adapter);
906 #if __FreeBSD_version >= 700029
907 /* Unregister VLAN events */
908 if (adapter->vlan_attach != NULL)
909 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
910 if (adapter->vlan_detach != NULL)
911 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
914 ether_ifdetach(adapter->ifp);
915 callout_drain(&adapter->timer);
916 callout_drain(&adapter->tx_fifo_timer);
918 em_free_pci_resources(adapter);
919 bus_generic_detach(dev);
922 em_free_transmit_structures(adapter);
923 em_free_receive_structures(adapter);
925 /* Free Transmit Descriptor ring */
926 if (adapter->tx_desc_base) {
927 em_dma_free(adapter, &adapter->txdma);
928 adapter->tx_desc_base = NULL;
931 /* Free Receive Descriptor ring */
932 if (adapter->rx_desc_base) {
933 em_dma_free(adapter, &adapter->rxdma);
934 adapter->rx_desc_base = NULL;
937 EM_TX_LOCK_DESTROY(adapter);
938 EM_RX_LOCK_DESTROY(adapter);
939 EM_CORE_LOCK_DESTROY(adapter);
944 /*********************************************************************
946 * Shutdown entry point
948 **********************************************************************/
/* Shutdown simply reuses the suspend path so manageability release
 * and wake-up (WOL) programming happen in both cases. */
951 em_shutdown(device_t dev)
953 return em_suspend(dev);
957 * Suspend/resume device methods.
/* Suspend: under the core lock, stop the interface, release
 * manageability, hand HW control back to firmware on ICH/82573-class
 * parts when manageability is active, then arm Wake-on-LAN (WUC/WUFC)
 * before deferring to bus_generic_suspend(). */
960 em_suspend(device_t dev)
962 struct adapter *adapter = device_get_softc(dev);
964 EM_CORE_LOCK(adapter);
968 EM_TX_UNLOCK(adapter);
970 em_release_manageability(adapter);
/* Same MAC-type test as the detach path: only release HW control when
 * firmware manageability mode is engaged. */
972 if (((adapter->hw.mac.type == e1000_82573) ||
973 (adapter->hw.mac.type == e1000_82583) ||
974 (adapter->hw.mac.type == e1000_ich8lan) ||
975 (adapter->hw.mac.type == e1000_ich10lan) ||
976 (adapter->hw.mac.type == e1000_ich9lan)) &&
977 e1000_check_mng_mode(&adapter->hw))
978 em_release_hw_control(adapter);
/* Enable PME and program the wake-up filter from the configured WoL bits. */
981 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
982 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
983 em_enable_wakeup(dev);
986 EM_CORE_UNLOCK(adapter);
988 return bus_generic_suspend(dev);
/* Resume: re-initialize the adapter and manageability under the core
 * lock, then let the bus resume any children. */
992 em_resume(device_t dev)
994 struct adapter *adapter = device_get_softc(dev);
995 struct ifnet *ifp = adapter->ifp;
997 EM_CORE_LOCK(adapter);
998 em_init_locked(adapter);
999 em_init_manageability(adapter);
1000 EM_CORE_UNLOCK(adapter);
1003 return bus_generic_resume(dev);
1007 /*********************************************************************
1008 * Transmit entry point
1010 * em_start is called by the stack to initiate a transmit.
1011 * The driver will remain in this routine as long as there are
1012 * packets to transmit and transmit resources are available.
1013 * In case resources are not available stack is notified and
1014 * the packet is requeued.
1015 **********************************************************************/
1017 #if __FreeBSD_version >= 800000
/* Multiqueue transmit, TX lock held. If the ring can take the frame
 * directly (buf_ring empty and descriptors available) transmit it now,
 * otherwise enqueue on the buf_ring; then drain the buf_ring. A NULL
 * mbuf means "just drain" (used by the interrupt/taskqueue paths). */
1019 em_mq_start_locked(struct ifnet *ifp, struct mbuf *m)
1021 struct adapter *adapter = ifp->if_softc;
1023 int error = E1000_SUCCESS;
1025 EM_TX_LOCK_ASSERT(adapter);
1026 /* To allow being called from a tasklet */
/* Not running, flow-controlled, or no link: just queue the frame. */
1030 if (((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1032 || (!adapter->link_active)) {
1033 error = drbr_enqueue(ifp, adapter->br, m);
1035 } else if (drbr_empty(ifp, adapter->br) &&
1036 (adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) {
/* Fast path: hand the frame straight to the hardware. em_xmit() may
 * modify or free the mbuf; requeue only if it survives a failure. */
1037 if (em_xmit(adapter, &m)) {
1038 if (m && (error = drbr_enqueue(ifp, adapter->br, m)) != 0)
1042 * We've bypassed the buf ring so we need to update
1045 drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
1047 ** Send a copy of the frame to the BPF
1048 ** listener and set the watchdog on.
1050 ETHER_BPF_MTAP(ifp, m);
1051 adapter->watchdog_timer = EM_TX_TIMEOUT;
1053 } else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0)
1057 if (drbr_empty(ifp, adapter->br))
1059 /* Process the queue */
1061 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1063 next = drbr_dequeue(ifp, adapter->br);
1066 if (em_xmit(adapter, &next))
1068 ETHER_BPF_MTAP(ifp, next);
1069 /* Set the watchdog */
1070 adapter->watchdog_timer = EM_TX_TIMEOUT;
/* Running low on descriptors: tell the stack to back off. */
1073 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
1074 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1080 ** Multiqueue capable stack interface, this is not
1081 ** yet truly multiqueue, but that is coming...
/* if_transmit entry point: try the TX lock; on contention, just park
 * the frame on the buf_ring instead of blocking the caller. */
1084 em_mq_start(struct ifnet *ifp, struct mbuf *m)
1087 struct adapter *adapter = ifp->if_softc;
1090 if (EM_TX_TRYLOCK(adapter)) {
1091 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1092 error = em_mq_start_locked(ifp, m);
1093 EM_TX_UNLOCK(adapter);
1095 error = drbr_enqueue(ifp, adapter->br, m);
/* if_qflush: drain and discard every mbuf parked on the buf_ring,
 * under the TX lock. */
1101 em_qflush(struct ifnet *ifp)
1104 struct adapter *adapter = (struct adapter *)ifp->if_softc;
1106 EM_TX_LOCK(adapter);
1107 while ((m = buf_ring_dequeue_sc(adapter->br)) != NULL)
1110 EM_TX_UNLOCK(adapter);
1112 #endif /* FreeBSD_version */
/* Legacy (single-queue) transmit, TX lock held: drain if_snd, handing
 * each frame to em_xmit() until the queue is empty or descriptors run
 * out. On encapsulation failure the surviving mbuf is prepended back. */
1115 em_start_locked(struct ifnet *ifp)
1117 struct adapter *adapter = ifp->if_softc;
1118 struct mbuf *m_head;
1120 EM_TX_LOCK_ASSERT(adapter);
/* Bail out unless running, not flow-controlled, and link is up. */
1122 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1125 if (!adapter->link_active)
1128 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
1130 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1134 * Encapsulation can modify our pointer, and or make it
1135 * NULL on failure. In that event, we can't requeue.
1137 if (em_xmit(adapter, &m_head)) {
1140 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1141 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1145 /* Send a copy of the frame to the BPF listener */
1146 ETHER_BPF_MTAP(ifp, m_head);
1148 /* Set timeout in case hardware has problems transmitting. */
1149 adapter->watchdog_timer = EM_TX_TIMEOUT;
/* Descriptors nearly exhausted: throttle the stack. */
1151 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
1152 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/* if_start entry point: take the TX lock and run the locked variant
 * if the interface is up. */
1158 em_start(struct ifnet *ifp)
1160 struct adapter *adapter = ifp->if_softc;
1162 EM_TX_LOCK(adapter);
1163 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1164 em_start_locked(ifp);
1165 EM_TX_UNLOCK(adapter);
1168 /*********************************************************************
1171 * em_ioctl is called when the user wants to configure the
1174 * return 0 on success, positive on failure
1175 **********************************************************************/
/* ifnet ioctl handler. Visible cases: address assignment (AF_INET fast
 * path), SIOCSIFMTU, SIOCSIFFLAGS, multicast add/del, media get/set and
 * SIOCSIFCAP; anything else falls through to ether_ioctl().
 * NOTE(review): listing has elided lines (case labels, returns, braces). */
1178 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1180 struct adapter *adapter = ifp->if_softc;
1181 struct ifreq *ifr = (struct ifreq *)data;
1183 struct ifaddr *ifa = (struct ifaddr *)data;
/* Refuse configuration while the device is being detached. */
1187 if (adapter->in_detach)
1193 if (ifa->ifa_addr->sa_family == AF_INET) {
1196 * Since resetting hardware takes a very long time
1197 * and results in link renegotiation we only
1198 * initialize the hardware only when it is absolutely
1201 ifp->if_flags |= IFF_UP;
1202 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1203 EM_CORE_LOCK(adapter);
1204 em_init_locked(adapter);
1205 EM_CORE_UNLOCK(adapter);
1207 arp_ifinit(ifp, ifa);
1210 error = ether_ioctl(ifp, command, data);
1215 u16 eeprom_data = 0;
1217 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1219 EM_CORE_LOCK(adapter);
/* Per-MAC jumbo-frame ceiling for the requested MTU. */
1220 switch (adapter->hw.mac.type) {
1223 * 82573 only supports jumbo frames
1224 * if ASPM is disabled.
1226 e1000_read_nvm(&adapter->hw,
1227 NVM_INIT_3GIO_3, 1, &eeprom_data);
1228 if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
1229 max_frame_size = ETHER_MAX_LEN;
1232 /* Allow Jumbo frames - fall thru */
1236 case e1000_ich10lan:
1238 case e1000_80003es2lan: /* Limit Jumbo Frame size */
1239 max_frame_size = 9234;
1241 /* Adapters that do not support jumbo frames */
1245 max_frame_size = ETHER_MAX_LEN;
1248 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1250 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1252 EM_CORE_UNLOCK(adapter);
1257 ifp->if_mtu = ifr->ifr_mtu;
1258 adapter->max_frame_size =
1259 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1260 em_init_locked(adapter);
1261 EM_CORE_UNLOCK(adapter);
1265 IOCTL_DEBUGOUT("ioctl rcv'd:\
1266 SIOCSIFFLAGS (Set Interface Flags)");
1267 EM_CORE_LOCK(adapter);
1268 if (ifp->if_flags & IFF_UP) {
/* Already running: only re-program promisc/allmulti if those bits
 * changed, avoiding a full (link-dropping) re-init. */
1269 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1270 if ((ifp->if_flags ^ adapter->if_flags) &
1271 (IFF_PROMISC | IFF_ALLMULTI)) {
1272 em_disable_promisc(adapter);
1273 em_set_promisc(adapter);
1276 em_init_locked(adapter);
1278 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1279 EM_TX_LOCK(adapter);
1281 EM_TX_UNLOCK(adapter);
1283 adapter->if_flags = ifp->if_flags;
1284 EM_CORE_UNLOCK(adapter);
1288 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
1289 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1290 EM_CORE_LOCK(adapter);
1291 em_disable_intr(adapter);
1292 em_set_multi(adapter);
/* 82542 rev 2 needs its receive unit re-initialized after a
 * multicast table update. */
1293 if (adapter->hw.mac.type == e1000_82542 &&
1294 adapter->hw.revision_id == E1000_REVISION_2) {
1295 em_initialize_receive_unit(adapter);
1297 #ifdef DEVICE_POLLING
1298 if (!(ifp->if_capenable & IFCAP_POLLING))
1300 em_enable_intr(adapter);
1301 EM_CORE_UNLOCK(adapter);
1305 /* Check SOL/IDER usage */
1306 EM_CORE_LOCK(adapter);
1307 if (e1000_check_reset_block(&adapter->hw)) {
1308 EM_CORE_UNLOCK(adapter);
1309 device_printf(adapter->dev, "Media change is"
1310 " blocked due to SOL/IDER session.\n");
1313 EM_CORE_UNLOCK(adapter);
1315 IOCTL_DEBUGOUT("ioctl rcv'd: \
1316 SIOCxIFMEDIA (Get/Set Interface Media)");
1317 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
1323 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1325 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1326 #ifdef DEVICE_POLLING
1327 if (mask & IFCAP_POLLING) {
1328 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1329 error = ether_poll_register(em_poll, ifp);
1332 EM_CORE_LOCK(adapter);
1333 em_disable_intr(adapter);
1334 ifp->if_capenable |= IFCAP_POLLING;
1335 EM_CORE_UNLOCK(adapter);
1337 error = ether_poll_deregister(ifp);
1338 /* Enable interrupt even in error case */
1339 EM_CORE_LOCK(adapter);
1340 em_enable_intr(adapter);
1341 ifp->if_capenable &= ~IFCAP_POLLING;
1342 EM_CORE_UNLOCK(adapter);
1346 if (mask & IFCAP_HWCSUM) {
1347 ifp->if_capenable ^= IFCAP_HWCSUM;
1350 #if __FreeBSD_version >= 700000
1351 if (mask & IFCAP_TSO4) {
1352 ifp->if_capenable ^= IFCAP_TSO4;
1357 if (mask & IFCAP_VLAN_HWTAGGING) {
1358 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1361 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1363 #if __FreeBSD_version >= 700000
1364 VLAN_CAPABILITIES(ifp);
1370 error = ether_ioctl(ifp, command, data);
1377 /*********************************************************************
1380 * This routine is called from the local timer every second.
1381 * As long as transmit descriptors are being cleaned the value
1382 * is non-zero and we do nothing. Reaching 0 indicates a tx hang
1383 * and we then reset the device.
1385 **********************************************************************/
1388 em_watchdog(struct adapter *adapter)
1391 EM_CORE_LOCK_ASSERT(adapter);
1394 ** The timer is set to 5 every time start queues a packet.
1395 ** Then txeof keeps resetting it as long as it cleans at
1396 ** least one descriptor.
1397 ** Finally, anytime all descriptors are clean the timer is
1400 EM_TX_LOCK(adapter);
/* Timer idle (0) or still counting down: nothing to do yet. */
1401 if ((adapter->watchdog_timer == 0) || (--adapter->watchdog_timer)) {
1402 EM_TX_UNLOCK(adapter);
1406 /* If we are in this routine because of pause frames, then
1407 * don't reset the hardware.
1409 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
1410 E1000_STATUS_TXOFF) {
1411 adapter->watchdog_timer = EM_TX_TIMEOUT;
1412 EM_TX_UNLOCK(adapter);
/* Genuine TX hang: log (when link is up), mark interface down,
 * count the event and re-init the hardware. */
1416 if (e1000_check_for_link(&adapter->hw) == 0)
1417 device_printf(adapter->dev, "watchdog timeout -- resetting\n");
1418 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1419 adapter->watchdog_events++;
1420 EM_TX_UNLOCK(adapter);
1422 em_init_locked(adapter);
1425 /*********************************************************************
1428 * This routine is used in two ways. It is used by the stack as
1429 * init entry point in network interface structure. It is also used
1430 * by the driver as a hw/sw initialization routine to get to a
1433 * return 0 on success, positive on failure
1434 **********************************************************************/
/* Full (re)initialization with the core lock held: PBA sizing, MAC
 * address programming, hardware init, VLAN/offload setup, TX/RX ring
 * setup, 82574 MSI-X IVAR programming, and interrupt enable. */
1437 em_init_locked(struct adapter *adapter)
1439 struct ifnet *ifp = adapter->ifp;
1440 device_t dev = adapter->dev;
1443 INIT_DEBUGOUT("em_init: begin");
1445 EM_CORE_LOCK_ASSERT(adapter);
1447 EM_TX_LOCK(adapter);
1449 EM_TX_UNLOCK(adapter);
1452 * Packet Buffer Allocation (PBA)
1453 * Writing PBA sets the receive portion of the buffer
1454 * the remainder is used for the transmit buffer.
1456 * Devices before the 82547 had a Packet Buffer of 64K.
1457 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1458 * After the 82547 the buffer was reduced to 40K.
1459 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1460 * Note: default does not leave enough room for Jumbo Frame >10k.
1462 switch (adapter->hw.mac.type) {
1464 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1465 if (adapter->max_frame_size > 8192)
1466 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1468 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 also tracks a software TX FIFO for the half-duplex
 * hang workaround (see em_82547_move_tail). */
1469 adapter->tx_fifo_head = 0;
1470 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1471 adapter->tx_fifo_size =
1472 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1474 /* Total Packet Buffer on these is 48K */
1477 case e1000_80003es2lan:
1478 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1480 case e1000_82573: /* 82573: Total Packet Buffer is 32K */
1481 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1485 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
1488 case e1000_ich10lan:
1493 /* Devices before 82547 had a Packet Buffer of 64K. */
1494 if (adapter->max_frame_size > 8192)
1495 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1497 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1500 INIT_DEBUGOUT1("em_init: pba=%dK",pba);
1501 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1503 /* Get the latest mac address, User can use a LAA */
1504 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1507 /* Put the address into the Receive Address Array */
1508 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1511 * With the 82571 adapter, RAR[0] may be overwritten
1512 * when the other port is reset, we make a duplicate
1513 * in RAR[14] for that eventuality, this assures
1514 * the interface continues to function.
1516 if (adapter->hw.mac.type == e1000_82571) {
1517 e1000_set_laa_state_82571(&adapter->hw, TRUE);
1518 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1519 E1000_RAR_ENTRIES - 1);
1522 /* Initialize the hardware */
1523 if (em_hardware_init(adapter)) {
1524 device_printf(dev, "Unable to initialize the hardware\n");
1527 em_update_link_status(adapter);
1529 /* Setup VLAN support, basic and offload if available */
1530 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1532 #if __FreeBSD_version < 700029
/* Pre-7.0: VLAN tagging is just the CTRL.VME bit. */
1533 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1535 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1536 ctrl |= E1000_CTRL_VME;
1537 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1540 /* Use real VLAN Filter support */
1541 em_setup_vlan_hw_support(adapter);
1544 /* Set hardware offload abilities */
1545 ifp->if_hwassist = 0;
1546 if (adapter->hw.mac.type >= e1000_82543) {
1547 if (ifp->if_capenable & IFCAP_TXCSUM)
1548 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1549 #if __FreeBSD_version >= 700000
1550 if (ifp->if_capenable & IFCAP_TSO4)
1551 ifp->if_hwassist |= CSUM_TSO;
1555 /* Configure for OS presence */
1556 em_init_manageability(adapter);
1558 /* Prepare transmit descriptors and buffers */
1559 em_setup_transmit_structures(adapter);
1560 em_initialize_transmit_unit(adapter);
1562 /* Setup Multicast table */
1563 em_set_multi(adapter);
1565 /* Prepare receive descriptors and buffers */
1566 if (em_setup_receive_structures(adapter)) {
1567 device_printf(dev, "Could not setup receive structures\n");
1568 EM_TX_LOCK(adapter);
1570 EM_TX_UNLOCK(adapter);
1573 em_initialize_receive_unit(adapter);
1575 /* Don't lose promiscuous settings */
1576 em_set_promisc(adapter);
1578 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1579 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1581 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1582 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1584 /* MSI/X configuration for 82574 */
1585 if (adapter->hw.mac.type == e1000_82574) {
1587 tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1588 tmp |= E1000_CTRL_EXT_PBA_CLR;
1589 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1591 ** Set the IVAR - interrupt vector routing.
1592 ** Each nibble represents a vector, high bit
1593 ** is enable, other 3 bits are the MSIX table
1594 ** entry, we map RXQ0 to 0, TXQ0 to 1, and
1595 ** Link (other) to 2, hence the magic number.
1597 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
1600 #ifdef DEVICE_POLLING
1602 * Only enable interrupts if we are not polling, make sure
1603 * they are off otherwise.
1605 if (ifp->if_capenable & IFCAP_POLLING)
1606 em_disable_intr(adapter);
1608 #endif /* DEVICE_POLLING */
1609 em_enable_intr(adapter);
1611 /* Don't reset the phy next time init gets called */
1612 adapter->hw.phy.reset_disable = TRUE;
/* Body of the unlocked init entry point (signature elided in this
 * listing; presumably em_init(void *arg) — TODO confirm): wraps
 * em_init_locked() in the core lock. */
1618 struct adapter *adapter = arg;
1620 EM_CORE_LOCK(adapter);
1621 em_init_locked(adapter);
1622 EM_CORE_UNLOCK(adapter);
1626 #ifdef DEVICE_POLLING
1627 /*********************************************************************
1629 * Legacy polling routine
1631 *********************************************************************/
/* DEVICE_POLLING handler: on POLL_AND_CHECK_STATUS also sample ICR for
 * link events, then run RX cleanup for up to 'count' frames and kick
 * the transmit path. */
1633 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1635 struct adapter *adapter = ifp->if_softc;
1636 u32 reg_icr, rx_done = 0;
1638 EM_CORE_LOCK(adapter);
1639 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1640 EM_CORE_UNLOCK(adapter);
1644 if (cmd == POLL_AND_CHECK_STATUS) {
1645 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1646 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1647 callout_stop(&adapter->timer);
1648 adapter->hw.mac.get_link_status = 1;
1649 em_update_link_status(adapter);
1650 callout_reset(&adapter->timer, hz,
1651 em_local_timer, adapter);
1654 EM_CORE_UNLOCK(adapter);
1656 rx_done = em_rxeof(adapter, count);
1658 EM_TX_LOCK(adapter);
1660 #if __FreeBSD_version >= 800000
1661 if (!drbr_empty(ifp, adapter->br))
1662 em_mq_start_locked(ifp, NULL);
/* BUGFIX: struct ifnet's send queue member is if_snd, not snd
 * (matches the usage in em_start_locked/em_intr in this file). */
1664 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1665 em_start_locked(ifp);
1667 EM_TX_UNLOCK(adapter);
1670 #endif /* DEVICE_POLLING */
1672 #ifdef EM_LEGACY_IRQ
1673 /*********************************************************************
1675 * Legacy Interrupt Service routine
1677 *********************************************************************/
/* Legacy (non-fast) interrupt handler body (signature elided in this
 * listing; presumably em_intr(void *arg) — TODO confirm). Reads ICR,
 * handles link events, then cleans TX/RX and restarts transmit. */
1682 struct adapter *adapter = arg;
1683 struct ifnet *ifp = adapter->ifp;
/* When polling is active the interrupt path is a no-op. */
1687 if (ifp->if_capenable & IFCAP_POLLING)
1690 EM_CORE_LOCK(adapter);
1691 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1692 if (reg_icr & E1000_ICR_RXO)
1693 adapter->rx_overruns++;
/* Stray interrupt: all-ones/zero ICR, or (82571+) INT_ASSERTED clear. */
1694 if ((reg_icr == 0xffffffff) || (reg_icr == 0)||
1695 (adapter->hw.mac.type >= e1000_82571 &&
1696 (reg_icr & E1000_ICR_INT_ASSERTED) == 0))
1699 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1702 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1703 callout_stop(&adapter->timer);
1704 adapter->hw.mac.get_link_status = 1;
1705 em_update_link_status(adapter);
1706 /* Deal with TX cruft when link lost */
1707 em_tx_purge(adapter);
1708 callout_reset(&adapter->timer, hz,
1709 em_local_timer, adapter);
1713 EM_TX_LOCK(adapter);
1715 em_rxeof(adapter, -1);
1717 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1718 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1719 em_start_locked(ifp);
1720 EM_TX_UNLOCK(adapter);
1723 EM_CORE_UNLOCK(adapter);
1730 em_handle_link(void *context, int pending)
1732 struct adapter *adapter = context;
1733 struct ifnet *ifp = adapter->ifp;
1735 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1738 EM_CORE_LOCK(adapter);
1739 callout_stop(&adapter->timer);
1740 em_update_link_status(adapter);
1741 /* Deal with TX cruft when link lost */
1742 em_tx_purge(adapter);
1743 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1744 EM_CORE_UNLOCK(adapter);
1748 /* Combined RX/TX handler, used by Legacy and MSI */
/* Taskqueue handler: clean RX (re-queuing itself if more work remains),
 * kick the transmit path under the TX lock, then re-enable interrupts
 * that em_irq_fast masked. */
1750 em_handle_rxtx(void *context, int pending)
1752 struct adapter *adapter = context;
1753 struct ifnet *ifp = adapter->ifp;
1756 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1757 if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
1758 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1759 EM_TX_LOCK(adapter);
1762 #if __FreeBSD_version >= 800000
1763 if (!drbr_empty(ifp, adapter->br))
1764 em_mq_start_locked(ifp, NULL);
/* BUGFIX: struct ifnet's send queue member is if_snd, not snd
 * (matches the usage in em_start_locked/em_intr in this file). */
1766 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1767 em_start_locked(ifp);
1769 EM_TX_UNLOCK(adapter);
1772 em_enable_intr(adapter);
1775 /*********************************************************************
1777 * Fast Legacy/MSI Combined Interrupt Service routine
1779 *********************************************************************/
1780 #if __FreeBSD_version < 700000
/* Pre-7.0 has no filter return codes; define them away. */
1781 #define FILTER_STRAY
1782 #define FILTER_HANDLED
/* Fast interrupt filter: classify the interrupt from ICR, mask further
 * interrupts, and defer real work to the rxtx/link taskqueues. */
1787 em_irq_fast(void *arg)
1789 struct adapter *adapter = arg;
1795 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* All-ones ICR: device likely removed or in a bad state. */
1798 if (reg_icr == 0xffffffff)
1799 return FILTER_STRAY;
1801 /* Definitely not our interrupt. */
1803 return FILTER_STRAY;
1806 * Starting with the 82571 chip, bit 31 should be used to
1807 * determine whether the interrupt belongs to us.
1809 if (adapter->hw.mac.type >= e1000_82571 &&
1810 (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1811 return FILTER_STRAY;
1814 * Mask interrupts until the taskqueue is finished running. This is
1815 * cheap, just assume that it is needed. This also works around the
1816 * MSI message reordering errata on certain systems.
1818 em_disable_intr(adapter);
1819 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1821 /* Link status change */
1822 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1823 adapter->hw.mac.get_link_status = 1;
1824 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1827 if (reg_icr & E1000_ICR_RXO)
1828 adapter->rx_overruns++;
1829 return FILTER_HANDLED;
1832 /*********************************************************************
1834 * MSIX Interrupt Service Routines
1836 **********************************************************************/
/* IMS bits used to re-arm each MSI-X vector after servicing. */
1837 #define EM_MSIX_TX 0x00040000
1838 #define EM_MSIX_RX 0x00010000
1839 #define EM_MSIX_LINK 0x00100000
/* MSI-X TX vector: clean the ring under the TX lock, defer restart of
 * transmit to the tx task, then re-arm this vector via IMS. */
1842 em_msix_tx(void *arg)
1844 struct adapter *adapter = arg;
1845 struct ifnet *ifp = adapter->ifp;
1848 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1849 EM_TX_LOCK(adapter);
1851 EM_TX_UNLOCK(adapter);
1852 taskqueue_enqueue(adapter->tq, &adapter->tx_task);
1854 /* Reenable this interrupt */
1855 E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_TX);
1859 /*********************************************************************
1861 * MSIX RX Interrupt Service routine
1863 **********************************************************************/
/* MSI-X RX vector: clean up to rx_process_limit frames; if more remain,
 * defer to the rx task; always re-arm this vector via IMS. */
1866 em_msix_rx(void *arg)
1868 struct adapter *adapter = arg;
1869 struct ifnet *ifp = adapter->ifp;
1872 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1873 (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1874 taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1875 /* Reenable this interrupt */
1876 E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_RX);
1880 /*********************************************************************
1882 * MSIX Link Fast Interrupt Service routine
1884 **********************************************************************/
/* MSI-X link vector: count the IRQ, and on a link event defer the
 * status update to the fast-taskqueue link task; re-arm via IMS. */
1887 em_msix_link(void *arg)
1889 struct adapter *adapter = arg;
1892 ++adapter->link_irq;
1893 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1895 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1896 adapter->hw.mac.get_link_status = 1;
1897 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1899 E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1900 EM_MSIX_LINK | E1000_IMS_LSC);
/* RX taskqueue handler: keep cleaning the RX ring, re-queuing itself
 * while em_rxeof() reports remaining work. */
1905 em_handle_rx(void *context, int pending)
1907 struct adapter *adapter = context;
1908 struct ifnet *ifp = adapter->ifp;
1910 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1911 (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1912 taskqueue_enqueue(adapter->tq, &adapter->rx_task);
/* TX taskqueue handler: restart the transmit path. Uses trylock so a
 * contended TX lock simply defers the work rather than blocking. */
1917 em_handle_tx(void *context, int pending)
1919 struct adapter *adapter = context;
1920 struct ifnet *ifp = adapter->ifp;
1922 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1923 if (!EM_TX_TRYLOCK(adapter))
1926 #if __FreeBSD_version >= 800000
1927 if (!drbr_empty(ifp, adapter->br))
1928 em_mq_start_locked(ifp, NULL);
/* BUGFIX: struct ifnet's send queue member is if_snd, not snd
 * (matches the usage in em_start_locked/em_intr in this file). */
1930 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1931 em_start_locked(ifp);
1933 EM_TX_UNLOCK(adapter);
1936 #endif /* EM_FAST_IRQ */
1938 /*********************************************************************
1940 * Media Ioctl callback
1942 * This routine is called whenever the user queries the status of
1943 * the interface using ifconfig.
1945 **********************************************************************/
1947 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1949 struct adapter *adapter = ifp->if_softc;
1950 u_char fiber_type = IFM_1000_SX;
1952 INIT_DEBUGOUT("em_media_status: begin");
1954 EM_CORE_LOCK(adapter);
1955 em_update_link_status(adapter);
1957 ifmr->ifm_status = IFM_AVALID;
1958 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive and return early. */
1960 if (!adapter->link_active) {
1961 EM_CORE_UNLOCK(adapter);
1965 ifmr->ifm_status |= IFM_ACTIVE;
/* Fiber/SerDes media: report a fixed gigabit fiber subtype
 * (82545 uses LX, everything else SX) at full duplex. */
1967 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1968 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1969 if (adapter->hw.mac.type == e1000_82545)
1970 fiber_type = IFM_1000_LX;
1971 ifmr->ifm_active |= fiber_type | IFM_FDX;
/* Copper: translate negotiated speed/duplex into ifmedia bits. */
1973 switch (adapter->link_speed) {
1975 ifmr->ifm_active |= IFM_10_T;
1978 ifmr->ifm_active |= IFM_100_TX;
1981 ifmr->ifm_active |= IFM_1000_T;
1984 if (adapter->link_duplex == FULL_DUPLEX)
1985 ifmr->ifm_active |= IFM_FDX;
1987 ifmr->ifm_active |= IFM_HDX;
1989 EM_CORE_UNLOCK(adapter);
1992 /*********************************************************************
1994 * Media Ioctl callback
1996 * This routine is called when the user changes speed/duplex using
1997 * media/mediaopt option with ifconfig.
1999 **********************************************************************/
2001 em_media_change(struct ifnet *ifp)
2003 struct adapter *adapter = ifp->if_softc;
2004 struct ifmedia *ifm = &adapter->media;
2006 INIT_DEBUGOUT("em_media_change: begin");
2008 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2011 EM_CORE_LOCK(adapter);
/* Map the requested ifmedia subtype onto autoneg/forced settings. */
2012 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2014 adapter->hw.mac.autoneg = DO_AUTO_NEG;
2015 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
2020 adapter->hw.mac.autoneg = DO_AUTO_NEG;
2021 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
/* Forced 100 Mb/s: disable autoneg, pick duplex from media flags. */
2024 adapter->hw.mac.autoneg = FALSE;
2025 adapter->hw.phy.autoneg_advertised = 0;
2026 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2027 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
2029 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
/* Forced 10 Mb/s: same pattern as above. */
2032 adapter->hw.mac.autoneg = FALSE;
2033 adapter->hw.phy.autoneg_advertised = 0;
2034 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2035 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
2037 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
2040 device_printf(adapter->dev, "Unsupported media type\n");
2043 /* As the speed/duplex settings my have changed we need to
2046 adapter->hw.phy.reset_disable = FALSE;
2048 em_init_locked(adapter);
2049 EM_CORE_UNLOCK(adapter);
2054 /*********************************************************************
2056 * This routine maps the mbufs to tx descriptors.
2058 * return 0 on success, positive on failure
2059 **********************************************************************/
/* Encapsulate one mbuf chain into TX descriptors: DMA-map it (with one
 * EFBIG defrag retry), apply TSO/checksum offload context, handle the
 * 82544-on-PCIX address-split and TSO-sentinel workarounds, tag VLANs,
 * and finally advance TDT. May free or replace *m_headp on failure.
 * NOTE(review): listing has elided lines (returns, braces, labels). */
2062 em_xmit(struct adapter *adapter, struct mbuf **m_headp)
2064 bus_dma_segment_t segs[EM_MAX_SCATTER];
2066 struct em_buffer *tx_buffer, *tx_buffer_mapped;
2067 struct e1000_tx_desc *ctxd = NULL;
2068 struct mbuf *m_head;
2069 u32 txd_upper, txd_lower, txd_used, txd_saved;
2070 int nsegs, i, j, first, last = 0;
2071 int error, do_tso, tso_desc = 0;
2072 #if __FreeBSD_version < 700000
2076 txd_upper = txd_lower = txd_used = txd_saved = 0;
2078 #if __FreeBSD_version >= 700000
2079 do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
2085 * Force a cleanup if number of TX descriptors
2086 * available hits the threshold
2088 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
2090 /* Now do we at least have a minimal? */
2091 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
2092 adapter->no_tx_desc_avail1++;
2100 * If an mbuf is only header we need
2101 * to pull 4 bytes of data into it.
2103 if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
2104 m_head = m_pullup(m_head, M_TSO_LEN + 4);
2111 * Map the packet for DMA
2113 * Capture the first descriptor index,
2114 * this descriptor will have the index
2115 * of the EOP which is the only one that
2116 * now gets a DONE bit writeback.
2118 first = adapter->next_avail_tx_desc;
2119 tx_buffer = &adapter->tx_buffer_area[first];
2120 tx_buffer_mapped = tx_buffer;
2121 map = tx_buffer->map;
2123 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2124 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2127 * There are two types of errors we can (try) to handle:
2128 * - EFBIG means the mbuf chain was too long and bus_dma ran
2129 * out of segments. Defragment the mbuf chain and try again.
2130 * - ENOMEM means bus_dma could not obtain enough bounce buffers
2131 * at this point in time. Defer sending and try again later.
2132 * All other errors, in particular EINVAL, are fatal and prevent the
2133 * mbuf chain from ever going through. Drop it and report error.
2135 if (error == EFBIG) {
2138 m = m_defrag(*m_headp, M_DONTWAIT);
2140 adapter->mbuf_alloc_failed++;
/* Retry the DMA load once with the defragmented chain. */
2148 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2149 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2152 adapter->no_tx_dma_setup++;
2157 } else if (error != 0) {
2158 adapter->no_tx_dma_setup++;
2163 * TSO Hardware workaround, if this packet is not
2164 * TSO, and is only a single descriptor long, and
2165 * it follows a TSO burst, then we need to add a
2166 * sentinel descriptor to prevent premature writeback.
2168 if ((do_tso == 0) && (adapter->tx_tso == TRUE)) {
2171 adapter->tx_tso = FALSE;
/* Need nsegs descriptors plus slack; give up if too few remain. */
2174 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
2175 adapter->no_tx_desc_avail2++;
2176 bus_dmamap_unload(adapter->txtag, map);
2181 /* Do hardware assists */
2182 #if __FreeBSD_version >= 700000
2183 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2184 error = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
2186 return (ENXIO); /* something foobar */
2187 /* we need to make a final sentinel transmit desc */
2191 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
2192 em_transmit_checksum_setup(adapter, m_head,
2193 &txd_upper, &txd_lower);
2195 i = adapter->next_avail_tx_desc;
2196 if (adapter->pcix_82544)
2199 /* Set up our transmit descriptors */
2200 for (j = 0; j < nsegs; j++) {
2202 bus_addr_t seg_addr;
2203 /* If adapter is 82544 and on PCIX bus */
2204 if(adapter->pcix_82544) {
2205 DESC_ARRAY desc_array;
2206 u32 array_elements, counter;
2208 * Check the Address and Length combination and
2209 * split the data accordingly
2211 array_elements = em_fill_descriptors(segs[j].ds_addr,
2212 segs[j].ds_len, &desc_array);
2213 for (counter = 0; counter < array_elements; counter++) {
/* Ran out of descriptors mid-packet: roll back the ring
 * head and release the DMA map. */
2214 if (txd_used == adapter->num_tx_desc_avail) {
2215 adapter->next_avail_tx_desc = txd_saved;
2216 adapter->no_tx_desc_avail2++;
2217 bus_dmamap_unload(adapter->txtag, map);
2220 tx_buffer = &adapter->tx_buffer_area[i];
2221 ctxd = &adapter->tx_desc_base[i];
2222 ctxd->buffer_addr = htole64(
2223 desc_array.descriptor[counter].address);
2224 ctxd->lower.data = htole32(
2225 (adapter->txd_cmd | txd_lower | (u16)
2226 desc_array.descriptor[counter].length));
2228 htole32((txd_upper));
2230 if (++i == adapter->num_tx_desc)
2232 tx_buffer->m_head = NULL;
2233 tx_buffer->next_eop = -1;
2237 tx_buffer = &adapter->tx_buffer_area[i];
2238 ctxd = &adapter->tx_desc_base[i];
2239 seg_addr = segs[j].ds_addr;
2240 seg_len = segs[j].ds_len;
2243 ** If this is the last descriptor, we want to
2244 ** split it so we have a small final sentinel
2246 if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
2248 ctxd->buffer_addr = htole64(seg_addr);
2249 ctxd->lower.data = htole32(
2250 adapter->txd_cmd | txd_lower | seg_len);
2253 if (++i == adapter->num_tx_desc)
2255 /* Now make the sentinel */
2256 ++txd_used; /* using an extra txd */
2257 ctxd = &adapter->tx_desc_base[i];
2258 tx_buffer = &adapter->tx_buffer_area[i];
2260 htole64(seg_addr + seg_len);
2261 ctxd->lower.data = htole32(
2262 adapter->txd_cmd | txd_lower | 4);
2266 if (++i == adapter->num_tx_desc)
2269 ctxd->buffer_addr = htole64(seg_addr);
2270 ctxd->lower.data = htole32(
2271 adapter->txd_cmd | txd_lower | seg_len);
2275 if (++i == adapter->num_tx_desc)
2278 tx_buffer->m_head = NULL;
2279 tx_buffer->next_eop = -1;
/* Commit ring state: advance head and account for used descriptors
 * (the 82544 split path counts via txd_used). */
2283 adapter->next_avail_tx_desc = i;
2284 if (adapter->pcix_82544)
2285 adapter->num_tx_desc_avail -= txd_used;
2287 adapter->num_tx_desc_avail -= nsegs;
2288 if (tso_desc) /* TSO used an extra for sentinel */
2289 adapter->num_tx_desc_avail -= txd_used;
2293 ** Handle VLAN tag, this is the
2294 ** biggest difference between
2297 #if __FreeBSD_version < 700000
2298 /* Find out if we are in vlan mode. */
2299 mtag = VLAN_OUTPUT_TAG(ifp, m_head);
2301 ctxd->upper.fields.special =
2302 htole16(VLAN_TAG_VALUE(mtag));
2303 #else /* FreeBSD 7 */
2304 if (m_head->m_flags & M_VLANTAG) {
2305 /* Set the vlan id. */
2306 ctxd->upper.fields.special =
2307 htole16(m_head->m_pkthdr.ether_vtag);
2309 /* Tell hardware to add tag */
2310 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
2313 tx_buffer->m_head = m_head;
2314 tx_buffer_mapped->map = tx_buffer->map;
2315 tx_buffer->map = map;
2316 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
2319 * Last Descriptor of Packet
2320 * needs End Of Packet (EOP)
2321 * and Report Status (RS)
2324 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
2326 * Keep track in the first buffer which
2327 * descriptor will be written back
2329 tx_buffer = &adapter->tx_buffer_area[first];
2330 tx_buffer->next_eop = last;
2333 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
2334 * that this frame is available to transmit.
2336 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2337 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* 82547 half-duplex: route through the FIFO-workaround tail mover. */
2338 if (adapter->hw.mac.type == e1000_82547 &&
2339 adapter->link_duplex == HALF_DUPLEX)
2340 em_82547_move_tail(adapter);
2342 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
2343 if (adapter->hw.mac.type == e1000_82547)
2344 em_82547_update_fifo_head(adapter,
2345 m_head->m_pkthdr.len);
2351 /*********************************************************************
2353 * 82547 workaround to avoid controller hang in half-duplex environment.
2354 * The workaround is to avoid queuing a large packet that would span
2355 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
2356 * in this case. We do that only when FIFO is quiescent.
2358 **********************************************************************/
2360 em_82547_move_tail(void *arg)
/* Deferred TDT write for the 82547 Tx FIFO workaround: walk the descriptors
** between the hardware tail and the software tail, total the queued bytes,
** and only advance TDT when em_82547_fifo_workaround() says the packet will
** not wrap the internal FIFO; otherwise re-arm the callout and retry later. */
2362 struct adapter *adapter = arg;
2363 struct e1000_tx_desc *tx_desc;
2364 u16 hw_tdt, sw_tdt, length = 0;
/* Caller must hold the TX lock (asserted below). */
2367 EM_TX_LOCK_ASSERT(adapter);
2369 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
2370 sw_tdt = adapter->next_avail_tx_desc;
2372 while (hw_tdt != sw_tdt) {
2373 tx_desc = &adapter->tx_desc_base[hw_tdt];
2374 length += tx_desc->lower.flags.length;
2375 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
2376 if (++hw_tdt == adapter->num_tx_desc)
/* NOTE(review): the index wrap and the per-EOP handling fall in listing
** lines 2377-2379, which are not shown here -- confirm against full source. */
2380 if (em_82547_fifo_workaround(adapter, length)) {
2381 adapter->tx_fifo_wrk_cnt++;
/* Retry on the next tick (1) rather than stalling under the lock. */
2382 callout_reset(&adapter->tx_fifo_timer, 1,
2383 em_82547_move_tail, adapter);
2386 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
2387 em_82547_update_fifo_head(adapter, length);
2394 em_82547_fifo_workaround(struct adapter *adapter, int len)
/* Decide whether queuing a packet of 'len' bytes risks spanning the 82547's
** internal Tx FIFO boundary. Only relevant in half-duplex; when the padded
** packet length exceeds the remaining FIFO space (plus threshold), attempt
** a FIFO reset via em_82547_tx_fifo_reset() before allowing the send. */
2396 int fifo_space, fifo_pkt_len;
/* Round the packet up to the FIFO header granularity, header included. */
2398 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2400 if (adapter->link_duplex == HALF_DUPLEX) {
2401 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2403 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
2404 if (em_82547_tx_fifo_reset(adapter))
2415 em_82547_update_fifo_head(struct adapter *adapter, int len)
/* Advance the driver's shadow of the 82547 Tx FIFO head by the padded
** length of the packet just queued, wrapping at the FIFO size. */
2417 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2419 /* tx_fifo_head is always 16 byte aligned */
2420 adapter->tx_fifo_head += fifo_pkt_len;
2421 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
2422 adapter->tx_fifo_head -= adapter->tx_fifo_size;
2428 em_82547_tx_fifo_reset(struct adapter *adapter)
/* Reset the 82547 internal Tx FIFO pointers, but only when the Tx unit is
** completely quiescent: descriptor head == tail, FIFO head == tail,
** FIFO saved head == saved tail, and zero packets pending in the FIFO.
** The Tx unit is disabled around the pointer rewrite and then restored. */
2432 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
2433 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
2434 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
2435 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
2436 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
2437 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
2438 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
2439 /* Disable TX unit */
2440 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2441 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
2442 tctl & ~E1000_TCTL_EN);
2444 /* Reset FIFO pointers */
2445 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
2446 adapter->tx_head_addr);
2447 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
2448 adapter->tx_head_addr);
2449 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
2450 adapter->tx_head_addr);
2451 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
2452 adapter->tx_head_addr);
2454 /* Re-enable TX unit */
2455 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
/* Flush posted writes before anyone queues more work. */
2456 E1000_WRITE_FLUSH(&adapter->hw);
2458 adapter->tx_fifo_head = 0;
2459 adapter->tx_fifo_reset_cnt++;
2469 em_set_promisc(struct adapter *adapter)
/* Program RCTL to match the interface's promiscuous/allmulti flags:
** IFF_PROMISC sets unicast+multicast promiscuous (UPE|MPE), IFF_ALLMULTI
** sets multicast promiscuous only (MPE, clearing UPE). */
2471 struct ifnet *ifp = adapter->ifp;
2474 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2476 if (ifp->if_flags & IFF_PROMISC) {
2477 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2478 /* Turn this on if you want to see bad packets */
/* NOTE(review): the guard for SBP (listing line 2479) is not shown here --
** presumably a debug tunable gates it; confirm against full source. */
2480 reg_rctl |= E1000_RCTL_SBP;
2481 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2482 } else if (ifp->if_flags & IFF_ALLMULTI) {
2483 reg_rctl |= E1000_RCTL_MPE;
2484 reg_rctl &= ~E1000_RCTL_UPE;
2485 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2490 em_disable_promisc(struct adapter *adapter)
/* Clear all promiscuous-mode bits (unicast, multicast, bad-packet store)
** from RCTL, returning the receiver to normal address filtering. */
2494 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2496 reg_rctl &= (~E1000_RCTL_UPE);
2497 reg_rctl &= (~E1000_RCTL_MPE);
2498 reg_rctl &= (~E1000_RCTL_SBP);
2499 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2503 /*********************************************************************
2506 * This routine is called whenever multicast address list is updated.
2508 **********************************************************************/
2511 em_set_multi(struct adapter *adapter)
/* Rebuild the hardware multicast filter from the interface's multicast
** address list. If the list overflows the hardware table, fall back to
** multicast-promiscuous (MPE). The 82542 rev 2 requires the receiver to
** be held in reset (and MWI disabled) while the table is rewritten. */
2513 struct ifnet *ifp = adapter->ifp;
2514 struct ifmultiaddr *ifma;
2516 u8 *mta; /* Multicast array memory */
2519 IOCTL_DEBUGOUT("em_set_multi: begin");
2521 if (adapter->hw.mac.type == e1000_82542 &&
2522 adapter->hw.revision_id == E1000_REVISION_2) {
2523 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2524 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2525 e1000_pci_clear_mwi(&adapter->hw);
2526 reg_rctl |= E1000_RCTL_RST;
2527 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2531 /* Allocate temporary memory to setup array */
2532 mta = malloc(sizeof(u8) *
2533 (ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES),
2534 M_DEVBUF, M_NOWAIT | M_ZERO);
/* NOTE(review): the NULL check guarding this panic (listing line 2535)
** is not shown here -- confirm against full source. */
2536 panic("em_set_multi memory failure\n");
2538 if_maddr_rlock(ifp);
2539 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
/* Skip non-link-layer entries; stop collecting at the hardware limit. */
2540 if (ifma->ifma_addr->sa_family != AF_LINK)
2543 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2546 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2547 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2550 if_maddr_runlock(ifp);
2552 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2553 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2554 reg_rctl |= E1000_RCTL_MPE;
2555 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2557 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
2559 if (adapter->hw.mac.type == e1000_82542 &&
2560 adapter->hw.revision_id == E1000_REVISION_2) {
/* Take the 82542 receiver back out of reset and restore MWI. */
2561 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2562 reg_rctl &= ~E1000_RCTL_RST;
2563 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2565 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2566 e1000_pci_set_mwi(&adapter->hw);
2568 free(mta, M_DEVBUF);
2572 /*********************************************************************
2575 * This routine checks for link status and updates statistics.
2577 **********************************************************************/
2580 em_local_timer(void *arg)
/* Once-per-second housekeeping callout: kick the rx/tx task, refresh link
** state and statistics, re-seed RAR[0] on 82571 LAA parts, run the
** smartspeed workaround and the watchdog, then re-arm itself. */
2582 struct adapter *adapter = arg;
2583 struct ifnet *ifp = adapter->ifp;
/* Runs from the callout with the core lock held (asserted below). */
2585 EM_CORE_LOCK_ASSERT(adapter);
2587 taskqueue_enqueue(adapter->tq,
2588 &adapter->rxtx_task);
2589 em_update_link_status(adapter);
2590 em_update_stats_counters(adapter);
2592 /* Reset LAA into RAR[0] on 82571 */
2593 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
2594 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2596 if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
2597 em_print_hw_stats(adapter);
2599 em_smartspeed(adapter);
2602 * Each second we check the watchdog to
2603 * protect against hardware hangs.
2605 em_watchdog(adapter);
2607 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
2612 em_update_link_status(struct adapter *adapter)
/* Poll the PHY/MAC for current link state (per media type) and, on a
** transition, update cached speed/duplex, ifnet baudrate and link state,
** and apply the 82571/82572 PCI-E SPEED_MODE erratum at sub-gigabit. */
2614 struct e1000_hw *hw = &adapter->hw;
2615 struct ifnet *ifp = adapter->ifp;
2616 device_t dev = adapter->dev;
2619 /* Get the cached link value or read phy for real */
2620 switch (hw->phy.media_type) {
2621 case e1000_media_type_copper:
2622 if (hw->mac.get_link_status) {
2623 /* Do the work to read phy */
2624 e1000_check_for_link(hw);
/* get_link_status is cleared by the shared code once link is read. */
2625 link_check = !hw->mac.get_link_status;
2626 if (link_check) /* ESB2 fix */
2627 e1000_cfg_on_link_up(hw);
2631 case e1000_media_type_fiber:
2632 e1000_check_for_link(hw);
2633 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2636 case e1000_media_type_internal_serdes:
2637 e1000_check_for_link(hw);
2638 link_check = adapter->hw.mac.serdes_has_link;
2641 case e1000_media_type_unknown:
2645 /* Now check for a transition */
2646 if (link_check && (adapter->link_active == 0)) {
2647 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2648 &adapter->link_duplex);
2649 /* Check if we must disable SPEED_MODE bit on PCI-E */
2650 if ((adapter->link_speed != SPEED_1000) &&
2651 ((hw->mac.type == e1000_82571) ||
2652 (hw->mac.type == e1000_82572))) {
2654 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
2655 tarc0 &= ~SPEED_MODE_BIT;
2656 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
2659 device_printf(dev, "Link is up %d Mbps %s\n",
2660 adapter->link_speed,
2661 ((adapter->link_duplex == FULL_DUPLEX) ?
2662 "Full Duplex" : "Half Duplex"));
2663 adapter->link_active = 1;
2664 adapter->smartspeed = 0;
2665 ifp->if_baudrate = adapter->link_speed * 1000000;
2666 if_link_state_change(ifp, LINK_STATE_UP);
2667 } else if (!link_check && (adapter->link_active == 1)) {
2668 ifp->if_baudrate = adapter->link_speed = 0;
2669 adapter->link_duplex = 0;
2671 device_printf(dev, "Link is Down\n");
2672 adapter->link_active = 0;
2673 /* Link down, disable watchdog */
2674 adapter->watchdog_timer = FALSE;
2675 if_link_state_change(ifp, LINK_STATE_DOWN);
2679 /*********************************************************************
2681 * This routine disables all traffic on the adapter by issuing a
2682 * global reset on the MAC and deallocates TX/RX buffers.
2684 * This routine should always be called with BOTH the CORE
2686 **********************************************************************/
/* em_stop: quiesce the adapter -- mask interrupts, stop the callouts, mark
** the interface down, and globally reset the MAC. Per the header above,
** both the CORE and TX locks must be held (asserted below). */
2691 struct adapter *adapter = arg;
2692 struct ifnet *ifp = adapter->ifp;
2694 EM_CORE_LOCK_ASSERT(adapter);
2695 EM_TX_LOCK_ASSERT(adapter);
2697 INIT_DEBUGOUT("em_stop: begin");
2699 em_disable_intr(adapter);
2700 callout_stop(&adapter->timer);
2701 callout_stop(&adapter->tx_fifo_timer);
2703 /* Tell the stack that the interface is no longer active */
2704 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2706 e1000_reset_hw(&adapter->hw);
/* Clear wake-up control on parts that have it (82544 and newer). */
2707 if (adapter->hw.mac.type >= e1000_82544)
2708 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2712 /*********************************************************************
2714 * Determine hardware revision.
2716 **********************************************************************/
2718 em_identify_hardware(struct adapter *adapter)
/* Read PCI config space to identify the board: make sure bus-mastering and
** memory access are enabled, cache vendor/device/revision/subsystem IDs,
** then let the shared code derive the MAC type from them. */
2720 device_t dev = adapter->dev;
2722 /* Make sure our PCI config space has the necessary stuff set */
2723 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2724 if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2725 (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2726 device_printf(dev, "Memory Access and/or Bus Master bits "
/* Some BIOSes leave these clear; set them ourselves and write back. */
2728 adapter->hw.bus.pci_cmd_word |=
2729 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2730 pci_write_config(dev, PCIR_COMMAND,
2731 adapter->hw.bus.pci_cmd_word, 2);
2734 /* Save off the information about this board */
2735 adapter->hw.vendor_id = pci_get_vendor(dev);
2736 adapter->hw.device_id = pci_get_device(dev);
2737 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2738 adapter->hw.subsystem_vendor_id =
2739 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2740 adapter->hw.subsystem_device_id =
2741 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2743 /* Do Shared Code Init and Setup */
2744 if (e1000_set_mac_type(&adapter->hw)) {
2745 device_printf(dev, "Setup init failure\n");
2751 em_allocate_pci_resources(struct adapter *adapter)
/* Map the device's memory BAR (and, for 82544..pre-82571 parts, the IO
** BAR), initialize the MSI/X resource arrays, and probe for MSI/MSI-X.
** Returns E1000_SUCCESS or an errno-style failure code. */
2753 device_t dev = adapter->dev;
2754 int val, rid, error = E1000_SUCCESS;
2757 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2759 if (adapter->memory == NULL) {
2760 device_printf(dev, "Unable to allocate bus resource: memory\n");
2763 adapter->osdep.mem_bus_space_tag =
2764 rman_get_bustag(adapter->memory);
2765 adapter->osdep.mem_bus_space_handle =
2766 rman_get_bushandle(adapter->memory);
/* The shared code treats hw_addr as the register base handle. */
2767 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2769 /* Only older adapters use IO mapping */
2770 if ((adapter->hw.mac.type > e1000_82543) &&
2771 (adapter->hw.mac.type < e1000_82571)) {
2772 /* Figure out which BAR is the IO BAR */
2773 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2774 val = pci_read_config(dev, rid, 4);
2775 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2776 adapter->io_rid = rid;
2780 /* check for 64bit BAR */
2781 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2784 if (rid >= PCIR_CIS) {
2785 device_printf(dev, "Unable to locate IO BAR\n");
2788 adapter->ioport = bus_alloc_resource_any(dev,
2789 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2790 if (adapter->ioport == NULL) {
2791 device_printf(dev, "Unable to allocate bus resource: "
2795 adapter->hw.io_base = 0;
2796 adapter->osdep.io_bus_space_tag =
2797 rman_get_bustag(adapter->ioport);
2798 adapter->osdep.io_bus_space_handle =
2799 rman_get_bushandle(adapter->ioport);
2803 ** Init the resource arrays
2804 ** used by MSIX setup
2806 for (int i = 0; i < 3; i++) {
2807 adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
2808 adapter->tag[i] = NULL;
2809 adapter->res[i] = NULL;
2813 * Setup MSI/X or MSI if PCI Express
2816 adapter->msi = em_setup_msix(adapter);
2818 adapter->hw.back = &adapter->osdep;
2823 /*********************************************************************
2825 * Setup the Legacy or MSI Interrupt handler
2827 **********************************************************************/
2829 em_allocate_legacy(struct adapter *adapter)
/* Allocate and wire up the single legacy or MSI interrupt. With
** EM_LEGACY_IRQ the handler runs as a normal ithread (em_intr); otherwise
** a filter (em_irq_fast) defers work to taskqueue contexts created here.
** Returns 0 on success or an errno from the bus layer. */
2831 device_t dev = adapter->dev;
2834 /* Manually turn off all interrupts */
2835 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2837 /* Legacy RID is 0 */
2838 if (adapter->msi == 0)
2839 adapter->rid[0] = 0;
2841 /* We allocate a single interrupt resource */
2842 adapter->res[0] = bus_alloc_resource_any(dev,
2843 SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
2844 if (adapter->res[0] == NULL) {
2845 device_printf(dev, "Unable to allocate bus resource: "
2850 #ifdef EM_LEGACY_IRQ
2851 /* We do Legacy setup */
2852 if ((error = bus_setup_intr(dev, adapter->res[0],
/* FreeBSD 7+ bus_setup_intr takes a (filter, handler) pair; older
** releases take the handler alone -- hence the version split. */
2853 #if __FreeBSD_version > 700000
2854 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr, adapter,
2856 INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter,
2858 &adapter->tag[0])) != 0) {
2859 device_printf(dev, "Failed to register interrupt handler");
2863 #else /* FAST_IRQ */
2865 * Try allocating a fast interrupt and the associated deferred
2866 * processing contexts.
2868 TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
2869 TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2870 adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2871 taskqueue_thread_enqueue, &adapter->tq);
2872 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2873 device_get_nameunit(adapter->dev));
2874 #if __FreeBSD_version < 700000
2875 if ((error = bus_setup_intr(dev, adapter->res[0],
2876 INTR_TYPE_NET | INTR_FAST, em_irq_fast, adapter,
2878 if ((error = bus_setup_intr(dev, adapter->res[0],
2879 INTR_TYPE_NET, em_irq_fast, NULL, adapter,
2881 &adapter->tag[0])) != 0) {
2882 device_printf(dev, "Failed to register fast interrupt "
2883 "handler: %d\n", error);
/* Failed setup: tear down the taskqueue created above. */
2884 taskqueue_free(adapter->tq);
2888 #endif /* EM_LEGACY_IRQ */
2893 /*********************************************************************
2895 * Setup the MSIX Interrupt handlers
2896 * This is not really Multiqueue; rather,
2897 * it is just multiple interrupt vectors.
2899 **********************************************************************/
2901 em_allocate_msix(struct adapter *adapter)
/* Allocate the MSI-X interrupt resources and attach one handler per
** vector: slot 0 = RX (em_msix_rx), slot 1 = TX (em_msix_tx),
** slot 2 = LINK (em_msix_link). Also creates the deferred-processing
** taskqueue contexts those handlers enqueue into. Returns 0 on success
** or an errno from the bus layer. */
2903 device_t dev = adapter->dev;
2906 /* Make sure all interrupts are disabled */
2907 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2909 /* First get the resources */
2910 for (int i = 0; i < adapter->msi; i++) {
2911 adapter->res[i] = bus_alloc_resource_any(dev,
2912 SYS_RES_IRQ, &adapter->rid[i], RF_ACTIVE);
2913 if (adapter->res[i] == NULL) {
2915 "Unable to allocate bus resource: "
2916 "MSIX Interrupt\n");
2922 * Now allocate deferred processing contexts.
2924 TASK_INIT(&adapter->rx_task, 0, em_handle_rx, adapter);
2925 TASK_INIT(&adapter->tx_task, 0, em_handle_tx, adapter);
2927 * Handle compatibility for msi case for deferral due to
2930 TASK_INIT(&adapter->rxtx_task, 0, em_handle_tx, adapter);
2931 TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2932 adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2933 taskqueue_thread_enqueue, &adapter->tq);
2934 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2935 device_get_nameunit(adapter->dev));
2938 * And setup the interrupt handlers
2941 /* First slot to RX */
2942 if ((error = bus_setup_intr(dev, adapter->res[0],
2943 #if __FreeBSD_version > 700000
2944 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, adapter,
2946 INTR_TYPE_NET | INTR_MPSAFE, em_msix_rx, adapter,
2948 &adapter->tag[0])) != 0) {
2949 device_printf(dev, "Failed to register RX handler");
2954 if ((error = bus_setup_intr(dev, adapter->res[1],
2955 #if __FreeBSD_version > 700000
2956 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, adapter,
2958 INTR_TYPE_NET | INTR_MPSAFE, em_msix_tx, adapter,
2960 &adapter->tag[1])) != 0) {
2961 device_printf(dev, "Failed to register TX handler");
2966 if ((error = bus_setup_intr(dev, adapter->res[2],
2967 #if __FreeBSD_version > 700000
2968 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_link, adapter,
2970 INTR_TYPE_NET | INTR_MPSAFE, em_msix_link, adapter,
/* BUGFIX: this vector registers em_msix_link, but the error message was a
** copy/paste of the TX one -- corrected to say LINK. */
2972 &adapter->tag[2])) != 0) {
2973 device_printf(dev, "Failed to register LINK handler");
2982 em_free_pci_resources(struct adapter *adapter)
/* Release everything em_allocate_pci_resources()/em_allocate_*() acquired:
** interrupt tags and IRQ resources (same array logic for legacy and MSI-X),
** the MSI vectors themselves, and the MSIX/memory/flash/ioport BARs. */
2984 device_t dev = adapter->dev;
2986 /* Make sure the for loop below runs once */
2987 if (adapter->msi == 0)
2991 * First release all the interrupt resources:
2992 * notice that since these are just kept
2993 * in an array we can do the same logic
2994 * whether its MSIX or just legacy.
2996 for (int i = 0; i < adapter->msi; i++) {
2997 if (adapter->tag[i] != NULL) {
2998 bus_teardown_intr(dev, adapter->res[i],
3000 adapter->tag[i] = NULL;
3002 if (adapter->res[i] != NULL) {
3003 bus_release_resource(dev, SYS_RES_IRQ,
3004 adapter->rid[i], adapter->res[i]);
3009 pci_release_msi(dev);
3011 if (adapter->msix != NULL)
3012 bus_release_resource(dev, SYS_RES_MEMORY,
3013 PCIR_BAR(EM_MSIX_BAR), adapter->msix);
3015 if (adapter->memory != NULL)
3016 bus_release_resource(dev, SYS_RES_MEMORY,
3017 PCIR_BAR(0), adapter->memory);
3019 if (adapter->flash != NULL)
3020 bus_release_resource(dev, SYS_RES_MEMORY,
3021 EM_FLASH, adapter->flash);
3023 if (adapter->ioport != NULL)
3024 bus_release_resource(dev, SYS_RES_IOPORT,
3025 adapter->io_rid, adapter->ioport);
3029 * Setup MSI or MSI/X
3032 em_setup_msix(struct adapter *adapter)
3034 device_t dev = adapter->dev;
3037 if (adapter->hw.mac.type < e1000_82571)
3040 /* Setup MSI/X for Hartwell */
3041 if (adapter->hw.mac.type == e1000_82574) {
3042 /* Map the MSIX BAR */
3043 int rid = PCIR_BAR(EM_MSIX_BAR);
3044 adapter->msix = bus_alloc_resource_any(dev,
3045 SYS_RES_MEMORY, &rid, RF_ACTIVE);
3046 if (!adapter->msix) {
3047 /* May not be enabled */
3048 device_printf(adapter->dev,
3049 "Unable to map MSIX table \n");
3052 val = pci_msix_count(dev);
3054 ** 82574 can be configured for 5 but
3055 ** we limit use to 3.
3057 if (val > 3) val = 3;
3058 if ((val) && pci_alloc_msix(dev, &val) == 0) {
3059 device_printf(adapter->dev,"Using MSIX interrupts\n");
3064 val = pci_msi_count(dev);
3065 if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
3067 device_printf(adapter->dev,"Using MSI interrupt\n");
3073 /*********************************************************************
3075 * Initialize the hardware to a configuration
3076 * as specified by the adapter structure.
3078 **********************************************************************/
3080 em_hardware_init(struct adapter *adapter)
/* Bring the MAC to a known state: global reset, claim control from any
** management firmware, disable smart-power-down on 82571/82572, compute
** PAUSE-frame flow-control watermarks from the packet buffer size, apply
** the em_fc_setting tunable, and run the shared-code init. */
3082 device_t dev = adapter->dev;
3085 INIT_DEBUGOUT("em_hardware_init: begin");
3087 /* Issue a global reset */
3088 e1000_reset_hw(&adapter->hw);
3090 /* Get control from any management/hw control */
3091 if (((adapter->hw.mac.type == e1000_82573) ||
3092 (adapter->hw.mac.type == e1000_82583) ||
3093 (adapter->hw.mac.type == e1000_ich8lan) ||
3094 (adapter->hw.mac.type == e1000_ich10lan) ||
3095 (adapter->hw.mac.type == e1000_ich9lan)) &&
3096 e1000_check_mng_mode(&adapter->hw))
3097 em_get_hw_control(adapter);
3099 /* When hardware is reset, fifo_head is also reset */
3100 adapter->tx_fifo_head = 0;
3102 /* Set up smart power down as default off on newer adapters. */
3103 if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
3104 adapter->hw.mac.type == e1000_82572)) {
3107 /* Speed up time to link by disabling smart power down. */
3108 e1000_read_phy_reg(&adapter->hw,
3109 IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
3110 phy_tmp &= ~IGP02E1000_PM_SPD;
3111 e1000_write_phy_reg(&adapter->hw,
3112 IGP02E1000_PHY_POWER_MGMT, phy_tmp);
3116 * These parameters control the automatic generation (Tx) and
3117 * response (Rx) to Ethernet PAUSE frames.
3118 * - High water mark should allow for at least two frames to be
3119 * received after sending an XOFF.
3120 * - Low water mark works best when it is very near the high water mark.
3121 * This allows the receiver to restart by sending XON when it has
3122 * drained a bit. Here we use an arbitrary value of 1500 which will
3123 * restart after one full frame is pulled from the buffer. There
3124 * could be several smaller frames in the buffer and if so they will
3125 * not trigger the XON until their total number reduces the buffer
3127 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
3129 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
3132 adapter->hw.fc.high_water = rx_buffer_size -
3133 roundup2(adapter->max_frame_size, 1024);
3134 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
3136 if (adapter->hw.mac.type == e1000_80003es2lan)
3137 adapter->hw.fc.pause_time = 0xFFFF;
3139 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
3140 adapter->hw.fc.send_xon = TRUE;
3142 /* Set Flow control, use the tunable location if sane */
/* BUGFIX: the original used '||', which is true for every int value and
** made the e1000_fc_none fallback unreachable; '&&' gives the intended
** valid-range check 0 <= em_fc_setting < 4. */
3143 if ((em_fc_setting >= 0) && (em_fc_setting < 4))
3144 adapter->hw.fc.requested_mode = em_fc_setting;
3146 adapter->hw.fc.requested_mode = e1000_fc_none;
3149 if (e1000_init_hw(&adapter->hw) < 0) {
3150 device_printf(dev, "Hardware Initialization Failed\n");
3154 e1000_check_for_link(&adapter->hw);
3159 /*********************************************************************
3161 * Setup networking device structure and register an interface.
3163 **********************************************************************/
3165 em_setup_interface(device_t dev, struct adapter *adapter)
/* Create and register the ifnet: basic callbacks (init/ioctl/start),
** capability flags (checksum offload, TSO4, VLAN, polling) gated by MAC
** type and FreeBSD version, and the ifmedia list matching the PHY's
** media type. */
3169 INIT_DEBUGOUT("em_setup_interface: begin");
3171 ifp = adapter->ifp = if_alloc(IFT_ETHER);
3173 panic("%s: can not if_alloc()", device_get_nameunit(dev));
3174 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3175 ifp->if_mtu = ETHERMTU;
3176 ifp->if_init = em_init;
3177 ifp->if_softc = adapter;
3178 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3179 ifp->if_ioctl = em_ioctl;
3180 ifp->if_start = em_start;
3181 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
3182 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
3183 IFQ_SET_READY(&ifp->if_snd);
3185 ether_ifattach(ifp, adapter->hw.mac.addr);
3187 ifp->if_capabilities = ifp->if_capenable = 0;
3189 #if __FreeBSD_version >= 800000
3190 /* Multiqueue tx functions */
3191 ifp->if_transmit = em_mq_start;
3192 ifp->if_qflush = em_qflush;
3193 adapter->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &adapter->tx_mtx);
3195 if (adapter->hw.mac.type >= e1000_82543) {
3197 #if __FreeBSD_version < 700000
3198 version_cap = IFCAP_HWCSUM;
3200 version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
3202 ifp->if_capabilities |= version_cap;
3203 ifp->if_capenable |= version_cap;
3206 #if __FreeBSD_version >= 700000
3207 /* Identify TSO capable adapters */
3208 if ((adapter->hw.mac.type > e1000_82544) &&
3209 (adapter->hw.mac.type != e1000_82547))
3210 ifp->if_capabilities |= IFCAP_TSO4;
3212 * By default only enable on PCI-E, this
3213 * can be overriden by ifconfig.
3215 if (adapter->hw.mac.type >= e1000_82571)
3216 ifp->if_capenable |= IFCAP_TSO4;
3220 * Tell the upper layer(s) we support long frames.
3222 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3223 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
3224 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
3226 #ifdef DEVICE_POLLING
3227 ifp->if_capabilities |= IFCAP_POLLING;
3231 * Specify the media types supported by this adapter and register
3232 * callbacks to update media and link information
3234 ifmedia_init(&adapter->media, IFM_IMASK,
3235 em_media_change, em_media_status);
3236 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3237 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
3238 u_char fiber_type = IFM_1000_SX; /* default type */
/* The 82545 fiber part is 1000BASE-LX rather than SX. */
3240 if (adapter->hw.mac.type == e1000_82545)
3241 fiber_type = IFM_1000_LX;
3242 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
3244 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
3246 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
3247 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
3249 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
3251 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
/* The IFE PHY (some ICH8 parts) is 10/100 only -- no 1000_T entries. */
3253 if (adapter->hw.phy.type != e1000_phy_ife) {
3254 ifmedia_add(&adapter->media,
3255 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
3256 ifmedia_add(&adapter->media,
3257 IFM_ETHER | IFM_1000_T, 0, NULL);
3260 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3261 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3265 /*********************************************************************
3267 * Workaround for SmartSpeed on 82541 and 82547 controllers
3269 **********************************************************************/
3271 em_smartspeed(struct adapter *adapter)
/* SmartSpeed workaround for 82541/82547 (IGP PHY): if autonegotiation to
** 1000FULL repeatedly fails on Master/Slave config faults, temporarily
** clear the manual MS enable and restart autoneg; after
** EM_SMARTSPEED_DOWNSHIFT tries, re-enable it, and reset the state
** counter after EM_SMARTSPEED_MAX iterations. Driven by em_local_timer. */
3275 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
3276 adapter->hw.mac.autoneg == 0 ||
3277 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
3280 if (adapter->smartspeed == 0) {
3281 /* If Master/Slave config fault is asserted twice,
3282 * we assume back-to-back */
3283 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
3284 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
3286 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
3287 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
3288 e1000_read_phy_reg(&adapter->hw,
3289 PHY_1000T_CTRL, &phy_tmp);
3290 if(phy_tmp & CR_1000T_MS_ENABLE) {
3291 phy_tmp &= ~CR_1000T_MS_ENABLE;
3292 e1000_write_phy_reg(&adapter->hw,
3293 PHY_1000T_CTRL, phy_tmp);
3294 adapter->smartspeed++;
3295 if(adapter->hw.mac.autoneg &&
3296 !e1000_phy_setup_autoneg(&adapter->hw) &&
3297 !e1000_read_phy_reg(&adapter->hw,
3298 PHY_CONTROL, &phy_tmp)) {
3299 phy_tmp |= (MII_CR_AUTO_NEG_EN |
3300 MII_CR_RESTART_AUTO_NEG);
3301 e1000_write_phy_reg(&adapter->hw,
3302 PHY_CONTROL, phy_tmp);
3307 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
3308 /* If still no link, perhaps using 2/3 pair cable */
3309 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
3310 phy_tmp |= CR_1000T_MS_ENABLE;
3311 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
3312 if(adapter->hw.mac.autoneg &&
3313 !e1000_phy_setup_autoneg(&adapter->hw) &&
3314 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
3315 phy_tmp |= (MII_CR_AUTO_NEG_EN |
3316 MII_CR_RESTART_AUTO_NEG);
3317 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
3320 /* Restart process after EM_SMARTSPEED_MAX iterations */
3321 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
3322 adapter->smartspeed = 0;
3327 * Manage DMA'able memory.
3330 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
/* bus_dmamap_load() callback: store the (single) segment's bus address
** into the bus_addr_t the caller passed as 'arg'.
** NOTE(review): the error early-return (listing lines 3331-3333) is not
** shown here -- confirm the callback bails out when 'error' is set. */
3334 *(bus_addr_t *) arg = segs[0].ds_addr;
3338 em_dma_malloc(struct adapter *adapter, bus_size_t size,
3339 struct em_dma_alloc *dma, int mapflags)
/* Allocate a DMA-able memory region of 'size' bytes: create a tag,
** allocate coherent memory, and load the map to obtain the bus address
** (captured via em_dmamap_cb into dma->dma_paddr). On any failure the
** partially-acquired resources are released and the dma struct cleared. */
3343 #if __FreeBSD_version >= 700000
3344 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
3346 error = bus_dma_tag_create(NULL, /* parent */
3348 EM_DBA_ALIGN, 0, /* alignment, bounds */
3349 BUS_SPACE_MAXADDR, /* lowaddr */
3350 BUS_SPACE_MAXADDR, /* highaddr */
3351 NULL, NULL, /* filter, filterarg */
3354 size, /* maxsegsize */
3356 NULL, /* lockfunc */
3360 device_printf(adapter->dev,
3361 "%s: bus_dma_tag_create failed: %d\n",
3366 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3367 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
3369 device_printf(adapter->dev,
3370 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3371 __func__, (uintmax_t)size, error);
3376 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3377 size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
3378 if (error || dma->dma_paddr == 0) {
3379 device_printf(adapter->dev,
3380 "%s: bus_dmamap_load failed: %d\n",
/* Error unwind: release in reverse order of acquisition. */
3388 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3390 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3391 bus_dma_tag_destroy(dma->dma_tag);
3393 dma->dma_map = NULL;
3394 dma->dma_tag = NULL;
3400 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
/* Tear down a region created by em_dma_malloc(): sync, unload and free the
** map/memory, then destroy the tag. Safe to call on an already-freed
** struct (dma_tag NULL check); pointers are NULLed to prevent reuse. */
3402 if (dma->dma_tag == NULL)
3404 if (dma->dma_map != NULL) {
3405 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3406 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3407 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3408 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3409 dma->dma_map = NULL;
3411 bus_dma_tag_destroy(dma->dma_tag);
3412 dma->dma_tag = NULL;
3416 /*********************************************************************
3418 * Allocate memory for tx_buffer structures. The tx_buffer stores all
3419 * the information needed to transmit a packet on the wire.
3421 **********************************************************************/
3423 em_allocate_transmit_structures(struct adapter *adapter)
/* Allocate the per-descriptor transmit bookkeeping: the TX DMA tag (sized
** for TSO), the tx_buffer_area array, and one DMA map per descriptor.
** On failure everything already acquired is torn down via
** em_free_transmit_structures(). */
3425 device_t dev = adapter->dev;
3426 struct em_buffer *tx_buffer;
3430 * Create DMA tags for tx descriptors
3432 #if __FreeBSD_version >= 700000
3433 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3435 if ((error = bus_dma_tag_create(NULL, /* parent */
3437 1, 0, /* alignment, bounds */
3438 BUS_SPACE_MAXADDR, /* lowaddr */
3439 BUS_SPACE_MAXADDR, /* highaddr */
3440 NULL, NULL, /* filter, filterarg */
3441 EM_TSO_SIZE, /* maxsize */
3442 EM_MAX_SCATTER, /* nsegments */
3443 EM_TSO_SEG_SIZE, /* maxsegsize */
3445 NULL, /* lockfunc */
3447 &adapter->txtag)) != 0) {
3448 device_printf(dev, "Unable to allocate TX DMA tag\n");
3452 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
3453 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3454 if (adapter->tx_buffer_area == NULL) {
3455 device_printf(dev, "Unable to allocate tx_buffer memory\n");
3460 /* Create the descriptor buffer dma maps */
3461 for (int i = 0; i < adapter->num_tx_desc; i++) {
3462 tx_buffer = &adapter->tx_buffer_area[i];
3463 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
3465 device_printf(dev, "Unable to create TX DMA map\n");
/* -1 marks "no pending EOP descriptor" for this slot. */
3468 tx_buffer->next_eop = -1;
3473 em_free_transmit_structures(adapter);
3477 /*********************************************************************
3479 * (Re)Initialize transmit structures.
3481 **********************************************************************/
/*
 * Zeroes the TX descriptor ring, frees any mbufs still held by the
 * tx_buffer slots, resets head/tail/avail accounting, and syncs the
 * cleared ring out to the device.
 */
3483 em_setup_transmit_structures(struct adapter *adapter)
3485 struct em_buffer *tx_buffer;
3487 /* Clear the old ring contents */
3488 bzero(adapter->tx_desc_base,
3489 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
3491 /* Free any existing TX buffers */
/* NOTE(review): the tx_buffer++ in the increment clause is redundant -
 * tx_buffer is reassigned from i at the top of each iteration. Harmless
 * but worth cleaning up in the full source. */
3492 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3493 tx_buffer = &adapter->tx_buffer_area[i];
3494 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3495 BUS_DMASYNC_POSTWRITE);
3496 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
/* m_freem(NULL) is a no-op, so empty slots are handled implicitly. */
3497 m_freem(tx_buffer->m_head);
3498 tx_buffer->m_head = NULL;
3499 tx_buffer->next_eop = -1;
/* Ring is empty again: producer/consumer indices back to zero. */
3503 adapter->next_avail_tx_desc = 0;
3504 adapter->next_tx_to_clean = 0;
3505 adapter->num_tx_desc_avail = adapter->num_tx_desc;
/* Flush the zeroed descriptors to memory before the HW is re-enabled. */
3507 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3508 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3513 /*********************************************************************
3515 * Enable transmit unit.
3517 **********************************************************************/
/*
 * Programs the hardware TX engine: descriptor ring base/length, head
 * and tail pointers, inter-packet gap (per MAC generation), interrupt
 * delay timers, TARC workarounds, and finally TCTL to enable transmit.
 * NOTE(review): listing decimated (case labels / some statements are
 * elided from the switch and TARC paths).
 */
3519 em_initialize_transmit_unit(struct adapter *adapter)
3521 u32 tctl, tarc, tipg = 0;
3524 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
3525 /* Setup the Base and Length of the Tx Descriptor Ring */
3526 bus_addr = adapter->txdma.dma_paddr;
3527 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
3528 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
/* 64-bit ring address split across the high/low base registers. */
3529 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
3530 (u32)(bus_addr >> 32));
3531 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
3533 /* Setup the HW Tx Head and Tail descriptor pointers */
3534 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
3535 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
3537 HW_DEBUGOUT2("Base = %x, Length = %x\n",
3538 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
3539 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
3541 /* Set the default values for the Tx Inter Packet Gap timer */
3542 switch (adapter->hw.mac.type) {
3544 tipg = DEFAULT_82542_TIPG_IPGT;
3545 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3546 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3548 case e1000_80003es2lan:
3549 tipg = DEFAULT_82543_TIPG_IPGR1;
3550 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
3551 E1000_TIPG_IPGR2_SHIFT;
/* Default path: fiber/serdes needs a different IPGT than copper. */
3554 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3555 (adapter->hw.phy.media_type ==
3556 e1000_media_type_internal_serdes))
3557 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
3559 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
3560 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3561 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3564 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
3565 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
/* TADV (absolute TX interrupt delay) only exists on 82540 and later. */
3566 if(adapter->hw.mac.type >= e1000_82540)
3567 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
3568 adapter->tx_abs_int_delay.value);
/* 82571/82572 errata: set the TARC speed-mode bit. */
3570 if ((adapter->hw.mac.type == e1000_82571) ||
3571 (adapter->hw.mac.type == e1000_82572)) {
3572 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3573 tarc |= SPEED_MODE_BIT;
3574 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3575 } else if (adapter->hw.mac.type == e1000_80003es2lan) {
3576 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3578 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3579 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
3581 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
3584 /* Program the Transmit Control Register */
3585 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
3586 tctl &= ~E1000_TCTL_CT;
3587 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
3588 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
/* Multiple request support on 82571 and newer. */
3590 if (adapter->hw.mac.type >= e1000_82571)
3591 tctl |= E1000_TCTL_MULR;
3593 /* This write will effectively turn on the transmit unit. */
3594 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
3596 /* Setup Transmit Descriptor Base Settings */
3597 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
/* Only request interrupt-delay (IDE) when a TX delay is configured. */
3599 if (adapter->tx_int_delay.value > 0)
3600 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3603 /*********************************************************************
3605 * Free all transmit related data structures.
3607 **********************************************************************/
/*
 * Tears down everything em_allocate_transmit_structures() built: any
 * in-flight mbufs and their DMA maps, the tx_buffer array, the TX DMA
 * tag, and (FreeBSD 8+) the buf_ring.  Tolerates partial allocation -
 * every pointer is NULL-checked and NULLed after release.
 */
3609 em_free_transmit_structures(struct adapter *adapter)
3611 struct em_buffer *tx_buffer;
3613 INIT_DEBUGOUT("free_transmit_structures: begin");
3615 if (adapter->tx_buffer_area != NULL) {
3616 for (int i = 0; i < adapter->num_tx_desc; i++) {
3617 tx_buffer = &adapter->tx_buffer_area[i];
/* Slot with a pending mbuf: sync, unload, then free the chain. */
3618 if (tx_buffer->m_head != NULL) {
3619 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3620 BUS_DMASYNC_POSTWRITE);
3621 bus_dmamap_unload(adapter->txtag,
3623 m_freem(tx_buffer->m_head);
3624 tx_buffer->m_head = NULL;
/* Slot without an mbuf may still have a loaded map: unload only. */
3625 } else if (tx_buffer->map != NULL)
3626 bus_dmamap_unload(adapter->txtag,
3628 if (tx_buffer->map != NULL) {
3629 bus_dmamap_destroy(adapter->txtag,
3631 tx_buffer->map = NULL;
3635 if (adapter->tx_buffer_area != NULL) {
3636 free(adapter->tx_buffer_area, M_DEVBUF);
3637 adapter->tx_buffer_area = NULL;
3639 if (adapter->txtag != NULL) {
3640 bus_dma_tag_destroy(adapter->txtag);
3641 adapter->txtag = NULL;
3643 #if __FreeBSD_version >= 800000
3644 if (adapter->br != NULL)
3645 buf_ring_free(adapter->br, M_DEVBUF);
3649 /*********************************************************************
3651 * The offload context needs to be set when we transfer the first
3652 * packet of a particular protocol (TCP/UDP). This routine has been
3653 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
3655 * Added back the old method of keeping the current context type
3656 * and not setting if unnecessary, as this is reported to be a
3657 * big performance win. -jfv
3658 **********************************************************************/
/*
 * Builds an e1000 context descriptor describing where the IP and
 * TCP/UDP checksums live in the frame, setting *txd_upper / *txd_lower
 * for the subsequent data descriptors.  Skips writing a new context
 * when last_hw_offload already matches the requested protocol.
 * NOTE(review): listing decimated - the switch/case labels and several
 * early returns are elided; verify against the full source.
 */
3660 em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
3661 u32 *txd_upper, u32 *txd_lower)
3663 struct e1000_context_desc *TXD = NULL;
3664 struct em_buffer *tx_buffer;
3665 struct ether_vlan_header *eh;
3666 struct ip *ip = NULL;
3667 struct ip6_hdr *ip6;
3668 int curr_txd, ehdrlen;
3669 u32 cmd, hdr_len, ip_hlen;
3674 cmd = hdr_len = ipproto = 0;
3675 curr_txd = adapter->next_avail_tx_desc;
3678 * Determine where frame payload starts.
3679 * Jump over vlan headers if already present,
3680 * helpful for QinQ too.
3682 eh = mtod(mp, struct ether_vlan_header *);
3683 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3684 etype = ntohs(eh->evl_proto);
3685 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3687 etype = ntohs(eh->evl_encap_proto);
3688 ehdrlen = ETHER_HDR_LEN;
3692 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
3693 * TODO: Support SCTP too when it hits the tree.
/* IPv4 case: compute header length from the IHL field (units of 4). */
3697 ip = (struct ip *)(mp->m_data + ehdrlen);
3698 ip_hlen = ip->ip_hl << 2;
3700 /* Setup of IP header checksum. */
3701 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3703 * Start offset for header checksum calculation.
3704 * End offset for header checksum calculation.
3705 * Offset of place to put the checksum.
3707 TXD = (struct e1000_context_desc *)
3708 &adapter->tx_desc_base[curr_txd];
3709 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3710 TXD->lower_setup.ip_fields.ipcse =
3711 htole16(ehdrlen + ip_hlen);
3712 TXD->lower_setup.ip_fields.ipcso =
3713 ehdrlen + offsetof(struct ip, ip_sum);
3714 cmd |= E1000_TXD_CMD_IP;
3715 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
/* Headers must be contiguous in the first mbuf or we bail out. */
3718 if (mp->m_len < ehdrlen + ip_hlen)
3719 return; /* failure */
3721 hdr_len = ehdrlen + ip_hlen;
3725 case ETHERTYPE_IPV6:
3726 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3727 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3729 if (mp->m_len < ehdrlen + ip_hlen)
3730 return; /* failure */
3732 /* IPv6 doesn't have a header checksum. */
3734 hdr_len = ehdrlen + ip_hlen;
3735 ipproto = ip6->ip6_nxt;
/* TCP payload checksum offload. */
3746 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3747 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3748 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3749 /* no need for context if already set */
3750 if (adapter->last_hw_offload == CSUM_TCP)
3752 adapter->last_hw_offload = CSUM_TCP;
3754 * Start offset for payload checksum calculation.
3755 * End offset for payload checksum calculation.
3756 * Offset of place to put the checksum.
3758 TXD = (struct e1000_context_desc *)
3759 &adapter->tx_desc_base[curr_txd];
3760 TXD->upper_setup.tcp_fields.tucss = hdr_len;
/* tucse == 0 means "checksum to end of packet". */
3761 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3762 TXD->upper_setup.tcp_fields.tucso =
3763 hdr_len + offsetof(struct tcphdr, th_sum);
3764 cmd |= E1000_TXD_CMD_TCP;
/* UDP payload checksum offload (same layout, different csum offset). */
3769 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3770 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3771 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3772 /* no need for context if already set */
3773 if (adapter->last_hw_offload == CSUM_UDP)
3775 adapter->last_hw_offload = CSUM_UDP;
3777 * Start offset for header checksum calculation.
3778 * End offset for header checksum calculation.
3779 * Offset of place to put the checksum.
3781 TXD = (struct e1000_context_desc *)
3782 &adapter->tx_desc_base[curr_txd];
3783 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3784 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3785 TXD->upper_setup.tcp_fields.tucso =
3786 hdr_len + offsetof(struct udphdr, uh_sum);
/* Finalize the context descriptor and consume one ring slot. */
3794 TXD->tcp_seg_setup.data = htole32(0);
3795 TXD->cmd_and_length =
3796 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3797 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3798 tx_buffer->m_head = NULL;
3799 tx_buffer->next_eop = -1;
/* Advance with wrap-around; context descriptor used one slot. */
3801 if (++curr_txd == adapter->num_tx_desc)
3804 adapter->num_tx_desc_avail--;
3805 adapter->next_avail_tx_desc = curr_txd;
3809 #if __FreeBSD_version >= 700000
3810 /**********************************************************************
3812 * Setup work for hardware segmentation offload (TSO)
3814 **********************************************************************/
/*
 * Validates that the frame is a contiguous IPv4 (IPv6 not yet
 * supported) TCP packet, precomputes the pseudo-header checksum the
 * hardware expects, and writes a TSO context descriptor carrying MSS
 * and total header length.  Returns FALSE when TSO cannot be applied.
 * NOTE(review): listing decimated - return type, TRUE return, and some
 * braces are elided.
 */
3816 em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
3819 struct e1000_context_desc *TXD;
3820 struct em_buffer *tx_buffer;
3821 struct ether_vlan_header *eh;
3823 struct ip6_hdr *ip6;
3825 int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
3829 * This function could/should be extended to support IP/IPv6
3830 * fragmentation as well. But as they say, one step at a time.
3834 * Determine where frame payload starts.
3835 * Jump over vlan headers if already present,
3836 * helpful for QinQ too.
3838 eh = mtod(mp, struct ether_vlan_header *);
3839 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3840 etype = ntohs(eh->evl_proto);
3841 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3843 etype = ntohs(eh->evl_encap_proto);
3844 ehdrlen = ETHER_HDR_LEN;
3847 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3848 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3849 return FALSE; /* -1 */
3852 * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
3853 * TODO: Support SCTP too when it hits the tree.
3858 ip = (struct ip *)(mp->m_data + ehdrlen);
3859 if (ip->ip_p != IPPROTO_TCP)
3860 return FALSE; /* 0 */
3863 ip_hlen = ip->ip_hl << 2;
/* Re-check contiguity now that the real IP header length is known. */
3864 if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
3865 return FALSE; /* -1 */
3866 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
/* Pre-seed th_sum with the pseudo-header checksum for the hardware. */
3868 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3869 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3871 th->th_sum = mp->m_pkthdr.csum_data;
3874 case ETHERTYPE_IPV6:
/* IPv6 TSO is disabled: this arm returns before the code below runs. */
3876 return FALSE; /* Not supported yet. */
3877 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3878 if (ip6->ip6_nxt != IPPROTO_TCP)
3879 return FALSE; /* 0 */
3881 ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
3882 if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
3883 return FALSE; /* -1 */
3884 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
/* NOTE(review): "ip->ip6_dst" looks like it should be "ip6->ip6_dst";
 * currently unreachable (see the return FALSE above) but should be
 * fixed before IPv6 TSO is enabled. */
3886 th->th_sum = in6_pseudo(ip6->ip6_src, ip->ip6_dst,
3887 htons(IPPROTO_TCP)); /* XXX: function notyet. */
3889 th->th_sum = mp->m_pkthdr.csum_data;
/* Total header bytes the HW must replicate per segment. */
3895 hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);
3897 *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
3898 E1000_TXD_DTYP_D | /* Data descr type */
3899 E1000_TXD_CMD_TSE); /* Do TSE on this packet */
3901 /* IP and/or TCP header checksum calculation and insertion. */
3902 *txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
3903 E1000_TXD_POPTS_TXSM) << 8;
3905 curr_txd = adapter->next_avail_tx_desc;
3906 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3907 TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
3909 /* IPv6 doesn't have a header checksum. */
3912 * Start offset for header checksum calculation.
3913 * End offset for header checksum calculation.
3914 * Offset of place put the checksum.
3916 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3917 TXD->lower_setup.ip_fields.ipcse =
3918 htole16(ehdrlen + ip_hlen - 1);
3919 TXD->lower_setup.ip_fields.ipcso =
3920 ehdrlen + offsetof(struct ip, ip_sum);
3923 * Start offset for payload checksum calculation.
3924 * End offset for payload checksum calculation.
3925 * Offset of place to put the checksum.
3927 TXD->upper_setup.tcp_fields.tucss =
/* tucse == 0 -> checksum runs to the end of the packet. */
3929 TXD->upper_setup.tcp_fields.tucse = 0;
3930 TXD->upper_setup.tcp_fields.tucso =
3931 ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
3933 * Payload size per packet w/o any headers.
3934 * Length of all headers up to payload.
3936 TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
3937 TXD->tcp_seg_setup.fields.hdr_len = hdr_len;
3939 TXD->cmd_and_length = htole32(adapter->txd_cmd |
3940 E1000_TXD_CMD_DEXT | /* Extended descr */
3941 E1000_TXD_CMD_TSE | /* TSE context */
3942 (isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
3943 E1000_TXD_CMD_TCP | /* Do TCP checksum */
3944 (mp->m_pkthdr.len - (hdr_len))); /* Total len */
3946 tx_buffer->m_head = NULL;
3947 tx_buffer->next_eop = -1;
/* Consume the slot used by the context descriptor, with wrap. */
3949 if (++curr_txd == adapter->num_tx_desc)
3952 adapter->num_tx_desc_avail--;
3953 adapter->next_avail_tx_desc = curr_txd;
3954 adapter->tx_tso = TRUE;
3959 #endif /* __FreeBSD_version >= 700000 */
3961 /**********************************************************************
3963 * Examine each tx_buffer in the used queue. If the hardware is done
3964 * processing the packet then free associated resources. The
3965 * tx_buffer is put back on the free queue.
3967 **********************************************************************/
/*
 * TX completion: walks the ring from next_tx_to_clean, and for each
 * packet whose EOP descriptor has DD (descriptor-done) set, zeroes the
 * descriptors, unmaps and frees the mbuf, and reclaims the slots.
 * Clears IFF_DRV_OACTIVE / the watchdog once enough room is recovered.
 * Caller must hold the TX lock (asserted below).
 * NOTE(review): listing decimated - wrap-around assignments and some
 * statements (e.g. the "done = last" step) are elided.
 */
3969 em_txeof(struct adapter *adapter)
3971 int first, last, done, num_avail;
3973 struct em_buffer *tx_buffer;
3974 struct e1000_tx_desc *tx_desc, *eop_desc;
3975 struct ifnet *ifp = adapter->ifp;
3977 EM_TX_LOCK_ASSERT(adapter);
/* Nothing outstanding: the ring is already fully reclaimed. */
3979 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3982 num_avail = adapter->num_tx_desc_avail;
3983 first = adapter->next_tx_to_clean;
3984 tx_desc = &adapter->tx_desc_base[first];
3985 tx_buffer = &adapter->tx_buffer_area[first];
3986 last = tx_buffer->next_eop;
3987 eop_desc = &adapter->tx_desc_base[last];
3990 * What this does is get the index of the
3991 * first descriptor AFTER the EOP of the
3992 * first packet, that way we can do the
3993 * simple comparison on the inner while loop.
3995 if (++last == adapter->num_tx_desc)
/* Pull the device's completion writes into host memory first. */
3999 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
4000 BUS_DMASYNC_POSTREAD);
4002 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
4003 /* We clean the range of the packet */
4004 while (first != done) {
4005 tx_desc->upper.data = 0;
4006 tx_desc->lower.data = 0;
4007 tx_desc->buffer_addr = 0;
4008 ++num_avail; ++cleaned;
4010 if (tx_buffer->m_head) {
4012 bus_dmamap_sync(adapter->txtag,
4014 BUS_DMASYNC_POSTWRITE);
4015 bus_dmamap_unload(adapter->txtag,
4018 m_freem(tx_buffer->m_head);
4019 tx_buffer->m_head = NULL;
4021 tx_buffer->next_eop = -1;
4023 if (++first == adapter->num_tx_desc)
4026 tx_buffer = &adapter->tx_buffer_area[first];
4027 tx_desc = &adapter->tx_desc_base[first];
4029 /* See if we can continue to the next packet */
4030 last = tx_buffer->next_eop;
4032 eop_desc = &adapter->tx_desc_base[last];
4033 /* Get new done point */
4034 if (++last == adapter->num_tx_desc) last = 0;
/* Write back the zeroed descriptors before re-arming the ring. */
4039 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
4040 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4042 adapter->next_tx_to_clean = first;
4045 * If we have enough room, clear IFF_DRV_OACTIVE to
4046 * tell the stack that it is OK to send packets.
4047 * If there are no pending descriptors, clear the timeout.
4049 if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
4050 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4051 if (num_avail == adapter->num_tx_desc) {
4052 adapter->watchdog_timer = 0;
4053 adapter->num_tx_desc_avail = num_avail;
4058 /* If any descriptors cleaned, reset the watchdog */
4060 adapter->watchdog_timer = EM_TX_TIMEOUT;
4061 adapter->num_tx_desc_avail = num_avail;
4065 /*********************************************************************
4067 * When Link is lost sometimes there is work still in the TX ring
4068 * which will result in a watchdog, rather than allow that do an
4069 * attempted cleanup and then reinit here. Note that this has been
4070 * seen mostly with fiber adapters.
4072 **********************************************************************/
/*
 * Link-down TX drain: runs em_txeof() (elided in this listing) under
 * the TX lock; if descriptors still remain pending, zeroes the
 * watchdog and reinitializes the adapter rather than letting the
 * watchdog fire.
 */
4074 em_tx_purge(struct adapter *adapter)
4076 if ((!adapter->link_active) && (adapter->watchdog_timer)) {
4077 EM_TX_LOCK(adapter);
4079 EM_TX_UNLOCK(adapter);
4080 if (adapter->watchdog_timer) { /* Still not clean? */
4081 adapter->watchdog_timer = 0;
4082 em_init_locked(adapter);
4087 /*********************************************************************
4089 * Get a buffer from system mbuf buffer pool.
4091 **********************************************************************/
/*
 * Refills RX slot i: allocates a new cluster mbuf, DMA-loads it via
 * the preallocated spare map, swaps the spare map with the slot's map
 * (so a loaded map is always ready), and points the RX descriptor at
 * the new buffer.  Returns non-zero (elided paths) on failure.
 */
4093 em_get_buf(struct adapter *adapter, int i)
4096 bus_dma_segment_t segs[1];
4098 struct em_buffer *rx_buffer;
4101 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
4103 adapter->mbuf_cluster_failed++;
4106 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Shift payload 2 bytes so the IP header lands 4-byte aligned. */
4108 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
4109 m_adj(m, ETHER_ALIGN);
4112 * Using memory from the mbuf cluster pool, invoke the
4113 * bus_dma machinery to arrange the memory mapping.
4115 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
4116 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
4122 /* If nsegs is wrong then the stack is corrupt. */
4123 KASSERT(nsegs == 1, ("Too many segments returned!"));
4125 rx_buffer = &adapter->rx_buffer_area[i];
4126 if (rx_buffer->m_head != NULL)
4127 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap slot map <-> spare map; the spare now holds the old mapping. */
4129 map = rx_buffer->map;
4130 rx_buffer->map = adapter->rx_sparemap;
4131 adapter->rx_sparemap = map;
4132 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
4133 rx_buffer->m_head = m;
/* Hand the buffer's bus address to the hardware descriptor. */
4135 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
4139 /*********************************************************************
4141 * Allocate memory for rx_buffer structures. Since we use one
4142 * rx_buffer per received packet, the maximum number of rx_buffer's
4143 * that we'll need is equal to the number of receive descriptors
4144 * that we've allocated.
4146 **********************************************************************/
/*
 * Allocates the rx_buffer array, creates the RX DMA tag (one cluster
 * per packet), the spare map used by em_get_buf(), and one map per
 * descriptor.  Failure paths (elided here) unwind through
 * em_free_receive_structures().
 * NOTE(review): listing decimated - verify against the full source.
 */
4148 em_allocate_receive_structures(struct adapter *adapter)
4150 device_t dev = adapter->dev;
4151 struct em_buffer *rx_buffer;
4154 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
4155 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
4156 if (adapter->rx_buffer_area == NULL) {
4157 device_printf(dev, "Unable to allocate rx_buffer memory\n");
/* bus_get_dma_tag() only exists on FreeBSD >= 7; else NULL parent. */
4161 #if __FreeBSD_version >= 700000
4162 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
4164 error = bus_dma_tag_create(NULL, /* parent */
4166 1, 0, /* alignment, bounds */
4167 BUS_SPACE_MAXADDR, /* lowaddr */
4168 BUS_SPACE_MAXADDR, /* highaddr */
4169 NULL, NULL, /* filter, filterarg */
4170 MCLBYTES, /* maxsize */
4172 MCLBYTES, /* maxsegsize */
4174 NULL, /* lockfunc */
4178 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
4183 /* Create the spare map (used by getbuf) */
4184 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4185 &adapter->rx_sparemap);
4187 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
4192 rx_buffer = adapter->rx_buffer_area;
4193 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4194 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4197 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
4206 em_free_receive_structures(adapter);
4210 /*********************************************************************
4212 * (Re)initialize receive structures.
4214 **********************************************************************/
/*
 * Zeroes the RX descriptor ring, frees any currently-attached mbufs,
 * then refills every slot via em_get_buf() and resets the ring index.
 */
4216 em_setup_receive_structures(struct adapter *adapter)
4218 struct em_buffer *rx_buffer;
4221 /* Reset descriptor ring */
4222 bzero(adapter->rx_desc_base,
4223 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
4225 /* Free current RX buffers. */
4226 rx_buffer = adapter->rx_buffer_area;
4227 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4228 if (rx_buffer->m_head != NULL) {
4229 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4230 BUS_DMASYNC_POSTREAD);
4231 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
4232 m_freem(rx_buffer->m_head);
4233 rx_buffer->m_head = NULL;
4237 /* Allocate new ones. */
4238 for (i = 0; i < adapter->num_rx_desc; i++) {
4239 error = em_get_buf(adapter, i);
4244 /* Setup our descriptor pointers */
4245 adapter->next_rx_desc_to_check = 0;
/* Push the refilled ring to memory before the RX unit is enabled. */
4246 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4247 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4252 /*********************************************************************
4254 * Enable receive unit.
4256 **********************************************************************/
/* Interrupt throttling target; ITR register counts in 256 ns units. */
4257 #define MAX_INTS_PER_SEC 8000
4258 #define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256)
/*
 * Programs the hardware RX engine: disables RX, configures interrupt
 * delay/throttling, ring base/length, RCTL buffer sizing and filter
 * bits, checksum offload, the 82573 RDTR workaround, then re-enables
 * RX and arms the head/tail pointers.
 */
4261 em_initialize_receive_unit(struct adapter *adapter)
4263 struct ifnet *ifp = adapter->ifp;
4267 INIT_DEBUGOUT("em_initialize_receive_unit: begin");
4270 * Make sure receives are disabled while setting
4271 * up the descriptor ring
4273 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4274 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
/* RADV/ITR only exist on 82540 and later. */
4276 if (adapter->hw.mac.type >= e1000_82540) {
4277 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
4278 adapter->rx_abs_int_delay.value);
4280 * Set the interrupt throttling rate. Value is calculated
4281 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
4283 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
4287 ** When using MSIX interrupts we need to throttle
4288 ** using the EITR register (82574 only)
4291 for (int i = 0; i < 4; i++)
4292 E1000_WRITE_REG(&adapter->hw,
4293 E1000_EITR_82574(i), DEFAULT_ITR);
4295 /* Disable accelerated acknowledge */
4296 if (adapter->hw.mac.type == e1000_82574)
4297 E1000_WRITE_REG(&adapter->hw,
4298 E1000_RFCTL, E1000_RFCTL_ACK_DIS);
4300 /* Setup the Base and Length of the Rx Descriptor Ring */
4301 bus_addr = adapter->rxdma.dma_paddr;
4302 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
4303 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
4304 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
4305 (u32)(bus_addr >> 32));
4306 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
4309 /* Setup the Receive Control Register */
4310 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4311 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
4312 E1000_RCTL_RDMTS_HALF |
4313 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4315 /* Make sure VLAN Filters are off */
4316 rctl &= ~E1000_RCTL_VFE;
/* 82543 TBI workaround may require accepting bad ("SBP") packets. */
4318 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
4319 rctl |= E1000_RCTL_SBP;
4321 rctl &= ~E1000_RCTL_SBP;
/* Pick the HW buffer-size encoding matching our cluster size. */
4323 switch (adapter->rx_buffer_len) {
4326 rctl |= E1000_RCTL_SZ_2048;
4329 rctl |= E1000_RCTL_SZ_4096 |
4330 E1000_RCTL_BSEX | E1000_RCTL_LPE;
4333 rctl |= E1000_RCTL_SZ_8192 |
4334 E1000_RCTL_BSEX | E1000_RCTL_LPE;
4337 rctl |= E1000_RCTL_SZ_16384 |
4338 E1000_RCTL_BSEX | E1000_RCTL_LPE;
/* Long-packet enable follows the configured MTU. */
4342 if (ifp->if_mtu > ETHERMTU)
4343 rctl |= E1000_RCTL_LPE;
4345 rctl &= ~E1000_RCTL_LPE;
4347 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
4348 if ((adapter->hw.mac.type >= e1000_82543) &&
4349 (ifp->if_capenable & IFCAP_RXCSUM)) {
4350 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
4351 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
4352 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
4356 ** XXX TEMPORARY WORKAROUND: on some systems with 82573
4357 ** long latencies are observed, like Lenovo X60. This
4358 ** change eliminates the problem, but since having positive
4359 ** values in RDTR is a known source of problems on other
4360 ** platforms another solution is being sought.
4362 if (adapter->hw.mac.type == e1000_82573)
4363 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);
4365 /* Enable Receives */
4366 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4369 * Setup the HW Rx Head and
4370 * Tail Descriptor Pointers
4372 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
4373 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
4378 /*********************************************************************
4380 * Free receive related data structures.
4382 **********************************************************************/
/*
 * Tears down everything em_allocate_receive_structures() built: the
 * spare map, every slot's mbuf and DMA map, the rx_buffer array, and
 * the RX DMA tag.  Tolerates partial allocation via NULL checks.
 */
4384 em_free_receive_structures(struct adapter *adapter)
4386 struct em_buffer *rx_buffer;
4389 INIT_DEBUGOUT("free_receive_structures: begin");
4391 if (adapter->rx_sparemap) {
4392 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
4393 adapter->rx_sparemap = NULL;
4396 /* Cleanup any existing buffers */
4397 if (adapter->rx_buffer_area != NULL) {
4398 rx_buffer = adapter->rx_buffer_area;
4399 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
/* Slot holds an mbuf: sync, unload, free the chain. */
4400 if (rx_buffer->m_head != NULL) {
4401 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4402 BUS_DMASYNC_POSTREAD);
4403 bus_dmamap_unload(adapter->rxtag,
4405 m_freem(rx_buffer->m_head);
4406 rx_buffer->m_head = NULL;
/* Empty slot may still have a loaded map: unload only. */
4407 } else if (rx_buffer->map != NULL)
4408 bus_dmamap_unload(adapter->rxtag,
4410 if (rx_buffer->map != NULL) {
4411 bus_dmamap_destroy(adapter->rxtag,
4413 rx_buffer->map = NULL;
4418 if (adapter->rx_buffer_area != NULL) {
4419 free(adapter->rx_buffer_area, M_DEVBUF);
4420 adapter->rx_buffer_area = NULL;
4423 if (adapter->rxtag != NULL) {
4424 bus_dma_tag_destroy(adapter->rxtag);
4425 adapter->rxtag = NULL;
4429 /*********************************************************************
4431 * This routine executes in interrupt context. It replenishes
4432 * the mbufs in the descriptor and sends data which has been
4433 * dma'ed into host memory to upper layer.
4435 * We loop at most count times if count is > 0, or until done if
4438 * For polling we also now return the number of cleaned packets
4439 *********************************************************************/
/*
 * RX completion loop: for each DD-marked descriptor, validates the
 * frame (including the 82543 TBI carrier-extension workaround),
 * refills the slot via em_get_buf(), chains multi-descriptor frames
 * through fmp/lmp, strips CRC, attaches checksum/VLAN metadata, and
 * hands completed packets to if_input with the RX lock dropped.
 * NOTE(review): listing decimated - accept_frame/eop handling, several
 * braces and the discard path are partially elided.
 */
4441 em_rxeof(struct adapter *adapter, int count)
/* NOTE(review): stray double semicolon below - harmless, but should be
 * a single ';' in the full source. */
4443 struct ifnet *ifp = adapter->ifp;;
4445 u8 status, accept_frame = 0, eop = 0;
4446 u16 len, desc_len, prev_len_adj;
4448 struct e1000_rx_desc *current_desc;
4450 EM_RX_LOCK(adapter);
4451 i = adapter->next_rx_desc_to_check;
4452 current_desc = &adapter->rx_desc_base[i];
4453 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4454 BUS_DMASYNC_POSTREAD);
/* Fast exit: nothing has completed since the last pass. */
4456 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
4457 EM_RX_UNLOCK(adapter);
4461 while ((current_desc->status & E1000_RXD_STAT_DD) &&
4463 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4464 struct mbuf *m = NULL;
4466 mp = adapter->rx_buffer_area[i].m_head;
4468 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
4469 * needs to access the last received byte in the mbuf.
4471 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
4472 BUS_DMASYNC_POSTREAD);
4476 desc_len = le16toh(current_desc->length);
4477 status = current_desc->status;
4478 if (status & E1000_RXD_STAT_EOP) {
/* Last fragment: strip the Ethernet CRC from the reported length. */
4481 if (desc_len < ETHER_CRC_LEN) {
4483 prev_len_adj = ETHER_CRC_LEN - desc_len;
4485 len = desc_len - ETHER_CRC_LEN;
/* Error bits set: may still accept under the 82543 TBI workaround. */
4491 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
4493 u32 pkt_len = desc_len;
4495 if (adapter->fmp != NULL)
4496 pkt_len += adapter->fmp->m_pkthdr.len;
4498 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
4499 if (TBI_ACCEPT(&adapter->hw, status,
4500 current_desc->errors, pkt_len, last_byte,
4501 adapter->min_frame_size, adapter->max_frame_size)) {
4502 e1000_tbi_adjust_stats_82543(&adapter->hw,
4503 &adapter->stats, pkt_len,
4504 adapter->hw.mac.addr,
4505 adapter->max_frame_size);
/* Refill the slot; on failure the old mbuf is recycled below. */
4513 if (em_get_buf(adapter, i) != 0) {
4518 /* Assign correct length to the current fragment */
4521 if (adapter->fmp == NULL) {
4522 mp->m_pkthdr.len = len;
4523 adapter->fmp = mp; /* Store the first mbuf */
4526 /* Chain mbuf's together */
4527 mp->m_flags &= ~M_PKTHDR;
4529 * Adjust length of previous mbuf in chain if
4530 * we received less than 4 bytes in the last
4533 if (prev_len_adj > 0) {
4534 adapter->lmp->m_len -= prev_len_adj;
4535 adapter->fmp->m_pkthdr.len -=
4538 adapter->lmp->m_next = mp;
4539 adapter->lmp = adapter->lmp->m_next;
4540 adapter->fmp->m_pkthdr.len += len;
/* End of packet: finalize metadata and queue for if_input. */
4544 adapter->fmp->m_pkthdr.rcvif = ifp;
4546 em_receive_checksum(adapter, current_desc,
4548 #ifndef __NO_STRICT_ALIGNMENT
4549 if (adapter->max_frame_size >
4550 (MCLBYTES - ETHER_ALIGN) &&
4551 em_fixup_rx(adapter) != 0)
4554 if (status & E1000_RXD_STAT_VP) {
4555 #if __FreeBSD_version < 700000
4556 VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
4557 (le16toh(current_desc->special) &
4558 E1000_RXD_SPC_VLAN_MASK));
4560 adapter->fmp->m_pkthdr.ether_vtag =
4561 (le16toh(current_desc->special) &
4562 E1000_RXD_SPC_VLAN_MASK);
4563 adapter->fmp->m_flags |= M_VLANTAG;
4566 #ifndef __NO_STRICT_ALIGNMENT
4570 adapter->fmp = NULL;
4571 adapter->lmp = NULL;
/* Rejected frame: recycle the already-loaded DMA map and mbuf. */
4576 /* Reuse loaded DMA map and just update mbuf chain */
4577 mp = adapter->rx_buffer_area[i].m_head;
4578 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
4579 mp->m_data = mp->m_ext.ext_buf;
4581 if (adapter->max_frame_size <=
4582 (MCLBYTES - ETHER_ALIGN))
4583 m_adj(mp, ETHER_ALIGN);
4584 if (adapter->fmp != NULL) {
4585 m_freem(adapter->fmp);
4586 adapter->fmp = NULL;
4587 adapter->lmp = NULL;
4592 /* Zero out the receive descriptors status. */
4593 current_desc->status = 0;
4594 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4595 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4597 /* Advance our pointers to the next descriptor. */
4598 if (++i == adapter->num_rx_desc)
4600 /* Call into the stack */
/* Drop the RX lock across if_input: the stack may re-enter the driver. */
4602 adapter->next_rx_desc_to_check = i;
4603 EM_RX_UNLOCK(adapter);
4604 (*ifp->if_input)(ifp, m);
4605 EM_RX_LOCK(adapter);
/* Re-read the index: it may have moved while unlocked. */
4607 i = adapter->next_rx_desc_to_check;
4609 current_desc = &adapter->rx_desc_base[i];
4611 adapter->next_rx_desc_to_check = i;
4613 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
4615 i = adapter->num_rx_desc - 1;
4616 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
4617 EM_RX_UNLOCK(adapter);
4621 #ifndef __NO_STRICT_ALIGNMENT
/*
 * em_fixup_rx - realign a received jumbo frame on strict-alignment
 * architectures by moving the Ethernet header (either in place within
 * the first mbuf, or into a freshly allocated header mbuf prepended to
 * the chain).  On allocation failure the whole frame is dropped.
 */
4623 * When jumbo frames are enabled we should realign entire payload on
4624 * architectures with strict alignment. This is serious design mistake of 8254x
4625 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
4626 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
4627 * payload. On architectures without strict alignment restrictions 8254x still
4628 * performs unaligned memory access which would reduce the performance too.
4629 * To avoid copying over an entire frame to align, we allocate a new mbuf and
4630 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
4631 * existing mbuf chain.
4633 * Be aware, best performance of the 8254x is achieved only when jumbo frame is
4634 * not used at all on architectures with strict alignment.
4637 em_fixup_rx(struct adapter *adapter)
/* Room in the cluster: slide the header up in place. */
4644 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
4645 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
4646 m->m_data += ETHER_HDR_LEN;
/* Otherwise prepend a new mbuf carrying just the Ethernet header. */
4648 MGETHDR(n, M_DONTWAIT, MT_DATA);
4650 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
4651 m->m_data += ETHER_HDR_LEN;
4652 m->m_len -= ETHER_HDR_LEN;
4653 n->m_len = ETHER_HDR_LEN;
4654 M_MOVE_PKTHDR(n, m);
/* MGETHDR failed: count the drop and discard the chain. */
4658 adapter->dropped_pkts++;
4659 m_freem(adapter->fmp);
4660 adapter->fmp = NULL;
4669 /*********************************************************************
4671 * Verify that the hardware indicated that the checksum is valid.
4672 * Inform the stack about the status of checksum so that stack
4673 * doesn't spend time verifying the checksum.
4675 *********************************************************************/
4677 em_receive_checksum(struct adapter *adapter,
4678 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
/* Skip offload reporting entirely when the MAC predates checksum
 * offload (pre-82543) or the descriptor's "Ignore Checksum" bit is set. */
4680 /* 82543 or newer only */
4681 if ((adapter->hw.mac.type < e1000_82543) ||
4682 /* Ignore Checksum bit is set */
4683 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
4684 mp->m_pkthdr.csum_flags = 0;
/* IP header checksum was computed by hardware; report good/bad to stack. */
4688 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
4690 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
4691 /* IP Checksum Good */
4692 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4693 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4696 mp->m_pkthdr.csum_flags = 0;
/* TCP/UDP checksum verified in hardware: tell the stack the full
 * pseudo-header checksum passed (0xffff == "already valid"). */
4700 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
4702 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
4703 mp->m_pkthdr.csum_flags |=
4704 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4705 mp->m_pkthdr.csum_data = htons(0xffff);
4710 #if __FreeBSD_version >= 700029
/* VLAN-registration event hook: records a newly configured VLAN tag in the
 * software shadow of the VLAN Filter Table Array (VFTA). */
4712 * This routine is run via a vlan
4716 em_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4718 struct adapter *adapter = ifp->if_softc;
4721 if (ifp->if_softc != arg) /* Not our event */
4724 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
/* VFTA is a bitmap: 128 32-bit words, tag's upper bits pick the word. */
4727 index = (vtag >> 5) & 0x7F;
4729 em_shadow_vfta[index] |= (1 << bit);
4730 ++adapter->num_vlans;
4731 /* Re-init to load the changes */
/* VLAN-unregistration event hook: clears the tag's bit in the shadow VFTA
 * (mirror of em_register_vlan above). */
4736 * This routine is run via a vlan
4740 em_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4742 struct adapter *adapter = ifp->if_softc;
4745 if (ifp->if_softc != arg)
4748 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
4751 index = (vtag >> 5) & 0x7F;
4753 em_shadow_vfta[index] &= ~(1 << bit);
4754 --adapter->num_vlans;
4755 /* Re-init to load the changes */
/* Reprogram VLAN hardware state after a soft reset: repopulate the VFTA
 * from the software shadow, enable VLAN tag stripping (CTRL.VME) and the
 * VLAN filter (RCTL.VFE), and bump the max frame size for the tag. */
4760 em_setup_vlan_hw_support(struct adapter *adapter)
4762 struct e1000_hw *hw = &adapter->hw;
4766 ** We get here thru init_locked, meaning
4767 ** a soft reset, this has already cleared
4768 ** the VFTA and other state, so if there
4769 ** have been no vlan's registered do nothing.
4771 if (adapter->num_vlans == 0)
4775 ** A soft reset zero's out the VFTA, so
4776 ** we need to repopulate it now.
4778 for (int i = 0; i < EM_VFTA_SIZE; i++)
4779 if (em_shadow_vfta[i] != 0)
4780 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
4781 i, em_shadow_vfta[i]);
/* Enable hardware VLAN tag stripping on receive. */
4783 reg = E1000_READ_REG(hw, E1000_CTRL);
4784 reg |= E1000_CTRL_VME;
4785 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4787 /* Enable the Filter Table */
4788 reg = E1000_READ_REG(hw, E1000_RCTL);
4789 reg &= ~E1000_RCTL_CFIEN;
4790 reg |= E1000_RCTL_VFE;
4791 E1000_WRITE_REG(hw, E1000_RCTL, reg);
4793 /* Update the frame size */
4794 E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
4795 adapter->max_frame_size + VLAN_TAG_SIZE);
/* Unmask device interrupts; in MSIX mode also enable auto-clear (EIAC)
 * and fold the MSIX vectors into the interrupt mask. */
4800 em_enable_intr(struct adapter *adapter)
4802 struct e1000_hw *hw = &adapter->hw;
4803 u32 ims_mask = IMS_ENABLE_MASK;
4805 if (adapter->msix) {
4806 E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
4807 ims_mask |= EM_MSIX_MASK;
4809 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
/* Mask all device interrupts: clear the MSIX auto-clear register
 * (presumably guarded by an msix check on a line not visible in this
 * listing) and set every bit in the Interrupt Mask Clear register. */
4813 em_disable_intr(struct adapter *adapter)
4815 struct e1000_hw *hw = &adapter->hw;
4818 E1000_WRITE_REG(hw, EM_EIAC, 0);
4819 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
4823 * Bit of a misnomer, what this really means is
4824 * to enable OS management of the system... aka
4825 * to disable special hardware management features
4828 em_init_manageability(struct adapter *adapter)
4830 /* A shared code workaround */
4831 #define E1000_82542_MANC2H E1000_MANC2H
4832 if (adapter->has_manage) {
4833 int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
4834 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4836 /* disable hardware interception of ARP */
4837 manc &= ~(E1000_MANC_ARP_EN);
4839 /* enable receiving management packets to the host */
4840 if (adapter->hw.mac.type >= e1000_82571) {
4841 manc |= E1000_MANC_EN_MNG2HOST;
/* Route management traffic on ports 623 (ASF/IPMI RMCP) and 664
 * (secure RMCP) to the host instead of the management controller. */
4842 #define E1000_MNG2HOST_PORT_623 (1 << 5)
4843 #define E1000_MNG2HOST_PORT_664 (1 << 6)
4844 manc2h |= E1000_MNG2HOST_PORT_623;
4845 manc2h |= E1000_MNG2HOST_PORT_664;
4846 E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
4849 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4854 * Give control back to hardware management
4855 * controller if there is one.
4858 em_release_manageability(struct adapter *adapter)
4860 if (adapter->has_manage) {
4861 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4863 /* re-enable hardware interception of ARP */
4864 manc |= E1000_MANC_ARP_EN;
/* Undo em_init_manageability: stop routing management packets to host. */
4866 if (adapter->hw.mac.type >= e1000_82571)
4867 manc &= ~E1000_MANC_EN_MNG2HOST;
4869 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4874 * em_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4875 * For ASF and Pass Through versions of f/w this means that
4876 * the driver is loaded. For AMT version (only with 82573)
4877 * of the f/w this means that the network i/f is open.
4881 em_get_hw_control(struct adapter *adapter)
4885 /* Let firmware know the driver has taken over */
4886 switch (adapter->hw.mac.type) {
/* 82573 path (case label not visible in this listing): DRV_LOAD lives
 * in the SWSM register on that MAC. */
4888 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4889 E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4890 swsm | E1000_SWSM_DRV_LOAD);
/* Newer MACs signal driver load via CTRL_EXT.DRV_LOAD instead. */
4894 case e1000_80003es2lan:
4897 case e1000_ich10lan:
4898 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4899 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4900 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4908 * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4909 * For ASF and Pass Through versions of f/w this means that the
4910 * driver is no longer loaded. For AMT version (only with 82573)
4911 * of the f/w this means that the network i/f is closed.
4915 em_release_hw_control(struct adapter *adapter)
4919 /* Let firmware taken over control of h/w */
4920 switch (adapter->hw.mac.type) {
/* 82573 path (case label not visible here): clear DRV_LOAD in SWSM. */
4922 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4923 E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4924 swsm & ~E1000_SWSM_DRV_LOAD);
/* Newer MACs: clear DRV_LOAD in CTRL_EXT, mirroring em_get_hw_control. */
4928 case e1000_80003es2lan:
4931 case e1000_ich10lan:
4932 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4933 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4934 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
/* Validate a MAC address: reject multicast/broadcast addresses (bit 0 of
 * the first octet set) and the all-zero address. */
4943 em_is_valid_ether_addr(u8 *addr)
4945 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4947 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4955 * Enable PCI Wake On Lan capability
4958 em_enable_wakeup(device_t dev)
4963 /* First find the capabilities pointer*/
4964 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
4965 /* Read the PM Capabilities */
4966 id = pci_read_config(dev, cap, 1);
4967 if (id != PCIY_PMG) /* Something wrong */
/* NOTE(review): this walks only the FIRST capability entry; it assumes
 * Power Management is the first capability in the list. */
4969 /* OK, we have the power capabilities, so
4970 now get the status register */
4971 cap += PCIR_POWER_STATUS;
4972 status = pci_read_config(dev, cap, 2);
/* Assert PME status and PME enable so the device can wake the system. */
4973 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4974 pci_write_config(dev, cap, status, 2);
4979 /*********************************************************************
4980 * 82544 Coexistence issue workaround.
4981 * There are 2 issues.
4982 * 1. Transmit Hang issue.
4983 * To detect this issue, following equation can be used...
4984 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4985 * If SUM[3:0] is in between 1 to 4, we will have this issue.
4988 * To detect this issue, following equation can be used...
4989 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4990 * If SUM[3:0] is in between 9 to c, we will have this issue.
4994 * Make sure we do not have ending address
4995 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4997 *************************************************************************/
4999 em_fill_descriptors (bus_addr_t address, u32 length,
5000 PDESC_ARRAY desc_array)
5002 u32 safe_terminator;
5004 /* Since issue is sensitive to length and address.*/
5005 /* Let us first check the address...*/
/* Early-out for a "safe" address (the guarding condition is on a line
 * not visible in this listing): use a single descriptor as-is. */
5007 desc_array->descriptor[0].address = address;
5008 desc_array->descriptor[0].length = length;
5009 desc_array->elements = 1;
5010 return (desc_array->elements);
/* Compute SUM[3:0] = ADDR[2:0] + SIZE[3:0] per the erratum equations. */
5012 safe_terminator = (u32)((((u32)address & 0x7) +
5013 (length & 0xF)) & 0xF);
5014 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
5015 if (safe_terminator == 0 ||
5016 (safe_terminator > 4 &&
5017 safe_terminator < 9) ||
5018 (safe_terminator > 0xC &&
5019 safe_terminator <= 0xF)) {
5020 desc_array->descriptor[0].address = address;
5021 desc_array->descriptor[0].length = length;
5022 desc_array->elements = 1;
5023 return (desc_array->elements);
/* Unsafe ending address: split the buffer so the second descriptor
 * carries the final 4 bytes, moving the ending address off the bad
 * SUM values. */
5026 desc_array->descriptor[0].address = address;
5027 desc_array->descriptor[0].length = length - 4;
5028 desc_array->descriptor[1].address = address + (length - 4);
5029 desc_array->descriptor[1].length = 4;
5030 desc_array->elements = 2;
5031 return (desc_array->elements);
5034 /**********************************************************************
5036 * Update the board statistics counters.
5038 **********************************************************************/
5040 em_update_stats_counters(struct adapter *adapter)
/* Symbol/sequence error counters are only meaningful on copper media or
 * when the link is up; gate the reads accordingly. */
5044 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
5045 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
5046 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
5047 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
/* The hardware statistics registers are clear-on-read, so each value is
 * accumulated into the software counters. */
5049 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
5050 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
5051 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
5052 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
5054 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
5055 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
5056 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
5057 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
5058 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
5059 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
5060 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
5061 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
5062 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
5063 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
5064 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
5065 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
5066 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
5067 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
5068 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
5069 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
5070 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
5071 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
5072 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
5073 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
5075 /* For the 64-bit byte counters the low dword must be read first. */
5076 /* Both registers clear on the read of the high dword */
5078 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
5079 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);
5081 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
5082 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
5083 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
5084 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
5085 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
5087 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
5088 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
5090 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
5091 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
5092 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
5093 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
5094 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
5095 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
5096 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
5097 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
5098 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
5099 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* These counters only exist on 82543 and later MACs. */
5101 if (adapter->hw.mac.type >= e1000_82543) {
5102 adapter->stats.algnerrc +=
5103 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
5104 adapter->stats.rxerrc +=
5105 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
5106 adapter->stats.tncrs +=
5107 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
5108 adapter->stats.cexterr +=
5109 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
5110 adapter->stats.tsctc +=
5111 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
5112 adapter->stats.tsctfc +=
5113 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
/* Publish aggregate error/collision totals to the ifnet counters. */
5117 ifp->if_collisions = adapter->stats.colc;
5120 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
5121 adapter->stats.crcerrs + adapter->stats.algnerrc +
5122 adapter->stats.ruc + adapter->stats.roc +
5123 adapter->stats.mpc + adapter->stats.cexterr;
5126 ifp->if_oerrors = adapter->stats.ecol +
5127 adapter->stats.latecol + adapter->watchdog_events;
5131 /**********************************************************************
5133 * This routine is called only when em_display_debug_stats is enabled.
5134 * This routine provides a way to take a look at important statistics
5135 * maintained by the driver and hardware.
5137 **********************************************************************/
5139 em_print_debug_info(struct adapter *adapter)
5141 device_t dev = adapter->dev;
5142 u8 *hw_addr = adapter->hw.hw_addr;
5144 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
5145 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
5146 E1000_READ_REG(&adapter->hw, E1000_CTRL),
5147 E1000_READ_REG(&adapter->hw, E1000_RCTL));
/* PBA register packs the Tx allocation in the high word, Rx in the low. */
5148 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
5149 ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
5150 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
5151 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
5152 adapter->hw.fc.high_water,
5153 adapter->hw.fc.low_water);
5154 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
5155 E1000_READ_REG(&adapter->hw, E1000_TIDV),
5156 E1000_READ_REG(&adapter->hw, E1000_TADV));
5157 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
5158 E1000_READ_REG(&adapter->hw, E1000_RDTR),
5159 E1000_READ_REG(&adapter->hw, E1000_RADV));
5160 device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
5161 (long long)adapter->tx_fifo_wrk_cnt,
5162 (long long)adapter->tx_fifo_reset_cnt);
/* Dump live hardware ring head/tail pointers for queue 0. */
5163 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
5164 E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
5165 E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
5166 device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
5167 E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
5168 E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
5169 device_printf(dev, "Num Tx descriptors avail = %d\n",
5170 adapter->num_tx_desc_avail);
5171 device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
5172 adapter->no_tx_desc_avail1);
5173 device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
5174 adapter->no_tx_desc_avail2);
5175 device_printf(dev, "Std mbuf failed = %ld\n",
5176 adapter->mbuf_alloc_failed);
5177 device_printf(dev, "Std mbuf cluster failed = %ld\n",
5178 adapter->mbuf_cluster_failed);
5179 device_printf(dev, "Driver dropped packets = %ld\n",
5180 adapter->dropped_pkts);
5181 device_printf(dev, "Driver tx dma failure in encap = %ld\n",
5182 adapter->no_tx_dma_setup);
/* Dump the accumulated hardware statistics (see em_update_stats_counters)
 * to the console; used by the sysctl debug handler. */
5186 em_print_hw_stats(struct adapter *adapter)
5188 device_t dev = adapter->dev;
5190 device_printf(dev, "Excessive collisions = %lld\n",
5191 (long long)adapter->stats.ecol);
5192 #if (DEBUG_HW > 0) /* Don't output these errors normally */
5193 device_printf(dev, "Symbol errors = %lld\n",
5194 (long long)adapter->stats.symerrs);
5196 device_printf(dev, "Sequence errors = %lld\n",
5197 (long long)adapter->stats.sec);
5198 device_printf(dev, "Defer count = %lld\n",
5199 (long long)adapter->stats.dc);
5200 device_printf(dev, "Missed Packets = %lld\n",
5201 (long long)adapter->stats.mpc);
5202 device_printf(dev, "Receive No Buffers = %lld\n",
5203 (long long)adapter->stats.rnbc);
5204 /* RLEC is inaccurate on some hardware, calculate our own. */
5205 device_printf(dev, "Receive Length Errors = %lld\n",
5206 ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
5207 device_printf(dev, "Receive errors = %lld\n",
5208 (long long)adapter->stats.rxerrc);
5209 device_printf(dev, "Crc errors = %lld\n",
5210 (long long)adapter->stats.crcerrs);
5211 device_printf(dev, "Alignment errors = %lld\n",
5212 (long long)adapter->stats.algnerrc);
5213 device_printf(dev, "Collision/Carrier extension errors = %lld\n",
5214 (long long)adapter->stats.cexterr);
5215 device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
5216 device_printf(dev, "watchdog timeouts = %ld\n",
5217 adapter->watchdog_events);
5218 device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
5219 " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
5220 adapter->tx_irq , adapter->link_irq);
5221 device_printf(dev, "XON Rcvd = %lld\n",
5222 (long long)adapter->stats.xonrxc);
5223 device_printf(dev, "XON Xmtd = %lld\n",
5224 (long long)adapter->stats.xontxc);
5225 device_printf(dev, "XOFF Rcvd = %lld\n",
5226 (long long)adapter->stats.xoffrxc);
5227 device_printf(dev, "XOFF Xmtd = %lld\n",
5228 (long long)adapter->stats.xofftxc);
5229 device_printf(dev, "Good Packets Rcvd = %lld\n",
5230 (long long)adapter->stats.gprc);
5231 device_printf(dev, "Good Packets Xmtd = %lld\n",
5232 (long long)adapter->stats.gptc);
5233 device_printf(dev, "TSO Contexts Xmtd = %lld\n",
5234 (long long)adapter->stats.tsctc);
5235 device_printf(dev, "TSO Contexts Failed = %lld\n",
5236 (long long)adapter->stats.tsctfc);
5239 /**********************************************************************
5241 * This routine provides a way to dump out the adapter eeprom,
5242 * often a useful debug/service tool. This only dumps the first
5243 * 32 words, stuff that matters is in that extent.
5245 **********************************************************************/
5247 em_print_nvm_info(struct adapter *adapter)
5252 /* Its a bit crude, but it gets the job done */
5253 printf("\nInterface EEPROM Dump:\n");
5254 printf("Offset\n0x0000 ");
/* 32 words, printed 8 per row; 'j' counts the column within a row. */
5255 for (i = 0, j = 0; i < 32; i++, j++) {
5256 if (j == 8) { /* Make the offset block */
5258 printf("\n0x00%x0 ",row);
5260 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
5261 printf("%04x ", eeprom_data);
/* Sysctl handler: writing a value triggers a debug dump — one value
 * prints driver/hardware debug info, another dumps the EEPROM (the
 * comparison lines selecting each case are not visible in this listing). */
5267 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5269 struct adapter *adapter;
5274 error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access or handler error: nothing to do. */
5276 if (error || !req->newptr)
5280 adapter = (struct adapter *)arg1;
5281 em_print_debug_info(adapter);
5284 * This value will cause a hex dump of the
5285 * first 32 16-bit words of the EEPROM to
5289 adapter = (struct adapter *)arg1;
5290 em_print_nvm_info(adapter);
/* Sysctl handler: writing the trigger value dumps the hardware statistics
 * via em_print_hw_stats (the value check is on a line not visible here). */
5298 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
5300 struct adapter *adapter;
5305 error = sysctl_handle_int(oidp, &result, 0, req);
5307 if (error || !req->newptr)
5311 adapter = (struct adapter *)arg1;
5312 em_print_hw_stats(adapter);
/* Sysctl handler for the interrupt-delay tunables: validates the new
 * microsecond value, converts it to hardware ticks and writes it into the
 * low 16 bits of the associated delay register under the core lock. */
5319 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
5321 struct em_int_delay_info *info;
5322 struct adapter *adapter;
5328 info = (struct em_int_delay_info *)arg1;
5329 usecs = info->value;
5330 error = sysctl_handle_int(oidp, &usecs, 0, req);
5331 if (error != 0 || req->newptr == NULL)
/* Reject values that do not fit in the 16-bit tick field. */
5333 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
5335 info->value = usecs;
5336 ticks = EM_USECS_TO_TICKS(usecs);
5338 adapter = info->adapter;
5340 EM_CORE_LOCK(adapter);
/* Preserve the register's upper bits; only the delay field changes. */
5341 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
5342 regval = (regval & ~0xffff) | (ticks & 0xffff);
5343 /* Handle a few special cases. */
5344 switch (info->offset) {
/* TIDV special-casing (case labels not visible in this listing): a zero
 * delay disables the IDE bit in the transmit command... */
5349 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
5350 /* Don't write 0 into the TIDV register. */
/* ...while a nonzero delay requires IDE so descriptors request delayed
 * interrupts. */
5353 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
5356 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
5357 EM_CORE_UNLOCK(adapter);
/* Register one interrupt-delay tunable as a read/write sysctl node whose
 * handler is em_sysctl_int_delay; 'info' carries the adapter, register
 * offset and current value for that handler. */
5362 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
5363 const char *description, struct em_int_delay_info *info,
5364 int offset, int value)
5366 info->adapter = adapter;
5367 info->offset = offset;
5368 info->value = value;
5369 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
5370 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5371 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5372 info, 0, em_sysctl_int_delay, "I", description);
5375 #ifndef EM_LEGACY_IRQ
5377 em_add_rx_process_limit(struct adapter *adapter, const char *name,
5378 const char *description, int *limit, int value)
5381 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
5382 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5383 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);