1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #if __FreeBSD_version >= 800000
43 #include <sys/buf_ring.h>
46 #include <sys/endian.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
49 #include <sys/malloc.h>
51 #include <sys/module.h>
53 #include <sys/socket.h>
54 #include <sys/sockio.h>
55 #include <sys/sysctl.h>
56 #include <sys/taskqueue.h>
57 #if __FreeBSD_version >= 700029
58 #include <sys/eventhandler.h>
60 #include <machine/bus.h>
61 #include <machine/resource.h>
64 #include <net/ethernet.h>
66 #include <net/if_arp.h>
67 #include <net/if_dl.h>
68 #include <net/if_media.h>
70 #include <net/if_types.h>
71 #include <net/if_vlan_var.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/in.h>
75 #include <netinet/if_ether.h>
76 #include <netinet/ip.h>
77 #include <netinet/ip6.h>
78 #include <netinet/tcp.h>
79 #include <netinet/udp.h>
81 #include <machine/in_cksum.h>
82 #include <dev/pci/pcivar.h>
83 #include <dev/pci/pcireg.h>
85 #include "e1000_api.h"
86 #include "e1000_82571.h"
89 /*********************************************************************
90 * Set this to one to display debug statistics
91 *********************************************************************/
/* Global debug toggle; non-zero requests display of debug statistics. */
92 int em_display_debug_stats = 0;
94 /*********************************************************************
96 *********************************************************************/
/* Human-readable driver version string. */
97 char em_driver_version[] = "6.9.25";
100 /*********************************************************************
101 * PCI Device ID Table
103 * Used by probe to select devices to load on
104 * Last field stores an index into e1000_strings
105 * Last entry must be all 0s
107 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
108 *********************************************************************/
110 static em_vendor_info_t em_vendor_info_array[] =
112 /* Intel(R) PRO/1000 Network Connection */
/* 82540 family */
113 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82541 family */
119 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82542 */
127 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82543 family */
129 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82544 family */
132 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82545 family */
137 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82546 family */
143 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
147 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
148 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
149 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
150 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
151 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
152 PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82547 family */
154 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
155 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
156 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82571 family */
158 { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
159 { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
160 { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
161 { 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL,
162 PCI_ANY_ID, PCI_ANY_ID, 0},
163 { 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD,
164 PCI_ANY_ID, PCI_ANY_ID, 0},
165 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
166 PCI_ANY_ID, PCI_ANY_ID, 0},
167 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
168 PCI_ANY_ID, PCI_ANY_ID, 0},
169 { 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
170 PCI_ANY_ID, PCI_ANY_ID, 0},
171 { 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
172 PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82572 family */
173 { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
174 { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
175 { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
176 { 0x8086, E1000_DEV_ID_82572EI, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82573 / 82583 family */
178 { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0},
179 { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0},
180 { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0},
181 { 0x8086, E1000_DEV_ID_82583V, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 80003ES2LAN family */
182 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
183 PCI_ANY_ID, PCI_ANY_ID, 0},
184 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
185 PCI_ANY_ID, PCI_ANY_ID, 0},
186 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
187 PCI_ANY_ID, PCI_ANY_ID, 0},
188 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
189 PCI_ANY_ID, PCI_ANY_ID, 0},
/* ICH8 family */
190 { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
191 { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
192 { 0x8086, E1000_DEV_ID_ICH8_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
193 { 0x8086, E1000_DEV_ID_ICH8_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
194 { 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
195 { 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
196 { 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
197 { 0x8086, E1000_DEV_ID_ICH8_82567V_3, PCI_ANY_ID, PCI_ANY_ID, 0},
/* ICH9 family */
198 { 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
199 { 0x8086, E1000_DEV_ID_ICH9_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
200 { 0x8086, E1000_DEV_ID_ICH9_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
201 { 0x8086, E1000_DEV_ID_ICH9_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
202 { 0x8086, E1000_DEV_ID_ICH9_IGP_M_V, PCI_ANY_ID, PCI_ANY_ID, 0},
203 { 0x8086, E1000_DEV_ID_ICH9_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
204 { 0x8086, E1000_DEV_ID_ICH9_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
205 { 0x8086, E1000_DEV_ID_ICH9_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
206 { 0x8086, E1000_DEV_ID_ICH9_BM, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82574 family */
207 { 0x8086, E1000_DEV_ID_82574L, PCI_ANY_ID, PCI_ANY_ID, 0},
208 { 0x8086, E1000_DEV_ID_82574LA, PCI_ANY_ID, PCI_ANY_ID, 0},
/* ICH10 family */
209 { 0x8086, E1000_DEV_ID_ICH10_R_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
210 { 0x8086, E1000_DEV_ID_ICH10_R_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
211 { 0x8086, E1000_DEV_ID_ICH10_R_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0},
212 { 0x8086, E1000_DEV_ID_ICH10_D_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
213 { 0x8086, E1000_DEV_ID_ICH10_D_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
/* PCH family */
214 { 0x8086, E1000_DEV_ID_PCH_M_HV_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
215 { 0x8086, E1000_DEV_ID_PCH_M_HV_LC, PCI_ANY_ID, PCI_ANY_ID, 0},
216 { 0x8086, E1000_DEV_ID_PCH_D_HV_DM, PCI_ANY_ID, PCI_ANY_ID, 0},
217 { 0x8086, E1000_DEV_ID_PCH_D_HV_DC, PCI_ANY_ID, PCI_ANY_ID, 0},
218 /* required last entry */
222 /*********************************************************************
223 * Table of branding strings for all supported NICs.
224 *********************************************************************/
226 static char *em_strings[] = {
/* Index 0: referenced via the string-index field of em_vendor_info_array. */
227 "Intel(R) PRO/1000 Network Connection"
230 /*********************************************************************
231 * Function prototypes
232 *********************************************************************/
233 static int em_probe(device_t);
234 static int em_attach(device_t);
235 static int em_detach(device_t);
236 static int em_shutdown(device_t);
237 static int em_suspend(device_t);
238 static int em_resume(device_t);
239 static void em_start(struct ifnet *);
240 static void em_start_locked(struct ifnet *ifp);
241 #if __FreeBSD_version >= 800000
242 static int em_mq_start(struct ifnet *, struct mbuf *);
243 static int em_mq_start_locked(struct ifnet *, struct mbuf *);
244 static void em_qflush(struct ifnet *);
246 static int em_ioctl(struct ifnet *, u_long, caddr_t);
247 static void em_init(void *);
248 static void em_init_locked(struct adapter *);
249 static void em_stop(void *);
250 static void em_media_status(struct ifnet *, struct ifmediareq *);
251 static int em_media_change(struct ifnet *);
252 static void em_identify_hardware(struct adapter *);
253 static int em_allocate_pci_resources(struct adapter *);
254 static int em_allocate_legacy(struct adapter *adapter);
255 static int em_allocate_msix(struct adapter *adapter);
256 static int em_setup_msix(struct adapter *);
257 static void em_free_pci_resources(struct adapter *);
258 static void em_local_timer(void *);
259 static int em_hardware_init(struct adapter *);
260 static void em_setup_interface(device_t, struct adapter *);
261 static void em_setup_transmit_structures(struct adapter *);
262 static void em_initialize_transmit_unit(struct adapter *);
263 static int em_setup_receive_structures(struct adapter *);
264 static void em_initialize_receive_unit(struct adapter *);
265 static void em_enable_intr(struct adapter *);
266 static void em_disable_intr(struct adapter *);
267 static void em_free_transmit_structures(struct adapter *);
268 static void em_free_receive_structures(struct adapter *);
269 static void em_update_stats_counters(struct adapter *);
270 static void em_txeof(struct adapter *);
271 static void em_tx_purge(struct adapter *);
272 static int em_allocate_receive_structures(struct adapter *);
273 static int em_allocate_transmit_structures(struct adapter *);
274 static int em_rxeof(struct adapter *, int);
275 #ifndef __NO_STRICT_ALIGNMENT
276 static int em_fixup_rx(struct adapter *);
278 static void em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
280 static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
282 #if __FreeBSD_version >= 700000
283 static bool em_tso_setup(struct adapter *, struct mbuf *,
285 #endif /* FreeBSD_version >= 700000 */
286 static void em_set_promisc(struct adapter *);
287 static void em_disable_promisc(struct adapter *);
288 static void em_set_multi(struct adapter *);
289 static void em_print_hw_stats(struct adapter *);
290 static void em_update_link_status(struct adapter *);
291 static int em_get_buf(struct adapter *, int);
292 #if __FreeBSD_version >= 700029
293 static void em_register_vlan(void *, struct ifnet *, u16);
294 static void em_unregister_vlan(void *, struct ifnet *, u16);
295 static void em_setup_vlan_hw_support(struct adapter *);
297 static int em_xmit(struct adapter *, struct mbuf **);
298 static void em_smartspeed(struct adapter *);
299 static int em_82547_fifo_workaround(struct adapter *, int);
300 static void em_82547_update_fifo_head(struct adapter *, int);
301 static int em_82547_tx_fifo_reset(struct adapter *);
302 static void em_82547_move_tail(void *);
303 static int em_dma_malloc(struct adapter *, bus_size_t,
304 struct em_dma_alloc *, int);
305 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
306 static void em_print_debug_info(struct adapter *);
307 static void em_print_nvm_info(struct adapter *);
308 static int em_is_valid_ether_addr(u8 *);
309 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
310 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
311 static u32 em_fill_descriptors (bus_addr_t address, u32 length,
312 PDESC_ARRAY desc_array);
313 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
314 static void em_add_int_delay_sysctl(struct adapter *, const char *,
315 const char *, struct em_int_delay_info *, int, int);
316 /* Management and WOL Support */
317 static void em_init_manageability(struct adapter *);
318 static void em_release_manageability(struct adapter *);
319 static void em_get_hw_control(struct adapter *);
320 static void em_release_hw_control(struct adapter *);
321 static void em_get_wakeup(device_t);
322 static void em_enable_wakeup(device_t);
323 static int em_enable_phy_wakeup(struct adapter *);
326 static void em_intr(void *);
328 #if __FreeBSD_version < 700000
329 static void em_irq_fast(void *);
331 static int em_irq_fast(void *);
335 static void em_msix_tx(void *);
336 static void em_msix_rx(void *);
337 static void em_msix_link(void *);
338 static void em_handle_rx(void *context, int pending);
339 static void em_handle_tx(void *context, int pending);
341 static void em_handle_rxtx(void *context, int pending);
342 static void em_handle_link(void *context, int pending);
343 static void em_add_rx_process_limit(struct adapter *, const char *,
344 const char *, int *, int);
345 #endif /* ~EM_LEGACY_IRQ */
347 #ifdef DEVICE_POLLING
348 static poll_handler_t em_poll;
351 /*********************************************************************
352 * FreeBSD Device Interface Entry Points
353 *********************************************************************/
355 static device_method_t em_methods[] = {
356 /* Device interface */
/* Map the generic newbus device methods onto this driver's em_* handlers. */
357 DEVMETHOD(device_probe, em_probe),
358 DEVMETHOD(device_attach, em_attach),
359 DEVMETHOD(device_detach, em_detach),
360 DEVMETHOD(device_shutdown, em_shutdown),
361 DEVMETHOD(device_suspend, em_suspend),
362 DEVMETHOD(device_resume, em_resume),
/* Driver description: name "em", method table, per-device softc size. */
366 static driver_t em_driver = {
367 "em", em_methods, sizeof(struct adapter),
370 static devclass_t em_devclass;
/* Register on the pci bus and declare module dependencies. */
371 DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
372 MODULE_DEPEND(em, pci, 1, 1, 1);
373 MODULE_DEPEND(em, ether, 1, 1, 1);
375 /*********************************************************************
376 * Tunable default values.
377 *********************************************************************/
/* Conversion between hardware delay "ticks" (1.024 us units) and usecs. */
379 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
380 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
383 /* Allow common code without TSO */
/* Default interrupt-delay values (TIDV/RDTR/TADV/RADV), in usecs. */
388 static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
389 static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
390 static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
391 static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
/* Default RX/TX descriptor ring sizes; validated in em_attach. */
392 static int em_rxd = EM_DEFAULT_RXD;
393 static int em_txd = EM_DEFAULT_TXD;
394 static int em_smart_pwr_down = FALSE;
395 /* Controls whether promiscuous also shows bad packets */
396 static int em_debug_sbp = FALSE;
397 /* Local switch for MSI/MSIX */
398 static int em_enable_msi = TRUE;
/* Expose the knobs above as hw.em.* loader tunables. */
400 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
401 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
402 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
403 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
404 TUNABLE_INT("hw.em.rxd", &em_rxd);
405 TUNABLE_INT("hw.em.txd", &em_txd);
406 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
407 TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
408 TUNABLE_INT("hw.em.enable_msi", &em_enable_msi);
410 #ifndef EM_LEGACY_IRQ
411 /* How many packets rxeof tries to clean at a time */
412 static int em_rx_process_limit = 100;
413 TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
416 /* Flow control setting - default to FULL */
417 static int em_fc_setting = e1000_fc_full;
418 TUNABLE_INT("hw.em.fc_setting", &em_fc_setting);
421 ** Shadow VFTA table, this is needed because
422 ** the real vlan filter table gets cleared during
423 ** a soft reset and the driver needs to be able
426 static u32 em_shadow_vfta[EM_VFTA_SIZE];
428 /* Global used in WOL setup with multiport cards */
429 static int global_quad_port_a = 0;
431 /*********************************************************************
432 * Device identification routine
434 * em_probe determines if the driver should be loaded on
435 * adapter based on PCI vendor/device id of the adapter.
437 * return BUS_PROBE_DEFAULT on success, positive on failure
438 *********************************************************************/
441 em_probe(device_t dev)
443 char adapter_name[60];
444 u16 pci_vendor_id = 0;
445 u16 pci_device_id = 0;
446 u16 pci_subvendor_id = 0;
447 u16 pci_subdevice_id = 0;
448 em_vendor_info_t *ent;
450 INIT_DEBUGOUT("em_probe: begin");
/* Reject anything that is not an Intel (EM_VENDOR_ID) device. */
452 pci_vendor_id = pci_get_vendor(dev);
453 if (pci_vendor_id != EM_VENDOR_ID)
456 pci_device_id = pci_get_device(dev);
457 pci_subvendor_id = pci_get_subvendor(dev);
458 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the ID table; subvendor/subdevice match exactly or via PCI_ANY_ID. */
460 ent = em_vendor_info_array;
461 while (ent->vendor_id != 0) {
462 if ((pci_vendor_id == ent->vendor_id) &&
463 (pci_device_id == ent->device_id) &&
465 ((pci_subvendor_id == ent->subvendor_id) ||
466 (ent->subvendor_id == PCI_ANY_ID)) &&
468 ((pci_subdevice_id == ent->subdevice_id) ||
469 (ent->subdevice_id == PCI_ANY_ID))) {
/* Match: build the description from the branding table entry. */
470 sprintf(adapter_name, "%s %s",
471 em_strings[ent->index],
473 device_set_desc_copy(dev, adapter_name);
474 return (BUS_PROBE_DEFAULT);
482 /*********************************************************************
483 * Device initialization routine
485 * The attach entry point is called when the driver is being loaded.
486 * This routine identifies the type of hardware, allocates all resources
487 * and initializes the hardware.
489 * return 0 on success, positive on failure
490 *********************************************************************/
493 em_attach(device_t dev)
495 struct adapter *adapter;
499 INIT_DEBUGOUT("em_attach: begin");
/* Bind the softc to this device and create the core/TX/RX mutexes. */
501 adapter = device_get_softc(dev);
502 adapter->dev = adapter->osdep.dev = dev;
503 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
504 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
505 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Per-device sysctl handlers for debug info and statistics dumps. */
508 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
509 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
510 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
511 em_sysctl_debug_info, "I", "Debug Information");
513 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
514 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
515 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
516 em_sysctl_stats, "I", "Statistics");
/* Callouts tied to the core and TX mutexes respectively. */
518 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
519 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
521 /* Determine hardware and mac info */
522 em_identify_hardware(adapter);
524 /* Setup PCI resources */
525 if (em_allocate_pci_resources(adapter)) {
526 device_printf(dev, "Allocation of PCI resources failed\n");
532 ** For ICH8 and family we need to
533 ** map the flash memory, and this
534 ** must happen after the MAC is
537 if ((adapter->hw.mac.type == e1000_ich8lan) ||
538 (adapter->hw.mac.type == e1000_pchlan) ||
539 (adapter->hw.mac.type == e1000_ich9lan) ||
540 (adapter->hw.mac.type == e1000_ich10lan)) {
541 int rid = EM_BAR_TYPE_FLASH;
542 adapter->flash = bus_alloc_resource_any(dev,
543 SYS_RES_MEMORY, &rid, RF_ACTIVE);
544 if (adapter->flash == NULL) {
545 device_printf(dev, "Mapping of Flash failed\n");
549 /* This is used in the shared code */
550 adapter->hw.flash_address = (u8 *)adapter->flash;
551 adapter->osdep.flash_bus_space_tag =
552 rman_get_bustag(adapter->flash);
553 adapter->osdep.flash_bus_space_handle =
554 rman_get_bushandle(adapter->flash);
557 /* Do Shared Code initialization */
558 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
559 device_printf(dev, "Setup of Shared code failed\n");
564 e1000_get_bus_info(&adapter->hw);
566 /* Set up some sysctls for the tunable interrupt delays */
567 em_add_int_delay_sysctl(adapter, "rx_int_delay",
568 "receive interrupt delay in usecs", &adapter->rx_int_delay,
569 E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
570 em_add_int_delay_sysctl(adapter, "tx_int_delay",
571 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
572 E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
/* Absolute delay limits (RADV/TADV) are only added for 82540 and later. */
573 if (adapter->hw.mac.type >= e1000_82540) {
574 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
575 "receive interrupt delay limit in usecs",
576 &adapter->rx_abs_int_delay,
577 E1000_REGISTER(&adapter->hw, E1000_RADV),
578 em_rx_abs_int_delay_dflt);
579 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
580 "transmit interrupt delay limit in usecs",
581 &adapter->tx_abs_int_delay,
582 E1000_REGISTER(&adapter->hw, E1000_TADV),
583 em_tx_abs_int_delay_dflt);
586 #ifndef EM_LEGACY_IRQ
587 /* Sysctls for limiting the amount of work done in the taskqueue */
588 em_add_rx_process_limit(adapter, "rx_processing_limit",
589 "max number of rx packets to process", &adapter->rx_process_limit,
590 em_rx_process_limit);
594 * Validate number of transmit and receive descriptors. It
595 * must not exceed hardware maximum, and must be multiple
596 * of E1000_DBA_ALIGN.
598 if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
599 (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
600 (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
601 (em_txd < EM_MIN_TXD)) {
602 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
603 EM_DEFAULT_TXD, em_txd);
604 adapter->num_tx_desc = EM_DEFAULT_TXD;
606 adapter->num_tx_desc = em_txd;
607 if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
608 (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
609 (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
610 (em_rxd < EM_MIN_RXD)) {
611 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
612 EM_DEFAULT_RXD, em_rxd);
613 adapter->num_rx_desc = EM_DEFAULT_RXD;
615 adapter->num_rx_desc = em_rxd;
/* Default link/PHY policy: autonegotiate; 2048-byte receive buffers. */
617 adapter->hw.mac.autoneg = DO_AUTO_NEG;
618 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
619 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
620 adapter->rx_buffer_len = 2048;
622 e1000_init_script_state_82541(&adapter->hw, TRUE);
623 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
/* Copper-specific PHY defaults (MDI/MDI-X, polarity, master/slave). */
626 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
627 adapter->hw.phy.mdix = AUTO_ALL_MODES;
628 adapter->hw.phy.disable_polarity_correction = FALSE;
629 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
633 * Set the frame limits assuming
634 * standard ethernet sized frames.
636 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
637 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
640 * This controls when hardware reports transmit completion
643 adapter->hw.mac.report_tx_early = 1;
/* Allocate the DMA-backed TX and RX descriptor rings. */
645 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
648 /* Allocate Transmit Descriptor ring */
649 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
650 device_printf(dev, "Unable to allocate tx_desc memory\n");
654 adapter->tx_desc_base =
655 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
657 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
660 /* Allocate Receive Descriptor ring */
661 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
662 device_printf(dev, "Unable to allocate rx_desc memory\n");
666 adapter->rx_desc_base =
667 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
670 ** Start from a known state, this is
671 ** important in reading the nvm and
674 e1000_reset_hw(&adapter->hw);
676 /* Make sure we have a good EEPROM before we read from it */
677 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
679 ** Some PCI-E parts fail the first check due to
680 ** the link being in sleep state, call it again,
681 ** if it fails a second time its a real issue.
683 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
685 "The EEPROM Checksum Is Not Valid\n");
691 /* Copy the permanent MAC address out of the EEPROM */
692 if (e1000_read_mac_addr(&adapter->hw) < 0) {
693 device_printf(dev, "EEPROM read error while reading MAC"
699 if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
700 device_printf(dev, "Invalid MAC address\n");
705 /* Initialize the hardware */
706 if (em_hardware_init(adapter)) {
707 device_printf(dev, "Unable to initialize the hardware\n");
712 /* Allocate transmit descriptors and buffers */
713 if (em_allocate_transmit_structures(adapter)) {
714 device_printf(dev, "Could not setup transmit structures\n");
719 /* Allocate receive descriptors and buffers */
720 if (em_allocate_receive_structures(adapter)) {
721 device_printf(dev, "Could not setup receive structures\n");
727 ** Do interrupt configuration
729 if (adapter->msi > 1) /* Do MSI/X */
730 error = em_allocate_msix(adapter);
731 else /* MSI or Legacy */
732 error = em_allocate_legacy(adapter);
737 * Get Wake-on-Lan and Management info for later use
741 /* Setup OS specific network interface */
742 em_setup_interface(dev, adapter);
744 /* Initialize statistics */
745 em_update_stats_counters(adapter);
747 adapter->hw.mac.get_link_status = 1;
748 em_update_link_status(adapter);
750 /* Indicate SOL/IDER usage */
751 if (e1000_check_reset_block(&adapter->hw))
753 "PHY reset is blocked due to SOL/IDER session.\n");
755 /* Do we need workaround for 82544 PCI-X adapter? */
756 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
757 adapter->hw.mac.type == e1000_82544)
758 adapter->pcix_82544 = TRUE;
760 adapter->pcix_82544 = FALSE;
762 #if __FreeBSD_version >= 700029
763 /* Register for VLAN events */
764 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
765 em_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
766 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
767 em_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
770 /* Non-AMT based hardware can now take control from firmware */
771 if (adapter->has_manage && !adapter->has_amt)
772 em_get_hw_control(adapter);
774 /* Tell the stack that the interface is not active */
775 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
777 INIT_DEBUGOUT("em_attach: end");
/* Error unwind: release resources in reverse order of acquisition. */
782 em_free_transmit_structures(adapter);
785 em_release_hw_control(adapter);
786 em_dma_free(adapter, &adapter->rxdma);
788 em_dma_free(adapter, &adapter->txdma);
791 em_free_pci_resources(adapter);
792 EM_TX_LOCK_DESTROY(adapter);
793 EM_RX_LOCK_DESTROY(adapter);
794 EM_CORE_LOCK_DESTROY(adapter);
799 /*********************************************************************
800 * Device removal routine
802 * The detach entry point is called when the driver is being removed.
803 * This routine stops the adapter and deallocates all the resources
804 * that were allocated for driver operation.
806 * return 0 on success, positive on failure
807 *********************************************************************/
810 em_detach(device_t dev)
812 struct adapter *adapter = device_get_softc(dev);
813 struct ifnet *ifp = adapter->ifp;
815 INIT_DEBUGOUT("em_detach: begin");
817 /* Make sure VLANS are not using driver */
818 #if __FreeBSD_version >= 700000
819 if (adapter->ifp->if_vlantrunk != NULL) {
821 if (adapter->ifp->if_nvlans != 0) {
823 device_printf(dev,"Vlan in use, detach first\n");
827 #ifdef DEVICE_POLLING
828 if (ifp->if_capenable & IFCAP_POLLING)
829 ether_poll_deregister(ifp);
/* Quiesce the hardware: reset the PHY and release manageability. */
832 EM_CORE_LOCK(adapter);
834 adapter->in_detach = 1;
836 e1000_phy_hw_reset(&adapter->hw);
838 em_release_manageability(adapter);
840 EM_TX_UNLOCK(adapter);
841 EM_CORE_UNLOCK(adapter);
843 #if __FreeBSD_version >= 700029
844 /* Unregister VLAN events */
845 if (adapter->vlan_attach != NULL)
846 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
847 if (adapter->vlan_detach != NULL)
848 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* Detach from the network stack and drain pending callouts. */
851 ether_ifdetach(adapter->ifp);
852 callout_drain(&adapter->timer);
853 callout_drain(&adapter->tx_fifo_timer);
855 em_free_pci_resources(adapter);
856 bus_generic_detach(dev);
/* Release rings, buffers, and the mutexes created in em_attach. */
859 em_free_transmit_structures(adapter);
860 em_free_receive_structures(adapter);
862 /* Free Transmit Descriptor ring */
863 if (adapter->tx_desc_base) {
864 em_dma_free(adapter, &adapter->txdma);
865 adapter->tx_desc_base = NULL;
868 /* Free Receive Descriptor ring */
869 if (adapter->rx_desc_base) {
870 em_dma_free(adapter, &adapter->rxdma);
871 adapter->rx_desc_base = NULL;
874 em_release_hw_control(adapter);
875 EM_TX_LOCK_DESTROY(adapter);
876 EM_RX_LOCK_DESTROY(adapter);
877 EM_CORE_LOCK_DESTROY(adapter);
882 /*********************************************************************
884 * Shutdown entry point
886 **********************************************************************/
889 em_shutdown(device_t dev)
/* Shutdown is handled identically to suspend. */
891 return em_suspend(dev);
895 * Suspend/resume device methods.
898 em_suspend(device_t dev)
900 struct adapter *adapter = device_get_softc(dev);
/* Under the core lock: drop manageability/HW control, then arm wakeup. */
902 EM_CORE_LOCK(adapter);
904 em_release_manageability(adapter);
905 em_release_hw_control(adapter);
906 em_enable_wakeup(dev);
908 EM_CORE_UNLOCK(adapter);
910 return bus_generic_suspend(dev);
914 em_resume(device_t dev)
916 struct adapter *adapter = device_get_softc(dev);
917 struct ifnet *ifp = adapter->ifp;
/* Re-initialize the adapter and restore manageability state. */
919 EM_CORE_LOCK(adapter);
920 em_init_locked(adapter);
921 em_init_manageability(adapter);
922 EM_CORE_UNLOCK(adapter);
925 return bus_generic_resume(dev);
929 /*********************************************************************
930 * Transmit entry point
932 * em_start is called by the stack to initiate a transmit.
933 * The driver will remain in this routine as long as there are
934 * packets to transmit and transmit resources are available.
935 * In case resources are not available stack is notified and
936 * the packet is requeued.
937 **********************************************************************/
939 #if __FreeBSD_version >= 800000
941 em_mq_start_locked(struct ifnet *ifp, struct mbuf *m)
943 struct adapter *adapter = ifp->if_softc;
945 int error = E1000_SUCCESS;
947 EM_TX_LOCK_ASSERT(adapter);
948 /* To allow being called from a tasklet */
/* If the interface is not running (or no link), just enqueue on the ring. */
952 if (((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
954 || (!adapter->link_active)) {
955 error = drbr_enqueue(ifp, adapter->br, m);
957 } else if (drbr_empty(ifp, adapter->br) &&
958 (adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) {
/* Fast path: ring empty and descriptors available, transmit directly. */
959 if ((error = em_xmit(adapter, &m)) != 0) {
961 error = drbr_enqueue(ifp, adapter->br, m);
965 * We've bypassed the buf ring so we need to update
968 drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
970 ** Send a copy of the frame to the BPF
971 ** listener and set the watchdog on.
973 ETHER_BPF_MTAP(ifp, m);
974 adapter->watchdog_check = TRUE;
976 } else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0)
980 if (drbr_empty(ifp, adapter->br))
982 /* Process the queue */
984 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
986 next = drbr_dequeue(ifp, adapter->br);
989 if ((error = em_xmit(adapter, &next)) != 0) {
991 error = drbr_enqueue(ifp, adapter->br, next);
994 drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
995 ETHER_BPF_MTAP(ifp, next);
996 /* Set the watchdog */
997 adapter->watchdog_check = TRUE;
/* Throttle the stack once free descriptors run low. */
1000 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
1001 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/*
 * em_mq_start - if_transmit entry point.  Tries the TX lock; on success
 * runs the locked start path, otherwise just enqueues the mbuf on the
 * buf_ring for the lock holder to drain.
 */
1007 ** Multiqueue capable stack interface, this is not
1008 ** yet truly multiqueue, but that is coming...
1011 em_mq_start(struct ifnet *ifp, struct mbuf *m)
1014 struct adapter *adapter = ifp->if_softc;
1017 if (EM_TX_TRYLOCK(adapter)) {
1018 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1019 error = em_mq_start_locked(ifp, m);
1020 EM_TX_UNLOCK(adapter);
1022 error = drbr_enqueue(ifp, adapter->br, m);
/*
 * em_qflush - if_qflush entry point: drain and discard every mbuf
 * pending on the driver's buf_ring under the TX lock.
 * NOTE(review): the m_freem() call on each dequeued mbuf is elided from
 * this listing — confirm against the original file.
 */
1028 em_qflush(struct ifnet *ifp)
1031 struct adapter *adapter = (struct adapter *)ifp->if_softc;
1033 EM_TX_LOCK(adapter);
1034 while ((m = buf_ring_dequeue_sc(adapter->br)) != NULL)
1037 EM_TX_UNLOCK(adapter);
1039 #endif /* FreeBSD_version */
/*
 * em_start_locked - legacy (if_start) transmit path, TX lock held.
 * Dequeues from the interface send queue and hands frames to em_xmit()
 * until the queue is empty or descriptors run out; on em_xmit() failure
 * the mbuf is prepended back and OACTIVE is set to throttle the stack.
 */
1042 em_start_locked(struct ifnet *ifp)
1044 struct adapter *adapter = ifp->if_softc;
1045 struct mbuf *m_head;
1047 EM_TX_LOCK_ASSERT(adapter);
1049 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1052 if (!adapter->link_active)
1055 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
1057 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1061 * Encapsulation can modify our pointer, and or make it
1062 * NULL on failure. In that event, we can't requeue.
1064 if (em_xmit(adapter, &m_head)) {
1067 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1068 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1072 /* Send a copy of the frame to the BPF listener */
1073 ETHER_BPF_MTAP(ifp, m_head);
1075 /* Set timeout in case hardware has problems transmitting. */
1076 adapter->watchdog_check = TRUE;
1078 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
1079 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/*
 * em_start - if_start entry point: take the TX lock and run the locked
 * start path if the interface is up and running.
 */
1085 em_start(struct ifnet *ifp)
1087 struct adapter *adapter = ifp->if_softc;
1089 EM_TX_LOCK(adapter);
1090 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1091 em_start_locked(ifp);
1092 EM_TX_UNLOCK(adapter);
/*
 * em_ioctl - interface ioctl handler.  Dispatches on command:
 *   SIOCSIFADDR  - bring the interface up for AF_INET without a full
 *                  reset when possible (avoids link renegotiation);
 *   SIOCSIFMTU   - validate the requested MTU against the per-MAC
 *                  maximum frame size and reinitialize;
 *   SIOCSIFFLAGS - apply promisc/allmulti deltas or (re)init/stop;
 *   ADD/DELMULTI - reprogram the multicast filter with interrupts off;
 *   SIOCxIFMEDIA - media get/set via ifmedia (blocked during SOL/IDER);
 *   SIOCSIFCAP   - toggle polling/checksum/TSO/VLAN/WOL capabilities.
 * Returns 0 on success, positive errno on failure.
 * NOTE(review): this listing elides lines (case labels, breaks, braces),
 * so the visible text is not the complete function body.
 */
1095 /*********************************************************************
1098 * em_ioctl is called when the user wants to configure the
1101 * return 0 on success, positive on failure
1102 **********************************************************************/
1105 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1107 struct adapter *adapter = ifp->if_softc;
1108 struct ifreq *ifr = (struct ifreq *)data;
1110 struct ifaddr *ifa = (struct ifaddr *)data;
1114 if (adapter->in_detach)
1120 if (ifa->ifa_addr->sa_family == AF_INET) {
1123 * Since resetting hardware takes a very long time
1124 * and results in link renegotiation we only
1125 * initialize the hardware only when it is absolutely
1128 ifp->if_flags |= IFF_UP;
1129 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1130 EM_CORE_LOCK(adapter);
1131 em_init_locked(adapter);
1132 EM_CORE_UNLOCK(adapter);
1134 arp_ifinit(ifp, ifa);
1137 error = ether_ioctl(ifp, command, data);
1142 u16 eeprom_data = 0;
1144 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1146 EM_CORE_LOCK(adapter);
/* Per-MAC jumbo-frame capability determines the MTU ceiling. */
1147 switch (adapter->hw.mac.type) {
1150 * 82573 only supports jumbo frames
1151 * if ASPM is disabled.
1153 e1000_read_nvm(&adapter->hw,
1154 NVM_INIT_3GIO_3, 1, &eeprom_data);
1155 if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
1156 max_frame_size = ETHER_MAX_LEN;
1159 /* Allow Jumbo frames - fall thru */
1163 case e1000_ich10lan:
1165 case e1000_80003es2lan: /* Limit Jumbo Frame size */
1166 max_frame_size = 9234;
1169 max_frame_size = 4096;
1171 /* Adapters that do not support jumbo frames */
1175 max_frame_size = ETHER_MAX_LEN;
1178 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1180 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1182 EM_CORE_UNLOCK(adapter);
1187 ifp->if_mtu = ifr->ifr_mtu;
1188 adapter->max_frame_size =
1189 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1190 em_init_locked(adapter);
1191 EM_CORE_UNLOCK(adapter);
1195 IOCTL_DEBUGOUT("ioctl rcv'd:\
1196 SIOCSIFFLAGS (Set Interface Flags)");
1197 EM_CORE_LOCK(adapter);
1198 if (ifp->if_flags & IFF_UP) {
1199 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Only promisc/allmulti changed: no full reinit needed. */
1200 if ((ifp->if_flags ^ adapter->if_flags) &
1201 (IFF_PROMISC | IFF_ALLMULTI)) {
1202 em_disable_promisc(adapter);
1203 em_set_promisc(adapter);
1206 em_init_locked(adapter);
1208 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1209 EM_TX_LOCK(adapter);
1211 EM_TX_UNLOCK(adapter);
1213 adapter->if_flags = ifp->if_flags;
1214 EM_CORE_UNLOCK(adapter);
1218 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
1219 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1220 EM_CORE_LOCK(adapter);
1221 em_disable_intr(adapter);
1222 em_set_multi(adapter);
/* 82542 rev 2 needs the receive unit reset after filter changes. */
1223 if (adapter->hw.mac.type == e1000_82542 &&
1224 adapter->hw.revision_id == E1000_REVISION_2) {
1225 em_initialize_receive_unit(adapter);
1227 #ifdef DEVICE_POLLING
1228 if (!(ifp->if_capenable & IFCAP_POLLING))
1230 em_enable_intr(adapter);
1231 EM_CORE_UNLOCK(adapter);
1235 /* Check SOL/IDER usage */
1236 EM_CORE_LOCK(adapter);
1237 if (e1000_check_reset_block(&adapter->hw)) {
1238 EM_CORE_UNLOCK(adapter);
1239 device_printf(adapter->dev, "Media change is"
1240 " blocked due to SOL/IDER session.\n");
1243 EM_CORE_UNLOCK(adapter);
1245 IOCTL_DEBUGOUT("ioctl rcv'd: \
1246 SIOCxIFMEDIA (Get/Set Interface Media)");
1247 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
1253 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1255 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1256 #ifdef DEVICE_POLLING
1257 if (mask & IFCAP_POLLING) {
1258 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1259 error = ether_poll_register(em_poll, ifp);
1262 EM_CORE_LOCK(adapter);
1263 em_disable_intr(adapter);
1264 ifp->if_capenable |= IFCAP_POLLING;
1265 EM_CORE_UNLOCK(adapter);
1267 error = ether_poll_deregister(ifp);
1268 /* Enable interrupt even in error case */
1269 EM_CORE_LOCK(adapter);
1270 em_enable_intr(adapter);
1271 ifp->if_capenable &= ~IFCAP_POLLING;
1272 EM_CORE_UNLOCK(adapter);
1276 if (mask & IFCAP_HWCSUM) {
1277 ifp->if_capenable ^= IFCAP_HWCSUM;
1280 #if __FreeBSD_version >= 700000
1281 if (mask & IFCAP_TSO4) {
1282 ifp->if_capenable ^= IFCAP_TSO4;
1286 if (mask & IFCAP_VLAN_HWTAGGING) {
1287 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1291 if (mask & IFCAP_VLAN_HWFILTER) {
1292 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1296 if ((mask & IFCAP_WOL) &&
1297 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1298 if (mask & IFCAP_WOL_MCAST)
1299 ifp->if_capenable ^= IFCAP_WOL_MCAST;
1300 if (mask & IFCAP_WOL_MAGIC)
1301 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1304 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1306 #if __FreeBSD_version >= 700000
1307 VLAN_CAPABILITIES(ifp);
1313 error = ether_ioctl(ifp, command, data);
/*
 * em_init_locked - bring the adapter from any state to a running state;
 * core lock must be held.  Sequence: program the packet-buffer split
 * (PBA) per MAC type, refresh the MAC address (LAA aware), reset/init
 * the hardware, set up VLAN offload, checksum/TSO assists, TX then RX
 * rings, multicast and promisc state, the watchdog callout, and finally
 * interrupt delivery (MSI-X IVAR on 82574, polling-aware enable).
 * NOTE(review): this listing elides lines (case labels, em_stop calls,
 * returns), so the visible text is not the complete function body.
 */
1321 /*********************************************************************
1324 * This routine is used in two ways. It is used by the stack as
1325 * init entry point in network interface structure. It is also used
1326 * by the driver as a hw/sw initialization routine to get to a
1329 * return 0 on success, positive on failure
1330 **********************************************************************/
1333 em_init_locked(struct adapter *adapter)
1335 struct ifnet *ifp = adapter->ifp;
1336 device_t dev = adapter->dev;
1339 INIT_DEBUGOUT("em_init: begin");
1341 EM_CORE_LOCK_ASSERT(adapter);
1343 EM_TX_LOCK(adapter);
1345 EM_TX_UNLOCK(adapter);
1348 * Packet Buffer Allocation (PBA)
1349 * Writing PBA sets the receive portion of the buffer
1350 * the remainder is used for the transmit buffer.
1352 * Devices before the 82547 had a Packet Buffer of 64K.
1353 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1354 * After the 82547 the buffer was reduced to 40K.
1355 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1356 * Note: default does not leave enough room for Jumbo Frame >10k.
1358 switch (adapter->hw.mac.type) {
1360 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1361 if (adapter->max_frame_size > 8192)
1362 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1364 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 FIFO-workaround bookkeeping derives from the PBA split. */
1365 adapter->tx_fifo_head = 0;
1366 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1367 adapter->tx_fifo_size =
1368 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1370 /* Total Packet Buffer on these is 48K */
1373 case e1000_80003es2lan:
1374 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1376 case e1000_82573: /* 82573: Total Packet Buffer is 32K */
1377 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1381 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
1384 case e1000_ich10lan:
1386 pba = E1000_PBA_10K;
1392 /* Devices before 82547 had a Packet Buffer of 64K. */
1393 if (adapter->max_frame_size > 8192)
1394 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1396 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1399 INIT_DEBUGOUT1("em_init: pba=%dK",pba);
1400 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1402 /* Get the latest mac address, User can use a LAA */
1403 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1406 /* Put the address into the Receive Address Array */
1407 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1410 * With the 82571 adapter, RAR[0] may be overwritten
1411 * when the other port is reset, we make a duplicate
1412 * in RAR[14] for that eventuality, this assures
1413 * the interface continues to function.
1415 if (adapter->hw.mac.type == e1000_82571) {
1416 e1000_set_laa_state_82571(&adapter->hw, TRUE);
1417 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1418 E1000_RAR_ENTRIES - 1);
1421 /* Initialize the hardware */
1422 if (em_hardware_init(adapter)) {
1423 device_printf(dev, "Unable to initialize the hardware\n");
1426 em_update_link_status(adapter);
1428 /* Setup VLAN support, basic and offload if available */
1429 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1430 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1431 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
1432 /* Use real VLAN Filter support */
1433 em_setup_vlan_hw_support(adapter);
/* Otherwise enable basic VLAN tag stripping via CTRL.VME. */
1436 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1437 ctrl |= E1000_CTRL_VME;
1438 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1442 /* Set hardware offload abilities */
1443 ifp->if_hwassist = 0;
1444 if (adapter->hw.mac.type >= e1000_82543) {
1445 if (ifp->if_capenable & IFCAP_TXCSUM)
1446 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1447 #if __FreeBSD_version >= 700000
1448 if (ifp->if_capenable & IFCAP_TSO4)
1449 ifp->if_hwassist |= CSUM_TSO;
1453 /* Configure for OS presence */
1454 em_init_manageability(adapter);
1456 /* Prepare transmit descriptors and buffers */
1457 em_setup_transmit_structures(adapter);
1458 em_initialize_transmit_unit(adapter);
1460 /* Setup Multicast table */
1461 em_set_multi(adapter);
1463 /* Prepare receive descriptors and buffers */
1464 if (em_setup_receive_structures(adapter)) {
1465 device_printf(dev, "Could not setup receive structures\n");
1466 EM_TX_LOCK(adapter);
1468 EM_TX_UNLOCK(adapter);
1471 em_initialize_receive_unit(adapter);
1473 /* Don't lose promiscuous settings */
1474 em_set_promisc(adapter);
1476 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1477 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1479 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1480 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1482 /* MSI/X configuration for 82574 */
1483 if (adapter->hw.mac.type == e1000_82574) {
1485 tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1486 tmp |= E1000_CTRL_EXT_PBA_CLR;
1487 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1489 ** Set the IVAR - interrupt vector routing.
1490 ** Each nibble represents a vector, high bit
1491 ** is enable, other 3 bits are the MSIX table
1492 ** entry, we map RXQ0 to 0, TXQ0 to 1, and
1493 ** Link (other) to 2, hence the magic number.
1495 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
1498 #ifdef DEVICE_POLLING
1500 * Only enable interrupts if we are not polling, make sure
1501 * they are off otherwise.
1503 if (ifp->if_capenable & IFCAP_POLLING)
1504 em_disable_intr(adapter);
1506 #endif /* DEVICE_POLLING */
1507 em_enable_intr(adapter);
1509 /* AMT based hardware can now take control from firmware */
1510 if (adapter->has_manage && adapter->has_amt)
1511 em_get_hw_control(adapter);
1513 /* Don't reset the phy next time init gets called */
1514 adapter->hw.phy.reset_disable = TRUE;
/*
 * em_init - if_init entry point: grab the core lock and run the full
 * locked initialization.
 * NOTE(review): the function signature line is elided from this numbered
 * listing — only the body is visible here.
 */
1520 struct adapter *adapter = arg;
1522 EM_CORE_LOCK(adapter);
1523 em_init_locked(adapter);
1524 EM_CORE_UNLOCK(adapter);
1528 #ifdef DEVICE_POLLING
1529 /*********************************************************************
1531 * Legacy polling routine
1533 *********************************************************************/
/*
 * em_poll - DEVICE_POLLING handler.  On POLL_AND_CHECK_STATUS also
 * reads ICR and refreshes link state.  Then runs RX cleanup for up to
 * 'count' frames and restarts transmission under the TX lock.
 * NOTE(review): rx_done is assigned but its use/return is elided from
 * this listing — confirm against the original file.
 */
1535 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1537 struct adapter *adapter = ifp->if_softc;
1538 u32 reg_icr, rx_done = 0;
1540 EM_CORE_LOCK(adapter);
1541 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1542 EM_CORE_UNLOCK(adapter);
1546 if (cmd == POLL_AND_CHECK_STATUS) {
1547 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1548 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1549 callout_stop(&adapter->timer);
1550 adapter->hw.mac.get_link_status = 1;
1551 em_update_link_status(adapter);
1552 callout_reset(&adapter->timer, hz,
1553 em_local_timer, adapter);
1556 EM_CORE_UNLOCK(adapter);
1558 rx_done = em_rxeof(adapter, count);
1560 EM_TX_LOCK(adapter);
1562 #if __FreeBSD_version >= 800000
1563 if (!drbr_empty(ifp, adapter->br))
1564 em_mq_start_locked(ifp, NULL);
1566 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1567 em_start_locked(ifp);
1569 EM_TX_UNLOCK(adapter);
1572 #endif /* DEVICE_POLLING */
1574 #ifdef EM_LEGACY_IRQ
1575 /*********************************************************************
1577 * Legacy Interrupt Service routine
1579 *********************************************************************/
/*
 * Legacy (non-fast) interrupt handler.  Bails out under polling or for
 * stray/shared interrupts (ICR all-ones, zero, or INT_ASSERTED clear on
 * 82571+), handles link events, then cleans RX and restarts TX.
 * NOTE(review): the function header line is elided from this numbered
 * listing — only the body is visible here.
 */
1584 struct adapter *adapter = arg;
1585 struct ifnet *ifp = adapter->ifp;
1589 if (ifp->if_capenable & IFCAP_POLLING)
1592 EM_CORE_LOCK(adapter);
1593 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1594 if (reg_icr & E1000_ICR_RXO)
1595 adapter->rx_overruns++;
/* Stray or not-ours: 0xffffffff (device gone), 0, or bit 31 clear. */
1596 if ((reg_icr == 0xffffffff) || (reg_icr == 0)||
1597 (adapter->hw.mac.type >= e1000_82571 &&
1598 (reg_icr & E1000_ICR_INT_ASSERTED) == 0))
1601 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1604 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1605 callout_stop(&adapter->timer);
1606 adapter->hw.mac.get_link_status = 1;
1607 em_update_link_status(adapter);
1608 /* Deal with TX cruft when link lost */
1609 em_tx_purge(adapter);
1610 callout_reset(&adapter->timer, hz,
1611 em_local_timer, adapter);
1615 EM_TX_LOCK(adapter);
1617 em_rxeof(adapter, -1);
1619 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1620 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1621 em_start_locked(ifp);
1622 EM_TX_UNLOCK(adapter);
1625 EM_CORE_UNLOCK(adapter);
1629 #else /* EM_FAST_IRQ, then fast interrupt routines only */
/*
 * em_handle_link - taskqueue handler for link-state changes queued by
 * the fast ISRs.  Refreshes link status, purges stale TX work when the
 * link dropped, and rearms the local timer.
 */
1632 em_handle_link(void *context, int pending)
1634 struct adapter *adapter = context;
1635 struct ifnet *ifp = adapter->ifp;
1637 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1640 EM_CORE_LOCK(adapter);
1641 callout_stop(&adapter->timer);
1642 em_update_link_status(adapter);
1643 /* Deal with TX cruft when link lost */
1644 em_tx_purge(adapter);
1645 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1646 EM_CORE_UNLOCK(adapter);
1650 /* Combined RX/TX handler, used by Legacy and MSI */
/*
 * em_handle_rxtx - taskqueue handler behind em_irq_fast.  Cleans RX
 * (re-queuing itself if more work remains), restarts TX under the TX
 * lock, then re-enables the interrupt that em_irq_fast masked.
 */
1652 em_handle_rxtx(void *context, int pending)
1654 struct adapter *adapter = context;
1655 struct ifnet *ifp = adapter->ifp;
1658 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1659 if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
1660 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1661 EM_TX_LOCK(adapter);
1664 #if __FreeBSD_version >= 800000
1665 if (!drbr_empty(ifp, adapter->br))
1666 em_mq_start_locked(ifp, NULL);
1668 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1669 em_start_locked(ifp);
1671 EM_TX_UNLOCK(adapter);
1674 em_enable_intr(adapter);
1677 /*********************************************************************
1679 * Fast Legacy/MSI Combined Interrupt Service routine
1681 *********************************************************************/
1682 #if __FreeBSD_version < 700000
1683 #define FILTER_STRAY
1684 #define FILTER_HANDLED
/*
 * em_irq_fast - filter-level ISR for legacy/MSI.  Rejects stray or
 * shared interrupts, masks further interrupts (also works around the
 * MSI message-reordering errata), defers RX/TX work to the rxtx task
 * and link events to the link task.
 */
1689 em_irq_fast(void *arg)
1691 struct adapter *adapter = arg;
1697 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* All-ones ICR means the device has been removed/powered off. */
1700 if (reg_icr == 0xffffffff)
1701 return FILTER_STRAY;
1703 /* Definitely not our interrupt. */
1705 return FILTER_STRAY;
1708 * Starting with the 82571 chip, bit 31 should be used to
1709 * determine whether the interrupt belongs to us.
1711 if (adapter->hw.mac.type >= e1000_82571 &&
1712 (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1713 return FILTER_STRAY;
1716 * Mask interrupts until the taskqueue is finished running. This is
1717 * cheap, just assume that it is needed. This also works around the
1718 * MSI message reordering errata on certain systems.
1720 em_disable_intr(adapter);
1721 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1723 /* Link status change */
1724 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1725 adapter->hw.mac.get_link_status = 1;
1726 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1729 if (reg_icr & E1000_ICR_RXO)
1730 adapter->rx_overruns++;
1731 return FILTER_HANDLED;
1734 /*********************************************************************
1736 * MSIX Interrupt Service Routines
1738 **********************************************************************/
/* IMS bits that re-arm each MSI-X vector (cause-set values for the
 * TX/RX/Link vectors programmed via IVAR in em_init_locked). */
1739 #define EM_MSIX_TX 0x00040000
1740 #define EM_MSIX_RX 0x00010000
1741 #define EM_MSIX_LINK 0x00100000
/*
 * em_msix_tx - MSI-X TX vector ISR: clean the TX ring under the lock,
 * defer the restart to the tx task, then re-arm this vector via IMS.
 */
1744 em_msix_tx(void *arg)
1746 struct adapter *adapter = arg;
1747 struct ifnet *ifp = adapter->ifp;
1750 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1751 EM_TX_LOCK(adapter);
1753 EM_TX_UNLOCK(adapter);
1754 taskqueue_enqueue(adapter->tq, &adapter->tx_task);
1756 /* Reenable this interrupt */
1757 E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_TX);
1761 /*********************************************************************
1763 * MSIX RX Interrupt Service routine
1765 **********************************************************************/
/*
 * em_msix_rx - MSI-X RX vector ISR: run bounded RX cleanup; if more
 * work remains queue the rx task, then re-arm this vector via IMS.
 */
1768 em_msix_rx(void *arg)
1770 struct adapter *adapter = arg;
1771 struct ifnet *ifp = adapter->ifp;
1774 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1775 (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1776 taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1777 /* Reenable this interrupt */
1778 E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_RX);
1782 /*********************************************************************
1784 * MSIX Link Fast Interrupt Service routine
1786 **********************************************************************/
/*
 * em_msix_link - MSI-X link vector ISR: count the event, defer the
 * link-state work to the fast taskqueue, and re-arm the vector
 * (including LSC so further link changes keep firing).
 */
1789 em_msix_link(void *arg)
1791 struct adapter *adapter = arg;
1794 ++adapter->link_irq;
1795 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1797 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1798 adapter->hw.mac.get_link_status = 1;
1799 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1801 E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1802 EM_MSIX_LINK | E1000_IMS_LSC);
/*
 * em_handle_rx - deferred RX task: keep cleaning the RX ring while
 * work remains, re-queuing itself so the limit bounds each pass.
 */
1807 em_handle_rx(void *context, int pending)
1809 struct adapter *adapter = context;
1810 struct ifnet *ifp = adapter->ifp;
1812 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1813 (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1814 taskqueue_enqueue(adapter->tq, &adapter->rx_task);
/*
 * em_handle_tx - deferred TX task: restart transmission if the TX lock
 * is immediately available; otherwise give up (the lock holder will
 * drain the queue).
 */
1819 em_handle_tx(void *context, int pending)
1821 struct adapter *adapter = context;
1822 struct ifnet *ifp = adapter->ifp;
1824 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1825 if (!EM_TX_TRYLOCK(adapter))
1828 #if __FreeBSD_version >= 800000
1829 if (!drbr_empty(ifp, adapter->br))
1830 em_mq_start_locked(ifp, NULL);
1832 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1833 em_start_locked(ifp);
1835 EM_TX_UNLOCK(adapter);
1838 #endif /* EM_FAST_IRQ */
1840 /*********************************************************************
1842 * Media Ioctl callback
1844 * This routine is called whenever the user queries the status of
1845 * the interface using ifconfig.
1847 **********************************************************************/
/*
 * em_media_status - report current media to ifmedia.  Fiber/serdes
 * links are reported as 1000_SX (1000_LX on 82545); copper reports the
 * negotiated speed and duplex.
 */
1849 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1851 struct adapter *adapter = ifp->if_softc;
1852 u_char fiber_type = IFM_1000_SX;
1854 INIT_DEBUGOUT("em_media_status: begin");
1856 EM_CORE_LOCK(adapter);
1857 em_update_link_status(adapter);
1859 ifmr->ifm_status = IFM_AVALID;
1860 ifmr->ifm_active = IFM_ETHER;
1862 if (!adapter->link_active) {
1863 EM_CORE_UNLOCK(adapter);
1867 ifmr->ifm_status |= IFM_ACTIVE;
1869 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1870 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1871 if (adapter->hw.mac.type == e1000_82545)
1872 fiber_type = IFM_1000_LX;
1873 ifmr->ifm_active |= fiber_type | IFM_FDX;
1875 switch (adapter->link_speed) {
1877 ifmr->ifm_active |= IFM_10_T;
1880 ifmr->ifm_active |= IFM_100_TX;
1883 ifmr->ifm_active |= IFM_1000_T;
1886 if (adapter->link_duplex == FULL_DUPLEX)
1887 ifmr->ifm_active |= IFM_FDX;
1889 ifmr->ifm_active |= IFM_HDX;
1891 EM_CORE_UNLOCK(adapter);
1894 /*********************************************************************
1896 * Media Ioctl callback
1898 * This routine is called when the user changes speed/duplex using
1899 * media/mediaopt option with ifconfig.
1901 **********************************************************************/
/*
 * em_media_change - apply a user-requested media selection: autoselect
 * and 1000_T use autonegotiation; 100_TX/10_T force speed with duplex
 * taken from the IFM_FDX flag.  Re-enables PHY reset and reinitializes
 * so the new settings take effect.
 */
1903 em_media_change(struct ifnet *ifp)
1905 struct adapter *adapter = ifp->if_softc;
1906 struct ifmedia *ifm = &adapter->media;
1908 INIT_DEBUGOUT("em_media_change: begin");
1910 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1913 EM_CORE_LOCK(adapter);
1914 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1916 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1917 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1922 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1923 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1926 adapter->hw.mac.autoneg = FALSE;
1927 adapter->hw.phy.autoneg_advertised = 0;
1928 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1929 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1931 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1934 adapter->hw.mac.autoneg = FALSE;
1935 adapter->hw.phy.autoneg_advertised = 0;
1936 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1937 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1939 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1942 device_printf(adapter->dev, "Unsupported media type\n");
1945 /* As the speed/duplex settings may have changed we need to
1948 adapter->hw.phy.reset_disable = FALSE;
1950 em_init_locked(adapter);
1951 EM_CORE_UNLOCK(adapter);
1956 /*********************************************************************
1958 * This routine is called to map the mbufs to tx descriptors — keep the
1959 * original header: maps mbufs to tx descriptors.
1960 * return 0 on success, positive on failure
1961 **********************************************************************/
/*
 * em_xmit - encapsulate one frame onto the TX ring; TX lock held.
 * Flow: DMA-map the mbuf chain (defrag once on EFBIG), apply TSO or
 * checksum-offload context setup, fill one descriptor per segment
 * (splitting per the 82544/PCI-X address/length workaround, and adding
 * a small sentinel descriptor after a TSO burst to avoid premature
 * writeback), tag VLANs, mark the last descriptor EOP|RS, and bump TDT.
 * On failure the caller may requeue *m_headp unless it was freed and
 * set to NULL.
 * NOTE(review): this listing elides lines (returns, frees, braces) —
 * the visible text is not the complete function body.
 */
1964 em_xmit(struct adapter *adapter, struct mbuf **m_headp)
1966 bus_dma_segment_t segs[EM_MAX_SCATTER];
1968 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1969 struct e1000_tx_desc *ctxd = NULL;
1970 struct mbuf *m_head;
1971 u32 txd_upper, txd_lower, txd_used, txd_saved;
1972 int nsegs, i, j, first, last = 0;
1973 int error, do_tso, tso_desc = 0;
1974 #if __FreeBSD_version < 700000
1978 txd_upper = txd_lower = txd_used = txd_saved = 0;
1980 #if __FreeBSD_version >= 700000
1981 do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
1987 * Force a cleanup if number of TX descriptors
1988 * available hits the threshold
1990 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1992 /* Now do we at least have a minimal? */
1993 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
1994 adapter->no_tx_desc_avail1++;
2002 * If an mbuf is only header we need
2003 * to pull 4 bytes of data into it.
2005 if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
2006 m_head = m_pullup(m_head, M_TSO_LEN + 4);
2013 * Map the packet for DMA
2015 * Capture the first descriptor index,
2016 * this descriptor will have the index
2017 * of the EOP which is the only one that
2018 * now gets a DONE bit writeback.
2020 first = adapter->next_avail_tx_desc;
2021 tx_buffer = &adapter->tx_buffer_area[first];
2022 tx_buffer_mapped = tx_buffer;
2023 map = tx_buffer->map;
2025 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2026 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2029 * There are two types of errors we can (try) to handle:
2030 * - EFBIG means the mbuf chain was too long and bus_dma ran
2031 *   out of segments. Defragment the mbuf chain and try again.
2032 * - ENOMEM means bus_dma could not obtain enough bounce buffers
2033 *   at this point in time. Defer sending and try again later.
2034 * All other errors, in particular EINVAL, are fatal and prevent the
2035 * mbuf chain from ever going through. Drop it and report error.
2037 if (error == EFBIG) {
2040 m = m_defrag(*m_headp, M_DONTWAIT);
2042 adapter->mbuf_alloc_failed++;
2050 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2051 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2054 adapter->no_tx_dma_setup++;
2059 } else if (error != 0) {
2060 adapter->no_tx_dma_setup++;
2065 * TSO Hardware workaround, if this packet is not
2066 * TSO, and is only a single descriptor long, and
2067 * it follows a TSO burst, then we need to add a
2068 * sentinel descriptor to prevent premature writeback.
2070 if ((do_tso == 0) && (adapter->tx_tso == TRUE)) {
2073 adapter->tx_tso = FALSE;
2076 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
2077 adapter->no_tx_desc_avail2++;
2078 bus_dmamap_unload(adapter->txtag, map);
2083 /* Do hardware assists */
2084 #if __FreeBSD_version >= 700000
2085 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2086 error = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
2088 return (ENXIO); /* something foobar */
2089 /* we need to make a final sentinel transmit desc */
2093 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
2094 em_transmit_checksum_setup(adapter, m_head,
2095 &txd_upper, &txd_lower);
2097 i = adapter->next_avail_tx_desc;
2098 if (adapter->pcix_82544)
2101 /* Set up our transmit descriptors */
2102 for (j = 0; j < nsegs; j++) {
2104 bus_addr_t seg_addr;
2105 /* If adapter is 82544 and on PCIX bus */
2106 if(adapter->pcix_82544) {
2107 DESC_ARRAY desc_array;
2108 u32 array_elements, counter;
2110 * Check the Address and Length combination and
2111 * split the data accordingly
2113 array_elements = em_fill_descriptors(segs[j].ds_addr,
2114 segs[j].ds_len, &desc_array);
2115 for (counter = 0; counter < array_elements; counter++) {
2116 if (txd_used == adapter->num_tx_desc_avail) {
2117 adapter->next_avail_tx_desc = txd_saved;
2118 adapter->no_tx_desc_avail2++;
2119 bus_dmamap_unload(adapter->txtag, map);
2122 tx_buffer = &adapter->tx_buffer_area[i];
2123 ctxd = &adapter->tx_desc_base[i];
2124 ctxd->buffer_addr = htole64(
2125 desc_array.descriptor[counter].address);
2126 ctxd->lower.data = htole32(
2127 (adapter->txd_cmd | txd_lower | (u16)
2128 desc_array.descriptor[counter].length));
2130 htole32((txd_upper));
2132 if (++i == adapter->num_tx_desc)
2134 tx_buffer->m_head = NULL;
2135 tx_buffer->next_eop = -1;
2139 tx_buffer = &adapter->tx_buffer_area[i];
2140 ctxd = &adapter->tx_desc_base[i];
2141 seg_addr = segs[j].ds_addr;
2142 seg_len = segs[j].ds_len;
2145 ** If this is the last descriptor, we want to
2146 ** split it so we have a small final sentinel
2148 if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
2150 ctxd->buffer_addr = htole64(seg_addr);
2151 ctxd->lower.data = htole32(
2152 adapter->txd_cmd | txd_lower | seg_len);
2155 if (++i == adapter->num_tx_desc)
2157 /* Now make the sentinel */
2158 ++txd_used; /* using an extra txd */
2159 ctxd = &adapter->tx_desc_base[i];
2160 tx_buffer = &adapter->tx_buffer_area[i];
2162 htole64(seg_addr + seg_len);
2163 ctxd->lower.data = htole32(
2164 adapter->txd_cmd | txd_lower | 4);
2168 if (++i == adapter->num_tx_desc)
2171 ctxd->buffer_addr = htole64(seg_addr);
2172 ctxd->lower.data = htole32(
2173 adapter->txd_cmd | txd_lower | seg_len);
2177 if (++i == adapter->num_tx_desc)
2180 tx_buffer->m_head = NULL;
2181 tx_buffer->next_eop = -1;
2185 adapter->next_avail_tx_desc = i;
2186 if (adapter->pcix_82544)
2187 adapter->num_tx_desc_avail -= txd_used;
2189 adapter->num_tx_desc_avail -= nsegs;
2190 if (tso_desc) /* TSO used an extra for sentinel */
2191 adapter->num_tx_desc_avail -= txd_used;
2195 ** Handle VLAN tag, this is the
2196 ** biggest difference between
2199 #if __FreeBSD_version < 700000
2200 /* Find out if we are in vlan mode. */
2201 mtag = VLAN_OUTPUT_TAG(ifp, m_head);
2203 ctxd->upper.fields.special =
2204 htole16(VLAN_TAG_VALUE(mtag));
2205 #else /* FreeBSD 7 */
2206 if (m_head->m_flags & M_VLANTAG) {
2207 /* Set the vlan id. */
2208 ctxd->upper.fields.special =
2209 htole16(m_head->m_pkthdr.ether_vtag);
2211 /* Tell hardware to add tag */
2212 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
2215 tx_buffer->m_head = m_head;
2216 tx_buffer_mapped->map = tx_buffer->map;
2217 tx_buffer->map = map;
2218 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
2221 * Last Descriptor of Packet
2222 * needs End Of Packet (EOP)
2223 * and Report Status (RS)
2226 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
2228 * Keep track in the first buffer which
2229 * descriptor will be written back
2231 tx_buffer = &adapter->tx_buffer_area[first];
2232 tx_buffer->next_eop = last;
2233 adapter->watchdog_time = ticks;
2236 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
2237 * that this frame is available to transmit.
2239 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2240 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2241 if (adapter->hw.mac.type == e1000_82547 &&
2242 adapter->link_duplex == HALF_DUPLEX)
2243 em_82547_move_tail(adapter);
2245 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
2246 if (adapter->hw.mac.type == e1000_82547)
2247 em_82547_update_fifo_head(adapter,
2248 m_head->m_pkthdr.len);
2254 /*********************************************************************
2256 * 82547 workaround to avoid controller hang in half-duplex environment.
2257 * The workaround is to avoid queuing a large packet that would span
2258 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
2259 * in this case. We do that only when FIFO is quiescent.
2261 **********************************************************************/
/*
 * em_82547_move_tail - advance the hardware tail (TDT) toward the
 * software tail, summing pending frame lengths; if the workaround says
 * the FIFO isn't safe yet, retry via a 1-tick callout instead.
 */
2263 em_82547_move_tail(void *arg)
2265 struct adapter *adapter = arg;
2266 struct e1000_tx_desc *tx_desc;
2267 u16 hw_tdt, sw_tdt, length = 0;
2270 EM_TX_LOCK_ASSERT(adapter);
2272 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
2273 sw_tdt = adapter->next_avail_tx_desc;
2275 while (hw_tdt != sw_tdt) {
2276 tx_desc = &adapter->tx_desc_base[hw_tdt];
2277 length += tx_desc->lower.flags.length;
2278 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
2279 if (++hw_tdt == adapter->num_tx_desc)
2283 if (em_82547_fifo_workaround(adapter, length)) {
2284 adapter->tx_fifo_wrk_cnt++;
2285 callout_reset(&adapter->tx_fifo_timer, 1,
2286 em_82547_move_tail, adapter);
2289 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
2290 em_82547_update_fifo_head(adapter, length);
/*
 * em_82547_fifo_workaround - decide whether a packet of 'len' bytes may
 * be released to the 82547 Tx FIFO.  Only relevant in half duplex: if
 * the rounded-up packet would run past the FIFO boundary, attempt a
 * FIFO reset; the return paths (on elided lines) report whether the
 * caller must defer the TDT advance.
 */
2297 em_82547_fifo_workaround(struct adapter *adapter, int len)
2299 int fifo_space, fifo_pkt_len;
/* Account for the FIFO header and round up to the header granularity. */
2301 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2303 if (adapter->link_duplex == HALF_DUPLEX) {
2304 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
/* Packet would span the FIFO wrap point -- try to reset first. */
2306 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
2307 if (em_82547_tx_fifo_reset(adapter))
/*
 * em_82547_update_fifo_head - advance the software shadow of the 82547
 * Tx FIFO head by the rounded-up packet length, wrapping modulo the
 * FIFO size.
 */
2318 em_82547_update_fifo_head(struct adapter *adapter, int len)
2320 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2322 /* tx_fifo_head is always 16 byte aligned */
2323 adapter->tx_fifo_head += fifo_pkt_len;
2324 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
2325 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * em_82547_tx_fifo_reset - reset the 82547 internal Tx FIFO pointers,
 * but only when the transmitter is completely quiescent: descriptor
 * head == tail, FIFO head/tail and their shadow registers agree, and
 * the FIFO packet count is zero.  The TX unit is disabled around the
 * pointer rewrite.  Return statements are on elided lines; presumably
 * nonzero on success -- callers treat it that way.
 */
2331 em_82547_tx_fifo_reset(struct adapter *adapter)
2335 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
2336 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
2337 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
2338 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
2339 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
2340 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
2341 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
2342 /* Disable TX unit */
2343 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2344 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
2345 tctl & ~E1000_TCTL_EN);
2347 /* Reset FIFO pointers */
2348 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
2349 adapter->tx_head_addr);
2350 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
2351 adapter->tx_head_addr);
2352 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
2353 adapter->tx_head_addr);
2354 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
2355 adapter->tx_head_addr);
2357 /* Re-enable TX unit */
2358 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2359 E1000_WRITE_FLUSH(&adapter->hw);
/* Software shadow restarts at 0; count resets for statistics. */
2361 adapter->tx_fifo_head = 0;
2362 adapter->tx_fifo_reset_cnt++;
/*
 * em_set_promisc - program RCTL according to the interface flags:
 * IFF_PROMISC enables unicast+multicast promiscuous (UPE|MPE);
 * IFF_ALLMULTI enables multicast promiscuous only (MPE, UPE cleared).
 */
2372 em_set_promisc(struct adapter *adapter)
2374 struct ifnet *ifp = adapter->ifp;
2377 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2379 if (ifp->if_flags & IFF_PROMISC) {
2380 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2381 /* Turn this on if you want to see bad packets */
/* NOTE(review): an elided line likely gates SBP (store bad packets)
 * behind a debug tunable -- confirm before assuming it is always set. */
2383 reg_rctl |= E1000_RCTL_SBP;
2384 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2385 } else if (ifp->if_flags & IFF_ALLMULTI) {
2386 reg_rctl |= E1000_RCTL_MPE;
2387 reg_rctl &= ~E1000_RCTL_UPE;
2388 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/*
 * em_disable_promisc - clear all promiscuous-related RCTL bits
 * (unicast promisc, multicast promisc, store-bad-packets).
 */
2393 em_disable_promisc(struct adapter *adapter)
2397 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2399 reg_rctl &= (~E1000_RCTL_UPE);
2400 reg_rctl &= (~E1000_RCTL_MPE);
2401 reg_rctl &= (~E1000_RCTL_SBP);
2402 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2406 /*********************************************************************
2409 * This routine is called whenever multicast address list is updated.
2411 **********************************************************************/
/*
 * em_set_multi - rebuild the hardware multicast filter from the
 * interface's multicast address list.  On 82542 rev 2 the receiver must
 * be held in reset (and MWI disabled) around the filter update, then
 * restored afterwards.  If the list exceeds the hardware table size we
 * fall back to multicast-promiscuous (MPE).
 */
2414 em_set_multi(struct adapter *adapter)
2416 struct ifnet *ifp = adapter->ifp;
2417 struct ifmultiaddr *ifma;
2419 u8 *mta; /* Multicast array memory */
2422 IOCTL_DEBUGOUT("em_set_multi: begin");
/* 82542 rev2 errata: put RX in reset and turn off MWI while updating. */
2424 if (adapter->hw.mac.type == e1000_82542 &&
2425 adapter->hw.revision_id == E1000_REVISION_2) {
2426 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2427 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2428 e1000_pci_clear_mwi(&adapter->hw);
2429 reg_rctl |= E1000_RCTL_RST;
2430 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2434 /* Allocate temporary memory to setup array */
2435 mta = malloc(sizeof(u8) *
2436 (ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES),
2437 M_DEVBUF, M_NOWAIT | M_ZERO);
/* NOTE(review): panicking on an M_NOWAIT allocation failure is harsh;
 * a soft failure (skip the update) would be friendlier. */
2439 panic("em_set_multi memory failure\n");
2441 #if __FreeBSD_version < 800000
2444 if_maddr_rlock(ifp);
/* Copy up to MAX_NUM_MULTICAST_ADDRESSES link-level addresses. */
2446 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2447 if (ifma->ifma_addr->sa_family != AF_LINK)
2450 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2453 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2454 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2457 #if __FreeBSD_version < 800000
2458 IF_ADDR_UNLOCK(ifp);
2460 if_maddr_runlock(ifp);
/* Table overflow: accept all multicast instead of filtering. */
2462 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2463 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2464 reg_rctl |= E1000_RCTL_MPE;
2465 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2467 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
/* Undo the 82542 rev2 workaround: release reset, restore MWI. */
2469 if (adapter->hw.mac.type == e1000_82542 &&
2470 adapter->hw.revision_id == E1000_REVISION_2) {
2471 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2472 reg_rctl &= ~E1000_RCTL_RST;
2473 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2475 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2476 e1000_pci_set_mwi(&adapter->hw);
2478 free(mta, M_DEVBUF);
2482 /*********************************************************************
2485 * This routine checks for link status and updates statistics.
2487 **********************************************************************/
/*
 * em_local_timer - once-per-second housekeeping callout: refresh link
 * state and statistics, service the LAA workaround, run SmartSpeed,
 * check the TX watchdog, and re-arm itself.  Runs with the core lock
 * held (asserted below).  The lines after the callout_reset belong to
 * the watchdog-expiry path (the 'hung:' label and 'return' sit on
 * elided lines of this listing).
 */
2490 em_local_timer(void *arg)
2492 struct adapter *adapter = arg;
2493 struct ifnet *ifp = adapter->ifp;
2495 EM_CORE_LOCK_ASSERT(adapter);
2497 #ifndef DEVICE_POLLING
2498 taskqueue_enqueue(adapter->tq,
2499 &adapter->rxtx_task);
2501 em_update_link_status(adapter);
2502 em_update_stats_counters(adapter);
2504 /* Reset LAA into RAR[0] on 82571 */
2505 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
2506 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2508 if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
2509 em_print_hw_stats(adapter);
2511 em_smartspeed(adapter);
2514 * We check the watchdog: the time since
2515 * the last TX descriptor was cleaned.
2516 * This implies a functional TX engine.
2518 if ((adapter->watchdog_check == TRUE) &&
2519 (ticks - adapter->watchdog_time > EM_WATCHDOG))
2522 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
/* Watchdog-expiry path: mark interface down and reinitialize. */
2525 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2526 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2527 adapter->watchdog_events++;
2528 em_init_locked(adapter);
/*
 * em_update_link_status - poll the MAC/PHY for the current link state
 * (per media type) and, on a transition, update cached speed/duplex,
 * log the change, notify the network stack, and enable/disable the TX
 * watchdog accordingly.
 */
2532 em_update_link_status(struct adapter *adapter)
2534 struct e1000_hw *hw = &adapter->hw;
2535 struct ifnet *ifp = adapter->ifp;
2536 device_t dev = adapter->dev;
2539 /* Get the cached link value or read phy for real */
2540 switch (hw->phy.media_type) {
2541 case e1000_media_type_copper:
2542 if (hw->mac.get_link_status) {
2543 /* Do the work to read phy */
2544 e1000_check_for_link(hw);
2545 link_check = !hw->mac.get_link_status;
2546 if (link_check) /* ESB2 fix */
2547 e1000_cfg_on_link_up(hw);
2551 case e1000_media_type_fiber:
2552 e1000_check_for_link(hw);
/* Fiber: link is read straight from the STATUS register
 * (the E1000_STATUS_LU mask is on an elided line). */
2553 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2556 case e1000_media_type_internal_serdes:
2557 e1000_check_for_link(hw);
2558 link_check = adapter->hw.mac.serdes_has_link;
2561 case e1000_media_type_unknown:
2565 /* Now check for a transition */
2566 if (link_check && (adapter->link_active == 0)) {
2567 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2568 &adapter->link_duplex);
2569 /* Check if we must disable SPEED_MODE bit on PCI-E */
2570 if ((adapter->link_speed != SPEED_1000) &&
2571 ((hw->mac.type == e1000_82571) ||
2572 (hw->mac.type == e1000_82572))) {
2574 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
2575 tarc0 &= ~SPEED_MODE_BIT;
2576 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
2579 device_printf(dev, "Link is up %d Mbps %s\n",
2580 adapter->link_speed,
2581 ((adapter->link_duplex == FULL_DUPLEX) ?
2582 "Full Duplex" : "Half Duplex"));
2583 adapter->link_active = 1;
2584 adapter->smartspeed = 0;
2585 ifp->if_baudrate = adapter->link_speed * 1000000;
2586 if_link_state_change(ifp, LINK_STATE_UP);
2587 } else if (!link_check && (adapter->link_active == 1)) {
2588 ifp->if_baudrate = adapter->link_speed = 0;
2589 adapter->link_duplex = 0;
2591 device_printf(dev, "Link is Down\n");
2592 adapter->link_active = 0;
2593 /* Link down, disable watchdog */
2594 adapter->watchdog_check = FALSE;
2595 if_link_state_change(ifp, LINK_STATE_DOWN);
2599 /*********************************************************************
2601 * This routine disables all traffic on the adapter by issuing a
2602 * global reset on the MAC and deallocates TX/RX buffers.
2604 * This routine should always be called with BOTH the CORE
2606 **********************************************************************/
/*
 * em_stop (function header is on an elided line) - quiesce the adapter:
 * disable interrupts, stop the housekeeping and FIFO callouts, mark the
 * interface not-running, reset the MAC, and clear wake-up control on
 * parts that have it.  Per the header comment above, both the CORE and
 * TX locks must be held (asserted below).
 */
2611 struct adapter *adapter = arg;
2612 struct ifnet *ifp = adapter->ifp;
2614 EM_CORE_LOCK_ASSERT(adapter);
2615 EM_TX_LOCK_ASSERT(adapter);
2617 INIT_DEBUGOUT("em_stop: begin");
2619 em_disable_intr(adapter);
2620 callout_stop(&adapter->timer);
2621 callout_stop(&adapter->tx_fifo_timer);
2623 /* Tell the stack that the interface is no longer active */
2624 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2626 e1000_reset_hw(&adapter->hw);
/* WUC (wake-up control) exists only on 82544 and later. */
2627 if (adapter->hw.mac.type >= e1000_82544)
2628 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2632 /*********************************************************************
2634 * Determine hardware revision.
2636 **********************************************************************/
/*
 * em_identify_hardware - ensure bus-master/memory-access are enabled in
 * PCI config space (re-enabling them if the BIOS left them off), cache
 * the PCI IDs into the shared-code hw structure, and let the shared
 * code determine the MAC type.
 */
2638 em_identify_hardware(struct adapter *adapter)
2640 device_t dev = adapter->dev;
2642 /* Make sure our PCI config space has the necessary stuff set */
2643 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2644 if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2645 (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2646 device_printf(dev, "Memory Access and/or Bus Master bits "
2648 adapter->hw.bus.pci_cmd_word |=
2649 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2650 pci_write_config(dev, PCIR_COMMAND,
2651 adapter->hw.bus.pci_cmd_word, 2);
2654 /* Save off the information about this board */
2655 adapter->hw.vendor_id = pci_get_vendor(dev);
2656 adapter->hw.device_id = pci_get_device(dev);
2657 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2658 adapter->hw.subsystem_vendor_id =
2659 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2660 adapter->hw.subsystem_device_id =
2661 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2663 /* Do Shared Code Init and Setup */
2664 if (e1000_set_mac_type(&adapter->hw)) {
2665 device_printf(dev, "Setup init failure\n");
/*
 * em_allocate_pci_resources - map the memory BAR (register space), and
 * for pre-82571 (but post-82543) parts locate and map the I/O BAR as
 * well.  Also primes the MSI/X resource arrays and selects the
 * interrupt mode via em_setup_msix().
 */
2671 em_allocate_pci_resources(struct adapter *adapter)
2673 device_t dev = adapter->dev;
2674 int val, rid, error = E1000_SUCCESS;
2677 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2679 if (adapter->memory == NULL) {
2680 device_printf(dev, "Unable to allocate bus resource: memory\n");
2683 adapter->osdep.mem_bus_space_tag =
2684 rman_get_bustag(adapter->memory);
2685 adapter->osdep.mem_bus_space_handle =
2686 rman_get_bushandle(adapter->memory);
2687 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2689 /* Only older adapters use IO mapping */
2690 if ((adapter->hw.mac.type > e1000_82543) &&
2691 (adapter->hw.mac.type < e1000_82571)) {
2692 /* Figure out where our IO BAR is */
/* Scan the BAR registers; 64-bit memory BARs consume two slots
 * (the extra rid increment is on an elided line). */
2693 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2694 val = pci_read_config(dev, rid, 4);
2695 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2696 adapter->io_rid = rid;
2700 /* check for 64bit BAR */
2701 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2704 if (rid >= PCIR_CIS) {
2705 device_printf(dev, "Unable to locate IO BAR\n");
2708 adapter->ioport = bus_alloc_resource_any(dev,
2709 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2710 if (adapter->ioport == NULL) {
2711 device_printf(dev, "Unable to allocate bus resource: "
2715 adapter->hw.io_base = 0;
2716 adapter->osdep.io_bus_space_tag =
2717 rman_get_bustag(adapter->ioport);
2718 adapter->osdep.io_bus_space_handle =
2719 rman_get_bushandle(adapter->ioport);
2723 ** Init the resource arrays
2724 ** used by MSIX setup
2726 for (int i = 0; i < 3; i++) {
2727 adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
2728 adapter->tag[i] = NULL;
2729 adapter->res[i] = NULL;
2733 * Setup MSI/X or MSI if PCI Express
2736 adapter->msi = em_setup_msix(adapter);
2738 adapter->hw.back = &adapter->osdep;
2743 /*********************************************************************
2745 * Setup the Legacy or MSI Interrupt handler
2747 **********************************************************************/
/*
 * em_allocate_legacy - allocate and wire up the single legacy/MSI
 * interrupt.  Two compile-time flavors: a plain MPSAFE ithread handler
 * (EM_LEGACY_IRQ), or a fast/filter interrupt plus taskqueue-deferred
 * RX/TX and link processing.  The bus_setup_intr() argument order
 * differs across the __FreeBSD_version 700000 API change, hence the
 * nested #if blocks inside the calls.
 */
2749 em_allocate_legacy(struct adapter *adapter)
2751 device_t dev = adapter->dev;
2754 /* Manually turn off all interrupts */
2755 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2757 /* Legacy RID is 0 */
2758 if (adapter->msi == 0)
2759 adapter->rid[0] = 0;
2761 /* We allocate a single interrupt resource */
2762 adapter->res[0] = bus_alloc_resource_any(dev,
2763 SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
2764 if (adapter->res[0] == NULL) {
2765 device_printf(dev, "Unable to allocate bus resource: "
2770 #ifdef EM_LEGACY_IRQ
2771 /* We do Legacy setup */
2772 if ((error = bus_setup_intr(dev, adapter->res[0],
2773 #if __FreeBSD_version > 700000
2774 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr, adapter,
2776 INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter,
2778 &adapter->tag[0])) != 0) {
2779 device_printf(dev, "Failed to register interrupt handler");
2783 #else /* FAST_IRQ */
2785 * Try allocating a fast interrupt and the associated deferred
2786 * processing contexts.
2788 TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
2789 TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2790 adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2791 taskqueue_thread_enqueue, &adapter->tq);
2792 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2793 device_get_nameunit(adapter->dev));
2794 #if __FreeBSD_version < 700000
2795 if ((error = bus_setup_intr(dev, adapter->res[0],
2796 INTR_TYPE_NET | INTR_FAST, em_irq_fast, adapter,
/* Post-700000 API: em_irq_fast is the filter, ithread handler NULL. */
2798 if ((error = bus_setup_intr(dev, adapter->res[0],
2799 INTR_TYPE_NET, em_irq_fast, NULL, adapter,
2801 &adapter->tag[0])) != 0) {
2802 device_printf(dev, "Failed to register fast interrupt "
2803 "handler: %d\n", error);
2804 taskqueue_free(adapter->tq);
2808 #endif /* EM_LEGACY_IRQ */
2813 /*********************************************************************
2815 * Setup the MSIX Interrupt handlers
2816 * This is not really Multiqueue, rather
2817 * its just multiple interrupt vectors.
2819 **********************************************************************/
/*
 * em_allocate_msix - allocate the MSI-X interrupt resources and hook up
 * the three vectors: slot 0 = RX, slot 1 = TX, slot 2 = link.  Also
 * creates the deferred-processing tasks and their taskqueue.  Not true
 * multiqueue -- just multiple interrupt vectors (see header above).
 *
 * Fix: the link-vector failure path printed "Failed to register TX
 * handler" (copy-paste from the TX vector); it now says LINK so the
 * three error messages are distinguishable in the log.
 */
2821 em_allocate_msix(struct adapter *adapter)
2823 device_t dev = adapter->dev;
2826 /* Make sure all interrupts are disabled */
2827 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2829 /* First get the resources */
2830 for (int i = 0; i < adapter->msi; i++) {
2831 adapter->res[i] = bus_alloc_resource_any(dev,
2832 SYS_RES_IRQ, &adapter->rid[i], RF_ACTIVE);
2833 if (adapter->res[i] == NULL) {
2835 "Unable to allocate bus resource: "
2836 "MSIX Interrupt\n");
2842 * Now allocate deferred processing contexts.
2844 TASK_INIT(&adapter->rx_task, 0, em_handle_rx, adapter);
2845 TASK_INIT(&adapter->tx_task, 0, em_handle_tx, adapter);
2847 * Handle compatibility for msi case for deferral due to
2850 TASK_INIT(&adapter->rxtx_task, 0, em_handle_tx, adapter);
2851 TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2852 adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2853 taskqueue_thread_enqueue, &adapter->tq);
2854 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2855 device_get_nameunit(adapter->dev));
2858 * And setup the interrupt handlers
2861 /* First slot to RX */
2862 if ((error = bus_setup_intr(dev, adapter->res[0],
2863 #if __FreeBSD_version > 700000
2864 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, adapter,
2866 INTR_TYPE_NET | INTR_MPSAFE, em_msix_rx, adapter,
2868 &adapter->tag[0])) != 0) {
2869 device_printf(dev, "Failed to register RX handler");
/* Second slot to TX */
2874 if ((error = bus_setup_intr(dev, adapter->res[1],
2875 #if __FreeBSD_version > 700000
2876 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, adapter,
2878 INTR_TYPE_NET | INTR_MPSAFE, em_msix_tx, adapter,
2880 &adapter->tag[1])) != 0) {
2881 device_printf(dev, "Failed to register TX handler");
/* Third slot to link-state changes */
2886 if ((error = bus_setup_intr(dev, adapter->res[2],
2887 #if __FreeBSD_version > 700000
2888 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_link, adapter,
2890 INTR_TYPE_NET | INTR_MPSAFE, em_msix_link, adapter,
2892 &adapter->tag[2])) != 0) {
2893 device_printf(dev, "Failed to register LINK handler");
/*
 * em_free_pci_resources - tear down everything em_allocate_pci_resources
 * and the interrupt-allocation routines set up: interrupt handlers and
 * their IRQ resources (same array logic for MSI-X and legacy), the MSI
 * allocation itself, and the memory/flash/ioport BARs.
 */
2902 em_free_pci_resources(struct adapter *adapter)
2904 device_t dev = adapter->dev;
2906 /* Make sure the for loop below runs once */
/* (msi is forced to 1 on an elided line when it was 0/legacy.) */
2907 if (adapter->msi == 0)
2911 * First release all the interrupt resources:
2912 * notice that since these are just kept
2913 * in an array we can do the same logic
2914 * whether its MSIX or just legacy.
2916 for (int i = 0; i < adapter->msi; i++) {
2917 if (adapter->tag[i] != NULL) {
2918 bus_teardown_intr(dev, adapter->res[i],
2920 adapter->tag[i] = NULL;
2922 if (adapter->res[i] != NULL) {
2923 bus_release_resource(dev, SYS_RES_IRQ,
2924 adapter->rid[i], adapter->res[i]);
2929 pci_release_msi(dev);
2931 if (adapter->msix != NULL)
2932 bus_release_resource(dev, SYS_RES_MEMORY,
2933 PCIR_BAR(EM_MSIX_BAR), adapter->msix);
2935 if (adapter->memory != NULL)
2936 bus_release_resource(dev, SYS_RES_MEMORY,
2937 PCIR_BAR(0), adapter->memory);
2939 if (adapter->flash != NULL)
2940 bus_release_resource(dev, SYS_RES_MEMORY,
2941 EM_FLASH, adapter->flash);
2943 if (adapter->ioport != NULL)
2944 bus_release_resource(dev, SYS_RES_IOPORT,
2945 adapter->io_rid, adapter->ioport);
2949 * Setup MSI or MSI/X
/*
 * em_setup_msix - choose the interrupt mode.  Pre-82571 parts get
 * legacy interrupts; the 82574 ("Hartwell") maps its MSI-X BAR and
 * tries to allocate up to 3 MSI-X vectors; otherwise a single MSI is
 * attempted.  Returns the number of messages allocated (return lines
 * are elided in this listing); em_allocate_pci_resources stores the
 * result in adapter->msi.
 */
2952 em_setup_msix(struct adapter *adapter)
2954 device_t dev = adapter->dev;
2957 if (adapter->hw.mac.type < e1000_82571)
2960 /* Setup MSI/X for Hartwell */
2961 if (adapter->hw.mac.type == e1000_82574) {
2962 /* Map the MSIX BAR */
2963 int rid = PCIR_BAR(EM_MSIX_BAR);
2964 adapter->msix = bus_alloc_resource_any(dev,
2965 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2966 if (!adapter->msix) {
2967 /* May not be enabled */
2968 device_printf(adapter->dev,
2969 "Unable to map MSIX table \n");
2972 val = pci_msix_count(dev);
2974 ** 82574 can be configured for 5 but
2975 ** we limit use to 3.
2977 if (val > 3) val = 3;
2978 if ((val) && pci_alloc_msix(dev, &val) == 0) {
2979 device_printf(adapter->dev,"Using MSIX interrupts\n");
/* Fall back to a single MSI message when MSI-X is unavailable. */
2984 val = pci_msi_count(dev);
2985 if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
2987 device_printf(adapter->dev,"Using MSI interrupt\n");
2993 /*********************************************************************
2995 * Initialize the hardware to a configuration
2996 * as specified by the adapter structure.
2998 **********************************************************************/
/*
 * em_hardware_init - bring the MAC to the configuration described by
 * the adapter structure: global reset, the smart-power-down and PCHLAN
 * workarounds, flow-control watermark computation from the packet
 * buffer size, and finally the shared-code e1000_init_hw().
 *
 * Fix: the flow-control tunable sanity check used '||' --
 * "(em_fc_setting >= 0) || (em_fc_setting < 4)" is true for every
 * value, so an out-of-range tunable was written into requested_mode
 * verbatim.  The comment says "use the tunable location if sane", so
 * the test must be '&&'; out-of-range values now fall back to
 * e1000_fc_none.  (Also fixed the "arbitary" typo in the comment.)
 */
3000 em_hardware_init(struct adapter *adapter)
3002 device_t dev = adapter->dev;
3005 INIT_DEBUGOUT("em_hardware_init: begin");
3007 /* Issue a global reset */
3008 e1000_reset_hw(&adapter->hw);
3010 /* When hardware is reset, fifo_head is also reset */
3011 adapter->tx_fifo_head = 0;
3013 /* Set up smart power down as default off on newer adapters. */
3014 if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
3015 adapter->hw.mac.type == e1000_82572)) {
3018 /* Speed up time to link by disabling smart power down. */
3019 e1000_read_phy_reg(&adapter->hw,
3020 IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
3021 phy_tmp &= ~IGP02E1000_PM_SPD;
3022 e1000_write_phy_reg(&adapter->hw,
3023 IGP02E1000_PHY_POWER_MGMT, phy_tmp);
3027 * These parameters control the automatic generation (Tx) and
3028 * response (Rx) to Ethernet PAUSE frames.
3029 * - High water mark should allow for at least two frames to be
3030 * received after sending an XOFF.
3031 * - Low water mark works best when it is very near the high water mark.
3032 * This allows the receiver to restart by sending XON when it has
3033 * drained a bit. Here we use an arbitrary value of 1500 which will
3034 * restart after one full frame is pulled from the buffer. There
3035 * could be several smaller frames in the buffer and if so they will
3036 * not trigger the XON until their total number reduces the buffer
3038 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
3040 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
3043 adapter->hw.fc.high_water = rx_buffer_size -
3044 roundup2(adapter->max_frame_size, 1024);
3045 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
3047 if (adapter->hw.mac.type == e1000_80003es2lan)
3048 adapter->hw.fc.pause_time = 0xFFFF;
3050 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
3051 adapter->hw.fc.send_xon = TRUE;
3053 /* Set Flow control, use the tunable location if sane */
3054 if ((em_fc_setting >= 0) && (em_fc_setting < 4))
3055 adapter->hw.fc.requested_mode = em_fc_setting;
3057 adapter->hw.fc.requested_mode = e1000_fc_none;
3059 /* Override - workaround for PCHLAN issue */
3060 if (adapter->hw.mac.type == e1000_pchlan)
3061 adapter->hw.fc.requested_mode = e1000_fc_rx_pause;
3063 if (e1000_init_hw(&adapter->hw) < 0) {
3064 device_printf(dev, "Hardware Initialization Failed\n");
3068 e1000_check_for_link(&adapter->hw);
3073 /*********************************************************************
3075 * Setup networking device structure and register an interface.
3077 **********************************************************************/
/*
 * em_setup_interface - allocate and populate the ifnet: driver entry
 * points, queue sizing, capability bits (checksum offload, TSO4, VLAN,
 * polling, WOL -- varying with MAC type and __FreeBSD_version), attach
 * to the ethernet layer, and register the supported ifmedia types.
 */
3079 em_setup_interface(device_t dev, struct adapter *adapter)
3083 INIT_DEBUGOUT("em_setup_interface: begin");
3085 ifp = adapter->ifp = if_alloc(IFT_ETHER);
3087 panic("%s: can not if_alloc()", device_get_nameunit(dev));
3088 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3089 ifp->if_mtu = ETHERMTU;
3090 ifp->if_init = em_init;
3091 ifp->if_softc = adapter;
3092 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3093 ifp->if_ioctl = em_ioctl;
3094 ifp->if_start = em_start;
3095 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
3096 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
3097 IFQ_SET_READY(&ifp->if_snd);
3099 ether_ifattach(ifp, adapter->hw.mac.addr);
3101 ifp->if_capabilities = ifp->if_capenable = 0;
3103 #if __FreeBSD_version >= 800000
3104 /* Multiqueue tx functions */
3105 ifp->if_transmit = em_mq_start;
3106 ifp->if_qflush = em_qflush;
3107 adapter->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &adapter->tx_mtx);
/* Checksum offload exists on 82543 and later; VLAN checksum needs 7.x. */
3109 if (adapter->hw.mac.type >= e1000_82543) {
3111 #if __FreeBSD_version < 700000
3112 version_cap = IFCAP_HWCSUM;
3114 version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
3116 ifp->if_capabilities |= version_cap;
3117 ifp->if_capenable |= version_cap;
3120 #if __FreeBSD_version >= 700000
3121 /* Identify TSO capable adapters */
3122 if ((adapter->hw.mac.type > e1000_82544) &&
3123 (adapter->hw.mac.type != e1000_82547))
3124 ifp->if_capabilities |= IFCAP_TSO4;
3126 * By default only enable on PCI-E, this
3127 * can be overriden by ifconfig.
3129 if (adapter->hw.mac.type >= e1000_82571)
3130 ifp->if_capenable |= IFCAP_TSO4;
3133 * Tell the upper layer(s) we
3134 * support full VLAN capability
3136 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3137 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
3138 ifp->if_capenable |= (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING);
3141 ** Dont turn this on by default, if vlans are
3142 ** created on another pseudo device (eg. lagg)
3143 ** then vlan events are not passed thru, breaking
3144 ** operation, but with HW FILTER off it works. If
3145 ** using vlans directly on the em driver you can
3146 ** enable this and get full hardware tag filtering.
3148 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
3150 #ifdef DEVICE_POLLING
3151 ifp->if_capabilities |= IFCAP_POLLING;
3154 /* Limit WOL to MAGIC, not clear others are used */
3156 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
3157 ifp->if_capenable |= IFCAP_WOL_MAGIC;
3161 * Specify the media types supported by this adapter and register
3162 * callbacks to update media and link information
3164 ifmedia_init(&adapter->media, IFM_IMASK,
3165 em_media_change, em_media_status);
3166 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3167 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
3168 u_char fiber_type = IFM_1000_SX; /* default type */
/* 82545 fiber parts are 1000Base-LX rather than -SX. */
3170 if (adapter->hw.mac.type == e1000_82545)
3171 fiber_type = IFM_1000_LX;
3172 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
3174 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
3176 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
3177 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
3179 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
3181 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
/* The IFE PHY cannot do gigabit, so skip the 1000_T entries. */
3183 if (adapter->hw.phy.type != e1000_phy_ife) {
3184 ifmedia_add(&adapter->media,
3185 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
3186 ifmedia_add(&adapter->media,
3187 IFM_ETHER | IFM_1000_T, 0, NULL);
3190 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3191 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3195 /*********************************************************************
3197 * Workaround for SmartSpeed on 82541 and 82547 controllers
3199 **********************************************************************/
/*
 * em_smartspeed - 82541/82547 SmartSpeed workaround (per the header
 * comment above).  Only runs while the link is down, the PHY is IGP,
 * and 1000FDX autonegotiation is being advertised.  On repeated
 * master/slave configuration faults it toggles manual MS config and
 * restarts autonegotiation, cycling every EM_SMARTSPEED_MAX calls.
 */
3201 em_smartspeed(struct adapter *adapter)
3205 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
3206 adapter->hw.mac.autoneg == 0 ||
3207 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
3210 if (adapter->smartspeed == 0) {
3211 /* If Master/Slave config fault is asserted twice,
3212 * we assume back-to-back */
3213 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
3214 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
3216 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
3217 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
3218 e1000_read_phy_reg(&adapter->hw,
3219 PHY_1000T_CTRL, &phy_tmp);
3220 if(phy_tmp & CR_1000T_MS_ENABLE) {
/* Drop manual master/slave config and renegotiate. */
3221 phy_tmp &= ~CR_1000T_MS_ENABLE;
3222 e1000_write_phy_reg(&adapter->hw,
3223 PHY_1000T_CTRL, phy_tmp);
3224 adapter->smartspeed++;
3225 if(adapter->hw.mac.autoneg &&
3226 !e1000_copper_link_autoneg(&adapter->hw) &&
3227 !e1000_read_phy_reg(&adapter->hw,
3228 PHY_CONTROL, &phy_tmp)) {
3229 phy_tmp |= (MII_CR_AUTO_NEG_EN |
3230 MII_CR_RESTART_AUTO_NEG);
3231 e1000_write_phy_reg(&adapter->hw,
3232 PHY_CONTROL, phy_tmp);
3237 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
3238 /* If still no link, perhaps using 2/3 pair cable */
3239 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
3240 phy_tmp |= CR_1000T_MS_ENABLE;
3241 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
3242 if(adapter->hw.mac.autoneg &&
3243 !e1000_copper_link_autoneg(&adapter->hw) &&
3244 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
3245 phy_tmp |= (MII_CR_AUTO_NEG_EN |
3246 MII_CR_RESTART_AUTO_NEG);
3247 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
3250 /* Restart process after EM_SMARTSPEED_MAX iterations */
3251 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
3252 adapter->smartspeed = 0;
3257 * Manage DMA'able memory.
/*
 * em_dmamap_cb - bus_dmamap_load() callback: stores the single segment's
 * bus address through the bus_addr_t pointer passed as arg.  (The
 * error-bailout line is elided from this listing.)
 */
3260 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3264 *(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * em_dma_malloc - allocate a DMA-able region of 'size' bytes: create a
 * tag, allocate coherent memory, and load the map to learn the bus
 * address (via em_dmamap_cb).  On failure each step unwinds the
 * previous ones (the goto labels sit on elided lines); dma_map/dma_tag
 * are NULLed so em_dma_free() is safe afterwards.
 */
3268 em_dma_malloc(struct adapter *adapter, bus_size_t size,
3269 struct em_dma_alloc *dma, int mapflags)
3273 #if __FreeBSD_version >= 700000
3274 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
3276 error = bus_dma_tag_create(NULL, /* parent */
3278 EM_DBA_ALIGN, 0, /* alignment, bounds */
3279 BUS_SPACE_MAXADDR, /* lowaddr */
3280 BUS_SPACE_MAXADDR, /* highaddr */
3281 NULL, NULL, /* filter, filterarg */
3284 size, /* maxsegsize */
3286 NULL, /* lockfunc */
3290 device_printf(adapter->dev,
3291 "%s: bus_dma_tag_create failed: %d\n",
3296 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3297 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
3299 device_printf(adapter->dev,
3300 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3301 __func__, (uintmax_t)size, error);
3306 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3307 size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
3308 if (error || dma->dma_paddr == 0) {
3309 device_printf(adapter->dev,
3310 "%s: bus_dmamap_load failed: %d\n",
/* Unwind path: unload, free, destroy, then clear the handles. */
3318 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3320 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3321 bus_dma_tag_destroy(dma->dma_tag);
3323 dma->dma_map = NULL;
3324 dma->dma_tag = NULL;
/*
 * em_dma_free - release a region obtained from em_dma_malloc(): sync,
 * unload, free the memory, destroy the tag.  Idempotent: a NULL
 * dma_tag means nothing to do, and dma_map/dma_tag are cleared.
 */
3330 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
3332 if (dma->dma_tag == NULL)
3334 if (dma->dma_map != NULL) {
3335 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3336 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3337 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3338 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3339 dma->dma_map = NULL;
3341 bus_dma_tag_destroy(dma->dma_tag);
3342 dma->dma_tag = NULL;
3346 /*********************************************************************
3348 * Allocate memory for tx_buffer structures. The tx_buffer stores all
3349 * the information needed to transmit a packet on the wire.
3351 **********************************************************************/
/*
 * em_allocate_transmit_structures - create the TX mbuf DMA tag (sized
 * for TSO), allocate the tx_buffer bookkeeping array, and create one
 * DMA map per descriptor.  On any failure em_free_transmit_structures()
 * unwinds whatever was allocated.
 */
3353 em_allocate_transmit_structures(struct adapter *adapter)
3355 device_t dev = adapter->dev;
3356 struct em_buffer *tx_buffer;
3360 * Create DMA tags for tx descriptors
3362 #if __FreeBSD_version >= 700000
3363 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3365 if ((error = bus_dma_tag_create(NULL, /* parent */
3367 1, 0, /* alignment, bounds */
3368 BUS_SPACE_MAXADDR, /* lowaddr */
3369 BUS_SPACE_MAXADDR, /* highaddr */
3370 NULL, NULL, /* filter, filterarg */
3371 EM_TSO_SIZE, /* maxsize */
3372 EM_MAX_SCATTER, /* nsegments */
3373 EM_TSO_SEG_SIZE, /* maxsegsize */
3375 NULL, /* lockfunc */
3377 &adapter->txtag)) != 0) {
3378 device_printf(dev, "Unable to allocate TX DMA tag\n");
3382 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
3383 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3384 if (adapter->tx_buffer_area == NULL) {
3385 device_printf(dev, "Unable to allocate tx_buffer memory\n");
3390 /* Create the descriptor buffer dma maps */
3391 for (int i = 0; i < adapter->num_tx_desc; i++) {
3392 tx_buffer = &adapter->tx_buffer_area[i];
3393 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
3395 device_printf(dev, "Unable to create TX DMA map\n");
/* -1 marks "no pending end-of-packet descriptor" for this slot. */
3398 tx_buffer->next_eop = -1;
/* Error unwind (reached via elided 'fail:' label). */
3403 em_free_transmit_structures(adapter);
3407 /*********************************************************************
3409 * (Re)Initialize transmit structures.
3411 **********************************************************************/
/*
 * em_setup_transmit_structures - (re)initialize the TX ring: zero the
 * descriptors, release any mbufs still attached to the tx_buffer
 * slots, reset the ring indices/available count, and sync the
 * descriptor memory for the hardware.
 */
3413 em_setup_transmit_structures(struct adapter *adapter)
3415 struct em_buffer *tx_buffer;
3417 /* Clear the old ring contents */
3418 bzero(adapter->tx_desc_base,
3419 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
3421 /* Free any existing TX buffers */
/* NOTE(review): the 'tx_buffer++' loop increment is redundant -- the
 * pointer is reassigned from the array at the top of every iteration.
 * Harmless, but misleading. */
3422 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3423 tx_buffer = &adapter->tx_buffer_area[i];
3424 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3425 BUS_DMASYNC_POSTWRITE);
3426 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
/* m_freem(NULL) is a no-op, so unused slots are fine. */
3427 m_freem(tx_buffer->m_head);
3428 tx_buffer->m_head = NULL;
3429 tx_buffer->next_eop = -1;
3433 adapter->next_avail_tx_desc = 0;
3434 adapter->next_tx_to_clean = 0;
3435 adapter->num_tx_desc_avail = adapter->num_tx_desc;
3437 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3438 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3443 /*********************************************************************
3445 * Enable transmit unit.
3447 **********************************************************************/
/*
 * em_initialize_transmit_unit:
 * Program the hardware TX engine: descriptor ring base/length and
 * head/tail pointers, inter-packet gap (TIPG) per MAC type, TX
 * interrupt delay registers (TIDV/TADV), per-MAC TARC tweaks, and
 * finally TCTL, which enables the transmit unit.
 */
3449 em_initialize_transmit_unit(struct adapter *adapter)
3451 u32 tctl, tarc, tipg = 0;
3454 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
3455 /* Setup the Base and Length of the Tx Descriptor Ring */
3456 bus_addr = adapter->txdma.dma_paddr;
3457 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
3458 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
/* 64-bit ring address is split across the high/low base registers. */
3459 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
3460 (u32)(bus_addr >> 32));
3461 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
3463 /* Setup the HW Tx Head and Tail descriptor pointers */
3464 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
3465 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
3467 HW_DEBUGOUT2("Base = %x, Length = %x\n",
3468 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
3469 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
3471 /* Set the default values for the Tx Inter Packet Gap timer */
/* NOTE(review): case labels/breaks are elided from this listing. */
3472 switch (adapter->hw.mac.type) {
3474 tipg = DEFAULT_82542_TIPG_IPGT;
3475 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3476 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3478 case e1000_80003es2lan:
3479 tipg = DEFAULT_82543_TIPG_IPGR1;
3480 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
3481 E1000_TIPG_IPGR2_SHIFT;
/* Fiber/serdes media uses a different default IPGT than copper. */
3484 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3485 (adapter->hw.phy.media_type ==
3486 e1000_media_type_internal_serdes))
3487 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
3489 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
3490 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3491 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3494 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
3495 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
/* TADV (absolute interrupt delay) exists only on 82540 and later. */
3496 if(adapter->hw.mac.type >= e1000_82540)
3497 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
3498 adapter->tx_abs_int_delay.value);
/* MAC-specific TARC arbitration-count tuning. */
3500 if ((adapter->hw.mac.type == e1000_82571) ||
3501 (adapter->hw.mac.type == e1000_82572)) {
3502 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3503 tarc |= SPEED_MODE_BIT;
3504 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3505 } else if (adapter->hw.mac.type == e1000_80003es2lan) {
3506 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3508 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3509 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
3511 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
3514 /* Program the Transmit Control Register */
3515 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
3516 tctl &= ~E1000_TCTL_CT;
3517 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
3518 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
/* Multiple-request support on 82571 and newer. */
3520 if (adapter->hw.mac.type >= e1000_82571)
3521 tctl |= E1000_TCTL_MULR;
3523 /* This write will effectively turn on the transmit unit. */
3524 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
3526 /* Setup Transmit Descriptor Base Settings */
3527 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
/* Only request delayed interrupts if a TX delay is configured. */
3529 if (adapter->tx_int_delay.value > 0)
3530 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3533 /*********************************************************************
3535 * Free all transmit related data structures.
3537 **********************************************************************/
/*
 * em_free_transmit_structures:
 * Tear down all TX-side resources: per-descriptor mbufs and DMA maps,
 * the tx_buffer array itself, the TX DMA tag, and (FreeBSD 8+) the
 * buf_ring used for multiqueue transmit.
 */
3539 em_free_transmit_structures(struct adapter *adapter)
3541 struct em_buffer *tx_buffer;
3543 INIT_DEBUGOUT("free_transmit_structures: begin");
3545 if (adapter->tx_buffer_area != NULL) {
3546 for (int i = 0; i < adapter->num_tx_desc; i++) {
3547 tx_buffer = &adapter->tx_buffer_area[i];
/* A held mbuf means the map is loaded: sync, unload, then free it. */
3548 if (tx_buffer->m_head != NULL) {
3549 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3550 BUS_DMASYNC_POSTWRITE);
3551 bus_dmamap_unload(adapter->txtag,
3553 m_freem(tx_buffer->m_head);
3554 tx_buffer->m_head = NULL;
3555 } else if (tx_buffer->map != NULL)
3556 bus_dmamap_unload(adapter->txtag,
/* Destroy the map in either case and clear the stale pointer. */
3558 if (tx_buffer->map != NULL) {
3559 bus_dmamap_destroy(adapter->txtag,
3561 tx_buffer->map = NULL;
3565 if (adapter->tx_buffer_area != NULL) {
3566 free(adapter->tx_buffer_area, M_DEVBUF);
3567 adapter->tx_buffer_area = NULL;
3569 if (adapter->txtag != NULL) {
3570 bus_dma_tag_destroy(adapter->txtag);
3571 adapter->txtag = NULL;
3573 #if __FreeBSD_version >= 800000
3574 if (adapter->br != NULL)
3575 buf_ring_free(adapter->br, M_DEVBUF);
3579 /*********************************************************************
3581 * The offload context needs to be set when we transfer the first
3582 * packet of a particular protocol (TCP/UDP). This routine has been
3583 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
3585 * Added back the old method of keeping the current context type
3586 * and not setting if unnecessary, as this is reported to be a
3587 * big performance win. -jfv
3588 **********************************************************************/
/*
 * em_transmit_checksum_setup:
 * Build a TX context descriptor for hardware IP/TCP/UDP checksum
 * offload on the outgoing mbuf, setting *txd_upper/*txd_lower for the
 * subsequent data descriptors. Skips writing a new context when the
 * last one programmed (adapter->last_hw_offload) already matches, as
 * that is a reported performance win (see block comment above).
 * Handles an optional 802.1Q VLAN header when locating the L3 header.
 */
3590 em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
3591 u32 *txd_upper, u32 *txd_lower)
3593 struct e1000_context_desc *TXD = NULL;
3594 struct em_buffer *tx_buffer;
3595 struct ether_vlan_header *eh;
3596 struct ip *ip = NULL;
3597 struct ip6_hdr *ip6;
3598 int curr_txd, ehdrlen;
3599 u32 cmd, hdr_len, ip_hlen;
3604 cmd = hdr_len = ipproto = 0;
3605 curr_txd = adapter->next_avail_tx_desc;
3608 * Determine where frame payload starts.
3609 * Jump over vlan headers if already present,
3610 * helpful for QinQ too.
3612 eh = mtod(mp, struct ether_vlan_header *);
3613 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3614 etype = ntohs(eh->evl_proto);
3615 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3617 etype = ntohs(eh->evl_encap_proto);
3618 ehdrlen = ETHER_HDR_LEN;
3622 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
3623 * TODO: Support SCTP too when it hits the tree.
/* IPv4: parse the header length from the packet itself. */
3627 ip = (struct ip *)(mp->m_data + ehdrlen);
3628 ip_hlen = ip->ip_hl << 2;
3630 /* Setup of IP header checksum. */
3631 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3633 * Start offset for header checksum calculation.
3634 * End offset for header checksum calculation.
3635 * Offset of place to put the checksum.
3637 TXD = (struct e1000_context_desc *)
3638 &adapter->tx_desc_base[curr_txd];
3639 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3640 TXD->lower_setup.ip_fields.ipcse =
3641 htole16(ehdrlen + ip_hlen);
3642 TXD->lower_setup.ip_fields.ipcso =
3643 ehdrlen + offsetof(struct ip, ip_sum);
3644 cmd |= E1000_TXD_CMD_IP;
3645 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
/* Bail if the L3 header is not contiguous in the first mbuf. */
3648 if (mp->m_len < ehdrlen + ip_hlen)
3649 return; /* failure */
3651 hdr_len = ehdrlen + ip_hlen;
3655 case ETHERTYPE_IPV6:
3656 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3657 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3659 if (mp->m_len < ehdrlen + ip_hlen)
3660 return; /* failure */
3662 /* IPv6 doesn't have a header checksum. */
3664 hdr_len = ehdrlen + ip_hlen;
3665 ipproto = ip6->ip6_nxt;
/* TCP payload checksum offload. */
3676 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3677 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3678 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3679 /* no need for context if already set */
3680 if (adapter->last_hw_offload == CSUM_TCP)
3682 adapter->last_hw_offload = CSUM_TCP;
3684 * Start offset for payload checksum calculation.
3685 * End offset for payload checksum calculation.
3686 * Offset of place to put the checksum.
3688 TXD = (struct e1000_context_desc *)
3689 &adapter->tx_desc_base[curr_txd];
3690 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3691 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3692 TXD->upper_setup.tcp_fields.tucso =
3693 hdr_len + offsetof(struct tcphdr, th_sum);
3694 cmd |= E1000_TXD_CMD_TCP;
/* UDP payload checksum offload (no E1000_TXD_CMD_TCP bit). */
3699 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3700 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3701 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3702 /* no need for context if already set */
3703 if (adapter->last_hw_offload == CSUM_UDP)
3705 adapter->last_hw_offload = CSUM_UDP;
3707 * Start offset for header checksum calculation.
3708 * End offset for header checksum calculation.
3709 * Offset of place to put the checksum.
3711 TXD = (struct e1000_context_desc *)
3712 &adapter->tx_desc_base[curr_txd];
3713 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3714 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3715 TXD->upper_setup.tcp_fields.tucso =
3716 hdr_len + offsetof(struct udphdr, uh_sum);
/* Finalize the context descriptor and consume one ring slot for it. */
3724 TXD->tcp_seg_setup.data = htole32(0);
3725 TXD->cmd_and_length =
3726 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3727 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3728 tx_buffer->m_head = NULL;
3729 tx_buffer->next_eop = -1;
3731 if (++curr_txd == adapter->num_tx_desc)
3734 adapter->num_tx_desc_avail--;
3735 adapter->next_avail_tx_desc = curr_txd;
3739 #if __FreeBSD_version >= 700000
3740 /**********************************************************************
3742 * Setup work for hardware segmentation offload (TSO)
3744 **********************************************************************/
/*
 * em_tso_setup:
 * Program a TX context descriptor for hardware TCP segmentation
 * offload (TSO). Validates that the full Ethernet+IP+TCP header is in
 * the first mbuf, seeds th_sum with the pseudo-header checksum, fills
 * in the checksum/segmentation context fields (MSS, header length),
 * and consumes one ring slot. Returns FALSE when the packet cannot be
 * TSO'd (short header, non-TCP, or IPv6, which is not supported yet).
 */
3746 em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
3749 struct e1000_context_desc *TXD;
3750 struct em_buffer *tx_buffer;
3751 struct ether_vlan_header *eh;
3753 struct ip6_hdr *ip6;
3755 int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
3759 * This function could/should be extended to support IP/IPv6
3760 * fragmentation as well. But as they say, one step at a time.
3764 * Determine where frame payload starts.
3765 * Jump over vlan headers if already present,
3766 * helpful for QinQ too.
3768 eh = mtod(mp, struct ether_vlan_header *);
3769 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3770 etype = ntohs(eh->evl_proto);
3771 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3773 etype = ntohs(eh->evl_encap_proto);
3774 ehdrlen = ETHER_HDR_LEN;
3777 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3778 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3779 return FALSE; /* -1 */
3782 * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
3783 * TODO: Support SCTP too when it hits the tree.
3788 ip = (struct ip *)(mp->m_data + ehdrlen);
3789 if (ip->ip_p != IPPROTO_TCP)
3790 return FALSE; /* 0 */
/* Re-check length against the actual (option-carrying) IP header. */
3793 ip_hlen = ip->ip_hl << 2;
3794 if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
3795 return FALSE; /* -1 */
3796 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
/* Pre-load th_sum with the pseudo-header sum for the hardware. */
3798 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3799 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3801 th->th_sum = mp->m_pkthdr.csum_data;
3804 case ETHERTYPE_IPV6:
3806 return FALSE; /* Not supported yet. */
3807 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3808 if (ip6->ip6_nxt != IPPROTO_TCP)
3809 return FALSE; /* 0 */
3811 ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
3812 if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
3813 return FALSE; /* -1 */
3814 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
/*
 * NOTE(review): `ip->ip6_dst` below looks like it should be
 * `ip6->ip6_dst`. This IPv6 path appears unreachable (returns FALSE
 * above and in6_pseudo is marked "notyet") — confirm before enabling.
 */
3816 th->th_sum = in6_pseudo(ip6->ip6_src, ip->ip6_dst,
3817 htons(IPPROTO_TCP)); /* XXX: function notyet. */
3819 th->th_sum = mp->m_pkthdr.csum_data;
/* Total header bytes the hardware must replicate per segment. */
3825 hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);
3827 *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
3828 E1000_TXD_DTYP_D | /* Data descr type */
3829 E1000_TXD_CMD_TSE); /* Do TSE on this packet */
3831 /* IP and/or TCP header checksum calculation and insertion. */
3832 *txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
3833 E1000_TXD_POPTS_TXSM) << 8;
3835 curr_txd = adapter->next_avail_tx_desc;
3836 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3837 TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
3839 /* IPv6 doesn't have a header checksum. */
3842 * Start offset for header checksum calculation.
3843 * End offset for header checksum calculation.
3844 * Offset of place put the checksum.
3846 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3847 TXD->lower_setup.ip_fields.ipcse =
3848 htole16(ehdrlen + ip_hlen - 1);
3849 TXD->lower_setup.ip_fields.ipcso =
3850 ehdrlen + offsetof(struct ip, ip_sum);
3853 * Start offset for payload checksum calculation.
3854 * End offset for payload checksum calculation.
3855 * Offset of place to put the checksum.
3857 TXD->upper_setup.tcp_fields.tucss =
3859 TXD->upper_setup.tcp_fields.tucse = 0;
3860 TXD->upper_setup.tcp_fields.tucso =
3861 ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
3863 * Payload size per packet w/o any headers.
3864 * Length of all headers up to payload.
3866 TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
3867 TXD->tcp_seg_setup.fields.hdr_len = hdr_len;
3869 TXD->cmd_and_length = htole32(adapter->txd_cmd |
3870 E1000_TXD_CMD_DEXT | /* Extended descr */
3871 E1000_TXD_CMD_TSE | /* TSE context */
3872 (isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
3873 E1000_TXD_CMD_TCP | /* Do TCP checksum */
3874 (mp->m_pkthdr.len - (hdr_len))); /* Total len */
/* The context descriptor carries no mbuf; advance the ring by one. */
3876 tx_buffer->m_head = NULL;
3877 tx_buffer->next_eop = -1;
3879 if (++curr_txd == adapter->num_tx_desc)
3882 adapter->num_tx_desc_avail--;
3883 adapter->next_avail_tx_desc = curr_txd;
3884 adapter->tx_tso = TRUE;
3889 #endif /* __FreeBSD_version >= 700000 */
3891 /**********************************************************************
3893 * Examine each tx_buffer in the used queue. If the hardware is done
3894 * processing the packet then free associated resources. The
3895 * tx_buffer is put back on the free queue.
3897 **********************************************************************/
/*
 * em_txeof:
 * Reclaim completed TX descriptors. Walks packets from
 * next_tx_to_clean, and for each whose EOP descriptor has the DD
 * (descriptor-done) status bit set, zeroes the descriptors, frees the
 * mbuf and unloads its DMA map, and returns the slots to the free
 * count. Clears IFF_DRV_OACTIVE when enough room is recovered and the
 * watchdog when the ring is fully drained. Caller must hold the TX
 * lock (asserted below).
 */
3899 em_txeof(struct adapter *adapter)
3901 int first, last, done, num_avail;
3902 struct em_buffer *tx_buffer;
3903 struct e1000_tx_desc *tx_desc, *eop_desc;
3904 struct ifnet *ifp = adapter->ifp;
3906 EM_TX_LOCK_ASSERT(adapter);
/* Nothing outstanding — fast exit. */
3908 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3911 num_avail = adapter->num_tx_desc_avail;
3912 first = adapter->next_tx_to_clean;
3913 tx_desc = &adapter->tx_desc_base[first];
3914 tx_buffer = &adapter->tx_buffer_area[first];
3915 last = tx_buffer->next_eop;
3916 eop_desc = &adapter->tx_desc_base[last];
3919 * What this does is get the index of the
3920 * first descriptor AFTER the EOP of the
3921 * first packet, that way we can do the
3922 * simple comparison on the inner while loop.
3924 if (++last == adapter->num_tx_desc)
/* Pull fresh completion status written by the hardware. */
3928 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3929 BUS_DMASYNC_POSTREAD);
3931 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3932 /* We clean the range of the packet */
3933 while (first != done) {
3934 tx_desc->upper.data = 0;
3935 tx_desc->lower.data = 0;
3936 tx_desc->buffer_addr = 0;
3939 if (tx_buffer->m_head) {
3941 bus_dmamap_sync(adapter->txtag,
3943 BUS_DMASYNC_POSTWRITE);
3944 bus_dmamap_unload(adapter->txtag,
3947 m_freem(tx_buffer->m_head);
3948 tx_buffer->m_head = NULL;
3950 tx_buffer->next_eop = -1;
/* Progress was made — pet the watchdog timestamp. */
3951 adapter->watchdog_time = ticks;
3953 if (++first == adapter->num_tx_desc)
3956 tx_buffer = &adapter->tx_buffer_area[first];
3957 tx_desc = &adapter->tx_desc_base[first];
3959 /* See if we can continue to the next packet */
3960 last = tx_buffer->next_eop;
3962 eop_desc = &adapter->tx_desc_base[last];
3963 /* Get new done point */
3964 if (++last == adapter->num_tx_desc) last = 0;
/* Write back the zeroed descriptors before the device reuses them. */
3969 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3970 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3972 adapter->next_tx_to_clean = first;
3975 * If we have enough room, clear IFF_DRV_OACTIVE to
3976 * tell the stack that it is OK to send packets.
3977 * If there are no pending descriptors, clear the watchdog.
3979 if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
3980 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3981 if (num_avail == adapter->num_tx_desc) {
3982 adapter->watchdog_check = FALSE;
3983 adapter->num_tx_desc_avail = num_avail;
3988 adapter->num_tx_desc_avail = num_avail;
3992 /*********************************************************************
3994 * When Link is lost sometimes there is work still in the TX ring
3995 * which may result in a watchdog, rather than allow that we do an
3996 * attempted cleanup and then reinit here. Note that this has been
3997 * seen mostly with fiber adapters.
3999 **********************************************************************/
/*
 * em_tx_purge:
 * With link down and the TX watchdog armed, attempt a cleanup under
 * the TX lock; if work is still outstanding afterwards, reinitialize
 * the adapter rather than letting the watchdog fire (see banner
 * comment above — observed mostly on fiber adapters).
 */
4001 em_tx_purge(struct adapter *adapter)
4003 if ((!adapter->link_active) && (adapter->watchdog_check)) {
4004 EM_TX_LOCK(adapter);
4006 EM_TX_UNLOCK(adapter);
4007 if (adapter->watchdog_check) /* Still outstanding? */
4008 em_init_locked(adapter);
4012 /*********************************************************************
4014 * Get a buffer from system mbuf buffer pool.
4016 **********************************************************************/
/*
 * em_get_buf:
 * Allocate a fresh mbuf cluster for RX slot i, DMA-load it via the
 * spare map, then swap the spare map with the slot's map so the old
 * (possibly still-loaded) map becomes the new spare. Finally writes
 * the bus address into the RX descriptor. On cluster allocation
 * failure, bumps mbuf_cluster_failed and returns an error.
 */
4018 em_get_buf(struct adapter *adapter, int i)
4021 bus_dma_segment_t segs[1];
4023 struct em_buffer *rx_buffer;
4026 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
4028 adapter->mbuf_cluster_failed++;
4031 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Align the IP header when the frame still fits in the cluster. */
4033 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
4034 m_adj(m, ETHER_ALIGN);
4037 * Using memory from the mbuf cluster pool, invoke the
4038 * bus_dma machinery to arrange the memory mapping.
4040 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
4041 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
4047 /* If nsegs is wrong then the stack is corrupt. */
4048 KASSERT(nsegs == 1, ("Too many segments returned!"));
4050 rx_buffer = &adapter->rx_buffer_area[i];
4051 if (rx_buffer->m_head != NULL)
4052 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Spare-map swap: the slot takes the freshly loaded map. */
4054 map = rx_buffer->map;
4055 rx_buffer->map = adapter->rx_sparemap;
4056 adapter->rx_sparemap = map;
4057 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
4058 rx_buffer->m_head = m;
4060 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
4064 /*********************************************************************
4066 * Allocate memory for rx_buffer structures. Since we use one
4067 * rx_buffer per received packet, the maximum number of rx_buffer's
4068 * that we'll need is equal to the number of receive descriptors
4069 * that we've allocated.
4071 **********************************************************************/
/*
 * em_allocate_receive_structures:
 * Allocate the rx_buffer array (one per RX descriptor), create the RX
 * DMA tag (single MCLBYTES segment), the spare map used by
 * em_get_buf, and a DMA map per descriptor. On any failure, unwinds
 * via em_free_receive_structures.
 */
4073 em_allocate_receive_structures(struct adapter *adapter)
4075 device_t dev = adapter->dev;
4076 struct em_buffer *rx_buffer;
4079 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
4080 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
4081 if (adapter->rx_buffer_area == NULL) {
4082 device_printf(dev, "Unable to allocate rx_buffer memory\n");
/* FreeBSD 7+ derives the tag from the device's parent tag. */
4086 #if __FreeBSD_version >= 700000
4087 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
4089 error = bus_dma_tag_create(NULL, /* parent */
4091 1, 0, /* alignment, bounds */
4092 BUS_SPACE_MAXADDR, /* lowaddr */
4093 BUS_SPACE_MAXADDR, /* highaddr */
4094 NULL, NULL, /* filter, filterarg */
4095 MCLBYTES, /* maxsize */
4097 MCLBYTES, /* maxsegsize */
4099 NULL, /* lockfunc */
4103 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
4108 /* Create the spare map (used by getbuf) */
4109 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4110 &adapter->rx_sparemap);
4112 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* One DMA map per RX descriptor slot. */
4117 rx_buffer = adapter->rx_buffer_area;
4118 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4119 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4122 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
4131 em_free_receive_structures(adapter);
4135 /*********************************************************************
4137 * (Re)initialize receive structures.
4139 **********************************************************************/
/*
 * em_setup_receive_structures:
 * (Re)initialize the RX ring: zero the descriptor ring, free any
 * mbufs still held in the rx_buffer slots, refill every slot with a
 * fresh cluster via em_get_buf, reset the scan index, and sync the
 * descriptor DMA area.
 */
4141 em_setup_receive_structures(struct adapter *adapter)
4143 struct em_buffer *rx_buffer;
4146 /* Reset descriptor ring */
4147 bzero(adapter->rx_desc_base,
4148 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
4150 /* Free current RX buffers. */
4151 rx_buffer = adapter->rx_buffer_area;
4152 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4153 if (rx_buffer->m_head != NULL) {
4154 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4155 BUS_DMASYNC_POSTREAD);
4156 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
4157 m_freem(rx_buffer->m_head);
4158 rx_buffer->m_head = NULL;
4162 /* Allocate new ones. */
4163 for (i = 0; i < adapter->num_rx_desc; i++) {
4164 error = em_get_buf(adapter, i);
4169 /* Setup our descriptor pointers */
4170 adapter->next_rx_desc_to_check = 0;
4171 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4172 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4177 /*********************************************************************
4179 * Enable receive unit.
4181 **********************************************************************/
4182 #define MAX_INTS_PER_SEC 8000
4183 #define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256)
/*
 * em_initialize_receive_unit:
 * Program the hardware RX engine: disable receives during setup, set
 * interrupt throttling (ITR/EITR, RADV), descriptor ring base/length,
 * RCTL (buffer size, broadcast accept, long-packet enable), RX
 * checksum offload, a 82573 latency workaround, then re-enable
 * receives and set the head/tail pointers.
 */
4186 em_initialize_receive_unit(struct adapter *adapter)
4188 struct ifnet *ifp = adapter->ifp;
4192 INIT_DEBUGOUT("em_initialize_receive_unit: begin");
4195 * Make sure receives are disabled while setting
4196 * up the descriptor ring
4198 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4199 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
/* RADV/ITR exist on 82540 and later. */
4201 if (adapter->hw.mac.type >= e1000_82540) {
4202 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
4203 adapter->rx_abs_int_delay.value);
4205 * Set the interrupt throttling rate. Value is calculated
4206 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
4208 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
4212 ** When using MSIX interrupts we need to throttle
4213 ** using the EITR register (82574 only)
4216 for (int i = 0; i < 4; i++)
4217 E1000_WRITE_REG(&adapter->hw,
4218 E1000_EITR_82574(i), DEFAULT_ITR);
4220 /* Disable accelerated acknowledge */
4221 if (adapter->hw.mac.type == e1000_82574)
4222 E1000_WRITE_REG(&adapter->hw,
4223 E1000_RFCTL, E1000_RFCTL_ACK_DIS);
4225 /* Setup the Base and Length of the Rx Descriptor Ring */
4226 bus_addr = adapter->rxdma.dma_paddr;
4227 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
4228 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
4229 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
4230 (u32)(bus_addr >> 32));
4231 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
4234 /* Setup the Receive Control Register */
4235 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4236 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
4237 E1000_RCTL_RDMTS_HALF |
4238 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4240 /* Make sure VLAN Filters are off */
4241 rctl &= ~E1000_RCTL_VFE;
/* Store-bad-packets is only wanted for the 82543 TBI workaround. */
4243 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
4244 rctl |= E1000_RCTL_SBP;
4246 rctl &= ~E1000_RCTL_SBP;
/* RX buffer size selection; sizes >2048 need the BSEX extension. */
4248 switch (adapter->rx_buffer_len) {
4251 rctl |= E1000_RCTL_SZ_2048;
4254 rctl |= E1000_RCTL_SZ_4096 |
4255 E1000_RCTL_BSEX | E1000_RCTL_LPE;
4258 rctl |= E1000_RCTL_SZ_8192 |
4259 E1000_RCTL_BSEX | E1000_RCTL_LPE;
4262 rctl |= E1000_RCTL_SZ_16384 |
4263 E1000_RCTL_BSEX | E1000_RCTL_LPE;
/* Long-packet enable tracks whether the MTU exceeds standard. */
4267 if (ifp->if_mtu > ETHERMTU)
4268 rctl |= E1000_RCTL_LPE;
4270 rctl &= ~E1000_RCTL_LPE;
4272 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
4273 if ((adapter->hw.mac.type >= e1000_82543) &&
4274 (ifp->if_capenable & IFCAP_RXCSUM)) {
4275 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
4276 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
4277 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
4281 ** XXX TEMPORARY WORKAROUND: on some systems with 82573
4282 ** long latencies are observed, like Lenovo X60. This
4283 ** change eliminates the problem, but since having positive
4284 ** values in RDTR is a known source of problems on other
4285 ** platforms another solution is being sought.
4287 if (adapter->hw.mac.type == e1000_82573)
4288 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);
4290 /* Enable Receives */
4291 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4294 * Setup the HW Rx Head and
4295 * Tail Descriptor Pointers
4297 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
4298 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
4303 /*********************************************************************
4305 * Free receive related data structures.
4307 **********************************************************************/
/*
 * em_free_receive_structures:
 * Tear down all RX-side resources: the spare DMA map, per-descriptor
 * mbufs and maps, the rx_buffer array, and finally the RX DMA tag.
 * Mirrors em_free_transmit_structures for the receive path.
 */
4309 em_free_receive_structures(struct adapter *adapter)
4311 struct em_buffer *rx_buffer;
4314 INIT_DEBUGOUT("free_receive_structures: begin");
4316 if (adapter->rx_sparemap) {
4317 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
4318 adapter->rx_sparemap = NULL;
4321 /* Cleanup any existing buffers */
4322 if (adapter->rx_buffer_area != NULL) {
4323 rx_buffer = adapter->rx_buffer_area;
4324 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
/* A held mbuf means the map is loaded: sync, unload, free mbuf. */
4325 if (rx_buffer->m_head != NULL) {
4326 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4327 BUS_DMASYNC_POSTREAD);
4328 bus_dmamap_unload(adapter->rxtag,
4330 m_freem(rx_buffer->m_head);
4331 rx_buffer->m_head = NULL;
4332 } else if (rx_buffer->map != NULL)
4333 bus_dmamap_unload(adapter->rxtag,
4335 if (rx_buffer->map != NULL) {
4336 bus_dmamap_destroy(adapter->rxtag,
4338 rx_buffer->map = NULL;
4343 if (adapter->rx_buffer_area != NULL) {
4344 free(adapter->rx_buffer_area, M_DEVBUF);
4345 adapter->rx_buffer_area = NULL;
4348 if (adapter->rxtag != NULL) {
4349 bus_dma_tag_destroy(adapter->rxtag);
4350 adapter->rxtag = NULL;
4354 /*********************************************************************
4356 * This routine executes in interrupt context. It replenishes
4357 * the mbufs in the descriptor and sends data which has been
4358 * dma'ed into host memory to upper layer.
4360 * We loop at most count times if count is > 0, or until done if
4363 * For polling we also now return the number of cleaned packets
4364 *********************************************************************/
/*
 * em_rxeof:
 * RX completion processing (runs in interrupt context; takes the RX
 * lock). Walks descriptors with the DD status bit set, chains
 * multi-descriptor frames via adapter->fmp/lmp, strips the CRC,
 * handles the 82543 TBI error-acceptance workaround, replenishes each
 * consumed slot through em_get_buf (reusing the old mbuf on failure),
 * and hands completed frames to the stack with the RX lock dropped.
 * Processes at most `count` descriptors when count > 0. Updates RDT
 * on exit. Returns a packet count for polling (see banner above).
 */
4366 em_rxeof(struct adapter *adapter, int count)
/* NOTE(review): stray double semicolon below — harmless but untidy. */
4368 struct ifnet *ifp = adapter->ifp;;
4370 u8 status, accept_frame = 0, eop = 0;
4371 u16 len, desc_len, prev_len_adj;
4373 struct e1000_rx_desc *current_desc;
4375 EM_RX_LOCK(adapter);
4376 i = adapter->next_rx_desc_to_check;
4377 current_desc = &adapter->rx_desc_base[i];
4378 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4379 BUS_DMASYNC_POSTREAD);
/* No completed descriptor at the scan point — nothing to do. */
4381 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
4382 EM_RX_UNLOCK(adapter);
4386 while ((current_desc->status & E1000_RXD_STAT_DD) &&
4388 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4389 struct mbuf *m = NULL;
4391 mp = adapter->rx_buffer_area[i].m_head;
4393 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
4394 * needs to access the last received byte in the mbuf.
4396 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
4397 BUS_DMASYNC_POSTREAD);
4401 desc_len = le16toh(current_desc->length);
4402 status = current_desc->status;
/* On EOP, strip the trailing Ethernet CRC from the frame length. */
4403 if (status & E1000_RXD_STAT_EOP) {
4406 if (desc_len < ETHER_CRC_LEN) {
4408 prev_len_adj = ETHER_CRC_LEN - desc_len;
4410 len = desc_len - ETHER_CRC_LEN;
/* 82543 TBI workaround: some "error" frames are still acceptable. */
4416 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
4418 u32 pkt_len = desc_len;
4420 if (adapter->fmp != NULL)
4421 pkt_len += adapter->fmp->m_pkthdr.len;
4423 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
4424 if (TBI_ACCEPT(&adapter->hw, status,
4425 current_desc->errors, pkt_len, last_byte,
4426 adapter->min_frame_size, adapter->max_frame_size)) {
4427 e1000_tbi_adjust_stats_82543(&adapter->hw,
4428 &adapter->stats, pkt_len,
4429 adapter->hw.mac.addr,
4430 adapter->max_frame_size);
/* Refill the slot; on failure the old mbuf is recycled below. */
4438 if (em_get_buf(adapter, i) != 0) {
4443 /* Assign correct length to the current fragment */
4446 if (adapter->fmp == NULL) {
4447 mp->m_pkthdr.len = len;
4448 adapter->fmp = mp; /* Store the first mbuf */
4451 /* Chain mbuf's together */
4452 mp->m_flags &= ~M_PKTHDR;
4454 * Adjust length of previous mbuf in chain if
4455 * we received less than 4 bytes in the last
4458 if (prev_len_adj > 0) {
4459 adapter->lmp->m_len -= prev_len_adj;
4460 adapter->fmp->m_pkthdr.len -=
4463 adapter->lmp->m_next = mp;
4464 adapter->lmp = adapter->lmp->m_next;
4465 adapter->fmp->m_pkthdr.len += len;
/* Frame complete: finalize header, csum flags, VLAN tag. */
4469 adapter->fmp->m_pkthdr.rcvif = ifp;
4471 em_receive_checksum(adapter, current_desc,
4473 #ifndef __NO_STRICT_ALIGNMENT
4474 if (adapter->max_frame_size >
4475 (MCLBYTES - ETHER_ALIGN) &&
4476 em_fixup_rx(adapter) != 0)
4479 if (status & E1000_RXD_STAT_VP) {
4480 #if __FreeBSD_version < 700000
4481 VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
4482 (le16toh(current_desc->special) &
4483 E1000_RXD_SPC_VLAN_MASK));
4485 adapter->fmp->m_pkthdr.ether_vtag =
4486 (le16toh(current_desc->special) &
4487 E1000_RXD_SPC_VLAN_MASK);
4488 adapter->fmp->m_flags |= M_VLANTAG;
4491 #ifndef __NO_STRICT_ALIGNMENT
4495 adapter->fmp = NULL;
4496 adapter->lmp = NULL;
4501 /* Reuse loaded DMA map and just update mbuf chain */
4502 mp = adapter->rx_buffer_area[i].m_head;
4503 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
4504 mp->m_data = mp->m_ext.ext_buf;
4506 if (adapter->max_frame_size <=
4507 (MCLBYTES - ETHER_ALIGN))
4508 m_adj(mp, ETHER_ALIGN);
/* Rejected frame: drop any partially-assembled chain. */
4509 if (adapter->fmp != NULL) {
4510 m_freem(adapter->fmp);
4511 adapter->fmp = NULL;
4512 adapter->lmp = NULL;
4517 /* Zero out the receive descriptors status. */
4518 current_desc->status = 0;
4519 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4520 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4522 /* Advance our pointers to the next descriptor. */
4523 if (++i == adapter->num_rx_desc)
4525 /* Call into the stack */
/* Drop the RX lock around if_input; the stack may re-enter. */
4527 adapter->next_rx_desc_to_check = i;
4528 EM_RX_UNLOCK(adapter);
4529 (*ifp->if_input)(ifp, m);
4530 EM_RX_LOCK(adapter);
/* Re-read the index: it may have moved while unlocked. */
4532 i = adapter->next_rx_desc_to_check;
4534 current_desc = &adapter->rx_desc_base[i];
4536 adapter->next_rx_desc_to_check = i;
4538 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
4540 i = adapter->num_rx_desc - 1;
4541 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
4542 EM_RX_UNLOCK(adapter);
4546 #ifndef __NO_STRICT_ALIGNMENT
4548 * When jumbo frames are enabled we should realign entire payload on
4549 * architectures with strict alignment. This is a serious design mistake of 8254x
4550 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
4551 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
4552 * payload. On architectures without strict alignment restrictions 8254x still
4553 * performs unaligned memory access which would reduce the performance too.
4554 * To avoid copying over an entire frame to align, we allocate a new mbuf and
4555 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
4556 * existing mbuf chain.
4558 * Be aware, best performance of the 8254x is achieved only when jumbo frame is
4559 * not used at all on architectures with strict alignment.
/*
 * em_fixup_rx:
 * Strict-alignment fixup for jumbo RX frames (see banner above).
 * If there is room in the cluster, shift the frame so the payload is
 * aligned; otherwise prepend a small mbuf holding just the Ethernet
 * header. On mbuf allocation failure, drops the assembled chain and
 * counts it in dropped_pkts.
 */
4562 em_fixup_rx(struct adapter *adapter)
/* In-place shift: copy the frame up by ETHER_HDR_LEN to realign. */
4569 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
4570 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
4571 m->m_data += ETHER_HDR_LEN;
/* No room: move the Ethernet header into a new leading mbuf. */
4573 MGETHDR(n, M_DONTWAIT, MT_DATA);
4575 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
4576 m->m_data += ETHER_HDR_LEN;
4577 m->m_len -= ETHER_HDR_LEN;
4578 n->m_len = ETHER_HDR_LEN;
4579 M_MOVE_PKTHDR(n, m);
/* Allocation failed: drop the frame. */
4583 adapter->dropped_pkts++;
4584 m_freem(adapter->fmp);
4585 adapter->fmp = NULL;
4594 /*********************************************************************
4596 * Verify that the hardware indicated that the checksum is valid.
4597 * Inform the stack about the status of checksum so that stack
4598 * doesn't spend time verifying the checksum.
4600 *********************************************************************/
/*
 * em_receive_checksum:
 * Translate the hardware RX checksum status/error bits into mbuf
 * csum_flags so the stack can skip software verification. Clears the
 * flags for pre-82543 parts or when the hardware ignored the
 * checksum (IXSM).
 */
4602 em_receive_checksum(struct adapter *adapter,
4603 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
4605 /* 82543 or newer only */
4606 if ((adapter->hw.mac.type < e1000_82543) ||
4607 /* Ignore Checksum bit is set */
4608 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
4609 mp->m_pkthdr.csum_flags = 0;
/* Hardware verified the IP header checksum. */
4613 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
4615 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
4616 /* IP Checksum Good */
4617 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4618 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4621 mp->m_pkthdr.csum_flags = 0;
/* Hardware verified the TCP/UDP checksum; fake the pseudo-hdr sum. */
4625 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
4627 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
4628 mp->m_pkthdr.csum_flags |=
4629 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4630 mp->m_pkthdr.csum_data = htons(0xffff);
4635 #if __FreeBSD_version >= 700029
4637 * This routine is run via an vlan
/*
 * em_register_vlan:
 * VLAN config event handler: record a newly registered VLAN id in the
 * shadow VFTA bitmap and reinitialize the adapter to program it into
 * hardware. Ignores events for other interfaces and out-of-range ids.
 */
4641 em_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4643 struct adapter *adapter = ifp->if_softc;
4646 if (ifp->if_softc != arg) /* Not our event */
4649 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
/* VFTA is an array of 32-bit words: word index = vtag >> 5. */
4652 index = (vtag >> 5) & 0x7F;
4654 em_shadow_vfta[index] |= (1 << bit);
4655 ++adapter->num_vlans;
4656 /* Re-init to load the changes */
4661 * This routine is run via a vlan
/*
 * em_unregister_vlan: vlan-unconfig event callback.  Clears the VLAN id's
 * bit in the shadow VFTA and decrements the active-vlan count; mirrors
 * em_register_vlan.
 */
4665 em_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4667 struct adapter *adapter = ifp->if_softc;
4670 if (ifp->if_softc != arg)
4673 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
/* Same word/bit mapping as em_register_vlan, but clearing the bit. */
4676 index = (vtag >> 5) & 0x7F;
4678 em_shadow_vfta[index] &= ~(1 << bit);
4679 --adapter->num_vlans;
4680 /* Re-init to load the changes */
/*
 * em_setup_vlan_hw_support: after a soft reset has wiped hardware VLAN
 * state, repopulate the VFTA from the software shadow and re-enable VLAN
 * tag processing and filtering.
 */
4685 em_setup_vlan_hw_support(struct adapter *adapter)
4687 struct e1000_hw *hw = &adapter->hw;
4691 ** We get here thru init_locked, meaning
4692 ** a soft reset, this has already cleared
4693 ** the VFTA and other state, so if there
4694 ** have been no vlan's registered do nothing.
4696 if (adapter->num_vlans == 0)
4700 ** A soft reset zero's out the VFTA, so
4701 ** we need to repopulate it now.
4703 for (int i = 0; i < EM_VFTA_SIZE; i++)
4704 if (em_shadow_vfta[i] != 0)
4705 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
4706 i, em_shadow_vfta[i]);
/* VME: enable VLAN tag handling in the device control register. */
4708 reg = E1000_READ_REG(hw, E1000_CTRL);
4709 reg |= E1000_CTRL_VME;
4710 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4712 /* Enable the Filter Table */
4713 reg = E1000_READ_REG(hw, E1000_RCTL);
4714 reg &= ~E1000_RCTL_CFIEN;
4715 reg |= E1000_RCTL_VFE;
4716 E1000_WRITE_REG(hw, E1000_RCTL, reg);
4718 /* Update the frame size */
/* Allow room for the 4-byte 802.1Q tag on received frames. */
4719 E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
4720 adapter->max_frame_size + VLAN_TAG_SIZE);
/*
 * em_enable_intr: unmask device interrupts.  With MSIX, also enable the
 * auto-clear behavior for the MSIX vectors and add them to the mask.
 */
4725 em_enable_intr(struct adapter *adapter)
4727 struct e1000_hw *hw = &adapter->hw;
4728 u32 ims_mask = IMS_ENABLE_MASK;
4730 if (adapter->msix) {
4731 E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
4732 ims_mask |= EM_MSIX_MASK;
/* Writing IMS sets (enables) the selected interrupt cause bits. */
4734 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
/*
 * em_disable_intr: mask all device interrupts by clearing every cause
 * bit via the Interrupt Mask Clear register.
 */
4738 em_disable_intr(struct adapter *adapter)
4740 struct e1000_hw *hw = &adapter->hw;
/* NOTE(review): the EIAC clear is presumably gated on MSIX in elided code
 * (mirrors em_enable_intr) — confirm against the full source. */
4743 E1000_WRITE_REG(hw, EM_EIAC, 0);
4744 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
4748 * Bit of a misnomer, what this really means is
4749 * to enable OS management of the system... aka
4750 * to disable special hardware management features
/*
 * em_init_manageability: take ARP handling away from the management
 * firmware (the OS will handle it) while still steering management
 * packets on ports 623/664 to the host on 82571 and newer.
 */
4753 em_init_manageability(struct adapter *adapter)
4755 /* A shared code workaround */
4756 #define E1000_82542_MANC2H E1000_MANC2H
4757 if (adapter->has_manage) {
4758 int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
4759 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4761 /* disable hardware interception of ARP */
4762 manc &= ~(E1000_MANC_ARP_EN);
4764 /* enable receiving management packets to the host */
4765 if (adapter->hw.mac.type >= e1000_82571) {
4766 manc |= E1000_MANC_EN_MNG2HOST;
/* Route management ports 623 and 664 to the host. */
4767 #define E1000_MNG2HOST_PORT_623 (1 << 5)
4768 #define E1000_MNG2HOST_PORT_664 (1 << 6)
4769 manc2h |= E1000_MNG2HOST_PORT_623;
4770 manc2h |= E1000_MNG2HOST_PORT_664;
4771 E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
4774 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4779 * Give control back to hardware management
4780 * controller if there is one.
/*
 * em_release_manageability: inverse of em_init_manageability — hand ARP
 * interception back to the management firmware and stop forwarding
 * management packets to the host.
 */
4783 em_release_manageability(struct adapter *adapter)
4785 if (adapter->has_manage) {
4786 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4788 /* re-enable hardware interception of ARP */
4789 manc |= E1000_MANC_ARP_EN;
4791 if (adapter->hw.mac.type >= e1000_82571)
4792 manc &= ~E1000_MANC_EN_MNG2HOST;
4794 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4799 * em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
4800 * For ASF and Pass Through versions of f/w this means
4801 * that the driver is loaded. For AMT version type f/w
4802 * this means that the network i/f is open.
/*
 * em_get_hw_control: set the DRV_LOAD bit to tell firmware the driver
 * is loaded (or, for AMT firmware, that the interface is open).  The
 * 82573 signals through SWSM; other MACs use CTRL_EXT.
 */
4805 em_get_hw_control(struct adapter *adapter)
4809 if (adapter->hw.mac.type == e1000_82573) {
4810 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4811 E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4812 swsm | E1000_SWSM_DRV_LOAD);
/* All other MAC types: DRV_LOAD lives in the extended control register. */
4816 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4817 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4818 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4823 * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4824 * For ASF and Pass Through versions of f/w this means that
4825 * the driver is no longer loaded. For AMT versions of the
4826 * f/w this means that the network i/f is closed.
/*
 * em_release_hw_control: clear the DRV_LOAD bit (driver unloaded /
 * interface closed).  No-op when the device has no management firmware.
 */
4829 em_release_hw_control(struct adapter *adapter)
4833 if (!adapter->has_manage)
4836 if (adapter->hw.mac.type == e1000_82573) {
4837 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4838 E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4839 swsm & ~E1000_SWSM_DRV_LOAD);
/* All other MAC types: clear DRV_LOAD in the extended control register. */
4843 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4844 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4845 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
/*
 * em_is_valid_ether_addr: reject multicast addresses (low bit of the
 * first octet set) and the all-zero address.  (Fragment: the return
 * statements are elided in this view.)
 */
4850 em_is_valid_ether_addr(u8 *addr)
4852 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4854 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4862 ** Parse the interface capabilities with regard
4863 ** to both system management and wake-on-lan for
/*
 * em_get_wakeup: probe NVM/registers for the APM-enable (wake-on-LAN)
 * setting appropriate to this MAC type, then apply board-specific
 * overrides where the EEPROM is known to be wrong or WoL is only
 * supported on one port of a multi-port adapter.
 * NOTE(review): many case labels and break statements are elided in
 * this view; comments cover only the visible arms.
 */
4867 em_get_wakeup(device_t dev)
4869 struct adapter *adapter = device_get_softc(dev);
4870 u16 eeprom_data = 0, device_id, apme_mask;
4872 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
4873 apme_mask = EM_EEPROM_APME;
4875 switch (adapter->hw.mac.type) {
/* Older MACs: APM enable lives in NVM Init Control Word 2. */
4880 e1000_read_nvm(&adapter->hw,
4881 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
4882 apme_mask = EM_82544_APME;
4886 adapter->has_amt = TRUE;
4889 case e1000_82546_rev_3:
4892 case e1000_80003es2lan:
/* Dual-port parts keep per-port settings in separate NVM words. */
4893 if (adapter->hw.bus.func == 1) {
4894 e1000_read_nvm(&adapter->hw,
4895 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
4898 e1000_read_nvm(&adapter->hw,
4899 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
/* ICH family: APME comes from the Wakeup Control register, not NVM. */
4903 case e1000_ich10lan:
4905 apme_mask = E1000_WUC_APME;
4906 adapter->has_amt = TRUE;
4907 eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
4910 e1000_read_nvm(&adapter->hw,
4911 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
/* APM enabled in NVM/WUC: default to magic-packet + multicast wake. */
4914 if (eeprom_data & apme_mask)
4915 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
4917 * We have the eeprom settings, now apply the special cases
4918 * where the eeprom may be wrong or the board won't support
4919 * wake on lan on a particular port
4921 device_id = pci_get_device(dev);
4922 switch (device_id) {
4923 case E1000_DEV_ID_82546GB_PCIE:
4926 case E1000_DEV_ID_82546EB_FIBER:
4927 case E1000_DEV_ID_82546GB_FIBER:
4928 case E1000_DEV_ID_82571EB_FIBER:
4929 /* Wake events only supported on port A for dual fiber
4930 * regardless of eeprom setting */
4931 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
4932 E1000_STATUS_FUNC_1)
4935 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
4936 case E1000_DEV_ID_82571EB_QUAD_COPPER:
4937 case E1000_DEV_ID_82571EB_QUAD_FIBER:
4938 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
4939 /* if quad port adapter, disable WoL on all but port A */
4940 if (global_quad_port_a != 0)
4942 /* Reset for multiple quad port adapters */
4943 if (++global_quad_port_a == 4)
4944 global_quad_port_a = 0;
4952 * Enable PCI Wake On Lan capability
/*
 * em_enable_wakeup: arm the adapter for wake-on-LAN at suspend/shutdown.
 * Programs the MAC wakeup registers (or the PHY on pchlan parts), applies
 * ICH-family workarounds, and finally sets PME enable in PCI power
 * management config space when WoL is enabled on the interface.
 * NOTE(review): early returns and some conditionals are elided in view.
 */
4955 em_enable_wakeup(device_t dev)
4957 struct adapter *adapter = device_get_softc(dev);
4958 struct ifnet *ifp = adapter->ifp;
4959 u32 pmc, ctrl, ctrl_ext, rctl;
/* No PCI power-management capability: nothing to arm. */
4962 if ((pci_find_extcap(dev, PCIY_PMG, &pmc) != 0))
4965 /* Advertise the wakeup capability */
4966 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4967 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
4968 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4969 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4971 /* ICH workaround code */
4972 if ((adapter->hw.mac.type == e1000_ich8lan) ||
4973 (adapter->hw.mac.type == e1000_pchlan) ||
4974 (adapter->hw.mac.type == e1000_ich9lan) ||
4975 (adapter->hw.mac.type == e1000_ich10lan)) {
4976 e1000_disable_gig_wol_ich8lan(&adapter->hw);
4977 e1000_hv_phy_powerdown_workaround_ich8lan(&adapter->hw);
4980 /* Keep the laser running on Fiber adapters */
4981 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
4982 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
4983 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4984 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4985 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
4989 ** Determine type of Wakeup: note that wol
4990 ** is set with all bits on by default.
4992 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
4993 adapter->wol &= ~E1000_WUFC_MAG;
4995 if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
4996 adapter->wol &= ~E1000_WUFC_MC;
/* Multicast wake requires promiscuous-multicast receive. */
4998 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4999 rctl |= E1000_RCTL_MPE;
5000 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
/* pchlan parts do wakeup in the PHY rather than the MAC. */
5003 if (adapter->hw.mac.type == e1000_pchlan) {
5004 if (em_enable_phy_wakeup(adapter))
5007 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
5008 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
5011 if (adapter->hw.phy.type == e1000_phy_igp_3)
5012 e1000_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
5015 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
5016 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
5017 if (ifp->if_capenable & IFCAP_WOL)
5018 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
5019 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
5025 ** WOL in the newer chipset interfaces (pchlan)
5026 ** require thing to be copied into the phy
/*
 * em_enable_phy_wakeup: for pchlan parts, wakeup logic lives in the PHY.
 * Mirror the MAC's receive address registers (RARs), multicast table
 * (MTA), and RX control settings into their BM PHY equivalents, then
 * enable PHY-side wakeup via the page-769 host-wakeup bits.
 * NOTE(review): error-return lines are elided in this view.
 */
5029 em_enable_phy_wakeup(struct adapter *adapter)
5031 struct e1000_hw *hw = &adapter->hw;
5035 /* copy MAC RARs to PHY RARs */
5036 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
5037 mreg = E1000_READ_REG(hw, E1000_RAL(i));
5038 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
5039 e1000_write_phy_reg(hw, BM_RAR_M(i),
5040 (u16)((mreg >> 16) & 0xFFFF));
5041 mreg = E1000_READ_REG(hw, E1000_RAH(i));
5042 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
5043 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
5044 (u16)((mreg >> 16) & 0xFFFF));
5047 /* copy MAC MTA to PHY MTA */
5048 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5049 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
5050 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
5051 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
5052 (u16)((mreg >> 16) & 0xFFFF));
5055 /* configure PHY Rx Control register */
/* Translate each relevant MAC RCTL bit into its BM_RCTL counterpart. */
5056 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
5057 mreg = E1000_READ_REG(hw, E1000_RCTL);
5058 if (mreg & E1000_RCTL_UPE)
5059 preg |= BM_RCTL_UPE;
5060 if (mreg & E1000_RCTL_MPE)
5061 preg |= BM_RCTL_MPE;
5062 preg &= ~(BM_RCTL_MO_MASK);
5063 if (mreg & E1000_RCTL_MO_3)
5064 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5065 << BM_RCTL_MO_SHIFT);
5066 if (mreg & E1000_RCTL_BAM)
5067 preg |= BM_RCTL_BAM;
5068 if (mreg & E1000_RCTL_PMCF)
5069 preg |= BM_RCTL_PMCF;
5070 mreg = E1000_READ_REG(hw, E1000_CTRL);
5071 if (mreg & E1000_CTRL_RFCE)
5072 preg |= BM_RCTL_RFCE;
5073 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
5075 /* enable PHY wakeup in MAC register */
5076 E1000_WRITE_REG(hw, E1000_WUC,
5077 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5078 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
5080 /* configure and enable PHY wakeup in PHY registers */
5081 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
5082 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
5084 /* activate PHY wakeup */
/* Raw MDIC access below requires exclusive PHY ownership. */
5085 ret = hw->phy.ops.acquire(hw);
5087 printf("Could not acquire PHY\n");
5090 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
5091 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
5092 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
5094 printf("Could not read PHY page 769\n");
5097 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5098 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
5100 printf("Could not set PHY Host Wakeup bit\n");
5102 hw->phy.ops.release(hw);
5108 /*********************************************************************
5109 * 82544 Coexistence issue workaround.
5110 * There are 2 issues.
5111 * 1. Transmit Hang issue.
5112 * To detect this issue, following equation can be used...
5113 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
5114 * If SUM[3:0] is in between 1 to 4, we will have this issue.
5117 * To detect this issue, following equation can be used...
5118 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
5119 * If SUM[3:0] is in between 9 to c, we will have this issue.
5123 * Make sure we do not have ending address
5124 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
5126 *************************************************************************/
/*
 * em_fill_descriptors: 82544 coexistence workaround.  If the buffer's
 * ending alignment (low 3 address bits + low 4 length bits) lands in the
 * problematic ranges 1-4 (TX hang) or 9-0xC (DAC), split the buffer into
 * two descriptors so the last one is exactly 4 bytes; otherwise emit a
 * single descriptor.  Returns the number of descriptors filled (1 or 2).
 */
5128 em_fill_descriptors (bus_addr_t address, u32 length,
5129 PDESC_ARRAY desc_array)
5131 u32 safe_terminator;
5133 /* Since issue is sensitive to length and address.*/
5134 /* Let us first check the address...*/
/* NOTE(review): the guarding condition for this early-out is elided. */
5136 desc_array->descriptor[0].address = address;
5137 desc_array->descriptor[0].length = length;
5138 desc_array->elements = 1;
5139 return (desc_array->elements);
/* SUM[3:0] = ADDR[2:0] + SIZE[3:0], per the errata equation above. */
5141 safe_terminator = (u32)((((u32)address & 0x7) +
5142 (length & 0xF)) & 0xF);
5143 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
5144 if (safe_terminator == 0 ||
5145 (safe_terminator > 4 &&
5146 safe_terminator < 9) ||
5147 (safe_terminator > 0xC &&
5148 safe_terminator <= 0xF)) {
5149 desc_array->descriptor[0].address = address;
5150 desc_array->descriptor[0].length = length;
5151 desc_array->elements = 1;
5152 return (desc_array->elements);
/* Unsafe terminator: split off the final 4 bytes into a 2nd descriptor. */
5155 desc_array->descriptor[0].address = address;
5156 desc_array->descriptor[0].length = length - 4;
5157 desc_array->descriptor[1].address = address + (length - 4);
5158 desc_array->descriptor[1].length = 4;
5159 desc_array->elements = 2;
5160 return (desc_array->elements);
5163 /**********************************************************************
5165 * Update the board statistics counters.
5167 **********************************************************************/
/*
 * em_update_stats_counters: accumulate the hardware's clear-on-read
 * statistics registers into the driver's software counters, then derive
 * the ifnet collision/error totals from them.
 */
5169 em_update_stats_counters(struct adapter *adapter)
/* Symbol/sequence errors are only meaningful with link up (or copper). */
5173 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
5174 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
5175 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
5176 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
5178 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
5179 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
5180 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
5181 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
5183 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
5184 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
5185 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
5186 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
5187 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
5188 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
5189 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
5190 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
5191 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
5192 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
5193 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
5194 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
5195 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
5196 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
5197 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
5198 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
5199 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
5200 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
5201 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
5202 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
5204 /* For the 64-bit byte counters the low dword must be read first. */
5205 /* Both registers clear on the read of the high dword */
5207 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
5208 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);
5210 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
5211 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
5212 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
5213 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
5214 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
5216 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
5217 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
5219 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
5220 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
5221 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
5222 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
5223 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
5224 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
5225 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
5226 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
5227 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
5228 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* Counters that only exist on 82543 and newer MACs. */
5230 if (adapter->hw.mac.type >= e1000_82543) {
5231 adapter->stats.algnerrc +=
5232 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
5233 adapter->stats.rxerrc +=
5234 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
5235 adapter->stats.tncrs +=
5236 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
5237 adapter->stats.cexterr +=
5238 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
5239 adapter->stats.tsctc +=
5240 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
5241 adapter->stats.tsctfc +=
5242 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
/* Export derived totals to the ifnet interface statistics. */
5246 ifp->if_collisions = adapter->stats.colc;
5249 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
5250 adapter->stats.crcerrs + adapter->stats.algnerrc +
5251 adapter->stats.ruc + adapter->stats.roc +
5252 adapter->stats.mpc + adapter->stats.cexterr;
5255 ifp->if_oerrors = adapter->stats.ecol +
5256 adapter->stats.latecol + adapter->watchdog_events;
5260 /**********************************************************************
5262 * This routine is called only when em_display_debug_stats is enabled.
5263 * This routine provides a way to take a look at important statistics
5264 * maintained by the driver and hardware.
5266 **********************************************************************/
/*
 * em_print_debug_info: dump key hardware registers and driver counters
 * to the console for debugging (invoked from the debug_info sysctl).
 */
5268 em_print_debug_info(struct adapter *adapter)
5270 device_t dev = adapter->dev;
5271 u8 *hw_addr = adapter->hw.hw_addr;
5273 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
5274 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
5275 E1000_READ_REG(&adapter->hw, E1000_CTRL),
5276 E1000_READ_REG(&adapter->hw, E1000_RCTL));
/* PBA register: high word = TX packet buffer KB, low word = RX KB. */
5277 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
5278 ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
5279 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
5280 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
5281 adapter->hw.fc.high_water,
5282 adapter->hw.fc.low_water);
5283 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
5284 E1000_READ_REG(&adapter->hw, E1000_TIDV),
5285 E1000_READ_REG(&adapter->hw, E1000_TADV));
5286 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
5287 E1000_READ_REG(&adapter->hw, E1000_RDTR),
5288 E1000_READ_REG(&adapter->hw, E1000_RADV));
5289 device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
5290 (long long)adapter->tx_fifo_wrk_cnt,
5291 (long long)adapter->tx_fifo_reset_cnt);
/* Hardware ring head/tail pointers for the first TX and RX queues. */
5292 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
5293 E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
5294 E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
5295 device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
5296 E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
5297 E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
5298 device_printf(dev, "Num Tx descriptors avail = %d\n",
5299 adapter->num_tx_desc_avail);
5300 device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
5301 adapter->no_tx_desc_avail1);
5302 device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
5303 adapter->no_tx_desc_avail2);
5304 device_printf(dev, "Std mbuf failed = %ld\n",
5305 adapter->mbuf_alloc_failed);
5306 device_printf(dev, "Std mbuf cluster failed = %ld\n",
5307 adapter->mbuf_cluster_failed);
5308 device_printf(dev, "Driver dropped packets = %ld\n",
5309 adapter->dropped_pkts);
5310 device_printf(dev, "Driver tx dma failure in encap = %ld\n",
5311 adapter->no_tx_dma_setup);
/*
 * em_print_hw_stats: print the accumulated hardware statistics counters
 * to the console (invoked from the stats sysctl).
 */
5315 em_print_hw_stats(struct adapter *adapter)
5317 device_t dev = adapter->dev;
5319 device_printf(dev, "Excessive collisions = %lld\n",
5320 (long long)adapter->stats.ecol);
5321 #if (DEBUG_HW > 0) /* Dont output these errors normally */
5322 device_printf(dev, "Symbol errors = %lld\n",
5323 (long long)adapter->stats.symerrs);
5325 device_printf(dev, "Sequence errors = %lld\n",
5326 (long long)adapter->stats.sec);
5327 device_printf(dev, "Defer count = %lld\n",
5328 (long long)adapter->stats.dc);
5329 device_printf(dev, "Missed Packets = %lld\n",
5330 (long long)adapter->stats.mpc);
5331 device_printf(dev, "Receive No Buffers = %lld\n",
5332 (long long)adapter->stats.rnbc);
5333 /* RLEC is inaccurate on some hardware, calculate our own. */
5334 device_printf(dev, "Receive Length Errors = %lld\n",
5335 ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
5336 device_printf(dev, "Receive errors = %lld\n",
5337 (long long)adapter->stats.rxerrc);
5338 device_printf(dev, "Crc errors = %lld\n",
5339 (long long)adapter->stats.crcerrs);
5340 device_printf(dev, "Alignment errors = %lld\n",
5341 (long long)adapter->stats.algnerrc);
5342 device_printf(dev, "Collision/Carrier extension errors = %lld\n",
5343 (long long)adapter->stats.cexterr);
5344 device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
5345 device_printf(dev, "watchdog timeouts = %ld\n",
5346 adapter->watchdog_events);
5347 device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
5348 " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
5349 adapter->tx_irq , adapter->link_irq);
/* XON/XOFF: flow-control pause frames received and transmitted. */
5350 device_printf(dev, "XON Rcvd = %lld\n",
5351 (long long)adapter->stats.xonrxc);
5352 device_printf(dev, "XON Xmtd = %lld\n",
5353 (long long)adapter->stats.xontxc);
5354 device_printf(dev, "XOFF Rcvd = %lld\n",
5355 (long long)adapter->stats.xoffrxc);
5356 device_printf(dev, "XOFF Xmtd = %lld\n",
5357 (long long)adapter->stats.xofftxc);
5358 device_printf(dev, "Good Packets Rcvd = %lld\n",
5359 (long long)adapter->stats.gprc);
5360 device_printf(dev, "Good Packets Xmtd = %lld\n",
5361 (long long)adapter->stats.gptc);
5362 device_printf(dev, "TSO Contexts Xmtd = %lld\n",
5363 (long long)adapter->stats.tsctc);
5364 device_printf(dev, "TSO Contexts Failed = %lld\n",
5365 (long long)adapter->stats.tsctfc);
5368 /**********************************************************************
5370 * This routine provides a way to dump out the adapter eeprom,
5371 * often a useful debug/service tool. This only dumps the first
5372 * 32 words, stuff that matters is in that extent.
5374 **********************************************************************/
/*
 * em_print_nvm_info: hex-dump the first 32 16-bit words of the EEPROM,
 * 8 words per row with a leading offset column.
 */
5376 em_print_nvm_info(struct adapter *adapter)
5381 /* Its a bit crude, but it gets the job done */
5382 printf("\nInterface EEPROM Dump:\n");
5383 printf("Offset\n0x0000 ");
5384 for (i = 0, j = 0; i < 32; i++, j++) {
5385 if (j == 8) { /* Make the offset block */
5387 printf("\n0x00%x0 ",row);
5389 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
5390 printf("%04x ", eeprom_data);
/*
 * em_sysctl_debug_info: sysctl handler that dumps driver debug state.
 * Writing one value prints the debug info; another triggers the EEPROM
 * dump.  (The value comparisons are elided in this view.)
 */
5396 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5398 struct adapter *adapter;
5403 error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access or error: nothing to do. */
5405 if (error || !req->newptr)
5409 adapter = (struct adapter *)arg1;
5410 em_print_debug_info(adapter);
5413 * This value will cause a hex dump of the
5414 * first 32 16-bit words of the EEPROM to
5418 adapter = (struct adapter *)arg1;
5419 em_print_nvm_info(adapter);
/*
 * em_sysctl_stats: sysctl handler that prints the hardware statistics
 * when a triggering value is written.  (Comparison elided in this view.)
 */
5427 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
5429 struct adapter *adapter;
5434 error = sysctl_handle_int(oidp, &result, 0, req);
/* Read-only access or error: nothing to do. */
5436 if (error || !req->newptr)
5440 adapter = (struct adapter *)arg1;
5441 em_print_hw_stats(adapter);
/*
 * em_sysctl_int_delay: sysctl handler for the interrupt-delay tunables.
 * Validates the requested microsecond value, converts it to device
 * ticks, and writes it to the associated register under the core lock.
 */
5448 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
5450 struct em_int_delay_info *info;
5451 struct adapter *adapter;
5457 info = (struct em_int_delay_info *)arg1;
5458 usecs = info->value;
5459 error = sysctl_handle_int(oidp, &usecs, 0, req);
5460 if (error != 0 || req->newptr == NULL)
/* Reject values outside what the 16-bit tick field can represent. */
5462 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
5464 info->value = usecs;
5465 ticks = EM_USECS_TO_TICKS(usecs);
5467 adapter = info->adapter;
5469 EM_CORE_LOCK(adapter);
/* Only the low 16 bits of the register carry the delay value. */
5470 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
5471 regval = (regval & ~0xffff) | (ticks & 0xffff);
5472 /* Handle a few special cases. */
5473 switch (info->offset) {
/* Zero TX delay: disable the IDE bit rather than writing 0 to TIDV. */
5478 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
5479 /* Don't write 0 into the TIDV register. */
5482 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
5485 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
5486 EM_CORE_UNLOCK(adapter);
/*
 * em_add_int_delay_sysctl: register one read-write integer-delay sysctl
 * node, binding it to em_sysctl_int_delay via an em_int_delay_info
 * record that remembers the adapter, register offset, and current value.
 */
5491 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
5492 const char *description, struct em_int_delay_info *info,
5493 int offset, int value)
5495 info->adapter = adapter;
5496 info->offset = offset;
5497 info->value = value;
5498 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
5499 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5500 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5501 info, 0, em_sysctl_int_delay, "I", description);
5504 #ifndef EM_LEGACY_IRQ
5506 em_add_rx_process_limit(struct adapter *adapter, const char *name,
5507 const char *description, int *limit, int value)
5510 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
5511 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5512 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);