1 /**************************************************************************
3 Copyright (c) 2001-2007, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
43 #include <sys/endian.h>
44 #include <sys/kernel.h>
45 #include <sys/kthread.h>
46 #include <sys/malloc.h>
48 #include <sys/module.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <sys/taskqueue.h>
55 #include <machine/bus.h>
56 #include <machine/resource.h>
59 #include <net/ethernet.h>
61 #include <net/if_arp.h>
62 #include <net/if_dl.h>
63 #include <net/if_media.h>
65 #include <net/if_types.h>
66 #include <net/if_vlan_var.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/in.h>
70 #include <netinet/if_ether.h>
71 #include <netinet/ip.h>
72 #include <netinet/ip6.h>
73 #include <netinet/tcp.h>
74 #include <netinet/udp.h>
76 #include <machine/in_cksum.h>
77 #include <dev/pci/pcivar.h>
78 #include <dev/pci/pcireg.h>
80 #include "e1000_api.h"
81 #include "e1000_82575.h"
/*
 * NOTE(review): this file appears to be a line-numbered listing of the
 * FreeBSD if_em driver with blank/omitted lines elided; the leading
 * numbers on each line are original line numbers, not code. Verify all
 * blocks against a pristine sys/dev/em/if_em.c before building.
 */
84 /*********************************************************************
85 * Set this to one to display debug statistics
86 *********************************************************************/
/* Runtime debug toggle, read elsewhere in the driver (not static: presumably
 * intended to be patchable/visible externally — TODO confirm). */
87 int em_display_debug_stats = 0;
89 /*********************************************************************
91 *********************************************************************/
/* Driver version string reported to the user (listing elides the
 * "Driver version" banner comment line here). */
92 char em_driver_version[] = "Version - 6.7.3";
95 /*********************************************************************
98 * Used by probe to select devices to load on
99 * Last field stores an index into e1000_strings
100 * Last entry must be all 0s
102 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
103 *********************************************************************/
/* PCI ID match table walked by em_probe(); the zero vendor_id entry that
 * terminates the walk is elided from this listing (original lines ~200-201)
 * — confirm it exists in the full source. */
105 static em_vendor_info_t em_vendor_info_array[] =
107 /* Intel(R) PRO/1000 Network Connection */
108 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
109 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
110 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
111 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
112 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
118 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
128 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
132 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
142 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
143 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
147 PCI_ANY_ID, PCI_ANY_ID, 0},
149 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
150 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
151 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
153 { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
154 { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
155 { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
156 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
157 PCI_ANY_ID, PCI_ANY_ID, 0},
158 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
159 PCI_ANY_ID, PCI_ANY_ID, 0},
160 { 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
161 PCI_ANY_ID, PCI_ANY_ID, 0},
162 { 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
163 PCI_ANY_ID, PCI_ANY_ID, 0},
164 { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
165 { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
166 { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
167 { 0x8086, E1000_DEV_ID_82572EI, PCI_ANY_ID, PCI_ANY_ID, 0},
169 { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0},
170 { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0},
171 { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0},
172 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
173 PCI_ANY_ID, PCI_ANY_ID, 0},
174 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
175 PCI_ANY_ID, PCI_ANY_ID, 0},
176 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
177 PCI_ANY_ID, PCI_ANY_ID, 0},
178 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
179 PCI_ANY_ID, PCI_ANY_ID, 0},
180 { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
181 { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
182 { 0x8086, E1000_DEV_ID_ICH8_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
183 { 0x8086, E1000_DEV_ID_ICH8_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
184 { 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
185 { 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
186 { 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
188 { 0x8086, E1000_DEV_ID_ICH9_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
189 { 0x8086, E1000_DEV_ID_ICH9_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
190 { 0x8086, E1000_DEV_ID_ICH9_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
191 { 0x8086, E1000_DEV_ID_ICH9_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
192 { 0x8086, E1000_DEV_ID_ICH9_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
194 { 0x8086, E1000_DEV_ID_82575EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
195 { 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES,
196 PCI_ANY_ID, PCI_ANY_ID, 0},
197 { 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER,
198 PCI_ANY_ID, PCI_ANY_ID, 0},
199 /* required last entry */
203 /*********************************************************************
204 * Table of branding strings for all supported NICs.
205 *********************************************************************/
/* Indexed by the last field of em_vendor_info_array entries; used by
 * em_probe() to build the device description. Closing "};" is elided
 * from this listing. */
207 static char *em_strings[] = {
208 "Intel(R) PRO/1000 Network Connection"
211 /*********************************************************************
212 * Function prototypes
213 *********************************************************************/
/* Forward declarations for all file-local (static) driver routines.
 * NOTE(review): some prototypes span elided lines in this listing
 * (e.g. the trailing parameter lists at original lines 254, 256, 262),
 * so a few declarations below appear truncated. */
214 static int em_probe(device_t);
215 static int em_attach(device_t);
216 static int em_detach(device_t);
217 static int em_shutdown(device_t);
218 static int em_suspend(device_t);
219 static int em_resume(device_t);
220 static void em_start(struct ifnet *);
221 static void em_start_locked(struct ifnet *ifp);
222 static int em_ioctl(struct ifnet *, u_long, caddr_t);
223 static void em_watchdog(struct adapter *);
224 static void em_init(void *);
225 static void em_init_locked(struct adapter *);
226 static void em_stop(void *);
227 static void em_media_status(struct ifnet *, struct ifmediareq *);
228 static int em_media_change(struct ifnet *);
229 static void em_identify_hardware(struct adapter *);
230 static int em_allocate_pci_resources(struct adapter *);
231 static int em_allocate_intr(struct adapter *);
232 static bool em_setup_msix(struct adapter *);
233 static void em_free_intr(struct adapter *);
234 static void em_free_pci_resources(struct adapter *);
235 static void em_local_timer(void *);
236 static int em_hardware_init(struct adapter *);
237 static void em_setup_interface(device_t, struct adapter *);
238 static void em_setup_transmit_structures(struct adapter *);
239 static void em_initialize_transmit_unit(struct adapter *);
240 static int em_setup_receive_structures(struct adapter *);
241 static void em_initialize_receive_unit(struct adapter *);
242 static void em_enable_intr(struct adapter *);
243 static void em_disable_intr(struct adapter *);
244 static void em_free_transmit_structures(struct adapter *);
245 static void em_free_receive_structures(struct adapter *);
246 static void em_update_stats_counters(struct adapter *);
247 static void em_txeof(struct adapter *);
248 static void em_tx_purge(struct adapter *);
249 static int em_allocate_receive_structures(struct adapter *);
250 static int em_allocate_transmit_structures(struct adapter *);
251 static int em_rxeof(struct adapter *, int);
252 #ifndef __NO_STRICT_ALIGNMENT
253 static int em_fixup_rx(struct adapter *);
255 static void em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
257 static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
258 uint32_t *, uint32_t *);
259 static boolean_t em_tx_adv_ctx_setup(struct adapter *, struct mbuf *);
260 #if __FreeBSD_version >= 700000
261 static boolean_t em_tso_setup(struct adapter *, struct mbuf *, uint32_t *,
263 static boolean_t em_tso_adv_setup(struct adapter *, struct mbuf *, uint32_t *);
264 #endif /* FreeBSD_version >= 700000 */
265 static void em_set_promisc(struct adapter *);
266 static void em_disable_promisc(struct adapter *);
267 static void em_set_multi(struct adapter *);
268 static void em_print_hw_stats(struct adapter *);
269 static void em_update_link_status(struct adapter *);
270 static int em_get_buf(struct adapter *, int);
271 static void em_enable_hw_vlans(struct adapter *);
272 static int em_encap(struct adapter *, struct mbuf **);
273 static int em_adv_encap(struct adapter *, struct mbuf **);
274 static void em_smartspeed(struct adapter *);
275 static int em_82547_fifo_workaround(struct adapter *, int);
276 static void em_82547_update_fifo_head(struct adapter *, int);
277 static int em_82547_tx_fifo_reset(struct adapter *);
278 static void em_82547_move_tail(void *);
279 static int em_dma_malloc(struct adapter *, bus_size_t,
280 struct em_dma_alloc *, int);
281 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
282 static void em_print_debug_info(struct adapter *);
283 static void em_print_nvm_info(struct adapter *);
284 static int em_is_valid_ether_addr(uint8_t *);
285 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
286 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
287 static uint32_t em_fill_descriptors (bus_addr_t address, uint32_t length,
288 PDESC_ARRAY desc_array);
289 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
290 static void em_add_int_delay_sysctl(struct adapter *, const char *,
291 const char *, struct em_int_delay_info *, int, int);
292 /* Management and WOL Support */
293 static void em_init_manageability(struct adapter *);
294 static void em_release_manageability(struct adapter *);
295 static void em_get_hw_control(struct adapter *);
296 static void em_release_hw_control(struct adapter *);
297 static void em_enable_wakeup(device_t);
/* Legacy (pre-fast) interrupt handler; the surrounding #ifdef that
 * selects legacy vs. fast IRQ mode is elided in this listing. */
300 static void em_intr(void *);
302 #if __FreeBSD_version < 700000
303 static void em_intr_fast(void *);
/* #else branch (elided): on FreeBSD >= 7 the fast handler returns int. */
305 static int em_intr_fast(void *);
307 static void em_add_rx_process_limit(struct adapter *, const char *,
308 const char *, int *, int);
309 static void em_handle_rxtx(void *context, int pending);
310 static void em_handle_link(void *context, int pending);
311 #endif /* EM_FAST_IRQ */
313 #ifdef DEVICE_POLLING
314 static poll_handler_t em_poll;
317 /*********************************************************************
318 * FreeBSD Device Interface Entry Points
319 *********************************************************************/
/* newbus method table wiring the em_* entry points into the device
 * framework; terminating {0, 0} entry and closing "};" are elided
 * from this listing. */
321 static device_method_t em_methods[] = {
322 /* Device interface */
323 DEVMETHOD(device_probe, em_probe),
324 DEVMETHOD(device_attach, em_attach),
325 DEVMETHOD(device_detach, em_detach),
326 DEVMETHOD(device_shutdown, em_shutdown),
327 DEVMETHOD(device_suspend, em_suspend),
328 DEVMETHOD(device_resume, em_resume),
/* Driver descriptor: softc is one struct adapter per instance. */
332 static driver_t em_driver = {
333 "em", em_methods, sizeof(struct adapter),
336 static devclass_t em_devclass;
/* Register on the PCI bus and declare module dependencies. */
337 DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
338 MODULE_DEPEND(em, pci, 1, 1, 1);
339 MODULE_DEPEND(em, ether, 1, 1, 1);
341 /*********************************************************************
342 * Tunable default values.
343 *********************************************************************/
/* Interrupt-delay registers tick in 1.024 usec units; these macros
 * round-convert between ticks and microseconds. */
345 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
346 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
349 /* Allow common code without TSO */
/* Defaults for the hw.em.* loader tunables below; EM_TIDV/EM_RDTR/
 * EM_TADV/EM_RADV come from the driver header (not visible here). */
354 static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
355 static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
356 static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
357 static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
358 static int em_rxd = EM_DEFAULT_RXD;
359 static int em_txd = EM_DEFAULT_TXD;
360 static int em_smart_pwr_down = FALSE;
362 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
363 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
364 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
365 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
366 TUNABLE_INT("hw.em.rxd", &em_rxd);
367 TUNABLE_INT("hw.em.txd", &em_txd);
368 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
370 /* How many packets rxeof tries to clean at a time */
371 static int em_rx_process_limit = 100;
372 TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
374 /* Global used in WOL setup with multiport cards */
375 static int global_quad_port_a = 0;
377 /*********************************************************************
378 * Device identification routine
380 * em_probe determines if the driver should be loaded on
381 * adapter based on PCI vendor/device id of the adapter.
383 * return BUS_PROBE_DEFAULT on success, positive on failure
384 *********************************************************************/
/* NOTE(review): this listing elides the "static int" return-type line,
 * the early return when vendor != EM_VENDOR_ID, the loop increment
 * (ent++), and the trailing "return (ENXIO);" — confirm against the
 * complete source. */
387 em_probe(device_t dev)
389 char adapter_name[60];
390 uint16_t pci_vendor_id = 0;
391 uint16_t pci_device_id = 0;
392 uint16_t pci_subvendor_id = 0;
393 uint16_t pci_subdevice_id = 0;
394 em_vendor_info_t *ent;
396 INIT_DEBUGOUT("em_probe: begin");
398 pci_vendor_id = pci_get_vendor(dev);
399 if (pci_vendor_id != EM_VENDOR_ID)
402 pci_device_id = pci_get_device(dev);
403 pci_subvendor_id = pci_get_subvendor(dev);
404 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the match table; all-zero vendor_id entry terminates it. */
406 ent = em_vendor_info_array;
407 while (ent->vendor_id != 0) {
408 if ((pci_vendor_id == ent->vendor_id) &&
409 (pci_device_id == ent->device_id) &&
411 ((pci_subvendor_id == ent->subvendor_id) ||
412 (ent->subvendor_id == PCI_ANY_ID)) &&
414 ((pci_subdevice_id == ent->subdevice_id) ||
415 (ent->subdevice_id == PCI_ANY_ID))) {
416 sprintf(adapter_name, "%s %s",
417 em_strings[ent->index],
419 device_set_desc_copy(dev, adapter_name);
420 return (BUS_PROBE_DEFAULT);
428 /*********************************************************************
429 * Device initialization routine
431 * The attach entry point is called when the driver is being loaded.
432 * This routine identifies the type of hardware, allocates all resources
433 * and initializes the hardware.
435 * return 0 on success, positive on failure
436 *********************************************************************/
/* NOTE(review): this listing elides many lines of em_attach — the
 * "static int" return type, local declarations (tsize/rsize/error),
 * the "goto err_*" statements after each device_printf, else-branches,
 * several switch cases, and the err_* unwind labels near the end.
 * The visible lines document intent only; do not treat gaps as absent
 * behavior. */
439 em_attach(device_t dev)
441 struct adapter *adapter;
444 u16 eeprom_data, device_id;
446 INIT_DEBUGOUT("em_attach: begin");
448 adapter = device_get_softc(dev);
449 adapter->dev = adapter->osdep.dev = dev;
450 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
451 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Per-device sysctl handlers for debug and statistics dumps. */
454 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
455 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
456 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
457 em_sysctl_debug_info, "I", "Debug Information");
459 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
460 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
461 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
462 em_sysctl_stats, "I", "Statistics");
464 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
465 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
467 /* Determine hardware and mac info */
468 em_identify_hardware(adapter);
470 /* Setup PCI resources */
471 if (em_allocate_pci_resources(adapter)) {
472 device_printf(dev, "Allocation of PCI resources failed\n");
478 ** For ICH8 and family we need to
479 ** map the flash memory, and this
480 ** must happen after the MAC is
483 if ((adapter->hw.mac.type == e1000_ich8lan) ||
484 (adapter->hw.mac.type == e1000_ich9lan)) {
485 int rid = EM_BAR_TYPE_FLASH;
/* NOTE(review): result of bus_alloc_resource_any() is used below
 * without a visible NULL check — confirm handling in full source. */
486 adapter->flash_mem = bus_alloc_resource_any(dev,
487 SYS_RES_MEMORY, &rid, RF_ACTIVE);
488 /* This is used in the shared code */
489 adapter->hw.flash_address = (u8 *)adapter->flash_mem;
490 adapter->osdep.flash_bus_space_tag =
491 rman_get_bustag(adapter->flash_mem);
492 adapter->osdep.flash_bus_space_handle =
493 rman_get_bushandle(adapter->flash_mem);
496 /* Do Shared Code initialization */
497 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
498 device_printf(dev, "Setup of Shared code failed\n");
503 e1000_get_bus_info(&adapter->hw);
505 /* Set up some sysctls for the tunable interrupt delays */
506 em_add_int_delay_sysctl(adapter, "rx_int_delay",
507 "receive interrupt delay in usecs", &adapter->rx_int_delay,
508 E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
509 em_add_int_delay_sysctl(adapter, "tx_int_delay",
510 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
511 E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
/* Absolute-delay registers only exist on 82540 and newer MACs. */
512 if (adapter->hw.mac.type >= e1000_82540) {
513 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
514 "receive interrupt delay limit in usecs",
515 &adapter->rx_abs_int_delay,
516 E1000_REGISTER(&adapter->hw, E1000_RADV),
517 em_rx_abs_int_delay_dflt);
518 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
519 "transmit interrupt delay limit in usecs",
520 &adapter->tx_abs_int_delay,
521 E1000_REGISTER(&adapter->hw, E1000_TADV),
522 em_tx_abs_int_delay_dflt);
526 /* Sysctls for limiting the amount of work done in the taskqueue */
527 em_add_rx_process_limit(adapter, "rx_processing_limit",
528 "max number of rx packets to process", &adapter->rx_process_limit,
529 em_rx_process_limit);
533 * Validate number of transmit and receive descriptors. It
534 * must not exceed hardware maximum, and must be multiple
535 * of E1000_DBA_ALIGN.
537 if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
538 (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
539 (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
540 (em_txd < EM_MIN_TXD)) {
541 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
542 EM_DEFAULT_TXD, em_txd);
543 adapter->num_tx_desc = EM_DEFAULT_TXD;
/* else-branch (elided "} else"): accept the tunable as-is. */
545 adapter->num_tx_desc = em_txd;
546 if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
547 (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
548 (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
549 (em_rxd < EM_MIN_RXD)) {
550 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
551 EM_DEFAULT_RXD, em_rxd);
552 adapter->num_rx_desc = EM_DEFAULT_RXD;
554 adapter->num_rx_desc = em_rxd;
/* Default PHY/MAC autonegotiation policy and RX buffer size. */
556 adapter->hw.mac.autoneg = DO_AUTO_NEG;
557 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
558 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
559 adapter->rx_buffer_len = 2048;
561 e1000_init_script_state_82541(&adapter->hw, TRUE);
562 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
565 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
566 adapter->hw.phy.mdix = AUTO_ALL_MODES;
567 adapter->hw.phy.disable_polarity_correction = FALSE;
568 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
572 * Set the frame limits assuming
573 * standard ethernet sized frames.
575 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
576 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
579 * This controls when hardware reports transmit completion
582 adapter->hw.mac.report_tx_early = 1;
584 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
587 /* Allocate Transmit Descriptor ring */
588 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
589 device_printf(dev, "Unable to allocate tx_desc memory\n");
593 adapter->tx_desc_base =
594 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
596 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
599 /* Allocate Receive Descriptor ring */
600 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
601 device_printf(dev, "Unable to allocate rx_desc memory\n");
605 adapter->rx_desc_base =
606 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
608 /* Make sure we have a good EEPROM before we read from it */
609 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
611 ** Some PCI-E parts fail the first check due to
612 ** the link being in sleep state, call it again,
613 ** if it fails a second time it's a real issue.
615 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
617 "The EEPROM Checksum Is Not Valid\n");
623 /* Initialize the hardware */
624 if (em_hardware_init(adapter)) {
625 device_printf(dev, "Unable to initialize the hardware\n");
630 /* Copy the permanent MAC address out of the EEPROM */
631 if (e1000_read_mac_addr(&adapter->hw) < 0) {
632 device_printf(dev, "EEPROM read error while reading MAC"
638 if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
639 device_printf(dev, "Invalid MAC address\n");
644 /* Allocate transmit descriptors and buffers */
645 if (em_allocate_transmit_structures(adapter)) {
646 device_printf(dev, "Could not setup transmit structures\n");
651 /* Allocate receive descriptors and buffers */
652 if (em_allocate_receive_structures(adapter)) {
653 device_printf(dev, "Could not setup receive structures\n");
658 /* Setup OS specific network interface */
659 em_setup_interface(dev, adapter);
661 em_allocate_intr(adapter);
663 /* Initialize statistics */
664 em_update_stats_counters(adapter);
666 adapter->hw.mac.get_link_status = 1;
667 em_update_link_status(adapter);
669 /* Indicate SOL/IDER usage */
670 if (e1000_check_reset_block(&adapter->hw))
672 "PHY reset is blocked due to SOL/IDER session.\n");
674 /* Determine if we have to control management hardware */
675 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
/* Read the APME (wake-on-LAN enable) bit; its NVM location varies by
 * MAC type. Several switch cases are elided in this listing. */
680 switch (adapter->hw.mac.type) {
686 case e1000_82546_rev_3:
688 case e1000_80003es2lan:
689 if (adapter->hw.bus.func == 1)
690 e1000_read_nvm(&adapter->hw,
691 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
693 e1000_read_nvm(&adapter->hw,
694 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
695 eeprom_data &= EM_EEPROM_APME;
698 /* APME bit in EEPROM is mapped to WUC.APME */
699 eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) &
704 adapter->wol = E1000_WUFC_MAG;
706 * We have the eeprom settings, now apply the special cases
707 * where the eeprom may be wrong or the board won't support
708 * wake on lan on a particular port
710 device_id = pci_get_device(dev);
712 case E1000_DEV_ID_82546GB_PCIE:
715 case E1000_DEV_ID_82546EB_FIBER:
716 case E1000_DEV_ID_82546GB_FIBER:
717 case E1000_DEV_ID_82571EB_FIBER:
718 /* Wake events only supported on port A for dual fiber
719 * regardless of eeprom setting */
720 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
724 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
725 case E1000_DEV_ID_82571EB_QUAD_COPPER:
726 case E1000_DEV_ID_82571EB_QUAD_FIBER:
727 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
728 /* if quad port adapter, disable WoL on all but port A */
729 if (global_quad_port_a != 0)
731 /* Reset for multiple quad port adapters */
732 if (++global_quad_port_a == 4)
733 global_quad_port_a = 0;
737 /* Do we need workaround for 82544 PCI-X adapter? */
738 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
739 adapter->hw.mac.type == e1000_82544)
740 adapter->pcix_82544 = TRUE;
742 adapter->pcix_82544 = FALSE;
744 /* Tell the stack that the interface is not active */
745 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
747 INIT_DEBUGOUT("em_attach: end");
/* Error-unwind path (err_* labels elided): release in reverse order
 * of acquisition. */
752 em_free_transmit_structures(adapter);
755 em_release_hw_control(adapter);
756 e1000_remove_device(&adapter->hw);
757 em_dma_free(adapter, &adapter->rxdma);
759 em_dma_free(adapter, &adapter->txdma);
762 em_free_intr(adapter);
763 em_free_pci_resources(adapter);
764 EM_TX_LOCK_DESTROY(adapter);
765 EM_CORE_LOCK_DESTROY(adapter);
770 /*********************************************************************
771 * Device removal routine
773 * The detach entry point is called when the driver is being removed.
774 * This routine stops the adapter and deallocates all the resources
775 * that were allocated for driver operation.
777 * return 0 on success, positive on failure
778 *********************************************************************/
/* NOTE(review): listing elides the return type, the EBUSY return in the
 * vlan-in-use branch, the em_stop() call, closing braces, and the final
 * "return (0);" — confirm against the complete source. */
781 em_detach(device_t dev)
783 struct adapter *adapter = device_get_softc(dev);
784 struct ifnet *ifp = adapter->ifp;
786 INIT_DEBUGOUT("em_detach: begin");
788 /* Make sure VLANS are not using driver */
789 #if __FreeBSD_version >= 700000
790 if (adapter->ifp->if_vlantrunk != NULL) {
/* #else branch (elided): pre-7.x uses if_nvlans instead. */
792 if (adapter->ifp->if_nvlans != 0) {
794 device_printf(dev,"Vlan in use, detach first\n");
798 #ifdef DEVICE_POLLING
799 if (ifp->if_capenable & IFCAP_POLLING)
800 ether_poll_deregister(ifp);
803 em_disable_intr(adapter);
804 em_free_intr(adapter);
805 EM_CORE_LOCK(adapter);
807 adapter->in_detach = 1;
809 e1000_phy_hw_reset(&adapter->hw);
811 em_release_manageability(adapter);
/* Give firmware back control of the hardware on manageability-capable
 * parts before we go away. */
813 if (((adapter->hw.mac.type == e1000_82573) ||
814 (adapter->hw.mac.type == e1000_ich8lan) ||
815 (adapter->hw.mac.type == e1000_ich9lan)) &&
816 e1000_check_mng_mode(&adapter->hw))
817 em_release_hw_control(adapter);
/* Arm wake-on-LAN (magic packet) if adapter->wol was configured. */
820 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
821 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
822 em_enable_wakeup(dev);
825 ether_ifdetach(adapter->ifp);
827 callout_drain(&adapter->timer);
828 callout_drain(&adapter->tx_fifo_timer);
830 em_free_pci_resources(adapter);
831 bus_generic_detach(dev);
834 e1000_remove_device(&adapter->hw);
835 em_free_transmit_structures(adapter);
836 em_free_receive_structures(adapter);
837 EM_TX_UNLOCK(adapter);
838 EM_CORE_UNLOCK(adapter);
840 /* Free Transmit Descriptor ring */
841 if (adapter->tx_desc_base) {
842 em_dma_free(adapter, &adapter->txdma);
843 adapter->tx_desc_base = NULL;
846 /* Free Receive Descriptor ring */
847 if (adapter->rx_desc_base) {
848 em_dma_free(adapter, &adapter->rxdma);
849 adapter->rx_desc_base = NULL;
852 EM_TX_LOCK_DESTROY(adapter);
853 EM_CORE_LOCK_DESTROY(adapter);
858 /*********************************************************************
860 * Shutdown entry point
862 **********************************************************************/
/* System shutdown is handled identically to suspend: quiesce the
 * adapter and arm wake-up. Return type and braces elided in listing. */
865 em_shutdown(device_t dev)
867 return em_suspend(dev);
871 * Suspend/resume device methods.
/* NOTE(review): return type, the EM_TX_LOCK/em_stop() calls before the
 * unlock, and closing braces are elided in this listing — confirm. */
874 em_suspend(device_t dev)
876 struct adapter *adapter = device_get_softc(dev);
878 EM_CORE_LOCK(adapter);
882 EM_TX_UNLOCK(adapter);
884 em_release_manageability(adapter);
/* Hand control back to firmware on manageability-capable MACs. */
886 if (((adapter->hw.mac.type == e1000_82573) ||
887 (adapter->hw.mac.type == e1000_ich8lan) ||
888 (adapter->hw.mac.type == e1000_ich9lan)) &&
889 e1000_check_mng_mode(&adapter->hw))
890 em_release_hw_control(adapter);
/* Arm wake-on-LAN before suspending. */
893 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
894 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
895 em_enable_wakeup(dev);
898 EM_CORE_UNLOCK(adapter);
900 return bus_generic_suspend(dev);
/* Resume: reinitialize the adapter and restart transmission if the
 * interface was up and running. Return type/braces elided in listing. */
904 em_resume(device_t dev)
906 struct adapter *adapter = device_get_softc(dev);
907 struct ifnet *ifp = adapter->ifp;
909 EM_CORE_LOCK(adapter);
910 em_init_locked(adapter);
911 em_init_manageability(adapter);
913 if ((ifp->if_flags & IFF_UP) &&
914 (ifp->if_drv_flags & IFF_DRV_RUNNING))
915 em_start_locked(ifp);
917 EM_CORE_UNLOCK(adapter);
919 return bus_generic_resume(dev);
923 /*********************************************************************
924 * Transmit entry point
926 * em_start is called by the stack to initiate a transmit.
927 * The driver will remain in this routine as long as there are
928 * packets to transmit and transmit resources are available.
929 * In case resources are not available stack is notified and
930 * the packet is requeued.
931 **********************************************************************/
/* NOTE(review): listing elides the return type, the m_head declaration,
 * the NULL-dequeue break, the requeue break's closing brace, and the
 * loop's closing brace — confirm against the complete source.
 * Caller must hold the TX lock (asserted below). */
934 em_start_locked(struct ifnet *ifp)
936 struct adapter *adapter = ifp->if_softc;
939 EM_TX_LOCK_ASSERT(adapter);
941 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
944 if (!adapter->link_active)
947 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
949 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
953 * Encapsulation can modify our pointer, and or make it
954 * NULL on failure. In that event, we can't requeue.
956 * We now use a pointer to accommodate legacy and
957 * advanced transmit functions.
959 if (adapter->em_xmit(adapter, &m_head)) {
962 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
963 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
967 /* Send a copy of the frame to the BPF listener */
968 ETHER_BPF_MTAP(ifp, m_head);
970 /* Set timeout in case hardware has problems transmitting. */
971 adapter->watchdog_timer = EM_TX_TIMEOUT;
/* Unlocked wrapper called by the network stack; takes the TX lock
 * (EM_TX_LOCK line elided in listing) around em_start_locked(). */
976 em_start(struct ifnet *ifp)
978 struct adapter *adapter = ifp->if_softc;
981 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
982 em_start_locked(ifp);
983 EM_TX_UNLOCK(adapter);
986 /*********************************************************************
989 * em_ioctl is called when the user wants to configure the
992 * return 0 on success, positive on failure
993 **********************************************************************/
996 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
998 struct adapter *adapter = ifp->if_softc;
999 struct ifreq *ifr = (struct ifreq *)data;
1000 struct ifaddr *ifa = (struct ifaddr *)data;
1003 if (adapter->in_detach)
1008 if (ifa->ifa_addr->sa_family == AF_INET) {
1011 * Since resetting hardware takes a very long time
1012 * and results in link renegotiation we only
1013 * initialize the hardware only when it is absolutely
1016 ifp->if_flags |= IFF_UP;
1017 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1018 EM_CORE_LOCK(adapter);
1019 em_init_locked(adapter);
1020 EM_CORE_UNLOCK(adapter);
1022 arp_ifinit(ifp, ifa);
1024 error = ether_ioctl(ifp, command, data);
1029 uint16_t eeprom_data = 0;
1031 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1033 EM_CORE_LOCK(adapter);
1034 switch (adapter->hw.mac.type) {
1037 * 82573 only supports jumbo frames
1038 * if ASPM is disabled.
1040 e1000_read_nvm(&adapter->hw,
1041 NVM_INIT_3GIO_3, 1, &eeprom_data);
1042 if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
1043 max_frame_size = ETHER_MAX_LEN;
1046 /* Allow Jumbo frames - fall thru */
1051 case e1000_80003es2lan: /* Limit Jumbo Frame size */
1052 max_frame_size = 9234;
1054 /* Adapters that do not support jumbo frames */
1057 max_frame_size = ETHER_MAX_LEN;
1060 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1062 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1064 EM_CORE_UNLOCK(adapter);
1069 ifp->if_mtu = ifr->ifr_mtu;
1070 adapter->max_frame_size =
1071 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1072 em_init_locked(adapter);
1073 EM_CORE_UNLOCK(adapter);
1077 IOCTL_DEBUGOUT("ioctl rcv'd:\
1078 SIOCSIFFLAGS (Set Interface Flags)");
1079 EM_CORE_LOCK(adapter);
1080 if (ifp->if_flags & IFF_UP) {
1081 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1082 if ((ifp->if_flags ^ adapter->if_flags) &
1084 em_disable_promisc(adapter);
1085 em_set_promisc(adapter);
1088 em_init_locked(adapter);
1090 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1091 EM_TX_LOCK(adapter);
1093 EM_TX_UNLOCK(adapter);
1095 adapter->if_flags = ifp->if_flags;
1096 EM_CORE_UNLOCK(adapter);
1100 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
1101 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1102 EM_CORE_LOCK(adapter);
1103 em_disable_intr(adapter);
1104 em_set_multi(adapter);
1105 if (adapter->hw.mac.type == e1000_82542 &&
1106 adapter->hw.revision_id == E1000_REVISION_2) {
1107 em_initialize_receive_unit(adapter);
1109 #ifdef DEVICE_POLLING
1110 if (!(ifp->if_capenable & IFCAP_POLLING))
1112 em_enable_intr(adapter);
1113 EM_CORE_UNLOCK(adapter);
1117 /* Check SOL/IDER usage */
1118 EM_CORE_LOCK(adapter);
1119 if (e1000_check_reset_block(&adapter->hw)) {
1120 EM_CORE_UNLOCK(adapter);
1121 device_printf(adapter->dev, "Media change is"
1122 " blocked due to SOL/IDER session.\n");
1125 EM_CORE_UNLOCK(adapter);
1127 IOCTL_DEBUGOUT("ioctl rcv'd: \
1128 SIOCxIFMEDIA (Get/Set Interface Media)");
1129 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
1135 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1137 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1138 #ifdef DEVICE_POLLING
1139 if (mask & IFCAP_POLLING) {
1140 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1141 error = ether_poll_register(em_poll, ifp);
1144 EM_CORE_LOCK(adapter);
1145 em_disable_intr(adapter);
1146 ifp->if_capenable |= IFCAP_POLLING;
1147 EM_CORE_UNLOCK(adapter);
1149 error = ether_poll_deregister(ifp);
1150 /* Enable interrupt even in error case */
1151 EM_CORE_LOCK(adapter);
1152 em_enable_intr(adapter);
1153 ifp->if_capenable &= ~IFCAP_POLLING;
1154 EM_CORE_UNLOCK(adapter);
1158 if (mask & IFCAP_HWCSUM) {
1159 ifp->if_capenable ^= IFCAP_HWCSUM;
1162 #if __FreeBSD_version >= 700000
1163 if (mask & IFCAP_TSO4) {
1164 ifp->if_capenable ^= IFCAP_TSO4;
1168 if (mask & IFCAP_VLAN_HWTAGGING) {
1169 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1172 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1174 #if __FreeBSD_version >= 700000
1175 VLAN_CAPABILITIES(ifp);
1180 error = ether_ioctl(ifp, command, data);
1187 /*********************************************************************
1190 * This routine is called from the local timer every second.
1191 * As long as transmit descriptors are being cleaned the value
1192 * is non-zero and we do nothing. Reaching 0 indicates a tx hang
1193 * and we then reset the device.
1195 **********************************************************************/
1198 em_watchdog(struct adapter *adapter)
1201 EM_CORE_LOCK_ASSERT(adapter);
1204 ** The timer is set to 5 every time start queues a packet.
1205 ** Then txeof keeps resetting it as long as it cleans at
1206 ** least one descriptor.
1207 ** Finally, anytime all descriptors are clean the timer is
1210 if ((adapter->watchdog_timer == 0) || (--adapter->watchdog_timer))
1213 /* If we are in this routine because of pause frames, then
1214 * don't reset the hardware.
1216 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
1217 E1000_STATUS_TXOFF) {
1218 adapter->watchdog_timer = EM_TX_TIMEOUT;
1222 if (e1000_check_for_link(&adapter->hw) == 0)
1223 device_printf(adapter->dev, "watchdog timeout -- resetting\n");
1224 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1225 adapter->watchdog_events++;
1227 em_init_locked(adapter);
1230 /*********************************************************************
1233 * This routine is used in two ways. It is used by the stack as
1234 * init entry point in network interface structure. It is also used
1235 * by the driver as a hw/sw initialization routine to get to a
1238 * return 0 on success, positive on failure
1239 **********************************************************************/
1242 em_init_locked(struct adapter *adapter)
1244 struct ifnet *ifp = adapter->ifp;
1245 device_t dev = adapter->dev;
1248 INIT_DEBUGOUT("em_init: begin");
1250 EM_CORE_LOCK_ASSERT(adapter);
1252 EM_TX_LOCK(adapter);
1254 EM_TX_UNLOCK(adapter);
1257 * Packet Buffer Allocation (PBA)
1258 * Writing PBA sets the receive portion of the buffer
1259 * the remainder is used for the transmit buffer.
1261 * Devices before the 82547 had a Packet Buffer of 64K.
1262 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1263 * After the 82547 the buffer was reduced to 40K.
1264 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1265 * Note: default does not leave enough room for Jumbo Frame >10k.
1267 switch (adapter->hw.mac.type) {
1269 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1270 if (adapter->max_frame_size > 8192)
1271 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1273 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
1274 adapter->tx_fifo_head = 0;
1275 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1276 adapter->tx_fifo_size =
1277 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1279 /* Total Packet Buffer on these is 48K */
1283 case e1000_80003es2lan:
1284 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1286 case e1000_82573: /* 82573: Total Packet Buffer is 32K */
1287 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1290 #define E1000_PBA_10K 0x000A
1291 pba = E1000_PBA_10K;
1297 /* Devices before 82547 had a Packet Buffer of 64K. */
1298 if (adapter->max_frame_size > 8192)
1299 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1301 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1304 INIT_DEBUGOUT1("em_init: pba=%dK",pba);
1305 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1307 /* Get the latest mac address, User can use a LAA */
1308 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1311 /* Put the address into the Receive Address Array */
1312 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1315 * With the 82571 adapter, RAR[0] may be overwritten
1316 * when the other port is reset, we make a duplicate
1317 * in RAR[14] for that eventuality, this assures
1318 * the interface continues to function.
1320 if (adapter->hw.mac.type == e1000_82571) {
1321 e1000_set_laa_state_82571(&adapter->hw, TRUE);
1322 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1323 E1000_RAR_ENTRIES - 1);
1326 /* Initialize the hardware */
1327 if (em_hardware_init(adapter)) {
1328 device_printf(dev, "Unable to initialize the hardware\n");
1331 em_update_link_status(adapter);
1333 /* Setup VLAN support, basic and offload if available */
1334 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1335 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1336 em_enable_hw_vlans(adapter);
1338 /* Set hardware offload abilities */
1339 ifp->if_hwassist = 0;
1340 if (adapter->hw.mac.type >= e1000_82543) {
1341 if (ifp->if_capenable & IFCAP_TXCSUM)
1342 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1343 #if __FreeBSD_version >= 700000
1344 if (ifp->if_capenable & IFCAP_TSO4)
1345 ifp->if_hwassist |= CSUM_TSO;
1349 /* Configure for OS presence */
1350 em_init_manageability(adapter);
1352 /* Prepare transmit descriptors and buffers */
1353 em_setup_transmit_structures(adapter);
1354 em_initialize_transmit_unit(adapter);
1356 /* Setup Multicast table */
1357 em_set_multi(adapter);
1359 /* Prepare receive descriptors and buffers */
1360 if (em_setup_receive_structures(adapter)) {
1361 device_printf(dev, "Could not setup receive structures\n");
1362 EM_TX_LOCK(adapter);
1364 EM_TX_UNLOCK(adapter);
1367 em_initialize_receive_unit(adapter);
1369 /* Don't lose promiscuous settings */
1370 em_set_promisc(adapter);
1372 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1373 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1375 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1376 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1378 #ifdef DEVICE_POLLING
1380 * Only enable interrupts if we are not polling, make sure
1381 * they are off otherwise.
1383 if (ifp->if_capenable & IFCAP_POLLING)
1384 em_disable_intr(adapter);
1386 #endif /* DEVICE_POLLING */
1387 em_enable_intr(adapter);
1389 /* Don't reset the phy next time init gets called */
1390 adapter->hw.phy.reset_disable = TRUE;
1396 struct adapter *adapter = arg;
1398 EM_CORE_LOCK(adapter);
1399 em_init_locked(adapter);
1400 EM_CORE_UNLOCK(adapter);
1404 #ifdef DEVICE_POLLING
1405 /*********************************************************************
1407 * Legacy polling routine
1409 *********************************************************************/
1411 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1413 struct adapter *adapter = ifp->if_softc;
1416 EM_CORE_LOCK(adapter);
1417 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1418 EM_CORE_UNLOCK(adapter);
1422 if (cmd == POLL_AND_CHECK_STATUS) {
1423 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1424 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1425 callout_stop(&adapter->timer);
1426 adapter->hw.mac.get_link_status = 1;
1427 e1000_check_for_link(&adapter->hw);
1428 em_update_link_status(adapter);
1429 callout_reset(&adapter->timer, hz,
1430 em_local_timer, adapter);
1433 em_rxeof(adapter, count);
1434 EM_CORE_UNLOCK(adapter);
1436 EM_TX_LOCK(adapter);
1439 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1440 em_start_locked(ifp);
1441 EM_TX_UNLOCK(adapter);
1443 #endif /* DEVICE_POLLING */
1446 /*********************************************************************
1448 * Legacy Interrupt Service routine
1450 *********************************************************************/
1455 struct adapter *adapter = arg;
1459 EM_CORE_LOCK(adapter);
1462 if (ifp->if_capenable & IFCAP_POLLING) {
1463 EM_CORE_UNLOCK(adapter);
1468 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1470 if (adapter->hw.mac.type >= e1000_82571 &&
1471 (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1473 else if (reg_icr == 0)
1477 * XXX: some laptops trigger several spurious interrupts
1478 * on em(4) when in the resume cycle. The ICR register
1479 * reports all-ones value in this case. Processing such
1480 * interrupts would lead to a freeze. I don't know why.
1482 if (reg_icr == 0xffffffff)
1485 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1486 em_rxeof(adapter, -1);
1487 EM_TX_LOCK(adapter);
1489 EM_TX_UNLOCK(adapter);
1492 /* Link status change */
1493 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1494 callout_stop(&adapter->timer);
1495 adapter->hw.mac.get_link_status = 1;
1496 e1000_check_for_link(&adapter->hw);
1497 em_update_link_status(adapter);
1498 /* Deal with TX cruft when link lost */
1499 em_tx_purge(adapter);
1500 callout_reset(&adapter->timer, hz,
1501 em_local_timer, adapter);
1504 if (reg_icr & E1000_ICR_RXO)
1505 adapter->rx_overruns++;
1507 EM_CORE_UNLOCK(adapter);
1509 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1510 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1514 #else /* EM_FAST_IRQ, then fast interrupt routines only */
1517 em_handle_link(void *context, int pending)
1519 struct adapter *adapter = context;
1524 EM_CORE_LOCK(adapter);
1525 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1526 EM_CORE_UNLOCK(adapter);
1530 callout_stop(&adapter->timer);
1531 adapter->hw.mac.get_link_status = 1;
1532 e1000_check_for_link(&adapter->hw);
1533 em_update_link_status(adapter);
1534 /* Deal with TX cruft when link lost */
1535 em_tx_purge(adapter);
1536 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1537 EM_CORE_UNLOCK(adapter);
1540 #if __FreeBSD_version >= 700000
1541 #if !defined(NET_LOCK_GIANT)
1542 #define NET_LOCK_GIANT()
1543 #define NET_UNLOCK_GIANT()
1548 em_handle_rxtx(void *context, int pending)
1550 struct adapter *adapter = context;
1558 * It should be possible to run the tx clean loop without the lock.
1560 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1561 if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
1562 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1563 EM_TX_LOCK(adapter);
1566 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1567 em_start_locked(ifp);
1568 EM_TX_UNLOCK(adapter);
1571 em_enable_intr(adapter);
1575 /*********************************************************************
1577 * Fast Interrupt Service routine
1579 *********************************************************************/
1580 #if __FreeBSD_version < 700000
1581 #define FILTER_STRAY
1582 #define FILTER_HANDLED
1587 em_intr_fast(void *arg)
1589 struct adapter *adapter = arg;
1595 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1598 if (reg_icr == 0xffffffff)
1599 return FILTER_STRAY;
1601 /* Definitely not our interrupt. */
1603 return FILTER_STRAY;
1606 * Starting with the 82571 chip, bit 31 should be used to
1607 * determine whether the interrupt belongs to us.
1609 if (adapter->hw.mac.type >= e1000_82571 &&
1610 (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1611 return FILTER_STRAY;
1614 * Mask interrupts until the taskqueue is finished running. This is
1615 * cheap, just assume that it is needed. This also works around the
1616 * MSI message reordering errata on certain systems.
1618 em_disable_intr(adapter);
1619 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1621 /* Link status change */
1622 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
1623 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1625 if (reg_icr & E1000_ICR_RXO)
1626 adapter->rx_overruns++;
1627 return FILTER_HANDLED;
1629 #endif /* EM_FAST_IRQ */
1631 /*********************************************************************
1633 * Media Ioctl callback
1635 * This routine is called whenever the user queries the status of
1636 * the interface using ifconfig.
1638 **********************************************************************/
1640 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1642 struct adapter *adapter = ifp->if_softc;
1643 u_char fiber_type = IFM_1000_SX;
1645 INIT_DEBUGOUT("em_media_status: begin");
1647 EM_CORE_LOCK(adapter);
1648 e1000_check_for_link(&adapter->hw);
1649 em_update_link_status(adapter);
1651 ifmr->ifm_status = IFM_AVALID;
1652 ifmr->ifm_active = IFM_ETHER;
1654 if (!adapter->link_active) {
1655 EM_CORE_UNLOCK(adapter);
1659 ifmr->ifm_status |= IFM_ACTIVE;
1661 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1662 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1663 if (adapter->hw.mac.type == e1000_82545)
1664 fiber_type = IFM_1000_LX;
1665 ifmr->ifm_active |= fiber_type | IFM_FDX;
1667 switch (adapter->link_speed) {
1669 ifmr->ifm_active |= IFM_10_T;
1672 ifmr->ifm_active |= IFM_100_TX;
1675 ifmr->ifm_active |= IFM_1000_T;
1678 if (adapter->link_duplex == FULL_DUPLEX)
1679 ifmr->ifm_active |= IFM_FDX;
1681 ifmr->ifm_active |= IFM_HDX;
1683 EM_CORE_UNLOCK(adapter);
1686 /*********************************************************************
1688 * Media Ioctl callback
1690 * This routine is called when the user changes speed/duplex using
1691  * media/mediaopt option with ifconfig.
1693 **********************************************************************/
1695 em_media_change(struct ifnet *ifp)
1697 struct adapter *adapter = ifp->if_softc;
1698 struct ifmedia *ifm = &adapter->media;
1700 INIT_DEBUGOUT("em_media_change: begin");
1702 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1705 EM_CORE_LOCK(adapter);
1706 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1708 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1709 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1714 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1715 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1718 adapter->hw.mac.autoneg = FALSE;
1719 adapter->hw.phy.autoneg_advertised = 0;
1720 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1721 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1723 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1726 adapter->hw.mac.autoneg = FALSE;
1727 adapter->hw.phy.autoneg_advertised = 0;
1728 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1729 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1731 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1734 device_printf(adapter->dev, "Unsupported media type\n");
1737 /* As the speed/duplex settings my have changed we need to
1740 adapter->hw.phy.reset_disable = FALSE;
1742 em_init_locked(adapter);
1743 EM_CORE_UNLOCK(adapter);
1748 /*********************************************************************
1750 * This routine maps the mbufs to tx descriptors.
1752 * return 0 on success, positive on failure
1753 **********************************************************************/
1756 em_encap(struct adapter *adapter, struct mbuf **m_headp)
1758 bus_dma_segment_t segs[EM_MAX_SCATTER];
1760 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1761 struct e1000_tx_desc *ctxd = NULL;
1762 struct mbuf *m_head;
1763 uint32_t txd_upper, txd_lower, txd_used, txd_saved;
1764 int nsegs, i, j, first, last = 0;
1765 int error, do_tso, tso_desc = 0;
1766 #if __FreeBSD_version < 700000
1770 txd_upper = txd_lower = txd_used = txd_saved = 0;
1772 #if __FreeBSD_version >= 700000
1773 do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
1779 * Force a cleanup if number of TX descriptors
1780 * available hits the threshold
1782 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1784 /* Now do we at least have a minimal? */
1785 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
1786 adapter->no_tx_desc_avail1++;
1794 * If an mbuf is only header we need
1795 * to pull 4 bytes of data into it.
1797 if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
1798 m_head = m_pullup(m_head, M_TSO_LEN + 4);
1805 * Map the packet for DMA
1807 * Capture the first descriptor index,
1808 * this descriptor will have the index
1809 * of the EOP which is the only one that
1810 * now gets a DONE bit writeback.
1812 first = adapter->next_avail_tx_desc;
1813 tx_buffer = &adapter->tx_buffer_area[first];
1814 tx_buffer_mapped = tx_buffer;
1815 map = tx_buffer->map;
1817 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1818 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1821 * There are two types of errors we can (try) to handle:
1822 * - EFBIG means the mbuf chain was too long and bus_dma ran
1823 * out of segments. Defragment the mbuf chain and try again.
1824 * - ENOMEM means bus_dma could not obtain enough bounce buffers
1825 * at this point in time. Defer sending and try again later.
1826 * All other errors, in particular EINVAL, are fatal and prevent the
1827 * mbuf chain from ever going through. Drop it and report error.
1829 if (error == EFBIG) {
1832 m = m_defrag(*m_headp, M_DONTWAIT);
1834 adapter->mbuf_alloc_failed++;
1842 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1843 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1845 if (error == ENOMEM) {
1846 adapter->no_tx_dma_setup++;
1848 } else if (error != 0) {
1849 adapter->no_tx_dma_setup++;
1854 } else if (error == ENOMEM) {
1855 adapter->no_tx_dma_setup++;
1857 } else if (error != 0) {
1858 adapter->no_tx_dma_setup++;
1865 * TSO Hardware workaround, if this packet is not
1866 * TSO, and is only a single descriptor long, and
1867 * it follows a TSO burst, then we need to add a
1868 * sentinel descriptor to prevent premature writeback.
1870 if ((do_tso == 0) && (adapter->tx_tso == TRUE)) {
1873 adapter->tx_tso = FALSE;
1876 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1877 adapter->no_tx_desc_avail2++;
1878 bus_dmamap_unload(adapter->txtag, map);
1883 /* Do hardware assists */
1884 #if __FreeBSD_version >= 700000
1885 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1886 error = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
1888 return (ENXIO); /* something foobar */
1889 /* we need to make a final sentinel transmit desc */
1893 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1894 em_transmit_checksum_setup(adapter, m_head,
1895 &txd_upper, &txd_lower);
1897 i = adapter->next_avail_tx_desc;
1898 if (adapter->pcix_82544)
1901 /* Set up our transmit descriptors */
1902 for (j = 0; j < nsegs; j++) {
1904 bus_addr_t seg_addr;
1905 /* If adapter is 82544 and on PCIX bus */
1906 if(adapter->pcix_82544) {
1907 DESC_ARRAY desc_array;
1908 uint32_t array_elements, counter;
1910 * Check the Address and Length combination and
1911 * split the data accordingly
1913 array_elements = em_fill_descriptors(segs[j].ds_addr,
1914 segs[j].ds_len, &desc_array);
1915 for (counter = 0; counter < array_elements; counter++) {
1916 if (txd_used == adapter->num_tx_desc_avail) {
1917 adapter->next_avail_tx_desc = txd_saved;
1918 adapter->no_tx_desc_avail2++;
1919 bus_dmamap_unload(adapter->txtag, map);
1922 tx_buffer = &adapter->tx_buffer_area[i];
1923 ctxd = &adapter->tx_desc_base[i];
1924 ctxd->buffer_addr = htole64(
1925 desc_array.descriptor[counter].address);
1926 ctxd->lower.data = htole32(
1927 (adapter->txd_cmd | txd_lower | (uint16_t)
1928 desc_array.descriptor[counter].length));
1930 htole32((txd_upper));
1932 if (++i == adapter->num_tx_desc)
1934 tx_buffer->m_head = NULL;
1935 tx_buffer->next_eop = -1;
1939 tx_buffer = &adapter->tx_buffer_area[i];
1940 ctxd = &adapter->tx_desc_base[i];
1941 seg_addr = segs[j].ds_addr;
1942 seg_len = segs[j].ds_len;
1945 ** If this is the last descriptor, we want to
1946 ** split it so we have a small final sentinel
1948 if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
1950 ctxd->buffer_addr = htole64(seg_addr);
1951 ctxd->lower.data = htole32(
1952 adapter->txd_cmd | txd_lower | seg_len);
1955 if (++i == adapter->num_tx_desc)
1957 /* Now make the sentinel */
1958 ++txd_used; /* using an extra txd */
1959 ctxd = &adapter->tx_desc_base[i];
1960 tx_buffer = &adapter->tx_buffer_area[i];
1962 htole64(seg_addr + seg_len);
1963 ctxd->lower.data = htole32(
1964 adapter->txd_cmd | txd_lower | 4);
1968 if (++i == adapter->num_tx_desc)
1971 ctxd->buffer_addr = htole64(seg_addr);
1972 ctxd->lower.data = htole32(
1973 adapter->txd_cmd | txd_lower | seg_len);
1977 if (++i == adapter->num_tx_desc)
1980 tx_buffer->m_head = NULL;
1981 tx_buffer->next_eop = -1;
1985 adapter->next_avail_tx_desc = i;
1986 if (adapter->pcix_82544)
1987 adapter->num_tx_desc_avail -= txd_used;
1989 adapter->num_tx_desc_avail -= nsegs;
1990 if (tso_desc) /* TSO used an extra for sentinel */
1991 adapter->num_tx_desc_avail -= txd_used;
1995 ** Handle VLAN tag, this is the
1996 ** biggest difference between
1999 #if __FreeBSD_version < 700000
2000 /* Find out if we are in vlan mode. */
2001 mtag = VLAN_OUTPUT_TAG(ifp, m_head);
2003 ctxd->upper.fields.special =
2004 htole16(VLAN_TAG_VALUE(mtag));
2005 #else /* FreeBSD 7 */
2006 if (m_head->m_flags & M_VLANTAG) {
2007 /* Set the vlan id. */
2008 ctxd->upper.fields.special =
2009 htole16(m_head->m_pkthdr.ether_vtag);
2011 /* Tell hardware to add tag */
2012 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
2015 tx_buffer->m_head = m_head;
2016 tx_buffer_mapped->map = tx_buffer->map;
2017 tx_buffer->map = map;
2018 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
2021 * Last Descriptor of Packet
2022 * needs End Of Packet (EOP)
2023 * and Report Status (RS)
2026 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
2028 * Keep track in the first buffer which
2029 * descriptor will be written back
2031 tx_buffer = &adapter->tx_buffer_area[first];
2032 tx_buffer->next_eop = last;
2035 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
2036 * that this frame is available to transmit.
2038 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2039 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2040 if (adapter->hw.mac.type == e1000_82547 &&
2041 adapter->link_duplex == HALF_DUPLEX)
2042 em_82547_move_tail(adapter);
2044 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
2045 if (adapter->hw.mac.type == e1000_82547)
2046 em_82547_update_fifo_head(adapter,
2047 m_head->m_pkthdr.len);
2053 /*********************************************************************
2055 * This routine maps the mbufs to Advanced TX descriptors.
2056 * used by the 82575 adapter. It also needs no workarounds.
2058 **********************************************************************/
2061 em_adv_encap(struct adapter *adapter, struct mbuf **m_headp)
2063 bus_dma_segment_t segs[EM_MAX_SCATTER];
2065 struct em_buffer *tx_buffer, *tx_buffer_mapped;
2066 union e1000_adv_tx_desc *txd = NULL;
2067 struct mbuf *m_head;
2068 u32 olinfo_status = 0, cmd_type_len = 0;
2070 int nsegs, i, j, error, first, last = 0;
2071 #if __FreeBSD_version < 700000
2078 /* Set basic descriptor constants */
2079 cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
2080 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
2081 #if __FreeBSD_version < 700000
2082 mtag = VLAN_OUTPUT_TAG(ifp, m_head);
2085 if (m_head->m_flags & M_VLANTAG)
2087 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2090 * Force a cleanup if number of TX descriptors
2091 * available hits the threshold
2093 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
2095 /* Now do we at least have a minimal? */
2096 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
2097 adapter->no_tx_desc_avail1++;
2103 * Map the packet for DMA.
2105 * Capture the first descriptor index,
2106 * this descriptor will have the index
2107 * of the EOP which is the only one that
2108 * now gets a DONE bit writeback.
2110 first = adapter->next_avail_tx_desc;
2111 tx_buffer = &adapter->tx_buffer_area[first];
2112 tx_buffer_mapped = tx_buffer;
2113 map = tx_buffer->map;
2115 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2116 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2118 if (error == EFBIG) {
2121 m = m_defrag(*m_headp, M_DONTWAIT);
2123 adapter->mbuf_alloc_failed++;
2131 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2132 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2134 if (error == ENOMEM) {
2135 adapter->no_tx_dma_setup++;
2137 } else if (error != 0) {
2138 adapter->no_tx_dma_setup++;
2143 } else if (error == ENOMEM) {
2144 adapter->no_tx_dma_setup++;
2146 } else if (error != 0) {
2147 adapter->no_tx_dma_setup++;
2153 /* Check again to be sure we have enough descriptors */
2154 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
2155 adapter->no_tx_desc_avail2++;
2156 bus_dmamap_unload(adapter->txtag, map);
2162 * Set up the context descriptor:
2163 * used when any hardware offload is done.
2164 * This includes CSUM, VLAN, and TSO. It
2165 * will use the first descriptor.
2167 #if __FreeBSD_version >= 700000
2168 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2169 if (em_tso_adv_setup(adapter, m_head, &hdrlen)) {
2170 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
2171 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2172 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2177 /* Do all other context descriptor setup */
2178 if (em_tx_adv_ctx_setup(adapter, m_head))
2179 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2181 /* Calculate payload length */
2182 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
2183 << E1000_ADVTXD_PAYLEN_SHIFT);
2185 /* Set up our transmit descriptors */
2186 i = adapter->next_avail_tx_desc;
2187 for (j = 0; j < nsegs; j++) {
2189 bus_addr_t seg_addr;
2191 tx_buffer = &adapter->tx_buffer_area[i];
2192 txd = (union e1000_adv_tx_desc *)&adapter->tx_desc_base[i];
2193 seg_addr = segs[j].ds_addr;
2194 seg_len = segs[j].ds_len;
2196 txd->read.buffer_addr = htole64(seg_addr);
2197 txd->read.cmd_type_len = htole32(
2198 adapter->txd_cmd | cmd_type_len | seg_len);
2199 txd->read.olinfo_status = htole32(olinfo_status);
2201 if (++i == adapter->num_tx_desc)
2203 tx_buffer->m_head = NULL;
2204 tx_buffer->next_eop = -1;
2207 adapter->next_avail_tx_desc = i;
2208 adapter->num_tx_desc_avail -= nsegs;
2210 tx_buffer->m_head = m_head;
2211 tx_buffer_mapped->map = tx_buffer->map;
2212 tx_buffer->map = map;
2213 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
2216 * Last Descriptor of Packet
2217 * needs End Of Packet (EOP)
2218 * and Report Status (RS)
2220 txd->read.cmd_type_len |=
2221 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
2223 * Keep track in the first buffer which
2224 * descriptor will be written back
2226 tx_buffer = &adapter->tx_buffer_area[first];
2227 tx_buffer->next_eop = last;
2230 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
2231 * that this frame is available to transmit.
2233 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2234 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2235 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
2241 /*********************************************************************
2243 * 82547 workaround to avoid controller hang in half-duplex environment.
2244 * The workaround is to avoid queuing a large packet that would span
2245 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
2246 * in this case. We do that only when FIFO is quiescent.
2248 **********************************************************************/
2250 em_82547_move_tail(void *arg)
2252 struct adapter *adapter = arg;
2255 struct e1000_tx_desc *tx_desc;
2256 uint16_t length = 0;
2259 EM_TX_LOCK_ASSERT(adapter);
2261 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
2262 sw_tdt = adapter->next_avail_tx_desc;
2264 while (hw_tdt != sw_tdt) {
2265 tx_desc = &adapter->tx_desc_base[hw_tdt];
2266 length += tx_desc->lower.flags.length;
2267 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
2268 if (++hw_tdt == adapter->num_tx_desc)
2272 if (em_82547_fifo_workaround(adapter, length)) {
2273 adapter->tx_fifo_wrk_cnt++;
2274 callout_reset(&adapter->tx_fifo_timer, 1,
2275 em_82547_move_tail, adapter);
2278 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
2279 em_82547_update_fifo_head(adapter, length);
2286 em_82547_fifo_workaround(struct adapter *adapter, int len)
2288 int fifo_space, fifo_pkt_len;
2290 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2292 if (adapter->link_duplex == HALF_DUPLEX) {
2293 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2295 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
2296 if (em_82547_tx_fifo_reset(adapter))
2307 em_82547_update_fifo_head(struct adapter *adapter, int len)
2309 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2311 /* tx_fifo_head is always 16 byte aligned */
2312 adapter->tx_fifo_head += fifo_pkt_len;
2313 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
2314 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * Reset the 82547 internal Tx FIFO pointers, but only when the
 * transmitter is completely quiescent: descriptor head == tail, FIFO
 * head == tail, FIFO saved head == saved tail, and no packets counted
 * in the FIFO (TDFPC == 0).  The TX unit is disabled around the
 * pointer rewrite and re-enabled afterwards.
 */
2320 em_82547_tx_fifo_reset(struct adapter *adapter)
/* All four quiescence conditions must hold before touching the FIFO. */
2324 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
2325 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
2326 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
2327 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
2328 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
2329 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
2330 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
2331 /* Disable TX unit */
2332 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2333 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
2334 tctl & ~E1000_TCTL_EN);
2336 /* Reset FIFO pointers */
2337 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
2338 adapter->tx_head_addr);
2339 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
2340 adapter->tx_head_addr);
2341 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
2342 adapter->tx_head_addr);
2343 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
2344 adapter->tx_head_addr);
2346 /* Re-enable TX unit */
2347 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2348 E1000_WRITE_FLUSH(&adapter->hw);
/* Restart the software FIFO model and count the reset for statistics. */
2350 adapter->tx_fifo_head = 0;
2351 adapter->tx_fifo_reset_cnt++;
/*
 * Program the receive control register (RCTL) to reflect the
 * interface's promiscuous / all-multicast flags: IFF_PROMISC sets
 * both unicast (UPE) and multicast (MPE) promiscuous bits,
 * IFF_ALLMULTI sets MPE only and clears UPE.
 */
2361 em_set_promisc(struct adapter *adapter)
2363 struct ifnet *ifp = adapter->ifp;
2366 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2368 if (ifp->if_flags & IFF_PROMISC) {
2369 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2370 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2371 } else if (ifp->if_flags & IFF_ALLMULTI) {
2372 reg_rctl |= E1000_RCTL_MPE;
2373 reg_rctl &= ~E1000_RCTL_UPE;
2374 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/*
 * Clear both promiscuous bits (unicast UPE and multicast MPE) in RCTL,
 * returning the receiver to normal address filtering.
 */
2379 em_disable_promisc(struct adapter *adapter)
2383 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2385 reg_rctl &= (~E1000_RCTL_UPE);
2386 reg_rctl &= (~E1000_RCTL_MPE);
2387 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2391 /*********************************************************************
2394 * This routine is called whenever multicast address list is updated.
2396 **********************************************************************/
/*
 * Rebuild the hardware multicast filter from the interface's multicast
 * address list.  82542 rev 2 silicon requires the receiver to be held
 * in reset (RCTL_RST) with MWI disabled while the filter table is
 * rewritten; that bracketing is done here.  If the list exceeds
 * MAX_NUM_MULTICAST_ADDRESSES, fall back to multicast-promiscuous
 * (MPE) instead of exact filtering.
 */
2399 em_set_multi(struct adapter *adapter)
2401 struct ifnet *ifp = adapter->ifp;
2402 struct ifmultiaddr *ifma;
2403 uint32_t reg_rctl = 0;
2404 uint8_t mta[512]; /* Largest MTS is 4096 bits */
2407 IOCTL_DEBUGOUT("em_set_multi: begin");
/* 82542 rev 2: quiesce the receiver (and MWI) before filter update. */
2409 if (adapter->hw.mac.type == e1000_82542 &&
2410 adapter->hw.revision_id == E1000_REVISION_2) {
2411 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2412 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2413 e1000_pci_clear_mwi(&adapter->hw);
2414 reg_rctl |= E1000_RCTL_RST;
2415 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/* Collect link-layer multicast addresses into the flat mta[] table. */
2420 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2421 if (ifma->ifma_addr->sa_family != AF_LINK)
2424 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2427 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2428 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2431 IF_ADDR_UNLOCK(ifp);
/* Too many groups for exact filtering: enable multicast promiscuous. */
2433 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2434 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2435 reg_rctl |= E1000_RCTL_MPE;
2436 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2438 e1000_update_mc_addr_list(&adapter->hw, mta,
2439 mcnt, 1, adapter->hw.mac.rar_entry_count);
/* 82542 rev 2: take the receiver back out of reset and restore MWI. */
2441 if (adapter->hw.mac.type == e1000_82542 &&
2442 adapter->hw.revision_id == E1000_REVISION_2) {
2443 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2444 reg_rctl &= ~E1000_RCTL_RST;
2445 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2447 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2448 e1000_pci_set_mwi(&adapter->hw);
2453 /*********************************************************************
2456 * This routine checks for link status and updates statistics.
2458 **********************************************************************/
/*
 * Once-per-second housekeeping callout: checks link state, refreshes
 * statistics counters, restores a locally-administered MAC into RAR[0]
 * on 82571, runs the SmartSpeed workaround, and kicks the watchdog.
 * Re-arms itself every hz ticks.  Runs with the core lock held.
 */
2461 em_local_timer(void *arg)
2463 struct adapter *adapter = arg;
2464 struct ifnet *ifp = adapter->ifp;
2466 EM_CORE_LOCK_ASSERT(adapter);
2468 e1000_check_for_link(&adapter->hw);
2469 em_update_link_status(adapter);
2470 em_update_stats_counters(adapter);
2472 /* Reset LAA into RAR[0] on 82571 */
2473 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
2474 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
/* Optional debug dump, gated on a sysctl/tunable and a running interface. */
2476 if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
2477 em_print_hw_stats(adapter);
2479 em_smartspeed(adapter);
2482 * Each second we check the watchdog to
2483 * protect against hardware hangs.
2485 em_watchdog(adapter);
2487 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
/*
 * Reflect the hardware link state (STATUS register) into the driver
 * and the network stack.  On a link-up transition: read speed/duplex,
 * clear the TARC(0) SPEED_MODE bit on 82571/82572 at sub-gigabit
 * speeds, record baudrate and announce LINK_STATE_UP.  On link-down:
 * zero speed/duplex and announce LINK_STATE_DOWN.
 */
2492 em_update_link_status(struct adapter *adapter)
2494 struct ifnet *ifp = adapter->ifp;
2495 device_t dev = adapter->dev;
2497 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
/* Link is reported up but we thought it was down: link-up transition. */
2499 if (adapter->link_active == 0) {
2500 e1000_get_speed_and_duplex(&adapter->hw,
2501 &adapter->link_speed, &adapter->link_duplex);
2502 /* Check if we must disable SPEED_MODE bit on PCI-E */
2503 if ((adapter->link_speed != SPEED_1000) &&
2504 ((adapter->hw.mac.type == e1000_82571) ||
2505 (adapter->hw.mac.type == e1000_82572))) {
2508 tarc0 = E1000_READ_REG(&adapter->hw,
2510 tarc0 &= ~SPEED_MODE_BIT;
2511 E1000_WRITE_REG(&adapter->hw,
2512 E1000_TARC(0), tarc0);
2515 device_printf(dev, "Link is up %d Mbps %s\n",
2516 adapter->link_speed,
2517 ((adapter->link_duplex == FULL_DUPLEX) ?
2518 "Full Duplex" : "Half Duplex"));
2519 adapter->link_active = 1;
2520 adapter->smartspeed = 0;
2521 ifp->if_baudrate = adapter->link_speed * 1000000;
2522 if_link_state_change(ifp, LINK_STATE_UP);
/* Link-down transition: clear cached speed/duplex and notify stack. */
2525 if (adapter->link_active == 1) {
2526 ifp->if_baudrate = adapter->link_speed = 0;
2527 adapter->link_duplex = 0;
2529 device_printf(dev, "Link is Down\n");
2530 adapter->link_active = 0;
2531 if_link_state_change(ifp, LINK_STATE_DOWN);
2536 /*********************************************************************
2538 * This routine disables all traffic on the adapter by issuing a
2539 * global reset on the MAC and deallocates TX/RX buffers.
2541 * This routine should always be called with BOTH the CORE
2543 **********************************************************************/
/*
 * Stop the adapter (em_stop): disable interrupts, cancel the
 * housekeeping and FIFO-workaround callouts, mark the interface
 * neither running nor active, and issue a global MAC reset.  On
 * 82544 and later also clears the Wakeup Control register.  Must be
 * called with both the core and TX locks held.
 */
struct adapter *adapter = arg;
2549 struct ifnet *ifp = adapter->ifp;
2551 EM_CORE_LOCK_ASSERT(adapter);
2552 EM_TX_LOCK_ASSERT(adapter);
2554 INIT_DEBUGOUT("em_stop: begin");
2556 em_disable_intr(adapter);
2557 callout_stop(&adapter->timer);
2558 callout_stop(&adapter->tx_fifo_timer);
2560 /* Tell the stack that the interface is no longer active */
2561 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2563 e1000_reset_hw(&adapter->hw);
2564 if (adapter->hw.mac.type >= e1000_82544)
2565 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2569 /*********************************************************************
2571 * Determine hardware revision.
2573 **********************************************************************/
/*
 * Read PCI config space to identify the board: ensure bus-mastering
 * and memory access are enabled in the command word (re-enabling them
 * if the BIOS left them off), capture vendor/device/revision and
 * subsystem IDs, then let the shared code derive the MAC type.
 */
2575 em_identify_hardware(struct adapter *adapter)
2577 device_t dev = adapter->dev;
2579 /* Make sure our PCI config space has the necessary stuff set */
2580 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2581 if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2582 (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2583 device_printf(dev, "Memory Access and/or Bus Master bits "
/* Force the two required bits back on and write the command word back. */
2585 adapter->hw.bus.pci_cmd_word |=
2586 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2587 pci_write_config(dev, PCIR_COMMAND,
2588 adapter->hw.bus.pci_cmd_word, 2);
2591 /* Save off the information about this board */
2592 adapter->hw.vendor_id = pci_get_vendor(dev);
2593 adapter->hw.device_id = pci_get_device(dev);
2594 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2595 adapter->hw.subsystem_vendor_id =
2596 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2597 adapter->hw.subsystem_device_id =
2598 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2600 /* Do Shared Code Init and Setup */
2601 if (e1000_set_mac_type(&adapter->hw)) {
2602 device_printf(dev, "Setup init failure\n");
/*
 * Allocate the adapter's bus resources: the memory-mapped register
 * BAR (always), an I/O port BAR on 82544..pre-82571 parts (located by
 * scanning config-space BARs), and the interrupt resource (MSI/MSI-X
 * is attempted first on FreeBSD > 602111, falling back to a shared
 * legacy IRQ).  Finally points the shared code back-pointer at osdep.
 */
2608 em_allocate_pci_resources(struct adapter *adapter)
2610 device_t dev = adapter->dev;
/* Map the register BAR and record tag/handle for register access. */
2614 adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2616 if (adapter->res_memory == NULL) {
2617 device_printf(dev, "Unable to allocate bus resource: memory\n");
2620 adapter->osdep.mem_bus_space_tag =
2621 rman_get_bustag(adapter->res_memory);
2622 adapter->osdep.mem_bus_space_handle =
2623 rman_get_bushandle(adapter->res_memory);
2624 adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
2626 /* Only older adapters use IO mapping */
2627 if ((adapter->hw.mac.type > e1000_82543) &&
2628 (adapter->hw.mac.type < e1000_82571)) {
2629 /* Figure our where our IO BAR is ? */
/* Scan config-space BAR registers until an I/O-typed BAR is found. */
2630 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2631 val = pci_read_config(dev, rid, 4);
2632 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2633 adapter->io_rid = rid;
2637 /* check for 64bit BAR */
2638 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2641 if (rid >= PCIR_CIS) {
2642 device_printf(dev, "Unable to locate IO BAR\n");
2645 adapter->res_ioport = bus_alloc_resource_any(dev,
2646 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2647 if (adapter->res_ioport == NULL) {
2648 device_printf(dev, "Unable to allocate bus resource: "
2652 adapter->hw.io_base = 0;
2653 adapter->osdep.io_bus_space_tag =
2654 rman_get_bustag(adapter->res_ioport);
2655 adapter->osdep.io_bus_space_handle =
2656 rman_get_bushandle(adapter->res_ioport);
2660 * Setup MSI/X or MSI if PCI Express
2661 * only the latest can use MSI/X and
2662 * real support for it is forthcoming
2664 adapter->msi = 0; /* Set defaults */
2667 #if __FreeBSD_version > 602111 /* MSI support is present */
2668 /* This will setup either MSI/X or MSI */
2669 if (em_setup_msix(adapter))
2671 #endif /* FreeBSD_version */
2673 adapter->res_interrupt = bus_alloc_resource_any(dev,
2674 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2675 if (adapter->res_interrupt == NULL) {
2676 device_printf(dev, "Unable to allocate bus resource: "
/* Shared code uses this back-pointer for OS-specific PCI accessors. */
2681 adapter->hw.back = &adapter->osdep;
2686 /*********************************************************************
2688 * Setup the appropriate Interrupt handlers.
2690 **********************************************************************/
/*
 * Register the interrupt handler.  Interrupts are first masked off
 * (IMC).  Depending on the EM_FAST_IRQ build option this registers
 * either a standard MPSAFE ithread handler (em_intr) or a fast/filter
 * handler (em_intr_fast) with deferred rxtx/link taskqueue contexts;
 * the bus_setup_intr() signature differs before/after FreeBSD 7.
 * Re-enables interrupts on success.
 */
2692 em_allocate_intr(struct adapter *adapter)
2694 device_t dev = adapter->dev;
2697 /* Manually turn off all interrupts */
2698 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2701 /* We do Legacy setup */
2702 if (adapter->int_handler_tag == NULL &&
2703 (error = bus_setup_intr(dev, adapter->res_interrupt,
2704 #if __FreeBSD_version > 700000
2705 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr, adapter,
2707 INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter,
2709 &adapter->int_handler_tag)) != 0) {
2710 device_printf(dev, "Failed to register interrupt handler");
2714 #else /* FAST_IRQ */
2716 * Try allocating a fast interrupt and the associated deferred
2717 * processing contexts.
2719 TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
2720 TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2721 adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2722 taskqueue_thread_enqueue, &adapter->tq);
2723 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2724 device_get_nameunit(adapter->dev));
2725 #if __FreeBSD_version < 700000
2726 if ((error = bus_setup_intr(dev, adapter->res_interrupt,
2727 INTR_TYPE_NET | INTR_FAST, em_intr_fast, adapter,
2729 if ((error = bus_setup_intr(dev, adapter->res_interrupt,
2730 INTR_TYPE_NET, em_intr_fast, NULL, adapter,
2732 &adapter->int_handler_tag)) != 0) {
2733 device_printf(dev, "Failed to register fast interrupt "
2734 "handler: %d\n", error);
/* Fast-handler registration failed: tear down the taskqueue again. */
2735 taskqueue_free(adapter->tq);
2739 #endif /* EM_FAST_IRQ */
2741 em_enable_intr(adapter);
/*
 * Tear down the interrupt handler registered by em_allocate_intr():
 * detach the handler from the IRQ resource, then drain and free the
 * fast-interrupt taskqueue contexts if they were created.
 */
2746 em_free_intr(struct adapter *adapter)
2748 device_t dev = adapter->dev;
2750 if (adapter->res_interrupt != NULL) {
2751 bus_teardown_intr(dev, adapter->res_interrupt,
2752 adapter->int_handler_tag);
2753 adapter->int_handler_tag = NULL;
2755 if (adapter->tq != NULL) {
2756 taskqueue_drain(adapter->tq, &adapter->rxtx_task);
/* link_task was enqueued on the global fast queue, drain it there. */
2757 taskqueue_drain(taskqueue_fast, &adapter->link_task);
2758 taskqueue_free(adapter->tq);
/*
 * Release every bus resource acquired in em_allocate_pci_resources():
 * the IRQ (rid depends on whether MSI was used), the MSI-X table BAR
 * and any allocated message vectors (FreeBSD > 602111 only), the
 * register memory BAR, the flash mapping, and the I/O port BAR.
 * Each release is guarded so partial allocations unwind cleanly.
 */
2764 em_free_pci_resources(struct adapter *adapter)
2766 device_t dev = adapter->dev;
2768 if (adapter->res_interrupt != NULL)
2769 bus_release_resource(dev, SYS_RES_IRQ,
2770 adapter->msi ? 1 : 0, adapter->res_interrupt);
2772 #if __FreeBSD_version > 602111 /* MSI support is present */
2773 if (adapter->msix_mem != NULL)
2774 bus_release_resource(dev, SYS_RES_MEMORY,
2775 PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem);
2778 pci_release_msi(dev);
2779 #endif /* FreeBSD_version */
2781 if (adapter->res_memory != NULL)
2782 bus_release_resource(dev, SYS_RES_MEMORY,
2783 PCIR_BAR(0), adapter->res_memory);
2785 if (adapter->flash_mem != NULL)
2786 bus_release_resource(dev, SYS_RES_MEMORY,
2787 EM_FLASH, adapter->flash_mem);
2789 if (adapter->res_ioport != NULL)
2790 bus_release_resource(dev, SYS_RES_IOPORT,
2791 adapter->io_rid, adapter->res_ioport);
2794 #if __FreeBSD_version > 602111 /* MSI support is present */
/*
 * Attempt message-signaled interrupt setup (compiled only when
 * __FreeBSD_version > 602111).  Pre-82571 parts get no MSI at all.
 * On 82575 and newer, map the MSI-X table BAR and try pci_alloc_msix;
 * otherwise (or on MSI-X failure) fall back to a single MSI vector.
 * The caller uses the result to pick the IRQ rid.
 */
2799 em_setup_msix(struct adapter *adapter)
2801 device_t dev = adapter->dev;
/* Parts older than 82571 do not support message interrupts. */
2804 if (adapter->hw.mac.type < e1000_82571)
2807 /* First try MSI/X if possible */
2808 if (adapter->hw.mac.type >= e1000_82575) {
2809 rid = PCIR_BAR(EM_MSIX_BAR);
2810 adapter->msix_mem = bus_alloc_resource_any(dev,
2811 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2812 if (!adapter->msix_mem) {
2813 /* May not be enabled */
2814 device_printf(adapter->dev,
2815 "Unable to map MSIX table \n");
2818 val = pci_msix_count(dev);
2819 if ((val) && pci_alloc_msix(dev, &val) == 0) {
2821 device_printf(adapter->dev,"Using MSIX interrupts\n");
/* MSI-X unavailable: try a single plain MSI vector instead. */
2826 val = pci_msi_count(dev);
2827 if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
2829 device_printf(adapter->dev,"Using MSI interrupt\n");
2836 /*********************************************************************
2838 * Initialize the hardware to a configuration
2839 * as specified by the adapter structure.
2841 **********************************************************************/
/*
 * Bring the MAC to a known-good configured state: global reset, take
 * control from manageability firmware where applicable (82573/ICH8/
 * ICH9 with management mode), optionally disable PHY smart power down
 * on 82571/82572, compute flow-control watermarks from the packet
 * buffer size, then run the shared-code e1000_init_hw().
 */
2843 em_hardware_init(struct adapter *adapter)
2845 device_t dev = adapter->dev;
2846 uint16_t rx_buffer_size;
2848 INIT_DEBUGOUT("em_hardware_init: begin");
2850 /* Issue a global reset */
2851 e1000_reset_hw(&adapter->hw);
2853 /* Get control from any management/hw control */
2854 if (((adapter->hw.mac.type == e1000_82573) ||
2855 (adapter->hw.mac.type == e1000_ich8lan) ||
2856 (adapter->hw.mac.type == e1000_ich9lan)) &&
2857 e1000_check_mng_mode(&adapter->hw))
2858 em_get_hw_control(adapter);
2860 /* When hardware is reset, fifo_head is also reset */
2861 adapter->tx_fifo_head = 0;
2863 /* Set up smart power down as default off on newer adapters. */
2864 if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
2865 adapter->hw.mac.type == e1000_82572)) {
2866 uint16_t phy_tmp = 0;
2868 /* Speed up time to link by disabling smart power down. */
2869 e1000_read_phy_reg(&adapter->hw,
2870 IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
2871 phy_tmp &= ~IGP02E1000_PM_SPD;
2872 e1000_write_phy_reg(&adapter->hw,
2873 IGP02E1000_PHY_POWER_MGMT, phy_tmp);
2877 * These parameters control the automatic generation (Tx) and
2878 * response (Rx) to Ethernet PAUSE frames.
2879 * - High water mark should allow for at least two frames to be
2880 * received after sending an XOFF.
2881 * - Low water mark works best when it is very near the high water mark.
2882 * This allows the receiver to restart by sending XON when it has
2883 * drained a bit. Here we use an arbitary value of 1500 which will
2884 * restart after one full frame is pulled from the buffer. There
2885 * could be several smaller frames in the buffer and if so they will
2886 * not trigger the XON until their total number reduces the buffer
2888 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2890 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
/* High water = buffer size minus one max frame (rounded to 1K). */
2893 adapter->hw.fc.high_water = rx_buffer_size -
2894 roundup2(adapter->max_frame_size, 1024);
2895 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2897 if (adapter->hw.mac.type == e1000_80003es2lan)
2898 adapter->hw.fc.pause_time = 0xFFFF;
2900 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2901 adapter->hw.fc.send_xon = TRUE;
2902 adapter->hw.fc.type = e1000_fc_full;
2904 if (e1000_init_hw(&adapter->hw) < 0) {
2905 device_printf(dev, "Hardware Initialization Failed\n");
2909 e1000_check_for_link(&adapter->hw);
2914 /*********************************************************************
2916 * Setup networking device structure and register an interface.
2918 **********************************************************************/
/*
 * Create and register the network interface (ifnet): allocate the
 * ifnet, wire up init/ioctl/start entry points and the send queue,
 * attach as Ethernet, advertise capabilities (HW checksum on 82543+,
 * TSO4 on capable MACs, VLAN tagging/MTU, optional polling), and
 * populate the ifmedia list according to the PHY media type.
 */
2920 em_setup_interface(device_t dev, struct adapter *adapter)
2924 INIT_DEBUGOUT("em_setup_interface: begin");
2926 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2928 panic("%s: can not if_alloc()", device_get_nameunit(dev));
2929 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2930 ifp->if_mtu = ETHERMTU;
2931 ifp->if_init = em_init;
2932 ifp->if_softc = adapter;
2933 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2934 ifp->if_ioctl = em_ioctl;
2935 ifp->if_start = em_start;
/* Size the IF send queue to the TX ring, leaving one descriptor spare. */
2936 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2937 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2938 IFQ_SET_READY(&ifp->if_snd);
2940 ether_ifattach(ifp, adapter->hw.mac.addr);
2942 ifp->if_capabilities = ifp->if_capenable = 0;
2944 if (adapter->hw.mac.type >= e1000_82543) {
2946 #if __FreeBSD_version < 700000
2947 version_cap = IFCAP_HWCSUM;
2949 version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2951 ifp->if_capabilities |= version_cap;
2952 ifp->if_capenable |= version_cap;
2955 #if __FreeBSD_version >= 700000
2956 /* Identify TSO capable adapters */
2957 if ((adapter->hw.mac.type > e1000_82544) &&
2958 (adapter->hw.mac.type != e1000_82547))
2959 ifp->if_capabilities |= IFCAP_TSO4;
2961 * By default only enable on PCI-E, this
2962 * can be overriden by ifconfig.
2964 if (adapter->hw.mac.type >= e1000_82571)
2965 ifp->if_capenable |= IFCAP_TSO4;
2969 * Tell the upper layer(s) we support long frames.
2971 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2972 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2973 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2975 #ifdef DEVICE_POLLING
2976 ifp->if_capabilities |= IFCAP_POLLING;
2980 * Specify the media types supported by this adapter and register
2981 * callbacks to update media and link information
2983 ifmedia_init(&adapter->media, IFM_IMASK,
2984 em_media_change, em_media_status);
2985 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2986 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2987 u_char fiber_type = IFM_1000_SX; /* default type */
/* 82545 fiber parts are 1000BASE-LX rather than SX. */
2989 if (adapter->hw.mac.type == e1000_82545)
2990 fiber_type = IFM_1000_LX;
2991 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
2993 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2995 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2996 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2998 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
3000 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
/* The IFE PHY cannot do 1000BASE-T; omit gigabit media for it. */
3002 if (adapter->hw.phy.type != e1000_phy_ife) {
3003 ifmedia_add(&adapter->media,
3004 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
3005 ifmedia_add(&adapter->media,
3006 IFM_ETHER | IFM_1000_T, 0, NULL);
3009 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3010 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3014 /*********************************************************************
3016 * Workaround for SmartSpeed on 82541 and 82547 controllers
3018 **********************************************************************/
/*
 * SmartSpeed workaround for 82541/82547 (IGP PHY): when autoneg to
 * 1000FULL repeatedly fails due to Master/Slave configuration faults,
 * temporarily clear the manual MS_ENABLE bit and restart autoneg;
 * after EM_SMARTSPEED_DOWNSHIFT attempts, try the opposite (force
 * MS_ENABLE, e.g. for 2/3-pair cabling).  The smartspeed counter
 * cycles back to 0 at EM_SMARTSPEED_MAX.  No-op once link is up, for
 * non-IGP PHYs, or when 1000FULL is not being advertised.
 */
3020 em_smartspeed(struct adapter *adapter)
3024 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
3025 adapter->hw.mac.autoneg == 0 ||
3026 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
3029 if (adapter->smartspeed == 0) {
3030 /* If Master/Slave config fault is asserted twice,
3031 * we assume back-to-back */
3032 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
3033 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
3035 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
3036 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
3037 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
/* Fault confirmed twice: drop manual master/slave and re-negotiate. */
3038 if(phy_tmp & CR_1000T_MS_ENABLE) {
3039 phy_tmp &= ~CR_1000T_MS_ENABLE;
3040 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
3042 adapter->smartspeed++;
3043 if(adapter->hw.mac.autoneg &&
3044 !e1000_phy_setup_autoneg(&adapter->hw) &&
3045 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL,
3047 phy_tmp |= (MII_CR_AUTO_NEG_EN |
3048 MII_CR_RESTART_AUTO_NEG);
3049 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL,
3055 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
3056 /* If still no link, perhaps using 2/3 pair cable */
3057 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
3058 phy_tmp |= CR_1000T_MS_ENABLE;
3059 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
3060 if(adapter->hw.mac.autoneg &&
3061 !e1000_phy_setup_autoneg(&adapter->hw) &&
3062 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
3063 phy_tmp |= (MII_CR_AUTO_NEG_EN |
3064 MII_CR_RESTART_AUTO_NEG);
3065 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
3068 /* Restart process after EM_SMARTSPEED_MAX iterations */
3069 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
3070 adapter->smartspeed = 0;
3075 * Manage DMA'able memory.
/*
 * busdma load callback: store the bus address of the first (and, for
 * this driver's single-segment control structures, only) DMA segment
 * into the bus_addr_t pointed to by 'arg'.
 */
3078 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3082 *(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * Allocate a contiguous, DMA-able control structure of 'size' bytes:
 * creates a dedicated tag (EM_DBA_ALIGN alignment), allocates coherent
 * memory, and loads the map to obtain the bus address via
 * em_dmamap_cb.  On any failure the partially created tag/map/memory
 * are unwound before returning.  Results land in 'dma' (tag, map,
 * kernel vaddr, bus paddr).
 */
3086 em_dma_malloc(struct adapter *adapter, bus_size_t size,
3087 struct em_dma_alloc *dma, int mapflags)
/* Pre-7.x has no bus_get_dma_tag(); fall back to a NULL parent tag. */
3091 #if __FreeBSD_version >= 700000
3092 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
3094 error = bus_dma_tag_create(NULL, /* parent */
3096 EM_DBA_ALIGN, 0, /* alignment, bounds */
3097 BUS_SPACE_MAXADDR, /* lowaddr */
3098 BUS_SPACE_MAXADDR, /* highaddr */
3099 NULL, NULL, /* filter, filterarg */
3102 size, /* maxsegsize */
3104 NULL, /* lockfunc */
3108 device_printf(adapter->dev,
3109 "%s: bus_dma_tag_create failed: %d\n",
3114 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3115 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
3117 device_printf(adapter->dev,
3118 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3119 __func__, (uintmax_t)size, error);
/* Load the map; em_dmamap_cb stores the bus address in dma_paddr. */
3124 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3125 size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
3126 if (error || dma->dma_paddr == 0) {
3127 device_printf(adapter->dev,
3128 "%s: bus_dmamap_load failed: %d\n",
/* Error unwind: release map, memory, and tag in reverse order. */
3136 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3138 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3139 bus_dma_tag_destroy(dma->dma_tag);
3141 dma->dma_map = NULL;
3142 dma->dma_tag = NULL;
/*
 * Release a DMA area created by em_dma_malloc(): sync, unload and free
 * the mapped memory, then destroy the tag.  Safe to call on an
 * already-freed/never-allocated descriptor (NULL tag is a no-op).
 */
3148 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
3150 if (dma->dma_tag == NULL)
3152 if (dma->dma_map != NULL) {
3153 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3154 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3155 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3156 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3157 dma->dma_map = NULL;
3159 bus_dma_tag_destroy(dma->dma_tag);
3160 dma->dma_tag = NULL;
3164 /*********************************************************************
3166 * Allocate memory for tx_buffer structures. The tx_buffer stores all
3167 * the information needed to transmit a packet on the wire.
3169 **********************************************************************/
/*
 * Allocate TX bookkeeping: a DMA tag sized for TSO payloads
 * (EM_TSO_SIZE / EM_MAX_SCATTER segments), the tx_buffer array
 * (one em_buffer per descriptor), and a DMA map per buffer.
 * On failure, em_free_transmit_structures() unwinds whatever
 * was created.
 */
3171 em_allocate_transmit_structures(struct adapter *adapter)
3173 device_t dev = adapter->dev;
3174 struct em_buffer *tx_buffer;
3178 * Create DMA tags for tx descriptors
/* Pre-7.x has no bus_get_dma_tag(); fall back to a NULL parent tag. */
3180 #if __FreeBSD_version >= 700000
3181 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3183 if ((error = bus_dma_tag_create(NULL, /* parent */
3185 1, 0, /* alignment, bounds */
3186 BUS_SPACE_MAXADDR, /* lowaddr */
3187 BUS_SPACE_MAXADDR, /* highaddr */
3188 NULL, NULL, /* filter, filterarg */
3189 EM_TSO_SIZE, /* maxsize */
3190 EM_MAX_SCATTER, /* nsegments */
3191 EM_TSO_SEG_SIZE, /* maxsegsize */
3193 NULL, /* lockfunc */
3195 &adapter->txtag)) != 0) {
3196 device_printf(dev, "Unable to allocate TX DMA tag\n");
3200 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
3201 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3202 if (adapter->tx_buffer_area == NULL) {
3203 device_printf(dev, "Unable to allocate tx_buffer memory\n");
3208 /* Create the descriptor buffer dma maps */
3209 for (int i = 0; i < adapter->num_tx_desc; i++) {
3210 tx_buffer = &adapter->tx_buffer_area[i];
3211 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
3213 device_printf(dev, "Unable to create TX DMA map\n");
/* -1 marks "no end-of-packet descriptor outstanding" for this slot. */
3216 tx_buffer->next_eop = -1;
/* Error path: release anything partially allocated above. */
3221 em_free_transmit_structures(adapter);
3225 /*********************************************************************
3227 * (Re)Initialize transmit structures.
3229 **********************************************************************/
/*
 * (Re)initialize the TX ring for use: zero the descriptor ring,
 * unload/free any mbufs left on the tx_buffer array, reset the
 * ring indices and free-descriptor count, then sync the descriptor
 * DMA area for device access.
 */
3231 em_setup_transmit_structures(struct adapter *adapter)
3233 struct em_buffer *tx_buffer;
3235 /* Clear the old ring contents */
3236 bzero(adapter->tx_desc_base,
3237 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
3239 /* Free any existing TX buffers */
/* NOTE(review): the tx_buffer++ in the increment clause is redundant
 * (tx_buffer is reassigned from i at the top of each iteration). */
3240 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3241 tx_buffer = &adapter->tx_buffer_area[i];
3242 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3243 BUS_DMASYNC_POSTWRITE);
3244 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
3245 m_freem(tx_buffer->m_head);
3246 tx_buffer->m_head = NULL;
3247 tx_buffer->next_eop = -1;
3251 adapter->next_avail_tx_desc = 0;
3252 adapter->next_tx_to_clean = 0;
3253 adapter->num_tx_desc_avail = adapter->num_tx_desc;
3255 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3256 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3261 /*********************************************************************
3263 * Enable transmit unit.
3265 **********************************************************************/
/*
 * Program the hardware transmit unit: descriptor ring base/length and
 * head/tail pointers, inter-packet gap (per MAC family and media
 * type), TX interrupt delay registers (TIDV/TADV), TARC tweaks for
 * 82571/82572 and 80003es2lan, and finally TCTL to enable the
 * transmitter.  Also selects the per-descriptor command defaults and
 * the encap routine (advanced descriptors on 82575+).
 */
3267 em_initialize_transmit_unit(struct adapter *adapter)
3269 uint32_t tctl, tarc, tipg = 0;
3272 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
3273 /* Setup the Base and Length of the Tx Descriptor Ring */
3274 bus_addr = adapter->txdma.dma_paddr;
3275 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
3276 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
3277 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
3278 (uint32_t)(bus_addr >> 32));
3279 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
3280 (uint32_t)bus_addr);
3281 /* Setup the HW Tx Head and Tail descriptor pointers */
3282 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
3283 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
3285 HW_DEBUGOUT2("Base = %x, Length = %x\n",
3286 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
3287 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
3289 /* Set the default values for the Tx Inter Packet Gap timer */
3290 switch (adapter->hw.mac.type) {
3292 tipg = DEFAULT_82542_TIPG_IPGT;
3293 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3294 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3296 case e1000_80003es2lan:
3297 tipg = DEFAULT_82543_TIPG_IPGR1;
3298 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
3299 E1000_TIPG_IPGR2_SHIFT;
/* Default case: IPGT differs between fiber/serdes and copper media. */
3302 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3303 (adapter->hw.phy.media_type ==
3304 e1000_media_type_internal_serdes))
3305 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
3307 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
3308 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3309 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3312 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
3313 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
3314 if(adapter->hw.mac.type >= e1000_82540)
3315 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
3316 adapter->tx_abs_int_delay.value);
3318 if ((adapter->hw.mac.type == e1000_82571) ||
3319 (adapter->hw.mac.type == e1000_82572)) {
3320 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3321 tarc |= SPEED_MODE_BIT;
3322 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3323 } else if (adapter->hw.mac.type == e1000_80003es2lan) {
3324 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3326 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3327 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
3329 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
3332 /* Program the Transmit Control Register */
3333 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
3334 tctl &= ~E1000_TCTL_CT;
3335 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
3336 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
3338 if (adapter->hw.mac.type >= e1000_82571)
3339 tctl |= E1000_TCTL_MULR;
3341 /* This write will effectively turn on the transmit unit. */
3342 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
3344 /* Setup Transmit Descriptor Base Settings */
3345 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
/* Interrupt-delay (IDE) is not used on 82575 descriptors. */
3347 if ((adapter->tx_int_delay.value > 0) &&
3348 (adapter->hw.mac.type != e1000_82575))
3349 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3351 /* Set the function pointer for the transmit routine */
3352 if (adapter->hw.mac.type >= e1000_82575)
3353 adapter->em_xmit = em_adv_encap;
3355 adapter->em_xmit = em_encap;
3358 /*********************************************************************
3360 * Free all transmit related data structures.
3362 **********************************************************************/
/*
 * Free all TX resources allocated by em_allocate_transmit_structures:
 * per-buffer mbufs and DMA maps, the tx_buffer array itself, and the
 * TX DMA tag.  All guards are NULL-safe so this also serves as the
 * unwind path for a partially completed allocation.
 */
3364 em_free_transmit_structures(struct adapter *adapter)
3366 struct em_buffer *tx_buffer;
3368 INIT_DEBUGOUT("free_transmit_structures: begin");
3370 if (adapter->tx_buffer_area != NULL) {
3371 for (int i = 0; i < adapter->num_tx_desc; i++) {
3372 tx_buffer = &adapter->tx_buffer_area[i];
/* Buffers with an mbuf attached need sync+unload before the free. */
3373 if (tx_buffer->m_head != NULL) {
3374 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3375 BUS_DMASYNC_POSTWRITE);
3376 bus_dmamap_unload(adapter->txtag,
3378 m_freem(tx_buffer->m_head);
3379 tx_buffer->m_head = NULL;
3380 } else if (tx_buffer->map != NULL)
3381 bus_dmamap_unload(adapter->txtag,
3383 if (tx_buffer->map != NULL) {
3384 bus_dmamap_destroy(adapter->txtag,
3386 tx_buffer->map = NULL;
3390 if (adapter->tx_buffer_area != NULL) {
3391 free(adapter->tx_buffer_area, M_DEVBUF);
3392 adapter->tx_buffer_area = NULL;
3394 if (adapter->txtag != NULL) {
3395 bus_dma_tag_destroy(adapter->txtag);
3396 adapter->txtag = NULL;
3400 /*********************************************************************
3402 * The offload context needs to be set when we transfer the first
3403 * packet of a particular protocol (TCP/UDP). This routine has been
3404 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
3406 **********************************************************************/
/*
 * em_transmit_checksum_setup -- consume one TX descriptor as a legacy
 * context descriptor that tells the hardware where the IP and TCP/UDP
 * checksum fields live, so following data descriptors get checksums
 * inserted.  Returns the option bits callers must OR into their data
 * descriptors through *txd_upper / *txd_lower.
 *
 * NOTE(review): this listing has gaps (the embedded original line
 * numbers jump), so some declarations, switch labels and braces are
 * missing here -- compare against a pristine if_em.c before editing.
 */
3408 em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
3409 uint32_t *txd_upper, uint32_t *txd_lower)
3411 struct e1000_context_desc *TXD;
3412 struct em_buffer *tx_buffer;
3413 struct ether_vlan_header *eh;
3415 struct ip6_hdr *ip6;
3417 int curr_txd, ehdrlen, hdr_len, ip_hlen;
3422 /* Setup checksum offload context. */
3423 curr_txd = adapter->next_avail_tx_desc;
3424 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3425 TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
3427 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
3428 E1000_TXD_DTYP_D; /* Data descr */
3431 * Determine where frame payload starts.
3432 * Jump over vlan headers if already present,
3433 * helpful for QinQ too.
3435 eh = mtod(mp, struct ether_vlan_header *);
3436 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3437 etype = ntohs(eh->evl_proto);
3438 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3440 etype = ntohs(eh->evl_encap_proto);
3441 ehdrlen = ETHER_HDR_LEN;
3445 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
3446 * TODO: Support SCTP too when it hits the tree.
3450 ip = (struct ip *)(mp->m_data + ehdrlen);
3451 ip_hlen = ip->ip_hl << 2;
3453 /* Setup of IP header checksum. */
3454 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3456 * Start offset for header checksum calculation.
3457 * End offset for header checksum calculation.
3458 * Offset of place to put the checksum.
3460 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3461 TXD->lower_setup.ip_fields.ipcse =
3462 htole16(ehdrlen + ip_hlen);
3463 TXD->lower_setup.ip_fields.ipcso =
3464 ehdrlen + offsetof(struct ip, ip_sum);
3465 cmd |= E1000_TXD_CMD_IP;
3466 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
/* Bail if the full IP header is not contiguous in the first mbuf. */
3469 if (mp->m_len < ehdrlen + ip_hlen)
3470 return; /* failure */
3472 hdr_len = ehdrlen + ip_hlen;
3476 case ETHERTYPE_IPV6:
3477 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3478 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3480 if (mp->m_len < ehdrlen + ip_hlen)
3481 return; /* failure */
3483 /* IPv6 doesn't have a header checksum. */
3485 hdr_len = ehdrlen + ip_hlen;
3486 ipproto = ip6->ip6_nxt;
3497 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3499 * Start offset for payload checksum calculation.
3500 * End offset for payload checksum calculation.
3501 * Offset of place to put the checksum.
3503 th = (struct tcp_hdr *)(mp->m_data + hdr_len);
3504 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3505 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3506 TXD->upper_setup.tcp_fields.tucso =
3507 hdr_len + offsetof(struct tcphdr, th_sum);
3508 cmd |= E1000_TXD_CMD_TCP;
3509 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3513 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3515 * Start offset for header checksum calculation.
3516 * End offset for header checksum calculation.
3517 * Offset of place to put the checksum.
3519 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3520 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3521 TXD->upper_setup.tcp_fields.tucso =
3522 hdr_len + offsetof(struct udphdr, uh_sum);
3523 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
/* Commit the context descriptor and consume the ring slot. */
3530 TXD->tcp_seg_setup.data = htole32(0);
3531 TXD->cmd_and_length =
3532 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3533 tx_buffer->m_head = NULL;
3534 tx_buffer->next_eop = -1;
3536 if (++curr_txd == adapter->num_tx_desc)
3539 adapter->num_tx_desc_avail--;
3540 adapter->next_avail_tx_desc = curr_txd;
3544 #if __FreeBSD_version >= 700000
3545 /**********************************************************************
3547 * Setup work for hardware segmentation offload (TSO)
3549 **********************************************************************/
/*
 * em_tso_setup -- program a legacy context descriptor for hardware TCP
 * segmentation offload.  Validates that the Ethernet/IP/TCP headers are
 * contiguous in the first mbuf, seeds th_sum with the pseudo-header
 * checksum the hardware expects, fills in the checksum offsets and MSS,
 * and hands back the per-packet option bits via *txd_upper/*txd_lower.
 * Returns FALSE when the packet cannot be TSO'd (caller falls back).
 *
 * NOTE(review): this listing has gaps (embedded line numbers jump);
 * switch labels, braces and some statements are missing here.
 */
3551 em_tso_setup(struct adapter *adapter, struct mbuf *mp, uint32_t *txd_upper,
3552 uint32_t *txd_lower)
3554 struct e1000_context_desc *TXD;
3555 struct em_buffer *tx_buffer;
3556 struct ether_vlan_header *eh;
3558 struct ip6_hdr *ip6;
3560 int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
3564 * This function could/should be extended to support IP/IPv6
3565 * fragmentation as well. But as they say, one step at a time.
3569 * Determine where frame payload starts.
3570 * Jump over vlan headers if already present,
3571 * helpful for QinQ too.
3573 eh = mtod(mp, struct ether_vlan_header *);
3574 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3575 etype = ntohs(eh->evl_proto);
3576 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3578 etype = ntohs(eh->evl_encap_proto);
3579 ehdrlen = ETHER_HDR_LEN;
3582 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3583 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3584 return FALSE; /* -1 */
3587 * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
3588 * TODO: Support SCTP too when it hits the tree.
3593 ip = (struct ip *)(mp->m_data + ehdrlen);
3594 if (ip->ip_p != IPPROTO_TCP)
3595 return FALSE; /* 0 */
3598 ip_hlen = ip->ip_hl << 2;
3599 if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
3600 return FALSE; /* -1 */
3601 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
3603 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3604 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3606 th->th_sum = mp->m_pkthdr.csum_data;
3609 case ETHERTYPE_IPV6:
3611 return FALSE; /* Not supported yet. */
3612 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3613 if (ip6->ip6_nxt != IPPROTO_TCP)
3614 return FALSE; /* 0 */
3616 ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
3617 if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
3618 return FALSE; /* -1 */
3619 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
/* BUGFIX: the IPv6 destination lives in ip6, not the struct ip pointer. */
3621 th->th_sum = in6_pseudo(ip6->ip6_src, ip6->ip6_dst,
3622 htons(IPPROTO_TCP)); /* XXX: function notyet. */
3624 th->th_sum = mp->m_pkthdr.csum_data;
3630 hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);
3632 *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
3633 E1000_TXD_DTYP_D | /* Data descr type */
3634 E1000_TXD_CMD_TSE); /* Do TSE on this packet */
3636 /* IP and/or TCP header checksum calculation and insertion. */
3637 *txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
3638 E1000_TXD_POPTS_TXSM) << 8;
3640 curr_txd = adapter->next_avail_tx_desc;
3641 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3642 TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
3644 /* IPv6 doesn't have a header checksum. */
3647 * Start offset for header checksum calculation.
3648 * End offset for header checksum calculation.
3649 * Offset of place to put the checksum.
3651 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3652 TXD->lower_setup.ip_fields.ipcse =
3653 htole16(ehdrlen + ip_hlen - 1);
3654 TXD->lower_setup.ip_fields.ipcso =
3655 ehdrlen + offsetof(struct ip, ip_sum);
3658 * Start offset for payload checksum calculation.
3659 * End offset for payload checksum calculation.
3660 * Offset of place to put the checksum.
3662 TXD->upper_setup.tcp_fields.tucss =
3664 TXD->upper_setup.tcp_fields.tucse = 0;
3665 TXD->upper_setup.tcp_fields.tucso =
3666 ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
3668 * Payload size per packet w/o any headers.
3669 * Length of all headers up to payload.
3671 TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
3672 TXD->tcp_seg_setup.fields.hdr_len = hdr_len;
3674 TXD->cmd_and_length = htole32(adapter->txd_cmd |
3675 E1000_TXD_CMD_DEXT | /* Extended descr */
3676 E1000_TXD_CMD_TSE | /* TSE context */
3677 (isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
3678 E1000_TXD_CMD_TCP | /* Do TCP checksum */
3679 (mp->m_pkthdr.len - (hdr_len))); /* Total len */
3681 tx_buffer->m_head = NULL;
3682 tx_buffer->next_eop = -1;
3684 if (++curr_txd == adapter->num_tx_desc)
3687 adapter->num_tx_desc_avail--;
3688 adapter->next_avail_tx_desc = curr_txd;
3689 adapter->tx_tso = TRUE;
3695 /**********************************************************************
3697 * Setup work for hardware segmentation offload (TSO) on
3698 * adapters using advanced tx descriptors (82575)
3700 **********************************************************************/
/*
 * em_tso_adv_setup -- build an advanced (82575-style) context
 * descriptor for TSO.  IPv4/TCP only; seeds th_sum with the
 * pseudo-header checksum, packs VLAN/MAC/IP lengths and MSS/L4 length
 * into the descriptor, and returns the total header length via *hdrlen
 * (used later in igb_encap, per the comment below).
 *
 * NOTE(review): listing has gaps (embedded line numbers jump); some
 * declarations and closing braces are missing here.
 */
3702 em_tso_adv_setup(struct adapter *adapter, struct mbuf *mp, u32 *hdrlen)
3704 struct e1000_adv_tx_context_desc *TXD;
3705 struct em_buffer *tx_buffer;
3706 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3707 u32 mss_l4len_idx = 0;
3709 int ctxd, ehdrlen, ip_hlen, tcp_hlen;
3710 struct ether_vlan_header *eh;
3715 * Determine where frame payload starts.
3716 * Jump over vlan headers if already present
3718 eh = mtod(mp, struct ether_vlan_header *);
3719 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
3720 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3722 ehdrlen = ETHER_HDR_LEN;
3724 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3725 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3728 /* Only supports IPV4 for now */
3729 ctxd = adapter->next_avail_tx_desc;
3730 tx_buffer = &adapter->tx_buffer_area[ctxd];
3731 TXD = (struct e1000_adv_tx_context_desc *) &adapter->tx_desc_base[ctxd];
3733 ip = (struct ip *)(mp->m_data + ehdrlen);
3734 if (ip->ip_p != IPPROTO_TCP)
3735 return FALSE; /* 0 */
3738 ip_hlen = ip->ip_hl << 2;
3739 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
/* Hardware expects th_sum preloaded with the pseudo-header checksum. */
3740 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3741 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3742 tcp_hlen = th->th_off << 2;
3744 * Calculate header length, this is used
3745 * in the transmit desc in igb_encap
3747 *hdrlen = ehdrlen + ip_hlen + tcp_hlen;
3749 /* VLAN MACLEN IPLEN */
3750 if (mp->m_flags & M_VLANTAG) {
3751 vtag = htole16(mp->m_pkthdr.ether_vtag);
3752 vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
3755 vlan_macip_lens |= (ehdrlen << E1000_ADVTXD_MACLEN_SHIFT);
3756 vlan_macip_lens |= ip_hlen;
3757 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3759 /* ADV DTYPE TUCMD */
3760 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
3761 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
3762 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
3763 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3766 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
3767 mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
3768 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
3770 TXD->seqnum_seed = htole32(0);
3771 tx_buffer->m_head = NULL;
3772 tx_buffer->next_eop = -1;
/* Consume the ring slot used by this context descriptor. */
3774 if (++ctxd == adapter->num_tx_desc)
3777 adapter->num_tx_desc_avail--;
3778 adapter->next_avail_tx_desc = ctxd;
3782 #endif /* FreeBSD_version >= 700000 */
3784 /*********************************************************************
3786 * Advanced Context Descriptor setup for VLAN or CSUM
3788 **********************************************************************/
/*
 * em_tx_adv_ctx_setup -- build an advanced context descriptor carrying
 * the VLAN tag and/or L3/L4 checksum-offload parameters for the packet.
 * Returns FALSE when no context descriptor is needed (no VLAN tag and
 * no checksum offload requested); otherwise consumes one ring slot.
 *
 * NOTE(review): listing has gaps (embedded line numbers jump); the
 * switch statement, some labels and braces are missing here.
 */
3791 em_tx_adv_ctx_setup(struct adapter *adapter, struct mbuf *mp)
3793 struct e1000_adv_tx_context_desc *TXD;
3794 struct em_buffer *tx_buffer;
3795 uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3796 struct ether_vlan_header *eh;
3798 struct ip6_hdr *ip6;
3799 int ehdrlen, ip_hlen = 0;
3802 bool offload = TRUE;
3803 #if __FreeBSD_version < 700000
3809 int ctxd = adapter->next_avail_tx_desc;
3810 tx_buffer = &adapter->tx_buffer_area[ctxd];
3811 TXD = (struct e1000_adv_tx_context_desc *) &adapter->tx_desc_base[ctxd];
3813 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
3814 offload = FALSE; /* Only here to handle VLANs */
3816 ** In advanced descriptors the vlan tag must
3817 ** be placed into the descriptor itself.
3819 #if __FreeBSD_version < 700000
3820 mtag = VLAN_OUTPUT_TAG(ifp, mp);
3823 htole16(VLAN_TAG_VALUE(mtag)) << E1000_ADVTXD_VLAN_SHIFT;
3824 } else if (offload == FALSE)
3825 return FALSE; /* No CTX needed */
3827 if (mp->m_flags & M_VLANTAG) {
3828 vtag = htole16(mp->m_pkthdr.ether_vtag);
3829 vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
3830 } else if (offload == FALSE)
3834 * Determine where frame payload starts.
3835 * Jump over vlan headers if already present,
3836 * helpful for QinQ too.
3838 eh = mtod(mp, struct ether_vlan_header *);
3839 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3840 etype = ntohs(eh->evl_proto);
3841 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3843 etype = ntohs(eh->evl_encap_proto);
3844 ehdrlen = ETHER_HDR_LEN;
3847 /* Set the ether header length */
3848 vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
3852 ip = (struct ip *)(mp->m_data + ehdrlen);
3853 ip_hlen = ip->ip_hl << 2;
3854 if (mp->m_len < ehdrlen + ip_hlen) {
3859 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
3861 case ETHERTYPE_IPV6:
3862 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3863 ip_hlen = sizeof(struct ip6_hdr);
3864 if (mp->m_len < ehdrlen + ip_hlen)
3865 return FALSE; /* failure */
3866 ipproto = ip6->ip6_nxt;
3867 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
3874 vlan_macip_lens |= ip_hlen;
3875 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
3879 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
3880 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
3883 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
3884 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
3891 /* Now copy bits into descriptor */
3892 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3893 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3894 TXD->seqnum_seed = htole32(0);
3895 TXD->mss_l4len_idx = htole32(0);
3897 tx_buffer->m_head = NULL;
3898 tx_buffer->next_eop = -1;
3900 /* We've consumed the first desc, adjust counters */
3901 if (++ctxd == adapter->num_tx_desc)
3903 adapter->next_avail_tx_desc = ctxd;
3904 --adapter->num_tx_desc_avail;
3910 /**********************************************************************
3912 * Examine each tx_buffer in the used queue. If the hardware is done
3913 * processing the packet then free associated resources. The
3914 * tx_buffer is put back on the free queue.
3916 **********************************************************************/
/*
 * em_txeof -- TX completion processing.  Walks the ring from
 * next_tx_to_clean, and for every packet whose EOP descriptor has the
 * DD (descriptor-done) bit set, zeroes the descriptors, syncs/unloads
 * the DMA map and frees the mbuf chain.  Finally updates
 * num_tx_desc_avail, clears IFF_DRV_OACTIVE when enough room is freed,
 * and arms/clears the watchdog timer.  Caller must hold the TX lock
 * (asserted below).
 *
 * NOTE(review): listing has gaps (embedded line numbers jump); loop
 * braces and a few statements (e.g. the `done` computation) are
 * missing here.
 */
3918 em_txeof(struct adapter *adapter)
3920 int first, last, done, num_avail;
3921 struct em_buffer *tx_buffer;
3922 struct e1000_tx_desc *tx_desc, *eop_desc;
3923 struct ifnet *ifp = adapter->ifp;
3925 EM_TX_LOCK_ASSERT(adapter);
3927 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3930 num_avail = adapter->num_tx_desc_avail;
3931 first = adapter->next_tx_to_clean;
3932 tx_desc = &adapter->tx_desc_base[first];
3933 tx_buffer = &adapter->tx_buffer_area[first];
3934 last = tx_buffer->next_eop;
3935 eop_desc = &adapter->tx_desc_base[last];
3938 * What this does is get the index of the
3939 * first descriptor AFTER the EOP of the
3940 * first packet, that way we can do the
3941 * simple comparison on the inner while loop.
3943 if (++last == adapter->num_tx_desc)
3947 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3948 BUS_DMASYNC_POSTREAD);
3950 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3951 /* We clean the range of the packet */
3952 while (first != done) {
3953 tx_desc->upper.data = 0;
3954 tx_desc->lower.data = 0;
3955 tx_desc->buffer_addr = 0;
3958 if (tx_buffer->m_head) {
3960 bus_dmamap_sync(adapter->txtag,
3962 BUS_DMASYNC_POSTWRITE);
3963 bus_dmamap_unload(adapter->txtag,
3966 m_freem(tx_buffer->m_head);
3967 tx_buffer->m_head = NULL;
3969 tx_buffer->next_eop = -1;
3971 if (++first == adapter->num_tx_desc)
3974 tx_buffer = &adapter->tx_buffer_area[first];
3975 tx_desc = &adapter->tx_desc_base[first];
3977 /* See if we can continue to the next packet */
3978 last = tx_buffer->next_eop;
3980 eop_desc = &adapter->tx_desc_base[last];
3981 /* Get new done point */
3982 if (++last == adapter->num_tx_desc) last = 0;
3987 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3988 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3990 adapter->next_tx_to_clean = first;
3993 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
3994 * that it is OK to send packets.
3995 * If there are no pending descriptors, clear the timeout. Otherwise,
3996 * if some descriptors have been freed, restart the timeout.
3998 if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
3999 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4000 /* All clean, turn off the timer */
4001 if (num_avail == adapter->num_tx_desc)
4002 adapter->watchdog_timer = 0;
4003 /* Some cleaned, reset the timer */
4004 else if (num_avail != adapter->num_tx_desc_avail)
4005 adapter->watchdog_timer = EM_TX_TIMEOUT;
4007 adapter->num_tx_desc_avail = num_avail;
4011 /*********************************************************************
4013 * When Link is lost sometimes there is work still in the TX ring
4014 which will result in a watchdog; rather than allow that, do an
4015 * attempted cleanup and then reinit here. Note that this has been
4016 seen mostly with fiber adapters.
4018 **********************************************************************/
/*
 * em_tx_purge -- when the link is down but the TX watchdog is still
 * armed, attempt a TX cleanup under the TX lock; if the watchdog is
 * still pending afterwards, clear it and reinitialize the adapter
 * instead of letting the watchdog fire (see header comment above).
 */
4020 em_tx_purge(struct adapter *adapter)
4022 if ((!adapter->link_active) && (adapter->watchdog_timer)) {
4023 EM_TX_LOCK(adapter);
4025 EM_TX_UNLOCK(adapter);
4026 if (adapter->watchdog_timer) { /* Still not clean? */
4027 adapter->watchdog_timer = 0;
4028 em_init_locked(adapter);
4033 /*********************************************************************
4035 * Get a buffer from system mbuf buffer pool.
4037 **********************************************************************/
/*
 * em_get_buf -- allocate a fresh mbuf cluster for RX ring slot i.
 * Loads the cluster through the pre-created spare DMA map, swaps the
 * spare map with the slot's map (so the old map becomes the new
 * spare), records the mbuf and programs the descriptor's buffer
 * address with the DMA segment address.
 */
4039 em_get_buf(struct adapter *adapter, int i)
4042 bus_dma_segment_t segs[1];
4044 struct em_buffer *rx_buffer;
4047 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
4049 adapter->mbuf_cluster_failed++;
4052 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Leave ETHER_ALIGN slack so the IP header lands 4-byte aligned. */
4054 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
4055 m_adj(m, ETHER_ALIGN);
4058 * Using memory from the mbuf cluster pool, invoke the
4059 * bus_dma machinery to arrange the memory mapping.
4061 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
4062 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
4068 /* If nsegs is wrong then the stack is corrupt. */
4069 KASSERT(nsegs == 1, ("Too many segments returned!"));
4071 rx_buffer = &adapter->rx_buffer_area[i];
4072 if (rx_buffer->m_head != NULL)
4073 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap the freshly loaded spare map into this slot. */
4075 map = rx_buffer->map;
4076 rx_buffer->map = adapter->rx_sparemap;
4077 adapter->rx_sparemap = map;
4078 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
4079 rx_buffer->m_head = m;
4081 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
4085 /*********************************************************************
4087 * Allocate memory for rx_buffer structures. Since we use one
4088 * rx_buffer per received packet, the maximum number of rx_buffer's
4089 * that we'll need is equal to the number of receive descriptors
4090 * that we've allocated.
4092 **********************************************************************/
/*
 * em_allocate_receive_structures -- allocate the per-descriptor
 * em_buffer array, the RX DMA tag (one cluster-sized segment each),
 * the spare map used by em_get_buf(), and a DMA map per RX slot.
 * On any failure, falls through to em_free_receive_structures() to
 * unwind whatever was allocated.
 */
4094 em_allocate_receive_structures(struct adapter *adapter)
4096 device_t dev = adapter->dev;
4097 struct em_buffer *rx_buffer;
4100 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
4101 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
4102 if (adapter->rx_buffer_area == NULL) {
4103 device_printf(dev, "Unable to allocate rx_buffer memory\n");
4107 #if __FreeBSD_version >= 700000
4108 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
4110 error = bus_dma_tag_create(NULL, /* parent */
4112 1, 0, /* alignment, bounds */
4113 BUS_SPACE_MAXADDR, /* lowaddr */
4114 BUS_SPACE_MAXADDR, /* highaddr */
4115 NULL, NULL, /* filter, filterarg */
4116 MCLBYTES, /* maxsize */
4118 MCLBYTES, /* maxsegsize */
4120 NULL, /* lockfunc */
4124 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
4129 /* Create the spare map (used by getbuf) */
4130 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4131 &adapter->rx_sparemap);
4133 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
4138 rx_buffer = adapter->rx_buffer_area;
4139 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4140 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4143 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* Error unwind: release everything allocated so far. */
4152 em_free_receive_structures(adapter);
4156 /*********************************************************************
4158 * (Re)initialize receive structures.
4160 **********************************************************************/
/*
 * em_setup_receive_structures -- (re)initialize the RX ring: zero the
 * descriptor array, free any mbufs still attached to the slots, then
 * populate every slot with a fresh buffer via em_get_buf(), reset the
 * scan pointer and sync the ring for device access.
 */
4162 em_setup_receive_structures(struct adapter *adapter)
4164 struct em_buffer *rx_buffer;
4167 /* Reset descriptor ring */
4168 bzero(adapter->rx_desc_base,
4169 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
4171 /* Free current RX buffers. */
4172 rx_buffer = adapter->rx_buffer_area;
4173 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4174 if (rx_buffer->m_head != NULL) {
4175 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4176 BUS_DMASYNC_POSTREAD);
4177 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
4178 m_freem(rx_buffer->m_head);
4179 rx_buffer->m_head = NULL;
4183 /* Allocate new ones. */
4184 for (i = 0; i < adapter->num_rx_desc; i++) {
4185 error = em_get_buf(adapter, i);
4190 /* Setup our descriptor pointers */
4191 adapter->next_rx_desc_to_check = 0;
4192 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4193 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4198 /*********************************************************************
4200 * Enable receive unit.
4202 **********************************************************************/
/*
 * em_initialize_receive_unit -- program the hardware RX engine:
 * disable receives, set interrupt-delay/throttling registers, write
 * the ring base address and length, build RCTL (broadcast accept,
 * buffer size, long-packet enable), enable HW RX checksum offload when
 * the interface requests it, apply the 82573 RDTR workaround, then
 * re-enable receives and initialize the head/tail pointers.
 */
4204 em_initialize_receive_unit(struct adapter *adapter)
4206 struct ifnet *ifp = adapter->ifp;
4209 uint32_t reg_rxcsum;
4211 INIT_DEBUGOUT("em_initialize_receive_unit: begin");
4214 * Make sure receives are disabled while setting
4215 * up the descriptor ring
4217 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4218 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl & ~E1000_RCTL_EN);
4220 if(adapter->hw.mac.type >= e1000_82540) {
4221 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
4222 adapter->rx_abs_int_delay.value);
4224 * Set the interrupt throttling rate. Value is calculated
4225 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
4227 #define MAX_INTS_PER_SEC 8000
4228 #define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256)
4229 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
4232 /* Setup the Base and Length of the Rx Descriptor Ring */
4233 bus_addr = adapter->rxdma.dma_paddr;
4234 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
4235 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
4236 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
4237 (uint32_t)(bus_addr >> 32));
4238 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
4239 (uint32_t)bus_addr);
4241 /* Setup the Receive Control Register */
4242 reg_rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4243 reg_rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
4244 E1000_RCTL_RDMTS_HALF |
4245 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4247 /* Make sure VLAN Filters are off */
4248 reg_rctl &= ~E1000_RCTL_VFE;
4250 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
4251 reg_rctl |= E1000_RCTL_SBP;
4253 reg_rctl &= ~E1000_RCTL_SBP;
/* Translate the configured RX buffer size into RCTL size bits. */
4255 switch (adapter->rx_buffer_len) {
4258 reg_rctl |= E1000_RCTL_SZ_2048;
4261 reg_rctl |= E1000_RCTL_SZ_4096 |
4262 E1000_RCTL_BSEX | E1000_RCTL_LPE;
4265 reg_rctl |= E1000_RCTL_SZ_8192 |
4266 E1000_RCTL_BSEX | E1000_RCTL_LPE;
4269 reg_rctl |= E1000_RCTL_SZ_16384 |
4270 E1000_RCTL_BSEX | E1000_RCTL_LPE;
4274 if (ifp->if_mtu > ETHERMTU)
4275 reg_rctl |= E1000_RCTL_LPE;
4277 reg_rctl &= ~E1000_RCTL_LPE;
4279 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
4280 if ((adapter->hw.mac.type >= e1000_82543) &&
4281 (ifp->if_capenable & IFCAP_RXCSUM)) {
4282 reg_rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
4283 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
4284 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, reg_rxcsum);
4288 ** XXX TEMPORARY WORKAROUND: on some systems with 82573
4289 ** long latencies are observed, like Lenovo X60. This
4290 ** change eliminates the problem, but since having positive
4291 ** values in RDTR is a known source of problems on other
4292 ** platforms another solution is being sought.
4294 if (adapter->hw.mac.type == e1000_82573)
4295 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);
4297 /* Enable Receives */
4298 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
4301 * Setup the HW Rx Head and
4302 * Tail Descriptor Pointers
4304 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
4305 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
4310 /*********************************************************************
4312 * Free receive related data structures.
4314 **********************************************************************/
/*
 * em_free_receive_structures -- tear down all RX resources in reverse
 * allocation order: the spare DMA map, every per-slot mbuf and DMA
 * map, the em_buffer array, and finally the RX DMA tag.  Safe to call
 * on a partially allocated state (used as the error-unwind path of
 * em_allocate_receive_structures()); pointers are NULLed after free.
 */
4316 em_free_receive_structures(struct adapter *adapter)
4318 struct em_buffer *rx_buffer;
4321 INIT_DEBUGOUT("free_receive_structures: begin");
4323 if (adapter->rx_sparemap) {
4324 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
4325 adapter->rx_sparemap = NULL;
4328 /* Cleanup any existing buffers */
4329 if (adapter->rx_buffer_area != NULL) {
4330 rx_buffer = adapter->rx_buffer_area;
4331 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4332 if (rx_buffer->m_head != NULL) {
4333 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4334 BUS_DMASYNC_POSTREAD);
4335 bus_dmamap_unload(adapter->rxtag,
4337 m_freem(rx_buffer->m_head);
4338 rx_buffer->m_head = NULL;
4339 } else if (rx_buffer->map != NULL)
4340 bus_dmamap_unload(adapter->rxtag,
4342 if (rx_buffer->map != NULL) {
4343 bus_dmamap_destroy(adapter->rxtag,
4345 rx_buffer->map = NULL;
4350 if (adapter->rx_buffer_area != NULL) {
4351 free(adapter->rx_buffer_area, M_DEVBUF);
4352 adapter->rx_buffer_area = NULL;
4355 if (adapter->rxtag != NULL) {
4356 bus_dma_tag_destroy(adapter->rxtag);
4357 adapter->rxtag = NULL;
4361 /*********************************************************************
4363 * This routine executes in interrupt context. It replenishes
4364 * the mbufs in the descriptor and sends data which has been
4365 * dma'ed into host memory to upper layer.
4367 * We loop at most count times if count is > 0, or until done if
4370 *********************************************************************/
/*
 * em_rxeof -- RX completion loop.  Examines descriptors starting at
 * next_rx_desc_to_check while their DD bit is set (bounded by `count`
 * and IFF_DRV_RUNNING), chains multi-descriptor frames through
 * adapter->fmp/lmp, strips the trailing CRC, applies the 82543 TBI
 * workaround, replenishes each slot via em_get_buf() (reusing the
 * loaded map on allocation failure), attaches checksum/VLAN metadata,
 * and hands completed frames to ifp->if_input.  Finally advances the
 * hardware tail pointer RDT.
 *
 * NOTE(review): listing has gaps (embedded line numbers jump); several
 * declarations, braces and statements (e.g. accept_frame handling) are
 * missing here -- consult a pristine if_em.c before editing.
 */
4372 em_rxeof(struct adapter *adapter, int count)
4376 uint8_t accept_frame = 0;
4378 uint16_t len, desc_len, prev_len_adj;
4381 /* Pointer to the receive descriptor being examined. */
4382 struct e1000_rx_desc *current_desc;
4386 i = adapter->next_rx_desc_to_check;
4387 current_desc = &adapter->rx_desc_base[i];
4388 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4389 BUS_DMASYNC_POSTREAD);
4391 if (!((current_desc->status) & E1000_RXD_STAT_DD))
4394 while ((current_desc->status & E1000_RXD_STAT_DD) &&
4396 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4397 struct mbuf *m = NULL;
4399 mp = adapter->rx_buffer_area[i].m_head;
4401 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
4402 * needs to access the last received byte in the mbuf.
4404 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
4405 BUS_DMASYNC_POSTREAD);
4409 desc_len = le16toh(current_desc->length);
4410 status = current_desc->status;
4411 if (status & E1000_RXD_STAT_EOP) {
/* Strip the Ethernet CRC; carry a deficit into the previous mbuf. */
4414 if (desc_len < ETHER_CRC_LEN) {
4416 prev_len_adj = ETHER_CRC_LEN - desc_len;
4418 len = desc_len - ETHER_CRC_LEN;
4424 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
4426 uint32_t pkt_len = desc_len;
4428 if (adapter->fmp != NULL)
4429 pkt_len += adapter->fmp->m_pkthdr.len;
4431 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
4432 if (TBI_ACCEPT(&adapter->hw, status,
4433 current_desc->errors, pkt_len, last_byte,
4434 adapter->min_frame_size, adapter->max_frame_size)) {
4435 e1000_tbi_adjust_stats_82543(&adapter->hw,
4436 &adapter->stats, pkt_len,
4437 adapter->hw.mac.addr,
4438 adapter->max_frame_size);
4446 if (em_get_buf(adapter, i) != 0) {
4451 /* Assign correct length to the current fragment */
4454 if (adapter->fmp == NULL) {
4455 mp->m_pkthdr.len = len;
4456 adapter->fmp = mp; /* Store the first mbuf */
4459 /* Chain mbuf's together */
4460 mp->m_flags &= ~M_PKTHDR;
4462 * Adjust length of previous mbuf in chain if
4463 * we received less than 4 bytes in the last
4466 if (prev_len_adj > 0) {
4467 adapter->lmp->m_len -= prev_len_adj;
4468 adapter->fmp->m_pkthdr.len -=
4471 adapter->lmp->m_next = mp;
4472 adapter->lmp = adapter->lmp->m_next;
4473 adapter->fmp->m_pkthdr.len += len;
4477 adapter->fmp->m_pkthdr.rcvif = ifp;
4479 em_receive_checksum(adapter, current_desc,
4481 #ifndef __NO_STRICT_ALIGNMENT
4482 if (adapter->max_frame_size >
4483 (MCLBYTES - ETHER_ALIGN) &&
4484 em_fixup_rx(adapter) != 0)
4487 if (status & E1000_RXD_STAT_VP) {
4488 #if __FreeBSD_version < 700000
4489 VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
4490 (le16toh(current_desc->special) &
4491 E1000_RXD_SPC_VLAN_MASK));
4493 adapter->fmp->m_pkthdr.ether_vtag =
4494 (le16toh(current_desc->special) &
4495 E1000_RXD_SPC_VLAN_MASK);
4496 adapter->fmp->m_flags |= M_VLANTAG;
4499 #ifndef __NO_STRICT_ALIGNMENT
4503 adapter->fmp = NULL;
4504 adapter->lmp = NULL;
4509 /* Reuse loaded DMA map and just update mbuf chain */
4510 mp = adapter->rx_buffer_area[i].m_head;
4511 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
4512 mp->m_data = mp->m_ext.ext_buf;
4514 if (adapter->max_frame_size <=
4515 (MCLBYTES - ETHER_ALIGN))
4516 m_adj(mp, ETHER_ALIGN);
4517 if (adapter->fmp != NULL) {
4518 m_freem(adapter->fmp);
4519 adapter->fmp = NULL;
4520 adapter->lmp = NULL;
4525 /* Zero out the receive descriptors status. */
4526 current_desc->status = 0;
4527 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4528 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4530 /* Advance our pointers to the next descriptor. */
4531 if (++i == adapter->num_rx_desc)
4534 adapter->next_rx_desc_to_check = i;
/* Drop the core lock around if_input to avoid recursion/LOR. */
4536 EM_CORE_UNLOCK(adapter);
4537 (*ifp->if_input)(ifp, m);
4538 EM_CORE_LOCK(adapter);
4540 /* Already running unlocked */
4541 (*ifp->if_input)(ifp, m);
4543 i = adapter->next_rx_desc_to_check;
4545 current_desc = &adapter->rx_desc_base[i];
4547 adapter->next_rx_desc_to_check = i;
4549 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
4551 i = adapter->num_rx_desc - 1;
4552 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
4553 if (!((current_desc->status) & E1000_RXD_STAT_DD))
4559 #ifndef __NO_STRICT_ALIGNMENT
4561 * When jumbo frames are enabled we should realign entire payload on
4562 architectures with strict alignment. This is a serious design mistake of 8254x
4563 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
4564 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
4565 payload. On architectures without strict alignment restrictions 8254x still
4566 * performs unaligned memory access which would reduce the performance too.
4567 * To avoid copying over an entire frame to align, we allocate a new mbuf and
4568 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
4569 * existing mbuf chain.
4571 * Be aware, best performance of the 8254x is achieved only when jumbo frame is
4572 * not used at all on architectures with strict alignment.
/*
 * em_fixup_rx -- realign a received frame for strict-alignment
 * architectures (see the rationale comment above).  If the first mbuf
 * has room, shift its data forward by ETHER_HDR_LEN in place;
 * otherwise allocate a new header mbuf, copy the Ethernet header into
 * it and prepend it to the chain.  On mbuf allocation failure the
 * whole frame (adapter->fmp) is dropped and counted.
 *
 * NOTE(review): listing has gaps (embedded line numbers jump);
 * declarations and some statements are missing here.
 */
4575 em_fixup_rx(struct adapter *adapter)
4582 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
4583 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
4584 m->m_data += ETHER_HDR_LEN;
4586 MGETHDR(n, M_DONTWAIT, MT_DATA);
4588 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
4589 m->m_data += ETHER_HDR_LEN;
4590 m->m_len -= ETHER_HDR_LEN;
4591 n->m_len = ETHER_HDR_LEN;
4592 M_MOVE_PKTHDR(n, m);
4596 adapter->dropped_pkts++;
4597 m_freem(adapter->fmp);
4598 adapter->fmp = NULL;
4607 /*********************************************************************
4609 * Verify that the hardware indicated that the checksum is valid.
4610 * Inform the stack about the status of checksum so that stack
4611 * doesn't spend time verifying the checksum.
4613 *********************************************************************/
/*
 * em_receive_checksum -- translate the RX descriptor's checksum status
 * bits into mbuf csum_flags so the stack can skip software
 * verification.  Clears the flags entirely on pre-82543 parts or when
 * the hardware set the "ignore checksum" (IXSM) bit.
 */
4615 em_receive_checksum(struct adapter *adapter,
4616 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
4618 /* 82543 or newer only */
4619 if ((adapter->hw.mac.type < e1000_82543) ||
4620 /* Ignore Checksum bit is set */
4621 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
4622 mp->m_pkthdr.csum_flags = 0;
4626 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
4628 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
4629 /* IP Checksum Good */
4630 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4631 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4634 mp->m_pkthdr.csum_flags = 0;
4638 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
4640 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
/* Hardware validated the pseudo-header too, so fake a full csum. */
4641 mp->m_pkthdr.csum_flags |=
4642 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4643 mp->m_pkthdr.csum_data = htons(0xffff);
4649 * This turns on the hardware offload of the VLAN
4650 * tag insertion and strip
/*
 * em_enable_hw_vlans -- set CTRL.VME so the MAC strips/inserts VLAN
 * tags in hardware (see the comment above).
 */
4653 em_enable_hw_vlans(struct adapter *adapter)
4657 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4658 ctrl |= E1000_CTRL_VME;
4659 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
/* em_enable_intr -- unmask interrupts by writing the IMS register. */
4663 em_enable_intr(struct adapter *adapter)
4665 E1000_WRITE_REG(&adapter->hw, E1000_IMS,
/* em_disable_intr -- mask all interrupt causes via the IMC register. */
4670 em_disable_intr(struct adapter *adapter)
4672 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
4676 * Bit of a misnomer, what this really means is
4677 * to enable OS management of the system... aka
4678 * to disable special hardware management features
4681 em_init_manageability(struct adapter *adapter)
4683 /* A shared code workaround */
4684 #define E1000_82542_MANC2H E1000_MANC2H
4685 if (adapter->has_manage) {
4686 int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
4687 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4689 /* disable hardware interception of ARP */
4690 manc &= ~(E1000_MANC_ARP_EN);
4692 /* enable receiving management packets to the host */
4693 if (adapter->hw.mac.type >= e1000_82571) {
4694 manc |= E1000_MANC_EN_MNG2HOST;
4695 #define E1000_MNG2HOST_PORT_623 (1 << 5)
4696 #define E1000_MNG2HOST_PORT_664 (1 << 6)
4697 manc2h |= E1000_MNG2HOST_PORT_623;
4698 manc2h |= E1000_MNG2HOST_PORT_664;
4699 E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
4702 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4707 * Give control back to hardware management
4708 * controller if there is one.
4711 em_release_manageability(struct adapter *adapter)
4713 if (adapter->has_manage) {
4714 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4716 /* re-enable hardware interception of ARP */
4717 manc |= E1000_MANC_ARP_EN;
4719 if (adapter->hw.mac.type >= e1000_82571)
4720 manc &= ~E1000_MANC_EN_MNG2HOST;
4722 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4727 * em_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4728 * For ASF and Pass Through versions of f/w this means that
4729 * the driver is loaded. For AMT version (only with 82573)
4730 * of the f/w this means that the network i/f is open.
4734 em_get_hw_control(struct adapter *adapter)
4738 /* Let firmware know the driver has taken over */
4739 switch (adapter->hw.mac.type) {
4741 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4742 E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4743 swsm | E1000_SWSM_DRV_LOAD);
4747 case e1000_80003es2lan:
4750 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4751 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4752 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4760 * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4761 * For ASF and Pass Through versions of f/w this means that the
4762 * driver is no longer loaded. For AMT version (only with 82573)
4763 * of the f/w this means that the network i/f is closed.
4767 em_release_hw_control(struct adapter *adapter)
4771 /* Let firmware taken over control of h/w */
4772 switch (adapter->hw.mac.type) {
4774 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4775 E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4776 swsm & ~E1000_SWSM_DRV_LOAD);
4780 case e1000_80003es2lan:
4783 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4784 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4785 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4794 em_is_valid_ether_addr(uint8_t *addr)
4796 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4798 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4806 * NOTE: the following routines using the e1000
4807 * naming style are provided to the shared
4808 * code which expects that rather than 'em'
4812 e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
4814 pci_write_config(((struct e1000_osdep *)hw->back)->dev, reg, *value, 2);
4818 e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
4820 *value = pci_read_config(((struct e1000_osdep *)hw->back)->dev, reg, 2);
4824 e1000_pci_set_mwi(struct e1000_hw *hw)
4826 pci_write_config(((struct e1000_osdep *)hw->back)->dev, PCIR_COMMAND,
4827 (hw->bus.pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
4831 e1000_pci_clear_mwi(struct e1000_hw *hw)
4833 pci_write_config(((struct e1000_osdep *)hw->back)->dev, PCIR_COMMAND,
4834 (hw->bus.pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
4838 * Read the PCI Express capabilities
4841 e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
4843 int32_t error = E1000_SUCCESS;
4846 switch (hw->mac.type) {
4851 case e1000_80003es2lan:
4853 e1000_read_pci_cfg(hw, cap_off + reg, value);
4856 error = ~E1000_NOT_IMPLEMENTED;
4864 e1000_alloc_zeroed_dev_spec_struct(struct e1000_hw *hw, uint32_t size)
4868 hw->dev_spec = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4869 if (hw->dev_spec == NULL)
4876 e1000_free_dev_spec_struct(struct e1000_hw *hw)
4878 if (hw->dev_spec != NULL)
4879 free(hw->dev_spec, M_DEVBUF);
4884 * Enable PCI Wake On Lan capability
4887 em_enable_wakeup(device_t dev)
4892 /* First find the capabilities pointer*/
4893 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
4894 /* Read the PM Capabilities */
4895 id = pci_read_config(dev, cap, 1);
4896 if (id != PCIY_PMG) /* Something wrong */
4898 /* OK, we have the power capabilities, so
4899 now get the status register */
4900 cap += PCIR_POWER_STATUS;
4901 status = pci_read_config(dev, cap, 2);
4902 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4903 pci_write_config(dev, cap, status, 2);
4908 /*********************************************************************
4909 * 82544 Coexistence issue workaround.
4910 * There are 2 issues.
4911 * 1. Transmit Hang issue.
4912 * To detect this issue, following equation can be used...
4913 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4914 * If SUM[3:0] is in between 1 to 4, we will have this issue.
4917 * To detect this issue, following equation can be used...
4918 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4919 * If SUM[3:0] is in between 9 to c, we will have this issue.
4923 * Make sure we do not have ending address
4924 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4926 *************************************************************************/
4928 em_fill_descriptors (bus_addr_t address, uint32_t length,
4929 PDESC_ARRAY desc_array)
4931 /* Since issue is sensitive to length and address.*/
4932 /* Let us first check the address...*/
4933 uint32_t safe_terminator;
4935 desc_array->descriptor[0].address = address;
4936 desc_array->descriptor[0].length = length;
4937 desc_array->elements = 1;
4938 return (desc_array->elements);
4940 safe_terminator = (uint32_t)((((uint32_t)address & 0x7) +
4941 (length & 0xF)) & 0xF);
4942 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4943 if (safe_terminator == 0 ||
4944 (safe_terminator > 4 &&
4945 safe_terminator < 9) ||
4946 (safe_terminator > 0xC &&
4947 safe_terminator <= 0xF)) {
4948 desc_array->descriptor[0].address = address;
4949 desc_array->descriptor[0].length = length;
4950 desc_array->elements = 1;
4951 return (desc_array->elements);
4954 desc_array->descriptor[0].address = address;
4955 desc_array->descriptor[0].length = length - 4;
4956 desc_array->descriptor[1].address = address + (length - 4);
4957 desc_array->descriptor[1].length = 4;
4958 desc_array->elements = 2;
4959 return (desc_array->elements);
4962 /**********************************************************************
4964 * Update the board statistics counters.
4966 **********************************************************************/
4968 em_update_stats_counters(struct adapter *adapter)
4972 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
4973 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4974 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4975 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4977 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4978 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4979 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4980 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4982 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4983 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4984 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4985 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4986 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4987 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4988 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4989 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4990 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4991 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4992 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4993 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4994 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4995 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4996 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4997 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4998 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4999 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
5000 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
5001 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
5003 /* For the 64-bit byte counters the low dword must be read first. */
5004 /* Both registers clear on the read of the high dword */
5006 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
5007 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);
5009 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
5010 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
5011 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
5012 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
5013 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
5015 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
5016 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
5018 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
5019 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
5020 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
5021 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
5022 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
5023 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
5024 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
5025 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
5026 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
5027 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
5029 if (adapter->hw.mac.type >= e1000_82543) {
5030 adapter->stats.algnerrc +=
5031 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
5032 adapter->stats.rxerrc +=
5033 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
5034 adapter->stats.tncrs +=
5035 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
5036 adapter->stats.cexterr +=
5037 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
5038 adapter->stats.tsctc +=
5039 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
5040 adapter->stats.tsctfc +=
5041 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
5045 ifp->if_collisions = adapter->stats.colc;
5048 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
5049 adapter->stats.crcerrs + adapter->stats.algnerrc +
5050 adapter->stats.ruc + adapter->stats.roc +
5051 adapter->stats.mpc + adapter->stats.cexterr;
5054 ifp->if_oerrors = adapter->stats.ecol +
5055 adapter->stats.latecol + adapter->watchdog_events;
5059 /**********************************************************************
5061 * This routine is called only when em_display_debug_stats is enabled.
5062 * This routine provides a way to take a look at important statistics
5063 * maintained by the driver and hardware.
5065 **********************************************************************/
5067 em_print_debug_info(struct adapter *adapter)
5069 device_t dev = adapter->dev;
5070 uint8_t *hw_addr = adapter->hw.hw_addr;
5072 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
5073 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
5074 E1000_READ_REG(&adapter->hw, E1000_CTRL),
5075 E1000_READ_REG(&adapter->hw, E1000_RCTL));
5076 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
5077 ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
5078 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
5079 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
5080 adapter->hw.fc.high_water,
5081 adapter->hw.fc.low_water);
5082 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
5083 E1000_READ_REG(&adapter->hw, E1000_TIDV),
5084 E1000_READ_REG(&adapter->hw, E1000_TADV));
5085 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
5086 E1000_READ_REG(&adapter->hw, E1000_RDTR),
5087 E1000_READ_REG(&adapter->hw, E1000_RADV));
5088 device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
5089 (long long)adapter->tx_fifo_wrk_cnt,
5090 (long long)adapter->tx_fifo_reset_cnt);
5091 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
5092 E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
5093 E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
5094 device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
5095 E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
5096 E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
5097 device_printf(dev, "Num Tx descriptors avail = %d\n",
5098 adapter->num_tx_desc_avail);
5099 device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
5100 adapter->no_tx_desc_avail1);
5101 device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
5102 adapter->no_tx_desc_avail2);
5103 device_printf(dev, "Std mbuf failed = %ld\n",
5104 adapter->mbuf_alloc_failed);
5105 device_printf(dev, "Std mbuf cluster failed = %ld\n",
5106 adapter->mbuf_cluster_failed);
5107 device_printf(dev, "Driver dropped packets = %ld\n",
5108 adapter->dropped_pkts);
5109 device_printf(dev, "Driver tx dma failure in encap = %ld\n",
5110 adapter->no_tx_dma_setup);
5114 em_print_hw_stats(struct adapter *adapter)
5116 device_t dev = adapter->dev;
5118 device_printf(dev, "Excessive collisions = %lld\n",
5119 (long long)adapter->stats.ecol);
5120 #if (DEBUG_HW > 0) /* Dont output these errors normally */
5121 device_printf(dev, "Symbol errors = %lld\n",
5122 (long long)adapter->stats.symerrs);
5124 device_printf(dev, "Sequence errors = %lld\n",
5125 (long long)adapter->stats.sec);
5126 device_printf(dev, "Defer count = %lld\n",
5127 (long long)adapter->stats.dc);
5128 device_printf(dev, "Missed Packets = %lld\n",
5129 (long long)adapter->stats.mpc);
5130 device_printf(dev, "Receive No Buffers = %lld\n",
5131 (long long)adapter->stats.rnbc);
5132 /* RLEC is inaccurate on some hardware, calculate our own. */
5133 device_printf(dev, "Receive Length Errors = %lld\n",
5134 ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
5135 device_printf(dev, "Receive errors = %lld\n",
5136 (long long)adapter->stats.rxerrc);
5137 device_printf(dev, "Crc errors = %lld\n",
5138 (long long)adapter->stats.crcerrs);
5139 device_printf(dev, "Alignment errors = %lld\n",
5140 (long long)adapter->stats.algnerrc);
5141 /* On 82575 these are collision counts */
5142 device_printf(dev, "Collision/Carrier extension errors = %lld\n",
5143 (long long)adapter->stats.cexterr);
5144 device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
5145 device_printf(dev, "watchdog timeouts = %ld\n",
5146 adapter->watchdog_events);
5147 device_printf(dev, "XON Rcvd = %lld\n",
5148 (long long)adapter->stats.xonrxc);
5149 device_printf(dev, "XON Xmtd = %lld\n",
5150 (long long)adapter->stats.xontxc);
5151 device_printf(dev, "XOFF Rcvd = %lld\n",
5152 (long long)adapter->stats.xoffrxc);
5153 device_printf(dev, "XOFF Xmtd = %lld\n",
5154 (long long)adapter->stats.xofftxc);
5155 device_printf(dev, "Good Packets Rcvd = %lld\n",
5156 (long long)adapter->stats.gprc);
5157 device_printf(dev, "Good Packets Xmtd = %lld\n",
5158 (long long)adapter->stats.gptc);
5159 device_printf(dev, "TSO Contexts Xmtd = %lld\n",
5160 (long long)adapter->stats.tsctc);
5161 device_printf(dev, "TSO Contexts Failed = %lld\n",
5162 (long long)adapter->stats.tsctfc);
5165 /**********************************************************************
5167 * This routine provides a way to dump out the adapter eeprom,
5168 * often a useful debug/service tool. This only dumps the first
5169 * 32 words, stuff that matters is in that extent.
5171 **********************************************************************/
5173 em_print_nvm_info(struct adapter *adapter)
5178 /* Its a bit crude, but it gets the job done */
5179 printf("\nInterface EEPROM Dump:\n");
5180 printf("Offset\n0x0000 ");
5181 for (i = 0, j = 0; i < 32; i++, j++) {
5182 if (j == 8) { /* Make the offset block */
5184 printf("\n0x00%x0 ",row);
5186 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
5187 printf("%04x ", eeprom_data);
5193 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5195 struct adapter *adapter;
5200 error = sysctl_handle_int(oidp, &result, 0, req);
5202 if (error || !req->newptr)
5206 adapter = (struct adapter *)arg1;
5207 em_print_debug_info(adapter);
5210 * This value will cause a hex dump of the
5211 * first 32 16-bit words of the EEPROM to
5215 adapter = (struct adapter *)arg1;
5216 em_print_nvm_info(adapter);
5224 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
5226 struct adapter *adapter;
5231 error = sysctl_handle_int(oidp, &result, 0, req);
5233 if (error || !req->newptr)
5237 adapter = (struct adapter *)arg1;
5238 em_print_hw_stats(adapter);
5245 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
5247 struct em_int_delay_info *info;
5248 struct adapter *adapter;
5254 info = (struct em_int_delay_info *)arg1;
5255 usecs = info->value;
5256 error = sysctl_handle_int(oidp, &usecs, 0, req);
5257 if (error != 0 || req->newptr == NULL)
5259 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
5261 info->value = usecs;
5262 ticks = EM_USECS_TO_TICKS(usecs);
5264 adapter = info->adapter;
5266 EM_CORE_LOCK(adapter);
5267 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
5268 regval = (regval & ~0xffff) | (ticks & 0xffff);
5269 /* Handle a few special cases. */
5270 switch (info->offset) {
5275 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
5276 /* Don't write 0 into the TIDV register. */
5279 if (adapter->hw.mac.type != e1000_82575)
5280 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
5283 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
5284 EM_CORE_UNLOCK(adapter);
5289 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
5290 const char *description, struct em_int_delay_info *info,
5291 int offset, int value)
5293 info->adapter = adapter;
5294 info->offset = offset;
5295 info->value = value;
5296 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
5297 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5298 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5299 info, 0, em_sysctl_int_delay, "I", description);
/*
 * em_add_rx_process_limit -- expose the RX processing limit pointed to by
 * 'limit' as a read/write integer sysctl on this adapter's sysctl tree.
 * NOTE(review): this definition continues past the end of this chunk
 * (closing brace not visible here); comments only, code unchanged.
 */
5304 em_add_rx_process_limit(struct adapter *adapter, const char *name,
5305 const char *description, int *limit, int value)
5308 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
5309 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5310 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);