/******************************************************************************

  Copyright (c) 2001-2010, Intel Corporation 
  All rights reserved.
  
  Redistribution and use in source and binary forms, with or without 
  modification, are permitted provided that the following conditions are met:
  
   1. Redistributions of source code must retain the above copyright notice, 
      this list of conditions and the following disclaimer.
  
   2. Redistributions in binary form must reproduce the above copyright 
      notice, this list of conditions and the following disclaimer in the 
      documentation and/or other materials provided with the distribution.
  
   3. Neither the name of the Intel Corporation nor the names of its 
      contributors may be used to endorse or promote products derived from 
      this software without specific prior written permission.
  
  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>
#include <machine/bus.h>
#include <machine/resource.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/in_cksum.h>
#include <dev/led/led.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "e1000_api.h"
#include "if_lem.h"

/*********************************************************************
 *  Legacy Em Driver version:
 *********************************************************************/
char lem_driver_version[] = "1.0.3";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into lem_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static em_vendor_info_t lem_vendor_info_array[] =
{
        /* Intel(R) PRO/1000 Network Connection */
        { 0x8086, E1000_DEV_ID_82540EM,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EM_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP_LP,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82541EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541ER,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541ER_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI_LF,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82542,           PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82543GC_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82543GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82544EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544GC_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82545EM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545EM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82546EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_PCIE,    PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
                                                PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82547EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82547EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82547GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
        /* required last entry */
        { 0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

static char *lem_strings[] = {
        "Intel(R) PRO/1000 Legacy Network Connection"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      lem_probe(device_t);
static int      lem_attach(device_t);
static int      lem_detach(device_t);
static int      lem_shutdown(device_t);
static int      lem_suspend(device_t);
static int      lem_resume(device_t);
static void     lem_start(struct ifnet *);
static void     lem_start_locked(struct ifnet *ifp);
static int      lem_ioctl(struct ifnet *, u_long, caddr_t);
static void     lem_init(void *);
static void     lem_init_locked(struct adapter *);
static void     lem_stop(void *);
static void     lem_media_status(struct ifnet *, struct ifmediareq *);
static int      lem_media_change(struct ifnet *);
static void     lem_identify_hardware(struct adapter *);
static int      lem_allocate_pci_resources(struct adapter *);
static int      lem_allocate_irq(struct adapter *adapter);
static void     lem_free_pci_resources(struct adapter *);
static void     lem_local_timer(void *);
static int      lem_hardware_init(struct adapter *);
static int      lem_setup_interface(device_t, struct adapter *);
static void     lem_setup_transmit_structures(struct adapter *);
static void     lem_initialize_transmit_unit(struct adapter *);
static int      lem_setup_receive_structures(struct adapter *);
static void     lem_initialize_receive_unit(struct adapter *);
static void     lem_enable_intr(struct adapter *);
static void     lem_disable_intr(struct adapter *);
static void     lem_free_transmit_structures(struct adapter *);
static void     lem_free_receive_structures(struct adapter *);
static void     lem_update_stats_counters(struct adapter *);
static void     lem_add_hw_stats(struct adapter *adapter);
static void     lem_txeof(struct adapter *);
static void     lem_tx_purge(struct adapter *);
static int      lem_allocate_receive_structures(struct adapter *);
static int      lem_allocate_transmit_structures(struct adapter *);
static bool     lem_rxeof(struct adapter *, int, int *);
#ifndef __NO_STRICT_ALIGNMENT
static int      lem_fixup_rx(struct adapter *);
#endif
static void     lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
                    struct mbuf *);
static void     lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
                    u32 *, u32 *);
static void     lem_set_promisc(struct adapter *);
static void     lem_disable_promisc(struct adapter *);
static void     lem_set_multi(struct adapter *);
static void     lem_update_link_status(struct adapter *);
static int      lem_get_buf(struct adapter *, int);
static void     lem_register_vlan(void *, struct ifnet *, u16);
static void     lem_unregister_vlan(void *, struct ifnet *, u16);
static void     lem_setup_vlan_hw_support(struct adapter *);
static int      lem_xmit(struct adapter *, struct mbuf **);
static void     lem_smartspeed(struct adapter *);
static int      lem_82547_fifo_workaround(struct adapter *, int);
static void     lem_82547_update_fifo_head(struct adapter *, int);
static int      lem_82547_tx_fifo_reset(struct adapter *);
static void     lem_82547_move_tail(void *);
static int      lem_dma_malloc(struct adapter *, bus_size_t,
                    struct em_dma_alloc *, int);
static void     lem_dma_free(struct adapter *, struct em_dma_alloc *);
static int      lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
static void     lem_print_nvm_info(struct adapter *);
static int      lem_is_valid_ether_addr(u8 *);
static u32      lem_fill_descriptors (bus_addr_t address, u32 length,
                    PDESC_ARRAY desc_array);
static int      lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void     lem_add_int_delay_sysctl(struct adapter *, const char *,
                    const char *, struct em_int_delay_info *, int, int);
static void     lem_set_flow_cntrl(struct adapter *, const char *,
                    const char *, int *, int);
/* Management and WOL Support */
static void     lem_init_manageability(struct adapter *);
static void     lem_release_manageability(struct adapter *);
static void     lem_get_hw_control(struct adapter *);
static void     lem_release_hw_control(struct adapter *);
static void     lem_get_wakeup(device_t);
static void     lem_enable_wakeup(device_t);
static int      lem_enable_phy_wakeup(struct adapter *);
static void     lem_led_func(void *, int);

#ifdef EM_LEGACY_IRQ
static void     lem_intr(void *);
#else /* FAST IRQ */
static int      lem_irq_fast(void *);
static void     lem_handle_rxtx(void *context, int pending);
static void     lem_handle_link(void *context, int pending);
static void     lem_add_rx_process_limit(struct adapter *, const char *,
                    const char *, int *, int);
#endif /* ~EM_LEGACY_IRQ */

#ifdef DEVICE_POLLING
static poll_handler_t lem_poll;
#endif /* POLLING */

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t lem_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, lem_probe),
        DEVMETHOD(device_attach, lem_attach),
        DEVMETHOD(device_detach, lem_detach),
        DEVMETHOD(device_shutdown, lem_shutdown),
        DEVMETHOD(device_suspend, lem_suspend),
        DEVMETHOD(device_resume, lem_resume),
        {0, 0}
};

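/*
 * This legacy driver registers under the "em" name and shares
 * em_devclass with if_em(4), so both drivers allocate units from
 * the same em%d namespace.
 */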
static driver_t lem_driver = {
        "em", lem_methods, sizeof(struct adapter),
};

extern devclass_t em_devclass;
DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
MODULE_DEPEND(lem, pci, 1, 1, 1);
MODULE_DEPEND(lem, ether, 1, 1, 1);

/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

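/*
 * The MAC's interrupt delay registers (TIDV/RDTR/TADV/RADV) count in
 * units of 1.024 microseconds, so these macros convert between usecs
 * and register ticks, rounding to the nearest unit.
 */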
#define EM_TICKS_TO_USECS(ticks)        ((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs)        ((1000 * (usecs) + 512) / 1024)

static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
static int lem_rxd = EM_DEFAULT_RXD;
static int lem_txd = EM_DEFAULT_TXD;
static int lem_smart_pwr_down = FALSE;

/* Controls whether promiscuous also shows bad packets */
static int lem_debug_sbp = FALSE;

TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &lem_rxd);
TUNABLE_INT("hw.em.txd", &lem_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);

#ifndef EM_LEGACY_IRQ
/* How many packets rxeof tries to clean at a time */
static int lem_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
#endif

/* Flow control setting - default to FULL */
static int lem_fc_setting = e1000_fc_full;
TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
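/*
 * Accepted hw.em.fc_setting values follow the shared-code e1000_fc_mode
 * enum: 0 = none, 1 = rx pause, 2 = tx pause, 3 = full (the default here).
 */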

/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;

/*********************************************************************
 *  Device identification routine
 *
 *  lem_probe determines whether the driver should be loaded on an
 *  adapter based on the PCI vendor/device ID of that adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
lem_probe(device_t dev)
{
        char            adapter_name[60];
        u16             pci_vendor_id = 0;
        u16             pci_device_id = 0;
        u16             pci_subvendor_id = 0;
        u16             pci_subdevice_id = 0;
        em_vendor_info_t *ent;

        INIT_DEBUGOUT("em_probe: begin");

        pci_vendor_id = pci_get_vendor(dev);
        if (pci_vendor_id != EM_VENDOR_ID)
                return (ENXIO);

        pci_device_id = pci_get_device(dev);
        pci_subvendor_id = pci_get_subvendor(dev);
        pci_subdevice_id = pci_get_subdevice(dev);

        ent = lem_vendor_info_array;
        while (ent->vendor_id != 0) {
                if ((pci_vendor_id == ent->vendor_id) &&
                    (pci_device_id == ent->device_id) &&

                    ((pci_subvendor_id == ent->subvendor_id) ||
                    (ent->subvendor_id == PCI_ANY_ID)) &&

                    ((pci_subdevice_id == ent->subdevice_id) ||
                    (ent->subdevice_id == PCI_ANY_ID))) {
                        sprintf(adapter_name, "%s %s",
                                lem_strings[ent->index],
                                lem_driver_version);
                        device_set_desc_copy(dev, adapter_name);
                        return (BUS_PROBE_DEFAULT);
                }
                ent++;
        }

        return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
lem_attach(device_t dev)
{
        struct adapter  *adapter;
        int             tsize, rsize;
        int             error = 0;

        INIT_DEBUGOUT("lem_attach: begin");

        adapter = device_get_softc(dev);
        adapter->dev = adapter->osdep.dev = dev;
        EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
        EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
        EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));

        /* SYSCTL stuff */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
            lem_sysctl_nvm_info, "I", "NVM Information");

        callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
        callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);

        /* Determine hardware and mac info */
        lem_identify_hardware(adapter);

        /* Setup PCI resources */
        if (lem_allocate_pci_resources(adapter)) {
                device_printf(dev, "Allocation of PCI resources failed\n");
                error = ENXIO;
                goto err_pci;
        }

        /* Do Shared Code initialization */
        if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
                device_printf(dev, "Setup of Shared code failed\n");
                error = ENXIO;
                goto err_pci;
        }

        e1000_get_bus_info(&adapter->hw);

        /* Set up some sysctls for the tunable interrupt delays */
        lem_add_int_delay_sysctl(adapter, "rx_int_delay",
            "receive interrupt delay in usecs", &adapter->rx_int_delay,
            E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
        lem_add_int_delay_sysctl(adapter, "tx_int_delay",
            "transmit interrupt delay in usecs", &adapter->tx_int_delay,
            E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
        if (adapter->hw.mac.type >= e1000_82540) {
                lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
                    "receive interrupt delay limit in usecs",
                    &adapter->rx_abs_int_delay,
                    E1000_REGISTER(&adapter->hw, E1000_RADV),
                    lem_rx_abs_int_delay_dflt);
                lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
                    "transmit interrupt delay limit in usecs",
                    &adapter->tx_abs_int_delay,
                    E1000_REGISTER(&adapter->hw, E1000_TADV),
                    lem_tx_abs_int_delay_dflt);
        }

#ifndef EM_LEGACY_IRQ
        /* Sysctls for limiting the amount of work done in the taskqueue */
        lem_add_rx_process_limit(adapter, "rx_processing_limit",
            "max number of rx packets to process", &adapter->rx_process_limit,
            lem_rx_process_limit);
#endif

        /* Sysctl for setting the interface flow control */
        lem_set_flow_cntrl(adapter, "flow_control",
            "flow control setting",
            &adapter->fc_setting, lem_fc_setting);

        /*
         * Validate the number of transmit and receive descriptors. It
         * must not exceed the hardware maximum, and the ring size must
         * be a multiple of EM_DBA_ALIGN.
         */
        if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
            (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
            (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
            (lem_txd < EM_MIN_TXD)) {
                device_printf(dev, "Using %d TX descriptors instead of %d!\n",
                    EM_DEFAULT_TXD, lem_txd);
                adapter->num_tx_desc = EM_DEFAULT_TXD;
        } else
                adapter->num_tx_desc = lem_txd;
        if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
            (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
            (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
            (lem_rxd < EM_MIN_RXD)) {
                device_printf(dev, "Using %d RX descriptors instead of %d!\n",
                    EM_DEFAULT_RXD, lem_rxd);
                adapter->num_rx_desc = EM_DEFAULT_RXD;
        } else
                adapter->num_rx_desc = lem_rxd;

        adapter->hw.mac.autoneg = DO_AUTO_NEG;
        adapter->hw.phy.autoneg_wait_to_complete = FALSE;
        adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
        adapter->rx_buffer_len = 2048;

        e1000_init_script_state_82541(&adapter->hw, TRUE);
        e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);

        /* Copper options */
        if (adapter->hw.phy.media_type == e1000_media_type_copper) {
                adapter->hw.phy.mdix = AUTO_ALL_MODES;
                adapter->hw.phy.disable_polarity_correction = FALSE;
                adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
        }

        /*
         * Set the frame limits assuming
         * standard ethernet sized frames.
         */
        adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
        adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;

        /*
         * This controls when hardware reports transmit completion
         * status.
         */
        adapter->hw.mac.report_tx_early = 1;

        tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
            EM_DBA_ALIGN);

        /* Allocate Transmit Descriptor ring */
        if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
                device_printf(dev, "Unable to allocate tx_desc memory\n");
                error = ENOMEM;
                goto err_tx_desc;
        }
        adapter->tx_desc_base =
            (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;

        rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
            EM_DBA_ALIGN);

        /* Allocate Receive Descriptor ring */
        if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
                device_printf(dev, "Unable to allocate rx_desc memory\n");
                error = ENOMEM;
                goto err_rx_desc;
        }
        adapter->rx_desc_base =
            (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;

        /* Allocate multicast array memory. */
        adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
            MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
        if (adapter->mta == NULL) {
                device_printf(dev, "Can not allocate multicast setup array\n");
                error = ENOMEM;
                goto err_hw_init;
        }

        /*
        ** Start from a known state; this is
        ** important for reading the NVM and
        ** MAC address.
        */
        e1000_reset_hw(&adapter->hw);

        /* Make sure we have a good EEPROM before we read from it */
        if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
                /*
                ** Some PCI-E parts fail the first check due to
                ** the link being in a sleep state; call it again.
                ** If it fails a second time, it's a real issue.
                */
                if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
                        device_printf(dev,
                            "The EEPROM Checksum Is Not Valid\n");
                        error = EIO;
                        goto err_hw_init;
                }
        }

        /* Copy the permanent MAC address out of the EEPROM */
        if (e1000_read_mac_addr(&adapter->hw) < 0) {
                device_printf(dev, "EEPROM read error while reading MAC"
                    " address\n");
                error = EIO;
                goto err_hw_init;
        }

        if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
                device_printf(dev, "Invalid MAC address\n");
                error = EIO;
                goto err_hw_init;
        }

        /* Initialize the hardware */
        if (lem_hardware_init(adapter)) {
                device_printf(dev, "Unable to initialize the hardware\n");
                error = EIO;
                goto err_hw_init;
        }

        /* Allocate transmit descriptors and buffers */
        if (lem_allocate_transmit_structures(adapter)) {
                device_printf(dev, "Could not setup transmit structures\n");
                error = ENOMEM;
                goto err_tx_struct;
        }

        /* Allocate receive descriptors and buffers */
        if (lem_allocate_receive_structures(adapter)) {
                device_printf(dev, "Could not setup receive structures\n");
                error = ENOMEM;
                goto err_rx_struct;
        }

        /*
        **  Do interrupt configuration
        */
        error = lem_allocate_irq(adapter);
        if (error)
                goto err_rx_struct;

        /*
         * Get Wake-on-Lan and Management info for later use
         */
        lem_get_wakeup(dev);

        /* Setup OS specific network interface */
        if (lem_setup_interface(dev, adapter) != 0)
                goto err_rx_struct;

        /* Initialize statistics */
        lem_update_stats_counters(adapter);

        adapter->hw.mac.get_link_status = 1;
        lem_update_link_status(adapter);

        /* Indicate SOL/IDER usage */
        if (e1000_check_reset_block(&adapter->hw))
                device_printf(dev,
                    "PHY reset is blocked due to SOL/IDER session.\n");

        /* Do we need workaround for 82544 PCI-X adapter? */
        if (adapter->hw.bus.type == e1000_bus_type_pcix &&
            adapter->hw.mac.type == e1000_82544)
                adapter->pcix_82544 = TRUE;
        else
                adapter->pcix_82544 = FALSE;

        /* Register for VLAN events */
        adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
            lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
        adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
            lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

        lem_add_hw_stats(adapter);

        /* Non-AMT based hardware can now take control from firmware */
        if (adapter->has_manage && !adapter->has_amt)
                lem_get_hw_control(adapter);

        /* Tell the stack that the interface is not active */
        adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

        adapter->led_dev = led_create(lem_led_func, adapter,
            device_get_nameunit(dev));

        INIT_DEBUGOUT("lem_attach: end");

        return (0);

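/*
 * Error exit paths: each label below frees only what was successfully
 * allocated before the corresponding failure point and then falls
 * through to the later labels (reverse order of allocation).
 */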
err_rx_struct:
        lem_free_transmit_structures(adapter);
err_tx_struct:
err_hw_init:
        lem_release_hw_control(adapter);
        lem_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
        lem_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
        if (adapter->ifp != NULL)
                if_free(adapter->ifp);
        lem_free_pci_resources(adapter);
        free(adapter->mta, M_DEVBUF);
        EM_TX_LOCK_DESTROY(adapter);
        EM_RX_LOCK_DESTROY(adapter);
        EM_CORE_LOCK_DESTROY(adapter);

        return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
lem_detach(device_t dev)
{
        struct adapter  *adapter = device_get_softc(dev);
        struct ifnet    *ifp = adapter->ifp;

        INIT_DEBUGOUT("em_detach: begin");

        /* Make sure VLANS are not using driver */
        if (adapter->ifp->if_vlantrunk != NULL) {
                device_printf(dev,"Vlan in use, detach first\n");
                return (EBUSY);
        }

#ifdef DEVICE_POLLING
        if (ifp->if_capenable & IFCAP_POLLING)
                ether_poll_deregister(ifp);
#endif

        if (adapter->led_dev != NULL)
                led_destroy(adapter->led_dev);

        EM_CORE_LOCK(adapter);
        EM_TX_LOCK(adapter);
        adapter->in_detach = 1;
        lem_stop(adapter);
        e1000_phy_hw_reset(&adapter->hw);

        lem_release_manageability(adapter);

        EM_TX_UNLOCK(adapter);
        EM_CORE_UNLOCK(adapter);

        /* Unregister VLAN events */
        if (adapter->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
        if (adapter->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

        ether_ifdetach(adapter->ifp);
        callout_drain(&adapter->timer);
        callout_drain(&adapter->tx_fifo_timer);

        lem_free_pci_resources(adapter);
        bus_generic_detach(dev);
        if_free(ifp);

        lem_free_transmit_structures(adapter);
        lem_free_receive_structures(adapter);

        /* Free Transmit Descriptor ring */
        if (adapter->tx_desc_base) {
                lem_dma_free(adapter, &adapter->txdma);
                adapter->tx_desc_base = NULL;
        }

        /* Free Receive Descriptor ring */
        if (adapter->rx_desc_base) {
                lem_dma_free(adapter, &adapter->rxdma);
                adapter->rx_desc_base = NULL;
        }

        lem_release_hw_control(adapter);
        free(adapter->mta, M_DEVBUF);
        EM_TX_LOCK_DESTROY(adapter);
        EM_RX_LOCK_DESTROY(adapter);
        EM_CORE_LOCK_DESTROY(adapter);

        return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
lem_shutdown(device_t dev)
{
        return lem_suspend(dev);
}

/*
 * Suspend/resume device methods.
 */
static int
lem_suspend(device_t dev)
{
        struct adapter *adapter = device_get_softc(dev);

        EM_CORE_LOCK(adapter);

        lem_release_manageability(adapter);
        lem_release_hw_control(adapter);
        lem_enable_wakeup(dev);

        EM_CORE_UNLOCK(adapter);

        return bus_generic_suspend(dev);
}

static int
lem_resume(device_t dev)
{
        struct adapter *adapter = device_get_softc(dev);
        struct ifnet *ifp = adapter->ifp;

        EM_CORE_LOCK(adapter);
        lem_init_locked(adapter);
        lem_init_manageability(adapter);
        EM_CORE_UNLOCK(adapter);
        lem_start(ifp);

        return bus_generic_resume(dev);
}


static void
lem_start_locked(struct ifnet *ifp)
{
        struct adapter  *adapter = ifp->if_softc;
        struct mbuf     *m_head;

        EM_TX_LOCK_ASSERT(adapter);

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING)
                return;
        if (!adapter->link_active)
                return;

        /*
         * Force a cleanup if number of TX descriptors
         * available hits the threshold
         */
        if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                lem_txeof(adapter);
                /* Do we now have at least a minimal number of descriptors free? */
                if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
                        adapter->no_tx_desc_avail1++;
                        return;
                }
        }

        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;
                /*
                 *  Encapsulation can modify our pointer, and/or make it
                 *  NULL on failure.  In that event, we can't requeue.
                 */
                if (lem_xmit(adapter, &m_head)) {
                        if (m_head == NULL)
                                break;
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
                        break;
                }

                /* Send a copy of the frame to the BPF listener */
                ETHER_BPF_MTAP(ifp, m_head);

                /* Set timeout in case hardware has problems transmitting. */
                adapter->watchdog_check = TRUE;
                adapter->watchdog_time = ticks;
        }
        if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
                ifp->if_drv_flags |= IFF_DRV_OACTIVE;

        return;
}

static void
lem_start(struct ifnet *ifp)
{
        struct adapter *adapter = ifp->if_softc;

        EM_TX_LOCK(adapter);
        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                lem_start_locked(ifp);
        EM_TX_UNLOCK(adapter);
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  em_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct adapter  *adapter = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
        struct ifaddr *ifa = (struct ifaddr *)data;
#endif
        int error = 0;

        if (adapter->in_detach)
                return (error);

        switch (command) {
        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET) {
                        /*
                         * XXX
                         * Since resetting the hardware takes a very long time
                         * and results in link renegotiation, we only
                         * initialize the hardware when it is absolutely
                         * required.
                         */
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                EM_CORE_LOCK(adapter);
                                lem_init_locked(adapter);
                                EM_CORE_UNLOCK(adapter);
                        }
                        arp_ifinit(ifp, ifa);
                } else
#endif
                        error = ether_ioctl(ifp, command, data);
                break;
        case SIOCSIFMTU:
            {
                int max_frame_size;

                IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

                EM_CORE_LOCK(adapter);
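                /*
                 * The 82542 is limited to standard-sized frames here;
                 * the other legacy parts accept MTUs up to
                 * MAX_JUMBO_FRAME_SIZE less the header and CRC.
                 */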
                switch (adapter->hw.mac.type) {
                case e1000_82542:
                        max_frame_size = ETHER_MAX_LEN;
                        break;
                default:
                        max_frame_size = MAX_JUMBO_FRAME_SIZE;
                }
                if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
                    ETHER_CRC_LEN) {
                        EM_CORE_UNLOCK(adapter);
                        error = EINVAL;
                        break;
                }

                ifp->if_mtu = ifr->ifr_mtu;
                adapter->max_frame_size =
                    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
                lem_init_locked(adapter);
                EM_CORE_UNLOCK(adapter);
                break;
            }
        case SIOCSIFFLAGS:
                IOCTL_DEBUGOUT("ioctl rcv'd:\
                    SIOCSIFFLAGS (Set Interface Flags)");
                EM_CORE_LOCK(adapter);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                if ((ifp->if_flags ^ adapter->if_flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        lem_disable_promisc(adapter);
                                        lem_set_promisc(adapter);
                                }
                        } else
                                lem_init_locked(adapter);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                EM_TX_LOCK(adapter);
                                lem_stop(adapter);
                                EM_TX_UNLOCK(adapter);
                        }
                adapter->if_flags = ifp->if_flags;
                EM_CORE_UNLOCK(adapter);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        EM_CORE_LOCK(adapter);
                        lem_disable_intr(adapter);
                        lem_set_multi(adapter);
                        if (adapter->hw.mac.type == e1000_82542 &&
                            adapter->hw.revision_id == E1000_REVISION_2) {
                                lem_initialize_receive_unit(adapter);
                        }
#ifdef DEVICE_POLLING
                        if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
                                lem_enable_intr(adapter);
                        EM_CORE_UNLOCK(adapter);
                }
                break;
        case SIOCSIFMEDIA:
                /* Check SOL/IDER usage */
                EM_CORE_LOCK(adapter);
                if (e1000_check_reset_block(&adapter->hw)) {
                        EM_CORE_UNLOCK(adapter);
                        device_printf(adapter->dev, "Media change is"
                            " blocked due to SOL/IDER session.\n");
                        break;
                }
                EM_CORE_UNLOCK(adapter);
        case SIOCGIFMEDIA:
                IOCTL_DEBUGOUT("ioctl rcv'd: \
                    SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
                break;
        case SIOCSIFCAP:
            {
                int mask, reinit;

                IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
                reinit = 0;
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
                if (mask & IFCAP_POLLING) {
                        if (ifr->ifr_reqcap & IFCAP_POLLING) {
                                error = ether_poll_register(lem_poll, ifp);
                                if (error)
                                        return (error);
                                EM_CORE_LOCK(adapter);
                                lem_disable_intr(adapter);
                                ifp->if_capenable |= IFCAP_POLLING;
                                EM_CORE_UNLOCK(adapter);
                        } else {
                                error = ether_poll_deregister(ifp);
                                /* Enable interrupt even in error case */
                                EM_CORE_LOCK(adapter);
                                lem_enable_intr(adapter);
                                ifp->if_capenable &= ~IFCAP_POLLING;
                                EM_CORE_UNLOCK(adapter);
                        }
                }
#endif
                if (mask & IFCAP_HWCSUM) {
                        ifp->if_capenable ^= IFCAP_HWCSUM;
                        reinit = 1;
                }
                if (mask & IFCAP_VLAN_HWTAGGING) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        reinit = 1;
                }
                if ((mask & IFCAP_WOL) &&
                    (ifp->if_capabilities & IFCAP_WOL) != 0) {
                        if (mask & IFCAP_WOL_MCAST)
                                ifp->if_capenable ^= IFCAP_WOL_MCAST;
                        if (mask & IFCAP_WOL_MAGIC)
                                ifp->if_capenable ^= IFCAP_WOL_MAGIC;
                }
                if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
                        lem_init(adapter);
                VLAN_CAPABILITIES(ifp);
                break;
            }

        default:
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static void
lem_init_locked(struct adapter *adapter)
{
        struct ifnet    *ifp = adapter->ifp;
        device_t        dev = adapter->dev;
        u32             pba;

        INIT_DEBUGOUT("lem_init: begin");

        EM_CORE_LOCK_ASSERT(adapter);

        EM_TX_LOCK(adapter);
        lem_stop(adapter);
        EM_TX_UNLOCK(adapter);

        /*
         * Packet Buffer Allocation (PBA)
         * Writing PBA sets the receive portion of the buffer
         * the remainder is used for the transmit buffer.
         *
         * Devices before the 82547 had a Packet Buffer of 64K.
         *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
         * After the 82547 the buffer was reduced to 40K.
         *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
         *   Note: default does not leave enough room for Jumbo Frame >10k.
         */
        switch (adapter->hw.mac.type) {
        case e1000_82547:
        case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
                if (adapter->max_frame_size > 8192)
                        pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
                else
                        pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
                adapter->tx_fifo_head = 0;
                adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
                adapter->tx_fifo_size =
                    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
                break;
        default:
                /* Devices before 82547 had a Packet Buffer of 64K.   */
                if (adapter->max_frame_size > 8192)
                        pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
                else
                        pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
        }

        INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
        E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

        /* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
              ETHER_ADDR_LEN);

        /* Put the address into the Receive Address Array */
        e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

        /* Initialize the hardware */
        if (lem_hardware_init(adapter)) {
                device_printf(dev, "Unable to initialize the hardware\n");
                return;
        }
        lem_update_link_status(adapter);

        /* Setup VLAN support, basic and offload if available */
        E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

        /* Set hardware offload abilities */
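        /*
         * Checksum offload only exists on 82543 and later MACs, which
         * is why the mac.type check below leaves hwassist at zero on
         * the older parts.
         */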
        ifp->if_hwassist = 0;
        if (adapter->hw.mac.type >= e1000_82543) {
                if (ifp->if_capenable & IFCAP_TXCSUM)
                        ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
        }

        /* Configure for OS presence */
        lem_init_manageability(adapter);

        /* Prepare transmit descriptors and buffers */
        lem_setup_transmit_structures(adapter);
        lem_initialize_transmit_unit(adapter);

        /* Setup Multicast table */
        lem_set_multi(adapter);

        /* Prepare receive descriptors and buffers */
        if (lem_setup_receive_structures(adapter)) {
                device_printf(dev, "Could not setup receive structures\n");
                EM_TX_LOCK(adapter);
                lem_stop(adapter);
                EM_TX_UNLOCK(adapter);
                return;
        }
        lem_initialize_receive_unit(adapter);

        /* Use real VLAN Filter support? */
        if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
                if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
                        /* Use real VLAN Filter support */
                        lem_setup_vlan_hw_support(adapter);
                else {
                        u32 ctrl;
                        ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
                        ctrl |= E1000_CTRL_VME;
                        E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
                }
        }

        /* Don't lose promiscuous settings */
        lem_set_promisc(adapter);

        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
        e1000_clear_hw_cntrs_base_generic(&adapter->hw);

        /* MSI/X configuration for 82574 */
        if (adapter->hw.mac.type == e1000_82574) {
                int tmp;
                tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
                tmp |= E1000_CTRL_EXT_PBA_CLR;
                E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
                /*
                ** Set the IVAR - interrupt vector routing.
                ** Each nibble represents a vector, high bit
                ** is enable, other 3 bits are the MSIX table
                ** entry, we map RXQ0 to 0, TXQ0 to 1, and
                ** Link (other) to 2, hence the magic number.
                */
                E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
        }

#ifdef DEVICE_POLLING
        /*
         * Only enable interrupts if we are not polling, make sure
         * they are off otherwise.
         */
        if (ifp->if_capenable & IFCAP_POLLING)
                lem_disable_intr(adapter);
        else
#endif /* DEVICE_POLLING */
                lem_enable_intr(adapter);

        /* AMT based hardware can now take control from firmware */
        if (adapter->has_manage && adapter->has_amt)
                lem_get_hw_control(adapter);

        /* Don't reset the phy next time init gets called */
        adapter->hw.phy.reset_disable = TRUE;
}

static void
lem_init(void *arg)
{
        struct adapter *adapter = arg;

        EM_CORE_LOCK(adapter);
        lem_init_locked(adapter);
        EM_CORE_UNLOCK(adapter);
}


#ifdef DEVICE_POLLING
/*********************************************************************
 *
 *  Legacy polling routine
 *
 *********************************************************************/
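/*
 * The handler returns the number of packets it processed this call;
 * "count" is the budget handed down by the polling code.
 */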
static int
lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct adapter *adapter = ifp->if_softc;
        u32             reg_icr, rx_done = 0;

        EM_CORE_LOCK(adapter);
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
                EM_CORE_UNLOCK(adapter);
                return (rx_done);
        }

        if (cmd == POLL_AND_CHECK_STATUS) {
                reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
                if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
                        callout_stop(&adapter->timer);
                        adapter->hw.mac.get_link_status = 1;
                        lem_update_link_status(adapter);
                        callout_reset(&adapter->timer, hz,
                            lem_local_timer, adapter);
                }
        }
        EM_CORE_UNLOCK(adapter);

        lem_rxeof(adapter, count, &rx_done);

        EM_TX_LOCK(adapter);
        lem_txeof(adapter);
        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                lem_start_locked(ifp);
        EM_TX_UNLOCK(adapter);
        return (rx_done);
}
#endif /* DEVICE_POLLING */

#ifdef EM_LEGACY_IRQ
/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 *********************************************************************/
static void
lem_intr(void *arg)
{
        struct adapter  *adapter = arg;
        struct ifnet    *ifp = adapter->ifp;
        u32             reg_icr;


        if (ifp->if_capenable & IFCAP_POLLING)
                return;

        EM_CORE_LOCK(adapter);
        reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
        if (reg_icr & E1000_ICR_RXO)
                adapter->rx_overruns++;

        if ((reg_icr == 0xffffffff) || (reg_icr == 0))
                        goto out;

        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                        goto out;

        if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
                callout_stop(&adapter->timer);
                adapter->hw.mac.get_link_status = 1;
                lem_update_link_status(adapter);
                /* Deal with TX cruft when link lost */
                lem_tx_purge(adapter);
                callout_reset(&adapter->timer, hz,
                    lem_local_timer, adapter);
                goto out;
        }

        EM_TX_LOCK(adapter);
        lem_rxeof(adapter, -1, NULL);
        lem_txeof(adapter);
        if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
            !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                lem_start_locked(ifp);
        EM_TX_UNLOCK(adapter);

out:
        EM_CORE_UNLOCK(adapter);
        return;
}

#else /* EM_FAST_IRQ, then fast interrupt routines only */

static void
lem_handle_link(void *context, int pending)
{
        struct adapter  *adapter = context;
        struct ifnet *ifp = adapter->ifp;

        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                return;

        EM_CORE_LOCK(adapter);
        callout_stop(&adapter->timer);
        lem_update_link_status(adapter);
        /* Deal with TX cruft when link lost */
        lem_tx_purge(adapter);
        callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
        EM_CORE_UNLOCK(adapter);
}


/* Combined RX/TX handler, used by Legacy and MSI */
static void
lem_handle_rxtx(void *context, int pending)
{
        struct adapter  *adapter = context;
        struct ifnet    *ifp = adapter->ifp;


        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                lem_rxeof(adapter, adapter->rx_process_limit, NULL);
                EM_TX_LOCK(adapter);
                lem_txeof(adapter);
                if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                        lem_start_locked(ifp);
                EM_TX_UNLOCK(adapter);
        }

        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                lem_enable_intr(adapter);
}

/*********************************************************************
 *
 *  Fast Legacy/MSI Combined Interrupt Service routine
 *
 *********************************************************************/
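/*
 * This runs as an interrupt filter, so it must not sleep: it classifies
 * the cause, masks further interrupts, and defers the real processing
 * to the rxtx and link taskqueues.
 */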
1366 static int
1367 lem_irq_fast(void *arg)
1368 {
1369         struct adapter  *adapter = arg;
1370         struct ifnet    *ifp;
1371         u32             reg_icr;
1372
1373         ifp = adapter->ifp;
1374
1375         reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1376
1377         /* Hot eject?  */
1378         if (reg_icr == 0xffffffff)
1379                 return FILTER_STRAY;
1380
1381         /* Definitely not our interrupt.  */
1382         if (reg_icr == 0x0)
1383                 return FILTER_STRAY;
1384
1385         /*
1386          * Mask interrupts until the taskqueue is finished running.  This is
1387          * cheap, just assume that it is needed.  This also works around the
1388          * MSI message reordering errata on certain systems.
1389          */
1390         lem_disable_intr(adapter);
1391         taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1392
1393         /* Link status change */
1394         if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1395                 adapter->hw.mac.get_link_status = 1;
1396                 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1397         }
1398
1399         if (reg_icr & E1000_ICR_RXO)
1400                 adapter->rx_overruns++;
1401         return FILTER_HANDLED;
1402 }
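
/*
 * Illustrative sketch (not compiled; all foo_* names are hypothetical):
 * the filter/taskqueue split used by lem_irq_fast() above.  The filter runs
 * in primary interrupt context, so it only checks the cause, masks further
 * interrupts and defers the real work to a taskqueue thread.
 */
#if 0
static int
foo_irq_filter(void *arg)
{
        struct foo_softc *sc = arg;

        if (!FOO_INTR_IS_OURS(sc))      /* hypothetical cause check */
                return (FILTER_STRAY);

        FOO_DISABLE_INTR(sc);           /* stay masked until the task runs */
        taskqueue_enqueue(sc->tq, &sc->rxtx_task);
        return (FILTER_HANDLED);
}

static void
foo_handle_rxtx(void *context, int pending)
{
        struct foo_softc *sc = context;

        /* ... clean the RX/TX rings here, then re-enable interrupts ... */
        FOO_ENABLE_INTR(sc);
}
#endif
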
1403 #endif /* ~EM_LEGACY_IRQ */
1404
1405
1406 /*********************************************************************
1407  *
1408  *  Media Ioctl callback
1409  *
1410  *  This routine is called whenever the user queries the status of
1411  *  the interface using ifconfig.
1412  *
1413  **********************************************************************/
1414 static void
1415 lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1416 {
1417         struct adapter *adapter = ifp->if_softc;
1418         u_char fiber_type = IFM_1000_SX;
1419
1420         INIT_DEBUGOUT("lem_media_status: begin");
1421
1422         EM_CORE_LOCK(adapter);
1423         lem_update_link_status(adapter);
1424
1425         ifmr->ifm_status = IFM_AVALID;
1426         ifmr->ifm_active = IFM_ETHER;
1427
1428         if (!adapter->link_active) {
1429                 EM_CORE_UNLOCK(adapter);
1430                 return;
1431         }
1432
1433         ifmr->ifm_status |= IFM_ACTIVE;
1434
1435         if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1436             (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1437                 if (adapter->hw.mac.type == e1000_82545)
1438                         fiber_type = IFM_1000_LX;
1439                 ifmr->ifm_active |= fiber_type | IFM_FDX;
1440         } else {
1441                 switch (adapter->link_speed) {
1442                 case 10:
1443                         ifmr->ifm_active |= IFM_10_T;
1444                         break;
1445                 case 100:
1446                         ifmr->ifm_active |= IFM_100_TX;
1447                         break;
1448                 case 1000:
1449                         ifmr->ifm_active |= IFM_1000_T;
1450                         break;
1451                 }
1452                 if (adapter->link_duplex == FULL_DUPLEX)
1453                         ifmr->ifm_active |= IFM_FDX;
1454                 else
1455                         ifmr->ifm_active |= IFM_HDX;
1456         }
1457         EM_CORE_UNLOCK(adapter);
1458 }
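
/*
 * Example (illustrative): the status/active bits set above are what
 * ifconfig renders in its media line; an active copper gigabit link
 * typically shows up as something like
 *     media: Ethernet autoselect (1000baseT <full-duplex>)
 */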
1459
1460 /*********************************************************************
1461  *
1462  *  Media Ioctl callback
1463  *
1464  *  This routine is called when the user changes speed/duplex using
1465  *  the media/mediaopt options with ifconfig.
1466  *
1467  **********************************************************************/
1468 static int
1469 lem_media_change(struct ifnet *ifp)
1470 {
1471         struct adapter *adapter = ifp->if_softc;
1472         struct ifmedia  *ifm = &adapter->media;
1473
1474         INIT_DEBUGOUT("lem_media_change: begin");
1475
1476         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1477                 return (EINVAL);
1478
1479         EM_CORE_LOCK(adapter);
1480         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1481         case IFM_AUTO:
1482                 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1483                 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1484                 break;
1485         case IFM_1000_LX:
1486         case IFM_1000_SX:
1487         case IFM_1000_T:
1488                 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1489                 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1490                 break;
1491         case IFM_100_TX:
1492                 adapter->hw.mac.autoneg = FALSE;
1493                 adapter->hw.phy.autoneg_advertised = 0;
1494                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1495                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1496                 else
1497                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1498                 break;
1499         case IFM_10_T:
1500                 adapter->hw.mac.autoneg = FALSE;
1501                 adapter->hw.phy.autoneg_advertised = 0;
1502                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1503                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1504                 else
1505                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1506                 break;
1507         default:
1508                 device_printf(adapter->dev, "Unsupported media type\n");
1509         }
1510
1511         /* As the speed/duplex settings may have changed, we need to
1512          * reset the PHY.
1513          */
1514         adapter->hw.phy.reset_disable = FALSE;
1515
1516         lem_init_locked(adapter);
1517         EM_CORE_UNLOCK(adapter);
1518
1519         return (0);
1520 }
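
/*
 * Example (illustrative): a request such as
 *     ifconfig <ifname> media 100baseTX mediaopt full-duplex
 * reaches this routine with IFM_SUBTYPE() == IFM_100_TX and IFM_FDX set, so
 * autonegotiation is turned off, forced_speed_duplex becomes
 * ADVERTISE_100_FULL, and lem_init_locked() then re-applies the settings
 * once the PHY reset is re-enabled.
 */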
1521
1522 /*********************************************************************
1523  *
1524  *  This routine maps the mbufs to tx descriptors.
1525  *
1526  *  return 0 on success, positive on failure
1527  **********************************************************************/
1528
1529 static int
1530 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1531 {
1532         bus_dma_segment_t       segs[EM_MAX_SCATTER];
1533         bus_dmamap_t            map;
1534         struct em_buffer        *tx_buffer, *tx_buffer_mapped;
1535         struct e1000_tx_desc    *ctxd = NULL;
1536         struct mbuf             *m_head;
1537         u32                     txd_upper, txd_lower, txd_used, txd_saved;
1538         int                     error, nsegs, i, j, first, last = 0;
1539
1540         m_head = *m_headp;
1541         txd_upper = txd_lower = txd_used = txd_saved = 0;
1542
1543         /*
1544         ** When doing checksum offload, it is critical to
1545         ** make sure the first mbuf covers at least the Ethernet and IP
1546         ** headers, because the checksum setup routine reads them there.
1547         */
1548         if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
1549             (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
1550                 m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
1551                 *m_headp = m_head;
1552                 if (m_head == NULL)
1553                         return (ENOBUFS);
1554         }
1555
1556         /*
1557          * Map the packet for DMA
1558          *
1559          * Capture the first descriptor index,
1560          * this descriptor will have the index
1561          * of the EOP which is the only one that
1562          * now gets a DONE bit writeback.
1563          */
1564         first = adapter->next_avail_tx_desc;
1565         tx_buffer = &adapter->tx_buffer_area[first];
1566         tx_buffer_mapped = tx_buffer;
1567         map = tx_buffer->map;
1568
1569         error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1570             *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1571
1572         /*
1573          * There are two types of errors we can (try) to handle:
1574          * - EFBIG means the mbuf chain was too long and bus_dma ran
1575          *   out of segments.  Defragment the mbuf chain and try again.
1576          * - ENOMEM means bus_dma could not obtain enough bounce buffers
1577          *   at this point in time.  Defer sending and try again later.
1578          * All other errors, in particular EINVAL, are fatal and prevent the
1579          * mbuf chain from ever going through.  Drop it and report error.
1580          */
1581         if (error == EFBIG) {
1582                 struct mbuf *m;
1583
1584                 m = m_defrag(*m_headp, M_DONTWAIT);
1585                 if (m == NULL) {
1586                         adapter->mbuf_alloc_failed++;
1587                         m_freem(*m_headp);
1588                         *m_headp = NULL;
1589                         return (ENOBUFS);
1590                 }
1591                 *m_headp = m;
1592
1593                 /* Try it again */
1594                 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1595                     *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1596
1597                 if (error) {
1598                         adapter->no_tx_dma_setup++;
1599                         m_freem(*m_headp);
1600                         *m_headp = NULL;
1601                         return (error);
1602                 }
1603         } else if (error != 0) {
1604                 adapter->no_tx_dma_setup++;
1605                 return (error);
1606         }
1607
1608         if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1609                 adapter->no_tx_desc_avail2++;
1610                 bus_dmamap_unload(adapter->txtag, map);
1611                 return (ENOBUFS);
1612         }
1613         m_head = *m_headp;
1614
1615         /* Do hardware assists */
1616         if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1617                 lem_transmit_checksum_setup(adapter,  m_head,
1618                     &txd_upper, &txd_lower);
1619
1620         i = adapter->next_avail_tx_desc;
1621         if (adapter->pcix_82544) 
1622                 txd_saved = i;
1623
1624         /* Set up our transmit descriptors */
1625         for (j = 0; j < nsegs; j++) {
1626                 bus_size_t seg_len;
1627                 bus_addr_t seg_addr;
1628                 /* If adapter is 82544 and on PCIX bus */
1629                 if(adapter->pcix_82544) {
1630                         DESC_ARRAY      desc_array;
1631                         u32             array_elements, counter;
1632                         /*
1633                          * Check the Address and Length combination and
1634                          * split the data accordingly
1635                          */
1636                         array_elements = lem_fill_descriptors(segs[j].ds_addr,
1637                             segs[j].ds_len, &desc_array);
1638                         for (counter = 0; counter < array_elements; counter++) {
1639                                 if (txd_used == adapter->num_tx_desc_avail) {
1640                                         adapter->next_avail_tx_desc = txd_saved;
1641                                         adapter->no_tx_desc_avail2++;
1642                                         bus_dmamap_unload(adapter->txtag, map);
1643                                         return (ENOBUFS);
1644                                 }
1645                                 tx_buffer = &adapter->tx_buffer_area[i];
1646                                 ctxd = &adapter->tx_desc_base[i];
1647                                 ctxd->buffer_addr = htole64(
1648                                     desc_array.descriptor[counter].address);
1649                                 ctxd->lower.data = htole32(
1650                                     (adapter->txd_cmd | txd_lower | (u16)
1651                                     desc_array.descriptor[counter].length));
1652                                 ctxd->upper.data =
1653                                     htole32((txd_upper));
1654                                 last = i;
1655                                 if (++i == adapter->num_tx_desc)
1656                                          i = 0;
1657                                 tx_buffer->m_head = NULL;
1658                                 tx_buffer->next_eop = -1;
1659                                 txd_used++;
1660                         }
1661                 } else {
1662                         tx_buffer = &adapter->tx_buffer_area[i];
1663                         ctxd = &adapter->tx_desc_base[i];
1664                         seg_addr = segs[j].ds_addr;
1665                         seg_len  = segs[j].ds_len;
1666                         ctxd->buffer_addr = htole64(seg_addr);
1667                         ctxd->lower.data = htole32(
1668                             adapter->txd_cmd | txd_lower | seg_len);
1669                         ctxd->upper.data =
1670                             htole32(txd_upper);
1671                         last = i;
1672                         if (++i == adapter->num_tx_desc)
1673                                 i = 0;
1674                         tx_buffer->m_head = NULL;
1675                         tx_buffer->next_eop = -1;
1676                 }
1677         }
1678
1679         adapter->next_avail_tx_desc = i;
1680
1681         if (adapter->pcix_82544)
1682                 adapter->num_tx_desc_avail -= txd_used;
1683         else
1684                 adapter->num_tx_desc_avail -= nsegs;
1685
1686         if (m_head->m_flags & M_VLANTAG) {
1687                 /* Set the vlan id. */
1688                 ctxd->upper.fields.special =
1689                     htole16(m_head->m_pkthdr.ether_vtag);
1690                 /* Tell hardware to add tag */
1691                 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
1692         }
1693
1694         tx_buffer->m_head = m_head;
1695         tx_buffer_mapped->map = tx_buffer->map;
1696         tx_buffer->map = map;
1697         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1698
1699         /*
1700          * Last Descriptor of Packet
1701          * needs End Of Packet (EOP)
1702          * and Report Status (RS)
1703          */
1704         ctxd->lower.data |=
1705             htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1706         /*
1707          * Keep track in the first buffer which
1708          * descriptor will be written back
1709          */
1710         tx_buffer = &adapter->tx_buffer_area[first];
1711         tx_buffer->next_eop = last;
1712         adapter->watchdog_time = ticks;
1713
1714         /*
1715          * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1716          * that this frame is available to transmit.
1717          */
1718         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1719             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1720         if (adapter->hw.mac.type == e1000_82547 &&
1721             adapter->link_duplex == HALF_DUPLEX)
1722                 lem_82547_move_tail(adapter);
1723         else {
1724                 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1725                 if (adapter->hw.mac.type == e1000_82547)
1726                         lem_82547_update_fifo_head(adapter,
1727                             m_head->m_pkthdr.len);
1728         }
1729
1730         return (0);
1731 }
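
/*
 * Worked example (illustrative, hypothetical numbers): on the non-82544
 * path, a 3-segment mbuf chain with next_avail_tx_desc == 10 fills
 * descriptors 10, 11 and 12, so "first" == 10 and "last" == 12.  EOP/RS are
 * set only on descriptor 12, tx_buffer_area[10].next_eop is set to 12 so
 * the cleanup code knows which descriptor to watch for the DONE writeback,
 * and TDT(0) is advanced to 13 (the 82547 half-duplex case goes through
 * lem_82547_move_tail() instead).
 */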
1732
1733 /*********************************************************************
1734  *
1735  * 82547 workaround to avoid controller hang in half-duplex environment.
1736  * The workaround is to avoid queuing a large packet that would span
1737  * the internal Tx FIFO ring boundary. When that would happen we need to
1738  * reset the FIFO pointers, and we do so only when the FIFO is quiescent.
1739  *
1740  **********************************************************************/
1741 static void
1742 lem_82547_move_tail(void *arg)
1743 {
1744         struct adapter *adapter = arg;
1745         struct e1000_tx_desc *tx_desc;
1746         u16     hw_tdt, sw_tdt, length = 0;
1747         bool    eop = 0;
1748
1749         EM_TX_LOCK_ASSERT(adapter);
1750
1751         hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
1752         sw_tdt = adapter->next_avail_tx_desc;
1753         
1754         while (hw_tdt != sw_tdt) {
1755                 tx_desc = &adapter->tx_desc_base[hw_tdt];
1756                 length += tx_desc->lower.flags.length;
1757                 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1758                 if (++hw_tdt == adapter->num_tx_desc)
1759                         hw_tdt = 0;
1760
1761                 if (eop) {
1762                         if (lem_82547_fifo_workaround(adapter, length)) {
1763                                 adapter->tx_fifo_wrk_cnt++;
1764                                 callout_reset(&adapter->tx_fifo_timer, 1,
1765                                         lem_82547_move_tail, adapter);
1766                                 break;
1767                         }
1768                         E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
1769                         lem_82547_update_fifo_head(adapter, length);
1770                         length = 0;
1771                 }
1772         }       
1773 }
1774
1775 static int
1776 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1777 {       
1778         int fifo_space, fifo_pkt_len;
1779
1780         fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1781
1782         if (adapter->link_duplex == HALF_DUPLEX) {
1783                 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1784
1785                 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1786                         if (lem_82547_tx_fifo_reset(adapter))
1787                                 return (0);
1788                         else
1789                                 return (1);
1790                 }
1791         }
1792
1793         return (0);
1794 }
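
/*
 * Worked example (illustrative; assumes EM_FIFO_HDR is the 16-byte FIFO
 * header that the "16 byte aligned" comment below implies): a 1000-byte
 * frame gives fifo_pkt_len = roundup2(1000 + 16, 16) = 1024.  In half
 * duplex, if 1024 >= EM_82547_PKT_THRESH + (tx_fifo_size - tx_fifo_head),
 * the frame would cross the FIFO wrap point, so lem_82547_tx_fifo_reset()
 * is tried first; only if the FIFO is not yet quiescent does
 * lem_82547_move_tail() defer the tail update via the tx_fifo_timer callout.
 */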
1795
1796 static void
1797 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1798 {
1799         int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1800         
1801         /* tx_fifo_head is always 16 byte aligned */
1802         adapter->tx_fifo_head += fifo_pkt_len;
1803         if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1804                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1805         }
1806 }
1807
1808
1809 static int
1810 lem_82547_tx_fifo_reset(struct adapter *adapter)
1811 {
1812         u32 tctl;
1813
1814         if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1815             E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1816             (E1000_READ_REG(&adapter->hw, E1000_TDFT) == 
1817             E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1818             (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1819             E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1820             (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1821                 /* Disable TX unit */
1822                 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1823                 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1824                     tctl & ~E1000_TCTL_EN);
1825
1826                 /* Reset FIFO pointers */
1827                 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1828                     adapter->tx_head_addr);
1829                 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1830                     adapter->tx_head_addr);
1831                 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1832                     adapter->tx_head_addr);
1833                 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1834                     adapter->tx_head_addr);
1835
1836                 /* Re-enable TX unit */
1837                 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
1838                 E1000_WRITE_FLUSH(&adapter->hw);
1839
1840                 adapter->tx_fifo_head = 0;
1841                 adapter->tx_fifo_reset_cnt++;
1842
1843                 return (TRUE);
1844         }
1845         else {
1846                 return (FALSE);
1847         }
1848 }
1849
1850 static void
1851 lem_set_promisc(struct adapter *adapter)
1852 {
1853         struct ifnet    *ifp = adapter->ifp;
1854         u32             reg_rctl;
1855
1856         reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1857
1858         if (ifp->if_flags & IFF_PROMISC) {
1859                 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1860                 /* Turn this on if you want to see bad packets */
1861                 if (lem_debug_sbp)
1862                         reg_rctl |= E1000_RCTL_SBP;
1863                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1864         } else if (ifp->if_flags & IFF_ALLMULTI) {
1865                 reg_rctl |= E1000_RCTL_MPE;
1866                 reg_rctl &= ~E1000_RCTL_UPE;
1867                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1868         }
1869 }
1870
1871 static void
1872 lem_disable_promisc(struct adapter *adapter)
1873 {
1874         u32     reg_rctl;
1875
1876         reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1877
1878         reg_rctl &=  (~E1000_RCTL_UPE);
1879         reg_rctl &=  (~E1000_RCTL_MPE);
1880         reg_rctl &=  (~E1000_RCTL_SBP);
1881         E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1882 }
1883
1884
1885 /*********************************************************************
1886  *  Multicast Update
1887  *
1888  *  This routine is called whenever multicast address list is updated.
1889  *
1890  **********************************************************************/
1891
1892 static void
1893 lem_set_multi(struct adapter *adapter)
1894 {
1895         struct ifnet    *ifp = adapter->ifp;
1896         struct ifmultiaddr *ifma;
1897         u32 reg_rctl = 0;
1898         u8  *mta; /* Multicast array memory */
1899         int mcnt = 0;
1900
1901         IOCTL_DEBUGOUT("lem_set_multi: begin");
1902
1903         mta = adapter->mta;
1904         bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
1905
1906         if (adapter->hw.mac.type == e1000_82542 && 
1907             adapter->hw.revision_id == E1000_REVISION_2) {
1908                 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1909                 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1910                         e1000_pci_clear_mwi(&adapter->hw);
1911                 reg_rctl |= E1000_RCTL_RST;
1912                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1913                 msec_delay(5);
1914         }
1915
1916 #if __FreeBSD_version < 800000
1917         IF_ADDR_LOCK(ifp);
1918 #else
1919         if_maddr_rlock(ifp);
1920 #endif
1921         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1922                 if (ifma->ifma_addr->sa_family != AF_LINK)
1923                         continue;
1924
1925                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1926                         break;
1927
1928                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1929                     &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
1930                 mcnt++;
1931         }
1932 #if __FreeBSD_version < 800000
1933         IF_ADDR_UNLOCK(ifp);
1934 #else
1935         if_maddr_runlock(ifp);
1936 #endif
1937         if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1938                 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1939                 reg_rctl |= E1000_RCTL_MPE;
1940                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1941         } else
1942                 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
1943
1944         if (adapter->hw.mac.type == e1000_82542 && 
1945             adapter->hw.revision_id == E1000_REVISION_2) {
1946                 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1947                 reg_rctl &= ~E1000_RCTL_RST;
1948                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1949                 msec_delay(5);
1950                 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1951                         e1000_pci_set_mwi(&adapter->hw);
1952         }
1953 }
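
/*
 * Example (illustrative): each AF_LINK multicast address is copied back to
 * back into the flat mta[] array, so entry mcnt starts at byte offset
 * mcnt * ETH_ADDR_LEN.  Once MAX_NUM_MULTICAST_ADDRESSES entries are
 * reached, the exact filter is abandoned and multicast promiscuous mode
 * (E1000_RCTL_MPE) is enabled instead.
 */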
1954
1955
1956 /*********************************************************************
1957  *  Timer routine
1958  *
1959  *  This routine checks for link status and updates statistics.
1960  *
1961  **********************************************************************/
1962
1963 static void
1964 lem_local_timer(void *arg)
1965 {
1966         struct adapter  *adapter = arg;
1967
1968         EM_CORE_LOCK_ASSERT(adapter);
1969
1970         lem_update_link_status(adapter);
1971         lem_update_stats_counters(adapter);
1972
1973         lem_smartspeed(adapter);
1974
1975         /*
1976          * We check the watchdog: the time since
1977          * the last TX descriptor was cleaned.
1978          * This implies a functional TX engine.
1979          */
1980         if ((adapter->watchdog_check == TRUE) &&
1981             (ticks - adapter->watchdog_time > EM_WATCHDOG))
1982                 goto hung;
1983
1984         callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1985         return;
1986 hung:
1987         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1988         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1989         adapter->watchdog_events++;
1990         lem_init_locked(adapter);
1991 }
1992
1993 static void
1994 lem_update_link_status(struct adapter *adapter)
1995 {
1996         struct e1000_hw *hw = &adapter->hw;
1997         struct ifnet *ifp = adapter->ifp;
1998         device_t dev = adapter->dev;
1999         u32 link_check = 0;
2000
2001         /* Get the cached link value or read phy for real */
2002         switch (hw->phy.media_type) {
2003         case e1000_media_type_copper:
2004                 if (hw->mac.get_link_status) {
2005                         /* Do the work to read phy */
2006                         e1000_check_for_link(hw);
2007                         link_check = !hw->mac.get_link_status;
2008                         if (link_check) /* ESB2 fix */
2009                                 e1000_cfg_on_link_up(hw);
2010                 } else
2011                         link_check = TRUE;
2012                 break;
2013         case e1000_media_type_fiber:
2014                 e1000_check_for_link(hw);
2015                 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2016                                  E1000_STATUS_LU);
2017                 break;
2018         case e1000_media_type_internal_serdes:
2019                 e1000_check_for_link(hw);
2020                 link_check = adapter->hw.mac.serdes_has_link;
2021                 break;
2022         default:
2023         case e1000_media_type_unknown:
2024                 break;
2025         }
2026
2027         /* Now check for a transition */
2028         if (link_check && (adapter->link_active == 0)) {
2029                 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2030                     &adapter->link_duplex);
2031                 if (bootverbose)
2032                         device_printf(dev, "Link is up %d Mbps %s\n",
2033                             adapter->link_speed,
2034                             ((adapter->link_duplex == FULL_DUPLEX) ?
2035                             "Full Duplex" : "Half Duplex"));
2036                 adapter->link_active = 1;
2037                 adapter->smartspeed = 0;
2038                 ifp->if_baudrate = adapter->link_speed * 1000000;
2039                 if_link_state_change(ifp, LINK_STATE_UP);
2040         } else if (!link_check && (adapter->link_active == 1)) {
2041                 ifp->if_baudrate = adapter->link_speed = 0;
2042                 adapter->link_duplex = 0;
2043                 if (bootverbose)
2044                         device_printf(dev, "Link is Down\n");
2045                 adapter->link_active = 0;
2046                 /* Link down, disable watchdog */
2047                 adapter->watchdog_check = FALSE;
2048                 if_link_state_change(ifp, LINK_STATE_DOWN);
2049         }
2050 }
2051
2052 /*********************************************************************
2053  *
2054  *  This routine disables all traffic on the adapter by issuing a
2055  *  global reset on the MAC and stopping the driver's timers.
2056  *
2057  *  This routine should always be called with BOTH the CORE
2058  *  and TX locks.
2059  **********************************************************************/
2060
2061 static void
2062 lem_stop(void *arg)
2063 {
2064         struct adapter  *adapter = arg;
2065         struct ifnet    *ifp = adapter->ifp;
2066
2067         EM_CORE_LOCK_ASSERT(adapter);
2068         EM_TX_LOCK_ASSERT(adapter);
2069
2070         INIT_DEBUGOUT("lem_stop: begin");
2071
2072         lem_disable_intr(adapter);
2073         callout_stop(&adapter->timer);
2074         callout_stop(&adapter->tx_fifo_timer);
2075
2076         /* Tell the stack that the interface is no longer active */
2077         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2078
2079         e1000_reset_hw(&adapter->hw);
2080         if (adapter->hw.mac.type >= e1000_82544)
2081                 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2082
2083         e1000_led_off(&adapter->hw);
2084         e1000_cleanup_led(&adapter->hw);
2085 }
2086
2087
2088 /*********************************************************************
2089  *
2090  *  Determine hardware type and revision.
2091  *
2092  **********************************************************************/
2093 static void
2094 lem_identify_hardware(struct adapter *adapter)
2095 {
2096         device_t dev = adapter->dev;
2097
2098         /* Make sure our PCI config space has the necessary stuff set */
2099         adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2100         if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2101             (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2102                 device_printf(dev, "Memory Access and/or Bus Master bits "
2103                     "were not set!\n");
2104                 adapter->hw.bus.pci_cmd_word |=
2105                 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2106                 pci_write_config(dev, PCIR_COMMAND,
2107                     adapter->hw.bus.pci_cmd_word, 2);
2108         }
2109
2110         /* Save off the information about this board */
2111         adapter->hw.vendor_id = pci_get_vendor(dev);
2112         adapter->hw.device_id = pci_get_device(dev);
2113         adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2114         adapter->hw.subsystem_vendor_id =
2115             pci_read_config(dev, PCIR_SUBVEND_0, 2);
2116         adapter->hw.subsystem_device_id =
2117             pci_read_config(dev, PCIR_SUBDEV_0, 2);
2118
2119         /* Do Shared Code Init and Setup */
2120         if (e1000_set_mac_type(&adapter->hw)) {
2121                 device_printf(dev, "Setup init failure\n");
2122                 return;
2123         }
2124 }
2125
2126 static int
2127 lem_allocate_pci_resources(struct adapter *adapter)
2128 {
2129         device_t        dev = adapter->dev;
2130         int             val, rid, error = E1000_SUCCESS;
2131
2132         rid = PCIR_BAR(0);
2133         adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2134             &rid, RF_ACTIVE);
2135         if (adapter->memory == NULL) {
2136                 device_printf(dev, "Unable to allocate bus resource: memory\n");
2137                 return (ENXIO);
2138         }
2139         adapter->osdep.mem_bus_space_tag =
2140             rman_get_bustag(adapter->memory);
2141         adapter->osdep.mem_bus_space_handle =
2142             rman_get_bushandle(adapter->memory);
2143         adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2144
2145         /* Only older adapters use IO mapping */
2146         if (adapter->hw.mac.type > e1000_82543) {
2147                 /* Figure out where our IO BAR is */
2148                 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2149                         val = pci_read_config(dev, rid, 4);
2150                         if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2151                                 adapter->io_rid = rid;
2152                                 break;
2153                         }
2154                         rid += 4;
2155                         /* check for 64bit BAR */
2156                         if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2157                                 rid += 4;
2158                 }
2159                 if (rid >= PCIR_CIS) {
2160                         device_printf(dev, "Unable to locate IO BAR\n");
2161                         return (ENXIO);
2162                 }
2163                 adapter->ioport = bus_alloc_resource_any(dev,
2164                     SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2165                 if (adapter->ioport == NULL) {
2166                         device_printf(dev, "Unable to allocate bus resource: "
2167                             "ioport\n");
2168                         return (ENXIO);
2169                 }
2170                 adapter->hw.io_base = 0;
2171                 adapter->osdep.io_bus_space_tag =
2172                     rman_get_bustag(adapter->ioport);
2173                 adapter->osdep.io_bus_space_handle =
2174                     rman_get_bushandle(adapter->ioport);
2175         }
2176
2177         adapter->hw.back = &adapter->osdep;
2178
2179         return (error);
2180 }
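
/*
 * Example (illustrative): the IO BAR scan above walks the config-space BARs
 * starting at PCIR_BAR(0) (offset 0x10).  A 64-bit memory BAR occupies two
 * consecutive 32-bit registers, so in that case the scan skips ahead 8
 * bytes and probes offset 0x18 next; the first BAR whose type bits decode
 * as I/O becomes adapter->io_rid and is then allocated with
 * bus_alloc_resource_any().
 */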
2181
2182 /*********************************************************************
2183  *
2184  *  Setup the Legacy or MSI Interrupt handler
2185  *
2186  **********************************************************************/
2187 int
2188 lem_allocate_irq(struct adapter *adapter)
2189 {
2190         device_t dev = adapter->dev;
2191         int error, rid = 0;
2192
2193         /* Manually turn off all interrupts */
2194         E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2195
2196         /* We allocate a single interrupt resource */
2197         adapter->res[0] = bus_alloc_resource_any(dev,
2198             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2199         if (adapter->res[0] == NULL) {
2200                 device_printf(dev, "Unable to allocate bus resource: "
2201                     "interrupt\n");
2202                 return (ENXIO);
2203         }
2204
2205 #ifdef EM_LEGACY_IRQ
2206         /* We do Legacy setup */
2207         if ((error = bus_setup_intr(dev, adapter->res[0],
2208             INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
2209             &adapter->tag[0])) != 0) {
2210                 device_printf(dev, "Failed to register interrupt handler\n");
2211                 return (error);
2212         }
2213
2214 #else /* FAST_IRQ */
2215         /*
2216          * Try allocating a fast interrupt and the associated deferred
2217          * processing contexts.
2218          */
2219         TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
2220         TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
2221         adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
2222             taskqueue_thread_enqueue, &adapter->tq);
2223         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2224             device_get_nameunit(adapter->dev));
2225         if ((error = bus_setup_intr(dev, adapter->res[0],
2226             INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
2227             &adapter->tag[0])) != 0) {
2228                 device_printf(dev, "Failed to register fast interrupt "
2229                             "handler: %d\n", error);
2230                 taskqueue_free(adapter->tq);
2231                 adapter->tq = NULL;
2232                 return (error);
2233         }
2234 #endif  /* EM_LEGACY_IRQ */
2235         
2236         return (0);
2237 }
2238
2239
2240 static void
2241 lem_free_pci_resources(struct adapter *adapter)
2242 {
2243         device_t dev = adapter->dev;
2244
2245
2246         if (adapter->tag[0] != NULL) {
2247                 bus_teardown_intr(dev, adapter->res[0],
2248                     adapter->tag[0]);
2249                 adapter->tag[0] = NULL;
2250         }
2251
2252         if (adapter->res[0] != NULL) {
2253                 bus_release_resource(dev, SYS_RES_IRQ,
2254                     0, adapter->res[0]);
2255         }
2256
2257         if (adapter->memory != NULL)
2258                 bus_release_resource(dev, SYS_RES_MEMORY,
2259                     PCIR_BAR(0), adapter->memory);
2260
2261         if (adapter->ioport != NULL)
2262                 bus_release_resource(dev, SYS_RES_IOPORT,
2263                     adapter->io_rid, adapter->ioport);
2264 }
2265
2266
2267 /*********************************************************************
2268  *
2269  *  Initialize the hardware to a configuration
2270  *  as specified by the adapter structure.
2271  *
2272  **********************************************************************/
2273 static int
2274 lem_hardware_init(struct adapter *adapter)
2275 {
2276         device_t dev = adapter->dev;
2277         u16     rx_buffer_size;
2278
2279         INIT_DEBUGOUT("lem_hardware_init: begin");
2280
2281         /* Issue a global reset */
2282         e1000_reset_hw(&adapter->hw);
2283
2284         /* When hardware is reset, fifo_head is also reset */
2285         adapter->tx_fifo_head = 0;
2286
2287         /*
2288          * These parameters control the automatic generation (Tx) and
2289          * response (Rx) to Ethernet PAUSE frames.
2290          * - High water mark should allow for at least two frames to be
2291          *   received after sending an XOFF.
2292          * - Low water mark works best when it is very near the high water mark.
2293          *   This allows the receiver to restart by sending XON when it has
2294          *   drained a bit. Here we use an arbitrary value of 1500 which will
2295          *   restart after one full frame is pulled from the buffer. There
2296          *   could be several smaller frames in the buffer and if so they will
2297          *   not trigger the XON until their total number reduces the buffer
2298          *   by 1500.
2299          * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2300          */
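        /*
         * Worked example (illustrative; the PBA value and frame size are
         * hypothetical): if the packet buffer register reads 0x0030, the RX
         * buffer is 0x30 << 10 = 49152 bytes, and with a 1518-byte maximum
         * frame:
         *   high_water = 49152 - roundup2(1518, 1024) = 49152 - 2048 = 47104
         *   low_water  = 47104 - 1500                 = 45604
         */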
2301         rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
2302             0xffff) << 10 );
2303
2304         adapter->hw.fc.high_water = rx_buffer_size -
2305             roundup2(adapter->max_frame_size, 1024);
2306         adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2307
2308         adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2309         adapter->hw.fc.send_xon = TRUE;
2310
2311         /* Set Flow control, use the tunable location if sane */
2312         if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
2313                 adapter->hw.fc.requested_mode = lem_fc_setting;
2314         else
2315                 adapter->hw.fc.requested_mode = e1000_fc_none;
2316
2317         if (e1000_init_hw(&adapter->hw) < 0) {
2318                 device_printf(dev, "Hardware Initialization Failed\n");
2319                 return (EIO);
2320         }
2321
2322         e1000_check_for_link(&adapter->hw);
2323
2324         return (0);
2325 }
2326
2327 /*********************************************************************
2328  *
2329  *  Setup networking device structure and register an interface.
2330  *
2331  **********************************************************************/
2332 static int
2333 lem_setup_interface(device_t dev, struct adapter *adapter)
2334 {
2335         struct ifnet   *ifp;
2336
2337         INIT_DEBUGOUT("lem_setup_interface: begin");
2338
2339         ifp = adapter->ifp = if_alloc(IFT_ETHER);
2340         if (ifp == NULL) {
2341                 device_printf(dev, "can not allocate ifnet structure\n");
2342                 return (-1);
2343         }
2344         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2345         ifp->if_mtu = ETHERMTU;
2346         ifp->if_init =  lem_init;
2347         ifp->if_softc = adapter;
2348         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2349         ifp->if_ioctl = lem_ioctl;
2350         ifp->if_start = lem_start;
2351         IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2352         ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2353         IFQ_SET_READY(&ifp->if_snd);
2354
2355         ether_ifattach(ifp, adapter->hw.mac.addr);
2356
2357         ifp->if_capabilities = ifp->if_capenable = 0;
2358
2359         if (adapter->hw.mac.type >= e1000_82543) {
2360                 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2361                 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2362         }
2363
2364         /*
2365          * Tell the upper layer(s) we support long frames.
2366          */
2367         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2368         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2369         ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2370
2371         /*
2372         ** Don't turn this on by default: if vlans are
2373         ** created on another pseudo device (e.g. lagg),
2374         ** then vlan events are not passed through, breaking
2375         ** operation, but with HW FILTER off it works. If
2376         ** using vlans directly on the em driver you can
2377         ** enable this and get full hardware tag filtering.
2378         */
2379         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2380
2381 #ifdef DEVICE_POLLING
2382         ifp->if_capabilities |= IFCAP_POLLING;
2383 #endif
2384
2385         /* Enable only WOL MAGIC by default */
2386         if (adapter->wol) {
2387                 ifp->if_capabilities |= IFCAP_WOL;
2388                 ifp->if_capenable |= IFCAP_WOL_MAGIC;
2389         }
2390                 
2391         /*
2392          * Specify the media types supported by this adapter and register
2393          * callbacks to update media and link information
2394          */
2395         ifmedia_init(&adapter->media, IFM_IMASK,
2396             lem_media_change, lem_media_status);
2397         if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2398             (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2399                 u_char fiber_type = IFM_1000_SX;        /* default type */
2400
2401                 if (adapter->hw.mac.type == e1000_82545)
2402                         fiber_type = IFM_1000_LX;
2403                 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 
2404                             0, NULL);
2405                 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2406         } else {
2407                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2408                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2409                             0, NULL);
2410                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2411                             0, NULL);
2412                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2413                             0, NULL);
2414                 if (adapter->hw.phy.type != e1000_phy_ife) {
2415                         ifmedia_add(&adapter->media,
2416                                 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2417                         ifmedia_add(&adapter->media,
2418                                 IFM_ETHER | IFM_1000_T, 0, NULL);
2419                 }
2420         }
2421         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2422         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2423         return (0);
2424 }
2425
2426
2427 /*********************************************************************
2428  *
2429  *  Workaround for SmartSpeed on 82541 and 82547 controllers
2430  *
2431  **********************************************************************/
2432 static void
2433 lem_smartspeed(struct adapter *adapter)
2434 {
2435         u16 phy_tmp;
2436
2437         if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2438             adapter->hw.mac.autoneg == 0 ||
2439             (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2440                 return;
2441
2442         if (adapter->smartspeed == 0) {
2443                 /* If Master/Slave config fault is asserted twice,
2444                  * we assume back-to-back */
2445                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2446                 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2447                         return;
2448                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2449                 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2450                         e1000_read_phy_reg(&adapter->hw,
2451                             PHY_1000T_CTRL, &phy_tmp);
2452                         if(phy_tmp & CR_1000T_MS_ENABLE) {
2453                                 phy_tmp &= ~CR_1000T_MS_ENABLE;
2454                                 e1000_write_phy_reg(&adapter->hw,
2455                                     PHY_1000T_CTRL, phy_tmp);
2456                                 adapter->smartspeed++;
2457                                 if(adapter->hw.mac.autoneg &&
2458                                    !e1000_copper_link_autoneg(&adapter->hw) &&
2459                                    !e1000_read_phy_reg(&adapter->hw,
2460                                     PHY_CONTROL, &phy_tmp)) {
2461                                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
2462                                                     MII_CR_RESTART_AUTO_NEG);
2463                                         e1000_write_phy_reg(&adapter->hw,
2464                                             PHY_CONTROL, phy_tmp);
2465                                 }
2466                         }
2467                 }
2468                 return;
2469         } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2470                 /* If still no link, perhaps using 2/3 pair cable */
2471                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2472                 phy_tmp |= CR_1000T_MS_ENABLE;
2473                 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2474                 if(adapter->hw.mac.autoneg &&
2475                    !e1000_copper_link_autoneg(&adapter->hw) &&
2476                    !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2477                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
2478                                     MII_CR_RESTART_AUTO_NEG);
2479                         e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2480                 }
2481         }
2482         /* Restart process after EM_SMARTSPEED_MAX iterations */
2483         if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2484                 adapter->smartspeed = 0;
2485 }
2486
2487
2488 /*
2489  * Manage DMA'able memory.
2490  */
2491 static void
2492 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2493 {
2494         if (error)
2495                 return;
2496         *(bus_addr_t *) arg = segs[0].ds_addr;
2497 }
2498
2499 static int
2500 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2501         struct em_dma_alloc *dma, int mapflags)
2502 {
2503         int error;
2504
2505         error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2506                                 EM_DBA_ALIGN, 0,        /* alignment, bounds */
2507                                 BUS_SPACE_MAXADDR,      /* lowaddr */
2508                                 BUS_SPACE_MAXADDR,      /* highaddr */
2509                                 NULL, NULL,             /* filter, filterarg */
2510                                 size,                   /* maxsize */
2511                                 1,                      /* nsegments */
2512                                 size,                   /* maxsegsize */
2513                                 0,                      /* flags */
2514                                 NULL,                   /* lockfunc */
2515                                 NULL,                   /* lockarg */
2516                                 &dma->dma_tag);
2517         if (error) {
2518                 device_printf(adapter->dev,
2519                     "%s: bus_dma_tag_create failed: %d\n",
2520                     __func__, error);
2521                 goto fail_0;
2522         }
2523
2524         error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2525             BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2526         if (error) {
2527                 device_printf(adapter->dev,
2528                     "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2529                     __func__, (uintmax_t)size, error);
2530                 goto fail_2;
2531         }
2532
2533         dma->dma_paddr = 0;
2534         error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2535             size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2536         if (error || dma->dma_paddr == 0) {
2537                 device_printf(adapter->dev,
2538                     "%s: bus_dmamap_load failed: %d\n",
2539                     __func__, error);
2540                 goto fail_3;
2541         }
2542
2543         return (0);
2544
2545 fail_3:
2546         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2547 fail_2:
2548         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2549         bus_dma_tag_destroy(dma->dma_tag);
2550 fail_0:
2551         dma->dma_map = NULL;
2552         dma->dma_tag = NULL;
2553
2554         return (error);
2555 }
2556
2557 static void
2558 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2559 {
2560         if (dma->dma_tag == NULL)
2561                 return;
2562         if (dma->dma_map != NULL) {
2563                 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2564                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2565                 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2566                 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2567                 dma->dma_map = NULL;
2568         }
2569         bus_dma_tag_destroy(dma->dma_tag);
2570         dma->dma_tag = NULL;
2571 }
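
/*
 * Usage sketch (not compiled): how a descriptor ring would typically be
 * carved out with the helpers above.  The caller name and the 256-entry
 * ring size are hypothetical.
 */
#if 0
static int
foo_alloc_tx_ring(struct adapter *adapter)
{
        struct em_dma_alloc ring;
        int error;

        error = lem_dma_malloc(adapter,
            256 * sizeof(struct e1000_tx_desc), &ring, 0);
        if (error != 0)
                return (error);
        /* ring.dma_vaddr is the KVA, ring.dma_paddr the bus address. */
        bzero(ring.dma_vaddr, 256 * sizeof(struct e1000_tx_desc));
        /* ... hand ring.dma_paddr to the hardware, use the ring, then ... */
        lem_dma_free(adapter, &ring);
        return (0);
}
#endif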
2572
2573
2574 /*********************************************************************
2575  *
2576  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2577  *  the information needed to transmit a packet on the wire.
2578  *
2579  **********************************************************************/
2580 static int
2581 lem_allocate_transmit_structures(struct adapter *adapter)
2582 {
2583         device_t dev = adapter->dev;
2584         struct em_buffer *tx_buffer;
2585         int error;
2586
2587         /*
2588          * Create DMA tags for tx descriptors
2589          */
2590         if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2591                                 1, 0,                   /* alignment, bounds */
2592                                 BUS_SPACE_MAXADDR,      /* lowaddr */
2593                                 BUS_SPACE_MAXADDR,      /* highaddr */
2594                                 NULL, NULL,             /* filter, filterarg */
2595                                 MCLBYTES * EM_MAX_SCATTER,      /* maxsize */
2596                                 EM_MAX_SCATTER,         /* nsegments */
2597                                 MCLBYTES,               /* maxsegsize */
2598                                 0,                      /* flags */
2599                                 NULL,                   /* lockfunc */
2600                                 NULL,                   /* lockarg */
2601                                 &adapter->txtag)) != 0) {
2602                 device_printf(dev, "Unable to allocate TX DMA tag\n");
2603                 goto fail;
2604         }
2605
2606         adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2607             adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2608         if (adapter->tx_buffer_area == NULL) {
2609                 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2610                 error = ENOMEM;
2611                 goto fail;
2612         }
2613
2614         /* Create the descriptor buffer dma maps */
2615         for (int i = 0; i < adapter->num_tx_desc; i++) {
2616                 tx_buffer = &adapter->tx_buffer_area[i];
2617                 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2618                 if (error != 0) {
2619                         device_printf(dev, "Unable to create TX DMA map\n");
2620                         goto fail;
2621                 }
2622                 tx_buffer->next_eop = -1;
2623         }
2624
2625         return (0);
2626 fail:
2627         lem_free_transmit_structures(adapter);
2628         return (error);
2629 }
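
/*
 * Example (illustrative): the TX tag above lets one mapping span up to
 * EM_MAX_SCATTER segments of at most MCLBYTES (a 2 KB cluster) each, which
 * is why lem_xmit() can hand bus_dmamap_load_mbuf_sg() a fixed
 * segs[EM_MAX_SCATTER] array without risk of overflow.
 */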
2630
2631 /*********************************************************************
2632  *
2633  *  (Re)Initialize transmit structures.
2634  *
2635  **********************************************************************/
2636 static void
2637 lem_setup_transmit_structures(struct adapter *adapter)
2638 {
2639         struct em_buffer *tx_buffer;
2640
2641         /* Clear the old ring contents */
2642         bzero(adapter->tx_desc_base,
2643             (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2644
2645         /* Free any existing TX buffers */
2646         for (int i = 0; i < adapter->num_tx_desc; i++) {
2647                 tx_buffer = &adapter->tx_buffer_area[i];
2648                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2649                     BUS_DMASYNC_POSTWRITE);
2650                 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2651                 m_freem(tx_buffer->m_head);
2652                 tx_buffer->m_head = NULL;
2653                 tx_buffer->next_eop = -1;
2654         }
2655
2656         /* Reset state */
2657         adapter->last_hw_offload = 0;
2658         adapter->next_avail_tx_desc = 0;
2659         adapter->next_tx_to_clean = 0;
2660         adapter->num_tx_desc_avail = adapter->num_tx_desc;
2661
2662         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2663             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2664
2665         return;
2666 }
2667
2668 /*********************************************************************
2669  *
2670  *  Enable transmit unit.
2671  *
2672  **********************************************************************/
2673 static void
2674 lem_initialize_transmit_unit(struct adapter *adapter)
2675 {
2676         u32     tctl, tipg = 0;
2677         u64     bus_addr;
2678
2679          INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
2680         /* Setup the Base and Length of the Tx Descriptor Ring */
2681         bus_addr = adapter->txdma.dma_paddr;
2682         E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2683             adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2684         E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2685             (u32)(bus_addr >> 32));
2686         E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2687             (u32)bus_addr);
2688         /* Setup the HW Tx Head and Tail descriptor pointers */
2689         E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2690         E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
2691
2692         HW_DEBUGOUT2("Base = %x, Length = %x\n",
2693             E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
2694             E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
2695
2696         /* Set the default values for the Tx Inter Packet Gap timer */
2697         switch (adapter->hw.mac.type) {
2698         case e1000_82542:
2699                 tipg = DEFAULT_82542_TIPG_IPGT;
2700                 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2701                 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2702                 break;
2703         default:
2704                 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2705                     (adapter->hw.phy.media_type ==
2706                     e1000_media_type_internal_serdes))
2707                         tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2708                 else
2709                         tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2710                 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2711                 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2712         }
2713
2714         E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2715         E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
2716         if(adapter->hw.mac.type >= e1000_82540)
2717                 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
2718                     adapter->tx_abs_int_delay.value);
2719
2720         /* Program the Transmit Control Register */
2721         tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2722         tctl &= ~E1000_TCTL_CT;
2723         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2724                    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2725
2726         /* This write will effectively turn on the transmit unit. */
2727         E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2728
2729         /* Setup Transmit Descriptor Base Settings */   
2730         adapter->txd_cmd = E1000_TXD_CMD_IFCS;
2731
2732         if (adapter->tx_int_delay.value > 0)
2733                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2734 }
2735
2736 /*********************************************************************
2737  *
2738  *  Free all transmit related data structures.
2739  *
2740  **********************************************************************/
2741 static void
2742 lem_free_transmit_structures(struct adapter *adapter)
2743 {
2744         struct em_buffer *tx_buffer;
2745
2746         INIT_DEBUGOUT("free_transmit_structures: begin");
2747
2748         if (adapter->tx_buffer_area != NULL) {
2749                 for (int i = 0; i < adapter->num_tx_desc; i++) {
2750                         tx_buffer = &adapter->tx_buffer_area[i];
2751                         if (tx_buffer->m_head != NULL) {
2752                                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2753                                     BUS_DMASYNC_POSTWRITE);
2754                                 bus_dmamap_unload(adapter->txtag,
2755                                     tx_buffer->map);
2756                                 m_freem(tx_buffer->m_head);
2757                                 tx_buffer->m_head = NULL;
2758                         } else if (tx_buffer->map != NULL)
2759                                 bus_dmamap_unload(adapter->txtag,
2760                                     tx_buffer->map);
2761                         if (tx_buffer->map != NULL) {
2762                                 bus_dmamap_destroy(adapter->txtag,
2763                                     tx_buffer->map);
2764                                 tx_buffer->map = NULL;
2765                         }
2766                 }
2767         }
2768         if (adapter->tx_buffer_area != NULL) {
2769                 free(adapter->tx_buffer_area, M_DEVBUF);
2770                 adapter->tx_buffer_area = NULL;
2771         }
2772         if (adapter->txtag != NULL) {
2773                 bus_dma_tag_destroy(adapter->txtag);
2774                 adapter->txtag = NULL;
2775         }
2776 #if __FreeBSD_version >= 800000
2777         if (adapter->br != NULL)
2778                 buf_ring_free(adapter->br, M_DEVBUF);
2779 #endif
2780 }
2781
2782 /*********************************************************************
2783  *
2784  *  The offload context needs to be set when we transfer the first
2785  *  packet of a particular protocol (TCP/UDP). This routine has been
2786  *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2787  *
2788  *  Added back the old method of keeping the current context type
2789  *  and not setting if unnecessary, as this is reported to be a
2790  *  big performance win.  -jfv
2791  **********************************************************************/
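/*
 * Illustrative example of the context caching (derived from the code
 * below): a burst of TCP segments costs only one context descriptor,
 * because last_hw_offload remembers CSUM_TCP and later segments return
 * early; a new context is written only when the offload type changes,
 * e.g. when a UDP datagram follows.
 */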
2792 static void
2793 lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2794     u32 *txd_upper, u32 *txd_lower)
2795 {
2796         struct e1000_context_desc *TXD = NULL;
2797         struct em_buffer *tx_buffer;
2798         struct ether_vlan_header *eh;
2799         struct ip *ip = NULL;
2800         struct ip6_hdr *ip6;
2801         int curr_txd, ehdrlen;
2802         u32 cmd, hdr_len, ip_hlen;
2803         u16 etype;
2804         u8 ipproto;
2805
2806
2807         cmd = hdr_len = ipproto = 0;
2808         *txd_upper = *txd_lower = 0;
2809         curr_txd = adapter->next_avail_tx_desc;
2810
2811         /*
2812          * Determine where frame payload starts.
2813          * Jump over vlan headers if already present,
2814          * helpful for QinQ too.
2815          */
2816         eh = mtod(mp, struct ether_vlan_header *);
2817         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2818                 etype = ntohs(eh->evl_proto);
2819                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2820         } else {
2821                 etype = ntohs(eh->evl_encap_proto);
2822                 ehdrlen = ETHER_HDR_LEN;
2823         }
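        /*
         * For example, an untagged frame gives ehdrlen = ETHER_HDR_LEN (14),
         * while a frame carrying an 802.1Q tag gives 14 + 4 = 18.
         */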
2824
2825         /*
2826          * We only support TCP/UDP for IPv4 and IPv6 for the moment.
2827          * TODO: Support SCTP too when it hits the tree.
2828          */
2829         switch (etype) {
2830         case ETHERTYPE_IP:
2831                 ip = (struct ip *)(mp->m_data + ehdrlen);
2832                 ip_hlen = ip->ip_hl << 2;
2833
2834                 /* Setup of IP header checksum. */
2835                 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
2836                         /*
2837                          * Start offset for header checksum calculation.
2838                          * End offset for header checksum calculation.
2839                          * Offset of place to put the checksum.
2840                          */
2841                         TXD = (struct e1000_context_desc *)
2842                             &adapter->tx_desc_base[curr_txd];
2843                         TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2844                         TXD->lower_setup.ip_fields.ipcse =
2845                             htole16(ehdrlen + ip_hlen);
2846                         TXD->lower_setup.ip_fields.ipcso =
2847                             ehdrlen + offsetof(struct ip, ip_sum);
2848                         cmd |= E1000_TXD_CMD_IP;
2849                         *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2850                 }
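                /*
                 * Worked example, assuming an untagged IPv4 frame with a
                 * 20-byte header: ehdrlen = 14 and ip_hlen = 20, so the
                 * values written above are ipcss = 14, ipcse = 34 and
                 * ipcso = 14 + 10 = 24 (offsetof(struct ip, ip_sum) is 10).
                 */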
2851
2852                 hdr_len = ehdrlen + ip_hlen;
2853                 ipproto = ip->ip_p;
2854
2855                 break;
2856         case ETHERTYPE_IPV6:
2857                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2858                 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
2859
2860                 /* IPv6 doesn't have a header checksum. */
2861
2862                 hdr_len = ehdrlen + ip_hlen;
2863                 ipproto = ip6->ip6_nxt;
2864                 break;
2865
2866         default:
2867                 return;
2868         }
2869
2870         switch (ipproto) {
2871         case IPPROTO_TCP:
2872                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2873                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2874                         *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2875                         /* no need for context if already set */
2876                         if (adapter->last_hw_offload == CSUM_TCP)
2877                                 return;
2878                         adapter->last_hw_offload = CSUM_TCP;
2879                         /*
2880                          * Start offset for payload checksum calculation.
2881                          * End offset for payload checksum calculation.
2882                          * Offset of place to put the checksum.
2883                          */
2884                         TXD = (struct e1000_context_desc *)
2885                             &adapter->tx_desc_base[curr_txd];
2886                         TXD->upper_setup.tcp_fields.tucss = hdr_len;
2887                         TXD->upper_setup.tcp_fields.tucse = htole16(0);
2888                         TXD->upper_setup.tcp_fields.tucso =
2889                             hdr_len + offsetof(struct tcphdr, th_sum);
2890                         cmd |= E1000_TXD_CMD_TCP;
2891                 }
2892                 break;
2893         case IPPROTO_UDP:
2894         {
2895                 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2896                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2897                         *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2898                         /* no need for context if already set */
2899                         if (adapter->last_hw_offload == CSUM_UDP)
2900                                 return;
2901                         adapter->last_hw_offload = CSUM_UDP;
2902                         /*
2903                          * Start offset for header checksum calculation.
2904                          * End offset for header checksum calculation.
2905                          * Offset of place to put the checksum.
2906                          */
2907                         TXD = (struct e1000_context_desc *)
2908                             &adapter->tx_desc_base[curr_txd];
2909                         TXD->upper_setup.tcp_fields.tucss = hdr_len;
2910                         TXD->upper_setup.tcp_fields.tucse = htole16(0);
2911                         TXD->upper_setup.tcp_fields.tucso =
2912                             hdr_len + offsetof(struct udphdr, uh_sum);
2913                 }
2914                 /* Fall Thru */
2915         }
2916         default:
2917                 break;
2918         }
2919
2920         if (TXD == NULL)
2921                 return;
2922         TXD->tcp_seg_setup.data = htole32(0);
2923         TXD->cmd_and_length =
2924             htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
2925         tx_buffer = &adapter->tx_buffer_area[curr_txd];
2926         tx_buffer->m_head = NULL;
2927         tx_buffer->next_eop = -1;
2928
2929         if (++curr_txd == adapter->num_tx_desc)
2930                 curr_txd = 0;
2931
2932         adapter->num_tx_desc_avail--;
2933         adapter->next_avail_tx_desc = curr_txd;
2934 }
2935
2936
2937 /**********************************************************************
2938  *
2939  *  Examine each tx_buffer in the used queue. If the hardware is done
2940  *  processing the packet then free associated resources. The
2941  *  tx_buffer is put back on the free queue.
2942  *
2943  **********************************************************************/
2944 static void
2945 lem_txeof(struct adapter *adapter)
2946 {
2947         int first, last, done, num_avail;
2948         struct em_buffer *tx_buffer;
2949         struct e1000_tx_desc   *tx_desc, *eop_desc;
2950         struct ifnet   *ifp = adapter->ifp;
2951
2952         EM_TX_LOCK_ASSERT(adapter);
2953
2954         if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2955                 return;
2956
2957         num_avail = adapter->num_tx_desc_avail;
2958         first = adapter->next_tx_to_clean;
2959         tx_desc = &adapter->tx_desc_base[first];
2960         tx_buffer = &adapter->tx_buffer_area[first];
2961         last = tx_buffer->next_eop;
2962         eop_desc = &adapter->tx_desc_base[last];
2963
2964         /*
2965          * Get the index of the first descriptor AFTER the
2966          * EOP of the first packet; that way we can do the
2967          * simple comparison in the inner while loop
2968          * below.
2969          */
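        /*
         * For example, if next_tx_to_clean is 10 and that buffer's
         * next_eop is 12, last becomes 13 and done = 13, so the inner
         * loop below cleans descriptors 10, 11 and 12.
         */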
2970         if (++last == adapter->num_tx_desc)
2971                 last = 0;
2972         done = last;
2973
2974         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2975             BUS_DMASYNC_POSTREAD);
2976
2977         while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2978                 /* We clean the range of the packet */
2979                 while (first != done) {
2980                         tx_desc->upper.data = 0;
2981                         tx_desc->lower.data = 0;
2982                         tx_desc->buffer_addr = 0;
2983                         ++num_avail;
2984
2985                         if (tx_buffer->m_head) {
2986                                 ifp->if_opackets++;
2987                                 bus_dmamap_sync(adapter->txtag,
2988                                     tx_buffer->map,
2989                                     BUS_DMASYNC_POSTWRITE);
2990                                 bus_dmamap_unload(adapter->txtag,
2991                                     tx_buffer->map);
2992
2993                                 m_freem(tx_buffer->m_head);
2994                                 tx_buffer->m_head = NULL;
2995                         }
2996                         tx_buffer->next_eop = -1;
2997                         adapter->watchdog_time = ticks;
2998
2999                         if (++first == adapter->num_tx_desc)
3000                                 first = 0;
3001
3002                         tx_buffer = &adapter->tx_buffer_area[first];
3003                         tx_desc = &adapter->tx_desc_base[first];
3004                 }
3005                 /* See if we can continue to the next packet */
3006                 last = tx_buffer->next_eop;
3007                 if (last != -1) {
3008                         eop_desc = &adapter->tx_desc_base[last];
3009                         /* Get new done point */
3010                         if (++last == adapter->num_tx_desc) last = 0;
3011                         done = last;
3012                 } else
3013                         break;
3014         }
3015         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3016             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3017
3018         adapter->next_tx_to_clean = first;
3019         adapter->num_tx_desc_avail = num_avail;
3020
3021         /*
3022          * If we have enough room, clear IFF_DRV_OACTIVE to
3023          * tell the stack that it is OK to send packets.
3024          * If there are no pending descriptors, clear the watchdog.
3025          */
3026         if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {                
3027                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3028                 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
3029                         adapter->watchdog_check = FALSE;
3030                         return;
3031                 } 
3032         }
3033 }
3034
3035 /*********************************************************************
3036  *
3037  *  When link is lost there is sometimes work still pending in the TX
3038  *  ring, which may result in a watchdog. Rather than allow that, we do
3039  *  an attempted cleanup and then reinit here. Note that this has been
3040  *  seen mostly with fiber adapters.
3041  *
3042  **********************************************************************/
3043 static void
3044 lem_tx_purge(struct adapter *adapter)
3045 {
3046         if ((!adapter->link_active) && (adapter->watchdog_check)) {
3047                 EM_TX_LOCK(adapter);
3048                 lem_txeof(adapter);
3049                 EM_TX_UNLOCK(adapter);
3050                 if (adapter->watchdog_check) /* Still outstanding? */
3051                         lem_init_locked(adapter);
3052         }
3053 }
3054
3055 /*********************************************************************
3056  *
3057  *  Get a buffer from system mbuf buffer pool.
3058  *
3059  **********************************************************************/
3060 static int
3061 lem_get_buf(struct adapter *adapter, int i)
3062 {
3063         struct mbuf             *m;
3064         bus_dma_segment_t       segs[1];
3065         bus_dmamap_t            map;
3066         struct em_buffer        *rx_buffer;
3067         int                     error, nsegs;
3068
3069         m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3070         if (m == NULL) {
3071                 adapter->mbuf_cluster_failed++;
3072                 return (ENOBUFS);
3073         }
3074         m->m_len = m->m_pkthdr.len = MCLBYTES;
3075
3076         if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3077                 m_adj(m, ETHER_ALIGN);
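        /*
         * ETHER_ALIGN is 2, so trimming 2 bytes off the front of the
         * cluster leaves the 14-byte ethernet header ending on a 4-byte
         * boundary (2 + 14 = 16), which aligns the IP header that follows.
         */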
3078
3079         /*
3080          * Using memory from the mbuf cluster pool, invoke the
3081          * bus_dma machinery to arrange the memory mapping.
3082          */
3083         error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3084             adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3085         if (error != 0) {
3086                 m_free(m);
3087                 return (error);
3088         }
3089
3090         /* If nsegs is wrong then the stack is corrupt. */
3091         KASSERT(nsegs == 1, ("Too many segments returned!"));
3092
3093         rx_buffer = &adapter->rx_buffer_area[i];
3094         if (rx_buffer->m_head != NULL)
3095                 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3096
3097         map = rx_buffer->map;
3098         rx_buffer->map = adapter->rx_sparemap;
3099         adapter->rx_sparemap = map;
3100         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3101         rx_buffer->m_head = m;
3102
3103         adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3104         return (0);
3105 }
3106
3107 /*********************************************************************
3108  *
3109  *  Allocate memory for rx_buffer structures. Since we use one
3110  *  rx_buffer per received packet, the maximum number of rx_buffer's
3111  *  that we'll need is equal to the number of receive descriptors
3112  *  that we've allocated.
3113  *
3114  **********************************************************************/
3115 static int
3116 lem_allocate_receive_structures(struct adapter *adapter)
3117 {
3118         device_t dev = adapter->dev;
3119         struct em_buffer *rx_buffer;
3120         int i, error;
3121
3122         adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3123             adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3124         if (adapter->rx_buffer_area == NULL) {
3125                 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3126                 return (ENOMEM);
3127         }
3128
3129         error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3130                                 1, 0,                   /* alignment, bounds */
3131                                 BUS_SPACE_MAXADDR,      /* lowaddr */
3132                                 BUS_SPACE_MAXADDR,      /* highaddr */
3133                                 NULL, NULL,             /* filter, filterarg */
3134                                 MCLBYTES,               /* maxsize */
3135                                 1,                      /* nsegments */
3136                                 MCLBYTES,               /* maxsegsize */
3137                                 0,                      /* flags */
3138                                 NULL,                   /* lockfunc */
3139                                 NULL,                   /* lockarg */
3140                                 &adapter->rxtag);
3141         if (error) {
3142                 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3143                     __func__, error);
3144                 goto fail;
3145         }
3146
3147         /* Create the spare map (used by getbuf) */
3148         error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3149              &adapter->rx_sparemap);
3150         if (error) {
3151                 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3152                     __func__, error);
3153                 goto fail;
3154         }
3155
3156         rx_buffer = adapter->rx_buffer_area;
3157         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3158                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3159                     &rx_buffer->map);
3160                 if (error) {
3161                         device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3162                             __func__, error);
3163                         goto fail;
3164                 }
3165         }
3166
3167         return (0);
3168
3169 fail:
3170         lem_free_receive_structures(adapter);
3171         return (error);
3172 }
3173
3174 /*********************************************************************
3175  *
3176  *  (Re)initialize receive structures.
3177  *
3178  **********************************************************************/
3179 static int
3180 lem_setup_receive_structures(struct adapter *adapter)
3181 {
3182         struct em_buffer *rx_buffer;
3183         int i, error;
3184
3185         /* Reset descriptor ring */
3186         bzero(adapter->rx_desc_base,
3187             (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3188
3189         /* Free current RX buffers. */
3190         rx_buffer = adapter->rx_buffer_area;
3191         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3192                 if (rx_buffer->m_head != NULL) {
3193                         bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3194                             BUS_DMASYNC_POSTREAD);
3195                         bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3196                         m_freem(rx_buffer->m_head);
3197                         rx_buffer->m_head = NULL;
3198                 }
3199         }
3200
3201         /* Allocate new ones. */
3202         for (i = 0; i < adapter->num_rx_desc; i++) {
3203                 error = lem_get_buf(adapter, i);
3204                 if (error)
3205                         return (error);
3206         }
3207
3208         /* Setup our descriptor pointers */
3209         adapter->next_rx_desc_to_check = 0;
3210         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3211             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3212
3213         return (0);
3214 }
3215
3216 /*********************************************************************
3217  *
3218  *  Enable receive unit.
3219  *
3220  **********************************************************************/
3221 #define MAX_INTS_PER_SEC        8000
3222 #define DEFAULT_ITR          1000000000/(MAX_INTS_PER_SEC * 256)
3223
3224 static void
3225 lem_initialize_receive_unit(struct adapter *adapter)
3226 {
3227         struct ifnet    *ifp = adapter->ifp;
3228         u64     bus_addr;
3229         u32     rctl, rxcsum;
3230
3231         INIT_DEBUGOUT("lem_initialize_receive_unit: begin");
3232
3233         /*
3234          * Make sure receives are disabled while setting
3235          * up the descriptor ring
3236          */
3237         rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3238         E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3239
3240         if (adapter->hw.mac.type >= e1000_82540) {
3241                 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
3242                     adapter->rx_abs_int_delay.value);
3243                 /*
3244                  * Set the interrupt throttling rate. Value is calculated
3245                  * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3246                  */
3247                 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
3248         }
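        /*
         * Worked example: with MAX_INTS_PER_SEC = 8000 the value written
         * above is DEFAULT_ITR = 1000000000 / (8000 * 256) = 488, i.e.
         * 488 units of 256ns (~125us), limiting the interrupt rate to
         * roughly 8000 per second.
         */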
3249
3250         /*
3251         ** When using MSIX interrupts we need to throttle
3252         ** using the EITR register (82574 only)
3253         */
3254         if (adapter->msix)
3255                 for (int i = 0; i < 4; i++)
3256                         E1000_WRITE_REG(&adapter->hw,
3257                             E1000_EITR_82574(i), DEFAULT_ITR);
3258
3259         /* Disable accelerated acknowledge */
3260         if (adapter->hw.mac.type == e1000_82574)
3261                 E1000_WRITE_REG(&adapter->hw,
3262                     E1000_RFCTL, E1000_RFCTL_ACK_DIS);
3263
3264         /* Setup the Base and Length of the Rx Descriptor Ring */
3265         bus_addr = adapter->rxdma.dma_paddr;
3266         E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3267             adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3268         E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3269             (u32)(bus_addr >> 32));
3270         E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3271             (u32)bus_addr);
3272
3273         /* Setup the Receive Control Register */
3274         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3275         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3276                    E1000_RCTL_RDMTS_HALF |
3277                    (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3278
3279         /* Make sure VLAN Filters are off */
3280         rctl &= ~E1000_RCTL_VFE;
3281
3282         if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3283                 rctl |= E1000_RCTL_SBP;
3284         else
3285                 rctl &= ~E1000_RCTL_SBP;
3286
3287         switch (adapter->rx_buffer_len) {
3288         default:
3289         case 2048:
3290                 rctl |= E1000_RCTL_SZ_2048;
3291                 break;
3292         case 4096:
3293                 rctl |= E1000_RCTL_SZ_4096 |
3294                     E1000_RCTL_BSEX | E1000_RCTL_LPE;
3295                 break;
3296         case 8192:
3297                 rctl |= E1000_RCTL_SZ_8192 |
3298                     E1000_RCTL_BSEX | E1000_RCTL_LPE;
3299                 break;
3300         case 16384:
3301                 rctl |= E1000_RCTL_SZ_16384 |
3302                     E1000_RCTL_BSEX | E1000_RCTL_LPE;
3303                 break;
3304         }
3305
3306         if (ifp->if_mtu > ETHERMTU)
3307                 rctl |= E1000_RCTL_LPE;
3308         else
3309                 rctl &= ~E1000_RCTL_LPE;
3310
3311         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
3312         if ((adapter->hw.mac.type >= e1000_82543) &&
3313             (ifp->if_capenable & IFCAP_RXCSUM)) {
3314                 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3315                 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3316                 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3317         }
3318
3319         /* Enable Receives */
3320         E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3321
3322         /*
3323          * Setup the HW Rx Head and
3324          * Tail Descriptor Pointers
3325          */
3326         E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
3327         E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
3328
3329         return;
3330 }
3331
3332 /*********************************************************************
3333  *
3334  *  Free receive related data structures.
3335  *
3336  **********************************************************************/
3337 static void
3338 lem_free_receive_structures(struct adapter *adapter)
3339 {
3340         struct em_buffer *rx_buffer;
3341         int i;
3342
3343         INIT_DEBUGOUT("free_receive_structures: begin");
3344
3345         if (adapter->rx_sparemap) {
3346                 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3347                 adapter->rx_sparemap = NULL;
3348         }
3349
3350         /* Cleanup any existing buffers */
3351         if (adapter->rx_buffer_area != NULL) {
3352                 rx_buffer = adapter->rx_buffer_area;
3353                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3354                         if (rx_buffer->m_head != NULL) {
3355                                 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3356                                     BUS_DMASYNC_POSTREAD);
3357                                 bus_dmamap_unload(adapter->rxtag,
3358                                     rx_buffer->map);
3359                                 m_freem(rx_buffer->m_head);
3360                                 rx_buffer->m_head = NULL;
3361                         } else if (rx_buffer->map != NULL)
3362                                 bus_dmamap_unload(adapter->rxtag,
3363                                     rx_buffer->map);
3364                         if (rx_buffer->map != NULL) {
3365                                 bus_dmamap_destroy(adapter->rxtag,
3366                                     rx_buffer->map);
3367                                 rx_buffer->map = NULL;
3368                         }
3369                 }
3370         }
3371
3372         if (adapter->rx_buffer_area != NULL) {
3373                 free(adapter->rx_buffer_area, M_DEVBUF);
3374                 adapter->rx_buffer_area = NULL;
3375         }
3376
3377         if (adapter->rxtag != NULL) {
3378                 bus_dma_tag_destroy(adapter->rxtag);
3379                 adapter->rxtag = NULL;
3380         }
3381 }
3382
3383 /*********************************************************************
3384  *
3385  *  This routine executes in interrupt context. It replenishes
3386  *  the mbufs in the descriptor ring and passes data which has
3387  *  been dma'ed into host memory up to the upper layer.
3388  *
3389  *  We loop at most count times if count is > 0, or until done if
3390  *  count < 0.
3391  *  
3392  *  For polling we also now return the number of cleaned packets
3393  *********************************************************************/
3394 static bool
3395 lem_rxeof(struct adapter *adapter, int count, int *done)
3396 {
3397         struct ifnet    *ifp = adapter->ifp;
3398         struct mbuf     *mp;
3399         u8              status = 0, accept_frame = 0, eop = 0;
3400         u16             len, desc_len, prev_len_adj;
3401         int             i, rx_sent = 0;
3402         struct e1000_rx_desc   *current_desc;
3403
3404         EM_RX_LOCK(adapter);
3405         i = adapter->next_rx_desc_to_check;
3406         current_desc = &adapter->rx_desc_base[i];
3407         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3408             BUS_DMASYNC_POSTREAD);
3409
3410         if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3411                 if (done != NULL)
3412                         *done = rx_sent;
3413                 EM_RX_UNLOCK(adapter);
3414                 return (FALSE);
3415         }
3416
3417         while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3418                 struct mbuf *m = NULL;
3419
3420                 status = current_desc->status;
3421                 if ((status & E1000_RXD_STAT_DD) == 0)
3422                         break;
3423
3424                 mp = adapter->rx_buffer_area[i].m_head;
3425                 /*
3426                  * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3427                  * needs to access the last received byte in the mbuf.
3428                  */
3429                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3430                     BUS_DMASYNC_POSTREAD);
3431
3432                 accept_frame = 1;
3433                 prev_len_adj = 0;
3434                 desc_len = le16toh(current_desc->length);
3435                 if (status & E1000_RXD_STAT_EOP) {
3436                         count--;
3437                         eop = 1;
3438                         if (desc_len < ETHER_CRC_LEN) {
3439                                 len = 0;
3440                                 prev_len_adj = ETHER_CRC_LEN - desc_len;
3441                         } else
3442                                 len = desc_len - ETHER_CRC_LEN;
3443                 } else {
3444                         eop = 0;
3445                         len = desc_len;
3446                 }
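                /*
                 * Example: if a chained frame ends with a descriptor that
                 * holds only 2 bytes, desc_len (2) is less than
                 * ETHER_CRC_LEN (4), so len becomes 0 and prev_len_adj = 2;
                 * those 2 stray CRC bytes are trimmed from the previous
                 * mbuf further below.
                 */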
3447
3448                 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3449                         u8      last_byte;
3450                         u32     pkt_len = desc_len;
3451
3452                         if (adapter->fmp != NULL)
3453                                 pkt_len += adapter->fmp->m_pkthdr.len;
3454
3455                         last_byte = *(mtod(mp, caddr_t) + desc_len - 1);                        
3456                         if (TBI_ACCEPT(&adapter->hw, status,
3457                             current_desc->errors, pkt_len, last_byte,
3458                             adapter->min_frame_size, adapter->max_frame_size)) {
3459                                 e1000_tbi_adjust_stats_82543(&adapter->hw,
3460                                     &adapter->stats, pkt_len,
3461                                     adapter->hw.mac.addr,
3462                                     adapter->max_frame_size);
3463                                 if (len > 0)
3464                                         len--;
3465                         } else
3466                                 accept_frame = 0;
3467                 }
3468
3469                 if (accept_frame) {
3470                         if (lem_get_buf(adapter, i) != 0) {
3471                                 ifp->if_iqdrops++;
3472                                 goto discard;
3473                         }
3474
3475                         /* Assign correct length to the current fragment */
3476                         mp->m_len = len;
3477
3478                         if (adapter->fmp == NULL) {
3479                                 mp->m_pkthdr.len = len;
3480                                 adapter->fmp = mp; /* Store the first mbuf */
3481                                 adapter->lmp = mp;
3482                         } else {
3483                                 /* Chain mbuf's together */
3484                                 mp->m_flags &= ~M_PKTHDR;
3485                                 /*
3486                                  * Adjust length of previous mbuf in chain if
3487                                  * we received less than 4 bytes in the last
3488                                  * descriptor.
3489                                  */
3490                                 if (prev_len_adj > 0) {
3491                                         adapter->lmp->m_len -= prev_len_adj;
3492                                         adapter->fmp->m_pkthdr.len -=
3493                                             prev_len_adj;
3494                                 }
3495                                 adapter->lmp->m_next = mp;
3496                                 adapter->lmp = adapter->lmp->m_next;
3497                                 adapter->fmp->m_pkthdr.len += len;
3498                         }
3499
3500                         if (eop) {
3501                                 adapter->fmp->m_pkthdr.rcvif = ifp;
3502                                 ifp->if_ipackets++;
3503                                 lem_receive_checksum(adapter, current_desc,
3504                                     adapter->fmp);
3505 #ifndef __NO_STRICT_ALIGNMENT
3506                                 if (adapter->max_frame_size >
3507                                     (MCLBYTES - ETHER_ALIGN) &&
3508                                     lem_fixup_rx(adapter) != 0)
3509                                         goto skip;
3510 #endif
3511                                 if (status & E1000_RXD_STAT_VP) {
3512                                         adapter->fmp->m_pkthdr.ether_vtag =
3513                                             (le16toh(current_desc->special) &
3514                                             E1000_RXD_SPC_VLAN_MASK);
3515                                         adapter->fmp->m_flags |= M_VLANTAG;
3516                                 }
3517 #ifndef __NO_STRICT_ALIGNMENT
3518 skip:
3519 #endif
3520                                 m = adapter->fmp;
3521                                 adapter->fmp = NULL;
3522                                 adapter->lmp = NULL;
3523                         }
3524                 } else {
3525                         ifp->if_ierrors++;
3526 discard:
3527                         /* Reuse loaded DMA map and just update mbuf chain */
3528                         mp = adapter->rx_buffer_area[i].m_head;
3529                         mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3530                         mp->m_data = mp->m_ext.ext_buf;
3531                         mp->m_next = NULL;
3532                         if (adapter->max_frame_size <=
3533                             (MCLBYTES - ETHER_ALIGN))
3534                                 m_adj(mp, ETHER_ALIGN);
3535                         if (adapter->fmp != NULL) {
3536                                 m_freem(adapter->fmp);
3537                                 adapter->fmp = NULL;
3538                                 adapter->lmp = NULL;
3539                         }
3540                         m = NULL;
3541                 }
3542
3543                 /* Zero out the receive descriptors status. */
3544                 current_desc->status = 0;
3545                 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3546                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3547
3548                 /* Advance our pointers to the next descriptor. */
3549                 if (++i == adapter->num_rx_desc)
3550                         i = 0;
3551                 /* Call into the stack */
3552                 if (m != NULL) {
3553                         adapter->next_rx_desc_to_check = i;
3554                         EM_RX_UNLOCK(adapter);
3555                         (*ifp->if_input)(ifp, m);
3556                         EM_RX_LOCK(adapter);
3557                         rx_sent++;
3558                         i = adapter->next_rx_desc_to_check;
3559                 }
3560                 current_desc = &adapter->rx_desc_base[i];
3561         }
3562         adapter->next_rx_desc_to_check = i;
3563
3564         /* Advance the E1000's Receive Queue #0  "Tail Pointer". */
3565         if (--i < 0)
3566                 i = adapter->num_rx_desc - 1;
3567         E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3568         if (done != NULL)
3569                 *done = rx_sent;
3570         EM_RX_UNLOCK(adapter);
3571         return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
3572 }
3573
3574 #ifndef __NO_STRICT_ALIGNMENT
3575 /*
3576  * When jumbo frames are enabled we should realign the entire payload on
3577  * architectures with strict alignment. This is a serious design mistake
3578  * of the 8254x as it nullifies DMA operations. The 8254x only allows RX
3579  * buffer sizes of 2048/4096/8192/16384. What we really want is
3580  * 2048 - ETHER_ALIGN to align its payload. On architectures without
3581  * strict alignment restrictions the 8254x still performs unaligned
3582  * memory accesses, which reduce performance as well. To avoid copying
3583  * an entire frame just to realign it, we allocate a new mbuf, copy the
3584  * ethernet header into it, and prepend it to the existing mbuf chain.
3585  *
3586  * Be aware, best performance of the 8254x is achieved only when jumbo
3587  * frames are not used at all on architectures with strict alignment.
3588  */
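/*
 * Sketch of the fixup below: when the first mbuf has room, the whole
 * frame is simply shifted forward by ETHER_HDR_LEN within the cluster;
 * otherwise a new header mbuf is allocated, the 14 header bytes are
 * copied into it, the original data pointer is advanced past the header,
 * and the new mbuf is prepended to the chain, restoring payload alignment.
 */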
3589 static int
3590 lem_fixup_rx(struct adapter *adapter)
3591 {
3592         struct mbuf *m, *n;
3593         int error;
3594
3595         error = 0;
3596         m = adapter->fmp;
3597         if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3598                 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3599                 m->m_data += ETHER_HDR_LEN;
3600         } else {
3601                 MGETHDR(n, M_DONTWAIT, MT_DATA);
3602                 if (n != NULL) {
3603                         bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3604                         m->m_data += ETHER_HDR_LEN;
3605                         m->m_len -= ETHER_HDR_LEN;
3606                         n->m_len = ETHER_HDR_LEN;
3607                         M_MOVE_PKTHDR(n, m);
3608                         n->m_next = m;
3609                         adapter->fmp = n;
3610                 } else {
3611                         adapter->dropped_pkts++;
3612                         m_freem(adapter->fmp);
3613                         adapter->fmp = NULL;
3614                         error = ENOMEM;
3615                 }
3616         }
3617
3618         return (error);
3619 }
3620 #endif
3621
3622 /*********************************************************************
3623  *
3624  *  Verify that the hardware indicated that the checksum is valid.
3625  *  Inform the stack about the status of checksum so that stack
3626  *  doesn't spend time verifying the checksum.
3627  *
3628  *********************************************************************/
3629 static void
3630 lem_receive_checksum(struct adapter *adapter,
3631             struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3632 {
3633         /* 82543 or newer only */
3634         if ((adapter->hw.mac.type < e1000_82543) ||
3635             /* Ignore Checksum bit is set */
3636             (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3637                 mp->m_pkthdr.csum_flags = 0;
3638                 return;
3639         }
3640
3641         if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3642                 /* Did it pass? */
3643                 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3644                         /* IP Checksum Good */
3645                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3646                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3647
3648                 } else {
3649                         mp->m_pkthdr.csum_flags = 0;
3650                 }
3651         }
3652
3653         if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3654                 /* Did it pass? */
3655                 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3656                         mp->m_pkthdr.csum_flags |=
3657                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3658                         mp->m_pkthdr.csum_data = htons(0xffff);
3659                 }
3660         }
3661 }
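/*
 * For example, a frame whose IP and TCP (or UDP) checksums were both
 * verified by the hardware arrives at the stack with CSUM_IP_CHECKED,
 * CSUM_IP_VALID, CSUM_DATA_VALID and CSUM_PSEUDO_HDR set and
 * csum_data = 0xffff, so no software verification is performed.
 */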
3662
3663 /*
3664  * This routine is run via a vlan
3665  * config EVENT
3666  */
3667 static void
3668 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3669 {
3670         struct adapter  *adapter = ifp->if_softc;
3671         u32             index, bit;
3672
3673         if (ifp->if_softc !=  arg)   /* Not our event */
3674                 return;
3675
3676         if ((vtag == 0) || (vtag > 4095))       /* Invalid ID */
3677                 return;
3678
3679         EM_CORE_LOCK(adapter);
3680         index = (vtag >> 5) & 0x7F;
3681         bit = vtag & 0x1F;
3682         adapter->shadow_vfta[index] |= (1 << bit);
3683         ++adapter->num_vlans;
3684         /* Re-init to load the changes */
3685         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3686                 lem_init_locked(adapter);
3687         EM_CORE_UNLOCK(adapter);
3688 }
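/*
 * Worked example of the VFTA indexing above: vtag 100 gives
 * index = (100 >> 5) & 0x7F = 3 and bit = 100 & 0x1F = 4, so bit 4 of
 * shadow_vfta[3] is set (and cleared again in the unregister routine).
 */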
3689
3690 /*
3691  * This routine is run via a vlan
3692  * unconfig EVENT
3693  */
3694 static void
3695 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3696 {
3697         struct adapter  *adapter = ifp->if_softc;
3698         u32             index, bit;
3699
3700         if (ifp->if_softc !=  arg)
3701                 return;
3702
3703         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3704                 return;
3705
3706         EM_CORE_LOCK(adapter);
3707         index = (vtag >> 5) & 0x7F;
3708         bit = vtag & 0x1F;
3709         adapter->shadow_vfta[index] &= ~(1 << bit);
3710         --adapter->num_vlans;
3711         /* Re-init to load the changes */
3712         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3713                 lem_init_locked(adapter);
3714         EM_CORE_UNLOCK(adapter);
3715 }
3716
3717 static void
3718 lem_setup_vlan_hw_support(struct adapter *adapter)
3719 {
3720         struct e1000_hw *hw = &adapter->hw;
3721         u32             reg;
3722
3723         /*
3724         ** We get here thru init_locked, meaning a soft
3725         ** reset has already cleared the VFTA and other
3726         ** state; if no vlans have been registered there
3727         ** is nothing to do.
3728         */
3729         if (adapter->num_vlans == 0)
3730                 return;
3731
3732         /*
3733         ** A soft reset zeroes out the VFTA, so
3734         ** we need to repopulate it now.
3735         */
3736         for (int i = 0; i < EM_VFTA_SIZE; i++)
3737                 if (adapter->shadow_vfta[i] != 0)
3738                         E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3739                             i, adapter->shadow_vfta[i]);
3740
3741         reg = E1000_READ_REG(hw, E1000_CTRL);
3742         reg |= E1000_CTRL_VME;
3743         E1000_WRITE_REG(hw, E1000_CTRL, reg);
3744
3745         /* Enable the Filter Table */
3746         reg = E1000_READ_REG(hw, E1000_RCTL);
3747         reg &= ~E1000_RCTL_CFIEN;
3748         reg |= E1000_RCTL_VFE;
3749         E1000_WRITE_REG(hw, E1000_RCTL, reg);
3750
3751         /* Update the frame size */
3752         E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
3753             adapter->max_frame_size + VLAN_TAG_SIZE);
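        /*
         * E.g. with a standard 1500-byte MTU, max_frame_size is typically
         * 1518 (payload + 14-byte header + 4-byte CRC), so RLPML is
         * programmed to 1518 + 4 = 1522 to leave room for the 802.1Q tag.
         */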
3754 }
3755
3756 static void
3757 lem_enable_intr(struct adapter *adapter)
3758 {
3759         struct e1000_hw *hw = &adapter->hw;
3760         u32 ims_mask = IMS_ENABLE_MASK;
3761
3762         if (adapter->msix) {
3763                 E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
3764                 ims_mask |= EM_MSIX_MASK;
3765         } 
3766         E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
3767 }
3768
3769 static void
3770 lem_disable_intr(struct adapter *adapter)
3771 {
3772         struct e1000_hw *hw = &adapter->hw;
3773
3774         if (adapter->msix)
3775                 E1000_WRITE_REG(hw, EM_EIAC, 0);
3776         E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
3777 }
3778
3779 /*
3780  * Bit of a misnomer, what this really means is
3781  * to enable OS management of the system... aka
3782  * to disable special hardware management features 
3783  */
3784 static void
3785 lem_init_manageability(struct adapter *adapter)
3786 {
3787         /* A shared code workaround */
3788         if (adapter->has_manage) {
3789                 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3790                 /* disable hardware interception of ARP */
3791                 manc &= ~(E1000_MANC_ARP_EN);
3792                 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3793         }
3794 }
3795
3796 /*
3797  * Give control back to hardware management
3798  * controller if there is one.
3799  */
3800 static void
3801 lem_release_manageability(struct adapter *adapter)
3802 {
3803         if (adapter->has_manage) {
3804                 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3805
3806                 /* re-enable hardware interception of ARP */
3807                 manc |= E1000_MANC_ARP_EN;
3808                 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3809         }
3810 }
3811
3812 /*
3813  * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3814  * For ASF and Pass Through versions of f/w this means
3815  * that the driver is loaded. For AMT versions of the f/w
3816  * this means that the network i/f is open.
3817  */
3818 static void
3819 lem_get_hw_control(struct adapter *adapter)
3820 {
3821         u32 ctrl_ext;
3822
3823         ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3824         E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3825             ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3826         return;
3827 }
3828
3829 /*
3830  * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3831  * For ASF and Pass Through versions of f/w this means that
3832  * the driver is no longer loaded. For AMT versions of the
3833  * f/w this means that the network i/f is closed.
3834  */
3835 static void
3836 lem_release_hw_control(struct adapter *adapter)
3837 {
3838         u32 ctrl_ext;
3839
3840         if (!adapter->has_manage)
3841                 return;
3842
3843         ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3844         E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3845             ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3846         return;
3847 }
3848
3849 static int
3850 lem_is_valid_ether_addr(u8 *addr)
3851 {
3852         char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3853
3854         if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3855                 return (FALSE);
3856         }
3857
3858         return (TRUE);
3859 }
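/*
 * E.g. 00:1b:21:xx:xx:xx is accepted, while 01:00:5e:xx:xx:xx (multicast,
 * low bit of the first octet set) and 00:00:00:00:00:00 are rejected.
 */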
3860
3861 /*
3862 ** Parse the interface capabilities with regard
3863 ** to both system management and wake-on-lan for
3864 ** later use.
3865 */
3866 static void
3867 lem_get_wakeup(device_t dev)
3868 {
3869         struct adapter  *adapter = device_get_softc(dev);
3870         u16             eeprom_data = 0, device_id, apme_mask;
3871
3872         adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
3873         apme_mask = EM_EEPROM_APME;
3874
3875         switch (adapter->hw.mac.type) {
3876         case e1000_82542:
3877         case e1000_82543:
3878                 break;
3879         case e1000_82544:
3880                 e1000_read_nvm(&adapter->hw,
3881                     NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
3882                 apme_mask = EM_82544_APME;
3883                 break;
3884         case e1000_82546:
3885         case e1000_82546_rev_3:
3886                 if (adapter->hw.bus.func == 1) {
3887                         e1000_read_nvm(&adapter->hw,
3888                             NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3889                         break;
3890                 } else
3891                         e1000_read_nvm(&adapter->hw,
3892                             NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3893                 break;
3894         default:
3895                 e1000_read_nvm(&adapter->hw,
3896                     NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3897                 break;
3898         }
3899         if (eeprom_data & apme_mask)
3900                 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
3901         /*
3902          * We have the eeprom settings, now apply the special cases
3903          * where the eeprom may be wrong or the board won't support
3904          * wake on lan on a particular port
3905          */
3906         device_id = pci_get_device(dev);
3907         switch (device_id) {
3908         case E1000_DEV_ID_82546GB_PCIE:
3909                 adapter->wol = 0;
3910                 break;
3911         case E1000_DEV_ID_82546EB_FIBER:
3912         case E1000_DEV_ID_82546GB_FIBER:
3913                 /* Wake events only supported on port A for dual fiber
3914                  * regardless of eeprom setting */
3915                 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
3916                     E1000_STATUS_FUNC_1)
3917                         adapter->wol = 0;
3918                 break;
3919         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
3920                 /* if quad port adapter, disable WoL on all but port A */
3921                 if (global_quad_port_a != 0)
3922                         adapter->wol = 0;
3923                 /* Reset for multiple quad port adapters */
3924                 if (++global_quad_port_a == 4)
3925                         global_quad_port_a = 0;
3926                 break;
3927         }
3928         return;
3929 }
3930
3931
3932 /*
3933  * Enable PCI Wake On Lan capability
3934  */
3935 static void
3936 lem_enable_wakeup(device_t dev)
3937 {
3938         struct adapter  *adapter = device_get_softc(dev);
3939         struct ifnet    *ifp = adapter->ifp;
3940         u32             pmc, ctrl, ctrl_ext, rctl;
3941         u16             status;
3942
3943         if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
3944                 return;
3945
3946         /* Advertise the wakeup capability */
3947         ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
3948         ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
3949         E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
3950         E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
3951
3952         /* Keep the laser running on Fiber adapters */
3953         if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
3954             adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
3955                 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3956                 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
3957                 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
3958         }
3959
3960         /*
3961         ** Determine type of Wakeup: note that wol
3962         ** is set with all bits on by default.
3963         */
3964         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
3965                 adapter->wol &= ~E1000_WUFC_MAG;
3966
3967         if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
3968                 adapter->wol &= ~E1000_WUFC_MC;
3969         else {
3970                 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3971                 rctl |= E1000_RCTL_MPE;
3972                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3973         }
3974
3975         if (adapter->hw.mac.type == e1000_pchlan) {
3976                 if (lem_enable_phy_wakeup(adapter))
3977                         return;
3978         } else {
3979                 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
3980                 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
3981         }
3982
3983
3984         /* Request PME */
3985         status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
3986         status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3987         if (ifp->if_capenable & IFCAP_WOL)
3988                 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3989         pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
3990
3991         return;
3992 }
3993
3994 /*
3995 ** WOL in the newer chipset interfaces (pchlan)
3996 ** requires things to be copied into the phy
3997 */
3998 static int
3999 lem_enable_phy_wakeup(struct adapter *adapter)
4000 {
4001         struct e1000_hw *hw = &adapter->hw;
4002         u32 mreg, ret = 0;
4003         u16 preg;
4004
4005         /* copy MAC RARs to PHY RARs */
4006         for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4007                 mreg = E1000_READ_REG(hw, E1000_RAL(i));
4008                 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
4009                 e1000_write_phy_reg(hw, BM_RAR_M(i),
4010                     (u16)((mreg >> 16) & 0xFFFF));
4011                 mreg = E1000_READ_REG(hw, E1000_RAH(i));
4012                 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
4013                 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
4014                     (u16)((mreg >> 16) & 0xFFFF));
4015         }
4016
4017         /* copy MAC MTA to PHY MTA */
4018         for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4019                 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4020                 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4021                 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4022                     (u16)((mreg >> 16) & 0xFFFF));
4023         }
4024
4025         /* configure PHY Rx Control register */
4026         e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
4027         mreg = E1000_READ_REG(hw, E1000_RCTL);
4028         if (mreg & E1000_RCTL_UPE)
4029                 preg |= BM_RCTL_UPE;
4030         if (mreg & E1000_RCTL_MPE)
4031                 preg |= BM_RCTL_MPE;
4032         preg &= ~(BM_RCTL_MO_MASK);
4033         if (mreg & E1000_RCTL_MO_3)
4034                 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4035                                 << BM_RCTL_MO_SHIFT);
4036         if (mreg & E1000_RCTL_BAM)
4037                 preg |= BM_RCTL_BAM;
4038         if (mreg & E1000_RCTL_PMCF)
4039                 preg |= BM_RCTL_PMCF;
4040         mreg = E1000_READ_REG(hw, E1000_CTRL);
4041         if (mreg & E1000_CTRL_RFCE)
4042                 preg |= BM_RCTL_RFCE;
4043         e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
4044
4045         /* enable PHY wakeup in MAC register */
4046         E1000_WRITE_REG(hw, E1000_WUC,
4047             E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4048         E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
4049
4050         /* configure and enable PHY wakeup in PHY registers */
4051         e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
4052         e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4053
4054         /* activate PHY wakeup */
4055         ret = hw->phy.ops.acquire(hw);
4056         if (ret) {
4057                 printf("Could not acquire PHY\n");
4058                 return ret;
4059         }
4060         e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4061                                  (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4062         ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
4063         if (ret) {
4064                 printf("Could not read PHY page 769\n");
4065                 goto out;
4066         }
4067         preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4068         ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
4069         if (ret)
4070                 printf("Could not set PHY Host Wakeup bit\n");
4071 out:
4072         hw->phy.ops.release(hw);
4073
4074         return ret;
4075 }
4076
4077 static void
4078 lem_led_func(void *arg, int onoff)
4079 {
4080         struct adapter  *adapter = arg;
4081
4082         EM_CORE_LOCK(adapter);
4083         if (onoff) {
4084                 e1000_setup_led(&adapter->hw);
4085                 e1000_led_on(&adapter->hw);
4086         } else {
4087                 e1000_led_off(&adapter->hw);
4088                 e1000_cleanup_led(&adapter->hw);
4089         }
4090         EM_CORE_UNLOCK(adapter);
4091 }
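/*
 * led(4) callback: the attach path hands this function to led_create(),
 * so the adapter LED can be driven from userland through the matching
 * /dev/led node, e.g. (assuming the first adapter attaches as em0):
 *
 *      echo 1 > /dev/led/em0          # LED on
 *      echo 0 > /dev/led/em0          # LED off
 */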
4092
4093 /*********************************************************************
4094 * 82544 coexistence issue workaround.
4095 *    There are two issues.
4096 *       1. Transmit hang issue.
4097 *    To detect this issue, the following equation can be used:
4098 *         SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4099 *         If SUM[3:0] is between 1 and 4, this issue occurs.
4100 *
4101 *       2. DAC issue.
4102 *    To detect this issue, the following equation can be used:
4103 *         SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4104 *         If SUM[3:0] is between 9 and 0xC, this issue occurs.
4105 *
4106 *
4107 *    WORKAROUND:
4108 *         Make sure the last descriptor does not end with SUM[3:0]
4109 *         equal to 1, 2, 3, 4 (hang) or 9, A, B, C (DAC).
4110 *
4111 *************************************************************************/
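/*
 * Worked example (illustrative values): a segment whose buffer has
 * ADDR[2:0] = 0x6 and length 0x22 gives SUM = (0x6 + 0x2) & 0xF = 0x8,
 * which is outside both problem ranges, so a single descriptor is used.
 * With ADDR[2:0] = 0x2 and length 0x11, SUM = 0x3 falls in the hang
 * range, so the last four bytes are split into a second descriptor.
 */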
4112 static u32
4113 lem_fill_descriptors (bus_addr_t address, u32 length,
4114                 PDESC_ARRAY desc_array)
4115 {
4116         u32 safe_terminator;
4117
4118         /* The issue is sensitive to both length and address. */
4119         /* Segments of four bytes or less always fit in a single descriptor. */
4120         if (length <= 4) {
4121                 desc_array->descriptor[0].address = address;
4122                 desc_array->descriptor[0].length = length;
4123                 desc_array->elements = 1;
4124                 return (desc_array->elements);
4125         }
4126         safe_terminator = (u32)((((u32)address & 0x7) +
4127             (length & 0xF)) & 0xF);
4128         /* If SUM falls outside both 0x1-0x4 and 0x9-0xC, one descriptor is safe. */
4129         if (safe_terminator == 0   ||
4130             (safe_terminator > 4   &&
4131              safe_terminator < 9)  ||
4132             (safe_terminator > 0xC &&
4133              safe_terminator <= 0xF)) {
4134                 desc_array->descriptor[0].address = address;
4135                 desc_array->descriptor[0].length = length;
4136                 desc_array->elements = 1;
4137                 return (desc_array->elements);
4138         }
4139
4140         desc_array->descriptor[0].address = address;
4141         desc_array->descriptor[0].length = length - 4;
4142         desc_array->descriptor[1].address = address + (length - 4);
4143         desc_array->descriptor[1].length = 4;
4144         desc_array->elements = 2;
4145         return (desc_array->elements);
4146 }
4147
4148 /**********************************************************************
4149  *
4150  *  Update the board statistics counters.
4151  *
4152  **********************************************************************/
4153 static void
4154 lem_update_stats_counters(struct adapter *adapter)
4155 {
4156         struct ifnet   *ifp;
4157
4158         if (adapter->hw.phy.media_type == e1000_media_type_copper ||
4159             (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4160                 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4161                 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4162         }
4163         adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4164         adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4165         adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4166         adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4167
4168         adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4169         adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4170         adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4171         adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4172         adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4173         adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4174         adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4175         adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4176         adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4177         adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4178         adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4179         adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4180         adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4181         adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4182         adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4183         adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4184         adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4185         adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4186         adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4187         adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4188
4189         /* For the 64-bit byte counters the low dword must be read first. */
4190         /* Both registers clear on the read of the high dword */
4191
4192         adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
4193             ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
4194         adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
4195             ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);
4196
4197         adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
4198         adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
4199         adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
4200         adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
4201         adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
4202
4203         adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
4204         adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
4205
4206         adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
4207         adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
4208         adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
4209         adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
4210         adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
4211         adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
4212         adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
4213         adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
4214         adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
4215         adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
4216
4217         if (adapter->hw.mac.type >= e1000_82543) {
4218                 adapter->stats.algnerrc +=
4219                     E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
4220                 adapter->stats.rxerrc +=
4221                     E1000_READ_REG(&adapter->hw, E1000_RXERRC);
4222                 adapter->stats.tncrs +=
4223                     E1000_READ_REG(&adapter->hw, E1000_TNCRS);
4224                 adapter->stats.cexterr +=
4225                     E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
4226                 adapter->stats.tsctc +=
4227                     E1000_READ_REG(&adapter->hw, E1000_TSCTC);
4228                 adapter->stats.tsctfc +=
4229                     E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
4230         }
4231         ifp = adapter->ifp;
4232
4233         ifp->if_collisions = adapter->stats.colc;
4234
4235         /* Rx Errors */
4236         ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
4237             adapter->stats.crcerrs + adapter->stats.algnerrc +
4238             adapter->stats.ruc + adapter->stats.roc +
4239             adapter->stats.mpc + adapter->stats.cexterr;
4240
4241         /* Tx Errors */
4242         ifp->if_oerrors = adapter->stats.ecol +
4243             adapter->stats.latecol + adapter->watchdog_events;
4244 }
4245
4246 /* Export a single 32-bit register via a read-only sysctl. */
4247 static int
4248 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4249 {
4250         struct adapter *adapter;
4251         u_int val;
4252
4253         adapter = oidp->oid_arg1;
4254         val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4255         return (sysctl_handle_int(oidp, &val, 0, req));
4256 }
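/*
 * The register-dump sysctls created in lem_add_hw_stats() below use this
 * handler, e.g. (assuming the first adapter attaches as em0):
 *
 *      sysctl dev.em.0.device_control
 *      sysctl dev.em.0.rxd_head
 */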
4257
4258 /*
4259  * Add sysctl variables, one per statistic, to the system.
4260  */
4261 static void
4262 lem_add_hw_stats(struct adapter *adapter)
4263 {
4264         device_t dev = adapter->dev;
4265
4266         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4267         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4268         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4269         struct e1000_hw_stats *stats = &adapter->stats;
4270
4271         struct sysctl_oid *stat_node;
4272         struct sysctl_oid_list *stat_list;
4273
4274         /* Driver Statistics */
4275         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail", 
4276                          CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4277                          "Std mbuf failed");
4278         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail", 
4279                          CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4280                          "Std mbuf cluster failed");
4281         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 
4282                         CTLFLAG_RD, &adapter->dropped_pkts,
4283                         "Driver dropped packets");
4284         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", 
4285                         CTLFLAG_RD, &adapter->no_tx_dma_setup,
4286                         "Driver tx dma failure in xmit");
4287         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4288                         CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4289                         "Not enough tx descriptors failure in xmit");
4290         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4291                         CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4292                         "Not enough tx descriptors failure in xmit");
4293         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4294                         CTLFLAG_RD, &adapter->rx_overruns,
4295                         "RX overruns");
4296         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4297                         CTLFLAG_RD, &adapter->watchdog_events,
4298                         "Watchdog timeouts");
4299
4300         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4301                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4302                         lem_sysctl_reg_handler, "IU",
4303                         "Device Control Register");
4304         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4305                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4306                         lem_sysctl_reg_handler, "IU",
4307                         "Receiver Control Register");
4308         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4309                         CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4310                         "Flow Control High Watermark");
4311         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", 
4312                         CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4313                         "Flow Control Low Watermark");
4314         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4315                         CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4316                         "TX FIFO workaround events");
4317         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4318                         CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4319                         "TX FIFO resets");
4320
4321         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head", 
4322                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4323                         lem_sysctl_reg_handler, "IU",
4324                         "Transmit Descriptor Head");
4325         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail", 
4326                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4327                         lem_sysctl_reg_handler, "IU",
4328                         "Transmit Descriptor Tail");
4329         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head", 
4330                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4331                         lem_sysctl_reg_handler, "IU",
4332                         "Receive Descriptor Head");
4333         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail", 
4334                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4335                         lem_sysctl_reg_handler, "IU",
4336                         "Receive Descriptor Tail");
4337
4338
4339         /* MAC stats get their own sub node */
4340
4341         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
4342                                     CTLFLAG_RD, NULL, "Statistics");
4343         stat_list = SYSCTL_CHILDREN(stat_node);
4344
4345         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4346                         CTLFLAG_RD, &stats->ecol,
4347                         "Excessive collisions");
4348         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4349                         CTLFLAG_RD, &stats->scc,
4350                         "Single collisions");
4351         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4352                         CTLFLAG_RD, &stats->mcc,
4353                         "Multiple collisions");
4354         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4355                         CTLFLAG_RD, &stats->latecol,
4356                         "Late collisions");
4357         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4358                         CTLFLAG_RD, &stats->colc,
4359                         "Collision Count");
4360         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4361                         CTLFLAG_RD, &adapter->stats.symerrs,
4362                         "Symbol Errors");
4363         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4364                         CTLFLAG_RD, &adapter->stats.sec,
4365                         "Sequence Errors");
4366         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4367                         CTLFLAG_RD, &adapter->stats.dc,
4368                         "Defer Count");
4369         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4370                         CTLFLAG_RD, &adapter->stats.mpc,
4371                         "Missed Packets");
4372         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4373                         CTLFLAG_RD, &adapter->stats.rnbc,
4374                         "Receive No Buffers");
4375         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4376                         CTLFLAG_RD, &adapter->stats.ruc,
4377                         "Receive Undersize");
4378         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4379                         CTLFLAG_RD, &adapter->stats.rfc,
4380                         "Fragmented Packets Received");
4381         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4382                         CTLFLAG_RD, &adapter->stats.roc,
4383                         "Oversized Packets Received");
4384         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4385                         CTLFLAG_RD, &adapter->stats.rjc,
4386                         "Received Jabber");
4387         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4388                         CTLFLAG_RD, &adapter->stats.rxerrc,
4389                         "Receive Errors");
4390         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4391                         CTLFLAG_RD, &adapter->stats.crcerrs,
4392                         "CRC errors");
4393         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4394                         CTLFLAG_RD, &adapter->stats.algnerrc,
4395                         "Alignment Errors");
4396         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4397                         CTLFLAG_RD, &adapter->stats.cexterr,
4398                         "Collision/Carrier extension errors");
4399         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4400                         CTLFLAG_RD, &adapter->stats.xonrxc,
4401                         "XON Received");
4402         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4403                         CTLFLAG_RD, &adapter->stats.xontxc,
4404                         "XON Transmitted");
4405         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4406                         CTLFLAG_RD, &adapter->stats.xoffrxc,
4407                         "XOFF Received");
4408         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4409                         CTLFLAG_RD, &adapter->stats.xofftxc,
4410                         "XOFF Transmitted");
4411
4412         /* Packet Reception Stats */
4413         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4414                         CTLFLAG_RD, &adapter->stats.tpr,
4415                         "Total Packets Received");
4416         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4417                         CTLFLAG_RD, &adapter->stats.gprc,
4418                         "Good Packets Received");
4419         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4420                         CTLFLAG_RD, &adapter->stats.bprc,
4421                         "Broadcast Packets Received");
4422         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4423                         CTLFLAG_RD, &adapter->stats.mprc,
4424                         "Multicast Packets Received");
4425         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4426                         CTLFLAG_RD, &adapter->stats.prc64,
4427                         "64 byte frames received");
4428         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4429                         CTLFLAG_RD, &adapter->stats.prc127,
4430                         "65-127 byte frames received");
4431         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4432                         CTLFLAG_RD, &adapter->stats.prc255,
4433                         "128-255 byte frames received");
4434         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4435                         CTLFLAG_RD, &adapter->stats.prc511,
4436                         "256-511 byte frames received");
4437         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4438                         CTLFLAG_RD, &adapter->stats.prc1023,
4439                         "512-1023 byte frames received");
4440         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4441                         CTLFLAG_RD, &adapter->stats.prc1522,
4442                         "1024-1522 byte frames received");
4443         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4444                         CTLFLAG_RD, &adapter->stats.gorc, 
4445                         "Good Octets Received");
4446
4447         /* Packet Transmission Stats */
4448         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4449                         CTLFLAG_RD, &adapter->stats.gotc, 
4450                         "Good Octets Transmitted"); 
4451         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4452                         CTLFLAG_RD, &adapter->stats.tpt,
4453                         "Total Packets Transmitted");
4454         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4455                         CTLFLAG_RD, &adapter->stats.gptc,
4456                         "Good Packets Transmitted");
4457         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4458                         CTLFLAG_RD, &adapter->stats.bptc,
4459                         "Broadcast Packets Transmitted");
4460         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4461                         CTLFLAG_RD, &adapter->stats.mptc,
4462                         "Multicast Packets Transmitted");
4463         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4464                         CTLFLAG_RD, &adapter->stats.ptc64,
4465                         "64 byte frames transmitted");
4466         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4467                         CTLFLAG_RD, &adapter->stats.ptc127,
4468                         "65-127 byte frames transmitted");
4469         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4470                         CTLFLAG_RD, &adapter->stats.ptc255,
4471                         "128-255 byte frames transmitted");
4472         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4473                         CTLFLAG_RD, &adapter->stats.ptc511,
4474                         "256-511 byte frames transmitted");
4475         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4476                         CTLFLAG_RD, &adapter->stats.ptc1023,
4477                         "512-1023 byte frames transmitted");
4478         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4479                         CTLFLAG_RD, &adapter->stats.ptc1522,
4480                         "1024-1522 byte frames transmitted");
4481         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4482                         CTLFLAG_RD, &adapter->stats.tsctc,
4483                         "TSO Contexts Transmitted");
4484         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4485                         CTLFLAG_RD, &adapter->stats.tsctfc,
4486                         "TSO Contexts Failed");
4487 }
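/*
 * Everything above hangs off the device's sysctl tree, so e.g. (assuming
 * the first adapter attaches as em0) the MAC statistics can be read with:
 *
 *      sysctl dev.em.0.mac_stats
 */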
4488
4489 /**********************************************************************
4490  *
4491  *  This routine provides a way to dump out the adapter eeprom,
4492  *  often a useful debug/service tool.  Only the first 32 words are
4493  *  dumped; the data that matters lives within that range.
4494  *
4495  **********************************************************************/
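/*
 * Writing 1 to the sysctl node this handler is attached to (registered
 * in the attach path, typically as an "nvm" node under the device's
 * sysctl tree) triggers the dump; any other value is ignored.
 */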
4496
4497 static int
4498 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4499 {
4500         struct adapter *adapter;
4501         int error;
4502         int result;
4503
4504         result = -1;
4505         error = sysctl_handle_int(oidp, &result, 0, req);
4506
4507         if (error || !req->newptr)
4508                 return (error);
4509
4510         /*
4511          * This value will cause a hex dump of the
4512          * first 32 16-bit words of the EEPROM to
4513          * the screen.
4514          */
4515         if (result == 1) {
4516                 adapter = (struct adapter *)arg1;
4517                 lem_print_nvm_info(adapter);
4518         }
4519
4520         return (error);
4521 }
4522
4523 static void
4524 lem_print_nvm_info(struct adapter *adapter)
4525 {
4526         u16     eeprom_data;
4527         int     i, j, row = 0;
4528
4529         /* It's a bit crude, but it gets the job done. */
4530         printf("\nInterface EEPROM Dump:\n");
4531         printf("Offset\n0x0000  ");
4532         for (i = 0, j = 0; i < 32; i++, j++) {
4533                 if (j == 8) { /* Start a new row with its offset label */
4534                         j = 0; ++row;
4535                         printf("\n0x00%x0  ", row);
4536                 }
4537                 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4538                 printf("%04x ", eeprom_data);
4539         }
4540         printf("\n");
4541 }
4542
4543 static int
4544 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4545 {
4546         struct em_int_delay_info *info;
4547         struct adapter *adapter;
4548         u32 regval;
4549         int error;
4550         int usecs;
4551         int ticks;
4552
4553         info = (struct em_int_delay_info *)arg1;
4554         usecs = info->value;
4555         error = sysctl_handle_int(oidp, &usecs, 0, req);
4556         if (error != 0 || req->newptr == NULL)
4557                 return (error);
4558         if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4559                 return (EINVAL);
4560         info->value = usecs;
4561         ticks = EM_USECS_TO_TICKS(usecs);
4562
4563         adapter = info->adapter;
4564
4565         EM_CORE_LOCK(adapter);
4566         regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4567         regval = (regval & ~0xffff) | (ticks & 0xffff);
4568         /* Handle a few special cases. */
4569         switch (info->offset) {
4570         case E1000_RDTR:
4571                 break;
4572         case E1000_TIDV:
4573                 if (ticks == 0) {
4574                         adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4575                         /* Don't write 0 into the TIDV register. */
4576                         regval++;
4577                 } else
4578                         adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4579                 break;
4580         }
4581         E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4582         EM_CORE_UNLOCK(adapter);
4583         return (0);
4584 }
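/*
 * The value is supplied in microseconds and stored, converted to hardware
 * ticks by EM_USECS_TO_TICKS(), in the low 16 bits of the delay register.
 * Illustrative use (assuming the first adapter attaches as em0 and the
 * node was registered as "rx_int_delay"):
 *
 *      sysctl dev.em.0.rx_int_delay=32
 */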
4585
4586 static void
4587 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4588         const char *description, struct em_int_delay_info *info,
4589         int offset, int value)
4590 {
4591         info->adapter = adapter;
4592         info->offset = offset;
4593         info->value = value;
4594         SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4595             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4596             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4597             info, 0, lem_sysctl_int_delay, "I", description);
4598 }
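/*
 * A typical caller from the attach path looks roughly like the sketch
 * below (illustrative, not a verbatim copy of the attach code):
 *
 *      lem_add_int_delay_sysctl(adapter, "tx_int_delay",
 *          "transmit interrupt delay in usecs", &adapter->tx_int_delay,
 *          E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
 */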
4599
4600 static void
4601 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4602         const char *description, int *limit, int value)
4603 {
4604         *limit = value;
4605         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4606             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4607             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4608 }
4609
4610 #ifndef EM_LEGACY_IRQ
4611 static void
4612 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4613         const char *description, int *limit, int value)
4614 {
4615         *limit = value;
4616         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4617             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4618             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4619 }
4620 #endif