]> CyberLeo.Net >> Repos - FreeBSD/releng/8.2.git/blob - sys/dev/e1000/if_lem.c
Copy stable/8 to releng/8.2 in preparation for FreeBSD-8.2 release.
[FreeBSD/releng/8.2.git] / sys / dev / e1000 / if_lem.c
1 /******************************************************************************
2
3   Copyright (c) 2001-2010, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
37 #include "opt_inet.h"
38 #endif
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/bus.h>
43 #include <sys/endian.h>
44 #include <sys/kernel.h>
45 #include <sys/kthread.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/module.h>
49 #include <sys/rman.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <sys/taskqueue.h>
54 #include <sys/eventhandler.h>
55 #include <machine/bus.h>
56 #include <machine/resource.h>
57
58 #include <net/bpf.h>
59 #include <net/ethernet.h>
60 #include <net/if.h>
61 #include <net/if_arp.h>
62 #include <net/if_dl.h>
63 #include <net/if_media.h>
64
65 #include <net/if_types.h>
66 #include <net/if_vlan_var.h>
67
68 #include <netinet/in_systm.h>
69 #include <netinet/in.h>
70 #include <netinet/if_ether.h>
71 #include <netinet/ip.h>
72 #include <netinet/ip6.h>
73 #include <netinet/tcp.h>
74 #include <netinet/udp.h>
75
76 #include <machine/in_cksum.h>
77 #include <dev/led/led.h>
78 #include <dev/pci/pcivar.h>
79 #include <dev/pci/pcireg.h>
80
81 #include "e1000_api.h"
82 #include "if_lem.h"
83
/*********************************************************************
 *  Legacy Em Driver version:
 *********************************************************************/
/* Reported in the probe description string appended to the NIC branding */
char lem_driver_version[] = "1.0.3";
88
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into e1000_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static em_vendor_info_t lem_vendor_info_array[] =
{
	/* Intel(R) PRO/1000 Network Connection */
	/* 82540 family */
	{ 0x8086, E1000_DEV_ID_82540EM,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EM_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82540EP_LP,	PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82541 family */
	{ 0x8086, E1000_DEV_ID_82541EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541ER_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_LF,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82541GI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},

	{ 0x8086, E1000_DEV_ID_82542,		PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82543 family */
	{ 0x8086, E1000_DEV_ID_82543GC_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82543GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82544 family */
	{ 0x8086, E1000_DEV_ID_82544EI_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544EI_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82544GC_LOM,	PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82545 family */
	{ 0x8086, E1000_DEV_ID_82545EM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545EM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82545GM_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82546 family (dual-port) */
	{ 0x8086, E1000_DEV_ID_82546EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_PCIE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
						PCI_ANY_ID, PCI_ANY_ID, 0},

	/* 82547 family */
	{ 0x8086, E1000_DEV_ID_82547EI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547EI_MOBILE,	PCI_ANY_ID, PCI_ANY_ID, 0},
	{ 0x8086, E1000_DEV_ID_82547GI,		PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{ 0, 0, 0, 0, 0}
};
149
/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/
/* Indexed by the last field of lem_vendor_info_array entries */

static char *lem_strings[] = {
	"Intel(R) PRO/1000 Legacy Network Connection"
};
157
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* newbus device interface entry points */
static int	lem_probe(device_t);
static int	lem_attach(device_t);
static int	lem_detach(device_t);
static int	lem_shutdown(device_t);
static int	lem_suspend(device_t);
static int	lem_resume(device_t);
/* ifnet entry points and media handling */
static void	lem_start(struct ifnet *);
static void	lem_start_locked(struct ifnet *ifp);
static int	lem_ioctl(struct ifnet *, u_long, caddr_t);
static void	lem_init(void *);
static void	lem_init_locked(struct adapter *);
static void	lem_stop(void *);
static void	lem_media_status(struct ifnet *, struct ifmediareq *);
static int	lem_media_change(struct ifnet *);
/* Hardware identification, resource setup and ring management */
static void	lem_identify_hardware(struct adapter *);
static int	lem_allocate_pci_resources(struct adapter *);
static int	lem_allocate_irq(struct adapter *adapter);
static void	lem_free_pci_resources(struct adapter *);
static void	lem_local_timer(void *);
static int	lem_hardware_init(struct adapter *);
static int	lem_setup_interface(device_t, struct adapter *);
static void	lem_setup_transmit_structures(struct adapter *);
static void	lem_initialize_transmit_unit(struct adapter *);
static int	lem_setup_receive_structures(struct adapter *);
static void	lem_initialize_receive_unit(struct adapter *);
static void	lem_enable_intr(struct adapter *);
static void	lem_disable_intr(struct adapter *);
static void	lem_free_transmit_structures(struct adapter *);
static void	lem_free_receive_structures(struct adapter *);
static void	lem_update_stats_counters(struct adapter *);
static void	lem_add_hw_stats(struct adapter *adapter);
/* TX/RX completion processing */
static void	lem_txeof(struct adapter *);
static void	lem_tx_purge(struct adapter *);
static int	lem_allocate_receive_structures(struct adapter *);
static int	lem_allocate_transmit_structures(struct adapter *);
static bool	lem_rxeof(struct adapter *, int, int *);
#ifndef __NO_STRICT_ALIGNMENT
static int	lem_fixup_rx(struct adapter *);
#endif
static void	lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
		    struct mbuf *);
static void	lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
		    u32 *, u32 *);
static void	lem_set_promisc(struct adapter *);
static void	lem_disable_promisc(struct adapter *);
static void	lem_set_multi(struct adapter *);
static void	lem_update_link_status(struct adapter *);
static int	lem_get_buf(struct adapter *, int);
/* VLAN support */
static void	lem_register_vlan(void *, struct ifnet *, u16);
static void	lem_unregister_vlan(void *, struct ifnet *, u16);
static void	lem_setup_vlan_hw_support(struct adapter *);
static int	lem_xmit(struct adapter *, struct mbuf **);
static void	lem_smartspeed(struct adapter *);
/* 82547 TX FIFO workaround helpers */
static int	lem_82547_fifo_workaround(struct adapter *, int);
static void	lem_82547_update_fifo_head(struct adapter *, int);
static int	lem_82547_tx_fifo_reset(struct adapter *);
static void	lem_82547_move_tail(void *);
static int	lem_dma_malloc(struct adapter *, bus_size_t,
		    struct em_dma_alloc *, int);
static void	lem_dma_free(struct adapter *, struct em_dma_alloc *);
/* sysctl handlers and helpers */
static int	lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
static void	lem_print_nvm_info(struct adapter *);
static int	lem_is_valid_ether_addr(u8 *);
static u32	lem_fill_descriptors (bus_addr_t address, u32 length,
		    PDESC_ARRAY desc_array);
static int	lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void	lem_add_int_delay_sysctl(struct adapter *, const char *,
		    const char *, struct em_int_delay_info *, int, int);
static void	lem_set_flow_cntrl(struct adapter *, const char *,
		    const char *, int *, int);
/* Management and WOL Support */
static void	lem_init_manageability(struct adapter *);
static void	lem_release_manageability(struct adapter *);
static void	lem_get_hw_control(struct adapter *);
static void	lem_release_hw_control(struct adapter *);
static void	lem_get_wakeup(device_t);
static void	lem_enable_wakeup(device_t);
static int	lem_enable_phy_wakeup(struct adapter *);
static void	lem_led_func(void *, int);

#ifdef EM_LEGACY_IRQ
static void	lem_intr(void *);
#else /* FAST IRQ */
static int	lem_irq_fast(void *);
static void	lem_handle_rxtx(void *context, int pending);
static void	lem_handle_link(void *context, int pending);
static void	lem_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
#endif /* ~EM_LEGACY_IRQ */

#ifdef DEVICE_POLLING
static poll_handler_t lem_poll;
#endif /* POLLING */
254
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t lem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, lem_probe),
	DEVMETHOD(device_attach, lem_attach),
	DEVMETHOD(device_detach, lem_detach),
	DEVMETHOD(device_shutdown, lem_shutdown),
	DEVMETHOD(device_suspend, lem_suspend),
	DEVMETHOD(device_resume, lem_resume),
	{0, 0}
};

static driver_t lem_driver = {
	"em", lem_methods, sizeof(struct adapter),
};

/* Shares the "em" devclass declared by the newer em(4) driver */
extern devclass_t em_devclass;
DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
MODULE_DEPEND(lem, pci, 1, 1, 1);
MODULE_DEPEND(lem, ether, 1, 1, 1);
278
/*********************************************************************
 *  Tunable default values.
 *********************************************************************/

/* Convert between hardware delay units (1.024 usec ticks) and usecs */
#define EM_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)

/* Interrupt moderation defaults, overridable via loader tunables below */
static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
/* Descriptor ring sizes; validated against hardware limits in attach */
static int lem_rxd = EM_DEFAULT_RXD;
static int lem_txd = EM_DEFAULT_TXD;
static int lem_smart_pwr_down = FALSE;

/* Controls whether promiscuous also shows bad packets */
static int lem_debug_sbp = FALSE;

TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &lem_rxd);
TUNABLE_INT("hw.em.txd", &lem_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);

#ifndef EM_LEGACY_IRQ
/* How many packets rxeof tries to clean at a time */
static int lem_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
#endif

/* Flow control setting - default to FULL */
static int lem_fc_setting = e1000_fc_full;
TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);

/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;
318
319 /*********************************************************************
320  *  Device identification routine
321  *
322  *  em_probe determines if the driver should be loaded on
323  *  adapter based on PCI vendor/device id of the adapter.
324  *
325  *  return BUS_PROBE_DEFAULT on success, positive on failure
326  *********************************************************************/
327
328 static int
329 lem_probe(device_t dev)
330 {
331         char            adapter_name[60];
332         u16             pci_vendor_id = 0;
333         u16             pci_device_id = 0;
334         u16             pci_subvendor_id = 0;
335         u16             pci_subdevice_id = 0;
336         em_vendor_info_t *ent;
337
338         INIT_DEBUGOUT("em_probe: begin");
339
340         pci_vendor_id = pci_get_vendor(dev);
341         if (pci_vendor_id != EM_VENDOR_ID)
342                 return (ENXIO);
343
344         pci_device_id = pci_get_device(dev);
345         pci_subvendor_id = pci_get_subvendor(dev);
346         pci_subdevice_id = pci_get_subdevice(dev);
347
348         ent = lem_vendor_info_array;
349         while (ent->vendor_id != 0) {
350                 if ((pci_vendor_id == ent->vendor_id) &&
351                     (pci_device_id == ent->device_id) &&
352
353                     ((pci_subvendor_id == ent->subvendor_id) ||
354                     (ent->subvendor_id == PCI_ANY_ID)) &&
355
356                     ((pci_subdevice_id == ent->subdevice_id) ||
357                     (ent->subdevice_id == PCI_ANY_ID))) {
358                         sprintf(adapter_name, "%s %s",
359                                 lem_strings[ent->index],
360                                 lem_driver_version);
361                         device_set_desc_copy(dev, adapter_name);
362                         return (BUS_PROBE_DEFAULT);
363                 }
364                 ent++;
365         }
366
367         return (ENXIO);
368 }
369
370 /*********************************************************************
371  *  Device initialization routine
372  *
373  *  The attach entry point is called when the driver is being loaded.
374  *  This routine identifies the type of hardware, allocates all resources
375  *  and initializes the hardware.
376  *
377  *  return 0 on success, positive on failure
378  *********************************************************************/
379
380 static int
381 lem_attach(device_t dev)
382 {
383         struct adapter  *adapter;
384         int             tsize, rsize;
385         int             error = 0;
386
387         INIT_DEBUGOUT("lem_attach: begin");
388
389         adapter = device_get_softc(dev);
390         adapter->dev = adapter->osdep.dev = dev;
391         EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
392         EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
393         EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
394
395         /* SYSCTL stuff */
396         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
397             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
398             OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
399             lem_sysctl_nvm_info, "I", "NVM Information");
400
401         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
402         callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
403
404         /* Determine hardware and mac info */
405         lem_identify_hardware(adapter);
406
407         /* Setup PCI resources */
408         if (lem_allocate_pci_resources(adapter)) {
409                 device_printf(dev, "Allocation of PCI resources failed\n");
410                 error = ENXIO;
411                 goto err_pci;
412         }
413
414         /* Do Shared Code initialization */
415         if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
416                 device_printf(dev, "Setup of Shared code failed\n");
417                 error = ENXIO;
418                 goto err_pci;
419         }
420
421         e1000_get_bus_info(&adapter->hw);
422
423         /* Set up some sysctls for the tunable interrupt delays */
424         lem_add_int_delay_sysctl(adapter, "rx_int_delay",
425             "receive interrupt delay in usecs", &adapter->rx_int_delay,
426             E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
427         lem_add_int_delay_sysctl(adapter, "tx_int_delay",
428             "transmit interrupt delay in usecs", &adapter->tx_int_delay,
429             E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
430         if (adapter->hw.mac.type >= e1000_82540) {
431                 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
432                     "receive interrupt delay limit in usecs",
433                     &adapter->rx_abs_int_delay,
434                     E1000_REGISTER(&adapter->hw, E1000_RADV),
435                     lem_rx_abs_int_delay_dflt);
436                 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
437                     "transmit interrupt delay limit in usecs",
438                     &adapter->tx_abs_int_delay,
439                     E1000_REGISTER(&adapter->hw, E1000_TADV),
440                     lem_tx_abs_int_delay_dflt);
441         }
442
443 #ifndef EM_LEGACY_IRQ
444         /* Sysctls for limiting the amount of work done in the taskqueue */
445         lem_add_rx_process_limit(adapter, "rx_processing_limit",
446             "max number of rx packets to process", &adapter->rx_process_limit,
447             lem_rx_process_limit);
448 #endif
449
450         /* Sysctl for setting the interface flow control */
451         lem_set_flow_cntrl(adapter, "flow_control",
452             "max number of rx packets to process",
453             &adapter->fc_setting, lem_fc_setting);
454
455         /*
456          * Validate number of transmit and receive descriptors. It
457          * must not exceed hardware maximum, and must be multiple
458          * of E1000_DBA_ALIGN.
459          */
460         if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
461             (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
462             (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
463             (lem_txd < EM_MIN_TXD)) {
464                 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
465                     EM_DEFAULT_TXD, lem_txd);
466                 adapter->num_tx_desc = EM_DEFAULT_TXD;
467         } else
468                 adapter->num_tx_desc = lem_txd;
469         if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
470             (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
471             (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
472             (lem_rxd < EM_MIN_RXD)) {
473                 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
474                     EM_DEFAULT_RXD, lem_rxd);
475                 adapter->num_rx_desc = EM_DEFAULT_RXD;
476         } else
477                 adapter->num_rx_desc = lem_rxd;
478
479         adapter->hw.mac.autoneg = DO_AUTO_NEG;
480         adapter->hw.phy.autoneg_wait_to_complete = FALSE;
481         adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
482         adapter->rx_buffer_len = 2048;
483
484         e1000_init_script_state_82541(&adapter->hw, TRUE);
485         e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
486
487         /* Copper options */
488         if (adapter->hw.phy.media_type == e1000_media_type_copper) {
489                 adapter->hw.phy.mdix = AUTO_ALL_MODES;
490                 adapter->hw.phy.disable_polarity_correction = FALSE;
491                 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
492         }
493
494         /*
495          * Set the frame limits assuming
496          * standard ethernet sized frames.
497          */
498         adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
499         adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
500
501         /*
502          * This controls when hardware reports transmit completion
503          * status.
504          */
505         adapter->hw.mac.report_tx_early = 1;
506
507         tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
508             EM_DBA_ALIGN);
509
510         /* Allocate Transmit Descriptor ring */
511         if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
512                 device_printf(dev, "Unable to allocate tx_desc memory\n");
513                 error = ENOMEM;
514                 goto err_tx_desc;
515         }
516         adapter->tx_desc_base = 
517             (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
518
519         rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
520             EM_DBA_ALIGN);
521
522         /* Allocate Receive Descriptor ring */
523         if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
524                 device_printf(dev, "Unable to allocate rx_desc memory\n");
525                 error = ENOMEM;
526                 goto err_rx_desc;
527         }
528         adapter->rx_desc_base =
529             (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
530
531         /* Allocate multicast array memory. */
532         adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
533             MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
534         if (adapter->mta == NULL) {
535                 device_printf(dev, "Can not allocate multicast setup array\n");
536                 error = ENOMEM;
537                 goto err_hw_init;
538         }
539
540         /*
541         ** Start from a known state, this is
542         ** important in reading the nvm and
543         ** mac from that.
544         */
545         e1000_reset_hw(&adapter->hw);
546
547         /* Make sure we have a good EEPROM before we read from it */
548         if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
549                 /*
550                 ** Some PCI-E parts fail the first check due to
551                 ** the link being in sleep state, call it again,
552                 ** if it fails a second time its a real issue.
553                 */
554                 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
555                         device_printf(dev,
556                             "The EEPROM Checksum Is Not Valid\n");
557                         error = EIO;
558                         goto err_hw_init;
559                 }
560         }
561
562         /* Copy the permanent MAC address out of the EEPROM */
563         if (e1000_read_mac_addr(&adapter->hw) < 0) {
564                 device_printf(dev, "EEPROM read error while reading MAC"
565                     " address\n");
566                 error = EIO;
567                 goto err_hw_init;
568         }
569
570         if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
571                 device_printf(dev, "Invalid MAC address\n");
572                 error = EIO;
573                 goto err_hw_init;
574         }
575
576         /* Initialize the hardware */
577         if (lem_hardware_init(adapter)) {
578                 device_printf(dev, "Unable to initialize the hardware\n");
579                 error = EIO;
580                 goto err_hw_init;
581         }
582
583         /* Allocate transmit descriptors and buffers */
584         if (lem_allocate_transmit_structures(adapter)) {
585                 device_printf(dev, "Could not setup transmit structures\n");
586                 error = ENOMEM;
587                 goto err_tx_struct;
588         }
589
590         /* Allocate receive descriptors and buffers */
591         if (lem_allocate_receive_structures(adapter)) {
592                 device_printf(dev, "Could not setup receive structures\n");
593                 error = ENOMEM;
594                 goto err_rx_struct;
595         }
596
597         /*
598         **  Do interrupt configuration
599         */
600         error = lem_allocate_irq(adapter);
601         if (error)
602                 goto err_rx_struct;
603
604         /*
605          * Get Wake-on-Lan and Management info for later use
606          */
607         lem_get_wakeup(dev);
608
609         /* Setup OS specific network interface */
610         if (lem_setup_interface(dev, adapter) != 0)
611                 goto err_rx_struct;
612
613         /* Initialize statistics */
614         lem_update_stats_counters(adapter);
615
616         adapter->hw.mac.get_link_status = 1;
617         lem_update_link_status(adapter);
618
619         /* Indicate SOL/IDER usage */
620         if (e1000_check_reset_block(&adapter->hw))
621                 device_printf(dev,
622                     "PHY reset is blocked due to SOL/IDER session.\n");
623
624         /* Do we need workaround for 82544 PCI-X adapter? */
625         if (adapter->hw.bus.type == e1000_bus_type_pcix &&
626             adapter->hw.mac.type == e1000_82544)
627                 adapter->pcix_82544 = TRUE;
628         else
629                 adapter->pcix_82544 = FALSE;
630
631         /* Register for VLAN events */
632         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
633             lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
634         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
635             lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); 
636
637         lem_add_hw_stats(adapter);
638
639         /* Non-AMT based hardware can now take control from firmware */
640         if (adapter->has_manage && !adapter->has_amt)
641                 lem_get_hw_control(adapter);
642
643         /* Tell the stack that the interface is not active */
644         adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
645
646         adapter->led_dev = led_create(lem_led_func, adapter,
647             device_get_nameunit(dev));
648
649         INIT_DEBUGOUT("lem_attach: end");
650
651         return (0);
652
653 err_rx_struct:
654         lem_free_transmit_structures(adapter);
655 err_tx_struct:
656 err_hw_init:
657         lem_release_hw_control(adapter);
658         lem_dma_free(adapter, &adapter->rxdma);
659 err_rx_desc:
660         lem_dma_free(adapter, &adapter->txdma);
661 err_tx_desc:
662 err_pci:
663         if (adapter->ifp != NULL)
664                 if_free(adapter->ifp);
665         lem_free_pci_resources(adapter);
666         free(adapter->mta, M_DEVBUF);
667         EM_TX_LOCK_DESTROY(adapter);
668         EM_RX_LOCK_DESTROY(adapter);
669         EM_CORE_LOCK_DESTROY(adapter);
670
671         return (error);
672 }
673
674 /*********************************************************************
675  *  Device removal routine
676  *
677  *  The detach entry point is called when the driver is being removed.
678  *  This routine stops the adapter and deallocates all the resources
679  *  that were allocated for driver operation.
680  *
681  *  return 0 on success, positive on failure
682  *********************************************************************/
683
static int
lem_detach(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (adapter->led_dev != NULL)
		led_destroy(adapter->led_dev);

	/*
	 * Quiesce the adapter under both locks; in_detach is set
	 * before lem_stop so other paths can see teardown is underway.
	 */
	EM_CORE_LOCK(adapter);
	EM_TX_LOCK(adapter);
	adapter->in_detach = 1;
	lem_stop(adapter);
	e1000_phy_hw_reset(&adapter->hw);

	lem_release_manageability(adapter);

	EM_TX_UNLOCK(adapter);
	EM_CORE_UNLOCK(adapter);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the network stack before freeing its resources */
	ether_ifdetach(adapter->ifp);
	/* Drained without the locks held; the callouts are mtx-backed */
	callout_drain(&adapter->timer);
	callout_drain(&adapter->tx_fifo_timer);

	lem_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	lem_free_transmit_structures(adapter);
	lem_free_receive_structures(adapter);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		lem_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		lem_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	lem_release_hw_control(adapter);
	free(adapter->mta, M_DEVBUF);
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (0);
}
754
755 /*********************************************************************
756  *
757  *  Shutdown entry point
758  *
759  **********************************************************************/
760
761 static int
762 lem_shutdown(device_t dev)
763 {
764         return lem_suspend(dev);
765 }
766
767 /*
768  * Suspend/resume device methods.
769  */
static int
lem_suspend(device_t dev)
{
        struct adapter *adapter = device_get_softc(dev);

        EM_CORE_LOCK(adapter);

        /*
         * Hand manageability and hardware control back to firmware,
         * then arm wake-on-LAN before sleeping.
         */
        lem_release_manageability(adapter);
        lem_release_hw_control(adapter);
        lem_enable_wakeup(dev);

        EM_CORE_UNLOCK(adapter);

        return bus_generic_suspend(dev);
}
785
static int
lem_resume(device_t dev)
{
        struct adapter *adapter = device_get_softc(dev);
        struct ifnet *ifp = adapter->ifp;

        /* Re-initialize the hardware, then restart queued transmits. */
        EM_CORE_LOCK(adapter);
        lem_init_locked(adapter);
        lem_init_manageability(adapter);
        EM_CORE_UNLOCK(adapter);
        lem_start(ifp);

        return bus_generic_resume(dev);
}
800
801
static void
lem_start_locked(struct ifnet *ifp)
{
        struct adapter  *adapter = ifp->if_softc;
        struct mbuf     *m_head;

        EM_TX_LOCK_ASSERT(adapter);

        /* Bail unless the interface is running and not output-blocked. */
        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING)
                return;
        /* No point queueing descriptors while the link is down. */
        if (!adapter->link_active)
                return;

        /*
         * Force a cleanup if number of TX descriptors
         * available hits the threshold
         */
        if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
                lem_txeof(adapter);
                /* Now do we at least have a minimal? */
                if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
                        adapter->no_tx_desc_avail1++;
                        return;
                }
        }

        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;
                /*
                 *  Encapsulation can modify our pointer, and or make it
                 *  NULL on failure.  In that event, we can't requeue.
                 */
                if (lem_xmit(adapter, &m_head)) {
                        if (m_head == NULL)
                                break;
                        /* Requeue and stall output until descriptors free. */
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
                        break;
                }

                /* Send a copy of the frame to the BPF listener */
                ETHER_BPF_MTAP(ifp, m_head);

                /* Set timeout in case hardware has problems transmitting. */
                adapter->watchdog_check = TRUE;
                adapter->watchdog_time = ticks;
        }
        /* Stall output if we are nearly out of descriptors. */
        if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
                ifp->if_drv_flags |= IFF_DRV_OACTIVE;

        return;
}
858
859 static void
860 lem_start(struct ifnet *ifp)
861 {
862         struct adapter *adapter = ifp->if_softc;
863
864         EM_TX_LOCK(adapter);
865         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
866                 lem_start_locked(ifp);
867         EM_TX_UNLOCK(adapter);
868 }
869
870 /*********************************************************************
871  *  Ioctl entry point
872  *
873  *  em_ioctl is called when the user wants to configure the
874  *  interface.
875  *
876  *  return 0 on success, positive on failure
877  **********************************************************************/
878
static int
lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct adapter  *adapter = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
        struct ifaddr *ifa = (struct ifaddr *)data;
#endif
        int error = 0;

        /* Refuse all requests once teardown has started. */
        if (adapter->in_detach)
                return (error);

        switch (command) {
        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET) {
                        /*
                         * XXX
                         * Since resetting hardware takes a very long time
                         * and results in link renegotiation we only
                         * initialize the hardware only when it is absolutely
                         * required.
                         */
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                EM_CORE_LOCK(adapter);
                                lem_init_locked(adapter);
                                EM_CORE_UNLOCK(adapter);
                        }
                        arp_ifinit(ifp, ifa);
                } else
#endif
                        error = ether_ioctl(ifp, command, data);
                break;
        case SIOCSIFMTU:
            {
                int max_frame_size;

                IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

                EM_CORE_LOCK(adapter);
                switch (adapter->hw.mac.type) {
                case e1000_82542:
                        /* 82542 cannot do jumbo frames. */
                        max_frame_size = ETHER_MAX_LEN;
                        break;
                default:
                        max_frame_size = MAX_JUMBO_FRAME_SIZE;
                }
                if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
                    ETHER_CRC_LEN) {
                        EM_CORE_UNLOCK(adapter);
                        error = EINVAL;
                        break;
                }

                ifp->if_mtu = ifr->ifr_mtu;
                adapter->max_frame_size =
                    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
                /* Reinitialize so buffer sizing matches the new MTU. */
                lem_init_locked(adapter);
                EM_CORE_UNLOCK(adapter);
                break;
            }
        case SIOCSIFFLAGS:
                IOCTL_DEBUGOUT("ioctl rcv'd:\
                    SIOCSIFFLAGS (Set Interface Flags)");
                EM_CORE_LOCK(adapter);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                /*
                                 * Only promiscuity-related flags changed:
                                 * reprogram the filter, skip full reinit.
                                 */
                                if ((ifp->if_flags ^ adapter->if_flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        lem_disable_promisc(adapter);
                                        lem_set_promisc(adapter);
                                }
                        } else
                                lem_init_locked(adapter);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                EM_TX_LOCK(adapter);
                                lem_stop(adapter);
                                EM_TX_UNLOCK(adapter);
                        }
                /* Remember flags so later changes can be detected above. */
                adapter->if_flags = ifp->if_flags;
                EM_CORE_UNLOCK(adapter);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        EM_CORE_LOCK(adapter);
                        lem_disable_intr(adapter);
                        lem_set_multi(adapter);
                        /*
                         * 82542 rev 2 requires the receive unit to be
                         * reinitialized after updating the multicast table.
                         */
                        if (adapter->hw.mac.type == e1000_82542 &&
                            adapter->hw.revision_id == E1000_REVISION_2) {
                                lem_initialize_receive_unit(adapter);
                        }
#ifdef DEVICE_POLLING
                        if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
                                lem_enable_intr(adapter);
                        EM_CORE_UNLOCK(adapter);
                }
                break;
        case SIOCSIFMEDIA:
                /* Check SOL/IDER usage */
                EM_CORE_LOCK(adapter);
                if (e1000_check_reset_block(&adapter->hw)) {
                        EM_CORE_UNLOCK(adapter);
                        device_printf(adapter->dev, "Media change is"
                            " blocked due to SOL/IDER session.\n");
                        break;
                }
                EM_CORE_UNLOCK(adapter);
                /* FALLTHROUGH -- set is handled by ifmedia_ioctl() too */
        case SIOCGIFMEDIA:
                IOCTL_DEBUGOUT("ioctl rcv'd: \
                    SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
                break;
        case SIOCSIFCAP:
            {
                int mask, reinit;

                IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
                reinit = 0;
                /* mask holds only the capability bits that are changing. */
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
                if (mask & IFCAP_POLLING) {
                        if (ifr->ifr_reqcap & IFCAP_POLLING) {
                                error = ether_poll_register(lem_poll, ifp);
                                if (error)
                                        return (error);
                                EM_CORE_LOCK(adapter);
                                lem_disable_intr(adapter);
                                ifp->if_capenable |= IFCAP_POLLING;
                                EM_CORE_UNLOCK(adapter);
                        } else {
                                error = ether_poll_deregister(ifp);
                                /* Enable interrupt even in error case */
                                EM_CORE_LOCK(adapter);
                                lem_enable_intr(adapter);
                                ifp->if_capenable &= ~IFCAP_POLLING;
                                EM_CORE_UNLOCK(adapter);
                        }
                }
#endif
                if (mask & IFCAP_HWCSUM) {
                        ifp->if_capenable ^= IFCAP_HWCSUM;
                        reinit = 1;
                }
                if (mask & IFCAP_VLAN_HWTAGGING) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        reinit = 1;
                }
                if ((mask & IFCAP_WOL) &&
                    (ifp->if_capabilities & IFCAP_WOL) != 0) {
                        if (mask & IFCAP_WOL_MCAST)
                                ifp->if_capenable ^= IFCAP_WOL_MCAST;
                        if (mask & IFCAP_WOL_MAGIC)
                                ifp->if_capenable ^= IFCAP_WOL_MAGIC;
                }
                if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
                        lem_init(adapter);
                VLAN_CAPABILITIES(ifp);
                break;
            }

        default:
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}
1052
1053
1054 /*********************************************************************
1055  *  Init entry point
1056  *
1057  *  This routine is used in two ways. It is used by the stack as
1058  *  init entry point in network interface structure. It is also used
1059  *  by the driver as a hw/sw initialization routine to get to a
1060  *  consistent state.
1061  *
 *  This routine returns no value (void); failures are reported on the
 *  console and leave the interface stopped.
1063  **********************************************************************/
1064
static void
lem_init_locked(struct adapter *adapter)
{
        struct ifnet    *ifp = adapter->ifp;
        device_t        dev = adapter->dev;
        u32             pba;

        INIT_DEBUGOUT("lem_init: begin");

        EM_CORE_LOCK_ASSERT(adapter);

        /* Stop the adapter before reprogramming it. */
        EM_TX_LOCK(adapter);
        lem_stop(adapter);
        EM_TX_UNLOCK(adapter);

        /*
         * Packet Buffer Allocation (PBA)
         * Writing PBA sets the receive portion of the buffer
         * the remainder is used for the transmit buffer.
         *
         * Devices before the 82547 had a Packet Buffer of 64K.
         *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
         * After the 82547 the buffer was reduced to 40K.
         *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
         *   Note: default does not leave enough room for Jumbo Frame >10k.
         */
        switch (adapter->hw.mac.type) {
        case e1000_82547:
        case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
                if (adapter->max_frame_size > 8192)
                        pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
                else
                        pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
                /* 82547 uses a software TX FIFO workaround; size it here. */
                adapter->tx_fifo_head = 0;
                adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
                adapter->tx_fifo_size =
                    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
                break;
        default:
                /* Devices before 82547 had a Packet Buffer of 64K.   */
                if (adapter->max_frame_size > 8192)
                        pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
                else
                        pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
        }

        INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
        E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

        /* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
              ETHER_ADDR_LEN);

        /* Put the address into the Receive Address Array */
        e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

        /* Initialize the hardware */
        if (lem_hardware_init(adapter)) {
                device_printf(dev, "Unable to initialize the hardware\n");
                return;
        }
        lem_update_link_status(adapter);

        /* Setup VLAN support, basic and offload if available */
        E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

        /* Set hardware offload abilities */
        ifp->if_hwassist = 0;
        if (adapter->hw.mac.type >= e1000_82543) {
                if (ifp->if_capenable & IFCAP_TXCSUM)
                        ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
        }

        /* Configure for OS presence */
        lem_init_manageability(adapter);

        /* Prepare transmit descriptors and buffers */
        lem_setup_transmit_structures(adapter);
        lem_initialize_transmit_unit(adapter);

        /* Setup Multicast table */
        lem_set_multi(adapter);

        /* Prepare receive descriptors and buffers */
        if (lem_setup_receive_structures(adapter)) {
                device_printf(dev, "Could not setup receive structures\n");
                EM_TX_LOCK(adapter);
                lem_stop(adapter);
                EM_TX_UNLOCK(adapter);
                return;
        }
        lem_initialize_receive_unit(adapter);

        /* Use real VLAN Filter support? */
        if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
                if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
                        /* Use real VLAN Filter support */
                        lem_setup_vlan_hw_support(adapter);
                else {
                        /* Otherwise just enable hardware VLAN tagging. */
                        u32 ctrl;
                        ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
                        ctrl |= E1000_CTRL_VME;
                        E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
                }
        }

        /* Don't lose promiscuous settings */
        lem_set_promisc(adapter);

        /* Mark the interface up and ready to transmit. */
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
        e1000_clear_hw_cntrs_base_generic(&adapter->hw);

        /* MSI/X configuration for 82574 */
        if (adapter->hw.mac.type == e1000_82574) {
                int tmp;
                tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
                tmp |= E1000_CTRL_EXT_PBA_CLR;
                E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
                /*
                ** Set the IVAR - interrupt vector routing.
                ** Each nibble represents a vector, high bit
                ** is enable, other 3 bits are the MSIX table
                ** entry, we map RXQ0 to 0, TXQ0 to 1, and
                ** Link (other) to 2, hence the magic number.
                */
                E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
        }

#ifdef DEVICE_POLLING
        /*
         * Only enable interrupts if we are not polling, make sure
         * they are off otherwise.
         */
        if (ifp->if_capenable & IFCAP_POLLING)
                lem_disable_intr(adapter);
        else
#endif /* DEVICE_POLLING */
                lem_enable_intr(adapter);

        /* AMT based hardware can now take control from firmware */
        if (adapter->has_manage && adapter->has_amt)
                lem_get_hw_control(adapter);

        /* Don't reset the phy next time init gets called */
        adapter->hw.phy.reset_disable = TRUE;
}
1214
/*
 * if_init entry point: take the core lock and run the real
 * initialization in lem_init_locked().
 */
static void
lem_init(void *arg)
{
        struct adapter *sc = arg;

        EM_CORE_LOCK(sc);
        lem_init_locked(sc);
        EM_CORE_UNLOCK(sc);
}
1224
1225
1226 #ifdef DEVICE_POLLING
1227 /*********************************************************************
1228  *
1229  *  Legacy polling routine  
1230  *
1231  *********************************************************************/
static int
lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct adapter *adapter = ifp->if_softc;
        u32             reg_icr, rx_done = 0;

        EM_CORE_LOCK(adapter);
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
                EM_CORE_UNLOCK(adapter);
                return (rx_done);
        }

        if (cmd == POLL_AND_CHECK_STATUS) {
                reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
                if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
                        /* Link event: refresh status and restart timer. */
                        callout_stop(&adapter->timer);
                        adapter->hw.mac.get_link_status = 1;
                        lem_update_link_status(adapter);
                        callout_reset(&adapter->timer, hz,
                            lem_local_timer, adapter);
                }
        }
        EM_CORE_UNLOCK(adapter);

        /* Process up to "count" received packets; rx_done reports how many. */
        lem_rxeof(adapter, count, &rx_done);

        EM_TX_LOCK(adapter);
        lem_txeof(adapter);
        /* Restart transmission if packets are queued. */
        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                lem_start_locked(ifp);
        EM_TX_UNLOCK(adapter);
        return (rx_done);
}
1265 #endif /* DEVICE_POLLING */
1266
1267 #ifdef EM_LEGACY_IRQ 
1268 /*********************************************************************
1269  *
1270  *  Legacy Interrupt Service routine  
1271  *
1272  *********************************************************************/
static void
lem_intr(void *arg)
{
        struct adapter  *adapter = arg;
        struct ifnet    *ifp = adapter->ifp;
        u32             reg_icr;


        /* Polling mode owns the device; ignore stray interrupts. */
        if (ifp->if_capenable & IFCAP_POLLING)
                return;

        EM_CORE_LOCK(adapter);
        /* Reading ICR clears the pending interrupt causes. */
        reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
        if (reg_icr & E1000_ICR_RXO)
                adapter->rx_overruns++;

        /* All-ones means the device was removed (hot eject); zero: not ours. */
        if ((reg_icr == 0xffffffff) || (reg_icr == 0))
                        goto out;

        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                        goto out;

        if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
                callout_stop(&adapter->timer);
                adapter->hw.mac.get_link_status = 1;
                lem_update_link_status(adapter);
                /* Deal with TX cruft when link lost */
                lem_tx_purge(adapter);
                callout_reset(&adapter->timer, hz,
                    lem_local_timer, adapter);
                goto out;
        }

        EM_TX_LOCK(adapter);
        lem_rxeof(adapter, -1, NULL);
        lem_txeof(adapter);
        if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
            !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                lem_start_locked(ifp);
        EM_TX_UNLOCK(adapter);

out:
        EM_CORE_UNLOCK(adapter);
        return;
}
1318
1319 #else /* EM_FAST_IRQ, then fast interrupt routines only */
1320
/* Taskqueue handler for link-state change events. */
static void
lem_handle_link(void *context, int pending)
{
        struct adapter  *adapter = context;
        struct ifnet *ifp = adapter->ifp;

        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                return;

        EM_CORE_LOCK(adapter);
        callout_stop(&adapter->timer);
        lem_update_link_status(adapter);
        /* Deal with TX cruft when link lost */
        lem_tx_purge(adapter);
        callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
        EM_CORE_UNLOCK(adapter);
}
1338
1339
1340 /* Combined RX/TX handler, used by Legacy and MSI */
/* Combined RX/TX handler, used by Legacy and MSI */
static void
lem_handle_rxtx(void *context, int pending)
{
        struct adapter  *adapter = context;
        struct ifnet    *ifp = adapter->ifp;


        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                lem_rxeof(adapter, adapter->rx_process_limit, NULL);
                EM_TX_LOCK(adapter);
                lem_txeof(adapter);
                /* Restart transmission if packets are queued. */
                if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                        lem_start_locked(ifp);
                EM_TX_UNLOCK(adapter);
        }

        /* Re-enable interrupts masked by lem_irq_fast(), if still running. */
        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                lem_enable_intr(adapter);
}
1360
1361 /*********************************************************************
1362  *
1363  *  Fast Legacy/MSI Combined Interrupt Service routine  
1364  *
1365  *********************************************************************/
static int
lem_irq_fast(void *arg)
{
        struct adapter  *adapter = arg;
        struct ifnet    *ifp;
        u32             reg_icr;

        ifp = adapter->ifp;

        /* Reading ICR clears the pending interrupt causes. */
        reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

        /* Hot eject?  */
        if (reg_icr == 0xffffffff)
                return FILTER_STRAY;

        /* Definitely not our interrupt.  */
        if (reg_icr == 0x0)
                return FILTER_STRAY;

        /*
         * Mask interrupts until the taskqueue is finished running.  This is
         * cheap, just assume that it is needed.  This also works around the
         * MSI message reordering errata on certain systems.
         */
        lem_disable_intr(adapter);
        taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

        /* Link status change */
        if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
                adapter->hw.mac.get_link_status = 1;
                taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
        }

        if (reg_icr & E1000_ICR_RXO)
                adapter->rx_overruns++;
        return FILTER_HANDLED;
}
1403 #endif /* ~EM_LEGACY_IRQ */
1404
1405
1406 /*********************************************************************
1407  *
1408  *  Media Ioctl callback
1409  *
1410  *  This routine is called whenever the user queries the status of
1411  *  the interface using ifconfig.
1412  *
1413  **********************************************************************/
static void
lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct adapter *adapter = ifp->if_softc;
        u_char fiber_type = IFM_1000_SX;

        INIT_DEBUGOUT("lem_media_status: begin");

        EM_CORE_LOCK(adapter);
        lem_update_link_status(adapter);

        ifmr->ifm_status = IFM_AVALID;
        ifmr->ifm_active = IFM_ETHER;

        /* No link: report status valid but not active. */
        if (!adapter->link_active) {
                EM_CORE_UNLOCK(adapter);
                return;
        }

        ifmr->ifm_status |= IFM_ACTIVE;

        if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
            (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
                /* 82545 fiber is reported as 1000baseLX. */
                if (adapter->hw.mac.type == e1000_82545)
                        fiber_type = IFM_1000_LX;
                ifmr->ifm_active |= fiber_type | IFM_FDX;
        } else {
                /* Copper: translate link speed/duplex to media words. */
                switch (adapter->link_speed) {
                case 10:
                        ifmr->ifm_active |= IFM_10_T;
                        break;
                case 100:
                        ifmr->ifm_active |= IFM_100_TX;
                        break;
                case 1000:
                        ifmr->ifm_active |= IFM_1000_T;
                        break;
                }
                if (adapter->link_duplex == FULL_DUPLEX)
                        ifmr->ifm_active |= IFM_FDX;
                else
                        ifmr->ifm_active |= IFM_HDX;
        }
        EM_CORE_UNLOCK(adapter);
}
1459
1460 /*********************************************************************
1461  *
1462  *  Media Ioctl callback
1463  *
1464  *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
1466  *
1467  **********************************************************************/
static int
lem_media_change(struct ifnet *ifp)
{
        struct adapter *adapter = ifp->if_softc;
        struct ifmedia  *ifm = &adapter->media;

        INIT_DEBUGOUT("lem_media_change: begin");

        if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
                return (EINVAL);

        EM_CORE_LOCK(adapter);
        switch (IFM_SUBTYPE(ifm->ifm_media)) {
        case IFM_AUTO:
                adapter->hw.mac.autoneg = DO_AUTO_NEG;
                adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
                break;
        case IFM_1000_LX:
        case IFM_1000_SX:
        case IFM_1000_T:
                /* Gigabit requires autonegotiation; advertise 1000 only. */
                adapter->hw.mac.autoneg = DO_AUTO_NEG;
                adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
                break;
        case IFM_100_TX:
                adapter->hw.mac.autoneg = FALSE;
                adapter->hw.phy.autoneg_advertised = 0;
                if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
                        adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
                else
                        adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
                break;
        case IFM_10_T:
                adapter->hw.mac.autoneg = FALSE;
                adapter->hw.phy.autoneg_advertised = 0;
                if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
                        adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
                else
                        adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
                break;
        default:
                device_printf(adapter->dev, "Unsupported media type\n");
        }

        /* As the speed/duplex settings may have changed we need to
         * reset the PHY.
         */
        adapter->hw.phy.reset_disable = FALSE;

        lem_init_locked(adapter);
        EM_CORE_UNLOCK(adapter);

        return (0);
}
1521
1522 /*********************************************************************
1523  *
1524  *  This routine maps the mbufs to tx descriptors.
1525  *
1526  *  return 0 on success, positive on failure
1527  **********************************************************************/
1528
1529 static int
1530 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1531 {
1532         bus_dma_segment_t       segs[EM_MAX_SCATTER];
1533         bus_dmamap_t            map;
1534         struct em_buffer        *tx_buffer, *tx_buffer_mapped;
1535         struct e1000_tx_desc    *ctxd = NULL;
1536         struct mbuf             *m_head;
1537         u32                     txd_upper, txd_lower, txd_used, txd_saved;
1538         int                     error, nsegs, i, j, first, last = 0;
1539
1540         m_head = *m_headp;
1541         txd_upper = txd_lower = txd_used = txd_saved = 0;
1542
1543         /*
1544         ** When doing checksum offload, it is critical to
1545         ** make sure the first mbuf has more than header,
1546         ** because that routine expects data to be present.
1547         */
1548         if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
1549             (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
1550                 m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
1551                 *m_headp = m_head;
1552                 if (m_head == NULL)
1553                         return (ENOBUFS);
1554         }
1555
1556         /*
1557          * Map the packet for DMA
1558          *
1559          * Capture the first descriptor index,
1560          * this descriptor will have the index
1561          * of the EOP which is the only one that
1562          * now gets a DONE bit writeback.
1563          */
1564         first = adapter->next_avail_tx_desc;
1565         tx_buffer = &adapter->tx_buffer_area[first];
1566         tx_buffer_mapped = tx_buffer;
1567         map = tx_buffer->map;
1568
1569         error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1570             *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1571
1572         /*
1573          * There are two types of errors we can (try) to handle:
1574          * - EFBIG means the mbuf chain was too long and bus_dma ran
1575          *   out of segments.  Defragment the mbuf chain and try again.
1576          * - ENOMEM means bus_dma could not obtain enough bounce buffers
1577          *   at this point in time.  Defer sending and try again later.
1578          * All other errors, in particular EINVAL, are fatal and prevent the
1579          * mbuf chain from ever going through.  Drop it and report error.
1580          */
1581         if (error == EFBIG) {
1582                 struct mbuf *m;
1583
1584                 m = m_defrag(*m_headp, M_DONTWAIT);
1585                 if (m == NULL) {
1586                         adapter->mbuf_alloc_failed++;
1587                         m_freem(*m_headp);
1588                         *m_headp = NULL;
1589                         return (ENOBUFS);
1590                 }
1591                 *m_headp = m;
1592
1593                 /* Try it again */
1594                 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1595                     *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1596
1597                 if (error) {
1598                         adapter->no_tx_dma_setup++;
1599                         m_freem(*m_headp);
1600                         *m_headp = NULL;
1601                         return (error);
1602                 }
1603         } else if (error != 0) {
1604                 adapter->no_tx_dma_setup++;
1605                 return (error);
1606         }
1607
1608         if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1609                 adapter->no_tx_desc_avail2++;
1610                 bus_dmamap_unload(adapter->txtag, map);
1611                 return (ENOBUFS);
1612         }
1613         m_head = *m_headp;
1614
1615         /* Do hardware assists */
1616         if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1617                 lem_transmit_checksum_setup(adapter,  m_head,
1618                     &txd_upper, &txd_lower);
1619
1620         i = adapter->next_avail_tx_desc;
1621         if (adapter->pcix_82544) 
1622                 txd_saved = i;
1623
1624         /* Set up our transmit descriptors */
1625         for (j = 0; j < nsegs; j++) {
1626                 bus_size_t seg_len;
1627                 bus_addr_t seg_addr;
1628                 /* If adapter is 82544 and on PCIX bus */
1629                 if(adapter->pcix_82544) {
1630                         DESC_ARRAY      desc_array;
1631                         u32             array_elements, counter;
1632                         /*
1633                          * Check the Address and Length combination and
1634                          * split the data accordingly
1635                          */
1636                         array_elements = lem_fill_descriptors(segs[j].ds_addr,
1637                             segs[j].ds_len, &desc_array);
1638                         for (counter = 0; counter < array_elements; counter++) {
1639                                 if (txd_used == adapter->num_tx_desc_avail) {
1640                                         adapter->next_avail_tx_desc = txd_saved;
1641                                         adapter->no_tx_desc_avail2++;
1642                                         bus_dmamap_unload(adapter->txtag, map);
1643                                         return (ENOBUFS);
1644                                 }
1645                                 tx_buffer = &adapter->tx_buffer_area[i];
1646                                 ctxd = &adapter->tx_desc_base[i];
1647                                 ctxd->buffer_addr = htole64(
1648                                     desc_array.descriptor[counter].address);
1649                                 ctxd->lower.data = htole32(
1650                                     (adapter->txd_cmd | txd_lower | (u16)
1651                                     desc_array.descriptor[counter].length));
1652                                 ctxd->upper.data =
1653                                     htole32((txd_upper));
1654                                 last = i;
1655                                 if (++i == adapter->num_tx_desc)
1656                                          i = 0;
1657                                 tx_buffer->m_head = NULL;
1658                                 tx_buffer->next_eop = -1;
1659                                 txd_used++;
1660                         }
1661                 } else {
1662                         tx_buffer = &adapter->tx_buffer_area[i];
1663                         ctxd = &adapter->tx_desc_base[i];
1664                         seg_addr = segs[j].ds_addr;
1665                         seg_len  = segs[j].ds_len;
1666                         ctxd->buffer_addr = htole64(seg_addr);
1667                         ctxd->lower.data = htole32(
1668                         adapter->txd_cmd | txd_lower | seg_len);
1669                         ctxd->upper.data =
1670                             htole32(txd_upper);
1671                         last = i;
1672                         if (++i == adapter->num_tx_desc)
1673                                 i = 0;
1674                         tx_buffer->m_head = NULL;
1675                         tx_buffer->next_eop = -1;
1676                 }
1677         }
1678
1679         adapter->next_avail_tx_desc = i;
1680
1681         if (adapter->pcix_82544)
1682                 adapter->num_tx_desc_avail -= txd_used;
1683         else
1684                 adapter->num_tx_desc_avail -= nsegs;
1685
1686         if (m_head->m_flags & M_VLANTAG) {
1687                 /* Set the vlan id. */
1688                 ctxd->upper.fields.special =
1689                     htole16(m_head->m_pkthdr.ether_vtag);
1690                 /* Tell hardware to add tag */
1691                 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
1692         }
1693
1694         tx_buffer->m_head = m_head;
1695         tx_buffer_mapped->map = tx_buffer->map;
1696         tx_buffer->map = map;
1697         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1698
1699         /*
1700          * Last Descriptor of Packet
1701          * needs End Of Packet (EOP)
1702          * and Report Status (RS)
1703          */
1704         ctxd->lower.data |=
1705             htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1706         /*
1707          * Keep track in the first buffer which
1708          * descriptor will be written back
1709          */
1710         tx_buffer = &adapter->tx_buffer_area[first];
1711         tx_buffer->next_eop = last;
1712         adapter->watchdog_time = ticks;
1713
1714         /*
1715          * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1716          * that this frame is available to transmit.
1717          */
1718         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1719             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1720         if (adapter->hw.mac.type == e1000_82547 &&
1721             adapter->link_duplex == HALF_DUPLEX)
1722                 lem_82547_move_tail(adapter);
1723         else {
1724                 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1725                 if (adapter->hw.mac.type == e1000_82547)
1726                         lem_82547_update_fifo_head(adapter,
1727                             m_head->m_pkthdr.len);
1728         }
1729
1730         return (0);
1731 }
1732
1733 /*********************************************************************
1734  *
1735  * 82547 workaround to avoid controller hang in half-duplex environment.
1736  * The workaround is to avoid queuing a large packet that would span
1737  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1738  * in this case. We do that only when FIFO is quiescent.
1739  *
1740  **********************************************************************/
static void
lem_82547_move_tail(void *arg)
{
        struct adapter *adapter = arg;
        struct e1000_tx_desc *tx_desc;
        u16     hw_tdt, sw_tdt, length = 0;
        bool    eop = 0;

        EM_TX_LOCK_ASSERT(adapter);

        /*
         * Walk descriptors from the hardware tail up to the software
         * tail, releasing them to the hardware one whole packet (EOP
         * boundary) at a time so a frame never spans the TX FIFO wrap.
         */
        hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
        sw_tdt = adapter->next_avail_tx_desc;
        
        while (hw_tdt != sw_tdt) {
                tx_desc = &adapter->tx_desc_base[hw_tdt];
                length += tx_desc->lower.flags.length;
                eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
                if (++hw_tdt == adapter->num_tx_desc)
                        hw_tdt = 0;

                if (eop) {
                        if (lem_82547_fifo_workaround(adapter, length)) {
                                /* FIFO cannot take this packet yet: retry
                                 * via the callout on the next tick. */
                                adapter->tx_fifo_wrk_cnt++;
                                callout_reset(&adapter->tx_fifo_timer, 1,
                                        lem_82547_move_tail, adapter);
                                break;
                        }
                        /* Packet is safe: advance the hardware tail. */
                        E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
                        lem_82547_update_fifo_head(adapter, length);
                        length = 0;
                }
        }       
}
1774
1775 static int
1776 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1777 {       
1778         int fifo_space, fifo_pkt_len;
1779
1780         fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1781
1782         if (adapter->link_duplex == HALF_DUPLEX) {
1783                 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1784
1785                 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1786                         if (lem_82547_tx_fifo_reset(adapter))
1787                                 return (0);
1788                         else
1789                                 return (1);
1790                 }
1791         }
1792
1793         return (0);
1794 }
1795
1796 static void
1797 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1798 {
1799         int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1800         
1801         /* tx_fifo_head is always 16 byte aligned */
1802         adapter->tx_fifo_head += fifo_pkt_len;
1803         if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1804                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1805         }
1806 }
1807
1808
/*
 * Reset the 82547 internal TX FIFO pointers, but only when the TX path
 * is completely quiescent.  Returns TRUE on reset, FALSE otherwise.
 */
static int
lem_82547_tx_fifo_reset(struct adapter *adapter)
{
        u32 tctl;

        /* Quiescence check: ring tail == head, FIFO tail == head,
         * saved tail == saved head, and zero packets in the FIFO. */
        if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
            E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
            (E1000_READ_REG(&adapter->hw, E1000_TDFT) == 
            E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
            (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
            E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
            (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
                /* Disable TX unit */
                tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
                E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
                    tctl & ~E1000_TCTL_EN);

                /* Reset FIFO pointers */
                E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
                    adapter->tx_head_addr);
                E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
                    adapter->tx_head_addr);
                E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
                    adapter->tx_head_addr);
                E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
                    adapter->tx_head_addr);

                /* Re-enable TX unit */
                E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
                E1000_WRITE_FLUSH(&adapter->hw);

                /* Keep the software shadow in sync with the hardware. */
                adapter->tx_fifo_head = 0;
                adapter->tx_fifo_reset_cnt++;

                return (TRUE);
        }
        else {
                return (FALSE);
        }
}
1849
1850 static void
1851 lem_set_promisc(struct adapter *adapter)
1852 {
1853         struct ifnet    *ifp = adapter->ifp;
1854         u32             reg_rctl;
1855
1856         reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1857
1858         if (ifp->if_flags & IFF_PROMISC) {
1859                 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1860                 /* Turn this on if you want to see bad packets */
1861                 if (lem_debug_sbp)
1862                         reg_rctl |= E1000_RCTL_SBP;
1863                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1864         } else if (ifp->if_flags & IFF_ALLMULTI) {
1865                 reg_rctl |= E1000_RCTL_MPE;
1866                 reg_rctl &= ~E1000_RCTL_UPE;
1867                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1868         }
1869 }
1870
1871 static void
1872 lem_disable_promisc(struct adapter *adapter)
1873 {
1874         u32     reg_rctl;
1875
1876         reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1877
1878         reg_rctl &=  (~E1000_RCTL_UPE);
1879         reg_rctl &=  (~E1000_RCTL_MPE);
1880         reg_rctl &=  (~E1000_RCTL_SBP);
1881         E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1882 }
1883
1884
1885 /*********************************************************************
1886  *  Multicast Update
1887  *
1888  *  This routine is called whenever multicast address list is updated.
1889  *
1890  **********************************************************************/
1891
static void
lem_set_multi(struct adapter *adapter)
{
        struct ifnet    *ifp = adapter->ifp;
        struct ifmultiaddr *ifma;
        u32 reg_rctl = 0;
        u8  *mta; /* Multicast array memory */
        int mcnt = 0;

        IOCTL_DEBUGOUT("lem_set_multi: begin");

        mta = adapter->mta;
        bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

        /* 82542 rev 2: hold the receiver in reset (and clear MWI) while
         * the multicast table is rewritten — hardware workaround. */
        if (adapter->hw.mac.type == e1000_82542 && 
            adapter->hw.revision_id == E1000_REVISION_2) {
                reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
                if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
                        e1000_pci_clear_mwi(&adapter->hw);
                reg_rctl |= E1000_RCTL_RST;
                E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
                msec_delay(5);
        }

        /* Collect up to MAX_NUM_MULTICAST_ADDRESSES link-layer
         * multicast addresses under the address-list lock. */
#if __FreeBSD_version < 800000
        IF_ADDR_LOCK(ifp);
#else
        if_maddr_rlock(ifp);
#endif
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;

                if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
                        break;

                bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
                mcnt++;
        }
#if __FreeBSD_version < 800000
        IF_ADDR_UNLOCK(ifp);
#else
        if_maddr_runlock(ifp);
#endif
        /* Table overflow: fall back to accepting all multicast (MPE)
         * instead of programming a partial filter. */
        if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
                reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
                reg_rctl |= E1000_RCTL_MPE;
                E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
        } else
                e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);

        /* 82542 rev 2: take the receiver back out of reset and
         * restore MWI if it was enabled. */
        if (adapter->hw.mac.type == e1000_82542 && 
            adapter->hw.revision_id == E1000_REVISION_2) {
                reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
                reg_rctl &= ~E1000_RCTL_RST;
                E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
                msec_delay(5);
                if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
                        e1000_pci_set_mwi(&adapter->hw);
        }
}
1954
1955
1956 /*********************************************************************
1957  *  Timer routine
1958  *
1959  *  This routine checks for link status and updates statistics.
1960  *
1961  **********************************************************************/
1962
1963 static void
1964 lem_local_timer(void *arg)
1965 {
1966         struct adapter  *adapter = arg;
1967
1968         EM_CORE_LOCK_ASSERT(adapter);
1969
1970         lem_update_link_status(adapter);
1971         lem_update_stats_counters(adapter);
1972
1973         lem_smartspeed(adapter);
1974
1975         /*
1976          * We check the watchdog: the time since
1977          * the last TX descriptor was cleaned.
1978          * This implies a functional TX engine.
1979          */
1980         if ((adapter->watchdog_check == TRUE) &&
1981             (ticks - adapter->watchdog_time > EM_WATCHDOG))
1982                 goto hung;
1983
1984         callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1985         return;
1986 hung:
1987         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1988         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1989         adapter->watchdog_events++;
1990         lem_init_locked(adapter);
1991 }
1992
/*
 * Refresh the driver's notion of link state from the hardware and
 * notify the network stack on an up/down transition.
 */
static void
lem_update_link_status(struct adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct ifnet *ifp = adapter->ifp;
        device_t dev = adapter->dev;
        u32 link_check = 0;

        /* Get the cached link value or read phy for real */
        switch (hw->phy.media_type) {
        case e1000_media_type_copper:
                if (hw->mac.get_link_status) {
                        /* Do the work to read phy */
                        e1000_check_for_link(hw);
                        link_check = !hw->mac.get_link_status;
                        if (link_check) /* ESB2 fix */
                                e1000_cfg_on_link_up(hw);
                } else
                        link_check = TRUE;
                break;
        case e1000_media_type_fiber:
                /* Fiber: link state is the LU bit in STATUS. */
                e1000_check_for_link(hw);
                link_check = (E1000_READ_REG(hw, E1000_STATUS) &
                                 E1000_STATUS_LU);
                break;
        case e1000_media_type_internal_serdes:
                e1000_check_for_link(hw);
                link_check = adapter->hw.mac.serdes_has_link;
                break;
        default:
        case e1000_media_type_unknown:
                /* Leave link_check at 0 (down). */
                break;
        }

        /* Now check for a transition */
        if (link_check && (adapter->link_active == 0)) {
                /* Link came up: latch speed/duplex and tell the stack. */
                e1000_get_speed_and_duplex(hw, &adapter->link_speed,
                    &adapter->link_duplex);
                if (bootverbose)
                        device_printf(dev, "Link is up %d Mbps %s\n",
                            adapter->link_speed,
                            ((adapter->link_duplex == FULL_DUPLEX) ?
                            "Full Duplex" : "Half Duplex"));
                adapter->link_active = 1;
                adapter->smartspeed = 0;
                ifp->if_baudrate = adapter->link_speed * 1000000;
                if_link_state_change(ifp, LINK_STATE_UP);
        } else if (!link_check && (adapter->link_active == 1)) {
                /* Link went down: clear speed/duplex and notify. */
                ifp->if_baudrate = adapter->link_speed = 0;
                adapter->link_duplex = 0;
                if (bootverbose)
                        device_printf(dev, "Link is Down\n");
                adapter->link_active = 0;
                /* Link down, disable watchdog */
                adapter->watchdog_check = FALSE;
                if_link_state_change(ifp, LINK_STATE_DOWN);
        }
}
2051
2052 /*********************************************************************
2053  *
2054  *  This routine disables all traffic on the adapter by issuing a
2055  *  global reset on the MAC and deallocates TX/RX buffers.
2056  *
2057  *  This routine should always be called with BOTH the CORE
2058  *  and TX locks.
2059  **********************************************************************/
2060
static void
lem_stop(void *arg)
{
        struct adapter  *adapter = arg;
        struct ifnet    *ifp = adapter->ifp;

        EM_CORE_LOCK_ASSERT(adapter);
        EM_TX_LOCK_ASSERT(adapter);

        INIT_DEBUGOUT("lem_stop: begin");

        /* Quiet the hardware and cancel periodic work first. */
        lem_disable_intr(adapter);
        callout_stop(&adapter->timer);
        callout_stop(&adapter->tx_fifo_timer);

        /* Tell the stack that the interface is no longer active */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

        /* Global MAC reset; clear wakeup control on 82544+. */
        e1000_reset_hw(&adapter->hw);
        if (adapter->hw.mac.type >= e1000_82544)
                E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);

        e1000_led_off(&adapter->hw);
        e1000_cleanup_led(&adapter->hw);
}
2086
2087
2088 /*********************************************************************
2089  *
2090  *  Determine hardware revision.
2091  *
2092  **********************************************************************/
static void
lem_identify_hardware(struct adapter *adapter)
{
        device_t dev = adapter->dev;

        /* Make sure our PCI config space has the necessary stuff set */
        adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
        if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
            (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
                /* Re-enable bus mastering and memory-space decoding. */
                device_printf(dev, "Memory Access and/or Bus Master bits "
                    "were not set!\n");
                adapter->hw.bus.pci_cmd_word |=
                (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
                pci_write_config(dev, PCIR_COMMAND,
                    adapter->hw.bus.pci_cmd_word, 2);
        }

        /* Save off the information about this board */
        adapter->hw.vendor_id = pci_get_vendor(dev);
        adapter->hw.device_id = pci_get_device(dev);
        adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
        adapter->hw.subsystem_vendor_id =
            pci_read_config(dev, PCIR_SUBVEND_0, 2);
        adapter->hw.subsystem_device_id =
            pci_read_config(dev, PCIR_SUBDEV_0, 2);

        /* Do Shared Code Init and Setup: derives mac.type from the IDs. */
        if (e1000_set_mac_type(&adapter->hw)) {
                device_printf(dev, "Setup init failure\n");
                return;
        }
}
2125
/*
 * Map the memory BAR (and, on parts newer than 82543, the IO BAR) and
 * record the bus-space handles the register-access macros use.
 * Returns 0 on success, ENXIO when a required resource is missing.
 */
static int
lem_allocate_pci_resources(struct adapter *adapter)
{
        device_t        dev = adapter->dev;
        int             val, rid, error = E1000_SUCCESS;

        rid = PCIR_BAR(0);
        adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &rid, RF_ACTIVE);
        if (adapter->memory == NULL) {
                device_printf(dev, "Unable to allocate bus resource: memory\n");
                return (ENXIO);
        }
        adapter->osdep.mem_bus_space_tag =
            rman_get_bustag(adapter->memory);
        adapter->osdep.mem_bus_space_handle =
            rman_get_bushandle(adapter->memory);
        adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

        /* Only older adapters use IO mapping */
        /* NOTE(review): the condition selects types newer than 82543 —
         * confirm the comment above against supported hardware. */
        if (adapter->hw.mac.type > e1000_82543) {
                /* Figure out where our IO BAR is by scanning the BARs */
                for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
                        val = pci_read_config(dev, rid, 4);
                        if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
                                adapter->io_rid = rid;
                                break;
                        }
                        rid += 4;
                        /* check for 64bit BAR */
                        if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
                                rid += 4;
                }
                if (rid >= PCIR_CIS) {
                        device_printf(dev, "Unable to locate IO BAR\n");
                        return (ENXIO);
                }
                adapter->ioport = bus_alloc_resource_any(dev,
                    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
                if (adapter->ioport == NULL) {
                        device_printf(dev, "Unable to allocate bus resource: "
                            "ioport\n");
                        return (ENXIO);
                }
                adapter->hw.io_base = 0;
                adapter->osdep.io_bus_space_tag =
                    rman_get_bustag(adapter->ioport);
                adapter->osdep.io_bus_space_handle =
                    rman_get_bushandle(adapter->ioport);
        }

        /* Shared code reaches the osdep handles through hw.back. */
        adapter->hw.back = &adapter->osdep;

        return (error);
}
2181
2182 /*********************************************************************
2183  *
2184  *  Setup the Legacy or MSI Interrupt handler
2185  *
2186  **********************************************************************/
int
lem_allocate_irq(struct adapter *adapter)
{
        device_t dev = adapter->dev;
        int error, rid = 0;

        /* Manually turn off all interrupts */
        E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

        /* We allocate a single interrupt resource */
        adapter->res[0] = bus_alloc_resource_any(dev,
            SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
        if (adapter->res[0] == NULL) {
                device_printf(dev, "Unable to allocate bus resource: "
                    "interrupt\n");
                return (ENXIO);
        }

#ifdef EM_LEGACY_IRQ
        /* We do Legacy setup: all work runs in the ithread handler. */
        if ((error = bus_setup_intr(dev, adapter->res[0],
            INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
            &adapter->tag[0])) != 0) {
                device_printf(dev, "Failed to register interrupt handler");
                return (error);
        }

#else /* FAST_IRQ */
        /*
         * Try allocating a fast interrupt and the associated deferred
         * processing contexts.  The filter (lem_irq_fast) only schedules
         * the rxtx/link tasks, which run on the private taskqueue.
         */
        TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
        TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
        adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
            taskqueue_thread_enqueue, &adapter->tq);
        taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
            device_get_nameunit(adapter->dev));
        if ((error = bus_setup_intr(dev, adapter->res[0],
            INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
            &adapter->tag[0])) != 0) {
                device_printf(dev, "Failed to register fast interrupt "
                            "handler: %d\n", error);
                taskqueue_free(adapter->tq);
                adapter->tq = NULL;
                return (error);
        }
#endif  /* EM_LEGACY_IRQ */
        
        return (0);
}
2238
2239
/*
 * Release everything lem_allocate_pci_resources()/lem_allocate_irq()
 * acquired.  Safe to call with partially-allocated state: each
 * resource is released only if present.
 */
static void
lem_free_pci_resources(struct adapter *adapter)
{
        device_t dev = adapter->dev;


        /* Tear down the handler before releasing its IRQ resource. */
        if (adapter->tag[0] != NULL) {
                bus_teardown_intr(dev, adapter->res[0],
                    adapter->tag[0]);
                adapter->tag[0] = NULL;
        }

        if (adapter->res[0] != NULL) {
                bus_release_resource(dev, SYS_RES_IRQ,
                    0, adapter->res[0]);
        }

        if (adapter->memory != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    PCIR_BAR(0), adapter->memory);

        if (adapter->ioport != NULL)
                bus_release_resource(dev, SYS_RES_IOPORT,
                    adapter->io_rid, adapter->ioport);
}
2265
2266
2267 /*********************************************************************
2268  *
2269  *  Initialize the hardware to a configuration
2270  *  as specified by the adapter structure.
2271  *
2272  **********************************************************************/
static int
lem_hardware_init(struct adapter *adapter)
{
        device_t dev = adapter->dev;
        u16     rx_buffer_size;

        INIT_DEBUGOUT("lem_hardware_init: begin");

        /* Issue a global reset */
        e1000_reset_hw(&adapter->hw);

        /* When hardware is reset, fifo_head is also reset */
        adapter->tx_fifo_head = 0;

        /*
         * These parameters control the automatic generation (Tx) and
         * response (Rx) to Ethernet PAUSE frames.
         * - High water mark should allow for at least two frames to be
         *   received after sending an XOFF.
         * - Low water mark works best when it is very near the high water mark.
         *   This allows the receiver to restart by sending XON when it has
         *   drained a bit. Here we use an arbitary value of 1500 which will
         *   restart after one full frame is pulled from the buffer. There
         *   could be several smaller frames in the buffer and if so they will
         *   not trigger the XON until their total number reduces the buffer
         *   by 1500.
         * - The pause time is fairly large at 1000 x 512ns = 512 usec.
         */
        /* PBA low word is the packet buffer size; << 10 converts to bytes. */
        rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
            0xffff) << 10 );

        adapter->hw.fc.high_water = rx_buffer_size -
            roundup2(adapter->max_frame_size, 1024);
        adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;

        adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
        adapter->hw.fc.send_xon = TRUE;

        /* Set Flow control, use the tunable location if sane */
        if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
                adapter->hw.fc.requested_mode = lem_fc_setting;
        else
                adapter->hw.fc.requested_mode = e1000_fc_none;

        /* Shared-code init programs MAC, filters and flow control. */
        if (e1000_init_hw(&adapter->hw) < 0) {
                device_printf(dev, "Hardware Initialization Failed\n");
                return (EIO);
        }

        e1000_check_for_link(&adapter->hw);

        return (0);
}
2326
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 *  Allocates the ifnet, fills in the driver entry points, attaches
 *  the ethernet layer, advertises capabilities, and registers the
 *  supported media types.
 *
 *  Returns 0 on success, -1 if the ifnet cannot be allocated.
 *
 **********************************************************************/
static int
lem_setup_interface(device_t dev, struct adapter *adapter)
{
        struct ifnet   *ifp;

        INIT_DEBUGOUT("lem_setup_interface: begin");

        ifp = adapter->ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "can not allocate ifnet structure\n");
                return (-1);
        }
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_mtu = ETHERMTU;
        ifp->if_init =  lem_init;
        ifp->if_softc = adapter;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = lem_ioctl;
        ifp->if_start = lem_start;
        /* Leave one descriptor free so head never catches tail */
        IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
        ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
        IFQ_SET_READY(&ifp->if_snd);

        ether_ifattach(ifp, adapter->hw.mac.addr);

        ifp->if_capabilities = ifp->if_capenable = 0;

        /* Checksum offload is only available on 82543 and newer MACs */
        if (adapter->hw.mac.type >= e1000_82543) {
                ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
                ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
        }

        /*
         * Tell the upper layer(s) we support long frames.
         */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
        ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
        ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

        /*
        ** Dont turn this on by default, if vlans are
        ** created on another pseudo device (eg. lagg)
        ** then vlan events are not passed thru, breaking
        ** operation, but with HW FILTER off it works. If
        ** using vlans directly on the em driver you can
        ** enable this and get full hardware tag filtering.
        */
        ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#ifdef DEVICE_POLLING
        ifp->if_capabilities |= IFCAP_POLLING;
#endif

        /* Enable only WOL MAGIC by default */
        if (adapter->wol) {
                ifp->if_capabilities |= IFCAP_WOL;
                ifp->if_capenable |= IFCAP_WOL_MAGIC;
        }
                
        /*
         * Specify the media types supported by this adapter and register
         * callbacks to update media and link information
         */
        ifmedia_init(&adapter->media, IFM_IMASK,
            lem_media_change, lem_media_status);
        if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
            (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
                u_char fiber_type = IFM_1000_SX;        /* default type */

                /* 82545 fiber parts are 1000base-LX */
                if (adapter->hw.mac.type == e1000_82545)
                        fiber_type = IFM_1000_LX;
                ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 
                            0, NULL);
                ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
        } else {
                /* Copper: advertise 10/100 and, PHY permitting, 1000 */
                ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
                ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
                            0, NULL);
                ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
                            0, NULL);
                ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
                            0, NULL);
                /* The IFE PHY is 10/100 only; skip gigabit media */
                if (adapter->hw.phy.type != e1000_phy_ife) {
                        ifmedia_add(&adapter->media,
                                IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
                        ifmedia_add(&adapter->media,
                                IFM_ETHER | IFM_1000_T, 0, NULL);
                }
        }
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
        return (0);
}
2425
2426
/*********************************************************************
 *
 *  Workaround for SmartSpeed on 82541 and 82547 controllers
 *
 *  When gigabit autonegotiation repeatedly fails with master/slave
 *  configuration faults, temporarily drop manual master/slave mode
 *  (and later restore it) while restarting autonegotiation, so a
 *  marginal link can come up at a lower speed.  Driven periodically;
 *  state is kept in adapter->smartspeed.
 *
 **********************************************************************/
static void
lem_smartspeed(struct adapter *adapter)
{
        u16 phy_tmp;

        /* Only relevant with no link, an IGP PHY, and 1000FD advertised */
        if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
            adapter->hw.mac.autoneg == 0 ||
            (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
                return;

        if (adapter->smartspeed == 0) {
                /* If Master/Slave config fault is asserted twice,
                 * we assume back-to-back */
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
                if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
                        return;
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
                if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
                        e1000_read_phy_reg(&adapter->hw,
                            PHY_1000T_CTRL, &phy_tmp);
                        /* Drop manual master/slave config and re-negotiate */
                        if(phy_tmp & CR_1000T_MS_ENABLE) {
                                phy_tmp &= ~CR_1000T_MS_ENABLE;
                                e1000_write_phy_reg(&adapter->hw,
                                    PHY_1000T_CTRL, phy_tmp);
                                adapter->smartspeed++;
                                if(adapter->hw.mac.autoneg &&
                                   !e1000_copper_link_autoneg(&adapter->hw) &&
                                   !e1000_read_phy_reg(&adapter->hw,
                                    PHY_CONTROL, &phy_tmp)) {
                                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
                                                    MII_CR_RESTART_AUTO_NEG);
                                        e1000_write_phy_reg(&adapter->hw,
                                            PHY_CONTROL, phy_tmp);
                                }
                        }
                }
                return;
        } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
                /* If still no link, perhaps using 2/3 pair cable */
                /* Restore manual master/slave mode and try again */
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
                phy_tmp |= CR_1000T_MS_ENABLE;
                e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
                if(adapter->hw.mac.autoneg &&
                   !e1000_copper_link_autoneg(&adapter->hw) &&
                   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
                                    MII_CR_RESTART_AUTO_NEG);
                        e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
                }
        }
        /* Restart process after EM_SMARTSPEED_MAX iterations */
        if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
                adapter->smartspeed = 0;
}
2486
2487
2488 /*
2489  * Manage DMA'able memory.
2490  */
2491 static void
2492 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2493 {
2494         if (error)
2495                 return;
2496         *(bus_addr_t *) arg = segs[0].ds_addr;
2497 }
2498
2499 static int
2500 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2501         struct em_dma_alloc *dma, int mapflags)
2502 {
2503         int error;
2504
2505         error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2506                                 EM_DBA_ALIGN, 0,        /* alignment, bounds */
2507                                 BUS_SPACE_MAXADDR,      /* lowaddr */
2508                                 BUS_SPACE_MAXADDR,      /* highaddr */
2509                                 NULL, NULL,             /* filter, filterarg */
2510                                 size,                   /* maxsize */
2511                                 1,                      /* nsegments */
2512                                 size,                   /* maxsegsize */
2513                                 0,                      /* flags */
2514                                 NULL,                   /* lockfunc */
2515                                 NULL,                   /* lockarg */
2516                                 &dma->dma_tag);
2517         if (error) {
2518                 device_printf(adapter->dev,
2519                     "%s: bus_dma_tag_create failed: %d\n",
2520                     __func__, error);
2521                 goto fail_0;
2522         }
2523
2524         error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2525             BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2526         if (error) {
2527                 device_printf(adapter->dev,
2528                     "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2529                     __func__, (uintmax_t)size, error);
2530                 goto fail_2;
2531         }
2532
2533         dma->dma_paddr = 0;
2534         error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2535             size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2536         if (error || dma->dma_paddr == 0) {
2537                 device_printf(adapter->dev,
2538                     "%s: bus_dmamap_load failed: %d\n",
2539                     __func__, error);
2540                 goto fail_3;
2541         }
2542
2543         return (0);
2544
2545 fail_3:
2546         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2547 fail_2:
2548         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2549         bus_dma_tag_destroy(dma->dma_tag);
2550 fail_0:
2551         dma->dma_map = NULL;
2552         dma->dma_tag = NULL;
2553
2554         return (error);
2555 }
2556
2557 static void
2558 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2559 {
2560         if (dma->dma_tag == NULL)
2561                 return;
2562         if (dma->dma_map != NULL) {
2563                 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2564                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2565                 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2566                 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2567                 dma->dma_map = NULL;
2568         }
2569         bus_dma_tag_destroy(dma->dma_tag);
2570         dma->dma_tag = NULL;
2571 }
2572
2573
/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire.
 *
 *  Creates the TX mbuf DMA tag, the tx_buffer array, and one DMA map
 *  per descriptor.  Returns 0 on success or a bus_dma/ENOMEM error;
 *  on failure everything allocated so far is torn down.
 *
 **********************************************************************/
static int
lem_allocate_transmit_structures(struct adapter *adapter)
{
        device_t dev = adapter->dev;
        struct em_buffer *tx_buffer;
        int error;

        /*
         * Create DMA tags for tx descriptors
         */
        if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
                                1, 0,                   /* alignment, bounds */
                                BUS_SPACE_MAXADDR,      /* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                NULL, NULL,             /* filter, filterarg */
                                MCLBYTES * EM_MAX_SCATTER,      /* maxsize */
                                EM_MAX_SCATTER,         /* nsegments */
                                MCLBYTES,               /* maxsegsize */
                                0,                      /* flags */
                                NULL,                   /* lockfunc */
                                NULL,                   /* lockarg */
                                &adapter->txtag)) != 0) {
                device_printf(dev, "Unable to allocate TX DMA tag\n");
                goto fail;
        }

        /* One em_buffer per TX descriptor, zeroed */
        adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
            adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
        if (adapter->tx_buffer_area == NULL) {
                device_printf(dev, "Unable to allocate tx_buffer memory\n");
                error = ENOMEM;
                goto fail;
        }

        /* Create the descriptor buffer dma maps */
        for (int i = 0; i < adapter->num_tx_desc; i++) {
                tx_buffer = &adapter->tx_buffer_area[i];
                error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
                if (error != 0) {
                        device_printf(dev, "Unable to create TX DMA map\n");
                        goto fail;
                }
                /* -1 marks "no packet ends at this descriptor" */
                tx_buffer->next_eop = -1;
        }

        return (0);
fail:
        /* Safe on partial allocation: frees whatever was set up */
        lem_free_transmit_structures(adapter);
        return (error);
}
2630
2631 /*********************************************************************
2632  *
2633  *  (Re)Initialize transmit structures.
2634  *
2635  **********************************************************************/
2636 static void
2637 lem_setup_transmit_structures(struct adapter *adapter)
2638 {
2639         struct em_buffer *tx_buffer;
2640
2641         /* Clear the old ring contents */
2642         bzero(adapter->tx_desc_base,
2643             (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2644
2645         /* Free any existing TX buffers */
2646         for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2647                 tx_buffer = &adapter->tx_buffer_area[i];
2648                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2649                     BUS_DMASYNC_POSTWRITE);
2650                 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2651                 m_freem(tx_buffer->m_head);
2652                 tx_buffer->m_head = NULL;
2653                 tx_buffer->next_eop = -1;
2654         }
2655
2656         /* Reset state */
2657         adapter->next_avail_tx_desc = 0;
2658         adapter->next_tx_to_clean = 0;
2659         adapter->num_tx_desc_avail = adapter->num_tx_desc;
2660
2661         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2662             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2663
2664         return;
2665 }
2666
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 *  Programs the TX descriptor ring base/length, head/tail pointers,
 *  inter-packet gap, interrupt delays, and finally TCTL (which turns
 *  the transmitter on).
 *
 **********************************************************************/
static void
lem_initialize_transmit_unit(struct adapter *adapter)
{
        u32     tctl, tipg = 0;
        u64     bus_addr;

         INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
        /* Setup the Base and Length of the Tx Descriptor Ring */
        bus_addr = adapter->txdma.dma_paddr;
        E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
            adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
        E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
            (u32)(bus_addr >> 32));
        E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
            (u32)bus_addr);
        /* Setup the HW Tx Head and Tail descriptor pointers */
        E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
        E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);

        HW_DEBUGOUT2("Base = %x, Length = %x\n",
            E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
            E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));

        /* Set the default values for the Tx Inter Packet Gap timer */
        switch (adapter->hw.mac.type) {
        case e1000_82542:
                tipg = DEFAULT_82542_TIPG_IPGT;
                tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                break;
        default:
                /* Fiber/serdes and copper use different IPGT defaults */
                if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
                    (adapter->hw.phy.media_type ==
                    e1000_media_type_internal_serdes))
                        tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
                else
                        tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
                tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
        }

        E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
        E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
        /* TADV (absolute interrupt delay) exists on 82540 and newer */
        if(adapter->hw.mac.type >= e1000_82540)
                E1000_WRITE_REG(&adapter->hw, E1000_TADV,
                    adapter->tx_abs_int_delay.value);

        /* Program the Transmit Control Register */
        tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
        tctl &= ~E1000_TCTL_CT;
        tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
                   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

        /* This write will effectively turn on the transmit unit. */
        E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

        /* Setup Transmit Descriptor Base Settings */   
        adapter->txd_cmd = E1000_TXD_CMD_IFCS;

        /* Enable descriptor interrupt-delay only if a delay is configured */
        if (adapter->tx_int_delay.value > 0)
                adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}
2734
2735 /*********************************************************************
2736  *
2737  *  Free all transmit related data structures.
2738  *
2739  **********************************************************************/
2740 static void
2741 lem_free_transmit_structures(struct adapter *adapter)
2742 {
2743         struct em_buffer *tx_buffer;
2744
2745         INIT_DEBUGOUT("free_transmit_structures: begin");
2746
2747         if (adapter->tx_buffer_area != NULL) {
2748                 for (int i = 0; i < adapter->num_tx_desc; i++) {
2749                         tx_buffer = &adapter->tx_buffer_area[i];
2750                         if (tx_buffer->m_head != NULL) {
2751                                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2752                                     BUS_DMASYNC_POSTWRITE);
2753                                 bus_dmamap_unload(adapter->txtag,
2754                                     tx_buffer->map);
2755                                 m_freem(tx_buffer->m_head);
2756                                 tx_buffer->m_head = NULL;
2757                         } else if (tx_buffer->map != NULL)
2758                                 bus_dmamap_unload(adapter->txtag,
2759                                     tx_buffer->map);
2760                         if (tx_buffer->map != NULL) {
2761                                 bus_dmamap_destroy(adapter->txtag,
2762                                     tx_buffer->map);
2763                                 tx_buffer->map = NULL;
2764                         }
2765                 }
2766         }
2767         if (adapter->tx_buffer_area != NULL) {
2768                 free(adapter->tx_buffer_area, M_DEVBUF);
2769                 adapter->tx_buffer_area = NULL;
2770         }
2771         if (adapter->txtag != NULL) {
2772                 bus_dma_tag_destroy(adapter->txtag);
2773                 adapter->txtag = NULL;
2774         }
2775 #if __FreeBSD_version >= 800000
2776         if (adapter->br != NULL)
2777                 buf_ring_free(adapter->br, M_DEVBUF);
2778 #endif
2779 }
2780
/*********************************************************************
 *
 *  The offload context needs to be set when we transfer the first
 *  packet of a particular protocol (TCP/UDP). This routine has been
 *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
 *
 *  Added back the old method of keeping the current context type
 *  and not setting if unnecessary, as this is reported to be a
 *  big performance win.  -jfv
 *
 *  On return, *txd_upper/*txd_lower carry the POPTS/CMD bits for the
 *  data descriptors, and (when a new context was needed) one context
 *  descriptor has been consumed from the ring.
 **********************************************************************/
static void
lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
    u32 *txd_upper, u32 *txd_lower)
{
        struct e1000_context_desc *TXD = NULL;
        struct em_buffer *tx_buffer;
        struct ether_vlan_header *eh;
        struct ip *ip = NULL;
        struct ip6_hdr *ip6;
        int curr_txd, ehdrlen;
        u32 cmd, hdr_len, ip_hlen;
        u16 etype;
        u8 ipproto;


        cmd = hdr_len = ipproto = 0;
        *txd_upper = *txd_lower = 0;
        curr_txd = adapter->next_avail_tx_desc;

        /*
         * Determine where frame payload starts.
         * Jump over vlan headers if already present,
         * helpful for QinQ too.
         * NOTE(review): assumes the ethernet/IP headers are contiguous
         * in the first mbuf -- presumably guaranteed by the caller;
         * confirm against lem_xmit().
         */
        eh = mtod(mp, struct ether_vlan_header *);
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                etype = ntohs(eh->evl_proto);
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        } else {
                etype = ntohs(eh->evl_encap_proto);
                ehdrlen = ETHER_HDR_LEN;
        }

        /*
         * We only support TCP/UDP for IPv4 and IPv6 for the moment.
         * TODO: Support SCTP too when it hits the tree.
         */
        switch (etype) {
        case ETHERTYPE_IP:
                ip = (struct ip *)(mp->m_data + ehdrlen);
                ip_hlen = ip->ip_hl << 2;

                /* Setup of IP header checksum. */
                if (mp->m_pkthdr.csum_flags & CSUM_IP) {
                        /*
                         * Start offset for header checksum calculation.
                         * End offset for header checksum calculation.
                         * Offset of place to put the checksum.
                         */
                        TXD = (struct e1000_context_desc *)
                            &adapter->tx_desc_base[curr_txd];
                        TXD->lower_setup.ip_fields.ipcss = ehdrlen;
                        TXD->lower_setup.ip_fields.ipcse =
                            htole16(ehdrlen + ip_hlen);
                        TXD->lower_setup.ip_fields.ipcso =
                            ehdrlen + offsetof(struct ip, ip_sum);
                        cmd |= E1000_TXD_CMD_IP;
                        *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
                }

                hdr_len = ehdrlen + ip_hlen;
                ipproto = ip->ip_p;

                break;
        case ETHERTYPE_IPV6:
                ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
                ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */

                /* IPv6 doesn't have a header checksum. */

                hdr_len = ehdrlen + ip_hlen;
                ipproto = ip6->ip6_nxt;
                break;

        default:
                /* Unsupported ethertype: no offload requested/possible */
                return;
        }

        switch (ipproto) {
        case IPPROTO_TCP:
                if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
                        *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
                        *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
                        /* no need for context if already set */
                        if (adapter->last_hw_offload == CSUM_TCP)
                                return;
                        adapter->last_hw_offload = CSUM_TCP;
                        /*
                         * Start offset for payload checksum calculation.
                         * End offset for payload checksum calculation.
                         * Offset of place to put the checksum.
                         */
                        TXD = (struct e1000_context_desc *)
                            &adapter->tx_desc_base[curr_txd];
                        TXD->upper_setup.tcp_fields.tucss = hdr_len;
                        TXD->upper_setup.tcp_fields.tucse = htole16(0);
                        TXD->upper_setup.tcp_fields.tucso =
                            hdr_len + offsetof(struct tcphdr, th_sum);
                        cmd |= E1000_TXD_CMD_TCP;
                }
                break;
        case IPPROTO_UDP:
        {
                if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
                        *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
                        *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
                        /* no need for context if already set */
                        if (adapter->last_hw_offload == CSUM_UDP)
                                return;
                        adapter->last_hw_offload = CSUM_UDP;
                        /*
                         * Start offset for header checksum calculation.
                         * End offset for header checksum calculation.
                         * Offset of place to put the checksum.
                         */
                        TXD = (struct e1000_context_desc *)
                            &adapter->tx_desc_base[curr_txd];
                        TXD->upper_setup.tcp_fields.tucss = hdr_len;
                        TXD->upper_setup.tcp_fields.tucse = htole16(0);
                        TXD->upper_setup.tcp_fields.tucso =
                            hdr_len + offsetof(struct udphdr, uh_sum);
                }
                /* Fall Thru */
        }
        default:
                break;
        }

        /* No context descriptor was written -- nothing to commit */
        if (TXD == NULL)
                return;
        TXD->tcp_seg_setup.data = htole32(0);
        TXD->cmd_and_length =
            htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
        /* The context descriptor carries no mbuf and ends no packet */
        tx_buffer = &adapter->tx_buffer_area[curr_txd];
        tx_buffer->m_head = NULL;
        tx_buffer->next_eop = -1;

        if (++curr_txd == adapter->num_tx_desc)
                curr_txd = 0;

        /* Account for the descriptor consumed by the context setup */
        adapter->num_tx_desc_avail--;
        adapter->next_avail_tx_desc = curr_txd;
}
2934
2935
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 *  Walks from next_tx_to_clean, packet by packet: a packet is
 *  reclaimable once the descriptor at its EOP index has DD set.
 *  Must be called with the TX lock held.
 *
 **********************************************************************/
static void
lem_txeof(struct adapter *adapter)
{
        int first, last, done, num_avail;
        struct em_buffer *tx_buffer;
        struct e1000_tx_desc   *tx_desc, *eop_desc;
        struct ifnet   *ifp = adapter->ifp;

        EM_TX_LOCK_ASSERT(adapter);

        /* Ring already fully reclaimed: nothing to do */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        first = adapter->next_tx_to_clean;
        tx_desc = &adapter->tx_desc_base[first];
        tx_buffer = &adapter->tx_buffer_area[first];
        last = tx_buffer->next_eop;
        eop_desc = &adapter->tx_desc_base[last];

        /*
         * What this does is get the index of the
         * first descriptor AFTER the EOP of the 
         * first packet, that way we can do the
         * simple comparison on the inner while loop.
         */
        if (++last == adapter->num_tx_desc)
                last = 0;
        done = last;

        /* Pull the latest status bits written by the hardware */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);

        while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
                /* We clean the range of the packet */
                while (first != done) {
                        tx_desc->upper.data = 0;
                        tx_desc->lower.data = 0;
                        tx_desc->buffer_addr = 0;
                        ++num_avail;

                        /* Only descriptors that carry an mbuf hold a
                         * DMA mapping to release */
                        if (tx_buffer->m_head) {
                                ifp->if_opackets++;
                                bus_dmamap_sync(adapter->txtag,
                                    tx_buffer->map,
                                    BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(adapter->txtag,
                                    tx_buffer->map);

                                m_freem(tx_buffer->m_head);
                                tx_buffer->m_head = NULL;
                        }
                        tx_buffer->next_eop = -1;
                        /* Progress made: pet the watchdog */
                        adapter->watchdog_time = ticks;

                        if (++first == adapter->num_tx_desc)
                                first = 0;

                        tx_buffer = &adapter->tx_buffer_area[first];
                        tx_desc = &adapter->tx_desc_base[first];
                }
                /* See if we can continue to the next packet */
                last = tx_buffer->next_eop;
                if (last != -1) {
                        eop_desc = &adapter->tx_desc_base[last];
                        /* Get new done point */
                        if (++last == adapter->num_tx_desc) last = 0;
                        done = last;
                } else
                        break;
        }
        /* Hand the cleared descriptors back to the hardware */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->next_tx_to_clean = first;
        adapter->num_tx_desc_avail = num_avail;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to
         * tell the stack that it is OK to send packets.
         * If there are no pending descriptors, clear the watchdog.
         */
        if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {                
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
                        adapter->watchdog_check = FALSE;
                        return;
                } 
        }
}
3033
3034 /*********************************************************************
3035  *
3036  *  When Link is lost sometimes there is work still in the TX ring
3037  *  which may result in a watchdog, rather than allow that we do an
3038  *  attempted cleanup and then reinit here. Note that this has been
 *  seen mostly with fiber adapters.
3040  *
3041  **********************************************************************/
3042 static void
3043 lem_tx_purge(struct adapter *adapter)
3044 {
3045         if ((!adapter->link_active) && (adapter->watchdog_check)) {
3046                 EM_TX_LOCK(adapter);
3047                 lem_txeof(adapter);
3048                 EM_TX_UNLOCK(adapter);
3049                 if (adapter->watchdog_check) /* Still outstanding? */
3050                         lem_init_locked(adapter);
3051         }
3052 }
3053
3054 /*********************************************************************
3055  *
3056  *  Get a buffer from system mbuf buffer pool.
3057  *
3058  **********************************************************************/
static int
lem_get_buf(struct adapter *adapter, int i)
{
        struct mbuf             *m;
        bus_dma_segment_t       segs[1];
        bus_dmamap_t            map;
        struct em_buffer        *rx_buffer;
        int                     error, nsegs;

        /* Allocate a fresh cluster mbuf; count failures for statistics. */
        m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL) {
                adapter->mbuf_cluster_failed++;
                return (ENOBUFS);
        }
        m->m_len = m->m_pkthdr.len = MCLBYTES;

        /*
         * If the frame fits in a cluster with room to spare, shift the
         * start of the buffer by ETHER_ALIGN so the payload following
         * the 14-byte ethernet header lands on a 32-bit boundary.
         */
        if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
                m_adj(m, ETHER_ALIGN);

        /*
         * Using memory from the mbuf cluster pool, invoke the
         * bus_dma machinery to arrange the memory mapping.  The load
         * goes into the spare map so that a failure here leaves ring
         * slot i completely untouched.
         */
        error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
            adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
                m_free(m);
                return (error);
        }

        /* If nsegs is wrong then the stack is corrupt. */
        KASSERT(nsegs == 1, ("Too many segments returned!"));

        /* Tear down the previous mapping for this slot, if any. */
        rx_buffer = &adapter->rx_buffer_area[i];
        if (rx_buffer->m_head != NULL)
                bus_dmamap_unload(adapter->rxtag, rx_buffer->map);

        /*
         * Swap the freshly-loaded spare map into the slot; the slot's
         * old (now unloaded) map becomes the new spare.
         */
        map = rx_buffer->map;
        rx_buffer->map = adapter->rx_sparemap;
        adapter->rx_sparemap = map;
        bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
        rx_buffer->m_head = m;

        /* Point the descriptor at the new buffer's bus address. */
        adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
        return (0);
}
3105
3106 /*********************************************************************
3107  *
3108  *  Allocate memory for rx_buffer structures. Since we use one
3109  *  rx_buffer per received packet, the maximum number of rx_buffer's
3110  *  that we'll need is equal to the number of receive descriptors
3111  *  that we've allocated.
3112  *
3113  **********************************************************************/
3114 static int
3115 lem_allocate_receive_structures(struct adapter *adapter)
3116 {
3117         device_t dev = adapter->dev;
3118         struct em_buffer *rx_buffer;
3119         int i, error;
3120
3121         adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3122             adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3123         if (adapter->rx_buffer_area == NULL) {
3124                 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3125                 return (ENOMEM);
3126         }
3127
3128         error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3129                                 1, 0,                   /* alignment, bounds */
3130                                 BUS_SPACE_MAXADDR,      /* lowaddr */
3131                                 BUS_SPACE_MAXADDR,      /* highaddr */
3132                                 NULL, NULL,             /* filter, filterarg */
3133                                 MCLBYTES,               /* maxsize */
3134                                 1,                      /* nsegments */
3135                                 MCLBYTES,               /* maxsegsize */
3136                                 0,                      /* flags */
3137                                 NULL,                   /* lockfunc */
3138                                 NULL,                   /* lockarg */
3139                                 &adapter->rxtag);
3140         if (error) {
3141                 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3142                     __func__, error);
3143                 goto fail;
3144         }
3145
3146         /* Create the spare map (used by getbuf) */
3147         error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3148              &adapter->rx_sparemap);
3149         if (error) {
3150                 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3151                     __func__, error);
3152                 goto fail;
3153         }
3154
3155         rx_buffer = adapter->rx_buffer_area;
3156         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3157                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3158                     &rx_buffer->map);
3159                 if (error) {
3160                         device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3161                             __func__, error);
3162                         goto fail;
3163                 }
3164         }
3165
3166         return (0);
3167
3168 fail:
3169         lem_free_receive_structures(adapter);
3170         return (error);
3171 }
3172
3173 /*********************************************************************
3174  *
3175  *  (Re)initialize receive structures.
3176  *
3177  **********************************************************************/
3178 static int
3179 lem_setup_receive_structures(struct adapter *adapter)
3180 {
3181         struct em_buffer *rx_buffer;
3182         int i, error;
3183
3184         /* Reset descriptor ring */
3185         bzero(adapter->rx_desc_base,
3186             (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3187
3188         /* Free current RX buffers. */
3189         rx_buffer = adapter->rx_buffer_area;
3190         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3191                 if (rx_buffer->m_head != NULL) {
3192                         bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3193                             BUS_DMASYNC_POSTREAD);
3194                         bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3195                         m_freem(rx_buffer->m_head);
3196                         rx_buffer->m_head = NULL;
3197                 }
3198         }
3199
3200         /* Allocate new ones. */
3201         for (i = 0; i < adapter->num_rx_desc; i++) {
3202                 error = lem_get_buf(adapter, i);
3203                 if (error)
3204                         return (error);
3205         }
3206
3207         /* Setup our descriptor pointers */
3208         adapter->next_rx_desc_to_check = 0;
3209         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3210             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3211
3212         return (0);
3213 }
3214
3215 /*********************************************************************
3216  *
3217  *  Enable receive unit.
3218  *
3219  **********************************************************************/
3220 #define MAX_INTS_PER_SEC        8000
3221 #define DEFAULT_ITR          1000000000/(MAX_INTS_PER_SEC * 256)
3222
static void
lem_initialize_receive_unit(struct adapter *adapter)
{
        struct ifnet    *ifp = adapter->ifp;
        u64     bus_addr;
        u32     rctl, rxcsum;

        INIT_DEBUGOUT("lem_initialize_receive_unit: begin");

        /*
         * Make sure receives are disabled while setting
         * up the descriptor ring
         */
        rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
        E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

        /* Interrupt moderation registers exist on 82540 and later. */
        if (adapter->hw.mac.type >= e1000_82540) {
                E1000_WRITE_REG(&adapter->hw, E1000_RADV,
                    adapter->rx_abs_int_delay.value);
                /*
                 * Set the interrupt throttling rate. Value is calculated
                 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
                 */
                E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
        }

        /*
        ** When using MSIX interrupts we need to throttle
        ** using the EITR register (82574 only)
        */
        if (adapter->msix)
                for (int i = 0; i < 4; i++)
                        E1000_WRITE_REG(&adapter->hw,
                            E1000_EITR_82574(i), DEFAULT_ITR);

        /* Disable accelerated acknowledge (82574 only) */
        if (adapter->hw.mac.type == e1000_82574)
                E1000_WRITE_REG(&adapter->hw,
                    E1000_RFCTL, E1000_RFCTL_ACK_DIS);

        /* Setup the Base and Length of the Rx Descriptor Ring */
        bus_addr = adapter->rxdma.dma_paddr;
        E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
            adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
        E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
            (u32)(bus_addr >> 32));
        E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
            (u32)bus_addr);

        /* Setup the Receive Control Register */
        rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
        rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
                   E1000_RCTL_RDMTS_HALF |
                   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

        /* Make sure VLAN Filters are off */
        rctl &= ~E1000_RCTL_VFE;

        /* Accept bad packets only while the 82543 TBI workaround is on. */
        if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
                rctl |= E1000_RCTL_SBP;
        else
                rctl &= ~E1000_RCTL_SBP;

        /* Tell the hardware the receive buffer size in use. */
        switch (adapter->rx_buffer_len) {
        default:
        case 2048:
                rctl |= E1000_RCTL_SZ_2048;
                break;
        case 4096:
                rctl |= E1000_RCTL_SZ_4096 |
                    E1000_RCTL_BSEX | E1000_RCTL_LPE;
                break;
        case 8192:
                rctl |= E1000_RCTL_SZ_8192 |
                    E1000_RCTL_BSEX | E1000_RCTL_LPE;
                break;
        case 16384:
                rctl |= E1000_RCTL_SZ_16384 |
                    E1000_RCTL_BSEX | E1000_RCTL_LPE;
                break;
        }

        /* Long Packet Enable tracks the configured MTU. */
        if (ifp->if_mtu > ETHERMTU)
                rctl |= E1000_RCTL_LPE;
        else
                rctl &= ~E1000_RCTL_LPE;

        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
        if ((adapter->hw.mac.type >= e1000_82543) &&
            (ifp->if_capenable & IFCAP_RXCSUM)) {
                rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
                rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
                E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
        }

        /* Enable Receives */
        E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

        /*
         * Setup the HW Rx Head and
         * Tail Descriptor Pointers
         */
        E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
        E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);

        return;
}
3330
3331 /*********************************************************************
3332  *
3333  *  Free receive related data structures.
3334  *
3335  **********************************************************************/
3336 static void
3337 lem_free_receive_structures(struct adapter *adapter)
3338 {
3339         struct em_buffer *rx_buffer;
3340         int i;
3341
3342         INIT_DEBUGOUT("free_receive_structures: begin");
3343
3344         if (adapter->rx_sparemap) {
3345                 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3346                 adapter->rx_sparemap = NULL;
3347         }
3348
3349         /* Cleanup any existing buffers */
3350         if (adapter->rx_buffer_area != NULL) {
3351                 rx_buffer = adapter->rx_buffer_area;
3352                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3353                         if (rx_buffer->m_head != NULL) {
3354                                 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3355                                     BUS_DMASYNC_POSTREAD);
3356                                 bus_dmamap_unload(adapter->rxtag,
3357                                     rx_buffer->map);
3358                                 m_freem(rx_buffer->m_head);
3359                                 rx_buffer->m_head = NULL;
3360                         } else if (rx_buffer->map != NULL)
3361                                 bus_dmamap_unload(adapter->rxtag,
3362                                     rx_buffer->map);
3363                         if (rx_buffer->map != NULL) {
3364                                 bus_dmamap_destroy(adapter->rxtag,
3365                                     rx_buffer->map);
3366                                 rx_buffer->map = NULL;
3367                         }
3368                 }
3369         }
3370
3371         if (adapter->rx_buffer_area != NULL) {
3372                 free(adapter->rx_buffer_area, M_DEVBUF);
3373                 adapter->rx_buffer_area = NULL;
3374         }
3375
3376         if (adapter->rxtag != NULL) {
3377                 bus_dma_tag_destroy(adapter->rxtag);
3378                 adapter->rxtag = NULL;
3379         }
3380 }
3381
3382 /*********************************************************************
3383  *
3384  *  This routine executes in interrupt context. It replenishes
3385  *  the mbufs in the descriptor and sends data which has been
3386  *  dma'ed into host memory to upper layer.
3387  *
3388  *  We loop at most count times if count is > 0, or until done if
3389  *  count < 0.
3390  *  
3391  *  For polling we also now return the number of cleaned packets
3392  *********************************************************************/
3393 static bool
3394 lem_rxeof(struct adapter *adapter, int count, int *done)
3395 {
3396         struct ifnet    *ifp = adapter->ifp;;
3397         struct mbuf     *mp;
3398         u8              status = 0, accept_frame = 0, eop = 0;
3399         u16             len, desc_len, prev_len_adj;
3400         int             i, rx_sent = 0;
3401         struct e1000_rx_desc   *current_desc;
3402
3403         EM_RX_LOCK(adapter);
3404         i = adapter->next_rx_desc_to_check;
3405         current_desc = &adapter->rx_desc_base[i];
3406         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3407             BUS_DMASYNC_POSTREAD);
3408
3409         if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3410                 if (done != NULL)
3411                         *done = rx_sent;
3412                 EM_RX_UNLOCK(adapter);
3413                 return (FALSE);
3414         }
3415
3416         while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3417                 struct mbuf *m = NULL;
3418
3419                 status = current_desc->status;
3420                 if ((status & E1000_RXD_STAT_DD) == 0)
3421                         break;
3422
3423                 mp = adapter->rx_buffer_area[i].m_head;
3424                 /*
3425                  * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3426                  * needs to access the last received byte in the mbuf.
3427                  */
3428                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3429                     BUS_DMASYNC_POSTREAD);
3430
3431                 accept_frame = 1;
3432                 prev_len_adj = 0;
3433                 desc_len = le16toh(current_desc->length);
3434                 if (status & E1000_RXD_STAT_EOP) {
3435                         count--;
3436                         eop = 1;
3437                         if (desc_len < ETHER_CRC_LEN) {
3438                                 len = 0;
3439                                 prev_len_adj = ETHER_CRC_LEN - desc_len;
3440                         } else
3441                                 len = desc_len - ETHER_CRC_LEN;
3442                 } else {
3443                         eop = 0;
3444                         len = desc_len;
3445                 }
3446
3447                 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3448                         u8      last_byte;
3449                         u32     pkt_len = desc_len;
3450
3451                         if (adapter->fmp != NULL)
3452                                 pkt_len += adapter->fmp->m_pkthdr.len;
3453
3454                         last_byte = *(mtod(mp, caddr_t) + desc_len - 1);                        
3455                         if (TBI_ACCEPT(&adapter->hw, status,
3456                             current_desc->errors, pkt_len, last_byte,
3457                             adapter->min_frame_size, adapter->max_frame_size)) {
3458                                 e1000_tbi_adjust_stats_82543(&adapter->hw,
3459                                     &adapter->stats, pkt_len,
3460                                     adapter->hw.mac.addr,
3461                                     adapter->max_frame_size);
3462                                 if (len > 0)
3463                                         len--;
3464                         } else
3465                                 accept_frame = 0;
3466                 }
3467
3468                 if (accept_frame) {
3469                         if (lem_get_buf(adapter, i) != 0) {
3470                                 ifp->if_iqdrops++;
3471                                 goto discard;
3472                         }
3473
3474                         /* Assign correct length to the current fragment */
3475                         mp->m_len = len;
3476
3477                         if (adapter->fmp == NULL) {
3478                                 mp->m_pkthdr.len = len;
3479                                 adapter->fmp = mp; /* Store the first mbuf */
3480                                 adapter->lmp = mp;
3481                         } else {
3482                                 /* Chain mbuf's together */
3483                                 mp->m_flags &= ~M_PKTHDR;
3484                                 /*
3485                                  * Adjust length of previous mbuf in chain if
3486                                  * we received less than 4 bytes in the last
3487                                  * descriptor.
3488                                  */
3489                                 if (prev_len_adj > 0) {
3490                                         adapter->lmp->m_len -= prev_len_adj;
3491                                         adapter->fmp->m_pkthdr.len -=
3492                                             prev_len_adj;
3493                                 }
3494                                 adapter->lmp->m_next = mp;
3495                                 adapter->lmp = adapter->lmp->m_next;
3496                                 adapter->fmp->m_pkthdr.len += len;
3497                         }
3498
3499                         if (eop) {
3500                                 adapter->fmp->m_pkthdr.rcvif = ifp;
3501                                 ifp->if_ipackets++;
3502                                 lem_receive_checksum(adapter, current_desc,
3503                                     adapter->fmp);
3504 #ifndef __NO_STRICT_ALIGNMENT
3505                                 if (adapter->max_frame_size >
3506                                     (MCLBYTES - ETHER_ALIGN) &&
3507                                     lem_fixup_rx(adapter) != 0)
3508                                         goto skip;
3509 #endif
3510                                 if (status & E1000_RXD_STAT_VP) {
3511                                         adapter->fmp->m_pkthdr.ether_vtag =
3512                                             (le16toh(current_desc->special) &
3513                                             E1000_RXD_SPC_VLAN_MASK);
3514                                         adapter->fmp->m_flags |= M_VLANTAG;
3515                                 }
3516 #ifndef __NO_STRICT_ALIGNMENT
3517 skip:
3518 #endif
3519                                 m = adapter->fmp;
3520                                 adapter->fmp = NULL;
3521                                 adapter->lmp = NULL;
3522                         }
3523                 } else {
3524                         ifp->if_ierrors++;
3525 discard:
3526                         /* Reuse loaded DMA map and just update mbuf chain */
3527                         mp = adapter->rx_buffer_area[i].m_head;
3528                         mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3529                         mp->m_data = mp->m_ext.ext_buf;
3530                         mp->m_next = NULL;
3531                         if (adapter->max_frame_size <=
3532                             (MCLBYTES - ETHER_ALIGN))
3533                                 m_adj(mp, ETHER_ALIGN);
3534                         if (adapter->fmp != NULL) {
3535                                 m_freem(adapter->fmp);
3536                                 adapter->fmp = NULL;
3537                                 adapter->lmp = NULL;
3538                         }
3539                         m = NULL;
3540                 }
3541
3542                 /* Zero out the receive descriptors status. */
3543                 current_desc->status = 0;
3544                 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3545                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3546
3547                 /* Advance our pointers to the next descriptor. */
3548                 if (++i == adapter->num_rx_desc)
3549                         i = 0;
3550                 /* Call into the stack */
3551                 if (m != NULL) {
3552                         adapter->next_rx_desc_to_check = i;
3553                         EM_RX_UNLOCK(adapter);
3554                         (*ifp->if_input)(ifp, m);
3555                         EM_RX_LOCK(adapter);
3556                         rx_sent++;
3557                         i = adapter->next_rx_desc_to_check;
3558                 }
3559                 current_desc = &adapter->rx_desc_base[i];
3560         }
3561         adapter->next_rx_desc_to_check = i;
3562
3563         /* Advance the E1000's Receive Queue #0  "Tail Pointer". */
3564         if (--i < 0)
3565                 i = adapter->num_rx_desc - 1;
3566         E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3567         if (done != NULL)
3568                 *done = rx_sent;
3569         EM_RX_UNLOCK(adapter);
3570         return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
3571 }
3572
3573 #ifndef __NO_STRICT_ALIGNMENT
/*
 * When jumbo frames are enabled we should realign the entire payload on
 * architectures with strict alignment. This is a serious design mistake of the
 * 8254x as it nullifies the benefit of DMA operations. The 8254x only allows
 * the RX buffer size to be 2048/4096/8192/16384. What we really want is
 * 2048 - ETHER_ALIGN to align its payload. On architectures without strict
 * alignment restrictions the 8254x still performs unaligned memory accesses,
 * which would reduce performance too.
 * To avoid copying over an entire frame to align, we allocate a new mbuf and
 * copy the ethernet header to the new mbuf. The new mbuf is prepended into the
 * existing mbuf chain.
 *
 * Be aware, best performance of the 8254x is achieved only when jumbo frames
 * are not used at all on architectures with strict alignment.
 */
static int
lem_fixup_rx(struct adapter *adapter)
{
        struct mbuf *m, *n;
        int error;

        error = 0;
        m = adapter->fmp;
        if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
                /*
                 * Enough spare room in the cluster: slide the whole
                 * frame forward by ETHER_HDR_LEN (bcopy handles the
                 * overlap) and advance m_data to match, realigning
                 * the payload that follows the ethernet header.
                 */
                bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
                m->m_data += ETHER_HDR_LEN;
        } else {
                /*
                 * No room to slide: prepend a new mbuf that carries
                 * just the ethernet header and shift the original
                 * mbuf's data past the header it no longer owns.
                 */
                MGETHDR(n, M_DONTWAIT, MT_DATA);
                if (n != NULL) {
                        bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
                        m->m_data += ETHER_HDR_LEN;
                        m->m_len -= ETHER_HDR_LEN;
                        n->m_len = ETHER_HDR_LEN;
                        /* Move pkthdr (len, rcvif, flags) to the new head. */
                        M_MOVE_PKTHDR(n, m);
                        n->m_next = m;
                        adapter->fmp = n;
                } else {
                        /* Allocation failed: drop the whole frame. */
                        adapter->dropped_pkts++;
                        m_freem(adapter->fmp);
                        adapter->fmp = NULL;
                        error = ENOMEM;
                }
        }

        return (error);
}
3619 #endif
3620
3621 /*********************************************************************
3622  *
3623  *  Verify that the hardware indicated that the checksum is valid.
3624  *  Inform the stack about the status of checksum so that stack
3625  *  doesn't spend time verifying the checksum.
3626  *
3627  *********************************************************************/
3628 static void
3629 lem_receive_checksum(struct adapter *adapter,
3630             struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3631 {
3632         /* 82543 or newer only */
3633         if ((adapter->hw.mac.type < e1000_82543) ||
3634             /* Ignore Checksum bit is set */
3635             (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3636                 mp->m_pkthdr.csum_flags = 0;
3637                 return;
3638         }
3639
3640         if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3641                 /* Did it pass? */
3642                 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3643                         /* IP Checksum Good */
3644                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3645                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3646
3647                 } else {
3648                         mp->m_pkthdr.csum_flags = 0;
3649                 }
3650         }
3651
3652         if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3653                 /* Did it pass? */
3654                 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3655                         mp->m_pkthdr.csum_flags |=
3656                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3657                         mp->m_pkthdr.csum_data = htons(0xffff);
3658                 }
3659         }
3660 }
3661
3662 /*
 * This routine is run via a vlan
 * config EVENT
3665  */
3666 static void
3667 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3668 {
3669         struct adapter  *adapter = ifp->if_softc;
3670         u32             index, bit;
3671
3672         if (ifp->if_softc !=  arg)   /* Not our event */
3673                 return;
3674
3675         if ((vtag == 0) || (vtag > 4095))       /* Invalid ID */
3676                 return;
3677
3678         EM_CORE_LOCK(adapter);
3679         index = (vtag >> 5) & 0x7F;
3680         bit = vtag & 0x1F;
3681         adapter->shadow_vfta[index] |= (1 << bit);
3682         ++adapter->num_vlans;
3683         /* Re-init to load the changes */
3684         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3685                 lem_init_locked(adapter);
3686         EM_CORE_UNLOCK(adapter);
3687 }
3688
3689 /*
 * This routine is run via a vlan
 * unconfig EVENT
3692  */
3693 static void
3694 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3695 {
3696         struct adapter  *adapter = ifp->if_softc;
3697         u32             index, bit;
3698
3699         if (ifp->if_softc !=  arg)
3700                 return;
3701
3702         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3703                 return;
3704
3705         EM_CORE_LOCK(adapter);
3706         index = (vtag >> 5) & 0x7F;
3707         bit = vtag & 0x1F;
3708         adapter->shadow_vfta[index] &= ~(1 << bit);
3709         --adapter->num_vlans;
3710         /* Re-init to load the changes */
3711         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3712                 lem_init_locked(adapter);
3713         EM_CORE_UNLOCK(adapter);
3714 }
3715
3716 static void
3717 lem_setup_vlan_hw_support(struct adapter *adapter)
3718 {
3719         struct e1000_hw *hw = &adapter->hw;
3720         u32             reg;
3721
3722         /*
3723         ** We get here thru init_locked, meaning
3724         ** a soft reset, this has already cleared
3725         ** the VFTA and other state, so if there
3726         ** have been no vlan's registered do nothing.
3727         */
3728         if (adapter->num_vlans == 0)
3729                 return;
3730
3731         /*
3732         ** A soft reset zero's out the VFTA, so
3733         ** we need to repopulate it now.
3734         */
3735         for (int i = 0; i < EM_VFTA_SIZE; i++)
3736                 if (adapter->shadow_vfta[i] != 0)
3737                         E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3738                             i, adapter->shadow_vfta[i]);
3739
3740         reg = E1000_READ_REG(hw, E1000_CTRL);
3741         reg |= E1000_CTRL_VME;
3742         E1000_WRITE_REG(hw, E1000_CTRL, reg);
3743
3744         /* Enable the Filter Table */
3745         reg = E1000_READ_REG(hw, E1000_RCTL);
3746         reg &= ~E1000_RCTL_CFIEN;
3747         reg |= E1000_RCTL_VFE;
3748         E1000_WRITE_REG(hw, E1000_RCTL, reg);
3749
3750         /* Update the frame size */
3751         E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
3752             adapter->max_frame_size + VLAN_TAG_SIZE);
3753 }
3754
3755 static void
3756 lem_enable_intr(struct adapter *adapter)
3757 {
3758         struct e1000_hw *hw = &adapter->hw;
3759         u32 ims_mask = IMS_ENABLE_MASK;
3760
3761         if (adapter->msix) {
3762                 E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
3763                 ims_mask |= EM_MSIX_MASK;
3764         } 
3765         E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
3766 }
3767
3768 static void
3769 lem_disable_intr(struct adapter *adapter)
3770 {
3771         struct e1000_hw *hw = &adapter->hw;
3772
3773         if (adapter->msix)
3774                 E1000_WRITE_REG(hw, EM_EIAC, 0);
3775         E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
3776 }
3777
3778 /*
3779  * Bit of a misnomer, what this really means is
3780  * to enable OS management of the system... aka
3781  * to disable special hardware management features 
3782  */
3783 static void
3784 lem_init_manageability(struct adapter *adapter)
3785 {
3786         /* A shared code workaround */
3787         if (adapter->has_manage) {
3788                 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3789                 /* disable hardware interception of ARP */
3790                 manc &= ~(E1000_MANC_ARP_EN);
3791                 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3792         }
3793 }
3794
3795 /*
3796  * Give control back to hardware management
3797  * controller if there is one.
3798  */
3799 static void
3800 lem_release_manageability(struct adapter *adapter)
3801 {
3802         if (adapter->has_manage) {
3803                 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3804
3805                 /* re-enable hardware interception of ARP */
3806                 manc |= E1000_MANC_ARP_EN;
3807                 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3808         }
3809 }
3810
3811 /*
3812  * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3813  * For ASF and Pass Through versions of f/w this means
3814  * that the driver is loaded. For AMT version type f/w
3815  * this means that the network i/f is open.
3816  */
3817 static void
3818 lem_get_hw_control(struct adapter *adapter)
3819 {
3820         u32 ctrl_ext;
3821
3822         ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3823         E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3824             ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3825         return;
3826 }
3827
3828 /*
3829  * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3830  * For ASF and Pass Through versions of f/w this means that
3831  * the driver is no longer loaded. For AMT versions of the
3832  * f/w this means that the network i/f is closed.
3833  */
3834 static void
3835 lem_release_hw_control(struct adapter *adapter)
3836 {
3837         u32 ctrl_ext;
3838
3839         if (!adapter->has_manage)
3840                 return;
3841
3842         ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3843         E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3844             ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3845         return;
3846 }
3847
3848 static int
3849 lem_is_valid_ether_addr(u8 *addr)
3850 {
3851         char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3852
3853         if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3854                 return (FALSE);
3855         }
3856
3857         return (TRUE);
3858 }
3859
3860 /*
3861 ** Parse the interface capabilities with regard
3862 ** to both system management and wake-on-lan for
3863 ** later use.
3864 */
static void
lem_get_wakeup(device_t dev)
{
        struct adapter  *adapter = device_get_softc(dev);
        u16             eeprom_data = 0, device_id, apme_mask;

        /* Cache whether management (ASF/AMT) pass-through is enabled. */
        adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
        apme_mask = EM_EEPROM_APME;

        /*
         * Pick the NVM word that holds the APM-enable bit; both the
         * word and the bit position vary with the MAC generation.
         */
        switch (adapter->hw.mac.type) {
        case e1000_82542:
        case e1000_82543:
                /* No NVM wake bit on these parts; eeprom_data stays 0. */
                break;
        case e1000_82544:
                e1000_read_nvm(&adapter->hw,
                    NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
                apme_mask = EM_82544_APME;
                break;
        case e1000_82546:
        case e1000_82546_rev_3:
                /* Dual-port parts keep a control word per function. */
                if (adapter->hw.bus.func == 1) {
                        e1000_read_nvm(&adapter->hw,
                            NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
                        break;
                } else
                        e1000_read_nvm(&adapter->hw,
                            NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
                break;
        default:
                e1000_read_nvm(&adapter->hw,
                    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
                break;
        }
        /* APM set in NVM -> default to magic-packet and multicast wake. */
        if (eeprom_data & apme_mask)
                adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
        /*
         * We have the eeprom settings, now apply the special cases
         * where the eeprom may be wrong or the board won't support
         * wake on lan on a particular port
         */
        device_id = pci_get_device(dev);
        switch (device_id) {
        case E1000_DEV_ID_82546GB_PCIE:
                adapter->wol = 0;
                break;
        case E1000_DEV_ID_82546EB_FIBER:
        case E1000_DEV_ID_82546GB_FIBER:
                /* Wake events only supported on port A for dual fiber
                 * regardless of eeprom setting */
                if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
                    E1000_STATUS_FUNC_1)
                        adapter->wol = 0;
                break;
        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
                /* if quad port adapter, disable WoL on all but port A */
                if (global_quad_port_a != 0)
                        adapter->wol = 0;
                /* Reset for multiple quad port adapters */
                if (++global_quad_port_a == 4)
                        global_quad_port_a = 0;
                break;
        }
        return;
}
3929
3930
3931 /*
3932  * Enable PCI Wake On Lan capability
3933  */
static void
lem_enable_wakeup(device_t dev)
{
        struct adapter  *adapter = device_get_softc(dev);
        struct ifnet    *ifp = adapter->ifp;
        u32             pmc, ctrl, ctrl_ext, rctl;
        u16             status;

        /* No PCI power-management capability -> nothing we can do. */
        if ((pci_find_extcap(dev, PCIY_PMG, &pmc) != 0))
                return;

        /* Advertise the wakeup capability */
        ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
        ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
        E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
        /*
         * NOTE(review): WUC is written again below on the non-pchlan
         * path; this early PME_EN write looks redundant -- confirm
         * against the shared-code expectations before removing.
         */
        E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);

        /* Keep the laser running on Fiber adapters */
        if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
            adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
                ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
                ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
                E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
        }

        /*
        ** Determine type of Wakeup: note that wol
        ** is set with all bits on by default.
        */
        if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
                adapter->wol &= ~E1000_WUFC_MAG;

        if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
                adapter->wol &= ~E1000_WUFC_MC;
        else {
                /* Multicast wake needs promiscuous multicast receive. */
                rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
                rctl |= E1000_RCTL_MPE;
                E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
        }

        /* pchlan parts wake through the PHY; others through the MAC. */
        if (adapter->hw.mac.type == e1000_pchlan) {
                if (lem_enable_phy_wakeup(adapter))
                        return;
        } else {
                E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
                E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
        }


        /* Request PME */
        status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
        status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
        if (ifp->if_capenable & IFCAP_WOL)
                status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
        pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);

        return;
}
3992
3993 /*
3994 ** WOL in the newer chipset interfaces (pchlan)
3995 ** require thing to be copied into the phy
3996 */
static int
lem_enable_phy_wakeup(struct adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 mreg, ret = 0;
        u16 preg;

        /* copy MAC RARs to PHY RARs */
        for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
                mreg = E1000_READ_REG(hw, E1000_RAL(i));
                e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
                e1000_write_phy_reg(hw, BM_RAR_M(i),
                    (u16)((mreg >> 16) & 0xFFFF));
                mreg = E1000_READ_REG(hw, E1000_RAH(i));
                e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
                e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
                    (u16)((mreg >> 16) & 0xFFFF));
        }

        /* copy MAC MTA to PHY MTA */
        for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
                mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
                e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
                e1000_write_phy_reg(hw, BM_MTA(i) + 1,
                    (u16)((mreg >> 16) & 0xFFFF));
        }

        /* configure PHY Rx Control register: mirror the MAC RCTL bits */
        e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
        mreg = E1000_READ_REG(hw, E1000_RCTL);
        if (mreg & E1000_RCTL_UPE)
                preg |= BM_RCTL_UPE;
        if (mreg & E1000_RCTL_MPE)
                preg |= BM_RCTL_MPE;
        preg &= ~(BM_RCTL_MO_MASK);
        /* translate the multicast-offset field to the PHY's encoding */
        if (mreg & E1000_RCTL_MO_3)
                preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
                                << BM_RCTL_MO_SHIFT);
        if (mreg & E1000_RCTL_BAM)
                preg |= BM_RCTL_BAM;
        if (mreg & E1000_RCTL_PMCF)
                preg |= BM_RCTL_PMCF;
        /* flow-control receive enable comes from CTRL, not RCTL */
        mreg = E1000_READ_REG(hw, E1000_CTRL);
        if (mreg & E1000_CTRL_RFCE)
                preg |= BM_RCTL_RFCE;
        e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);

        /* enable PHY wakeup in MAC register */
        E1000_WRITE_REG(hw, E1000_WUC,
            E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
        E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);

        /* configure and enable PHY wakeup in PHY registers */
        e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
        e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

        /* activate PHY wakeup */
        ret = hw->phy.ops.acquire(hw);
        if (ret) {
                printf("Could not acquire PHY\n");
                return ret;
        }
        /* select the wakeup-control page (769), then set enable bits */
        e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
                                 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
        ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
        if (ret) {
                printf("Could not read PHY page 769\n");
                goto out;
        }
        preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
        ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
        if (ret)
                printf("Could not set PHY Host Wakeup bit\n");
out:
        /* always release the PHY taken above */
        hw->phy.ops.release(hw);

        return ret;
}
4075
4076 static void
4077 lem_led_func(void *arg, int onoff)
4078 {
4079         struct adapter  *adapter = arg;
4080
4081         EM_CORE_LOCK(adapter);
4082         if (onoff) {
4083                 e1000_setup_led(&adapter->hw);
4084                 e1000_led_on(&adapter->hw);
4085         } else {
4086                 e1000_led_off(&adapter->hw);
4087                 e1000_cleanup_led(&adapter->hw);
4088         }
4089         EM_CORE_UNLOCK(adapter);
4090 }
4091
4092 /*********************************************************************
4093 * 82544 Coexistence issue workaround.
4094 *    There are 2 issues.
4095 *       1. Transmit Hang issue.
4096 *    To detect this issue, following equation can be used...
4097 *         SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4098 *         If SUM[3:0] is in between 1 to 4, we will have this issue.
4099 *
4100 *       2. DAC issue.
4101 *    To detect this issue, following equation can be used...
4102 *         SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4103 *         If SUM[3:0] is in between 9 to c, we will have this issue.
4104 *
4105 *
4106 *    WORKAROUND:
4107 *         Make sure we do not have ending address
4108 *         as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4109 *
4110 *************************************************************************/
4111 static u32
4112 lem_fill_descriptors (bus_addr_t address, u32 length,
4113                 PDESC_ARRAY desc_array)
4114 {
4115         u32 safe_terminator;
4116
4117         /* Since issue is sensitive to length and address.*/
4118         /* Let us first check the address...*/
4119         if (length <= 4) {
4120                 desc_array->descriptor[0].address = address;
4121                 desc_array->descriptor[0].length = length;
4122                 desc_array->elements = 1;
4123                 return (desc_array->elements);
4124         }
4125         safe_terminator = (u32)((((u32)address & 0x7) +
4126             (length & 0xF)) & 0xF);
4127         /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4128         if (safe_terminator == 0   ||
4129         (safe_terminator > 4   &&
4130         safe_terminator < 9)   ||
4131         (safe_terminator > 0xC &&
4132         safe_terminator <= 0xF)) {
4133                 desc_array->descriptor[0].address = address;
4134                 desc_array->descriptor[0].length = length;
4135                 desc_array->elements = 1;
4136                 return (desc_array->elements);
4137         }
4138
4139         desc_array->descriptor[0].address = address;
4140         desc_array->descriptor[0].length = length - 4;
4141         desc_array->descriptor[1].address = address + (length - 4);
4142         desc_array->descriptor[1].length = 4;
4143         desc_array->elements = 2;
4144         return (desc_array->elements);
4145 }
4146
4147 /**********************************************************************
4148  *
4149  *  Update the board statistics counters.
4150  *
4151  **********************************************************************/
static void
lem_update_stats_counters(struct adapter *adapter)
{
        struct ifnet   *ifp;

        /*
         * Symbol/sequence error counters are only sampled on copper
         * media or when the link is up.
         */
        if(adapter->hw.phy.media_type == e1000_media_type_copper ||
           (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
                adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
                adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
        }
        /* The hardware counters clear on read; accumulate in software. */
        adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
        adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
        adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
        adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);

        adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
        adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
        adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
        adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
        adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
        adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
        adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
        adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
        adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
        adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
        adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
        adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
        adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
        adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
        adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
        adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
        adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
        adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
        adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
        adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);

        /* For the 64-bit byte counters the low dword must be read first. */
        /* Both registers clear on the read of the high dword */

        adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
            ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
        adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
            ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);

        adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
        adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
        adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
        adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
        adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);

        /*
         * NOTE(review): only the high dwords (TORH/TOTH) are read here,
         * unlike gorc/gotc above which read low then high -- confirm
         * this is intentional before changing.
         */
        adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
        adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);

        adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
        adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
        adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
        adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
        adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
        adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
        adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
        adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
        adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
        adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);

        /* These counters only exist on 82543 and newer MACs. */
        if (adapter->hw.mac.type >= e1000_82543) {
                adapter->stats.algnerrc += 
                E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
                adapter->stats.rxerrc += 
                E1000_READ_REG(&adapter->hw, E1000_RXERRC);
                adapter->stats.tncrs += 
                E1000_READ_REG(&adapter->hw, E1000_TNCRS);
                adapter->stats.cexterr += 
                E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
                adapter->stats.tsctc += 
                E1000_READ_REG(&adapter->hw, E1000_TSCTC);
                adapter->stats.tsctfc += 
                E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
        }
        ifp = adapter->ifp;

        /* Publish aggregate counters to the ifnet. */
        ifp->if_collisions = adapter->stats.colc;

        /* Rx Errors */
        ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
            adapter->stats.crcerrs + adapter->stats.algnerrc +
            adapter->stats.ruc + adapter->stats.roc +
            adapter->stats.mpc + adapter->stats.cexterr;

        /* Tx Errors */
        ifp->if_oerrors = adapter->stats.ecol +
            adapter->stats.latecol + adapter->watchdog_events;
}
4244
4245 /* Export a single 32-bit register via a read-only sysctl. */
4246 static int
4247 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4248 {
4249         struct adapter *adapter;
4250         u_int val;
4251
4252         adapter = oidp->oid_arg1;
4253         val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4254         return (sysctl_handle_int(oidp, &val, 0, req));
4255 }
4256
4257 /*
4258  * Add sysctl variables, one per statistic, to the system.
4259  */
4260 static void
4261 lem_add_hw_stats(struct adapter *adapter)
4262 {
4263         device_t dev = adapter->dev;
4264
4265         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4266         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4267         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4268         struct e1000_hw_stats *stats = &adapter->stats;
4269
4270         struct sysctl_oid *stat_node;
4271         struct sysctl_oid_list *stat_list;
4272
4273         /* Driver Statistics */
4274         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail", 
4275                          CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4276                          "Std mbuf failed");
4277         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail", 
4278                          CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4279                          "Std mbuf cluster failed");
4280         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 
4281                         CTLFLAG_RD, &adapter->dropped_pkts,
4282                         "Driver dropped packets");
4283         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", 
4284                         CTLFLAG_RD, &adapter->no_tx_dma_setup,
4285                         "Driver tx dma failure in xmit");
4286         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4287                         CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4288                         "Not enough tx descriptors failure in xmit");
4289         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4290                         CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4291                         "Not enough tx descriptors failure in xmit");
4292         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4293                         CTLFLAG_RD, &adapter->rx_overruns,
4294                         "RX overruns");
4295         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4296                         CTLFLAG_RD, &adapter->watchdog_events,
4297                         "Watchdog timeouts");
4298
4299         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4300                         CTLFLAG_RD, adapter, E1000_CTRL,
4301                         lem_sysctl_reg_handler, "IU",
4302                         "Device Control Register");
4303         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4304                         CTLFLAG_RD, adapter, E1000_RCTL,
4305                         lem_sysctl_reg_handler, "IU",
4306                         "Receiver Control Register");
4307         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4308                         CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4309                         "Flow Control High Watermark");
4310         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", 
4311                         CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4312                         "Flow Control Low Watermark");
4313         SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, "fifo_workaround",
4314                         CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4315                         "TX FIFO workaround events");
4316         SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, "fifo_reset",
4317                         CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4318                         "TX FIFO resets");
4319
4320         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head", 
4321                         CTLFLAG_RD, adapter, E1000_TDH(0),
4322                         lem_sysctl_reg_handler, "IU",
4323                         "Transmit Descriptor Head");
4324         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail", 
4325                         CTLFLAG_RD, adapter, E1000_TDT(0),
4326                         lem_sysctl_reg_handler, "IU",
4327                         "Transmit Descriptor Tail");
4328         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head", 
4329                         CTLFLAG_RD, adapter, E1000_RDH(0),
4330                         lem_sysctl_reg_handler, "IU",
4331                         "Receive Descriptor Head");
4332         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail", 
4333                         CTLFLAG_RD, adapter, E1000_RDT(0),
4334                         lem_sysctl_reg_handler, "IU",
4335                         "Receive Descriptor Tail");
4336         
4337
4338         /* MAC stats get their own sub node */
4339
4340         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
4341                                     CTLFLAG_RD, NULL, "Statistics");
4342         stat_list = SYSCTL_CHILDREN(stat_node);
4343
4344         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "excess_coll", 
4345                         CTLFLAG_RD, &stats->ecol,
4346                         "Excessive collisions");
4347         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "single_coll", 
4348                         CTLFLAG_RD, &stats->scc,
4349                         "Single collisions");
4350         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "multiple_coll", 
4351                         CTLFLAG_RD, &stats->mcc,
4352                         "Multiple collisions");
4353         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "late_coll", 
4354                         CTLFLAG_RD, &stats->latecol,
4355                         "Late collisions");
4356         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "collision_count", 
4357                         CTLFLAG_RD, &stats->colc,
4358                         "Collision Count");
4359         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4360                         CTLFLAG_RD, &adapter->stats.symerrs,
4361                         "Symbol Errors");
4362         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4363                         CTLFLAG_RD, &adapter->stats.sec,
4364                         "Sequence Errors");
4365         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "defer_count",
4366                         CTLFLAG_RD, &adapter->stats.dc,
4367                         "Defer Count");
4368         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4369                         CTLFLAG_RD, &adapter->stats.mpc,
4370                         "Missed Packets");
4371         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4372                         CTLFLAG_RD, &adapter->stats.rnbc,
4373                         "Receive No Buffers");
4374         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4375                         CTLFLAG_RD, &adapter->stats.ruc,
4376                         "Receive Undersize");
4377         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4378                         CTLFLAG_RD, &adapter->stats.rfc,
4379                         "Fragmented Packets Received ");
4380         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4381                         CTLFLAG_RD, &adapter->stats.roc,
4382                         "Oversized Packets Received");
4383         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4384                         CTLFLAG_RD, &adapter->stats.rjc,
4385                         "Recevied Jabber");
4386         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4387                         CTLFLAG_RD, &adapter->stats.rxerrc,
4388                         "Receive Errors");
4389         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4390                         CTLFLAG_RD, &adapter->stats.crcerrs,
4391                         "CRC errors");
4392         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4393                         CTLFLAG_RD, &adapter->stats.algnerrc,
4394                         "Alignment Errors");
4395         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4396                         CTLFLAG_RD, &adapter->stats.cexterr,
4397                         "Collision/Carrier extension errors");
4398         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4399                         CTLFLAG_RD, &adapter->stats.xonrxc,
4400                         "XON Received");
4401         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4402                         CTLFLAG_RD, &adapter->stats.xontxc,
4403                         "XON Transmitted");
4404         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4405                         CTLFLAG_RD, &adapter->stats.xoffrxc,
4406                         "XOFF Received");
4407         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4408                         CTLFLAG_RD, &adapter->stats.xofftxc,
4409                         "XOFF Transmitted");
4410
4411         /* Packet Reception Stats */
4412         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4413                         CTLFLAG_RD, &adapter->stats.tpr,
4414                         "Total Packets Received ");
4415         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4416                         CTLFLAG_RD, &adapter->stats.gprc,
4417                         "Good Packets Received");
4418         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4419                         CTLFLAG_RD, &adapter->stats.bprc,
4420                         "Broadcast Packets Received");
4421         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4422                         CTLFLAG_RD, &adapter->stats.mprc,
4423                         "Multicast Packets Received");
4424         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4425                         CTLFLAG_RD, &adapter->stats.prc64,
4426                         "64 byte frames received ");
4427         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4428                         CTLFLAG_RD, &adapter->stats.prc127,
4429                         "65-127 byte frames received");
4430         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4431                         CTLFLAG_RD, &adapter->stats.prc255,
4432                         "128-255 byte frames received");
4433         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4434                         CTLFLAG_RD, &adapter->stats.prc511,
4435                         "256-511 byte frames received");
4436         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4437                         CTLFLAG_RD, &adapter->stats.prc1023,
4438                         "512-1023 byte frames received");
4439         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4440                         CTLFLAG_RD, &adapter->stats.prc1522,
4441                         "1023-1522 byte frames received");
4442         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", 
4443                         CTLFLAG_RD, &adapter->stats.gorc, 
4444                         "Good Octets Received");
4445
4446         /* Packet Transmission Stats */
4447         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 
4448                         CTLFLAG_RD, &adapter->stats.gotc, 
4449                         "Good Octets Transmitted"); 
4450         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4451                         CTLFLAG_RD, &adapter->stats.tpt,
4452                         "Total Packets Transmitted");
4453         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4454                         CTLFLAG_RD, &adapter->stats.gptc,
4455                         "Good Packets Transmitted");
4456         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4457                         CTLFLAG_RD, &adapter->stats.bptc,
4458                         "Broadcast Packets Transmitted");
4459         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4460                         CTLFLAG_RD, &adapter->stats.mptc,
4461                         "Multicast Packets Transmitted");
4462         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4463                         CTLFLAG_RD, &adapter->stats.ptc64,
4464                         "64 byte frames transmitted ");
4465         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4466                         CTLFLAG_RD, &adapter->stats.ptc127,
4467                         "65-127 byte frames transmitted");
4468         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4469                         CTLFLAG_RD, &adapter->stats.ptc255,
4470                         "128-255 byte frames transmitted");
4471         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4472                         CTLFLAG_RD, &adapter->stats.ptc511,
4473                         "256-511 byte frames transmitted");
4474         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4475                         CTLFLAG_RD, &adapter->stats.ptc1023,
4476                         "512-1023 byte frames transmitted");
4477         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4478                         CTLFLAG_RD, &adapter->stats.ptc1522,
4479                         "1024-1522 byte frames transmitted");
4480         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4481                         CTLFLAG_RD, &adapter->stats.tsctc,
4482                         "TSO Contexts Transmitted");
4483         SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4484                         CTLFLAG_RD, &adapter->stats.tsctfc,
4485                         "TSO Contexts Failed");
4486 }
4487
4488 /**********************************************************************
4489  *
 *  This routine provides a way to dump out the adapter EEPROM;
 *  it is often a useful debug/service tool.  Only the first 32
 *  words are dumped, since everything that matters lies within
 *  that range.
 *
4493  *
4494  **********************************************************************/
4495
4496 static int
4497 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4498 {
4499         struct adapter *adapter;
4500         int error;
4501         int result;
4502
4503         result = -1;
4504         error = sysctl_handle_int(oidp, &result, 0, req);
4505
4506         if (error || !req->newptr)
4507                 return (error);
4508
4509         /*
4510          * This value will cause a hex dump of the
4511          * first 32 16-bit words of the EEPROM to
4512          * the screen.
4513          */
4514         if (result == 1) {
4515                 adapter = (struct adapter *)arg1;
4516                 lem_print_nvm_info(adapter);
4517         }
4518
4519         return (error);
4520 }
4521
4522 static void
4523 lem_print_nvm_info(struct adapter *adapter)
4524 {
4525         u16     eeprom_data;
4526         int     i, j, row = 0;
4527
4528         /* Its a bit crude, but it gets the job done */
4529         printf("\nInterface EEPROM Dump:\n");
4530         printf("Offset\n0x0000  ");
4531         for (i = 0, j = 0; i < 32; i++, j++) {
4532                 if (j == 8) { /* Make the offset block */
4533                         j = 0; ++row;
4534                         printf("\n0x00%x0  ",row);
4535                 }
4536                 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4537                 printf("%04x ", eeprom_data);
4538         }
4539         printf("\n");
4540 }
4541
4542 static int
4543 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4544 {
4545         struct em_int_delay_info *info;
4546         struct adapter *adapter;
4547         u32 regval;
4548         int error;
4549         int usecs;
4550         int ticks;
4551
4552         info = (struct em_int_delay_info *)arg1;
4553         usecs = info->value;
4554         error = sysctl_handle_int(oidp, &usecs, 0, req);
4555         if (error != 0 || req->newptr == NULL)
4556                 return (error);
4557         if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4558                 return (EINVAL);
4559         info->value = usecs;
4560         ticks = EM_USECS_TO_TICKS(usecs);
4561
4562         adapter = info->adapter;
4563         
4564         EM_CORE_LOCK(adapter);
4565         regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4566         regval = (regval & ~0xffff) | (ticks & 0xffff);
4567         /* Handle a few special cases. */
4568         switch (info->offset) {
4569         case E1000_RDTR:
4570                 break;
4571         case E1000_TIDV:
4572                 if (ticks == 0) {
4573                         adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4574                         /* Don't write 0 into the TIDV register. */
4575                         regval++;
4576                 } else
4577                         adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4578                 break;
4579         }
4580         E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4581         EM_CORE_UNLOCK(adapter);
4582         return (0);
4583 }
4584
4585 static void
4586 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4587         const char *description, struct em_int_delay_info *info,
4588         int offset, int value)
4589 {
4590         info->adapter = adapter;
4591         info->offset = offset;
4592         info->value = value;
4593         SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4594             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4595             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4596             info, 0, lem_sysctl_int_delay, "I", description);
4597 }
4598
4599 static void
4600 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4601         const char *description, int *limit, int value)
4602 {
4603         *limit = value;
4604         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4605             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4606             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4607 }
4608
4609 #ifndef EM_LEGACY_IRQ
4610 static void
4611 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4612         const char *description, int *limit, int value)
4613 {
4614         *limit = value;
4615         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4616             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4617             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4618 }
4619 #endif