]> CyberLeo.Net >> Repos - FreeBSD/stable/9.git/blob - sys/dev/e1000/if_lem.c
MFC: sync the version of netmap with the one in HEAD, including device
[FreeBSD/stable/9.git] / sys / dev / e1000 / if_lem.c
1 /******************************************************************************
2
3   Copyright (c) 2001-2012, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/bus.h>
44 #include <sys/endian.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/module.h>
50 #include <sys/rman.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <sys/taskqueue.h>
55 #include <sys/eventhandler.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58
59 #include <net/bpf.h>
60 #include <net/ethernet.h>
61 #include <net/if.h>
62 #include <net/if_arp.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65
66 #include <net/if_types.h>
67 #include <net/if_vlan_var.h>
68
69 #include <netinet/in_systm.h>
70 #include <netinet/in.h>
71 #include <netinet/if_ether.h>
72 #include <netinet/ip.h>
73 #include <netinet/ip6.h>
74 #include <netinet/tcp.h>
75 #include <netinet/udp.h>
76
77 #include <machine/in_cksum.h>
78 #include <dev/led/led.h>
79 #include <dev/pci/pcivar.h>
80 #include <dev/pci/pcireg.h>
81
82 #include "e1000_api.h"
83 #include "if_lem.h"
84
85 /*********************************************************************
86  *  Legacy Em Driver version:
 *  (appended after the branding string when lem_probe() builds the
 *  device description)
87  *********************************************************************/
88 char lem_driver_version[] = "1.0.5";
89
90 /*********************************************************************
91  *  PCI Device ID Table
92  *
93  *  Used by probe to select devices to load on
94  *  Last field stores an index into e1000_strings
95  *  Last entry must be all 0s
96  *
97  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
98  *********************************************************************/
99
100 static em_vendor_info_t lem_vendor_info_array[] =
101 {
102         /* Intel(R) PRO/1000 Network Connection */
        /*
         * Every entry uses vendor 0x8086 (Intel) and string index 0,
         * i.e. all parts share the single lem_strings[] description.
         * PCI_ANY_ID in the subvendor/subdevice columns makes lem_probe()
         * match any subsystem ID for that device ID.
         */
103         { 0x8086, E1000_DEV_ID_82540EM,         PCI_ANY_ID, PCI_ANY_ID, 0},
104         { 0x8086, E1000_DEV_ID_82540EM_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
105         { 0x8086, E1000_DEV_ID_82540EP,         PCI_ANY_ID, PCI_ANY_ID, 0},
106         { 0x8086, E1000_DEV_ID_82540EP_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
107         { 0x8086, E1000_DEV_ID_82540EP_LP,      PCI_ANY_ID, PCI_ANY_ID, 0},
108
109         { 0x8086, E1000_DEV_ID_82541EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
110         { 0x8086, E1000_DEV_ID_82541ER,         PCI_ANY_ID, PCI_ANY_ID, 0},
111         { 0x8086, E1000_DEV_ID_82541ER_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
112         { 0x8086, E1000_DEV_ID_82541EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
113         { 0x8086, E1000_DEV_ID_82541GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
114         { 0x8086, E1000_DEV_ID_82541GI_LF,      PCI_ANY_ID, PCI_ANY_ID, 0},
115         { 0x8086, E1000_DEV_ID_82541GI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
116
117         { 0x8086, E1000_DEV_ID_82542,           PCI_ANY_ID, PCI_ANY_ID, 0},
118
119         { 0x8086, E1000_DEV_ID_82543GC_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
120         { 0x8086, E1000_DEV_ID_82543GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
121
122         { 0x8086, E1000_DEV_ID_82544EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
123         { 0x8086, E1000_DEV_ID_82544EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
124         { 0x8086, E1000_DEV_ID_82544GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
125         { 0x8086, E1000_DEV_ID_82544GC_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
126
127         { 0x8086, E1000_DEV_ID_82545EM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
128         { 0x8086, E1000_DEV_ID_82545EM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
129         { 0x8086, E1000_DEV_ID_82545GM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
130         { 0x8086, E1000_DEV_ID_82545GM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
131         { 0x8086, E1000_DEV_ID_82545GM_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
132
133         { 0x8086, E1000_DEV_ID_82546EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
134         { 0x8086, E1000_DEV_ID_82546EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
135         { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
136         { 0x8086, E1000_DEV_ID_82546GB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
137         { 0x8086, E1000_DEV_ID_82546GB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
138         { 0x8086, E1000_DEV_ID_82546GB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
139         { 0x8086, E1000_DEV_ID_82546GB_PCIE,    PCI_ANY_ID, PCI_ANY_ID, 0},
140         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
141         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
142                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
143
144         { 0x8086, E1000_DEV_ID_82547EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
145         { 0x8086, E1000_DEV_ID_82547EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
146         { 0x8086, E1000_DEV_ID_82547GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
147         /* required last entry */
        /* lem_probe() stops scanning at the first zero vendor_id. */
148         { 0, 0, 0, 0, 0}
149 };
150
151 /*********************************************************************
152  *  Table of branding strings for all supported NICs.
153  *********************************************************************/
154
/*
 * Branding strings, indexed by the last field of em_vendor_info_t.
 * All table entries above use index 0, so there is a single string.
 */
155 static char *lem_strings[] = {
156         "Intel(R) PRO/1000 Legacy Network Connection"
157 };
158
159 /*********************************************************************
160  *  Function prototypes
161  *********************************************************************/
162 static int      lem_probe(device_t);
163 static int      lem_attach(device_t);
164 static int      lem_detach(device_t);
165 static int      lem_shutdown(device_t);
166 static int      lem_suspend(device_t);
167 static int      lem_resume(device_t);
168 static void     lem_start(struct ifnet *);
169 static void     lem_start_locked(struct ifnet *ifp);
170 static int      lem_ioctl(struct ifnet *, u_long, caddr_t);
171 static void     lem_init(void *);
172 static void     lem_init_locked(struct adapter *);
173 static void     lem_stop(void *);
174 static void     lem_media_status(struct ifnet *, struct ifmediareq *);
175 static int      lem_media_change(struct ifnet *);
176 static void     lem_identify_hardware(struct adapter *);
177 static int      lem_allocate_pci_resources(struct adapter *);
178 static int      lem_allocate_irq(struct adapter *adapter);
179 static void     lem_free_pci_resources(struct adapter *);
180 static void     lem_local_timer(void *);
181 static int      lem_hardware_init(struct adapter *);
182 static int      lem_setup_interface(device_t, struct adapter *);
183 static void     lem_setup_transmit_structures(struct adapter *);
184 static void     lem_initialize_transmit_unit(struct adapter *);
185 static int      lem_setup_receive_structures(struct adapter *);
186 static void     lem_initialize_receive_unit(struct adapter *);
187 static void     lem_enable_intr(struct adapter *);
188 static void     lem_disable_intr(struct adapter *);
189 static void     lem_free_transmit_structures(struct adapter *);
190 static void     lem_free_receive_structures(struct adapter *);
191 static void     lem_update_stats_counters(struct adapter *);
192 static void     lem_add_hw_stats(struct adapter *adapter);
193 static void     lem_txeof(struct adapter *);
194 static void     lem_tx_purge(struct adapter *);
195 static int      lem_allocate_receive_structures(struct adapter *);
196 static int      lem_allocate_transmit_structures(struct adapter *);
197 static bool     lem_rxeof(struct adapter *, int, int *);
198 #ifndef __NO_STRICT_ALIGNMENT
199 static int      lem_fixup_rx(struct adapter *);
200 #endif
201 static void     lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
202                     struct mbuf *);
203 static void     lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
204                     u32 *, u32 *);
205 static void     lem_set_promisc(struct adapter *);
206 static void     lem_disable_promisc(struct adapter *);
207 static void     lem_set_multi(struct adapter *);
208 static void     lem_update_link_status(struct adapter *);
209 static int      lem_get_buf(struct adapter *, int);
210 static void     lem_register_vlan(void *, struct ifnet *, u16);
211 static void     lem_unregister_vlan(void *, struct ifnet *, u16);
212 static void     lem_setup_vlan_hw_support(struct adapter *);
213 static int      lem_xmit(struct adapter *, struct mbuf **);
214 static void     lem_smartspeed(struct adapter *);
215 static int      lem_82547_fifo_workaround(struct adapter *, int);
216 static void     lem_82547_update_fifo_head(struct adapter *, int);
217 static int      lem_82547_tx_fifo_reset(struct adapter *);
218 static void     lem_82547_move_tail(void *);
219 static int      lem_dma_malloc(struct adapter *, bus_size_t,
220                     struct em_dma_alloc *, int);
221 static void     lem_dma_free(struct adapter *, struct em_dma_alloc *);
222 static int      lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
223 static void     lem_print_nvm_info(struct adapter *);
224 static int      lem_is_valid_ether_addr(u8 *);
225 static u32      lem_fill_descriptors (bus_addr_t address, u32 length,
226                     PDESC_ARRAY desc_array);
227 static int      lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
228 static void     lem_add_int_delay_sysctl(struct adapter *, const char *,
229                     const char *, struct em_int_delay_info *, int, int);
230 static void     lem_set_flow_cntrl(struct adapter *, const char *,
231                     const char *, int *, int);
232 /* Management and WOL Support */
233 static void     lem_init_manageability(struct adapter *);
234 static void     lem_release_manageability(struct adapter *);
235 static void     lem_get_hw_control(struct adapter *);
236 static void     lem_release_hw_control(struct adapter *);
237 static void     lem_get_wakeup(device_t);
238 static void     lem_enable_wakeup(device_t);
239 static int      lem_enable_phy_wakeup(struct adapter *);
240 static void     lem_led_func(void *, int);
241
242 static void     lem_intr(void *);
243 static int      lem_irq_fast(void *);
244 static void     lem_handle_rxtx(void *context, int pending);
245 static void     lem_handle_link(void *context, int pending);
246 static void     lem_add_rx_process_limit(struct adapter *, const char *,
247                     const char *, int *, int);
248
249 #ifdef DEVICE_POLLING
250 static poll_handler_t lem_poll;
251 #endif /* POLLING */
252
253 /*********************************************************************
254  *  FreeBSD Device Interface Entry Points
255  *********************************************************************/
256
/* Newbus device methods: probe/attach/detach plus power management hooks. */
257 static device_method_t lem_methods[] = {
258         /* Device interface */
259         DEVMETHOD(device_probe, lem_probe),
260         DEVMETHOD(device_attach, lem_attach),
261         DEVMETHOD(device_detach, lem_detach),
262         DEVMETHOD(device_shutdown, lem_shutdown),
263         DEVMETHOD(device_suspend, lem_suspend),
264         DEVMETHOD(device_resume, lem_resume),
265         DEVMETHOD_END
266 };
267
/*
 * NOTE(review): the driver name is "em", not "lem" — presumably so legacy
 * and current em(4) adapters share one interface namespace; the softc size
 * tells newbus how much per-device storage to allocate.
 */
268 static driver_t lem_driver = {
269         "em", lem_methods, sizeof(struct adapter),
270 };
271
/* Shared with the newer em(4) driver, which defines em_devclass. */
272 extern devclass_t em_devclass;
273 DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
274 MODULE_DEPEND(lem, pci, 1, 1, 1);
275 MODULE_DEPEND(lem, ether, 1, 1, 1);
276
277 /*********************************************************************
278  *  Tunable default values.
279  *********************************************************************/
280
/* One hardware delay tick is 1.024 us; the +500/+512 terms round to nearest. */
281 #define EM_TICKS_TO_USECS(ticks)        ((1024 * (ticks) + 500) / 1000)
282 #define EM_USECS_TO_TICKS(usecs)        ((1000 * (usecs) + 512) / 1024)
283
/* DEFAULT_ITR: ITR register granularity is 256 ns, hence the *256 divisor. */
284 #define MAX_INTS_PER_SEC        8000
285 #define DEFAULT_ITR             (1000000000/(MAX_INTS_PER_SEC * 256))
286
/* Interrupt-delay defaults, converted from the EM_* tick values to usecs. */
287 static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
288 static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
289 static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
290 static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
/* Requested descriptor ring sizes; validated/clamped in lem_attach(). */
291 static int lem_rxd = EM_DEFAULT_RXD;
292 static int lem_txd = EM_DEFAULT_TXD;
293 static int lem_smart_pwr_down = FALSE;
294
295 /* Controls whether promiscuous also shows bad packets */
296 static int lem_debug_sbp = FALSE;
297
/* Loader tunables (hw.em.*) overriding the defaults above at boot. */
298 TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
299 TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
300 TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
301 TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
302 TUNABLE_INT("hw.em.rxd", &lem_rxd);
303 TUNABLE_INT("hw.em.txd", &lem_txd);
304 TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
305 TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
306
307 /* Interrupt style - default to fast */
308 static int lem_use_legacy_irq = 0;
309 TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq);
310
311 /* How many packets rxeof tries to clean at a time */
312 static int lem_rx_process_limit = 100;
313 TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
314
315 /* Flow control setting - default to FULL */
316 static int lem_fc_setting = e1000_fc_full;
317 TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
318
319 /* Global used in WOL setup with multiport cards */
320 static int global_quad_port_a = 0;
321
322 #ifdef DEV_NETMAP       /* see ixgbe.c for details */
323 #include <dev/netmap/if_lem_netmap.h>
324 #endif /* DEV_NETMAP */
325
326 /*********************************************************************
327  *  Device identification routine
328  *
329  *  em_probe determines if the driver should be loaded on
330  *  adapter based on PCI vendor/device id of the adapter.
331  *
332  *  return BUS_PROBE_DEFAULT on success, positive on failure
333  *********************************************************************/
334
335 static int
336 lem_probe(device_t dev)
337 {
338         char            adapter_name[60];
339         u16             pci_vendor_id = 0;
340         u16             pci_device_id = 0;
341         u16             pci_subvendor_id = 0;
342         u16             pci_subdevice_id = 0;
343         em_vendor_info_t *ent;
344
345         INIT_DEBUGOUT("em_probe: begin");
346
347         pci_vendor_id = pci_get_vendor(dev);
348         if (pci_vendor_id != EM_VENDOR_ID)
349                 return (ENXIO);
350
351         pci_device_id = pci_get_device(dev);
352         pci_subvendor_id = pci_get_subvendor(dev);
353         pci_subdevice_id = pci_get_subdevice(dev);
354
355         ent = lem_vendor_info_array;
356         while (ent->vendor_id != 0) {
357                 if ((pci_vendor_id == ent->vendor_id) &&
358                     (pci_device_id == ent->device_id) &&
359
360                     ((pci_subvendor_id == ent->subvendor_id) ||
361                     (ent->subvendor_id == PCI_ANY_ID)) &&
362
363                     ((pci_subdevice_id == ent->subdevice_id) ||
364                     (ent->subdevice_id == PCI_ANY_ID))) {
365                         sprintf(adapter_name, "%s %s",
366                                 lem_strings[ent->index],
367                                 lem_driver_version);
368                         device_set_desc_copy(dev, adapter_name);
369                         return (BUS_PROBE_DEFAULT);
370                 }
371                 ent++;
372         }
373
374         return (ENXIO);
375 }
376
377 /*********************************************************************
378  *  Device initialization routine
379  *
380  *  The attach entry point is called when the driver is being loaded.
381  *  This routine identifies the type of hardware, allocates all resources
382  *  and initializes the hardware.
383  *
384  *  return 0 on success, positive on failure
385  *********************************************************************/
386
387 static int
388 lem_attach(device_t dev)
389 {
390         struct adapter  *adapter;
391         int             tsize, rsize;
392         int             error = 0;
393
394         INIT_DEBUGOUT("lem_attach: begin");
395
        /* Softc storage comes from newbus (sizeof(struct adapter) in lem_driver). */
396         adapter = device_get_softc(dev);
397         adapter->dev = adapter->osdep.dev = dev;
        /* Per-adapter core/TX/RX mutexes; the callouts below are tied to them. */
398         EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
399         EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
400         EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
401
402         /* SYSCTL stuff */
403         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
404             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
405             OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
406             lem_sysctl_nvm_info, "I", "NVM Information");
407
408         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
409         callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
410
411         /* Determine hardware and mac info */
412         lem_identify_hardware(adapter);
413
414         /* Setup PCI resources */
415         if (lem_allocate_pci_resources(adapter)) {
416                 device_printf(dev, "Allocation of PCI resources failed\n");
417                 error = ENXIO;
418                 goto err_pci;
419         }
420
421         /* Do Shared Code initialization */
422         if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
423                 device_printf(dev, "Setup of Shared code failed\n");
424                 error = ENXIO;
425                 goto err_pci;
426         }
427
428         e1000_get_bus_info(&adapter->hw);
429
430         /* Set up some sysctls for the tunable interrupt delays */
431         lem_add_int_delay_sysctl(adapter, "rx_int_delay",
432             "receive interrupt delay in usecs", &adapter->rx_int_delay,
433             E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
434         lem_add_int_delay_sysctl(adapter, "tx_int_delay",
435             "transmit interrupt delay in usecs", &adapter->tx_int_delay,
436             E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
        /* RADV/TADV/ITR registers only exist on 82540 and later parts. */
437         if (adapter->hw.mac.type >= e1000_82540) {
438                 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
439                     "receive interrupt delay limit in usecs",
440                     &adapter->rx_abs_int_delay,
441                     E1000_REGISTER(&adapter->hw, E1000_RADV),
442                     lem_rx_abs_int_delay_dflt);
443                 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
444                     "transmit interrupt delay limit in usecs",
445                     &adapter->tx_abs_int_delay,
446                     E1000_REGISTER(&adapter->hw, E1000_TADV),
447                     lem_tx_abs_int_delay_dflt);
448                 lem_add_int_delay_sysctl(adapter, "itr",
449                     "interrupt delay limit in usecs/4",
450                     &adapter->tx_itr,
451                     E1000_REGISTER(&adapter->hw, E1000_ITR),
452                     DEFAULT_ITR);
453         }
454
455         /* Sysctls for limiting the amount of work done in the taskqueue */
456         lem_add_rx_process_limit(adapter, "rx_processing_limit",
457             "max number of rx packets to process", &adapter->rx_process_limit,
458             lem_rx_process_limit);
459
460         /* Sysctl for setting the interface flow control */
461         lem_set_flow_cntrl(adapter, "flow_control",
462             "flow control setting",
463             &adapter->fc_setting, lem_fc_setting);
464
465         /*
466          * Validate number of transmit and receive descriptors. It
467          * must not exceed hardware maximum, and must be multiple
468          * of E1000_DBA_ALIGN.
469          */
        /* Invalid tunables fall back to the defaults rather than failing. */
470         if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
471             (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
472             (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
473             (lem_txd < EM_MIN_TXD)) {
474                 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
475                     EM_DEFAULT_TXD, lem_txd);
476                 adapter->num_tx_desc = EM_DEFAULT_TXD;
477         } else
478                 adapter->num_tx_desc = lem_txd;
479         if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
480             (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
481             (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
482             (lem_rxd < EM_MIN_RXD)) {
483                 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
484                     EM_DEFAULT_RXD, lem_rxd);
485                 adapter->num_rx_desc = EM_DEFAULT_RXD;
486         } else
487                 adapter->num_rx_desc = lem_rxd;
488
489         adapter->hw.mac.autoneg = DO_AUTO_NEG;
490         adapter->hw.phy.autoneg_wait_to_complete = FALSE;
491         adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
492         adapter->rx_buffer_len = 2048;
493
494         e1000_init_script_state_82541(&adapter->hw, TRUE);
495         e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
496
497         /* Copper options */
498         if (adapter->hw.phy.media_type == e1000_media_type_copper) {
499                 adapter->hw.phy.mdix = AUTO_ALL_MODES;
500                 adapter->hw.phy.disable_polarity_correction = FALSE;
501                 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
502         }
503
504         /*
505          * Set the frame limits assuming
506          * standard ethernet sized frames.
507          */
508         adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
509         adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
510
511         /*
512          * This controls when hardware reports transmit completion
513          * status.
514          */
515         adapter->hw.mac.report_tx_early = 1;
516
        /* Descriptor rings must be EM_DBA_ALIGN-aligned in size. */
517         tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
518             EM_DBA_ALIGN);
519
520         /* Allocate Transmit Descriptor ring */
521         if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
522                 device_printf(dev, "Unable to allocate tx_desc memory\n");
523                 error = ENOMEM;
524                 goto err_tx_desc;
525         }
526         adapter->tx_desc_base = 
527             (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
528
529         rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
530             EM_DBA_ALIGN);
531
532         /* Allocate Receive Descriptor ring */
533         if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
534                 device_printf(dev, "Unable to allocate rx_desc memory\n");
535                 error = ENOMEM;
536                 goto err_rx_desc;
537         }
538         adapter->rx_desc_base =
539             (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
540
541         /* Allocate multicast array memory. */
542         adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
543             MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
544         if (adapter->mta == NULL) {
545                 device_printf(dev, "Can not allocate multicast setup array\n");
546                 error = ENOMEM;
547                 goto err_hw_init;
548         }
549
550         /*
551         ** Start from a known state, this is
552         ** important in reading the nvm and
553         ** mac from that.
554         */
555         e1000_reset_hw(&adapter->hw);
556
557         /* Make sure we have a good EEPROM before we read from it */
558         if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
559                 /*
560                 ** Some PCI-E parts fail the first check due to
561                 ** the link being in sleep state, call it again,
562                 ** if it fails a second time its a real issue.
563                 */
564                 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
565                         device_printf(dev,
566                             "The EEPROM Checksum Is Not Valid\n");
567                         error = EIO;
568                         goto err_hw_init;
569                 }
570         }
571
572         /* Copy the permanent MAC address out of the EEPROM */
573         if (e1000_read_mac_addr(&adapter->hw) < 0) {
574                 device_printf(dev, "EEPROM read error while reading MAC"
575                     " address\n");
576                 error = EIO;
577                 goto err_hw_init;
578         }
579
580         if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
581                 device_printf(dev, "Invalid MAC address\n");
582                 error = EIO;
583                 goto err_hw_init;
584         }
585
586         /* Initialize the hardware */
587         if (lem_hardware_init(adapter)) {
588                 device_printf(dev, "Unable to initialize the hardware\n");
589                 error = EIO;
590                 goto err_hw_init;
591         }
592
593         /* Allocate transmit descriptors and buffers */
594         if (lem_allocate_transmit_structures(adapter)) {
595                 device_printf(dev, "Could not setup transmit structures\n");
596                 error = ENOMEM;
597                 goto err_tx_struct;
598         }
599
600         /* Allocate receive descriptors and buffers */
601         if (lem_allocate_receive_structures(adapter)) {
602                 device_printf(dev, "Could not setup receive structures\n");
603                 error = ENOMEM;
604                 goto err_rx_struct;
605         }
606
607         /*
608         **  Do interrupt configuration
609         */
610         error = lem_allocate_irq(adapter);
611         if (error)
612                 goto err_rx_struct;
613
614         /*
615          * Get Wake-on-Lan and Management info for later use
616          */
617         lem_get_wakeup(dev);
618
619         /* Setup OS specific network interface */
620         if (lem_setup_interface(dev, adapter) != 0)
621                 goto err_rx_struct;
622
623         /* Initialize statistics */
624         lem_update_stats_counters(adapter);
625
626         adapter->hw.mac.get_link_status = 1;
627         lem_update_link_status(adapter);
628
629         /* Indicate SOL/IDER usage */
630         if (e1000_check_reset_block(&adapter->hw))
631                 device_printf(dev,
632                     "PHY reset is blocked due to SOL/IDER session.\n");
633
634         /* Do we need workaround for 82544 PCI-X adapter? */
635         if (adapter->hw.bus.type == e1000_bus_type_pcix &&
636             adapter->hw.mac.type == e1000_82544)
637                 adapter->pcix_82544 = TRUE;
638         else
639                 adapter->pcix_82544 = FALSE;
640
641         /* Register for VLAN events */
642         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
643             lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
644         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
645             lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); 
646
647         lem_add_hw_stats(adapter);
648
649         /* Non-AMT based hardware can now take control from firmware */
650         if (adapter->has_manage && !adapter->has_amt)
651                 lem_get_hw_control(adapter);
652
653         /* Tell the stack that the interface is not active */
654         adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
655
656         adapter->led_dev = led_create(lem_led_func, adapter,
657             device_get_nameunit(dev));
658
659 #ifdef DEV_NETMAP
660         lem_netmap_attach(adapter);
661 #endif /* DEV_NETMAP */
662         INIT_DEBUGOUT("lem_attach: end");
663
664         return (0);
665
        /*
         * Error unwind: labels fall through in reverse order of acquisition.
         * NOTE(review): paths that reach err_rx_struct after
         * lem_allocate_receive_structures() succeeded (IRQ or interface
         * setup failure) free only the transmit structures below — the
         * receive buffers/DMA maps appear to leak.  Confirm and add a
         * lem_free_receive_structures() call for those paths.
         */
666 err_rx_struct:
667         lem_free_transmit_structures(adapter);
668 err_tx_struct:
669 err_hw_init:
670         lem_release_hw_control(adapter);
671         lem_dma_free(adapter, &adapter->rxdma);
672 err_rx_desc:
673         lem_dma_free(adapter, &adapter->txdma);
674 err_tx_desc:
675 err_pci:
676         if (adapter->ifp != NULL)
677                 if_free(adapter->ifp);
678         lem_free_pci_resources(adapter);
679         free(adapter->mta, M_DEVBUF);
680         EM_TX_LOCK_DESTROY(adapter);
681         EM_RX_LOCK_DESTROY(adapter);
682         EM_CORE_LOCK_DESTROY(adapter);
683
684         return (error);
685 }
686
687 /*********************************************************************
688  *  Device removal routine
689  *
690  *  The detach entry point is called when the driver is being removed.
691  *  This routine stops the adapter and deallocates all the resources
692  *  that were allocated for driver operation.
693  *
694  *  return 0 on success, positive on failure
695  *********************************************************************/
696
697 static int
698 lem_detach(device_t dev)
699 {
700         struct adapter  *adapter = device_get_softc(dev);
701         struct ifnet    *ifp = adapter->ifp;
702
703         INIT_DEBUGOUT("em_detach: begin");
704
705         /* Make sure VLANS are not using driver */
        /* Refuse to detach while a vlan(4) trunk still references us. */
706         if (adapter->ifp->if_vlantrunk != NULL) {
707                 device_printf(dev,"Vlan in use, detach first\n");
708                 return (EBUSY);
709         }
710
711 #ifdef DEVICE_POLLING
712         if (ifp->if_capenable & IFCAP_POLLING)
713                 ether_poll_deregister(ifp);
714 #endif
715
716         if (adapter->led_dev != NULL)
717                 led_destroy(adapter->led_dev);
718
        /* Quiesce the adapter under both locks; in_detach gates other paths. */
719         EM_CORE_LOCK(adapter);
720         EM_TX_LOCK(adapter);
721         adapter->in_detach = 1;
722         lem_stop(adapter);
723         e1000_phy_hw_reset(&adapter->hw);
724
725         lem_release_manageability(adapter);
726
727         EM_TX_UNLOCK(adapter);
728         EM_CORE_UNLOCK(adapter);
729
730         /* Unregister VLAN events */
731         if (adapter->vlan_attach != NULL)
732                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
733         if (adapter->vlan_detach != NULL)
734                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); 
735
        /* Detach from the stack, then drain callouts (must not hold locks). */
736         ether_ifdetach(adapter->ifp);
737         callout_drain(&adapter->timer);
738         callout_drain(&adapter->tx_fifo_timer);
739
740 #ifdef DEV_NETMAP
741         netmap_detach(ifp);
742 #endif /* DEV_NETMAP */
743         lem_free_pci_resources(adapter);
744         bus_generic_detach(dev);
745         if_free(ifp);
746
747         lem_free_transmit_structures(adapter);
748         lem_free_receive_structures(adapter);
749
750         /* Free Transmit Descriptor ring */
751         if (adapter->tx_desc_base) {
752                 lem_dma_free(adapter, &adapter->txdma);
753                 adapter->tx_desc_base = NULL;
754         }
755
756         /* Free Receive Descriptor ring */
757         if (adapter->rx_desc_base) {
758                 lem_dma_free(adapter, &adapter->rxdma);
759                 adapter->rx_desc_base = NULL;
760         }
761
762         lem_release_hw_control(adapter);
763         free(adapter->mta, M_DEVBUF);
764         EM_TX_LOCK_DESTROY(adapter);
765         EM_RX_LOCK_DESTROY(adapter);
766         EM_CORE_LOCK_DESTROY(adapter);
767
768         return (0);
769 }
770
771 /*********************************************************************
772  *
773  *  Shutdown entry point
774  *
775  **********************************************************************/
776
777 static int
778 lem_shutdown(device_t dev)
779 {
780         return lem_suspend(dev);
781 }
782
783 /*
784  * Suspend/resume device methods.
785  */
786 static int
787 lem_suspend(device_t dev)
788 {
789         struct adapter *adapter = device_get_softc(dev);
790
791         EM_CORE_LOCK(adapter);
792
793         lem_release_manageability(adapter);
794         lem_release_hw_control(adapter);
795         lem_enable_wakeup(dev);
796
797         EM_CORE_UNLOCK(adapter);
798
799         return bus_generic_suspend(dev);
800 }
801
802 static int
803 lem_resume(device_t dev)
804 {
805         struct adapter *adapter = device_get_softc(dev);
806         struct ifnet *ifp = adapter->ifp;
807
808         EM_CORE_LOCK(adapter);
809         lem_init_locked(adapter);
810         lem_init_manageability(adapter);
811         EM_CORE_UNLOCK(adapter);
812         lem_start(ifp);
813
814         return bus_generic_resume(dev);
815 }
816
817
818 static void
819 lem_start_locked(struct ifnet *ifp)
820 {
821         struct adapter  *adapter = ifp->if_softc;
822         struct mbuf     *m_head;
823
824         EM_TX_LOCK_ASSERT(adapter);
825
826         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
827             IFF_DRV_RUNNING)
828                 return;
829         if (!adapter->link_active)
830                 return;
831
832         /*
833          * Force a cleanup if number of TX descriptors
834          * available hits the threshold
835          */
836         if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
837                 lem_txeof(adapter);
838                 /* Now do we at least have a minimal? */
839                 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
840                         adapter->no_tx_desc_avail1++;
841                         return;
842                 }
843         }
844
845         while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
846
847                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
848                 if (m_head == NULL)
849                         break;
850                 /*
851                  *  Encapsulation can modify our pointer, and or make it
852                  *  NULL on failure.  In that event, we can't requeue.
853                  */
854                 if (lem_xmit(adapter, &m_head)) {
855                         if (m_head == NULL)
856                                 break;
857                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
858                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
859                         break;
860                 }
861
862                 /* Send a copy of the frame to the BPF listener */
863                 ETHER_BPF_MTAP(ifp, m_head);
864
865                 /* Set timeout in case hardware has problems transmitting. */
866                 adapter->watchdog_check = TRUE;
867                 adapter->watchdog_time = ticks;
868         }
869         if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
870                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
871
872         return;
873 }
874
875 static void
876 lem_start(struct ifnet *ifp)
877 {
878         struct adapter *adapter = ifp->if_softc;
879
880         EM_TX_LOCK(adapter);
881         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
882                 lem_start_locked(ifp);
883         EM_TX_UNLOCK(adapter);
884 }
885
886 /*********************************************************************
887  *  Ioctl entry point
888  *
889  *  em_ioctl is called when the user wants to configure the
890  *  interface.
891  *
892  *  return 0 on success, positive on failure
893  **********************************************************************/
894
static int
lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
#endif
	bool		avoid_reset = FALSE;
	int		error = 0;

	/* Device is being torn down; refuse further configuration. */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				lem_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		EM_CORE_LOCK(adapter);
		switch (adapter->hw.mac.type) {
		case e1000_82542:
			/* 82542 cannot do jumbo frames. */
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		/* Reinitialize so receive buffers match the new frame size. */
		lem_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Only PROMISC/ALLMULTI changed: reprogram
				 * the filters without a full reinit.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					lem_disable_promisc(adapter);
					lem_set_promisc(adapter);
				}
			} else
				lem_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				EM_TX_LOCK(adapter);
				lem_stop(adapter);
				EM_TX_UNLOCK(adapter);
			}
		/* Remember flags so the next call can compute the delta. */
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			lem_disable_intr(adapter);
			lem_set_multi(adapter);
			if (adapter->hw.mac.type == e1000_82542 && 
			    adapter->hw.revision_id == E1000_REVISION_2) {
				lem_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				lem_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		/* mask holds only the capability bits that changed. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(lem_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				lem_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				lem_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if ((mask & IFCAP_WOL) &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if (mask & IFCAP_WOL_MCAST)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if (mask & IFCAP_WOL_MAGIC)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			lem_init(adapter);
		VLAN_CAPABILITIES(ifp);
		break;
	    }

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1072
1073
1074 /*********************************************************************
1075  *  Init entry point
1076  *
1077  *  This routine is used in two ways. It is used by the stack as
1078  *  init entry point in network interface structure. It is also used
1079  *  by the driver as a hw/sw initialization routine to get to a
1080  *  consistent state.
1081  *
1082  *  return 0 on success, positive on failure
1083  **********************************************************************/
1084
static void
lem_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	u32		pba;

	INIT_DEBUGOUT("lem_init: begin");

	EM_CORE_LOCK_ASSERT(adapter);

	/* Quiesce the hardware before reprogramming it. */
	EM_TX_LOCK(adapter);
	lem_stop(adapter);
	EM_TX_UNLOCK(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* 82547 also tracks Tx FIFO state for its workaround. */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
	      ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/* Initialize the hardware */
	if (lem_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		return;
	}
	lem_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (adapter->hw.mac.type >= e1000_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	}

	/* Configure for OS presence */
	lem_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	lem_setup_transmit_structures(adapter);
	lem_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	lem_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (lem_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		EM_TX_LOCK(adapter);
		lem_stop(adapter);
		EM_TX_UNLOCK(adapter);
		return;
	}
	lem_initialize_receive_unit(adapter);

	/* Use real VLAN Filter support? */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			/* Use real VLAN Filter support */
			lem_setup_vlan_hw_support(adapter);
		else {
			/* Just enable hardware VLAN tag stripping. */
			u32 ctrl;
			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
			ctrl |= E1000_CTRL_VME;
			E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
		}
	}

	/* Don't lose promiscuous settings */
	lem_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		lem_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		lem_enable_intr(adapter);

	/* AMT based hardware can now take control from firmware */
	if (adapter->has_manage && adapter->has_amt)
		lem_get_hw_control(adapter);
}
1215
static void
lem_init(void *arg)
{
	struct adapter *sc = arg;

	/* Locked wrapper for the stack's if_init entry point. */
	EM_CORE_LOCK(sc);
	lem_init_locked(sc);
	EM_CORE_UNLOCK(sc);
}
1225
1226
1227 #ifdef DEVICE_POLLING
1228 /*********************************************************************
1229  *
1230  *  Legacy polling routine  
1231  *
1232  *********************************************************************/
static int
lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u32		reg_icr, rx_done = 0;

	EM_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		EM_CORE_UNLOCK(adapter);
		return (rx_done);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Reading ICR acknowledges it; watch for link changes. */
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			lem_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    lem_local_timer, adapter);
		}
	}
	EM_CORE_UNLOCK(adapter);

	/* Process at most "count" received packets this pass. */
	lem_rxeof(adapter, count, &rx_done);

	/* Reclaim completed transmits and restart output if queued. */
	EM_TX_LOCK(adapter);
	lem_txeof(adapter);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
	return (rx_done);
}
1266 #endif /* DEVICE_POLLING */
1267
1268 /*********************************************************************
1269  *
1270  *  Legacy Interrupt Service routine  
1271  *
1272  *********************************************************************/
static void
lem_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;
	u32		reg_icr;


	/* Ignore the interrupt while polling or while we are down. */
	if ((ifp->if_capenable & IFCAP_POLLING) ||
	    ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0))
		return;

	EM_CORE_LOCK(adapter);
	/* Reading ICR acknowledges (clears) the pending causes. */
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;

	/*
	 * All-ones means the device is gone (hot eject); zero means the
	 * interrupt was not ours.  NOTE(review): rx_overruns above is
	 * bumped before this check, so an ejected device also counts one
	 * spurious overrun -- TODO confirm this is intentional.
	 */
	if ((reg_icr == 0xffffffff) || (reg_icr == 0)) {
		EM_CORE_UNLOCK(adapter);
		return;
	}

	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		lem_update_link_status(adapter);
		/* Deal with TX cruft when link lost */
		lem_tx_purge(adapter);
		callout_reset(&adapter->timer, hz,
		    lem_local_timer, adapter);
		EM_CORE_UNLOCK(adapter);
		return;
	}

	EM_CORE_UNLOCK(adapter);
	/* -1: no packet-count limit for this receive pass. */
	lem_rxeof(adapter, -1, NULL);

	EM_TX_LOCK(adapter);
	lem_txeof(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
	return;
}
1318
1319
static void
lem_handle_link(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet *ifp = adapter->ifp;

	/* Task runs after the interrupt filter; recheck interface state. */
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	EM_CORE_LOCK(adapter);
	/* Stop the timer while we refresh link state, then re-arm it. */
	callout_stop(&adapter->timer);
	lem_update_link_status(adapter);
	/* Deal with TX cruft when link lost */
	lem_tx_purge(adapter);
	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	EM_CORE_UNLOCK(adapter);
}
1337
1338
1339 /* Combined RX/TX handler, used by Legacy and MSI */
static void
lem_handle_rxtx(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet	*ifp = adapter->ifp;


	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Bounded rx pass; "more" means packets are still pending. */
		bool more = lem_rxeof(adapter, adapter->rx_process_limit, NULL);
		EM_TX_LOCK(adapter);
		lem_txeof(adapter);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			lem_start_locked(ifp);
		EM_TX_UNLOCK(adapter);
		if (more) {
			/* Requeue ourselves instead of hogging the taskqueue. */
			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
			return;
		}
	}

	/* Interrupts were masked in lem_irq_fast(); re-enable them now. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		lem_enable_intr(adapter);
}
1363
1364 /*********************************************************************
1365  *
1366  *  Fast Legacy/MSI Combined Interrupt Service routine  
1367  *
1368  *********************************************************************/
static int
lem_irq_fast(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp;
	u32		reg_icr;

	ifp = adapter->ifp;

	/* Reading ICR acknowledges (clears) the pending causes. */
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

	/* Hot eject?  */
	if (reg_icr == 0xffffffff)
		return FILTER_STRAY;

	/* Definitely not our interrupt.  */
	if (reg_icr == 0x0)
		return FILTER_STRAY;

	/*
	 * Mask interrupts until the taskqueue is finished running.  This is
	 * cheap, just assume that it is needed.  This also works around the
	 * MSI message reordering errata on certain systems.
	 */
	lem_disable_intr(adapter);
	/* Defer all real rx/tx work to the task (lem_handle_rxtx). */
	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		adapter->hw.mac.get_link_status = 1;
		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;
	return FILTER_HANDLED;
}
1406
1407
1408 /*********************************************************************
1409  *
1410  *  Media Ioctl callback
1411  *
1412  *  This routine is called whenever the user queries the status of
1413  *  the interface using ifconfig.
1414  *
1415  **********************************************************************/
1416 static void
1417 lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1418 {
1419         struct adapter *adapter = ifp->if_softc;
1420         u_char fiber_type = IFM_1000_SX;
1421
1422         INIT_DEBUGOUT("lem_media_status: begin");
1423
1424         EM_CORE_LOCK(adapter);
1425         lem_update_link_status(adapter);
1426
1427         ifmr->ifm_status = IFM_AVALID;
1428         ifmr->ifm_active = IFM_ETHER;
1429
1430         if (!adapter->link_active) {
1431                 EM_CORE_UNLOCK(adapter);
1432                 return;
1433         }
1434
1435         ifmr->ifm_status |= IFM_ACTIVE;
1436
1437         if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1438             (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1439                 if (adapter->hw.mac.type == e1000_82545)
1440                         fiber_type = IFM_1000_LX;
1441                 ifmr->ifm_active |= fiber_type | IFM_FDX;
1442         } else {
1443                 switch (adapter->link_speed) {
1444                 case 10:
1445                         ifmr->ifm_active |= IFM_10_T;
1446                         break;
1447                 case 100:
1448                         ifmr->ifm_active |= IFM_100_TX;
1449                         break;
1450                 case 1000:
1451                         ifmr->ifm_active |= IFM_1000_T;
1452                         break;
1453                 }
1454                 if (adapter->link_duplex == FULL_DUPLEX)
1455                         ifmr->ifm_active |= IFM_FDX;
1456                 else
1457                         ifmr->ifm_active |= IFM_HDX;
1458         }
1459         EM_CORE_UNLOCK(adapter);
1460 }
1461
1462 /*********************************************************************
1463  *
1464  *  Media Ioctl callback
1465  *
1466  *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
1468  *
1469  **********************************************************************/
1470 static int
1471 lem_media_change(struct ifnet *ifp)
1472 {
1473         struct adapter *adapter = ifp->if_softc;
1474         struct ifmedia  *ifm = &adapter->media;
1475
1476         INIT_DEBUGOUT("lem_media_change: begin");
1477
1478         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1479                 return (EINVAL);
1480
1481         EM_CORE_LOCK(adapter);
1482         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1483         case IFM_AUTO:
1484                 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1485                 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1486                 break;
1487         case IFM_1000_LX:
1488         case IFM_1000_SX:
1489         case IFM_1000_T:
1490                 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1491                 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1492                 break;
1493         case IFM_100_TX:
1494                 adapter->hw.mac.autoneg = FALSE;
1495                 adapter->hw.phy.autoneg_advertised = 0;
1496                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1497                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1498                 else
1499                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1500                 break;
1501         case IFM_10_T:
1502                 adapter->hw.mac.autoneg = FALSE;
1503                 adapter->hw.phy.autoneg_advertised = 0;
1504                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1505                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1506                 else
1507                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1508                 break;
1509         default:
1510                 device_printf(adapter->dev, "Unsupported media type\n");
1511         }
1512
1513         lem_init_locked(adapter);
1514         EM_CORE_UNLOCK(adapter);
1515
1516         return (0);
1517 }
1518
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[EM_MAX_SCATTER];
	bus_dmamap_t		map;
	struct em_buffer	*tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc	*ctxd = NULL;
	struct mbuf		*m_head;
	u32			txd_upper, txd_lower, txd_used, txd_saved;
	int			error, nsegs, i, j, first, last = 0;

	m_head = *m_headp;
	txd_upper = txd_lower = txd_used = txd_saved = 0;

	/*
	** When doing checksum offload, it is critical to
	** make sure the first mbuf has more than header,
	** because that routine expects data to be present.
	*/
	if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
	    (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
		m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
		*m_headp = m_head;
		if (m_head == NULL)
			return (ENOBUFS);
	}

	/*
	 * Map the packet for DMA
	 *
	 * Capture the first descriptor index,
	 * this descriptor will have the index
	 * of the EOP which is the only one that
	 * now gets a DONE bit writeback.
	 */
	first = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	/*
	 * There are two types of errors we can (try) to handle:
	 * - EFBIG means the mbuf chain was too long and bus_dma ran
	 *   out of segments.  Defragment the mbuf chain and try again.
	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
	 *   at this point in time.  Defer sending and try again later.
	 * All other errors, in particular EINVAL, are fatal and prevent the
	 * mbuf chain from ever going through.  Drop it and report error.
	 */
	if (error == EFBIG) {
		struct mbuf *m;

		/* Collapse the chain into fewer, larger mbufs */
		m = m_defrag(*m_headp, M_NOWAIT);
		if (m == NULL) {
			adapter->mbuf_alloc_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error) {
			/* Second failure is final: drop the packet */
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error != 0) {
		/* e.g. ENOMEM: caller keeps the mbuf and may retry later */
		adapter->no_tx_dma_setup++;
		return (error);
	}

	/* Keep some slack in the ring; never consume the last descriptors */
	if (nsegs > (adapter->num_tx_desc_avail - 2)) {
		adapter->no_tx_desc_avail2++;
		bus_dmamap_unload(adapter->txtag, map);
		return (ENOBUFS);
	}
	m_head = *m_headp;

	/* Do hardware assists */
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
		lem_transmit_checksum_setup(adapter,  m_head,
		    &txd_upper, &txd_lower);

	i = adapter->next_avail_tx_desc;
	/* Remember the start index so the 82544/PCIX path can roll back */
	if (adapter->pcix_82544) 
		txd_saved = i;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		bus_size_t seg_len;
		bus_addr_t seg_addr;
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			DESC_ARRAY	desc_array;
			u32		array_elements, counter;
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = lem_fill_descriptors(segs[j].ds_addr,
			    segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				/* Ring exhausted mid-packet: undo and bail */
				if (txd_used == adapter->num_tx_desc_avail) {
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					bus_dmamap_unload(adapter->txtag, map);
					return (ENOBUFS);
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd = &adapter->tx_desc_base[i];
				ctxd->buffer_addr = htole64(
				    desc_array.descriptor[counter].address);
				ctxd->lower.data = htole32(
				    (adapter->txd_cmd | txd_lower | (u16)
				    desc_array.descriptor[counter].length));
				ctxd->upper.data =
				    htole32((txd_upper));
				last = i;
				if (++i == adapter->num_tx_desc)
					 i = 0;
				tx_buffer->m_head = NULL;
				tx_buffer->next_eop = -1;
				txd_used++;
			}
		} else {
			/* One descriptor per DMA segment */
			tx_buffer = &adapter->tx_buffer_area[i];
			ctxd = &adapter->tx_desc_base[i];
			seg_addr = segs[j].ds_addr;
			seg_len  = segs[j].ds_len;
			ctxd->buffer_addr = htole64(seg_addr);
			ctxd->lower.data = htole32(
			adapter->txd_cmd | txd_lower | seg_len);
			ctxd->upper.data =
			    htole32(txd_upper);
			last = i;
			if (++i == adapter->num_tx_desc)
				i = 0;
			tx_buffer->m_head = NULL;
			tx_buffer->next_eop = -1;
		}
	}

	adapter->next_avail_tx_desc = i;

	if (adapter->pcix_82544)
		adapter->num_tx_desc_avail -= txd_used;
	else
		adapter->num_tx_desc_avail -= nsegs;

	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vtag);
		/* Tell hardware to add tag */
		ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	/*
	 * The mbuf is stored on the LAST buffer, but the dmamap was
	 * loaded on the FIRST buffer's map; swap the maps so the
	 * loaded map travels with the buffer that holds the mbuf.
	 */
	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
	 */
	ctxd->lower.data |=
	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer->next_eop = last;
	/* Re-arm the watchdog: work was handed to the hardware */
	adapter->watchdog_time = ticks;

	/*
	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* 82547 half-duplex needs the FIFO-hang workaround path */
	if (adapter->hw.mac.type == e1000_82547 &&
	    adapter->link_duplex == HALF_DUPLEX)
		lem_82547_move_tail(adapter);
	else {
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
		if (adapter->hw.mac.type == e1000_82547)
			lem_82547_update_fifo_head(adapter,
			    m_head->m_pkthdr.len);
	}

	return (0);
}
1729
/*********************************************************************
 *
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 * in this case. We do that only when FIFO is quiescent.
 *
 * Walks the descriptors between the hardware tail (TDT) and the
 * software tail, advancing TDT one complete packet (EOP) at a time,
 * and defers via a 1-tick callout whenever the fifo workaround says
 * the next packet cannot safely be released yet.
 *
 **********************************************************************/
static void
lem_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;
	struct e1000_tx_desc *tx_desc;
	u16	hw_tdt, sw_tdt, length = 0;
	bool	eop = 0;

	EM_TX_LOCK_ASSERT(adapter);

	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;
	
	while (hw_tdt != sw_tdt) {
		/* Accumulate the byte length of the current packet */
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			/* Packet complete: safe to hand it to hardware? */
			if (lem_82547_fifo_workaround(adapter, length)) {
				/* Not yet; retry shortly from a callout */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					lem_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			lem_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}	
}
1771
1772 static int
1773 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1774 {       
1775         int fifo_space, fifo_pkt_len;
1776
1777         fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1778
1779         if (adapter->link_duplex == HALF_DUPLEX) {
1780                 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1781
1782                 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1783                         if (lem_82547_tx_fifo_reset(adapter))
1784                                 return (0);
1785                         else
1786                                 return (1);
1787                 }
1788         }
1789
1790         return (0);
1791 }
1792
1793 static void
1794 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1795 {
1796         int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1797         
1798         /* tx_fifo_head is always 16 byte aligned */
1799         adapter->tx_fifo_head += fifo_pkt_len;
1800         if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1801                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1802         }
1803 }
1804
1805
1806 static int
1807 lem_82547_tx_fifo_reset(struct adapter *adapter)
1808 {
1809         u32 tctl;
1810
1811         if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1812             E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1813             (E1000_READ_REG(&adapter->hw, E1000_TDFT) == 
1814             E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1815             (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1816             E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1817             (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1818                 /* Disable TX unit */
1819                 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1820                 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1821                     tctl & ~E1000_TCTL_EN);
1822
1823                 /* Reset FIFO pointers */
1824                 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1825                     adapter->tx_head_addr);
1826                 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1827                     adapter->tx_head_addr);
1828                 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1829                     adapter->tx_head_addr);
1830                 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1831                     adapter->tx_head_addr);
1832
1833                 /* Re-enable TX unit */
1834                 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
1835                 E1000_WRITE_FLUSH(&adapter->hw);
1836
1837                 adapter->tx_fifo_head = 0;
1838                 adapter->tx_fifo_reset_cnt++;
1839
1840                 return (TRUE);
1841         }
1842         else {
1843                 return (FALSE);
1844         }
1845 }
1846
1847 static void
1848 lem_set_promisc(struct adapter *adapter)
1849 {
1850         struct ifnet    *ifp = adapter->ifp;
1851         u32             reg_rctl;
1852
1853         reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1854
1855         if (ifp->if_flags & IFF_PROMISC) {
1856                 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1857                 /* Turn this on if you want to see bad packets */
1858                 if (lem_debug_sbp)
1859                         reg_rctl |= E1000_RCTL_SBP;
1860                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1861         } else if (ifp->if_flags & IFF_ALLMULTI) {
1862                 reg_rctl |= E1000_RCTL_MPE;
1863                 reg_rctl &= ~E1000_RCTL_UPE;
1864                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1865         }
1866 }
1867
1868 static void
1869 lem_disable_promisc(struct adapter *adapter)
1870 {
1871         u32     reg_rctl;
1872
1873         reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1874
1875         reg_rctl &=  (~E1000_RCTL_UPE);
1876         reg_rctl &=  (~E1000_RCTL_MPE);
1877         reg_rctl &=  (~E1000_RCTL_SBP);
1878         E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1879 }
1880

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *
 *  Collects up to MAX_NUM_MULTICAST_ADDRESSES link-layer addresses
 *  from the interface into adapter->mta and hands them to the shared
 *  code; overflows fall back to multicast-promiscuous mode.
 *
 **********************************************************************/

static void
lem_set_multi(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct ifmultiaddr *ifma;
	u32 reg_rctl = 0;
	u8  *mta; /* Multicast array memory */
	int mcnt = 0;

	IOCTL_DEBUGOUT("lem_set_multi: begin");

	mta = adapter->mta;
	bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	/*
	 * 82542 rev2 requires the receiver be held in reset (and MWI
	 * disabled) while the multicast table is rewritten.
	 */
	if (adapter->hw.mac.type == e1000_82542 && 
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
	}

	/* Snapshot the interface's multicast list under the maddr lock */
#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif
	/* Table full: accept all multicast instead of filtering */
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else
		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);

	/* Release the 82542 rev2 receiver reset and restore MWI */
	if (adapter->hw.mac.type == e1000_82542 && 
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_set_mwi(&adapter->hw);
	}
}
1951
1952
1953 /*********************************************************************
1954  *  Timer routine
1955  *
1956  *  This routine checks for link status and updates statistics.
1957  *
1958  **********************************************************************/
1959
1960 static void
1961 lem_local_timer(void *arg)
1962 {
1963         struct adapter  *adapter = arg;
1964
1965         EM_CORE_LOCK_ASSERT(adapter);
1966
1967         lem_update_link_status(adapter);
1968         lem_update_stats_counters(adapter);
1969
1970         lem_smartspeed(adapter);
1971
1972         /*
1973          * We check the watchdog: the time since
1974          * the last TX descriptor was cleaned.
1975          * This implies a functional TX engine.
1976          */
1977         if ((adapter->watchdog_check == TRUE) &&
1978             (ticks - adapter->watchdog_time > EM_WATCHDOG))
1979                 goto hung;
1980
1981         callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1982         return;
1983 hung:
1984         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1985         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1986         adapter->watchdog_events++;
1987         lem_init_locked(adapter);
1988 }
1989
/*
 * Poll/refresh link state and propagate up/down transitions to the
 * network stack (baudrate, link state change, watchdog arming).
 */
static void
lem_update_link_status(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	u32 link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else
			link_check = TRUE;
		break;
	case e1000_media_type_fiber:
		/* Fiber link state comes from the STATUS register */
		e1000_check_for_link(hw);
		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
				 E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		/* Unknown media: leave link_check = 0 (down) */
		break;
	}

	/* Now check for a transition */
	if (link_check && (adapter->link_active == 0)) {
		/* Link came up: learn speed/duplex and notify the stack */
		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
		    &adapter->link_duplex);
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		adapter->link_active = 1;
		adapter->smartspeed = 0;
		ifp->if_baudrate = adapter->link_speed * 1000000;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (!link_check && (adapter->link_active == 1)) {
		/* Link went down */
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		adapter->link_active = 0;
		/* Link down, disable watchdog */
		adapter->watchdog_check = FALSE;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}
2048
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 *  This routine should always be called with BOTH the CORE
 *  and TX locks.
 **********************************************************************/

static void
lem_stop(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;

	EM_CORE_LOCK_ASSERT(adapter);
	EM_TX_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("lem_stop: begin");

	/* Silence interrupts and pending timers before touching hardware */
	lem_disable_intr(adapter);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	e1000_reset_hw(&adapter->hw);
	/* Clear wake-up control on parts that have the WUC register */
	if (adapter->hw.mac.type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);

	e1000_led_off(&adapter->hw);
	e1000_cleanup_led(&adapter->hw);
}
2083
2084
2085 /*********************************************************************
2086  *
2087  *  Determine hardware revision.
2088  *
2089  **********************************************************************/
2090 static void
2091 lem_identify_hardware(struct adapter *adapter)
2092 {
2093         device_t dev = adapter->dev;
2094
2095         /* Make sure our PCI config space has the necessary stuff set */
2096         adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2097         if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2098             (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2099                 device_printf(dev, "Memory Access and/or Bus Master bits "
2100                     "were not set!\n");
2101                 adapter->hw.bus.pci_cmd_word |=
2102                 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2103                 pci_write_config(dev, PCIR_COMMAND,
2104                     adapter->hw.bus.pci_cmd_word, 2);
2105         }
2106
2107         /* Save off the information about this board */
2108         adapter->hw.vendor_id = pci_get_vendor(dev);
2109         adapter->hw.device_id = pci_get_device(dev);
2110         adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2111         adapter->hw.subsystem_vendor_id =
2112             pci_read_config(dev, PCIR_SUBVEND_0, 2);
2113         adapter->hw.subsystem_device_id =
2114             pci_read_config(dev, PCIR_SUBDEV_0, 2);
2115
2116         /* Do Shared Code Init and Setup */
2117         if (e1000_set_mac_type(&adapter->hw)) {
2118                 device_printf(dev, "Setup init failure\n");
2119                 return;
2120         }
2121 }
2122
/*
 * Map the device's memory BAR (and, when applicable, its IO BAR) and
 * record the bus-space handles used by the register access macros.
 * Returns E1000_SUCCESS or ENXIO on allocation failure.
 */
static int
lem_allocate_pci_resources(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	int		val, rid, error = E1000_SUCCESS;

	/* BAR 0 is the memory-mapped register window */
	rid = PCIR_BAR(0);
	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (adapter->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->memory);
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/*
	 * Only older adapters use IO mapping
	 * NOTE(review): the comment says "older" but the condition is
	 * type > e1000_82543 — confirm intended MAC-type cutoff.
	 */
	if (adapter->hw.mac.type > e1000_82543) {
		/*
		 * Figure our where our IO BAR is ?
		 * Scan BARs until one is flagged as IO-type; 64-bit
		 * memory BARs consume two config dwords, so skip ahead.
		 */
		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
			val = pci_read_config(dev, rid, 4);
			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CIS) {
			device_printf(dev, "Unable to locate IO BAR\n");
			return (ENXIO);
		}
		adapter->ioport = bus_alloc_resource_any(dev,
		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
		if (adapter->ioport == NULL) {
			device_printf(dev, "Unable to allocate bus resource: "
			    "ioport\n");
			return (ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->ioport);
	}

	/* Back-pointer used by the shared code's OS-dependent layer */
	adapter->hw.back = &adapter->osdep;

	return (error);
}
2178
/*********************************************************************
 *
 *  Setup the Legacy or MSI Interrupt handler
 *
 *  Allocates the single IRQ resource, then wires either a classic
 *  MPSAFE handler (lem_use_legacy_irq) or a fast/filter handler
 *  backed by a taskqueue for deferred RX/TX and link processing.
 *
 **********************************************************************/
int
lem_allocate_irq(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int error, rid = 0;

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* We allocate a single interrupt resource */
	adapter->res[0] = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res[0] == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

	/* Do Legacy setup? */
	if (lem_use_legacy_irq) {
		/* Whole handler runs in ithread context */
		if ((error = bus_setup_intr(dev, adapter->res[0],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
		    &adapter->tag[0])) != 0) {
			device_printf(dev,
			    "Failed to register interrupt handler");
			return (error);
		}
		return (0);
	}

	/*
	 * Use a Fast interrupt and the associated
	 * deferred processing contexts.
	 */
	TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
	TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
	adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(adapter->dev));
	/* Filter routine lem_irq_fast enqueues work onto the taskqueue */
	if ((error = bus_setup_intr(dev, adapter->res[0],
	    INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
	    &adapter->tag[0])) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
			    "handler: %d\n", error);
		/* Undo the taskqueue on failure */
		taskqueue_free(adapter->tq);
		adapter->tq = NULL;
		return (error);
	}
	
	return (0);
}
2236
2237
2238 static void
2239 lem_free_pci_resources(struct adapter *adapter)
2240 {
2241         device_t dev = adapter->dev;
2242
2243
2244         if (adapter->tag[0] != NULL) {
2245                 bus_teardown_intr(dev, adapter->res[0],
2246                     adapter->tag[0]);
2247                 adapter->tag[0] = NULL;
2248         }
2249
2250         if (adapter->res[0] != NULL) {
2251                 bus_release_resource(dev, SYS_RES_IRQ,
2252                     0, adapter->res[0]);
2253         }
2254
2255         if (adapter->memory != NULL)
2256                 bus_release_resource(dev, SYS_RES_MEMORY,
2257                     PCIR_BAR(0), adapter->memory);
2258
2259         if (adapter->ioport != NULL)
2260                 bus_release_resource(dev, SYS_RES_IOPORT,
2261                     adapter->io_rid, adapter->ioport);
2262 }
2263


/*********************************************************************
 *
 *  Initialize the hardware to a configuration
 *  as specified by the adapter structure.
 *
 **********************************************************************/
static int
lem_hardware_init(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	u16	rx_buffer_size;

	INIT_DEBUGOUT("lem_hardware_init: begin");

	/* Issue a global reset */
	e1000_reset_hw(&adapter->hw);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	/* PBA register holds the RX packet buffer size in KB; convert to bytes */
	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
	    0xffff) << 10 );

	adapter->hw.fc.high_water = rx_buffer_size -
	    roundup2(adapter->max_frame_size, 1024);
	adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;

	adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
	adapter->hw.fc.send_xon = TRUE;

	/* Set Flow control, use the tunable location if sane */
	if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
		adapter->hw.fc.requested_mode = lem_fc_setting;
	else
		adapter->hw.fc.requested_mode = e1000_fc_none;

	if (e1000_init_hw(&adapter->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	/* Prime the link state after init */
	e1000_check_for_link(&adapter->hw);

	return (0);
}
2324
2325 /*********************************************************************
2326  *
2327  *  Setup networking device structure and register an interface.
2328  *
2329  **********************************************************************/
static int
lem_setup_interface(device_t dev, struct adapter *adapter)
{
        struct ifnet   *ifp;

        INIT_DEBUGOUT("lem_setup_interface: begin");

        /* Allocate the ifnet and link it to the softc both ways. */
        ifp = adapter->ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "can not allocate ifnet structure\n");
                return (-1);
        }
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_init =  lem_init;
        ifp->if_softc = adapter;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = lem_ioctl;
        ifp->if_start = lem_start;
        /* Size the software send queue to the TX ring (one slot reserved). */
        IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
        ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
        IFQ_SET_READY(&ifp->if_snd);

        ether_ifattach(ifp, adapter->hw.mac.addr);

        /* Start from a clean capability slate, then opt in below. */
        ifp->if_capabilities = ifp->if_capenable = 0;

        /* NOTE(review): presumably MACs older than 82543 lack checksum
         * offload hardware — confirm against the e1000 datasheets. */
        if (adapter->hw.mac.type >= e1000_82543) {
                ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
                ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
        }

        /*
         * Tell the upper layer(s) we support long frames.
         */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
        ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
        ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

        /*
        ** Dont turn this on by default, if vlans are
        ** created on another pseudo device (eg. lagg)
        ** then vlan events are not passed thru, breaking
        ** operation, but with HW FILTER off it works. If
        ** using vlans directly on the em driver you can
        ** enable this and get full hardware tag filtering.
        */
        ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#ifdef DEVICE_POLLING
        ifp->if_capabilities |= IFCAP_POLLING;
#endif

        /* Enable only WOL MAGIC by default */
        if (adapter->wol) {
                ifp->if_capabilities |= IFCAP_WOL;
                ifp->if_capenable |= IFCAP_WOL_MAGIC;
        }

        /*
         * Specify the media types supported by this adapter and register
         * callbacks to update media and link information
         */
        ifmedia_init(&adapter->media, IFM_IMASK,
            lem_media_change, lem_media_status);
        if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
            (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
                u_char fiber_type = IFM_1000_SX;        /* default type */

                /* The 82545 fiber part is 1000BASE-LX rather than SX. */
                if (adapter->hw.mac.type == e1000_82545)
                        fiber_type = IFM_1000_LX;
                ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 
                            0, NULL);
                ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
        } else {
                /* Copper: advertise 10/100 both duplexes ... */
                ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
                ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
                            0, NULL);
                ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
                            0, NULL);
                ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
                            0, NULL);
                /* ... and gigabit, except on IFE (10/100-only) PHYs. */
                if (adapter->hw.phy.type != e1000_phy_ife) {
                        ifmedia_add(&adapter->media,
                                IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
                        ifmedia_add(&adapter->media,
                                IFM_ETHER | IFM_1000_T, 0, NULL);
                }
        }
        /* Autoselect is always offered and is the default. */
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
        return (0);
}
2422
2423
2424 /*********************************************************************
2425  *
2426  *  Workaround for SmartSpeed on 82541 and 82547 controllers
2427  *
2428  **********************************************************************/
static void
lem_smartspeed(struct adapter *adapter)
{
        u16 phy_tmp;

        /*
         * SmartSpeed only applies while an IGP PHY is autonegotiating
         * 1000BASE-T full duplex and the link has not come up; in every
         * other situation there is nothing to work around.
         */
        if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
            adapter->hw.mac.autoneg == 0 ||
            (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
                return;

        if (adapter->smartspeed == 0) {
                /* If Master/Slave config fault is asserted twice,
                 * we assume back-to-back */
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
                if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
                        return;
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
                if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
                        e1000_read_phy_reg(&adapter->hw,
                            PHY_1000T_CTRL, &phy_tmp);
                        if(phy_tmp & CR_1000T_MS_ENABLE) {
                                /*
                                 * Stop forcing the master/slave setting and
                                 * kick off a fresh autonegotiation cycle.
                                 */
                                phy_tmp &= ~CR_1000T_MS_ENABLE;
                                e1000_write_phy_reg(&adapter->hw,
                                    PHY_1000T_CTRL, phy_tmp);
                                adapter->smartspeed++;
                                if(adapter->hw.mac.autoneg &&
                                   !e1000_copper_link_autoneg(&adapter->hw) &&
                                   !e1000_read_phy_reg(&adapter->hw,
                                    PHY_CONTROL, &phy_tmp)) {
                                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
                                                    MII_CR_RESTART_AUTO_NEG);
                                        e1000_write_phy_reg(&adapter->hw,
                                            PHY_CONTROL, phy_tmp);
                                }
                        }
                }
                return;
        } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
                /* If still no link, perhaps using 2/3 pair cable */
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
                /* Re-enable the master/slave override we cleared above. */
                phy_tmp |= CR_1000T_MS_ENABLE;
                e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
                if(adapter->hw.mac.autoneg &&
                   !e1000_copper_link_autoneg(&adapter->hw) &&
                   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
                        phy_tmp |= (MII_CR_AUTO_NEG_EN |
                                    MII_CR_RESTART_AUTO_NEG);
                        e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
                }
        }
        /* Restart process after EM_SMARTSPEED_MAX iterations */
        if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
                adapter->smartspeed = 0;
}
2483
2484
2485 /*
2486  * Manage DMA'able memory.
2487  */
2488 static void
2489 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2490 {
2491         if (error)
2492                 return;
2493         *(bus_addr_t *) arg = segs[0].ds_addr;
2494 }
2495
2496 static int
2497 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2498         struct em_dma_alloc *dma, int mapflags)
2499 {
2500         int error;
2501
2502         error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2503                                 EM_DBA_ALIGN, 0,        /* alignment, bounds */
2504                                 BUS_SPACE_MAXADDR,      /* lowaddr */
2505                                 BUS_SPACE_MAXADDR,      /* highaddr */
2506                                 NULL, NULL,             /* filter, filterarg */
2507                                 size,                   /* maxsize */
2508                                 1,                      /* nsegments */
2509                                 size,                   /* maxsegsize */
2510                                 0,                      /* flags */
2511                                 NULL,                   /* lockfunc */
2512                                 NULL,                   /* lockarg */
2513                                 &dma->dma_tag);
2514         if (error) {
2515                 device_printf(adapter->dev,
2516                     "%s: bus_dma_tag_create failed: %d\n",
2517                     __func__, error);
2518                 goto fail_0;
2519         }
2520
2521         error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2522             BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2523         if (error) {
2524                 device_printf(adapter->dev,
2525                     "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2526                     __func__, (uintmax_t)size, error);
2527                 goto fail_2;
2528         }
2529
2530         dma->dma_paddr = 0;
2531         error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2532             size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2533         if (error || dma->dma_paddr == 0) {
2534                 device_printf(adapter->dev,
2535                     "%s: bus_dmamap_load failed: %d\n",
2536                     __func__, error);
2537                 goto fail_3;
2538         }
2539
2540         return (0);
2541
2542 fail_3:
2543         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2544 fail_2:
2545         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2546         bus_dma_tag_destroy(dma->dma_tag);
2547 fail_0:
2548         dma->dma_map = NULL;
2549         dma->dma_tag = NULL;
2550
2551         return (error);
2552 }
2553
2554 static void
2555 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2556 {
2557         if (dma->dma_tag == NULL)
2558                 return;
2559         if (dma->dma_map != NULL) {
2560                 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2561                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2562                 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2563                 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2564                 dma->dma_map = NULL;
2565         }
2566         bus_dma_tag_destroy(dma->dma_tag);
2567         dma->dma_tag = NULL;
2568 }
2569
2570
2571 /*********************************************************************
2572  *
2573  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2574  *  the information needed to transmit a packet on the wire.
2575  *
2576  **********************************************************************/
static int
lem_allocate_transmit_structures(struct adapter *adapter)
{
        device_t dev = adapter->dev;
        struct em_buffer *tx_buffer;
        int error;

        /*
         * Create DMA tags for tx descriptors
         */
        /*
         * One mbuf chain may scatter over up to EM_MAX_SCATTER
         * cluster-sized segments, hence maxsize/nsegments/maxsegsize.
         */
        if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
                                1, 0,                   /* alignment, bounds */
                                BUS_SPACE_MAXADDR,      /* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                NULL, NULL,             /* filter, filterarg */
                                MCLBYTES * EM_MAX_SCATTER,      /* maxsize */
                                EM_MAX_SCATTER,         /* nsegments */
                                MCLBYTES,               /* maxsegsize */
                                0,                      /* flags */
                                NULL,                   /* lockfunc */
                                NULL,                   /* lockarg */
                                &adapter->txtag)) != 0) {
                device_printf(dev, "Unable to allocate TX DMA tag\n");
                goto fail;
        }

        /* One em_buffer per TX descriptor, zeroed so maps start NULL. */
        adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
            adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
        if (adapter->tx_buffer_area == NULL) {
                device_printf(dev, "Unable to allocate tx_buffer memory\n");
                error = ENOMEM;
                goto fail;
        }

        /* Create the descriptor buffer dma maps */
        for (int i = 0; i < adapter->num_tx_desc; i++) {
                tx_buffer = &adapter->tx_buffer_area[i];
                error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
                if (error != 0) {
                        device_printf(dev, "Unable to create TX DMA map\n");
                        goto fail;
                }
                /* -1 means no EOP recorded; lem_txeof() stops on it. */
                tx_buffer->next_eop = -1;
        }

        return (0);
fail:
        /* Tears down whatever subset was set up above (tag, area, maps). */
        lem_free_transmit_structures(adapter);
        return (error);
}
2627
2628 /*********************************************************************
2629  *
2630  *  (Re)Initialize transmit structures.
2631  *
2632  **********************************************************************/
2633 static void
2634 lem_setup_transmit_structures(struct adapter *adapter)
2635 {
2636         struct em_buffer *tx_buffer;
2637 #ifdef DEV_NETMAP
2638         /* we are already locked */
2639         struct netmap_adapter *na = NA(adapter->ifp);
2640         struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
2641 #endif /* DEV_NETMAP */
2642
2643         /* Clear the old ring contents */
2644         bzero(adapter->tx_desc_base,
2645             (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2646
2647         /* Free any existing TX buffers */
2648         for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2649                 tx_buffer = &adapter->tx_buffer_area[i];
2650                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2651                     BUS_DMASYNC_POSTWRITE);
2652                 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2653                 m_freem(tx_buffer->m_head);
2654                 tx_buffer->m_head = NULL;
2655 #ifdef DEV_NETMAP
2656                 if (slot) {
2657                         /* the i-th NIC entry goes to slot si */
2658                         int si = netmap_idx_n2k(&na->tx_rings[0], i);
2659                         uint64_t paddr;
2660                         void *addr;
2661
2662                         addr = PNMB(slot + si, &paddr);
2663                         adapter->tx_desc_base[si].buffer_addr = htole64(paddr);
2664                         /* reload the map for netmap mode */
2665                         netmap_load_map(adapter->txtag, tx_buffer->map, addr);
2666                 }
2667 #endif /* DEV_NETMAP */
2668                 tx_buffer->next_eop = -1;
2669         }
2670
2671         /* Reset state */
2672         adapter->last_hw_offload = 0;
2673         adapter->next_avail_tx_desc = 0;
2674         adapter->next_tx_to_clean = 0;
2675         adapter->num_tx_desc_avail = adapter->num_tx_desc;
2676
2677         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2678             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2679
2680         return;
2681 }
2682
2683 /*********************************************************************
2684  *
2685  *  Enable transmit unit.
2686  *
2687  **********************************************************************/
static void
lem_initialize_transmit_unit(struct adapter *adapter)
{
        u32     tctl, tipg = 0;
        u64     bus_addr;

         INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
        /* Setup the Base and Length of the Tx Descriptor Ring */
        bus_addr = adapter->txdma.dma_paddr;
        E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
            adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
        /* Base address is split across two 32-bit registers (high first). */
        E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
            (u32)(bus_addr >> 32));
        E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
            (u32)bus_addr);
        /* Setup the HW Tx Head and Tail descriptor pointers */
        E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
        E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);

        HW_DEBUGOUT2("Base = %x, Length = %x\n",
            E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
            E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));

        /* Set the default values for the Tx Inter Packet Gap timer */
        switch (adapter->hw.mac.type) {
        case e1000_82542:
                tipg = DEFAULT_82542_TIPG_IPGT;
                tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                break;
        default:
                /* Fiber/serdes and copper use different IPGT defaults. */
                if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
                    (adapter->hw.phy.media_type ==
                    e1000_media_type_internal_serdes))
                        tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
                else
                        tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
                tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
        }

        E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
        /* TX interrupt delay; the absolute-delay register is 82540+ only. */
        E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
        if(adapter->hw.mac.type >= e1000_82540)
                E1000_WRITE_REG(&adapter->hw, E1000_TADV,
                    adapter->tx_abs_int_delay.value);

        /* Program the Transmit Control Register */
        tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
        tctl &= ~E1000_TCTL_CT;
        tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
                   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

        /* This write will effectively turn on the transmit unit. */
        E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

        /* Setup Transmit Descriptor Base Settings */
        adapter->txd_cmd = E1000_TXD_CMD_IFCS;

        /* Only request delayed interrupts when a delay is configured. */
        if (adapter->tx_int_delay.value > 0)
                adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}
2750
2751 /*********************************************************************
2752  *
2753  *  Free all transmit related data structures.
2754  *
2755  **********************************************************************/
2756 static void
2757 lem_free_transmit_structures(struct adapter *adapter)
2758 {
2759         struct em_buffer *tx_buffer;
2760
2761         INIT_DEBUGOUT("free_transmit_structures: begin");
2762
2763         if (adapter->tx_buffer_area != NULL) {
2764                 for (int i = 0; i < adapter->num_tx_desc; i++) {
2765                         tx_buffer = &adapter->tx_buffer_area[i];
2766                         if (tx_buffer->m_head != NULL) {
2767                                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2768                                     BUS_DMASYNC_POSTWRITE);
2769                                 bus_dmamap_unload(adapter->txtag,
2770                                     tx_buffer->map);
2771                                 m_freem(tx_buffer->m_head);
2772                                 tx_buffer->m_head = NULL;
2773                         } else if (tx_buffer->map != NULL)
2774                                 bus_dmamap_unload(adapter->txtag,
2775                                     tx_buffer->map);
2776                         if (tx_buffer->map != NULL) {
2777                                 bus_dmamap_destroy(adapter->txtag,
2778                                     tx_buffer->map);
2779                                 tx_buffer->map = NULL;
2780                         }
2781                 }
2782         }
2783         if (adapter->tx_buffer_area != NULL) {
2784                 free(adapter->tx_buffer_area, M_DEVBUF);
2785                 adapter->tx_buffer_area = NULL;
2786         }
2787         if (adapter->txtag != NULL) {
2788                 bus_dma_tag_destroy(adapter->txtag);
2789                 adapter->txtag = NULL;
2790         }
2791 #if __FreeBSD_version >= 800000
2792         if (adapter->br != NULL)
2793                 buf_ring_free(adapter->br, M_DEVBUF);
2794 #endif
2795 }
2796
2797 /*********************************************************************
2798  *
2799  *  The offload context needs to be set when we transfer the first
2800  *  packet of a particular protocol (TCP/UDP). This routine has been
2801  *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2802  *
2803  *  Added back the old method of keeping the current context type
2804  *  and not setting if unnecessary, as this is reported to be a
2805  *  big performance win.  -jfv
2806  **********************************************************************/
static void
lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
    u32 *txd_upper, u32 *txd_lower)
{
        struct e1000_context_desc *TXD = NULL;
        struct em_buffer *tx_buffer;
        struct ether_vlan_header *eh;
        struct ip *ip = NULL;
        struct ip6_hdr *ip6;
        int curr_txd, ehdrlen;
        u32 cmd, hdr_len, ip_hlen;
        u16 etype;
        u8 ipproto;


        cmd = hdr_len = ipproto = 0;
        /* Caller's descriptor flag words start clean; we OR bits in below. */
        *txd_upper = *txd_lower = 0;
        curr_txd = adapter->next_avail_tx_desc;

        /*
         * Determine where frame payload starts.
         * Jump over vlan headers if already present,
         * helpful for QinQ too.
         */
        eh = mtod(mp, struct ether_vlan_header *);
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                etype = ntohs(eh->evl_proto);
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        } else {
                etype = ntohs(eh->evl_encap_proto);
                ehdrlen = ETHER_HDR_LEN;
        }

        /*
         * We only support TCP/UDP for IPv4 and IPv6 for the moment.
         * TODO: Support SCTP too when it hits the tree.
         */
        switch (etype) {
        case ETHERTYPE_IP:
                /* NOTE(review): assumes the IP header is contiguous in the
                 * first mbuf at ehdrlen — confirm callers pull it up. */
                ip = (struct ip *)(mp->m_data + ehdrlen);
                ip_hlen = ip->ip_hl << 2;

                /* Setup of IP header checksum. */
                if (mp->m_pkthdr.csum_flags & CSUM_IP) {
                        /*
                         * Start offset for header checksum calculation.
                         * End offset for header checksum calculation.
                         * Offset of place to put the checksum.
                         */
                        TXD = (struct e1000_context_desc *)
                            &adapter->tx_desc_base[curr_txd];
                        TXD->lower_setup.ip_fields.ipcss = ehdrlen;
                        TXD->lower_setup.ip_fields.ipcse =
                            htole16(ehdrlen + ip_hlen);
                        TXD->lower_setup.ip_fields.ipcso =
                            ehdrlen + offsetof(struct ip, ip_sum);
                        cmd |= E1000_TXD_CMD_IP;
                        *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
                }

                hdr_len = ehdrlen + ip_hlen;
                ipproto = ip->ip_p;

                break;
        case ETHERTYPE_IPV6:
                ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
                ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */

                /* IPv6 doesn't have a header checksum. */

                hdr_len = ehdrlen + ip_hlen;
                ipproto = ip6->ip6_nxt;
                break;

        default:
                /* Unsupported ethertype: no offload, flags stay zero. */
                return;
        }

        switch (ipproto) {
        case IPPROTO_TCP:
                if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
                        *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
                        *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
                        /* no need for context if already set */
                        if (adapter->last_hw_offload == CSUM_TCP)
                                return;
                        adapter->last_hw_offload = CSUM_TCP;
                        /*
                         * Start offset for payload checksum calculation.
                         * End offset for payload checksum calculation.
                         * Offset of place to put the checksum.
                         */
                        TXD = (struct e1000_context_desc *)
                            &adapter->tx_desc_base[curr_txd];
                        TXD->upper_setup.tcp_fields.tucss = hdr_len;
                        TXD->upper_setup.tcp_fields.tucse = htole16(0);
                        TXD->upper_setup.tcp_fields.tucso =
                            hdr_len + offsetof(struct tcphdr, th_sum);
                        cmd |= E1000_TXD_CMD_TCP;
                }
                break;
        case IPPROTO_UDP:
        {
                if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
                        *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
                        *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
                        /* no need for context if already set */
                        if (adapter->last_hw_offload == CSUM_UDP)
                                return;
                        adapter->last_hw_offload = CSUM_UDP;
                        /*
                         * Start offset for header checksum calculation.
                         * End offset for header checksum calculation.
                         * Offset of place to put the checksum.
                         */
                        TXD = (struct e1000_context_desc *)
                            &adapter->tx_desc_base[curr_txd];
                        TXD->upper_setup.tcp_fields.tucss = hdr_len;
                        TXD->upper_setup.tcp_fields.tucse = htole16(0);
                        TXD->upper_setup.tcp_fields.tucso =
                            hdr_len + offsetof(struct udphdr, uh_sum);
                }
                /* Fall Thru */
        }
        default:
                break;
        }

        /* No context descriptor was filled in: nothing to hand to HW. */
        if (TXD == NULL)
                return;
        TXD->tcp_seg_setup.data = htole32(0);
        TXD->cmd_and_length =
            htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
        /* The context descriptor consumes a ring slot with no mbuf. */
        tx_buffer = &adapter->tx_buffer_area[curr_txd];
        tx_buffer->m_head = NULL;
        tx_buffer->next_eop = -1;

        /* Advance the ring index with wraparound. */
        if (++curr_txd == adapter->num_tx_desc)
                curr_txd = 0;

        adapter->num_tx_desc_avail--;
        adapter->next_avail_tx_desc = curr_txd;
}
2950
2951
2952 /**********************************************************************
2953  *
2954  *  Examine each tx_buffer in the used queue. If the hardware is done
2955  *  processing the packet then free associated resources. The
2956  *  tx_buffer is put back on the free queue.
2957  *
2958  **********************************************************************/
static void
lem_txeof(struct adapter *adapter)
{
        int first, last, done, num_avail;
        struct em_buffer *tx_buffer;
        struct e1000_tx_desc   *tx_desc, *eop_desc;
        struct ifnet   *ifp = adapter->ifp;

        EM_TX_LOCK_ASSERT(adapter);

#ifdef DEV_NETMAP
        /* In netmap mode the ring is owned by netmap; nothing to do here. */
        if (netmap_tx_irq(ifp, 0 | (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT)))
                return;
#endif /* DEV_NETMAP */
        /* Ring already fully clean. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

        num_avail = adapter->num_tx_desc_avail;
        first = adapter->next_tx_to_clean;
        tx_desc = &adapter->tx_desc_base[first];
        tx_buffer = &adapter->tx_buffer_area[first];
        /* next_eop was recorded at transmit time; -1 means none pending. */
        last = tx_buffer->next_eop;
        eop_desc = &adapter->tx_desc_base[last];

        /*
         * What this does is get the index of the
         * first descriptor AFTER the EOP of the 
         * first packet, that way we can do the
         * simple comparison on the inner while loop.
         */
        if (++last == adapter->num_tx_desc)
                last = 0;
        done = last;

        /* Pick up descriptor status written back by the hardware. */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);

        /* Outer loop: one iteration per completed packet (DD set on EOP). */
        while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
                /* We clean the range of the packet */
                while (first != done) {
                        tx_desc->upper.data = 0;
                        tx_desc->lower.data = 0;
                        tx_desc->buffer_addr = 0;
                        ++num_avail;

                        /* Only descriptors with an mbuf carry a mapping. */
                        if (tx_buffer->m_head) {
                                ifp->if_opackets++;
                                bus_dmamap_sync(adapter->txtag,
                                    tx_buffer->map,
                                    BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(adapter->txtag,
                                    tx_buffer->map);

                                m_freem(tx_buffer->m_head);
                                tx_buffer->m_head = NULL;
                        }
                        tx_buffer->next_eop = -1;
                        /* Progress was made: push the watchdog deadline out. */
                        adapter->watchdog_time = ticks;

                        if (++first == adapter->num_tx_desc)
                                first = 0;

                        tx_buffer = &adapter->tx_buffer_area[first];
                        tx_desc = &adapter->tx_desc_base[first];
                }
                /* See if we can continue to the next packet */
                last = tx_buffer->next_eop;
                if (last != -1) {
                        eop_desc = &adapter->tx_desc_base[last];
                        /* Get new done point */
                        if (++last == adapter->num_tx_desc) last = 0;
                        done = last;
                } else
                        break;
        }
        /* Give the cleared descriptors back to the hardware. */
        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        adapter->next_tx_to_clean = first;
        adapter->num_tx_desc_avail = num_avail;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to
         * tell the stack that it is OK to send packets.
         * If there are no pending descriptors, clear the watchdog.
         */
        if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
                        adapter->watchdog_check = FALSE;
                        return;
                } 
        }
}
3053
3054 /*********************************************************************
3055  *
3056  *  When Link is lost sometimes there is work still in the TX ring
3057  *  which may result in a watchdog, rather than allow that we do an
3058  *  attempted cleanup and then reinit here. Note that this has been
3059  *  seens mostly with fiber adapters.
3060  *
3061  **********************************************************************/
3062 static void
3063 lem_tx_purge(struct adapter *adapter)
3064 {
3065         if ((!adapter->link_active) && (adapter->watchdog_check)) {
3066                 EM_TX_LOCK(adapter);
3067                 lem_txeof(adapter);
3068                 EM_TX_UNLOCK(adapter);
3069                 if (adapter->watchdog_check) /* Still outstanding? */
3070                         lem_init_locked(adapter);
3071         }
3072 }
3073
3074 /*********************************************************************
3075  *
3076  *  Get a buffer from system mbuf buffer pool.
3077  *
3078  **********************************************************************/
3079 static int
3080 lem_get_buf(struct adapter *adapter, int i)
3081 {
3082         struct mbuf             *m;
3083         bus_dma_segment_t       segs[1];
3084         bus_dmamap_t            map;
3085         struct em_buffer        *rx_buffer;
3086         int                     error, nsegs;
3087
3088         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3089         if (m == NULL) {
3090                 adapter->mbuf_cluster_failed++;
3091                 return (ENOBUFS);
3092         }
3093         m->m_len = m->m_pkthdr.len = MCLBYTES;
3094
3095         if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3096                 m_adj(m, ETHER_ALIGN);
3097
3098         /*
3099          * Using memory from the mbuf cluster pool, invoke the
3100          * bus_dma machinery to arrange the memory mapping.
3101          */
3102         error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3103             adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3104         if (error != 0) {
3105                 m_free(m);
3106                 return (error);
3107         }
3108
3109         /* If nsegs is wrong then the stack is corrupt. */
3110         KASSERT(nsegs == 1, ("Too many segments returned!"));
3111
3112         rx_buffer = &adapter->rx_buffer_area[i];
3113         if (rx_buffer->m_head != NULL)
3114                 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3115
3116         map = rx_buffer->map;
3117         rx_buffer->map = adapter->rx_sparemap;
3118         adapter->rx_sparemap = map;
3119         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3120         rx_buffer->m_head = m;
3121
3122         adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3123         return (0);
3124 }
3125
3126 /*********************************************************************
3127  *
3128  *  Allocate memory for rx_buffer structures. Since we use one
3129  *  rx_buffer per received packet, the maximum number of rx_buffer's
3130  *  that we'll need is equal to the number of receive descriptors
3131  *  that we've allocated.
3132  *
3133  **********************************************************************/
3134 static int
3135 lem_allocate_receive_structures(struct adapter *adapter)
3136 {
3137         device_t dev = adapter->dev;
3138         struct em_buffer *rx_buffer;
3139         int i, error;
3140
3141         adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3142             adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3143         if (adapter->rx_buffer_area == NULL) {
3144                 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3145                 return (ENOMEM);
3146         }
3147
3148         error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3149                                 1, 0,                   /* alignment, bounds */
3150                                 BUS_SPACE_MAXADDR,      /* lowaddr */
3151                                 BUS_SPACE_MAXADDR,      /* highaddr */
3152                                 NULL, NULL,             /* filter, filterarg */
3153                                 MCLBYTES,               /* maxsize */
3154                                 1,                      /* nsegments */
3155                                 MCLBYTES,               /* maxsegsize */
3156                                 0,                      /* flags */
3157                                 NULL,                   /* lockfunc */
3158                                 NULL,                   /* lockarg */
3159                                 &adapter->rxtag);
3160         if (error) {
3161                 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3162                     __func__, error);
3163                 goto fail;
3164         }
3165
3166         /* Create the spare map (used by getbuf) */
3167         error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3168              &adapter->rx_sparemap);
3169         if (error) {
3170                 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3171                     __func__, error);
3172                 goto fail;
3173         }
3174
3175         rx_buffer = adapter->rx_buffer_area;
3176         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3177                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3178                     &rx_buffer->map);
3179                 if (error) {
3180                         device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3181                             __func__, error);
3182                         goto fail;
3183                 }
3184         }
3185
3186         return (0);
3187
3188 fail:
3189         lem_free_receive_structures(adapter);
3190         return (error);
3191 }
3192
3193 /*********************************************************************
3194  *
3195  *  (Re)initialize receive structures.
3196  *
3197  **********************************************************************/
3198 static int
3199 lem_setup_receive_structures(struct adapter *adapter)
3200 {
3201         struct em_buffer *rx_buffer;
3202         int i, error;
3203 #ifdef DEV_NETMAP
3204         /* we are already under lock */
3205         struct netmap_adapter *na = NA(adapter->ifp);
3206         struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
3207 #endif
3208
3209         /* Reset descriptor ring */
3210         bzero(adapter->rx_desc_base,
3211             (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3212
3213         /* Free current RX buffers. */
3214         rx_buffer = adapter->rx_buffer_area;
3215         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3216                 if (rx_buffer->m_head != NULL) {
3217                         bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3218                             BUS_DMASYNC_POSTREAD);
3219                         bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3220                         m_freem(rx_buffer->m_head);
3221                         rx_buffer->m_head = NULL;
3222                 }
3223         }
3224
3225         /* Allocate new ones. */
3226         for (i = 0; i < adapter->num_rx_desc; i++) {
3227 #ifdef DEV_NETMAP
3228                 if (slot) {
3229                         /* the i-th NIC entry goes to slot si */
3230                         int si = netmap_idx_n2k(&na->rx_rings[0], i);
3231                         uint64_t paddr;
3232                         void *addr;
3233
3234                         addr = PNMB(slot + si, &paddr);
3235                         netmap_load_map(adapter->rxtag, rx_buffer->map, addr);
3236                         /* Update descriptor */
3237                         adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
3238                         continue;
3239                 }
3240 #endif /* DEV_NETMAP */
3241                 error = lem_get_buf(adapter, i);
3242                 if (error)
3243                         return (error);
3244         }
3245
3246         /* Setup our descriptor pointers */
3247         adapter->next_rx_desc_to_check = 0;
3248         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3249             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3250
3251         return (0);
3252 }
3253
3254 /*********************************************************************
3255  *
3256  *  Enable receive unit.
3257  *
3258  **********************************************************************/
3259
/*
 * Program the receive side of the MAC: interrupt moderation, ring
 * base/length registers, receive control (RCTL), checksum offload,
 * and finally the head/tail pointers that hand the ring to the HW.
 * Receives are disabled while the ring registers are programmed.
 */
static void
lem_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	u64	bus_addr;
	u32	rctl, rxcsum;

	INIT_DEBUGOUT("lem_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/* Absolute RX interrupt delay and throttling: 82540 and newer */
	if (adapter->hw.mac.type >= e1000_82540) {
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* 82543 TBI workaround: accept bad packets when it is active */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* Buffer sizes above 2048 require the size-extension (BSEX) bit */
	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long Packet Enable follows the configured MTU */
	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
	rctl = adapter->num_rx_desc - 1; /* default RDT value (rctl reused as scratch here) */
#ifdef DEV_NETMAP
	/* preserve buffers already made available to clients */
	if (ifp->if_capenable & IFCAP_NETMAP)
		rctl -= NA(adapter->ifp)->rx_rings[0].nr_hwavail;
#endif /* DEV_NETMAP */
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rctl);

	return;
}
3359
3360 /*********************************************************************
3361  *
3362  *  Free receive related data structures.
3363  *
3364  **********************************************************************/
3365 static void
3366 lem_free_receive_structures(struct adapter *adapter)
3367 {
3368         struct em_buffer *rx_buffer;
3369         int i;
3370
3371         INIT_DEBUGOUT("free_receive_structures: begin");
3372
3373         if (adapter->rx_sparemap) {
3374                 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3375                 adapter->rx_sparemap = NULL;
3376         }
3377
3378         /* Cleanup any existing buffers */
3379         if (adapter->rx_buffer_area != NULL) {
3380                 rx_buffer = adapter->rx_buffer_area;
3381                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3382                         if (rx_buffer->m_head != NULL) {
3383                                 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3384                                     BUS_DMASYNC_POSTREAD);
3385                                 bus_dmamap_unload(adapter->rxtag,
3386                                     rx_buffer->map);
3387                                 m_freem(rx_buffer->m_head);
3388                                 rx_buffer->m_head = NULL;
3389                         } else if (rx_buffer->map != NULL)
3390                                 bus_dmamap_unload(adapter->rxtag,
3391                                     rx_buffer->map);
3392                         if (rx_buffer->map != NULL) {
3393                                 bus_dmamap_destroy(adapter->rxtag,
3394                                     rx_buffer->map);
3395                                 rx_buffer->map = NULL;
3396                         }
3397                 }
3398         }
3399
3400         if (adapter->rx_buffer_area != NULL) {
3401                 free(adapter->rx_buffer_area, M_DEVBUF);
3402                 adapter->rx_buffer_area = NULL;
3403         }
3404
3405         if (adapter->rxtag != NULL) {
3406                 bus_dma_tag_destroy(adapter->rxtag);
3407                 adapter->rxtag = NULL;
3408         }
3409 }
3410
3411 /*********************************************************************
3412  *
3413  *  This routine executes in interrupt context. It replenishes
3414  *  the mbufs in the descriptor and sends data which has been
3415  *  dma'ed into host memory to upper layer.
3416  *
3417  *  We loop at most count times if count is > 0, or until done if
3418  *  count < 0.
3419  *  
3420  *  For polling we also now return the number of cleaned packets
3421  *********************************************************************/
static bool
lem_rxeof(struct adapter *adapter, int count, int *done)
{
	struct ifnet	*ifp = adapter->ifp;
	struct mbuf	*mp;
	u8		status = 0, accept_frame = 0, eop = 0;
	u16		len, desc_len, prev_len_adj;
	int		i, rx_sent = 0;
	struct e1000_rx_desc   *current_desc;

	EM_RX_LOCK(adapter);
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	/* Make hardware descriptor writes visible to the CPU. */
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

#ifdef DEV_NETMAP
	/*
	 * If the ring is in netmap mode the client consumes the event.
	 * NETMAP_LOCKED_ENTER signals that we enter holding the RX lock;
	 * NOTE(review): netmap appears to release it on this path --
	 * confirm against the netmap_rx_irq() contract.
	 */
	if (netmap_rx_irq(ifp, 0 | NETMAP_LOCKED_ENTER, &rx_sent))
		return (FALSE);
#endif /* DEV_NETMAP */

	/* Nothing completed: report zero and bail out early. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		if (done != NULL)
			*done = rx_sent;
		EM_RX_UNLOCK(adapter);
		return (FALSE);
	}

	/*
	 * Main loop: 'count' is the packet budget and is decremented
	 * only on EOP descriptors, i.e. per packet, not per fragment.
	 */
	while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
		struct mbuf *m = NULL;

		status = current_desc->status;
		if ((status & E1000_RXD_STAT_DD) == 0)
			break;

		mp = adapter->rx_buffer_area[i].m_head;
		/*
		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
		 * needs to access the last received byte in the mbuf.
		 */
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		if (status & E1000_RXD_STAT_EOP) {
			count--;
			eop = 1;
			/*
			 * Strip the CRC from the packet length; if the CRC
			 * straddles descriptors, remember how many bytes to
			 * trim from the previous fragment (prev_len_adj).
			 */
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			} else
				len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		/* Errored descriptor: 82543 TBI workaround may still accept it. */
		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			u8	last_byte;
			u32	pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
			if (TBI_ACCEPT(&adapter->hw, status,
			    current_desc->errors, pkt_len, last_byte,
			    adapter->min_frame_size, adapter->max_frame_size)) {
				e1000_tbi_adjust_stats_82543(&adapter->hw,
				    &adapter->stats, pkt_len,
				    adapter->hw.mac.addr,
				    adapter->max_frame_size);
				if (len > 0)
					len--;
			} else
				accept_frame = 0;
		}

		if (accept_frame) {
			/* Refill this slot; if that fails, recycle the mbuf. */
			if (lem_get_buf(adapter, i) != 0) {
				ifp->if_iqdrops++;
				goto discard;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp; /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if
				 * we received less than 4 bytes in the last
				 * descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -=
					    prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			/* Complete packet: finish it and hand it to the stack. */
			if (eop) {
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				lem_receive_checksum(adapter, current_desc,
				    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
				if (adapter->max_frame_size >
				    (MCLBYTES - ETHER_ALIGN) &&
				    lem_fixup_rx(adapter) != 0)
					goto skip;
#endif
				if (status & E1000_RXD_STAT_VP) {
					adapter->fmp->m_pkthdr.ether_vtag =
					    le16toh(current_desc->special);
					adapter->fmp->m_flags |= M_VLANTAG;
				}
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			adapter->dropped_pkts++;
discard:
			/* Reuse loaded DMA map and just update mbuf chain */
			mp = adapter->rx_buffer_area[i].m_head;
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_next = NULL;
			if (adapter->max_frame_size <=
			    (MCLBYTES - ETHER_ALIGN))
				m_adj(mp, ETHER_ALIGN);
			/* Drop any partially assembled chain as well. */
			if (adapter->fmp != NULL) {
				m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			m = NULL;
		}

		/* Zero out the receive descriptors status. */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		/* Call into the stack */
		if (m != NULL) {
			/*
			 * The RX lock is dropped around if_input(); the ring
			 * index is published first and re-read afterwards in
			 * case it was advanced while the lock was released.
			 */
			adapter->next_rx_desc_to_check = i;
			EM_RX_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_RX_LOCK(adapter);
			rx_sent++;
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
	if (done != NULL)
		*done = rx_sent;
	EM_RX_UNLOCK(adapter);
	/* TRUE means more work may be pending (last seen status had DD). */
	return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
}
3605
3606 #ifndef __NO_STRICT_ALIGNMENT
3607 /*
3608  * When jumbo frames are enabled we should realign entire payload on
3609  * architecures with strict alignment. This is serious design mistake of 8254x
3610  * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3611  * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3612  * payload. On architecures without strict alignment restrictions 8254x still
3613  * performs unaligned memory access which would reduce the performance too.
3614  * To avoid copying over an entire frame to align, we allocate a new mbuf and
3615  * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3616  * existing mbuf chain.
3617  *
3618  * Be aware, best performance of the 8254x is achived only when jumbo frame is
3619  * not used at all on architectures with strict alignment.
3620  */
3621 static int
3622 lem_fixup_rx(struct adapter *adapter)
3623 {
3624         struct mbuf *m, *n;
3625         int error;
3626
3627         error = 0;
3628         m = adapter->fmp;
3629         if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3630                 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3631                 m->m_data += ETHER_HDR_LEN;
3632         } else {
3633                 MGETHDR(n, M_NOWAIT, MT_DATA);
3634                 if (n != NULL) {
3635                         bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3636                         m->m_data += ETHER_HDR_LEN;
3637                         m->m_len -= ETHER_HDR_LEN;
3638                         n->m_len = ETHER_HDR_LEN;
3639                         M_MOVE_PKTHDR(n, m);
3640                         n->m_next = m;
3641                         adapter->fmp = n;
3642                 } else {
3643                         adapter->dropped_pkts++;
3644                         m_freem(adapter->fmp);
3645                         adapter->fmp = NULL;
3646                         error = ENOMEM;
3647                 }
3648         }
3649
3650         return (error);
3651 }
3652 #endif
3653
3654 /*********************************************************************
3655  *
3656  *  Verify that the hardware indicated that the checksum is valid.
3657  *  Inform the stack about the status of checksum so that stack
3658  *  doesn't spend time verifying the checksum.
3659  *
3660  *********************************************************************/
3661 static void
3662 lem_receive_checksum(struct adapter *adapter,
3663             struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3664 {
3665         /* 82543 or newer only */
3666         if ((adapter->hw.mac.type < e1000_82543) ||
3667             /* Ignore Checksum bit is set */
3668             (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3669                 mp->m_pkthdr.csum_flags = 0;
3670                 return;
3671         }
3672
3673         if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3674                 /* Did it pass? */
3675                 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3676                         /* IP Checksum Good */
3677                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3678                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3679
3680                 } else {
3681                         mp->m_pkthdr.csum_flags = 0;
3682                 }
3683         }
3684
3685         if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3686                 /* Did it pass? */
3687                 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3688                         mp->m_pkthdr.csum_flags |=
3689                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3690                         mp->m_pkthdr.csum_data = htons(0xffff);
3691                 }
3692         }
3693 }
3694
3695 /*
3696  * This routine is run via an vlan
3697  * config EVENT
3698  */
3699 static void
3700 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3701 {
3702         struct adapter  *adapter = ifp->if_softc;
3703         u32             index, bit;
3704
3705         if (ifp->if_softc !=  arg)   /* Not our event */
3706                 return;
3707
3708         if ((vtag == 0) || (vtag > 4095))       /* Invalid ID */
3709                 return;
3710
3711         EM_CORE_LOCK(adapter);
3712         index = (vtag >> 5) & 0x7F;
3713         bit = vtag & 0x1F;
3714         adapter->shadow_vfta[index] |= (1 << bit);
3715         ++adapter->num_vlans;
3716         /* Re-init to load the changes */
3717         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3718                 lem_init_locked(adapter);
3719         EM_CORE_UNLOCK(adapter);
3720 }
3721
3722 /*
3723  * This routine is run via an vlan
3724  * unconfig EVENT
3725  */
3726 static void
3727 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3728 {
3729         struct adapter  *adapter = ifp->if_softc;
3730         u32             index, bit;
3731
3732         if (ifp->if_softc !=  arg)
3733                 return;
3734
3735         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3736                 return;
3737
3738         EM_CORE_LOCK(adapter);
3739         index = (vtag >> 5) & 0x7F;
3740         bit = vtag & 0x1F;
3741         adapter->shadow_vfta[index] &= ~(1 << bit);
3742         --adapter->num_vlans;
3743         /* Re-init to load the changes */
3744         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3745                 lem_init_locked(adapter);
3746         EM_CORE_UNLOCK(adapter);
3747 }
3748
3749 static void
3750 lem_setup_vlan_hw_support(struct adapter *adapter)
3751 {
3752         struct e1000_hw *hw = &adapter->hw;
3753         u32             reg;
3754
3755         /*
3756         ** We get here thru init_locked, meaning
3757         ** a soft reset, this has already cleared
3758         ** the VFTA and other state, so if there
3759         ** have been no vlan's registered do nothing.
3760         */
3761         if (adapter->num_vlans == 0)
3762                 return;
3763
3764         /*
3765         ** A soft reset zero's out the VFTA, so
3766         ** we need to repopulate it now.
3767         */
3768         for (int i = 0; i < EM_VFTA_SIZE; i++)
3769                 if (adapter->shadow_vfta[i] != 0)
3770                         E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3771                             i, adapter->shadow_vfta[i]);
3772
3773         reg = E1000_READ_REG(hw, E1000_CTRL);
3774         reg |= E1000_CTRL_VME;
3775         E1000_WRITE_REG(hw, E1000_CTRL, reg);
3776
3777         /* Enable the Filter Table */
3778         reg = E1000_READ_REG(hw, E1000_RCTL);
3779         reg &= ~E1000_RCTL_CFIEN;
3780         reg |= E1000_RCTL_VFE;
3781         E1000_WRITE_REG(hw, E1000_RCTL, reg);
3782 }
3783
3784 static void
3785 lem_enable_intr(struct adapter *adapter)
3786 {
3787         struct e1000_hw *hw = &adapter->hw;
3788         u32 ims_mask = IMS_ENABLE_MASK;
3789
3790         E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
3791 }
3792
3793 static void
3794 lem_disable_intr(struct adapter *adapter)
3795 {
3796         struct e1000_hw *hw = &adapter->hw;
3797
3798         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3799 }
3800
3801 /*
3802  * Bit of a misnomer, what this really means is
3803  * to enable OS management of the system... aka
3804  * to disable special hardware management features 
3805  */
3806 static void
3807 lem_init_manageability(struct adapter *adapter)
3808 {
3809         /* A shared code workaround */
3810         if (adapter->has_manage) {
3811                 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3812                 /* disable hardware interception of ARP */
3813                 manc &= ~(E1000_MANC_ARP_EN);
3814                 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3815         }
3816 }
3817
3818 /*
3819  * Give control back to hardware management
3820  * controller if there is one.
3821  */
3822 static void
3823 lem_release_manageability(struct adapter *adapter)
3824 {
3825         if (adapter->has_manage) {
3826                 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3827
3828                 /* re-enable hardware interception of ARP */
3829                 manc |= E1000_MANC_ARP_EN;
3830                 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3831         }
3832 }
3833
3834 /*
3835  * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3836  * For ASF and Pass Through versions of f/w this means
3837  * that the driver is loaded. For AMT version type f/w
3838  * this means that the network i/f is open.
3839  */
3840 static void
3841 lem_get_hw_control(struct adapter *adapter)
3842 {
3843         u32 ctrl_ext;
3844
3845         ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3846         E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3847             ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3848         return;
3849 }
3850
3851 /*
3852  * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3853  * For ASF and Pass Through versions of f/w this means that
3854  * the driver is no longer loaded. For AMT versions of the
3855  * f/w this means that the network i/f is closed.
3856  */
3857 static void
3858 lem_release_hw_control(struct adapter *adapter)
3859 {
3860         u32 ctrl_ext;
3861
3862         if (!adapter->has_manage)
3863                 return;
3864
3865         ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3866         E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3867             ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3868         return;
3869 }
3870
3871 static int
3872 lem_is_valid_ether_addr(u8 *addr)
3873 {
3874         char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3875
3876         if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3877                 return (FALSE);
3878         }
3879
3880         return (TRUE);
3881 }
3882
/*
** Parse the interface capabilities with regard
** to both system management and wake-on-lan for
** later use.
*/
static void
lem_get_wakeup(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	u16		eeprom_data = 0, device_id, apme_mask;

	/* Record whether management pass-through is enabled. */
	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
	apme_mask = EM_EEPROM_APME;

	/* Select the NVM word (and APME bit position) for this MAC. */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
	case e1000_82543:
		/* No NVM word read here; eeprom_data stays 0 (no WoL). */
		break;
	case e1000_82544:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
		/* 82544 uses a different APME bit. */
		apme_mask = EM_82544_APME;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		/* Dual-port parts: each PCI function has its own word. */
		if (adapter->hw.bus.func == 1) {
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		} else
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	default:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	/* APME set in NVM: default to magic-packet + multicast wake. */
	if (eeprom_data & apme_mask)
		adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			adapter->wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}
	return;
}
3952
3953
/*
 * Enable PCI Wake On Lan capability
 */
static void
lem_enable_wakeup(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;
	u32		pmc, ctrl, ctrl_ext, rctl;
	u16		status;

	/* Bail if the device exposes no PCI power-management capability. */
	if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
		return;

	/* Advertise the wakeup capability */
	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
	ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	/*
	 * NOTE(review): WUC PME_EN is written again below for the
	 * non-pchlan path; this early write looks redundant — confirm
	 * against the hardware errata/history before removing.
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);

	/* Keep the laser running on Fiber adapters */
	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
	}

	/*
	** Determine type of Wakeup: note that wol
	** is set with all bits on by default.
	*/
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
		adapter->wol &= ~E1000_WUFC_MAG;

	if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
		adapter->wol &= ~E1000_WUFC_MC;
	else {
		/* Multicast wake needs promiscuous multicast reception. */
		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
	}

	/* pchlan parts program wakeup state through the PHY instead. */
	if (adapter->hw.mac.type == e1000_pchlan) {
		if (lem_enable_phy_wakeup(adapter))
			return;
	} else {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
	}


	/* Request PME */
	status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if (ifp->if_capenable & IFCAP_WOL)
		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);

	return;
}
4015
/*
** WOL in the newer chipset interfaces (pchlan)
** require thing to be copied into the phy
**
** Mirrors the MAC's receive-address and multicast-table state into
** the PHY's BM registers, translates the relevant RCTL/CTRL bits,
** then enables PHY wakeup on both the MAC and PHY sides.
** Returns 0 on success or a non-zero PHY-access error code.
*/
static int
lem_enable_phy_wakeup(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mreg, ret = 0;
	u16 preg;

	/* copy MAC RARs to PHY RARs */
	for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
		/* Each 32-bit RAL/RAH is split into two 16-bit PHY regs. */
		mreg = E1000_READ_REG(hw, E1000_RAL(i));
		e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_RAR_M(i),
		    (u16)((mreg >> 16) & 0xFFFF));
		mreg = E1000_READ_REG(hw, E1000_RAH(i));
		e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
		    (u16)((mreg >> 16) & 0xFFFF));
	}

	/* copy MAC MTA to PHY MTA */
	for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_MTA(i) + 1,
		    (u16)((mreg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register */
	/* Translate each relevant MAC RCTL/CTRL bit into its BM_RCTL form. */
	e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
	mreg = E1000_READ_REG(hw, E1000_RCTL);
	if (mreg & E1000_RCTL_UPE)
		preg |= BM_RCTL_UPE;
	if (mreg & E1000_RCTL_MPE)
		preg |= BM_RCTL_MPE;
	preg &= ~(BM_RCTL_MO_MASK);
	if (mreg & E1000_RCTL_MO_3)
		preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
				<< BM_RCTL_MO_SHIFT);
	if (mreg & E1000_RCTL_BAM)
		preg |= BM_RCTL_BAM;
	if (mreg & E1000_RCTL_PMCF)
		preg |= BM_RCTL_PMCF;
	mreg = E1000_READ_REG(hw, E1000_CTRL);
	if (mreg & E1000_CTRL_RFCE)
		preg |= BM_RCTL_RFCE;
	e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);

	/* enable PHY wakeup in MAC register */
	E1000_WRITE_REG(hw, E1000_WUC,
	    E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
	E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);

	/* configure and enable PHY wakeup in PHY registers */
	e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
	e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

	/* activate PHY wakeup */
	/* Raw MDIC access below requires exclusive PHY ownership. */
	ret = hw->phy.ops.acquire(hw);
	if (ret) {
		printf("Could not acquire PHY\n");
		return ret;
	}
	e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
				 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
	ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
	if (ret) {
		printf("Could not read PHY page 769\n");
		goto out;
	}
	preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
	if (ret)
		printf("Could not set PHY Host Wakeup bit\n");
out:
	/* Always release the PHY, even on partial failure. */
	hw->phy.ops.release(hw);

	return ret;
}
4098
4099 static void
4100 lem_led_func(void *arg, int onoff)
4101 {
4102         struct adapter  *adapter = arg;
4103
4104         EM_CORE_LOCK(adapter);
4105         if (onoff) {
4106                 e1000_setup_led(&adapter->hw);
4107                 e1000_led_on(&adapter->hw);
4108         } else {
4109                 e1000_led_off(&adapter->hw);
4110                 e1000_cleanup_led(&adapter->hw);
4111         }
4112         EM_CORE_UNLOCK(adapter);
4113 }
4114
4115 /*********************************************************************
4116 * 82544 Coexistence issue workaround.
4117 *    There are 2 issues.
4118 *       1. Transmit Hang issue.
4119 *    To detect this issue, following equation can be used...
4120 *         SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4121 *         If SUM[3:0] is in between 1 to 4, we will have this issue.
4122 *
4123 *       2. DAC issue.
4124 *    To detect this issue, following equation can be used...
4125 *         SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4126 *         If SUM[3:0] is in between 9 to c, we will have this issue.
4127 *
4128 *
4129 *    WORKAROUND:
4130 *         Make sure we do not have ending address
4131 *         as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4132 *
4133 *************************************************************************/
4134 static u32
4135 lem_fill_descriptors (bus_addr_t address, u32 length,
4136                 PDESC_ARRAY desc_array)
4137 {
4138         u32 safe_terminator;
4139
4140         /* Since issue is sensitive to length and address.*/
4141         /* Let us first check the address...*/
4142         if (length <= 4) {
4143                 desc_array->descriptor[0].address = address;
4144                 desc_array->descriptor[0].length = length;
4145                 desc_array->elements = 1;
4146                 return (desc_array->elements);
4147         }
4148         safe_terminator = (u32)((((u32)address & 0x7) +
4149             (length & 0xF)) & 0xF);
4150         /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4151         if (safe_terminator == 0   ||
4152         (safe_terminator > 4   &&
4153         safe_terminator < 9)   ||
4154         (safe_terminator > 0xC &&
4155         safe_terminator <= 0xF)) {
4156                 desc_array->descriptor[0].address = address;
4157                 desc_array->descriptor[0].length = length;
4158                 desc_array->elements = 1;
4159                 return (desc_array->elements);
4160         }
4161
4162         desc_array->descriptor[0].address = address;
4163         desc_array->descriptor[0].length = length - 4;
4164         desc_array->descriptor[1].address = address + (length - 4);
4165         desc_array->descriptor[1].length = 4;
4166         desc_array->elements = 2;
4167         return (desc_array->elements);
4168 }
4169
/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 *  Reads the hardware statistics registers (which clear on read)
 *  and accumulates them into adapter->stats, then refreshes the
 *  ifnet error/collision counters from the accumulated totals.
 *
 **********************************************************************/
static void
lem_update_stats_counters(struct adapter *adapter)
{
	struct ifnet   *ifp;

	/* Symbol/sequence errors are only meaningful on copper or
	 * when the link is up. */
	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
	   (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
	adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
	    ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);

	/*
	 * NOTE(review): unlike GORC/GOTC above, only the high dwords
	 * (TORH/TOTH) are read here — the low dwords are never read.
	 * Confirm against the 8254x manual whether TORL/TOTL should be
	 * read first as well.
	 */
	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);

	/* These counters only exist on 82543 and newer MACs. */
	if (adapter->hw.mac.type >= e1000_82543) {
		adapter->stats.algnerrc += 
		E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
		adapter->stats.rxerrc += 
		E1000_READ_REG(&adapter->hw, E1000_RXERRC);
		adapter->stats.tncrs += 
		E1000_READ_REG(&adapter->hw, E1000_TNCRS);
		adapter->stats.cexterr += 
		E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
		adapter->stats.tsctc += 
		E1000_READ_REG(&adapter->hw, E1000_TSCTC);
		adapter->stats.tsctfc += 
		E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
	}
	ifp = adapter->ifp;

	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
	    adapter->stats.crcerrs + adapter->stats.algnerrc +
	    adapter->stats.ruc + adapter->stats.roc +
	    adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = adapter->stats.ecol +
	    adapter->stats.latecol + adapter->watchdog_events;
}
4267
4268 /* Export a single 32-bit register via a read-only sysctl. */
4269 static int
4270 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4271 {
4272         struct adapter *adapter;
4273         u_int val;
4274
4275         adapter = oidp->oid_arg1;
4276         val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4277         return (sysctl_handle_int(oidp, &val, 0, req));
4278 }
4279
4280 /*
4281  * Add sysctl variables, one per statistic, to the system.
4282  */
4283 static void
4284 lem_add_hw_stats(struct adapter *adapter)
4285 {
4286         device_t dev = adapter->dev;
4287
4288         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4289         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4290         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4291         struct e1000_hw_stats *stats = &adapter->stats;
4292
4293         struct sysctl_oid *stat_node;
4294         struct sysctl_oid_list *stat_list;
4295
4296         /* Driver Statistics */
4297         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail", 
4298                          CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4299                          "Std mbuf failed");
4300         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail", 
4301                          CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4302                          "Std mbuf cluster failed");
4303         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 
4304                         CTLFLAG_RD, &adapter->dropped_pkts,
4305                         "Driver dropped packets");
4306         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", 
4307                         CTLFLAG_RD, &adapter->no_tx_dma_setup,
4308                         "Driver tx dma failure in xmit");
4309         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4310                         CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4311                         "Not enough tx descriptors failure in xmit");
4312         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4313                         CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4314                         "Not enough tx descriptors failure in xmit");
4315         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4316                         CTLFLAG_RD, &adapter->rx_overruns,
4317                         "RX overruns");
4318         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4319                         CTLFLAG_RD, &adapter->watchdog_events,
4320                         "Watchdog timeouts");
4321
4322         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4323                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4324                         lem_sysctl_reg_handler, "IU",
4325                         "Device Control Register");
4326         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4327                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4328                         lem_sysctl_reg_handler, "IU",
4329                         "Receiver Control Register");
4330         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4331                         CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4332                         "Flow Control High Watermark");
4333         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", 
4334                         CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4335                         "Flow Control Low Watermark");
4336         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4337                         CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4338                         "TX FIFO workaround events");
4339         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4340                         CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4341                         "TX FIFO resets");
4342
4343         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head", 
4344                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4345                         lem_sysctl_reg_handler, "IU",
4346                         "Transmit Descriptor Head");
4347         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail", 
4348                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4349                         lem_sysctl_reg_handler, "IU",
4350                         "Transmit Descriptor Tail");
4351         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head", 
4352                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4353                         lem_sysctl_reg_handler, "IU",
4354                         "Receive Descriptor Head");
4355         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail", 
4356                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4357                         lem_sysctl_reg_handler, "IU",
4358                         "Receive Descriptor Tail");
4359         
4360
4361         /* MAC stats get their own sub node */
4362
4363         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
4364                                     CTLFLAG_RD, NULL, "Statistics");
4365         stat_list = SYSCTL_CHILDREN(stat_node);
4366
4367         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4368                         CTLFLAG_RD, &stats->ecol,
4369                         "Excessive collisions");
4370         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4371                         CTLFLAG_RD, &stats->scc,
4372                         "Single collisions");
4373         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4374                         CTLFLAG_RD, &stats->mcc,
4375                         "Multiple collisions");
4376         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4377                         CTLFLAG_RD, &stats->latecol,
4378                         "Late collisions");
4379         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4380                         CTLFLAG_RD, &stats->colc,
4381                         "Collision Count");
4382         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4383                         CTLFLAG_RD, &adapter->stats.symerrs,
4384                         "Symbol Errors");
4385         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4386                         CTLFLAG_RD, &adapter->stats.sec,
4387                         "Sequence Errors");
4388         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4389                         CTLFLAG_RD, &adapter->stats.dc,
4390                         "Defer Count");
4391         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4392                         CTLFLAG_RD, &adapter->stats.mpc,
4393                         "Missed Packets");
4394         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4395                         CTLFLAG_RD, &adapter->stats.rnbc,
4396                         "Receive No Buffers");
4397         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4398                         CTLFLAG_RD, &adapter->stats.ruc,
4399                         "Receive Undersize");
4400         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4401                         CTLFLAG_RD, &adapter->stats.rfc,
4402                         "Fragmented Packets Received ");
4403         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4404                         CTLFLAG_RD, &adapter->stats.roc,
4405                         "Oversized Packets Received");
4406         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4407                         CTLFLAG_RD, &adapter->stats.rjc,
4408                         "Recevied Jabber");
4409         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4410                         CTLFLAG_RD, &adapter->stats.rxerrc,
4411                         "Receive Errors");
4412         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4413                         CTLFLAG_RD, &adapter->stats.crcerrs,
4414                         "CRC errors");
4415         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4416                         CTLFLAG_RD, &adapter->stats.algnerrc,
4417                         "Alignment Errors");
4418         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4419                         CTLFLAG_RD, &adapter->stats.cexterr,
4420                         "Collision/Carrier extension errors");
4421         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4422                         CTLFLAG_RD, &adapter->stats.xonrxc,
4423                         "XON Received");
4424         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4425                         CTLFLAG_RD, &adapter->stats.xontxc,
4426                         "XON Transmitted");
4427         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4428                         CTLFLAG_RD, &adapter->stats.xoffrxc,
4429                         "XOFF Received");
4430         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4431                         CTLFLAG_RD, &adapter->stats.xofftxc,
4432                         "XOFF Transmitted");
4433
4434         /* Packet Reception Stats */
4435         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4436                         CTLFLAG_RD, &adapter->stats.tpr,
4437                         "Total Packets Received ");
4438         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4439                         CTLFLAG_RD, &adapter->stats.gprc,
4440                         "Good Packets Received");
4441         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4442                         CTLFLAG_RD, &adapter->stats.bprc,
4443                         "Broadcast Packets Received");
4444         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4445                         CTLFLAG_RD, &adapter->stats.mprc,
4446                         "Multicast Packets Received");
4447         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4448                         CTLFLAG_RD, &adapter->stats.prc64,
4449                         "64 byte frames received ");
4450         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4451                         CTLFLAG_RD, &adapter->stats.prc127,
4452                         "65-127 byte frames received");
4453         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4454                         CTLFLAG_RD, &adapter->stats.prc255,
4455                         "128-255 byte frames received");
4456         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4457                         CTLFLAG_RD, &adapter->stats.prc511,
4458                         "256-511 byte frames received");
4459         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4460                         CTLFLAG_RD, &adapter->stats.prc1023,
4461                         "512-1023 byte frames received");
4462         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4463                         CTLFLAG_RD, &adapter->stats.prc1522,
4464                         "1023-1522 byte frames received");
4465         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4466                         CTLFLAG_RD, &adapter->stats.gorc, 
4467                         "Good Octets Received");
4468
4469         /* Packet Transmission Stats */
4470         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4471                         CTLFLAG_RD, &adapter->stats.gotc, 
4472                         "Good Octets Transmitted"); 
4473         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4474                         CTLFLAG_RD, &adapter->stats.tpt,
4475                         "Total Packets Transmitted");
4476         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4477                         CTLFLAG_RD, &adapter->stats.gptc,
4478                         "Good Packets Transmitted");
4479         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4480                         CTLFLAG_RD, &adapter->stats.bptc,
4481                         "Broadcast Packets Transmitted");
4482         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4483                         CTLFLAG_RD, &adapter->stats.mptc,
4484                         "Multicast Packets Transmitted");
4485         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4486                         CTLFLAG_RD, &adapter->stats.ptc64,
4487                         "64 byte frames transmitted ");
4488         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4489                         CTLFLAG_RD, &adapter->stats.ptc127,
4490                         "65-127 byte frames transmitted");
4491         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4492                         CTLFLAG_RD, &adapter->stats.ptc255,
4493                         "128-255 byte frames transmitted");
4494         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4495                         CTLFLAG_RD, &adapter->stats.ptc511,
4496                         "256-511 byte frames transmitted");
4497         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4498                         CTLFLAG_RD, &adapter->stats.ptc1023,
4499                         "512-1023 byte frames transmitted");
4500         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4501                         CTLFLAG_RD, &adapter->stats.ptc1522,
4502                         "1024-1522 byte frames transmitted");
4503         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4504                         CTLFLAG_RD, &adapter->stats.tsctc,
4505                         "TSO Contexts Transmitted");
4506         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4507                         CTLFLAG_RD, &adapter->stats.tsctfc,
4508                         "TSO Contexts Failed");
4509 }
4510
4511 /**********************************************************************
4512  *
4513  *  This routine provides a way to dump out the adapter eeprom,
4514  *  often a useful debug/service tool. This only dumps the first
4515  *  32 words, stuff that matters is in that extent.
4516  *
4517  **********************************************************************/
4518
4519 static int
4520 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4521 {
4522         struct adapter *adapter;
4523         int error;
4524         int result;
4525
4526         result = -1;
4527         error = sysctl_handle_int(oidp, &result, 0, req);
4528
4529         if (error || !req->newptr)
4530                 return (error);
4531
4532         /*
4533          * This value will cause a hex dump of the
4534          * first 32 16-bit words of the EEPROM to
4535          * the screen.
4536          */
4537         if (result == 1) {
4538                 adapter = (struct adapter *)arg1;
4539                 lem_print_nvm_info(adapter);
4540         }
4541
4542         return (error);
4543 }
4544
4545 static void
4546 lem_print_nvm_info(struct adapter *adapter)
4547 {
4548         u16     eeprom_data;
4549         int     i, j, row = 0;
4550
4551         /* Its a bit crude, but it gets the job done */
4552         printf("\nInterface EEPROM Dump:\n");
4553         printf("Offset\n0x0000  ");
4554         for (i = 0, j = 0; i < 32; i++, j++) {
4555                 if (j == 8) { /* Make the offset block */
4556                         j = 0; ++row;
4557                         printf("\n0x00%x0  ",row);
4558                 }
4559                 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4560                 printf("%04x ", eeprom_data);
4561         }
4562         printf("\n");
4563 }
4564
4565 static int
4566 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4567 {
4568         struct em_int_delay_info *info;
4569         struct adapter *adapter;
4570         u32 regval;
4571         int error;
4572         int usecs;
4573         int ticks;
4574
4575         info = (struct em_int_delay_info *)arg1;
4576         usecs = info->value;
4577         error = sysctl_handle_int(oidp, &usecs, 0, req);
4578         if (error != 0 || req->newptr == NULL)
4579                 return (error);
4580         if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4581                 return (EINVAL);
4582         info->value = usecs;
4583         ticks = EM_USECS_TO_TICKS(usecs);
4584         if (info->offset == E1000_ITR)  /* units are 256ns here */
4585                 ticks *= 4;
4586
4587         adapter = info->adapter;
4588         
4589         EM_CORE_LOCK(adapter);
4590         regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4591         regval = (regval & ~0xffff) | (ticks & 0xffff);
4592         /* Handle a few special cases. */
4593         switch (info->offset) {
4594         case E1000_RDTR:
4595                 break;
4596         case E1000_TIDV:
4597                 if (ticks == 0) {
4598                         adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4599                         /* Don't write 0 into the TIDV register. */
4600                         regval++;
4601                 } else
4602                         adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4603                 break;
4604         }
4605         E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4606         EM_CORE_UNLOCK(adapter);
4607         return (0);
4608 }
4609
4610 static void
4611 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4612         const char *description, struct em_int_delay_info *info,
4613         int offset, int value)
4614 {
4615         info->adapter = adapter;
4616         info->offset = offset;
4617         info->value = value;
4618         SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4619             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4620             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4621             info, 0, lem_sysctl_int_delay, "I", description);
4622 }
4623
4624 static void
4625 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4626         const char *description, int *limit, int value)
4627 {
4628         *limit = value;
4629         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4630             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4631             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4632 }
4633
4634 static void
4635 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4636         const char *description, int *limit, int value)
4637 {
4638         *limit = value;
4639         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4640             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4641             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4642 }