/*
 * Source: FreeBSD stable/8 branch, sys/dev/e1000/if_lem.c (MFC r309400).
 */
1 /******************************************************************************
2
3   Copyright (c) 2001-2012, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/bus.h>
44 #include <sys/endian.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/module.h>
50 #include <sys/rman.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <sys/taskqueue.h>
55 #include <sys/eventhandler.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58
59 #include <net/bpf.h>
60 #include <net/ethernet.h>
61 #include <net/if.h>
62 #include <net/if_arp.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65
66 #include <net/if_types.h>
67 #include <net/if_vlan_var.h>
68
69 #include <netinet/in_systm.h>
70 #include <netinet/in.h>
71 #include <netinet/if_ether.h>
72 #include <netinet/ip.h>
73 #include <netinet/ip6.h>
74 #include <netinet/tcp.h>
75 #include <netinet/udp.h>
76
77 #include <machine/in_cksum.h>
78 #include <dev/led/led.h>
79 #include <dev/pci/pcivar.h>
80 #include <dev/pci/pcireg.h>
81
82 #include "e1000_api.h"
83 #include "if_lem.h"
84
/*********************************************************************
 *  Legacy Em Driver version:
 *********************************************************************/
/* Reported to userland via the probe description string (see lem_probe()). */
char lem_driver_version[] = "1.0.5";
89
90 /*********************************************************************
91  *  PCI Device ID Table
92  *
93  *  Used by probe to select devices to load on
94  *  Last field stores an index into e1000_strings
95  *  Last entry must be all 0s
96  *
97  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
98  *********************************************************************/
99
/*
 * Matched against PCI IDs in lem_probe(); PCI_ANY_ID in the subvendor
 * or subdevice column acts as a wildcard.  The all-zero terminator is
 * what lem_probe()'s "vendor_id != 0" loop test relies on.
 */
static em_vendor_info_t lem_vendor_info_array[] =
{
        /* Intel(R) PRO/1000 Network Connection */
        { 0x8086, E1000_DEV_ID_82540EM,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EM_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82540EP_LP,      PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82541EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541ER,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541ER_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI_LF,      PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82541GI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82542,           PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82543GC_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82543GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82544EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82544GC_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82545EM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545EM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82545GM_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82546EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_PCIE,    PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
                                                PCI_ANY_ID, PCI_ANY_ID, 0},

        { 0x8086, E1000_DEV_ID_82547EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82547EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
        { 0x8086, E1000_DEV_ID_82547GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
        /* required last entry */
        { 0, 0, 0, 0, 0}
};
150
151 /*********************************************************************
152  *  Table of branding strings for all supported NICs.
153  *********************************************************************/
154
/*
 * Branding strings, indexed by the last field of lem_vendor_info_array
 * entries; all supported legacy parts currently share index 0.
 */
static char *lem_strings[] = {
        "Intel(R) PRO/1000 Legacy Network Connection"
};
158
159 /*********************************************************************
160  *  Function prototypes
161  *********************************************************************/
162 static int      lem_probe(device_t);
163 static int      lem_attach(device_t);
164 static int      lem_detach(device_t);
165 static int      lem_shutdown(device_t);
166 static int      lem_suspend(device_t);
167 static int      lem_resume(device_t);
168 static void     lem_start(struct ifnet *);
169 static void     lem_start_locked(struct ifnet *ifp);
170 static int      lem_ioctl(struct ifnet *, u_long, caddr_t);
171 static void     lem_init(void *);
172 static void     lem_init_locked(struct adapter *);
173 static void     lem_stop(void *);
174 static void     lem_media_status(struct ifnet *, struct ifmediareq *);
175 static int      lem_media_change(struct ifnet *);
176 static void     lem_identify_hardware(struct adapter *);
177 static int      lem_allocate_pci_resources(struct adapter *);
178 static int      lem_allocate_irq(struct adapter *adapter);
179 static void     lem_free_pci_resources(struct adapter *);
180 static void     lem_local_timer(void *);
181 static int      lem_hardware_init(struct adapter *);
182 static int      lem_setup_interface(device_t, struct adapter *);
183 static void     lem_setup_transmit_structures(struct adapter *);
184 static void     lem_initialize_transmit_unit(struct adapter *);
185 static int      lem_setup_receive_structures(struct adapter *);
186 static void     lem_initialize_receive_unit(struct adapter *);
187 static void     lem_enable_intr(struct adapter *);
188 static void     lem_disable_intr(struct adapter *);
189 static void     lem_free_transmit_structures(struct adapter *);
190 static void     lem_free_receive_structures(struct adapter *);
191 static void     lem_update_stats_counters(struct adapter *);
192 static void     lem_add_hw_stats(struct adapter *adapter);
193 static void     lem_txeof(struct adapter *);
194 static void     lem_tx_purge(struct adapter *);
195 static int      lem_allocate_receive_structures(struct adapter *);
196 static int      lem_allocate_transmit_structures(struct adapter *);
197 static bool     lem_rxeof(struct adapter *, int, int *);
198 #ifndef __NO_STRICT_ALIGNMENT
199 static int      lem_fixup_rx(struct adapter *);
200 #endif
201 static void     lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
202                     struct mbuf *);
203 static void     lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
204                     u32 *, u32 *);
205 static void     lem_set_promisc(struct adapter *);
206 static void     lem_disable_promisc(struct adapter *);
207 static void     lem_set_multi(struct adapter *);
208 static void     lem_update_link_status(struct adapter *);
209 static int      lem_get_buf(struct adapter *, int);
210 static void     lem_register_vlan(void *, struct ifnet *, u16);
211 static void     lem_unregister_vlan(void *, struct ifnet *, u16);
212 static void     lem_setup_vlan_hw_support(struct adapter *);
213 static int      lem_xmit(struct adapter *, struct mbuf **);
214 static void     lem_smartspeed(struct adapter *);
215 static int      lem_82547_fifo_workaround(struct adapter *, int);
216 static void     lem_82547_update_fifo_head(struct adapter *, int);
217 static int      lem_82547_tx_fifo_reset(struct adapter *);
218 static void     lem_82547_move_tail(void *);
219 static int      lem_dma_malloc(struct adapter *, bus_size_t,
220                     struct em_dma_alloc *, int);
221 static void     lem_dma_free(struct adapter *, struct em_dma_alloc *);
222 static int      lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
223 static void     lem_print_nvm_info(struct adapter *);
224 static int      lem_is_valid_ether_addr(u8 *);
225 static u32      lem_fill_descriptors (bus_addr_t address, u32 length,
226                     PDESC_ARRAY desc_array);
227 static int      lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
228 static void     lem_add_int_delay_sysctl(struct adapter *, const char *,
229                     const char *, struct em_int_delay_info *, int, int);
230 static void     lem_set_flow_cntrl(struct adapter *, const char *,
231                     const char *, int *, int);
232 /* Management and WOL Support */
233 static void     lem_init_manageability(struct adapter *);
234 static void     lem_release_manageability(struct adapter *);
235 static void     lem_get_hw_control(struct adapter *);
236 static void     lem_release_hw_control(struct adapter *);
237 static void     lem_get_wakeup(device_t);
238 static void     lem_enable_wakeup(device_t);
239 static int      lem_enable_phy_wakeup(struct adapter *);
240 static void     lem_led_func(void *, int);
241
242 static void     lem_intr(void *);
243 static int      lem_irq_fast(void *);
244 static void     lem_handle_rxtx(void *context, int pending);
245 static void     lem_handle_link(void *context, int pending);
246 static void     lem_add_rx_process_limit(struct adapter *, const char *,
247                     const char *, int *, int);
248
249 #ifdef DEVICE_POLLING
250 static poll_handler_t lem_poll;
251 #endif /* POLLING */
252
253 /*********************************************************************
254  *  FreeBSD Device Interface Entry Points
255  *********************************************************************/
256
/* newbus device method table: probe/attach/detach plus power events. */
static device_method_t lem_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, lem_probe),
        DEVMETHOD(device_attach, lem_attach),
        DEVMETHOD(device_detach, lem_detach),
        DEVMETHOD(device_shutdown, lem_shutdown),
        DEVMETHOD(device_suspend, lem_suspend),
        DEVMETHOD(device_resume, lem_resume),
        DEVMETHOD_END
};
267
/*
 * Registered under the "em" name and shared em devclass so legacy and
 * modern parts present a single interface namespace to userland.
 */
static driver_t lem_driver = {
        "em", lem_methods, sizeof(struct adapter),
};

extern devclass_t em_devclass;
DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
MODULE_DEPEND(lem, pci, 1, 1, 1);
MODULE_DEPEND(lem, ether, 1, 1, 1);
276
277 /*********************************************************************
278  *  Tunable default values.
279  *********************************************************************/
280
/*
 * Conversions between interrupt-delay register ticks (1.024 us units)
 * and microseconds, rounded to nearest.
 */
#define EM_TICKS_TO_USECS(ticks)        ((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs)        ((1000 * (usecs) + 512) / 1024)

/* Defaults below are overridable via the hw.em.* loader tunables. */
static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
static int lem_rxd = EM_DEFAULT_RXD;
static int lem_txd = EM_DEFAULT_TXD;
static int lem_smart_pwr_down = FALSE;

/* Controls whether promiscuous also shows bad packets */
static int lem_debug_sbp = FALSE;

TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &lem_rxd);
TUNABLE_INT("hw.em.txd", &lem_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);

/* Interrupt style - default to fast */
static int lem_use_legacy_irq = 0;
TUNABLE_INT("hw.em.use_legacy_irq", &lem_use_legacy_irq);

/* How many packets rxeof tries to clean at a time */
static int lem_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);

/* Flow control setting - default to FULL */
static int lem_fc_setting = e1000_fc_full;
TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);

/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;
318
319 #ifdef DEV_NETMAP       /* see ixgbe.c for details */
320 #include <dev/netmap/if_lem_netmap.h>
321 #endif /* DEV_NETMAP */
322
323 /*********************************************************************
324  *  Device identification routine
325  *
326  *  em_probe determines if the driver should be loaded on
327  *  adapter based on PCI vendor/device id of the adapter.
328  *
329  *  return BUS_PROBE_DEFAULT on success, positive on failure
330  *********************************************************************/
331
332 static int
333 lem_probe(device_t dev)
334 {
335         char            adapter_name[60];
336         u16             pci_vendor_id = 0;
337         u16             pci_device_id = 0;
338         u16             pci_subvendor_id = 0;
339         u16             pci_subdevice_id = 0;
340         em_vendor_info_t *ent;
341
342         INIT_DEBUGOUT("em_probe: begin");
343
344         pci_vendor_id = pci_get_vendor(dev);
345         if (pci_vendor_id != EM_VENDOR_ID)
346                 return (ENXIO);
347
348         pci_device_id = pci_get_device(dev);
349         pci_subvendor_id = pci_get_subvendor(dev);
350         pci_subdevice_id = pci_get_subdevice(dev);
351
352         ent = lem_vendor_info_array;
353         while (ent->vendor_id != 0) {
354                 if ((pci_vendor_id == ent->vendor_id) &&
355                     (pci_device_id == ent->device_id) &&
356
357                     ((pci_subvendor_id == ent->subvendor_id) ||
358                     (ent->subvendor_id == PCI_ANY_ID)) &&
359
360                     ((pci_subdevice_id == ent->subdevice_id) ||
361                     (ent->subdevice_id == PCI_ANY_ID))) {
362                         sprintf(adapter_name, "%s %s",
363                                 lem_strings[ent->index],
364                                 lem_driver_version);
365                         device_set_desc_copy(dev, adapter_name);
366                         return (BUS_PROBE_DEFAULT);
367                 }
368                 ent++;
369         }
370
371         return (ENXIO);
372 }
373
374 /*********************************************************************
375  *  Device initialization routine
376  *
377  *  The attach entry point is called when the driver is being loaded.
378  *  This routine identifies the type of hardware, allocates all resources
379  *  and initializes the hardware.
380  *
381  *  return 0 on success, positive on failure
382  *********************************************************************/
383
static int
lem_attach(device_t dev)
{
	struct adapter	*adapter;
	int		tsize, rsize;
	int		error = 0;

	INIT_DEBUGOUT("lem_attach: begin");

	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	/* Mutexes must exist before the callouts below are tied to them. */
	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
	EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
	EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    lem_sysctl_nvm_info, "I", "NVM Information");

	/* Callouts run with the corresponding mutex held. */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
	callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);

	/* Determine hardware and mac info */
	lem_identify_hardware(adapter);

	/* Setup PCI resources */
	if (lem_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto err_pci;
	}

	e1000_get_bus_info(&adapter->hw);

	/* Set up some sysctls for the tunable interrupt delays */
	lem_add_int_delay_sysctl(adapter, "rx_int_delay",
	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
	    E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
	lem_add_int_delay_sysctl(adapter, "tx_int_delay",
	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
	    E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
	/* Absolute delay registers (RADV/TADV) exist from 82540 onward. */
	if (adapter->hw.mac.type >= e1000_82540) {
		lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
		    "receive interrupt delay limit in usecs",
		    &adapter->rx_abs_int_delay,
		    E1000_REGISTER(&adapter->hw, E1000_RADV),
		    lem_rx_abs_int_delay_dflt);
		lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
		    "transmit interrupt delay limit in usecs",
		    &adapter->tx_abs_int_delay,
		    E1000_REGISTER(&adapter->hw, E1000_TADV),
		    lem_tx_abs_int_delay_dflt);
	}

	/* Sysctls for limiting the amount of work done in the taskqueue */
	lem_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    lem_rx_process_limit);

	/* Sysctl for setting the interface flow control */
	lem_set_flow_cntrl(adapter, "flow_control",
	    "flow control setting",
	    &adapter->fc_setting, lem_fc_setting);

	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of E1000_DBA_ALIGN.  Bad tunables fall back to the default
	 * count with a diagnostic rather than failing attach.
	 */
	if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
	    (lem_txd < EM_MIN_TXD)) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
		    EM_DEFAULT_TXD, lem_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = lem_txd;
	if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
	    (lem_rxd < EM_MIN_RXD)) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
		    EM_DEFAULT_RXD, lem_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = lem_rxd;

	adapter->hw.mac.autoneg = DO_AUTO_NEG;
	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->rx_buffer_len = 2048;

	e1000_init_script_state_82541(&adapter->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = FALSE;
		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
	}

	/*
	 * Set the frame limits assuming
	 * standard ethernet sized frames.
	 */
	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	adapter->hw.mac.report_tx_early = 1;

	/* Ring sizes are rounded up to the DMA alignment requirement. */
	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Transmit Descriptor ring */
	if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		error = ENOMEM;
		goto err_tx_desc;
	}
	adapter->tx_desc_base = 
	    (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;

	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
	    EM_DBA_ALIGN);

	/* Allocate Receive Descriptor ring */
	if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		error = ENOMEM;
		goto err_rx_desc;
	}
	adapter->rx_desc_base =
	    (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_hw_init;
	}

	/*
	** Start from a known state, this is
	** important in reading the nvm and
	** mac from that.
	*/
	e1000_reset_hw(&adapter->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
		/*
		** Some PCI-E parts fail the first check due to
		** the link being in sleep state, call it again,
		** if it fails a second time its a real issue.
		*/
		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto err_hw_init;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&adapter->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto err_hw_init;
	}

	if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Initialize the hardware */
	if (lem_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		error = EIO;
		goto err_hw_init;
	}

	/* Allocate transmit descriptors and buffers */
	if (lem_allocate_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		error = ENOMEM;
		goto err_tx_struct;
	}

	/* Allocate receive descriptors and buffers */
	if (lem_allocate_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		error = ENOMEM;
		goto err_rx_struct;
	}

	/*
	**  Do interrupt configuration
	*/
	error = lem_allocate_irq(adapter);
	if (error)
		goto err_rx_struct;

	/*
	 * Get Wake-on-Lan and Management info for later use
	 */
	lem_get_wakeup(dev);

	/* Setup OS specific network interface */
	/* NOTE(review): failure here leaves error == 0 from the successful
	 * lem_allocate_irq() above — verify lem_setup_interface() error
	 * propagation against the unwind path. */
	if (lem_setup_interface(dev, adapter) != 0)
		goto err_rx_struct;

	/* Initialize statistics */
	lem_update_stats_counters(adapter);

	adapter->hw.mac.get_link_status = 1;
	lem_update_link_status(adapter);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&adapter->hw))
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");

	/* Do we need workaround for 82544 PCI-X adapter? */
	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
	    adapter->hw.mac.type == e1000_82544)
		adapter->pcix_82544 = TRUE;
	else
		adapter->pcix_82544 = FALSE;

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); 

	lem_add_hw_stats(adapter);

	/* Non-AMT based hardware can now take control from firmware */
	if (adapter->has_manage && !adapter->has_amt)
		lem_get_hw_control(adapter);

	/* Tell the stack that the interface is not active */
	adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	adapter->led_dev = led_create(lem_led_func, adapter,
	    device_get_nameunit(dev));

#ifdef DEV_NETMAP
	lem_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("lem_attach: end");

	return (0);

	/*
	 * Error unwinding: labels are ordered so each entry point frees
	 * everything acquired after the corresponding failure site; the
	 * chain falls through to the common PCI/lock cleanup at the end.
	 */
err_rx_struct:
	lem_free_transmit_structures(adapter);
err_tx_struct:
err_hw_init:
	lem_release_hw_control(adapter);
	lem_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
	lem_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	lem_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (error);
}
678
679 /*********************************************************************
680  *  Device removal routine
681  *
682  *  The detach entry point is called when the driver is being removed.
683  *  This routine stops the adapter and deallocates all the resources
684  *  that were allocated for driver operation.
685  *
686  *  return 0 on success, positive on failure
687  *********************************************************************/
688
static int
lem_detach(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;

	INIT_DEBUGOUT("em_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (adapter->led_dev != NULL)
		led_destroy(adapter->led_dev);

	/* Quiesce the hardware under both locks before tearing down. */
	EM_CORE_LOCK(adapter);
	EM_TX_LOCK(adapter);
	adapter->in_detach = 1;		/* signal other code paths we are going away */
	lem_stop(adapter);
	e1000_phy_hw_reset(&adapter->hw);

	lem_release_manageability(adapter);

	EM_TX_UNLOCK(adapter);
	EM_CORE_UNLOCK(adapter);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); 

	/*
	 * Detach from the stack first, then drain the callouts (they were
	 * initialized with callout_init_mtx against our mutexes in attach).
	 */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	callout_drain(&adapter->tx_fifo_timer);

#ifdef DEV_NETMAP
	netmap_detach(ifp);
#endif /* DEV_NETMAP */
	lem_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	lem_free_transmit_structures(adapter);
	lem_free_receive_structures(adapter);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		lem_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		lem_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}

	lem_release_hw_control(adapter);
	free(adapter->mta, M_DEVBUF);
	/* Locks are destroyed last; nothing may touch the softc after this. */
	EM_TX_LOCK_DESTROY(adapter);
	EM_RX_LOCK_DESTROY(adapter);
	EM_CORE_LOCK_DESTROY(adapter);

	return (0);
}
762
763 /*********************************************************************
764  *
765  *  Shutdown entry point
766  *
767  **********************************************************************/
768
769 static int
770 lem_shutdown(device_t dev)
771 {
772         return lem_suspend(dev);
773 }
774
775 /*
776  * Suspend/resume device methods.
777  */
778 static int
779 lem_suspend(device_t dev)
780 {
781         struct adapter *adapter = device_get_softc(dev);
782
783         EM_CORE_LOCK(adapter);
784
785         lem_release_manageability(adapter);
786         lem_release_hw_control(adapter);
787         lem_enable_wakeup(dev);
788
789         EM_CORE_UNLOCK(adapter);
790
791         return bus_generic_suspend(dev);
792 }
793
794 static int
795 lem_resume(device_t dev)
796 {
797         struct adapter *adapter = device_get_softc(dev);
798         struct ifnet *ifp = adapter->ifp;
799
800         EM_CORE_LOCK(adapter);
801         lem_init_locked(adapter);
802         lem_init_manageability(adapter);
803         EM_CORE_UNLOCK(adapter);
804         lem_start(ifp);
805
806         return bus_generic_resume(dev);
807 }
808
809
/*
 * lem_start_locked - drain the interface send queue onto the TX ring.
 *
 * Caller must hold the TX lock.  Dequeues packets and hands them to
 * lem_xmit() until the queue is empty or descriptors run out; on an
 * encapsulation failure the packet is requeued (when still valid) and
 * OACTIVE is raised so the stack stops feeding us until lem_txeof()
 * frees descriptors.
 */
static void
lem_start_locked(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct mbuf	*m_head;

	EM_TX_LOCK_ASSERT(adapter);

	/* Only run when RUNNING is set and OACTIVE is clear. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold
	 */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		lem_txeof(adapter);
		/* Now do we at least have a minimal? */
		if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return;
		}
	}

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 *  Encapsulation can modify our pointer, and or make it
		 *  NULL on failure.  In that event, we can't requeue.
		 */
		if (lem_xmit(adapter, &m_head)) {
			if (m_head == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		adapter->watchdog_check = TRUE;
		adapter->watchdog_time = ticks;
	}
	/* Ring nearly full: tell the stack to back off. */
	if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	return;
}
866
867 static void
868 lem_start(struct ifnet *ifp)
869 {
870         struct adapter *adapter = ifp->if_softc;
871
872         EM_TX_LOCK(adapter);
873         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
874                 lem_start_locked(ifp);
875         EM_TX_UNLOCK(adapter);
876 }
877
878 /*********************************************************************
879  *  Ioctl entry point
880  *
881  *  em_ioctl is called when the user wants to configure the
882  *  interface.
883  *
884  *  return 0 on success, positive on failure
885  **********************************************************************/
886
/*
 * lem_ioctl - interface configuration entry point.
 *
 * Dispatches the usual SIOC* requests: address/MTU/flags changes,
 * multicast list updates, media selection and capability toggles.
 * Anything unrecognized falls through to ether_ioctl().
 *
 * Returns 0 on success, positive errno on failure.
 */
static int
lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
#endif
	bool		avoid_reset = FALSE;
	int		error = 0;

	/* Set by lem_detach(): the device is going away, refuse all work. */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				lem_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		EM_CORE_LOCK(adapter);
		/* 82542 has no jumbo frame support; cap at standard size. */
		switch (adapter->hw.mac.type) {
		case e1000_82542:
			max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			EM_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		/* MTU change requires a full re-init to resize buffers. */
		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		lem_init_locked(adapter);
		EM_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		EM_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Only PROMISC/ALLMULTI changed: update
				 * filters without a full reset.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					lem_disable_promisc(adapter);
					lem_set_promisc(adapter);
				}
			} else
				lem_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				EM_TX_LOCK(adapter);
				lem_stop(adapter);
				EM_TX_UNLOCK(adapter);
			}
		/* Remember flags so the XOR above works next time. */
		adapter->if_flags = ifp->if_flags;
		EM_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			EM_CORE_LOCK(adapter);
			lem_disable_intr(adapter);
			lem_set_multi(adapter);
			if (adapter->hw.mac.type == e1000_82542 && 
			    adapter->hw.revision_id == E1000_REVISION_2) {
				lem_initialize_receive_unit(adapter);
			}
#ifdef DEVICE_POLLING
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				lem_enable_intr(adapter);
			EM_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		EM_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			EM_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		EM_CORE_UNLOCK(adapter);
		/* FALLTHROUGH -- both set and get go to ifmedia_ioctl() */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		/* mask holds only the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(lem_poll, ifp);
				if (error)
					return (error);
				EM_CORE_LOCK(adapter);
				lem_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				EM_CORE_LOCK(adapter);
				lem_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				EM_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if ((mask & IFCAP_WOL) &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if (mask & IFCAP_WOL_MCAST)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if (mask & IFCAP_WOL_MAGIC)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			lem_init(adapter);
		VLAN_CAPABILITIES(ifp);
		break;
	    }

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1064
1065
1066 /*********************************************************************
1067  *  Init entry point
1068  *
1069  *  This routine is used in two ways. It is used by the stack as
1070  *  init entry point in network interface structure. It is also used
1071  *  by the driver as a hw/sw initialization routine to get to a
1072  *  consistent state.
1073  *
1074  *  return 0 on success, positive on failure
1075  **********************************************************************/
1076
/*
 * lem_init_locked - bring the adapter to a consistent running state.
 *
 * Caller must hold the core lock.  Stops the adapter, programs the
 * packet buffer split, MAC address, offload capabilities, TX/RX rings
 * and interrupt state, then marks the interface RUNNING.  Returns
 * silently (interface left down) if hardware or RX setup fails.
 */
static void
lem_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	u32		pba;

	INIT_DEBUGOUT("lem_init: begin");

	EM_CORE_LOCK_ASSERT(adapter);

	EM_TX_LOCK(adapter);
	lem_stop(adapter);
	EM_TX_UNLOCK(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		/* 82547 also needs the TX FIFO workaround state reset. */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}

	INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
	      ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/* Initialize the hardware */
	if (lem_hardware_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		return;
	}
	lem_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (adapter->hw.mac.type >= e1000_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	}

	/* Configure for OS presence */
	lem_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	lem_setup_transmit_structures(adapter);
	lem_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	lem_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (lem_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		EM_TX_LOCK(adapter);
		lem_stop(adapter);
		EM_TX_UNLOCK(adapter);
		return;
	}
	lem_initialize_receive_unit(adapter);

	/* Use real VLAN Filter support? */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			/* Use real VLAN Filter support */
			lem_setup_vlan_hw_support(adapter);
		else {
			u32 ctrl;
			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
			ctrl |= E1000_CTRL_VME;
			E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
		}
	}

	/* Don't lose promiscuous settings */
	lem_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		lem_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		lem_enable_intr(adapter);

	/* AMT based hardware can now take control from firmware */
	if (adapter->has_manage && adapter->has_amt)
		lem_get_hw_control(adapter);
}
1207
/*
 * Unlocked wrapper used as the ifnet if_init entry point: grab the
 * core lock and run the real initialization.
 */
static void
lem_init(void *arg)
{
	struct adapter *sc = arg;

	EM_CORE_LOCK(sc);
	lem_init_locked(sc);
	EM_CORE_UNLOCK(sc);
}
1217
1218
1219 #ifdef DEVICE_POLLING
1220 /*********************************************************************
1221  *
1222  *  Legacy polling routine  
1223  *
1224  *********************************************************************/
/*
 * lem_poll - DEVICE_POLLING handler.
 *
 * Called by the polling framework instead of interrupts.  On
 * POLL_AND_CHECK_STATUS also samples ICR to catch link changes.
 * Returns the number of receive packets processed.
 */
static int
lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u32		reg_icr, rx_done = 0;

	EM_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		EM_CORE_UNLOCK(adapter);
		return (rx_done);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Reading ICR also clears the pending causes. */
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			lem_update_link_status(adapter);
			callout_reset(&adapter->timer, hz,
			    lem_local_timer, adapter);
		}
	}
	EM_CORE_UNLOCK(adapter);

	/* RX runs lockless here; count limits the work per poll tick. */
	lem_rxeof(adapter, count, &rx_done);

	/* Reap completed transmits and restart the queue if backed up. */
	EM_TX_LOCK(adapter);
	lem_txeof(adapter);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
	return (rx_done);
}
1258 #endif /* DEVICE_POLLING */
1259
1260 /*********************************************************************
1261  *
1262  *  Legacy Interrupt Service routine  
1263  *
1264  *********************************************************************/
/*
 * lem_intr - legacy (non-fast) interrupt service routine.
 *
 * Reads and thereby acknowledges ICR, handles link-change causes
 * under the core lock, then processes RX and TX completions.
 */
static void
lem_intr(void *arg)
{
	struct adapter	*adapter = arg;
	struct ifnet	*ifp = adapter->ifp;
	u32		reg_icr;


	/* Nothing to do if polling owns the device or it is down. */
	if ((ifp->if_capenable & IFCAP_POLLING) ||
	    ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0))
		return;

	EM_CORE_LOCK(adapter);
	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;

	/* All-ones: device gone; zero: not our interrupt (shared line). */
	if ((reg_icr == 0xffffffff) || (reg_icr == 0)) {
		EM_CORE_UNLOCK(adapter);
		return;
	}

	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		lem_update_link_status(adapter);
		/* Deal with TX cruft when link lost */
		lem_tx_purge(adapter);
		callout_reset(&adapter->timer, hz,
		    lem_local_timer, adapter);
		EM_CORE_UNLOCK(adapter);
		return;
	}

	EM_CORE_UNLOCK(adapter);
	/* -1: no RX processing limit for the legacy path. */
	lem_rxeof(adapter, -1, NULL);

	EM_TX_LOCK(adapter);
	lem_txeof(adapter);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		lem_start_locked(ifp);
	EM_TX_UNLOCK(adapter);
	return;
}
1310
1311
1312 static void
1313 lem_handle_link(void *context, int pending)
1314 {
1315         struct adapter  *adapter = context;
1316         struct ifnet *ifp = adapter->ifp;
1317
1318         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1319                 return;
1320
1321         EM_CORE_LOCK(adapter);
1322         callout_stop(&adapter->timer);
1323         lem_update_link_status(adapter);
1324         /* Deal with TX cruft when link lost */
1325         lem_tx_purge(adapter);
1326         callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1327         EM_CORE_UNLOCK(adapter);
1328 }
1329
1330
1331 /* Combined RX/TX handler, used by Legacy and MSI */
/* Combined RX/TX handler, used by Legacy and MSI */
/*
 * Runs from the taskqueue after lem_irq_fast() masked interrupts;
 * processes RX and TX completions, restarts the transmit queue, and
 * re-enables interrupts.  RUNNING is re-checked before unmasking in
 * case the interface was stopped while this task was queued.
 */
static void
lem_handle_rxtx(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet	*ifp = adapter->ifp;


	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		lem_rxeof(adapter, adapter->rx_process_limit, NULL);
		EM_TX_LOCK(adapter);
		lem_txeof(adapter);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			lem_start_locked(ifp);
		EM_TX_UNLOCK(adapter);
	}

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		lem_enable_intr(adapter);
}
1351
1352 /*********************************************************************
1353  *
1354  *  Fast Legacy/MSI Combined Interrupt Service routine  
1355  *
1356  *********************************************************************/
1357 static int
1358 lem_irq_fast(void *arg)
1359 {
1360         struct adapter  *adapter = arg;
1361         struct ifnet    *ifp;
1362         u32             reg_icr;
1363
1364         ifp = adapter->ifp;
1365
1366         reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1367
1368         /* Hot eject?  */
1369         if (reg_icr == 0xffffffff)
1370                 return FILTER_STRAY;
1371
1372         /* Definitely not our interrupt.  */
1373         if (reg_icr == 0x0)
1374                 return FILTER_STRAY;
1375
1376         /*
1377          * Mask interrupts until the taskqueue is finished running.  This is
1378          * cheap, just assume that it is needed.  This also works around the
1379          * MSI message reordering errata on certain systems.
1380          */
1381         lem_disable_intr(adapter);
1382         taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1383
1384         /* Link status change */
1385         if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1386                 adapter->hw.mac.get_link_status = 1;
1387                 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1388         }
1389
1390         if (reg_icr & E1000_ICR_RXO)
1391                 adapter->rx_overruns++;
1392         return FILTER_HANDLED;
1393 }
1394
1395
1396 /*********************************************************************
1397  *
1398  *  Media Ioctl callback
1399  *
1400  *  This routine is called whenever the user queries the status of
1401  *  the interface using ifconfig.
1402  *
1403  **********************************************************************/
/*
 * lem_media_status - report link state and active media to ifconfig.
 *
 * Fills ifmr with IFM_AVALID plus, when link is up, IFM_ACTIVE and
 * the speed/duplex currently negotiated.
 */
static void
lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;

	INIT_DEBUGOUT("lem_media_status: begin");

	EM_CORE_LOCK(adapter);
	lem_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report valid-but-inactive and stop. */
	if (!adapter->link_active) {
		EM_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		/* 82545 fiber reports as LX rather than the default SX. */
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (adapter->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (adapter->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	EM_CORE_UNLOCK(adapter);
}
1449
1450 /*********************************************************************
1451  *
1452  *  Media Ioctl callback
1453  *
1454  *  This routine is called when the user changes speed/duplex using
1455  *  media/mediopt option with ifconfig.
1456  *
1457  **********************************************************************/
/*
 * lem_media_change - apply a media/mediaopt selection from ifconfig.
 *
 * Translates the requested ifmedia word into autoneg or forced
 * speed/duplex settings, then re-initializes the adapter so they
 * take effect.  Returns EINVAL for non-Ethernet media, else 0.
 */
static int
lem_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia	*ifm = &adapter->media;

	INIT_DEBUGOUT("lem_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	EM_CORE_LOCK(adapter);
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		/* Gigabit always autonegotiates, full duplex only. */
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case IFM_10_T:
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	default:
		/*
		 * NOTE(review): an unsupported subtype only logs; the
		 * code still re-inits below and returns 0.  Matches
		 * historical driver behavior -- confirm before changing.
		 */
		device_printf(adapter->dev, "Unsupported media type\n");
	}

	lem_init_locked(adapter);
	EM_CORE_UNLOCK(adapter);

	return (0);
}
1506
1507 /*********************************************************************
1508  *
1509  *  This routine maps the mbufs to tx descriptors.
1510  *
1511  *  return 0 on success, positive on failure
1512  **********************************************************************/
1513
1514 static int
1515 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1516 {
1517         bus_dma_segment_t       segs[EM_MAX_SCATTER];
1518         bus_dmamap_t            map;
1519         struct em_buffer        *tx_buffer, *tx_buffer_mapped;
1520         struct e1000_tx_desc    *ctxd = NULL;
1521         struct mbuf             *m_head;
1522         u32                     txd_upper, txd_lower, txd_used, txd_saved;
1523         int                     error, nsegs, i, j, first, last = 0;
1524
1525         m_head = *m_headp;
1526         txd_upper = txd_lower = txd_used = txd_saved = 0;
1527
1528         /*
1529         ** When doing checksum offload, it is critical to
1530         ** make sure the first mbuf has more than header,
1531         ** because that routine expects data to be present.
1532         */
1533         if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
1534             (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
1535                 m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
1536                 *m_headp = m_head;
1537                 if (m_head == NULL)
1538                         return (ENOBUFS);
1539         }
1540
1541         /*
1542          * Map the packet for DMA
1543          *
1544          * Capture the first descriptor index,
1545          * this descriptor will have the index
1546          * of the EOP which is the only one that
1547          * now gets a DONE bit writeback.
1548          */
1549         first = adapter->next_avail_tx_desc;
1550         tx_buffer = &adapter->tx_buffer_area[first];
1551         tx_buffer_mapped = tx_buffer;
1552         map = tx_buffer->map;
1553
1554         error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1555             *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1556
1557         /*
1558          * There are two types of errors we can (try) to handle:
1559          * - EFBIG means the mbuf chain was too long and bus_dma ran
1560          *   out of segments.  Defragment the mbuf chain and try again.
1561          * - ENOMEM means bus_dma could not obtain enough bounce buffers
1562          *   at this point in time.  Defer sending and try again later.
1563          * All other errors, in particular EINVAL, are fatal and prevent the
1564          * mbuf chain from ever going through.  Drop it and report error.
1565          */
1566         if (error == EFBIG) {
1567                 struct mbuf *m;
1568
1569                 m = m_defrag(*m_headp, M_NOWAIT);
1570                 if (m == NULL) {
1571                         adapter->mbuf_alloc_failed++;
1572                         m_freem(*m_headp);
1573                         *m_headp = NULL;
1574                         return (ENOBUFS);
1575                 }
1576                 *m_headp = m;
1577
1578                 /* Try it again */
1579                 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1580                     *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1581
1582                 if (error) {
1583                         adapter->no_tx_dma_setup++;
1584                         m_freem(*m_headp);
1585                         *m_headp = NULL;
1586                         return (error);
1587                 }
1588         } else if (error != 0) {
1589                 adapter->no_tx_dma_setup++;
1590                 return (error);
1591         }
1592
1593         if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1594                 adapter->no_tx_desc_avail2++;
1595                 bus_dmamap_unload(adapter->txtag, map);
1596                 return (ENOBUFS);
1597         }
1598         m_head = *m_headp;
1599
1600         /* Do hardware assists */
1601         if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1602                 lem_transmit_checksum_setup(adapter,  m_head,
1603                     &txd_upper, &txd_lower);
1604
1605         i = adapter->next_avail_tx_desc;
1606         if (adapter->pcix_82544) 
1607                 txd_saved = i;
1608
1609         /* Set up our transmit descriptors */
1610         for (j = 0; j < nsegs; j++) {
1611                 bus_size_t seg_len;
1612                 bus_addr_t seg_addr;
1613                 /* If adapter is 82544 and on PCIX bus */
1614                 if(adapter->pcix_82544) {
1615                         DESC_ARRAY      desc_array;
1616                         u32             array_elements, counter;
1617                         /*
1618                          * Check the Address and Length combination and
1619                          * split the data accordingly
1620                          */
1621                         array_elements = lem_fill_descriptors(segs[j].ds_addr,
1622                             segs[j].ds_len, &desc_array);
1623                         for (counter = 0; counter < array_elements; counter++) {
1624                                 if (txd_used == adapter->num_tx_desc_avail) {
1625                                         adapter->next_avail_tx_desc = txd_saved;
1626                                         adapter->no_tx_desc_avail2++;
1627                                         bus_dmamap_unload(adapter->txtag, map);
1628                                         return (ENOBUFS);
1629                                 }
1630                                 tx_buffer = &adapter->tx_buffer_area[i];
1631                                 ctxd = &adapter->tx_desc_base[i];
1632                                 ctxd->buffer_addr = htole64(
1633                                     desc_array.descriptor[counter].address);
1634                                 ctxd->lower.data = htole32(
1635                                     (adapter->txd_cmd | txd_lower | (u16)
1636                                     desc_array.descriptor[counter].length));
1637                                 ctxd->upper.data =
1638                                     htole32((txd_upper));
1639                                 last = i;
1640                                 if (++i == adapter->num_tx_desc)
1641                                          i = 0;
1642                                 tx_buffer->m_head = NULL;
1643                                 tx_buffer->next_eop = -1;
1644                                 txd_used++;
1645                         }
1646                 } else {
1647                         tx_buffer = &adapter->tx_buffer_area[i];
1648                         ctxd = &adapter->tx_desc_base[i];
1649                         seg_addr = segs[j].ds_addr;
1650                         seg_len  = segs[j].ds_len;
1651                         ctxd->buffer_addr = htole64(seg_addr);
1652                         ctxd->lower.data = htole32(
1653                         adapter->txd_cmd | txd_lower | seg_len);
1654                         ctxd->upper.data =
1655                             htole32(txd_upper);
1656                         last = i;
1657                         if (++i == adapter->num_tx_desc)
1658                                 i = 0;
1659                         tx_buffer->m_head = NULL;
1660                         tx_buffer->next_eop = -1;
1661                 }
1662         }
1663
1664         adapter->next_avail_tx_desc = i;
1665
1666         if (adapter->pcix_82544)
1667                 adapter->num_tx_desc_avail -= txd_used;
1668         else
1669                 adapter->num_tx_desc_avail -= nsegs;
1670
1671         if (m_head->m_flags & M_VLANTAG) {
1672                 /* Set the vlan id. */
1673                 ctxd->upper.fields.special =
1674                     htole16(m_head->m_pkthdr.ether_vtag);
1675                 /* Tell hardware to add tag */
1676                 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
1677         }
1678
1679         tx_buffer->m_head = m_head;
1680         tx_buffer_mapped->map = tx_buffer->map;
1681         tx_buffer->map = map;
1682         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1683
1684         /*
1685          * Last Descriptor of Packet
1686          * needs End Of Packet (EOP)
1687          * and Report Status (RS)
1688          */
1689         ctxd->lower.data |=
1690             htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1691         /*
1692          * Keep track in the first buffer which
1693          * descriptor will be written back
1694          */
1695         tx_buffer = &adapter->tx_buffer_area[first];
1696         tx_buffer->next_eop = last;
1697         adapter->watchdog_time = ticks;
1698
1699         /*
1700          * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1701          * that this frame is available to transmit.
1702          */
1703         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1704             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1705         if (adapter->hw.mac.type == e1000_82547 &&
1706             adapter->link_duplex == HALF_DUPLEX)
1707                 lem_82547_move_tail(adapter);
1708         else {
1709                 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1710                 if (adapter->hw.mac.type == e1000_82547)
1711                         lem_82547_update_fifo_head(adapter,
1712                             m_head->m_pkthdr.len);
1713         }
1714
1715         return (0);
1716 }
1717
1718 /*********************************************************************
1719  *
1720  * 82547 workaround to avoid controller hang in half-duplex environment.
1721  * The workaround is to avoid queuing a large packet that would span
1722  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1723  * in this case. We do that only when FIFO is quiescent.
1724  *
1725  **********************************************************************/
static void
lem_82547_move_tail(void *arg)
{
        struct adapter *adapter = arg;
        struct e1000_tx_desc *tx_desc;
        u16     hw_tdt, sw_tdt, length = 0;
        bool    eop = 0;

        EM_TX_LOCK_ASSERT(adapter);

        /*
         * Walk the descriptors between the tail the hardware has seen
         * (TDT) and the tail the driver has queued, advancing TDT one
         * whole frame at a time so no frame spans the TX FIFO boundary.
         */
        hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
        sw_tdt = adapter->next_avail_tx_desc;
        
        while (hw_tdt != sw_tdt) {
                tx_desc = &adapter->tx_desc_base[hw_tdt];
                /* Accumulate the byte count of the frame in progress */
                length += tx_desc->lower.flags.length;
                eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
                if (++hw_tdt == adapter->num_tx_desc)
                        hw_tdt = 0;

                if (eop) {
                        /*
                         * Frame complete.  If it would wrap the internal
                         * FIFO and the FIFO could not be reset, retry
                         * from the callout one tick later instead of
                         * advancing the tail now.
                         */
                        if (lem_82547_fifo_workaround(adapter, length)) {
                                adapter->tx_fifo_wrk_cnt++;
                                callout_reset(&adapter->tx_fifo_timer, 1,
                                        lem_82547_move_tail, adapter);
                                break;
                        }
                        E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
                        lem_82547_update_fifo_head(adapter, length);
                        length = 0;
                }
        }       
}
1759
1760 static int
1761 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1762 {       
1763         int fifo_space, fifo_pkt_len;
1764
1765         fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1766
1767         if (adapter->link_duplex == HALF_DUPLEX) {
1768                 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1769
1770                 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1771                         if (lem_82547_tx_fifo_reset(adapter))
1772                                 return (0);
1773                         else
1774                                 return (1);
1775                 }
1776         }
1777
1778         return (0);
1779 }
1780
1781 static void
1782 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1783 {
1784         int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1785         
1786         /* tx_fifo_head is always 16 byte aligned */
1787         adapter->tx_fifo_head += fifo_pkt_len;
1788         if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1789                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1790         }
1791 }
1792
1793
/*
 * Reset the 82547 internal TX FIFO pointers.  Only performed when the
 * FIFO is completely quiescent; returns TRUE if the reset was done,
 * FALSE if the FIFO was still busy.
 */
static int
lem_82547_tx_fifo_reset(struct adapter *adapter)
{
        u32 tctl;

        /*
         * Quiescence check: descriptor ring tail == head, FIFO
         * tail == head (both live and saved copies), and the FIFO
         * packet count is zero.
         */
        if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
            E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
            (E1000_READ_REG(&adapter->hw, E1000_TDFT) == 
            E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
            (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
            E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
            (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
                /* Disable TX unit */
                tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
                E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
                    tctl & ~E1000_TCTL_EN);

                /* Reset FIFO pointers */
                E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
                    adapter->tx_head_addr);
                E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
                    adapter->tx_head_addr);
                E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
                    adapter->tx_head_addr);
                E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
                    adapter->tx_head_addr);

                /* Re-enable TX unit */
                E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
                E1000_WRITE_FLUSH(&adapter->hw);

                adapter->tx_fifo_head = 0;
                adapter->tx_fifo_reset_cnt++;

                return (TRUE);
        }
        else {
                return (FALSE);
        }
}
1834
1835 static void
1836 lem_set_promisc(struct adapter *adapter)
1837 {
1838         struct ifnet    *ifp = adapter->ifp;
1839         u32             reg_rctl;
1840
1841         reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1842
1843         if (ifp->if_flags & IFF_PROMISC) {
1844                 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1845                 /* Turn this on if you want to see bad packets */
1846                 if (lem_debug_sbp)
1847                         reg_rctl |= E1000_RCTL_SBP;
1848                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1849         } else if (ifp->if_flags & IFF_ALLMULTI) {
1850                 reg_rctl |= E1000_RCTL_MPE;
1851                 reg_rctl &= ~E1000_RCTL_UPE;
1852                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1853         }
1854 }
1855
1856 static void
1857 lem_disable_promisc(struct adapter *adapter)
1858 {
1859         u32     reg_rctl;
1860
1861         reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1862
1863         reg_rctl &=  (~E1000_RCTL_UPE);
1864         reg_rctl &=  (~E1000_RCTL_MPE);
1865         reg_rctl &=  (~E1000_RCTL_SBP);
1866         E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1867 }
1868
1869
1870 /*********************************************************************
1871  *  Multicast Update
1872  *
1873  *  This routine is called whenever multicast address list is updated.
1874  *
1875  **********************************************************************/
1876
static void
lem_set_multi(struct adapter *adapter)
{
        struct ifnet    *ifp = adapter->ifp;
        struct ifmultiaddr *ifma;
        u32 reg_rctl = 0;
        u8  *mta; /* Multicast array memory */
        int mcnt = 0;

        IOCTL_DEBUGOUT("lem_set_multi: begin");

        /* Start from a clean multicast table */
        mta = adapter->mta;
        bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

        /*
         * 82542 rev 2 errata: the receiver must be held in reset (and
         * memory-write-invalidate disabled) while the multicast table
         * is updated.  MWI is restored at the end of this function.
         */
        if (adapter->hw.mac.type == e1000_82542 && 
            adapter->hw.revision_id == E1000_REVISION_2) {
                reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
                if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
                        e1000_pci_clear_mwi(&adapter->hw);
                reg_rctl |= E1000_RCTL_RST;
                E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
                msec_delay(5);
        }

        /* Collect up to MAX_NUM_MULTICAST_ADDRESSES link-layer addresses */
#if __FreeBSD_version < 800000
        IF_ADDR_LOCK(ifp);
#else
        if_maddr_rlock(ifp);
#endif
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;

                if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
                        break;

                bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
                mcnt++;
        }
#if __FreeBSD_version < 800000
        IF_ADDR_UNLOCK(ifp);
#else
        if_maddr_runlock(ifp);
#endif
        /*
         * Too many addresses for the hardware filter: fall back to
         * multicast-promiscuous mode instead of programming the list.
         */
        if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
                reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
                reg_rctl |= E1000_RCTL_MPE;
                E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
        } else
                e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);

        /* Take the 82542 rev 2 receiver back out of reset (see above) */
        if (adapter->hw.mac.type == e1000_82542 && 
            adapter->hw.revision_id == E1000_REVISION_2) {
                reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
                reg_rctl &= ~E1000_RCTL_RST;
                E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
                msec_delay(5);
                if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
                        e1000_pci_set_mwi(&adapter->hw);
        }
}
1939
1940
1941 /*********************************************************************
1942  *  Timer routine
1943  *
1944  *  This routine checks for link status and updates statistics.
1945  *
1946  **********************************************************************/
1947
static void
lem_local_timer(void *arg)
{
        struct adapter  *adapter = arg;

        EM_CORE_LOCK_ASSERT(adapter);

        lem_update_link_status(adapter);
        lem_update_stats_counters(adapter);

        /* Workaround for 10Mb link partners that lack smartspeed */
        lem_smartspeed(adapter);

        /*
         * We check the watchdog: the time since
         * the last TX descriptor was cleaned.
         * This implies a functional TX engine.
         */
        if ((adapter->watchdog_check == TRUE) &&
            (ticks - adapter->watchdog_time > EM_WATCHDOG))
                goto hung;

        /* Re-arm ourselves to run again in one second */
        callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
        return;
hung:
        /* TX engine appears stalled: log it, count it, and reinit */
        device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
        adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        adapter->watchdog_events++;
        lem_init_locked(adapter);
}
1977
/*
 * Determine current link state from the MAC/PHY (per media type) and,
 * on a transition, update speed/duplex, interface baudrate, watchdog
 * enable state, and notify the network stack.
 */
static void
lem_update_link_status(struct adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct ifnet *ifp = adapter->ifp;
        device_t dev = adapter->dev;
        u32 link_check = 0;

        /* Get the cached link value or read phy for real */
        switch (hw->phy.media_type) {
        case e1000_media_type_copper:
                if (hw->mac.get_link_status) {
                        /* Do the work to read phy */
                        e1000_check_for_link(hw);
                        link_check = !hw->mac.get_link_status;
                        if (link_check) /* ESB2 fix */
                                e1000_cfg_on_link_up(hw);
                } else
                        link_check = TRUE;
                break;
        case e1000_media_type_fiber:
                /* Fiber: link state comes from the STATUS register */
                e1000_check_for_link(hw);
                link_check = (E1000_READ_REG(hw, E1000_STATUS) &
                                 E1000_STATUS_LU);
                break;
        case e1000_media_type_internal_serdes:
                e1000_check_for_link(hw);
                link_check = adapter->hw.mac.serdes_has_link;
                break;
        default:
        case e1000_media_type_unknown:
                break;
        }

        /* Now check for a transition */
        if (link_check && (adapter->link_active == 0)) {
                /* Link came up: latch the negotiated speed/duplex */
                e1000_get_speed_and_duplex(hw, &adapter->link_speed,
                    &adapter->link_duplex);
                if (bootverbose)
                        device_printf(dev, "Link is up %d Mbps %s\n",
                            adapter->link_speed,
                            ((adapter->link_duplex == FULL_DUPLEX) ?
                            "Full Duplex" : "Half Duplex"));
                adapter->link_active = 1;
                adapter->smartspeed = 0;
                ifp->if_baudrate = adapter->link_speed * 1000000;
                if_link_state_change(ifp, LINK_STATE_UP);
        } else if (!link_check && (adapter->link_active == 1)) {
                ifp->if_baudrate = adapter->link_speed = 0;
                adapter->link_duplex = 0;
                if (bootverbose)
                        device_printf(dev, "Link is Down\n");
                adapter->link_active = 0;
                /* Link down, disable watchdog */
                adapter->watchdog_check = FALSE;
                if_link_state_change(ifp, LINK_STATE_DOWN);
        }
}
2036
2037 /*********************************************************************
2038  *
2039  *  This routine disables all traffic on the adapter by issuing a
2040  *  global reset on the MAC and deallocates TX/RX buffers.
2041  *
2042  *  This routine should always be called with BOTH the CORE
2043  *  and TX locks.
2044  **********************************************************************/
2045
static void
lem_stop(void *arg)
{
        struct adapter  *adapter = arg;
        struct ifnet    *ifp = adapter->ifp;

        EM_CORE_LOCK_ASSERT(adapter);
        EM_TX_LOCK_ASSERT(adapter);

        INIT_DEBUGOUT("lem_stop: begin");

        /* Quiesce interrupts and pending timers before resetting */
        lem_disable_intr(adapter);
        callout_stop(&adapter->timer);
        callout_stop(&adapter->tx_fifo_timer);

        /* Tell the stack that the interface is no longer active */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

        /* Global reset; also clear wakeup control on 82544 and newer */
        e1000_reset_hw(&adapter->hw);
        if (adapter->hw.mac.type >= e1000_82544)
                E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);

        e1000_led_off(&adapter->hw);
        e1000_cleanup_led(&adapter->hw);
}
2071
2072
2073 /*********************************************************************
2074  *
2075  *  Determine hardware revision.
2076  *
2077  **********************************************************************/
static void
lem_identify_hardware(struct adapter *adapter)
{
        device_t dev = adapter->dev;

        /* Make sure our PCI config space has the necessary stuff set */
        adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
        if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
            (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
                device_printf(dev, "Memory Access and/or Bus Master bits "
                    "were not set!\n");
                /* Force both bits on and write the command word back */
                adapter->hw.bus.pci_cmd_word |=
                (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
                pci_write_config(dev, PCIR_COMMAND,
                    adapter->hw.bus.pci_cmd_word, 2);
        }

        /* Save off the information about this board */
        adapter->hw.vendor_id = pci_get_vendor(dev);
        adapter->hw.device_id = pci_get_device(dev);
        adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
        adapter->hw.subsystem_vendor_id =
            pci_read_config(dev, PCIR_SUBVEND_0, 2);
        adapter->hw.subsystem_device_id =
            pci_read_config(dev, PCIR_SUBDEV_0, 2);

        /* Do Shared Code Init and Setup */
        if (e1000_set_mac_type(&adapter->hw)) {
                device_printf(dev, "Setup init failure\n");
                return;
        }
}
2110
/*
 * Map the device's register BAR and, on adapters newer than the 82543,
 * locate and map the I/O port BAR as well.  Returns E1000_SUCCESS or
 * ENXIO if a required resource could not be allocated.
 */
static int
lem_allocate_pci_resources(struct adapter *adapter)
{
        device_t        dev = adapter->dev;
        int             val, rid, error = E1000_SUCCESS;

        /* Memory-mapped register BAR (BAR 0) */
        rid = PCIR_BAR(0);
        adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &rid, RF_ACTIVE);
        if (adapter->memory == NULL) {
                device_printf(dev, "Unable to allocate bus resource: memory\n");
                return (ENXIO);
        }
        adapter->osdep.mem_bus_space_tag =
            rman_get_bustag(adapter->memory);
        adapter->osdep.mem_bus_space_handle =
            rman_get_bushandle(adapter->memory);
        adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

        /* Only older adapters use IO mapping */
        if (adapter->hw.mac.type > e1000_82543) {
                /* Figure our where our IO BAR is ? */
                for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
                        val = pci_read_config(dev, rid, 4);
                        if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
                                adapter->io_rid = rid;
                                break;
                        }
                        rid += 4;
                        /* check for 64bit BAR */
                        if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
                                rid += 4;
                }
                /* Ran off the end of the BAR region without a match */
                if (rid >= PCIR_CIS) {
                        device_printf(dev, "Unable to locate IO BAR\n");
                        return (ENXIO);
                }
                adapter->ioport = bus_alloc_resource_any(dev,
                    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
                if (adapter->ioport == NULL) {
                        device_printf(dev, "Unable to allocate bus resource: "
                            "ioport\n");
                        return (ENXIO);
                }
                adapter->hw.io_base = 0;
                adapter->osdep.io_bus_space_tag =
                    rman_get_bustag(adapter->ioport);
                adapter->osdep.io_bus_space_handle =
                    rman_get_bushandle(adapter->ioport);
        }

        /* Shared code reaches the OS layer through this back pointer */
        adapter->hw.back = &adapter->osdep;

        return (error);
}
2166
2167 /*********************************************************************
2168  *
2169  *  Setup the Legacy or MSI Interrupt handler
2170  *
2171  **********************************************************************/
int
lem_allocate_irq(struct adapter *adapter)
{
        device_t dev = adapter->dev;
        int error, rid = 0;

        /* Manually turn off all interrupts */
        E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

        /* We allocate a single interrupt resource */
        adapter->res[0] = bus_alloc_resource_any(dev,
            SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
        if (adapter->res[0] == NULL) {
                device_printf(dev, "Unable to allocate bus resource: "
                    "interrupt\n");
                return (ENXIO);
        }

        /*
         * Do Legacy setup?  A plain (non-fast) handler runs all work
         * directly in the interrupt thread; selected by tunable.
         */
        if (lem_use_legacy_irq) {
                if ((error = bus_setup_intr(dev, adapter->res[0],
                    INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
                    &adapter->tag[0])) != 0) {
                        device_printf(dev,
                            "Failed to register interrupt handler");
                        return (error);
                }
                return (0);
        }

        /*
         * Use a Fast interrupt and the associated
         * deferred processing contexts.
         */
        TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
        TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
        adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
            taskqueue_thread_enqueue, &adapter->tq);
        taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
            device_get_nameunit(adapter->dev));
        if ((error = bus_setup_intr(dev, adapter->res[0],
            INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
            &adapter->tag[0])) != 0) {
                device_printf(dev, "Failed to register fast interrupt "
                            "handler: %d\n", error);
                /* Undo the taskqueue created above before failing */
                taskqueue_free(adapter->tq);
                adapter->tq = NULL;
                return (error);
        }
        
        return (0);
}
2224
2225
2226 static void
2227 lem_free_pci_resources(struct adapter *adapter)
2228 {
2229         device_t dev = adapter->dev;
2230
2231
2232         if (adapter->tag[0] != NULL) {
2233                 bus_teardown_intr(dev, adapter->res[0],
2234                     adapter->tag[0]);
2235                 adapter->tag[0] = NULL;
2236         }
2237
2238         if (adapter->res[0] != NULL) {
2239                 bus_release_resource(dev, SYS_RES_IRQ,
2240                     0, adapter->res[0]);
2241         }
2242
2243         if (adapter->memory != NULL)
2244                 bus_release_resource(dev, SYS_RES_MEMORY,
2245                     PCIR_BAR(0), adapter->memory);
2246
2247         if (adapter->ioport != NULL)
2248                 bus_release_resource(dev, SYS_RES_IOPORT,
2249                     adapter->io_rid, adapter->ioport);
2250 }
2251
2252
2253 /*********************************************************************
2254  *
2255  *  Initialize the hardware to a configuration
2256  *  as specified by the adapter structure.
2257  *
2258  **********************************************************************/
static int
lem_hardware_init(struct adapter *adapter)
{
        device_t dev = adapter->dev;
        u16     rx_buffer_size;

        INIT_DEBUGOUT("lem_hardware_init: begin");

        /* Issue a global reset */
        e1000_reset_hw(&adapter->hw);

        /* When hardware is reset, fifo_head is also reset */
        adapter->tx_fifo_head = 0;

        /*
         * These parameters control the automatic generation (Tx) and
         * response (Rx) to Ethernet PAUSE frames.
         * - High water mark should allow for at least two frames to be
         *   received after sending an XOFF.
         * - Low water mark works best when it is very near the high water mark.
         *   This allows the receiver to restart by sending XON when it has
         *   drained a bit. Here we use an arbitary value of 1500 which will
         *   restart after one full frame is pulled from the buffer. There
         *   could be several smaller frames in the buffer and if so they will
         *   not trigger the XON until their total number reduces the buffer
         *   by 1500.
         * - The pause time is fairly large at 1000 x 512ns = 512 usec.
         */
        /* PBA register holds the RX packet-buffer size in KB; scale to bytes */
        rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
            0xffff) << 10 );

        adapter->hw.fc.high_water = rx_buffer_size -
            roundup2(adapter->max_frame_size, 1024);
        adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;

        adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
        adapter->hw.fc.send_xon = TRUE;

        /* Set Flow control, use the tunable location if sane */
        if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
                adapter->hw.fc.requested_mode = lem_fc_setting;
        else
                adapter->hw.fc.requested_mode = e1000_fc_none;

        if (e1000_init_hw(&adapter->hw) < 0) {
                device_printf(dev, "Hardware Initialization Failed\n");
                return (EIO);
        }

        /* Prime the shared code's cached link state */
        e1000_check_for_link(&adapter->hw);

        return (0);
}
2312
2313 /*********************************************************************
2314  *
2315  *  Setup networking device structure and register an interface.
2316  *
2317  **********************************************************************/
static int
lem_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("lem_setup_interface: begin");

	/* Allocate the ifnet and wire up the driver entry points. */
	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init =  lem_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = lem_ioctl;
	ifp->if_start = lem_start;
	/* Size the software send queue to the TX ring, one slot reserved. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac.addr);

	ifp->if_capabilities = ifp->if_capenable = 0;

	/* Checksum offload is advertised only on 82543 and newer MACs. */
	if (adapter->hw.mac.type >= e1000_82543) {
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

	/*
	** Dont turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the em driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Enable only WOL MAGIC by default */
	if (adapter->wol) {
		ifp->if_capabilities |= IFCAP_WOL;
		ifp->if_capenable |= IFCAP_WOL_MAGIC;
	}

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK,
	    lem_media_change, lem_media_status);
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
		u_char fiber_type = IFM_1000_SX;	/* default type */

		/* 82545 fiber parts are registered as 1000BaseLX instead. */
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
	} else {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		/* No gigabit media entries are registered for the ife PHY. */
		if (adapter->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&adapter->media,
				IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	return (0);
}
2410
2411
2412 /*********************************************************************
2413  *
2414  *  Workaround for SmartSpeed on 82541 and 82547 controllers
2415  *
2416  **********************************************************************/
/*
 * SmartSpeed workaround (82541/82547, IGP PHY): if 1000BaseT autoneg
 * repeatedly reports a Master/Slave configuration fault, temporarily
 * clear the manual Master/Slave enable and restart autonegotiation;
 * after EM_SMARTSPEED_DOWNSHIFT attempts, turn it back on in case a
 * reduced-pair cable is in use.  Called periodically while link is down.
 */
static void
lem_smartspeed(struct adapter *adapter)
{
	u16 phy_tmp;

	/* Only applies: link down, IGP PHY, autoneg advertising 1000FULL. */
	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
	    adapter->hw.mac.autoneg == 0 ||
	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&adapter->hw,
			    PHY_1000T_CTRL, &phy_tmp);
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				/* Drop manual Master/Slave, restart autoneg. */
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&adapter->hw,
				    PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.mac.autoneg &&
				   !e1000_copper_link_autoneg(&adapter->hw) &&
				   !e1000_read_phy_reg(&adapter->hw,
				    PHY_CONTROL, &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					e1000_write_phy_reg(&adapter->hw,
					    PHY_CONTROL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.mac.autoneg &&
		   !e1000_copper_link_autoneg(&adapter->hw) &&
		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
2471
2472
2473 /*
2474  * Manage DMA'able memory.
2475  */
2476 static void
2477 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2478 {
2479         if (error)
2480                 return;
2481         *(bus_addr_t *) arg = segs[0].ds_addr;
2482 }
2483
2484 static int
2485 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2486         struct em_dma_alloc *dma, int mapflags)
2487 {
2488         int error;
2489
2490         error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2491                                 EM_DBA_ALIGN, 0,        /* alignment, bounds */
2492                                 BUS_SPACE_MAXADDR,      /* lowaddr */
2493                                 BUS_SPACE_MAXADDR,      /* highaddr */
2494                                 NULL, NULL,             /* filter, filterarg */
2495                                 size,                   /* maxsize */
2496                                 1,                      /* nsegments */
2497                                 size,                   /* maxsegsize */
2498                                 0,                      /* flags */
2499                                 NULL,                   /* lockfunc */
2500                                 NULL,                   /* lockarg */
2501                                 &dma->dma_tag);
2502         if (error) {
2503                 device_printf(adapter->dev,
2504                     "%s: bus_dma_tag_create failed: %d\n",
2505                     __func__, error);
2506                 goto fail_0;
2507         }
2508
2509         error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2510             BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2511         if (error) {
2512                 device_printf(adapter->dev,
2513                     "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2514                     __func__, (uintmax_t)size, error);
2515                 goto fail_2;
2516         }
2517
2518         dma->dma_paddr = 0;
2519         error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2520             size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2521         if (error || dma->dma_paddr == 0) {
2522                 device_printf(adapter->dev,
2523                     "%s: bus_dmamap_load failed: %d\n",
2524                     __func__, error);
2525                 goto fail_3;
2526         }
2527
2528         return (0);
2529
2530 fail_3:
2531         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2532 fail_2:
2533         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2534         bus_dma_tag_destroy(dma->dma_tag);
2535 fail_0:
2536         dma->dma_map = NULL;
2537         dma->dma_tag = NULL;
2538
2539         return (error);
2540 }
2541
/*
 * Release a DMA area set up by lem_dma_malloc().  Safe on a partially
 * constructed area: a NULL tag means nothing to do; a NULL map means
 * only the tag remains to be destroyed.  Teardown order matters:
 * sync, unload, free memory, then destroy the tag.
 */
static void
lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
{
	if (dma->dma_tag == NULL)
		return;
	if (dma->dma_map != NULL) {
		/* Complete any outstanding DMA before unmapping. */
		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
		dma->dma_map = NULL;
	}
	bus_dma_tag_destroy(dma->dma_tag);
	dma->dma_tag = NULL;
}
2557
2558
2559 /*********************************************************************
2560  *
2561  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2562  *  the information needed to transmit a packet on the wire.
2563  *
2564  **********************************************************************/
2565 static int
2566 lem_allocate_transmit_structures(struct adapter *adapter)
2567 {
2568         device_t dev = adapter->dev;
2569         struct em_buffer *tx_buffer;
2570         int error;
2571
2572         /*
2573          * Create DMA tags for tx descriptors
2574          */
2575         if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2576                                 1, 0,                   /* alignment, bounds */
2577                                 BUS_SPACE_MAXADDR,      /* lowaddr */
2578                                 BUS_SPACE_MAXADDR,      /* highaddr */
2579                                 NULL, NULL,             /* filter, filterarg */
2580                                 MCLBYTES * EM_MAX_SCATTER,      /* maxsize */
2581                                 EM_MAX_SCATTER,         /* nsegments */
2582                                 MCLBYTES,               /* maxsegsize */
2583                                 0,                      /* flags */
2584                                 NULL,                   /* lockfunc */
2585                                 NULL,                   /* lockarg */
2586                                 &adapter->txtag)) != 0) {
2587                 device_printf(dev, "Unable to allocate TX DMA tag\n");
2588                 goto fail;
2589         }
2590
2591         adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2592             adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2593         if (adapter->tx_buffer_area == NULL) {
2594                 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2595                 error = ENOMEM;
2596                 goto fail;
2597         }
2598
2599         /* Create the descriptor buffer dma maps */
2600         for (int i = 0; i < adapter->num_tx_desc; i++) {
2601                 tx_buffer = &adapter->tx_buffer_area[i];
2602                 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2603                 if (error != 0) {
2604                         device_printf(dev, "Unable to create TX DMA map\n");
2605                         goto fail;
2606                 }
2607                 tx_buffer->next_eop = -1;
2608         }
2609
2610         return (0);
2611 fail:
2612         lem_free_transmit_structures(adapter);
2613         return (error);
2614 }
2615
2616 /*********************************************************************
2617  *
2618  *  (Re)Initialize transmit structures.
2619  *
2620  **********************************************************************/
2621 static void
2622 lem_setup_transmit_structures(struct adapter *adapter)
2623 {
2624         struct em_buffer *tx_buffer;
2625 #ifdef DEV_NETMAP
2626         /* we are already locked */
2627         struct netmap_adapter *na = NA(adapter->ifp);
2628         struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
2629 #endif /* DEV_NETMAP */
2630
2631         /* Clear the old ring contents */
2632         bzero(adapter->tx_desc_base,
2633             (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2634
2635         /* Free any existing TX buffers */
2636         for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2637                 tx_buffer = &adapter->tx_buffer_area[i];
2638                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2639                     BUS_DMASYNC_POSTWRITE);
2640                 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2641                 m_freem(tx_buffer->m_head);
2642                 tx_buffer->m_head = NULL;
2643 #ifdef DEV_NETMAP
2644                 if (slot) {
2645                         /* the i-th NIC entry goes to slot si */
2646                         int si = netmap_idx_n2k(&na->tx_rings[0], i);
2647                         uint64_t paddr;
2648                         void *addr;
2649
2650                         addr = PNMB(slot + si, &paddr);
2651                         adapter->tx_desc_base[si].buffer_addr = htole64(paddr);
2652                         /* reload the map for netmap mode */
2653                         netmap_load_map(adapter->txtag, tx_buffer->map, addr);
2654                 }
2655 #endif /* DEV_NETMAP */
2656                 tx_buffer->next_eop = -1;
2657         }
2658
2659         /* Reset state */
2660         adapter->last_hw_offload = 0;
2661         adapter->next_avail_tx_desc = 0;
2662         adapter->next_tx_to_clean = 0;
2663         adapter->num_tx_desc_avail = adapter->num_tx_desc;
2664
2665         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2666             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2667
2668         return;
2669 }
2670
2671 /*********************************************************************
2672  *
2673  *  Enable transmit unit.
2674  *
2675  **********************************************************************/
/*
 * Enable the transmit unit: program the TX descriptor ring base and
 * length, reset the head/tail pointers, set the inter-packet gap and
 * interrupt-delay timers, then turn the transmitter on via TCTL.
 */
static void
lem_initialize_transmit_unit(struct adapter *adapter)
{
	u32	tctl, tipg = 0;
	u64	bus_addr;

	 INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
	    (u32)bus_addr);
	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
		/* 82542 has its own set of IPG defaults. */
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		/* Fiber/serdes and copper media use different IPGT values. */
		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
		    (adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes))
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
	/* The absolute TX interrupt delay (TADV) is written on >= 82540. */
	if(adapter->hw.mac.type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

	/* Setup Transmit Descriptor Base Settings */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS;

	/* Request delayed TX interrupts only if a delay is configured. */
	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
}
2738
2739 /*********************************************************************
2740  *
2741  *  Free all transmit related data structures.
2742  *
2743  **********************************************************************/
2744 static void
2745 lem_free_transmit_structures(struct adapter *adapter)
2746 {
2747         struct em_buffer *tx_buffer;
2748
2749         INIT_DEBUGOUT("free_transmit_structures: begin");
2750
2751         if (adapter->tx_buffer_area != NULL) {
2752                 for (int i = 0; i < adapter->num_tx_desc; i++) {
2753                         tx_buffer = &adapter->tx_buffer_area[i];
2754                         if (tx_buffer->m_head != NULL) {
2755                                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2756                                     BUS_DMASYNC_POSTWRITE);
2757                                 bus_dmamap_unload(adapter->txtag,
2758                                     tx_buffer->map);
2759                                 m_freem(tx_buffer->m_head);
2760                                 tx_buffer->m_head = NULL;
2761                         } else if (tx_buffer->map != NULL)
2762                                 bus_dmamap_unload(adapter->txtag,
2763                                     tx_buffer->map);
2764                         if (tx_buffer->map != NULL) {
2765                                 bus_dmamap_destroy(adapter->txtag,
2766                                     tx_buffer->map);
2767                                 tx_buffer->map = NULL;
2768                         }
2769                 }
2770         }
2771         if (adapter->tx_buffer_area != NULL) {
2772                 free(adapter->tx_buffer_area, M_DEVBUF);
2773                 adapter->tx_buffer_area = NULL;
2774         }
2775         if (adapter->txtag != NULL) {
2776                 bus_dma_tag_destroy(adapter->txtag);
2777                 adapter->txtag = NULL;
2778         }
2779 #if __FreeBSD_version >= 800000
2780         if (adapter->br != NULL)
2781                 buf_ring_free(adapter->br, M_DEVBUF);
2782 #endif
2783 }
2784
2785 /*********************************************************************
2786  *
2787  *  The offload context needs to be set when we transfer the first
2788  *  packet of a particular protocol (TCP/UDP). This routine has been
2789  *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2790  *
2791  *  Added back the old method of keeping the current context type
2792  *  and not setting if unnecessary, as this is reported to be a
2793  *  big performance win.  -jfv
2794  **********************************************************************/
static void
lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
    u32 *txd_upper, u32 *txd_lower)
{
	struct e1000_context_desc *TXD = NULL;
	struct em_buffer *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6;
	int curr_txd, ehdrlen;
	u32 cmd, hdr_len, ip_hlen;
	u16 etype;
	u8 ipproto;


	cmd = hdr_len = ipproto = 0;
	*txd_upper = *txd_lower = 0;
	curr_txd = adapter->next_avail_tx_desc;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	/* NOTE(review): assumes the Ethernet (+VLAN) and L3 headers are
	 * contiguous in the first mbuf -- confirm callers guarantee this. */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/*
	 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
	 * TODO: Support SCTP too when it hits the tree.
	 */
	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;

		/* Setup of IP header checksum. */
		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
			/*
			 * Start offset for header checksum calculation.
			 * End offset for header checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
			TXD->lower_setup.ip_fields.ipcse =
			    htole16(ehdrlen + ip_hlen);
			TXD->lower_setup.ip_fields.ipcso =
			    ehdrlen + offsetof(struct ip, ip_sum);
			cmd |= E1000_TXD_CMD_IP;
			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
		}

		hdr_len = ehdrlen + ip_hlen;
		ipproto = ip->ip_p;

		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */

		/* IPv6 doesn't have a header checksum. */

		hdr_len = ehdrlen + ip_hlen;
		ipproto = ip6->ip6_nxt;
		break;

	default:
		/* Not an offloadable ethertype: leave outputs zeroed. */
		return;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
			/* no need for context if already set */
			if (adapter->last_hw_offload == CSUM_TCP)
				return;
			adapter->last_hw_offload = CSUM_TCP;
			/*
			 * Start offset for payload checksum calculation.
			 * End offset for payload checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->upper_setup.tcp_fields.tucss = hdr_len;
			TXD->upper_setup.tcp_fields.tucse = htole16(0);
			TXD->upper_setup.tcp_fields.tucso =
			    hdr_len + offsetof(struct tcphdr, th_sum);
			cmd |= E1000_TXD_CMD_TCP;
		}
		break;
	case IPPROTO_UDP:
	{
		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
			/* no need for context if already set */
			if (adapter->last_hw_offload == CSUM_UDP)
				return;
			adapter->last_hw_offload = CSUM_UDP;
			/*
			 * Start offset for header checksum calculation.
			 * End offset for header checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD = (struct e1000_context_desc *)
			    &adapter->tx_desc_base[curr_txd];
			TXD->upper_setup.tcp_fields.tucss = hdr_len;
			TXD->upper_setup.tcp_fields.tucse = htole16(0);
			TXD->upper_setup.tcp_fields.tucso =
			    hdr_len + offsetof(struct udphdr, uh_sum);
		}
		/* Fall Thru */
	}
	default:
		break;
	}

	/* No context descriptor was started (no offload flags requested). */
	if (TXD == NULL)
		return;
	/* Finish the context descriptor and consume one ring slot for it. */
	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length =
	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
}
2938
2939
2940 /**********************************************************************
2941  *
2942  *  Examine each tx_buffer in the used queue. If the hardware is done
2943  *  processing the packet then free associated resources. The
2944  *  tx_buffer is put back on the free queue.
2945  *
2946  **********************************************************************/
/*
 * Reclaim completed transmit descriptors.  Packets are walked in
 * submission order; a packet is done when the descriptor at its
 * recorded EOP index has the DD (descriptor done) status bit set,
 * at which point all of its descriptors are cleared and the mbuf is
 * freed.  Caller must hold the TX lock (asserted below).
 */
static void
lem_txeof(struct adapter *adapter)
{
	int first, last, done, num_avail;
	struct em_buffer *tx_buffer;
	struct e1000_tx_desc   *tx_desc, *eop_desc;
	struct ifnet   *ifp = adapter->ifp;

	EM_TX_LOCK_ASSERT(adapter);

#ifdef DEV_NETMAP
	/* In netmap mode, just wake the user process; it does the reclaim. */
	if (ifp->if_capenable & IFCAP_NETMAP) {
		selwakeuppri(&NA(ifp)->tx_rings[0].si, PI_NET);
		return;
	}
#endif /* DEV_NETMAP */
	/* Whole ring free: nothing to clean. */
	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

	num_avail = adapter->num_tx_desc_avail;
	first = adapter->next_tx_to_clean;
	tx_desc = &adapter->tx_desc_base[first];
	tx_buffer = &adapter->tx_buffer_area[first];
	last = tx_buffer->next_eop;
	eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the 
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
		last = 0;
	done = last;

	/* Pick up DD status bits the hardware has written back. */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++num_avail;

			/* Only the first descriptor of a packet has an mbuf. */
			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			}
			tx_buffer->next_eop = -1;
			/* Progress was made: pet the watchdog timestamp. */
			adapter->watchdog_time = ticks;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
			eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	adapter->next_tx_to_clean = first;
	adapter->num_tx_desc_avail = num_avail;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to
	 * tell the stack that it is OK to send packets.
	 * If there are no pending descriptors, clear the watchdog.
	 */
	if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
			adapter->watchdog_check = FALSE;
			return;
		}
	}
}
3043
3044 /*********************************************************************
3045  *
3046  *  When Link is lost sometimes there is work still in the TX ring
3047  *  which may result in a watchdog, rather than allow that we do an
3048  *  attempted cleanup and then reinit here. Note that this has been
3049  *  seens mostly with fiber adapters.
3050  *
3051  **********************************************************************/
3052 static void
3053 lem_tx_purge(struct adapter *adapter)
3054 {
3055         if ((!adapter->link_active) && (adapter->watchdog_check)) {
3056                 EM_TX_LOCK(adapter);
3057                 lem_txeof(adapter);
3058                 EM_TX_UNLOCK(adapter);
3059                 if (adapter->watchdog_check) /* Still outstanding? */
3060                         lem_init_locked(adapter);
3061         }
3062 }
3063
3064 /*********************************************************************
3065  *
3066  *  Get a buffer from system mbuf buffer pool.
3067  *
3068  **********************************************************************/
3069 static int
3070 lem_get_buf(struct adapter *adapter, int i)
3071 {
3072         struct mbuf             *m;
3073         bus_dma_segment_t       segs[1];
3074         bus_dmamap_t            map;
3075         struct em_buffer        *rx_buffer;
3076         int                     error, nsegs;
3077
3078         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3079         if (m == NULL) {
3080                 adapter->mbuf_cluster_failed++;
3081                 return (ENOBUFS);
3082         }
3083         m->m_len = m->m_pkthdr.len = MCLBYTES;
3084
3085         if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3086                 m_adj(m, ETHER_ALIGN);
3087
3088         /*
3089          * Using memory from the mbuf cluster pool, invoke the
3090          * bus_dma machinery to arrange the memory mapping.
3091          */
3092         error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3093             adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3094         if (error != 0) {
3095                 m_free(m);
3096                 return (error);
3097         }
3098
3099         /* If nsegs is wrong then the stack is corrupt. */
3100         KASSERT(nsegs == 1, ("Too many segments returned!"));
3101
3102         rx_buffer = &adapter->rx_buffer_area[i];
3103         if (rx_buffer->m_head != NULL)
3104                 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3105
3106         map = rx_buffer->map;
3107         rx_buffer->map = adapter->rx_sparemap;
3108         adapter->rx_sparemap = map;
3109         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3110         rx_buffer->m_head = m;
3111
3112         adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3113         return (0);
3114 }
3115
3116 /*********************************************************************
3117  *
3118  *  Allocate memory for rx_buffer structures. Since we use one
3119  *  rx_buffer per received packet, the maximum number of rx_buffer's
3120  *  that we'll need is equal to the number of receive descriptors
3121  *  that we've allocated.
3122  *
3123  **********************************************************************/
3124 static int
3125 lem_allocate_receive_structures(struct adapter *adapter)
3126 {
3127         device_t dev = adapter->dev;
3128         struct em_buffer *rx_buffer;
3129         int i, error;
3130
3131         adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3132             adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3133         if (adapter->rx_buffer_area == NULL) {
3134                 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3135                 return (ENOMEM);
3136         }
3137
3138         error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3139                                 1, 0,                   /* alignment, bounds */
3140                                 BUS_SPACE_MAXADDR,      /* lowaddr */
3141                                 BUS_SPACE_MAXADDR,      /* highaddr */
3142                                 NULL, NULL,             /* filter, filterarg */
3143                                 MCLBYTES,               /* maxsize */
3144                                 1,                      /* nsegments */
3145                                 MCLBYTES,               /* maxsegsize */
3146                                 0,                      /* flags */
3147                                 NULL,                   /* lockfunc */
3148                                 NULL,                   /* lockarg */
3149                                 &adapter->rxtag);
3150         if (error) {
3151                 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3152                     __func__, error);
3153                 goto fail;
3154         }
3155
3156         /* Create the spare map (used by getbuf) */
3157         error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3158              &adapter->rx_sparemap);
3159         if (error) {
3160                 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3161                     __func__, error);
3162                 goto fail;
3163         }
3164
3165         rx_buffer = adapter->rx_buffer_area;
3166         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3167                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3168                     &rx_buffer->map);
3169                 if (error) {
3170                         device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3171                             __func__, error);
3172                         goto fail;
3173                 }
3174         }
3175
3176         return (0);
3177
3178 fail:
3179         lem_free_receive_structures(adapter);
3180         return (error);
3181 }
3182
3183 /*********************************************************************
3184  *
3185  *  (Re)initialize receive structures.
3186  *
3187  **********************************************************************/
3188 static int
3189 lem_setup_receive_structures(struct adapter *adapter)
3190 {
3191         struct em_buffer *rx_buffer;
3192         int i, error;
3193 #ifdef DEV_NETMAP
3194         /* we are already under lock */
3195         struct netmap_adapter *na = NA(adapter->ifp);
3196         struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
3197 #endif
3198
3199         /* Reset descriptor ring */
3200         bzero(adapter->rx_desc_base,
3201             (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3202
3203         /* Free current RX buffers. */
3204         rx_buffer = adapter->rx_buffer_area;
3205         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3206                 if (rx_buffer->m_head != NULL) {
3207                         bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3208                             BUS_DMASYNC_POSTREAD);
3209                         bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3210                         m_freem(rx_buffer->m_head);
3211                         rx_buffer->m_head = NULL;
3212                 }
3213         }
3214
3215         /* Allocate new ones. */
3216         for (i = 0; i < adapter->num_rx_desc; i++) {
3217 #ifdef DEV_NETMAP
3218                 if (slot) {
3219                         /* the i-th NIC entry goes to slot si */
3220                         int si = netmap_idx_n2k(&na->rx_rings[0], i);
3221                         uint64_t paddr;
3222                         void *addr;
3223
3224                         addr = PNMB(slot + si, &paddr);
3225                         netmap_load_map(adapter->rxtag, rx_buffer->map, addr);
3226                         /* Update descriptor */
3227                         adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
3228                         continue;
3229                 }
3230 #endif /* DEV_NETMAP */
3231                 error = lem_get_buf(adapter, i);
3232                 if (error)
3233                         return (error);
3234         }
3235
3236         /* Setup our descriptor pointers */
3237         adapter->next_rx_desc_to_check = 0;
3238         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3239             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3240
3241         return (0);
3242 }
3243
/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
/* Interrupt throttle target; ITR granularity is 256ns per unit. */
#define MAX_INTS_PER_SEC	8000
#define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)

/*
 * Program the RX side of the chip: descriptor ring base/length,
 * receive control (buffer size, broadcast accept, long-packet enable),
 * optional RX checksum offload, and finally the head/tail pointers.
 */
static void
lem_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	u64	bus_addr;
	u32	rctl, rxcsum;

	INIT_DEBUGOUT("lem_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/* RADV/ITR moderation registers only exist on 82540 and later. */
	if (adapter->hw.mac.type >= e1000_82540) {
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	/* 82543 TBI workaround may require accepting bad packets. */
	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* Map software buffer size onto the hardware size encoding. */
	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	/* Long Packet Enable follows the configured MTU. */
	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
#ifdef DEV_NETMAP
	/* preserve buffers already made available to clients */
	if (ifp->if_capenable & IFCAP_NETMAP) {
		struct netmap_adapter *na = NA(adapter->ifp);
		struct netmap_kring *kring = &na->rx_rings[0];
		/* tail excludes slots netmap has not yet returned */
		int t = na->num_rx_desc - 1 - kring->nr_hwavail;

		if (t >= na->num_rx_desc)
			t -= na->num_rx_desc;
		E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), t);
	} else
#endif /* DEV_NETMAP */
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);

	return;
}
3357
3358 /*********************************************************************
3359  *
3360  *  Free receive related data structures.
3361  *
3362  **********************************************************************/
3363 static void
3364 lem_free_receive_structures(struct adapter *adapter)
3365 {
3366         struct em_buffer *rx_buffer;
3367         int i;
3368
3369         INIT_DEBUGOUT("free_receive_structures: begin");
3370
3371         if (adapter->rx_sparemap) {
3372                 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3373                 adapter->rx_sparemap = NULL;
3374         }
3375
3376         /* Cleanup any existing buffers */
3377         if (adapter->rx_buffer_area != NULL) {
3378                 rx_buffer = adapter->rx_buffer_area;
3379                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3380                         if (rx_buffer->m_head != NULL) {
3381                                 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3382                                     BUS_DMASYNC_POSTREAD);
3383                                 bus_dmamap_unload(adapter->rxtag,
3384                                     rx_buffer->map);
3385                                 m_freem(rx_buffer->m_head);
3386                                 rx_buffer->m_head = NULL;
3387                         } else if (rx_buffer->map != NULL)
3388                                 bus_dmamap_unload(adapter->rxtag,
3389                                     rx_buffer->map);
3390                         if (rx_buffer->map != NULL) {
3391                                 bus_dmamap_destroy(adapter->rxtag,
3392                                     rx_buffer->map);
3393                                 rx_buffer->map = NULL;
3394                         }
3395                 }
3396         }
3397
3398         if (adapter->rx_buffer_area != NULL) {
3399                 free(adapter->rx_buffer_area, M_DEVBUF);
3400                 adapter->rx_buffer_area = NULL;
3401         }
3402
3403         if (adapter->rxtag != NULL) {
3404                 bus_dma_tag_destroy(adapter->rxtag);
3405                 adapter->rxtag = NULL;
3406         }
3407 }
3408
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *  
 *  For polling we also now return the number of cleaned packets
 *
 *  Returns TRUE when the last descriptor examined still had its DD
 *  bit set (more work may be pending), FALSE otherwise.  If 'done'
 *  is non-NULL it receives the number of packets handed to the stack.
 *********************************************************************/
static bool
lem_rxeof(struct adapter *adapter, int count, int *done)
{
	struct ifnet	*ifp = adapter->ifp;
	struct mbuf	*mp;
	u8		status = 0, accept_frame = 0, eop = 0;
	u16		len, desc_len, prev_len_adj;
	int		i, rx_sent = 0;
	struct e1000_rx_desc   *current_desc;

	EM_RX_LOCK(adapter);
	i = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

#ifdef DEV_NETMAP
	/* In netmap mode just wake the client; it consumes the ring. */
	if (ifp->if_capenable & IFCAP_NETMAP) {
		struct netmap_adapter *na = NA(ifp);
		na->rx_rings[0].nr_kflags |= NKR_PENDINTR;
		selwakeuppri(&na->rx_rings[0].si, PI_NET);
		EM_RX_UNLOCK(adapter);
		return (0);
	}
#endif /* DEV_NETMAP */

	/* Nothing ready: report zero progress and bail early. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		if (done != NULL)
			*done = rx_sent;
		EM_RX_UNLOCK(adapter);
		return (FALSE);
	}

	/* 'count' is only decremented on EOP, i.e. per packet. */
	while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
		struct mbuf *m = NULL;

		status = current_desc->status;
		if ((status & E1000_RXD_STAT_DD) == 0)
			break;

		mp = adapter->rx_buffer_area[i].m_head;
		/*
		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
		 * needs to access the last received byte in the mbuf.
		 */
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);

		accept_frame = 1;
		prev_len_adj = 0;
		desc_len = le16toh(current_desc->length);
		if (status & E1000_RXD_STAT_EOP) {
			count--;
			eop = 1;
			/*
			 * Strip the CRC; if this final fragment is shorter
			 * than the CRC, the remainder must come off the
			 * previous mbuf in the chain (prev_len_adj).
			 */
			if (desc_len < ETHER_CRC_LEN) {
				len = 0;
				prev_len_adj = ETHER_CRC_LEN - desc_len;
			} else
				len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		/* Errored frame: maybe still acceptable per TBI workaround. */
		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			u8	last_byte;
			u32	pkt_len = desc_len;

			if (adapter->fmp != NULL)
				pkt_len += adapter->fmp->m_pkthdr.len;

			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
			if (TBI_ACCEPT(&adapter->hw, status,
			    current_desc->errors, pkt_len, last_byte,
			    adapter->min_frame_size, adapter->max_frame_size)) {
				e1000_tbi_adjust_stats_82543(&adapter->hw,
				    &adapter->stats, pkt_len,
				    adapter->hw.mac.addr,
				    adapter->max_frame_size);
				if (len > 0)
					len--;
			} else
				accept_frame = 0;
		}

		if (accept_frame) {
			/* Refill the slot first; keep old mbuf on failure. */
			if (lem_get_buf(adapter, i) != 0) {
				ifp->if_iqdrops++;
				goto discard;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp; /* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				/*
				 * Adjust length of previous mbuf in chain if
				 * we received less than 4 bytes in the last
				 * descriptor.
				 */
				if (prev_len_adj > 0) {
					adapter->lmp->m_len -= prev_len_adj;
					adapter->fmp->m_pkthdr.len -=
					    prev_len_adj;
				}
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			/* End of packet: finalize and hand up the chain. */
			if (eop) {
				adapter->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				lem_receive_checksum(adapter, current_desc,
				    adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
				if (adapter->max_frame_size >
				    (MCLBYTES - ETHER_ALIGN) &&
				    lem_fixup_rx(adapter) != 0)
					goto skip;
#endif
				if (status & E1000_RXD_STAT_VP) {
					adapter->fmp->m_pkthdr.ether_vtag =
					    le16toh(current_desc->special);
					adapter->fmp->m_flags |= M_VLANTAG;
				}
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
				m = adapter->fmp;
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
		} else {
			adapter->dropped_pkts++;
discard:
			/* Reuse loaded DMA map and just update mbuf chain */
			mp = adapter->rx_buffer_area[i].m_head;
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_next = NULL;
			if (adapter->max_frame_size <=
			    (MCLBYTES - ETHER_ALIGN))
				m_adj(mp, ETHER_ALIGN);
			/* Drop any partial chain built so far. */
			if (adapter->fmp != NULL) {
				m_freem(adapter->fmp);
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			m = NULL;
		}

		/* Zero out the receive descriptors status. */
		current_desc->status = 0;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		/*
		 * Call into the stack -- the RX lock is dropped around
		 * if_input(), so re-read the ring index afterwards.
		 */
		if (m != NULL) {
			adapter->next_rx_desc_to_check = i;
			EM_RX_UNLOCK(adapter);
			(*ifp->if_input)(ifp, m);
			EM_RX_LOCK(adapter);
			rx_sent++;
			i = adapter->next_rx_desc_to_check;
		}
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
	if (done != NULL)
		*done = rx_sent;
	EM_RX_UNLOCK(adapter);
	return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
}
3608
3609 #ifndef __NO_STRICT_ALIGNMENT
3610 /*
3611  * When jumbo frames are enabled we should realign entire payload on
3612  * architecures with strict alignment. This is serious design mistake of 8254x
3613  * as it nullifies DMA operations. 8254x just allows RX buffer size to be
3614  * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
3615  * payload. On architecures without strict alignment restrictions 8254x still
3616  * performs unaligned memory access which would reduce the performance too.
3617  * To avoid copying over an entire frame to align, we allocate a new mbuf and
3618  * copy ethernet header to the new mbuf. The new mbuf is prepended into the
3619  * existing mbuf chain.
3620  *
3621  * Be aware, best performance of the 8254x is achived only when jumbo frame is
3622  * not used at all on architectures with strict alignment.
3623  */
3624 static int
3625 lem_fixup_rx(struct adapter *adapter)
3626 {
3627         struct mbuf *m, *n;
3628         int error;
3629
3630         error = 0;
3631         m = adapter->fmp;
3632         if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
3633                 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3634                 m->m_data += ETHER_HDR_LEN;
3635         } else {
3636                 MGETHDR(n, M_NOWAIT, MT_DATA);
3637                 if (n != NULL) {
3638                         bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3639                         m->m_data += ETHER_HDR_LEN;
3640                         m->m_len -= ETHER_HDR_LEN;
3641                         n->m_len = ETHER_HDR_LEN;
3642                         M_MOVE_PKTHDR(n, m);
3643                         n->m_next = m;
3644                         adapter->fmp = n;
3645                 } else {
3646                         adapter->dropped_pkts++;
3647                         m_freem(adapter->fmp);
3648                         adapter->fmp = NULL;
3649                         error = ENOMEM;
3650                 }
3651         }
3652
3653         return (error);
3654 }
3655 #endif
3656
3657 /*********************************************************************
3658  *
3659  *  Verify that the hardware indicated that the checksum is valid.
3660  *  Inform the stack about the status of checksum so that stack
3661  *  doesn't spend time verifying the checksum.
3662  *
3663  *********************************************************************/
3664 static void
3665 lem_receive_checksum(struct adapter *adapter,
3666             struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3667 {
3668         /* 82543 or newer only */
3669         if ((adapter->hw.mac.type < e1000_82543) ||
3670             /* Ignore Checksum bit is set */
3671             (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3672                 mp->m_pkthdr.csum_flags = 0;
3673                 return;
3674         }
3675
3676         if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3677                 /* Did it pass? */
3678                 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3679                         /* IP Checksum Good */
3680                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3681                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3682
3683                 } else {
3684                         mp->m_pkthdr.csum_flags = 0;
3685                 }
3686         }
3687
3688         if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3689                 /* Did it pass? */
3690                 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3691                         mp->m_pkthdr.csum_flags |=
3692                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3693                         mp->m_pkthdr.csum_data = htons(0xffff);
3694                 }
3695         }
3696 }
3697
3698 /*
3699  * This routine is run via an vlan
3700  * config EVENT
3701  */
3702 static void
3703 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3704 {
3705         struct adapter  *adapter = ifp->if_softc;
3706         u32             index, bit;
3707
3708         if (ifp->if_softc !=  arg)   /* Not our event */
3709                 return;
3710
3711         if ((vtag == 0) || (vtag > 4095))       /* Invalid ID */
3712                 return;
3713
3714         EM_CORE_LOCK(adapter);
3715         index = (vtag >> 5) & 0x7F;
3716         bit = vtag & 0x1F;
3717         adapter->shadow_vfta[index] |= (1 << bit);
3718         ++adapter->num_vlans;
3719         /* Re-init to load the changes */
3720         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3721                 lem_init_locked(adapter);
3722         EM_CORE_UNLOCK(adapter);
3723 }
3724
3725 /*
3726  * This routine is run via an vlan
3727  * unconfig EVENT
3728  */
3729 static void
3730 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3731 {
3732         struct adapter  *adapter = ifp->if_softc;
3733         u32             index, bit;
3734
3735         if (ifp->if_softc !=  arg)
3736                 return;
3737
3738         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3739                 return;
3740
3741         EM_CORE_LOCK(adapter);
3742         index = (vtag >> 5) & 0x7F;
3743         bit = vtag & 0x1F;
3744         adapter->shadow_vfta[index] &= ~(1 << bit);
3745         --adapter->num_vlans;
3746         /* Re-init to load the changes */
3747         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3748                 lem_init_locked(adapter);
3749         EM_CORE_UNLOCK(adapter);
3750 }
3751
3752 static void
3753 lem_setup_vlan_hw_support(struct adapter *adapter)
3754 {
3755         struct e1000_hw *hw = &adapter->hw;
3756         u32             reg;
3757
3758         /*
3759         ** We get here thru init_locked, meaning
3760         ** a soft reset, this has already cleared
3761         ** the VFTA and other state, so if there
3762         ** have been no vlan's registered do nothing.
3763         */
3764         if (adapter->num_vlans == 0)
3765                 return;
3766
3767         /*
3768         ** A soft reset zero's out the VFTA, so
3769         ** we need to repopulate it now.
3770         */
3771         for (int i = 0; i < EM_VFTA_SIZE; i++)
3772                 if (adapter->shadow_vfta[i] != 0)
3773                         E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3774                             i, adapter->shadow_vfta[i]);
3775
3776         reg = E1000_READ_REG(hw, E1000_CTRL);
3777         reg |= E1000_CTRL_VME;
3778         E1000_WRITE_REG(hw, E1000_CTRL, reg);
3779
3780         /* Enable the Filter Table */
3781         reg = E1000_READ_REG(hw, E1000_RCTL);
3782         reg &= ~E1000_RCTL_CFIEN;
3783         reg |= E1000_RCTL_VFE;
3784         E1000_WRITE_REG(hw, E1000_RCTL, reg);
3785 }
3786
3787 static void
3788 lem_enable_intr(struct adapter *adapter)
3789 {
3790         struct e1000_hw *hw = &adapter->hw;
3791         u32 ims_mask = IMS_ENABLE_MASK;
3792
3793         E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
3794 }
3795
3796 static void
3797 lem_disable_intr(struct adapter *adapter)
3798 {
3799         struct e1000_hw *hw = &adapter->hw;
3800
3801         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3802 }
3803
3804 /*
3805  * Bit of a misnomer, what this really means is
3806  * to enable OS management of the system... aka
3807  * to disable special hardware management features 
3808  */
3809 static void
3810 lem_init_manageability(struct adapter *adapter)
3811 {
3812         /* A shared code workaround */
3813         if (adapter->has_manage) {
3814                 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3815                 /* disable hardware interception of ARP */
3816                 manc &= ~(E1000_MANC_ARP_EN);
3817                 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3818         }
3819 }
3820
3821 /*
3822  * Give control back to hardware management
3823  * controller if there is one.
3824  */
3825 static void
3826 lem_release_manageability(struct adapter *adapter)
3827 {
3828         if (adapter->has_manage) {
3829                 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3830
3831                 /* re-enable hardware interception of ARP */
3832                 manc |= E1000_MANC_ARP_EN;
3833                 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3834         }
3835 }
3836
3837 /*
3838  * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3839  * For ASF and Pass Through versions of f/w this means
3840  * that the driver is loaded. For AMT version type f/w
3841  * this means that the network i/f is open.
3842  */
3843 static void
3844 lem_get_hw_control(struct adapter *adapter)
3845 {
3846         u32 ctrl_ext;
3847
3848         ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3849         E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3850             ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3851         return;
3852 }
3853
3854 /*
3855  * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3856  * For ASF and Pass Through versions of f/w this means that
3857  * the driver is no longer loaded. For AMT versions of the
3858  * f/w this means that the network i/f is closed.
3859  */
3860 static void
3861 lem_release_hw_control(struct adapter *adapter)
3862 {
3863         u32 ctrl_ext;
3864
3865         if (!adapter->has_manage)
3866                 return;
3867
3868         ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3869         E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3870             ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3871         return;
3872 }
3873
3874 static int
3875 lem_is_valid_ether_addr(u8 *addr)
3876 {
3877         char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3878
3879         if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3880                 return (FALSE);
3881         }
3882
3883         return (TRUE);
3884 }
3885
3886 /*
3887 ** Parse the interface capabilities with regard
3888 ** to both system management and wake-on-lan for
3889 ** later use.
3890 */
static void
lem_get_wakeup(device_t dev)
{
        struct adapter  *adapter = device_get_softc(dev);
        u16             eeprom_data = 0, device_id, apme_mask;

        /* Manageability support comes from the shared-code probe. */
        adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
        apme_mask = EM_EEPROM_APME;

        /*
        ** Select which NVM word carries the APM-enable bit; both
        ** the word and the mask vary by MAC generation.
        */
        switch (adapter->hw.mac.type) {
        case e1000_82542:
        case e1000_82543:
                /* No wake-on-lan support on these parts. */
                break;
        case e1000_82544:
                e1000_read_nvm(&adapter->hw,
                    NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
                apme_mask = EM_82544_APME;
                break;
        case e1000_82546:
        case e1000_82546_rev_3:
                /* Dual-port: port B has its own init-control word. */
                if (adapter->hw.bus.func == 1) {
                        e1000_read_nvm(&adapter->hw,
                            NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
                        break;
                } else
                        e1000_read_nvm(&adapter->hw,
                            NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
                break;
        default:
                e1000_read_nvm(&adapter->hw,
                    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
                break;
        }
        /* APM enabled in NVM -> default to magic + multicast wake. */
        if (eeprom_data & apme_mask)
                adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
        /*
         * We have the eeprom settings, now apply the special cases
         * where the eeprom may be wrong or the board won't support
         * wake on lan on a particular port
         */
        device_id = pci_get_device(dev);
        switch (device_id) {
        case E1000_DEV_ID_82546GB_PCIE:
                adapter->wol = 0;
                break;
        case E1000_DEV_ID_82546EB_FIBER:
        case E1000_DEV_ID_82546GB_FIBER:
                /* Wake events only supported on port A for dual fiber
                 * regardless of eeprom setting */
                if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
                    E1000_STATUS_FUNC_1)
                        adapter->wol = 0;
                break;
        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
                /* if quad port adapter, disable WoL on all but port A */
                if (global_quad_port_a != 0)
                        adapter->wol = 0;
                /* Reset for multiple quad port adapters */
                if (++global_quad_port_a == 4)
                        global_quad_port_a = 0;
                break;
        }
        return;
}
3955
3956
3957 /*
3958  * Enable PCI Wake On Lan capability
3959  */
static void
lem_enable_wakeup(device_t dev)
{
        struct adapter  *adapter = device_get_softc(dev);
        struct ifnet    *ifp = adapter->ifp;
        u32             pmc, ctrl, ctrl_ext, rctl;
        u16             status;

        /* No PCI power-management capability -> nothing to arm. */
        if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
                return;

        /* Advertise the wakeup capability */
        ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
        ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
        E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
        /* NOTE(review): WUC is written again further down; this early
         * write looks redundant but is preserved — confirm intent. */
        E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);

        /* Keep the laser running on Fiber adapters */
        if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
            adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
                ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
                ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
                E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
        }

        /*
        ** Determine type of Wakeup: note that wol
        ** is set with all bits on by default.
        */
        if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
                adapter->wol &= ~E1000_WUFC_MAG;

        if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
                adapter->wol &= ~E1000_WUFC_MC;
        else {
                /* Multicast wake needs promiscuous multicast receive. */
                rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
                rctl |= E1000_RCTL_MPE;
                E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
        }

        /* pchlan parts arm wakeup through the PHY instead of the MAC. */
        if (adapter->hw.mac.type == e1000_pchlan) {
                if (lem_enable_phy_wakeup(adapter))
                        return;
        } else {
                E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
                E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
        }


        /* Request PME */
        status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
        status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
        if (ifp->if_capenable & IFCAP_WOL)
                status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
        pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);

        return;
}
4018
4019 /*
4020 ** WOL in the newer chipset interfaces (pchlan)
4021 ** require thing to be copied into the phy
4022 */
static int
lem_enable_phy_wakeup(struct adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 mreg, ret = 0;
        u16 preg;

        /* copy MAC RARs to PHY RARs */
        for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
                mreg = E1000_READ_REG(hw, E1000_RAL(i));
                e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
                e1000_write_phy_reg(hw, BM_RAR_M(i),
                    (u16)((mreg >> 16) & 0xFFFF));
                mreg = E1000_READ_REG(hw, E1000_RAH(i));
                e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
                e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
                    (u16)((mreg >> 16) & 0xFFFF));
        }

        /* copy MAC MTA to PHY MTA */
        for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
                mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
                e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
                e1000_write_phy_reg(hw, BM_MTA(i) + 1,
                    (u16)((mreg >> 16) & 0xFFFF));
        }

        /* configure PHY Rx Control register */
        /* Mirror the relevant MAC RCTL/CTRL bits into BM_RCTL. */
        e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
        mreg = E1000_READ_REG(hw, E1000_RCTL);
        if (mreg & E1000_RCTL_UPE)
                preg |= BM_RCTL_UPE;
        if (mreg & E1000_RCTL_MPE)
                preg |= BM_RCTL_MPE;
        preg &= ~(BM_RCTL_MO_MASK);
        if (mreg & E1000_RCTL_MO_3)
                preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
                                << BM_RCTL_MO_SHIFT);
        if (mreg & E1000_RCTL_BAM)
                preg |= BM_RCTL_BAM;
        if (mreg & E1000_RCTL_PMCF)
                preg |= BM_RCTL_PMCF;
        mreg = E1000_READ_REG(hw, E1000_CTRL);
        if (mreg & E1000_CTRL_RFCE)
                preg |= BM_RCTL_RFCE;
        e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);

        /* enable PHY wakeup in MAC register */
        E1000_WRITE_REG(hw, E1000_WUC,
            E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
        E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);

        /* configure and enable PHY wakeup in PHY registers */
        e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
        e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

        /* activate PHY wakeup */
        /* Must hold the PHY semaphore for the raw MDIC accesses below. */
        ret = hw->phy.ops.acquire(hw);
        if (ret) {
                printf("Could not acquire PHY\n");
                return ret;
        }
        /* Select the wakeup-enable page, then set the host-wakeup bits. */
        e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
                                 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
        ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
        if (ret) {
                printf("Could not read PHY page 769\n");
                goto out;
        }
        preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
        ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
        if (ret)
                printf("Could not set PHY Host Wakeup bit\n");
out:
        /* Always release the semaphore, success or failure. */
        hw->phy.ops.release(hw);

        return ret;
}
4101
4102 static void
4103 lem_led_func(void *arg, int onoff)
4104 {
4105         struct adapter  *adapter = arg;
4106
4107         EM_CORE_LOCK(adapter);
4108         if (onoff) {
4109                 e1000_setup_led(&adapter->hw);
4110                 e1000_led_on(&adapter->hw);
4111         } else {
4112                 e1000_led_off(&adapter->hw);
4113                 e1000_cleanup_led(&adapter->hw);
4114         }
4115         EM_CORE_UNLOCK(adapter);
4116 }
4117
4118 /*********************************************************************
4119 * 82544 Coexistence issue workaround.
4120 *    There are 2 issues.
4121 *       1. Transmit Hang issue.
4122 *    To detect this issue, following equation can be used...
4123 *         SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4124 *         If SUM[3:0] is in between 1 to 4, we will have this issue.
4125 *
4126 *       2. DAC issue.
4127 *    To detect this issue, following equation can be used...
4128 *         SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4129 *         If SUM[3:0] is in between 9 to c, we will have this issue.
4130 *
4131 *
4132 *    WORKAROUND:
4133 *         Make sure we do not have ending address
4134 *         as 1,2,3,4(Hang) or 9,a,b,c (DAC)
4135 *
4136 *************************************************************************/
4137 static u32
4138 lem_fill_descriptors (bus_addr_t address, u32 length,
4139                 PDESC_ARRAY desc_array)
4140 {
4141         u32 safe_terminator;
4142
4143         /* Since issue is sensitive to length and address.*/
4144         /* Let us first check the address...*/
4145         if (length <= 4) {
4146                 desc_array->descriptor[0].address = address;
4147                 desc_array->descriptor[0].length = length;
4148                 desc_array->elements = 1;
4149                 return (desc_array->elements);
4150         }
4151         safe_terminator = (u32)((((u32)address & 0x7) +
4152             (length & 0xF)) & 0xF);
4153         /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
4154         if (safe_terminator == 0   ||
4155         (safe_terminator > 4   &&
4156         safe_terminator < 9)   ||
4157         (safe_terminator > 0xC &&
4158         safe_terminator <= 0xF)) {
4159                 desc_array->descriptor[0].address = address;
4160                 desc_array->descriptor[0].length = length;
4161                 desc_array->elements = 1;
4162                 return (desc_array->elements);
4163         }
4164
4165         desc_array->descriptor[0].address = address;
4166         desc_array->descriptor[0].length = length - 4;
4167         desc_array->descriptor[1].address = address + (length - 4);
4168         desc_array->descriptor[1].length = 4;
4169         desc_array->elements = 2;
4170         return (desc_array->elements);
4171 }
4172
4173 /**********************************************************************
4174  *
4175  *  Update the board statistics counters.
4176  *
4177  **********************************************************************/
static void
lem_update_stats_counters(struct adapter *adapter)
{
        struct ifnet   *ifp;

        /* Symbol/sequence errors are only meaningful on copper or
         * when the link is up. */
        if(adapter->hw.phy.media_type == e1000_media_type_copper ||
           (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
                adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
                adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
        }
        /* Accumulate the hardware counters (reads clear them). */
        adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
        adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
        adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
        adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);

        adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
        adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
        adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
        adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
        adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
        adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
        adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
        adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
        adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
        adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
        adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
        adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
        adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
        adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
        adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
        adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
        adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
        adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
        adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
        adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);

        /* For the 64-bit byte counters the low dword must be read first. */
        /* Both registers clear on the read of the high dword */

        adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) +
            ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32);
        adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) +
            ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32);

        adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
        adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
        adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
        adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
        adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);

        /* NOTE(review): only the high dwords of TOR/TOT are read here,
         * unlike GORC/GOTC above — confirm this matches intent. */
        adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
        adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);

        adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
        adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
        adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
        adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
        adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
        adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
        adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
        adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
        adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
        adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);

        /* These counters only exist on 82543 and newer MACs. */
        if (adapter->hw.mac.type >= e1000_82543) {
                adapter->stats.algnerrc += 
                E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
                adapter->stats.rxerrc += 
                E1000_READ_REG(&adapter->hw, E1000_RXERRC);
                adapter->stats.tncrs += 
                E1000_READ_REG(&adapter->hw, E1000_TNCRS);
                adapter->stats.cexterr += 
                E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
                adapter->stats.tsctc += 
                E1000_READ_REG(&adapter->hw, E1000_TSCTC);
                adapter->stats.tsctfc += 
                E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
        }
        ifp = adapter->ifp;

        /* Publish the aggregate counters to the ifnet layer. */
        ifp->if_collisions = adapter->stats.colc;

        /* Rx Errors */
        ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
            adapter->stats.crcerrs + adapter->stats.algnerrc +
            adapter->stats.ruc + adapter->stats.roc +
            adapter->stats.mpc + adapter->stats.cexterr;

        /* Tx Errors */
        ifp->if_oerrors = adapter->stats.ecol +
            adapter->stats.latecol + adapter->watchdog_events;
}
4270
4271 /* Export a single 32-bit register via a read-only sysctl. */
4272 static int
4273 lem_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
4274 {
4275         struct adapter *adapter;
4276         u_int val;
4277
4278         adapter = oidp->oid_arg1;
4279         val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
4280         return (sysctl_handle_int(oidp, &val, 0, req));
4281 }
4282
4283 /*
4284  * Add sysctl variables, one per statistic, to the system.
4285  */
4286 static void
4287 lem_add_hw_stats(struct adapter *adapter)
4288 {
4289         device_t dev = adapter->dev;
4290
4291         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4292         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4293         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4294         struct e1000_hw_stats *stats = &adapter->stats;
4295
4296         struct sysctl_oid *stat_node;
4297         struct sysctl_oid_list *stat_list;
4298
4299         /* Driver Statistics */
4300         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail", 
4301                          CTLFLAG_RD, &adapter->mbuf_alloc_failed,
4302                          "Std mbuf failed");
4303         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail", 
4304                          CTLFLAG_RD, &adapter->mbuf_cluster_failed,
4305                          "Std mbuf cluster failed");
4306         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 
4307                         CTLFLAG_RD, &adapter->dropped_pkts,
4308                         "Driver dropped packets");
4309         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", 
4310                         CTLFLAG_RD, &adapter->no_tx_dma_setup,
4311                         "Driver tx dma failure in xmit");
4312         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail1",
4313                         CTLFLAG_RD, &adapter->no_tx_desc_avail1,
4314                         "Not enough tx descriptors failure in xmit");
4315         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_desc_fail2",
4316                         CTLFLAG_RD, &adapter->no_tx_desc_avail2,
4317                         "Not enough tx descriptors failure in xmit");
4318         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
4319                         CTLFLAG_RD, &adapter->rx_overruns,
4320                         "RX overruns");
4321         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
4322                         CTLFLAG_RD, &adapter->watchdog_events,
4323                         "Watchdog timeouts");
4324
4325         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
4326                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL,
4327                         lem_sysctl_reg_handler, "IU",
4328                         "Device Control Register");
4329         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
4330                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL,
4331                         lem_sysctl_reg_handler, "IU",
4332                         "Receiver Control Register");
4333         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
4334                         CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
4335                         "Flow Control High Watermark");
4336         SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", 
4337                         CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
4338                         "Flow Control Low Watermark");
4339         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_workaround",
4340                         CTLFLAG_RD, &adapter->tx_fifo_wrk_cnt,
4341                         "TX FIFO workaround events");
4342         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "fifo_reset",
4343                         CTLFLAG_RD, &adapter->tx_fifo_reset_cnt,
4344                         "TX FIFO resets");
4345
4346         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_head", 
4347                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(0),
4348                         lem_sysctl_reg_handler, "IU",
4349                         "Transmit Descriptor Head");
4350         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txd_tail", 
4351                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(0),
4352                         lem_sysctl_reg_handler, "IU",
4353                         "Transmit Descriptor Tail");
4354         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_head", 
4355                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(0),
4356                         lem_sysctl_reg_handler, "IU",
4357                         "Receive Descriptor Head");
4358         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxd_tail", 
4359                         CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(0),
4360                         lem_sysctl_reg_handler, "IU",
4361                         "Receive Descriptor Tail");
4362         
4363
4364         /* MAC stats get their own sub node */
4365
4366         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
4367                                     CTLFLAG_RD, NULL, "Statistics");
4368         stat_list = SYSCTL_CHILDREN(stat_node);
4369
4370         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
4371                         CTLFLAG_RD, &stats->ecol,
4372                         "Excessive collisions");
4373         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
4374                         CTLFLAG_RD, &stats->scc,
4375                         "Single collisions");
4376         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
4377                         CTLFLAG_RD, &stats->mcc,
4378                         "Multiple collisions");
4379         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
4380                         CTLFLAG_RD, &stats->latecol,
4381                         "Late collisions");
4382         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
4383                         CTLFLAG_RD, &stats->colc,
4384                         "Collision Count");
4385         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
4386                         CTLFLAG_RD, &adapter->stats.symerrs,
4387                         "Symbol Errors");
4388         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
4389                         CTLFLAG_RD, &adapter->stats.sec,
4390                         "Sequence Errors");
4391         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
4392                         CTLFLAG_RD, &adapter->stats.dc,
4393                         "Defer Count");
4394         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
4395                         CTLFLAG_RD, &adapter->stats.mpc,
4396                         "Missed Packets");
4397         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
4398                         CTLFLAG_RD, &adapter->stats.rnbc,
4399                         "Receive No Buffers");
4400         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
4401                         CTLFLAG_RD, &adapter->stats.ruc,
4402                         "Receive Undersize");
4403         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4404                         CTLFLAG_RD, &adapter->stats.rfc,
4405                         "Fragmented Packets Received ");
4406         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
4407                         CTLFLAG_RD, &adapter->stats.roc,
4408                         "Oversized Packets Received");
4409         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
4410                         CTLFLAG_RD, &adapter->stats.rjc,
4411                         "Recevied Jabber");
4412         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
4413                         CTLFLAG_RD, &adapter->stats.rxerrc,
4414                         "Receive Errors");
4415         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4416                         CTLFLAG_RD, &adapter->stats.crcerrs,
4417                         "CRC errors");
4418         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
4419                         CTLFLAG_RD, &adapter->stats.algnerrc,
4420                         "Alignment Errors");
4421         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
4422                         CTLFLAG_RD, &adapter->stats.cexterr,
4423                         "Collision/Carrier extension errors");
4424         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4425                         CTLFLAG_RD, &adapter->stats.xonrxc,
4426                         "XON Received");
4427         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4428                         CTLFLAG_RD, &adapter->stats.xontxc,
4429                         "XON Transmitted");
4430         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4431                         CTLFLAG_RD, &adapter->stats.xoffrxc,
4432                         "XOFF Received");
4433         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4434                         CTLFLAG_RD, &adapter->stats.xofftxc,
4435                         "XOFF Transmitted");
4436
4437         /* Packet Reception Stats */
4438         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
4439                         CTLFLAG_RD, &adapter->stats.tpr,
4440                         "Total Packets Received ");
4441         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
4442                         CTLFLAG_RD, &adapter->stats.gprc,
4443                         "Good Packets Received");
4444         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
4445                         CTLFLAG_RD, &adapter->stats.bprc,
4446                         "Broadcast Packets Received");
4447         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
4448                         CTLFLAG_RD, &adapter->stats.mprc,
4449                         "Multicast Packets Received");
4450         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4451                         CTLFLAG_RD, &adapter->stats.prc64,
4452                         "64 byte frames received ");
4453         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4454                         CTLFLAG_RD, &adapter->stats.prc127,
4455                         "65-127 byte frames received");
4456         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4457                         CTLFLAG_RD, &adapter->stats.prc255,
4458                         "128-255 byte frames received");
4459         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4460                         CTLFLAG_RD, &adapter->stats.prc511,
4461                         "256-511 byte frames received");
4462         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4463                         CTLFLAG_RD, &adapter->stats.prc1023,
4464                         "512-1023 byte frames received");
4465         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4466                         CTLFLAG_RD, &adapter->stats.prc1522,
4467                         "1023-1522 byte frames received");
4468         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
4469                         CTLFLAG_RD, &adapter->stats.gorc, 
4470                         "Good Octets Received");
4471
4472         /* Packet Transmission Stats */
4473         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4474                         CTLFLAG_RD, &adapter->stats.gotc, 
4475                         "Good Octets Transmitted"); 
4476         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4477                         CTLFLAG_RD, &adapter->stats.tpt,
4478                         "Total Packets Transmitted");
4479         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4480                         CTLFLAG_RD, &adapter->stats.gptc,
4481                         "Good Packets Transmitted");
4482         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4483                         CTLFLAG_RD, &adapter->stats.bptc,
4484                         "Broadcast Packets Transmitted");
4485         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4486                         CTLFLAG_RD, &adapter->stats.mptc,
4487                         "Multicast Packets Transmitted");
4488         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4489                         CTLFLAG_RD, &adapter->stats.ptc64,
4490                         "64 byte frames transmitted ");
4491         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4492                         CTLFLAG_RD, &adapter->stats.ptc127,
4493                         "65-127 byte frames transmitted");
4494         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4495                         CTLFLAG_RD, &adapter->stats.ptc255,
4496                         "128-255 byte frames transmitted");
4497         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4498                         CTLFLAG_RD, &adapter->stats.ptc511,
4499                         "256-511 byte frames transmitted");
4500         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4501                         CTLFLAG_RD, &adapter->stats.ptc1023,
4502                         "512-1023 byte frames transmitted");
4503         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4504                         CTLFLAG_RD, &adapter->stats.ptc1522,
4505                         "1024-1522 byte frames transmitted");
4506         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
4507                         CTLFLAG_RD, &adapter->stats.tsctc,
4508                         "TSO Contexts Transmitted");
4509         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
4510                         CTLFLAG_RD, &adapter->stats.tsctfc,
4511                         "TSO Contexts Failed");
4512 }
4513
4514 /**********************************************************************
4515  *
4516  *  This routine provides a way to dump out the adapter eeprom,
4517  *  often a useful debug/service tool. This only dumps the first
4518  *  32 words, stuff that matters is in that extent.
4519  *
4520  **********************************************************************/
4521
4522 static int
4523 lem_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
4524 {
4525         struct adapter *adapter;
4526         int error;
4527         int result;
4528
4529         result = -1;
4530         error = sysctl_handle_int(oidp, &result, 0, req);
4531
4532         if (error || !req->newptr)
4533                 return (error);
4534
4535         /*
4536          * This value will cause a hex dump of the
4537          * first 32 16-bit words of the EEPROM to
4538          * the screen.
4539          */
4540         if (result == 1) {
4541                 adapter = (struct adapter *)arg1;
4542                 lem_print_nvm_info(adapter);
4543         }
4544
4545         return (error);
4546 }
4547
4548 static void
4549 lem_print_nvm_info(struct adapter *adapter)
4550 {
4551         u16     eeprom_data;
4552         int     i, j, row = 0;
4553
4554         /* Its a bit crude, but it gets the job done */
4555         printf("\nInterface EEPROM Dump:\n");
4556         printf("Offset\n0x0000  ");
4557         for (i = 0, j = 0; i < 32; i++, j++) {
4558                 if (j == 8) { /* Make the offset block */
4559                         j = 0; ++row;
4560                         printf("\n0x00%x0  ",row);
4561                 }
4562                 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4563                 printf("%04x ", eeprom_data);
4564         }
4565         printf("\n");
4566 }
4567
4568 static int
4569 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4570 {
4571         struct em_int_delay_info *info;
4572         struct adapter *adapter;
4573         u32 regval;
4574         int error;
4575         int usecs;
4576         int ticks;
4577
4578         info = (struct em_int_delay_info *)arg1;
4579         usecs = info->value;
4580         error = sysctl_handle_int(oidp, &usecs, 0, req);
4581         if (error != 0 || req->newptr == NULL)
4582                 return (error);
4583         if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4584                 return (EINVAL);
4585         info->value = usecs;
4586         ticks = EM_USECS_TO_TICKS(usecs);
4587
4588         adapter = info->adapter;
4589         
4590         EM_CORE_LOCK(adapter);
4591         regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4592         regval = (regval & ~0xffff) | (ticks & 0xffff);
4593         /* Handle a few special cases. */
4594         switch (info->offset) {
4595         case E1000_RDTR:
4596                 break;
4597         case E1000_TIDV:
4598                 if (ticks == 0) {
4599                         adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4600                         /* Don't write 0 into the TIDV register. */
4601                         regval++;
4602                 } else
4603                         adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4604                 break;
4605         }
4606         E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4607         EM_CORE_UNLOCK(adapter);
4608         return (0);
4609 }
4610
4611 static void
4612 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4613         const char *description, struct em_int_delay_info *info,
4614         int offset, int value)
4615 {
4616         info->adapter = adapter;
4617         info->offset = offset;
4618         info->value = value;
4619         SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4620             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4621             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4622             info, 0, lem_sysctl_int_delay, "I", description);
4623 }
4624
4625 static void
4626 lem_set_flow_cntrl(struct adapter *adapter, const char *name,
4627         const char *description, int *limit, int value)
4628 {
4629         *limit = value;
4630         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4631             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4632             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4633 }
4634
4635 static void
4636 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4637         const char *description, int *limit, int value)
4638 {
4639         *limit = value;
4640         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4641             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4642             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4643 }