1 /******************************************************************************
2
3   Copyright (c) 2001-2010, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
37 #include "opt_inet.h"
38 #endif
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/bus.h>
43 #include <sys/endian.h>
44 #include <sys/kernel.h>
45 #include <sys/kthread.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/module.h>
49 #include <sys/rman.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <sys/taskqueue.h>
54 #if __FreeBSD_version >= 700029
55 #include <sys/eventhandler.h>
56 #endif
57 #include <machine/bus.h>
58 #include <machine/resource.h>
59
60 #include <net/bpf.h>
61 #include <net/ethernet.h>
62 #include <net/if.h>
63 #include <net/if_arp.h>
64 #include <net/if_dl.h>
65 #include <net/if_media.h>
66
67 #include <net/if_types.h>
68 #include <net/if_vlan_var.h>
69
70 #include <netinet/in_systm.h>
71 #include <netinet/in.h>
72 #include <netinet/if_ether.h>
73 #include <netinet/ip.h>
74 #include <netinet/ip6.h>
75 #include <netinet/tcp.h>
76 #include <netinet/udp.h>
77
78 #include <machine/in_cksum.h>
79 #include <dev/led/led.h>
80 #include <dev/pci/pcivar.h>
81 #include <dev/pci/pcireg.h>
82
83 #include "e1000_api.h"
84 #include "if_lem.h"
85
86 /*********************************************************************
87  *  Set this to one to display debug statistics
88  *********************************************************************/
89 int     lem_display_debug_stats = 0;
90
91 /*********************************************************************
92  *  Legacy Em Driver version:
93  *********************************************************************/
94 char lem_driver_version[] = "1.0.1";
95
96
97 /*********************************************************************
98  *  PCI Device ID Table
99  *
100  *  Used by probe to select the devices to attach to.
101  *  The last field stores an index into lem_strings.
102  *  The last entry must be all 0s.
103  *
104  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
105  *********************************************************************/
106
107 static em_vendor_info_t lem_vendor_info_array[] =
108 {
109         /* Intel(R) PRO/1000 Network Connection */
110         { 0x8086, E1000_DEV_ID_82540EM,         PCI_ANY_ID, PCI_ANY_ID, 0},
111         { 0x8086, E1000_DEV_ID_82540EM_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
112         { 0x8086, E1000_DEV_ID_82540EP,         PCI_ANY_ID, PCI_ANY_ID, 0},
113         { 0x8086, E1000_DEV_ID_82540EP_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
114         { 0x8086, E1000_DEV_ID_82540EP_LP,      PCI_ANY_ID, PCI_ANY_ID, 0},
115
116         { 0x8086, E1000_DEV_ID_82541EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
117         { 0x8086, E1000_DEV_ID_82541ER,         PCI_ANY_ID, PCI_ANY_ID, 0},
118         { 0x8086, E1000_DEV_ID_82541ER_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
119         { 0x8086, E1000_DEV_ID_82541EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
120         { 0x8086, E1000_DEV_ID_82541GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
121         { 0x8086, E1000_DEV_ID_82541GI_LF,      PCI_ANY_ID, PCI_ANY_ID, 0},
122         { 0x8086, E1000_DEV_ID_82541GI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
123
124         { 0x8086, E1000_DEV_ID_82542,           PCI_ANY_ID, PCI_ANY_ID, 0},
125
126         { 0x8086, E1000_DEV_ID_82543GC_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
127         { 0x8086, E1000_DEV_ID_82543GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
128
129         { 0x8086, E1000_DEV_ID_82544EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
130         { 0x8086, E1000_DEV_ID_82544EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
131         { 0x8086, E1000_DEV_ID_82544GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
132         { 0x8086, E1000_DEV_ID_82544GC_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
133
134         { 0x8086, E1000_DEV_ID_82545EM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
135         { 0x8086, E1000_DEV_ID_82545EM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
136         { 0x8086, E1000_DEV_ID_82545GM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
137         { 0x8086, E1000_DEV_ID_82545GM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
138         { 0x8086, E1000_DEV_ID_82545GM_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
139
140         { 0x8086, E1000_DEV_ID_82546EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
141         { 0x8086, E1000_DEV_ID_82546EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
142         { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
143         { 0x8086, E1000_DEV_ID_82546GB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
144         { 0x8086, E1000_DEV_ID_82546GB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
145         { 0x8086, E1000_DEV_ID_82546GB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
146         { 0x8086, E1000_DEV_ID_82546GB_PCIE,    PCI_ANY_ID, PCI_ANY_ID, 0},
147         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
148         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
149                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
150
151         { 0x8086, E1000_DEV_ID_82547EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
152         { 0x8086, E1000_DEV_ID_82547EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
153         { 0x8086, E1000_DEV_ID_82547GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
154         /* required last entry */
155         { 0, 0, 0, 0, 0}
156 };
157
158 /*********************************************************************
159  *  Table of branding strings for all supported NICs.
160  *********************************************************************/
161
162 static char *lem_strings[] = {
163         "Intel(R) PRO/1000 Legacy Network Connection"
164 };
165
166 /*********************************************************************
167  *  Function prototypes
168  *********************************************************************/
169 static int      lem_probe(device_t);
170 static int      lem_attach(device_t);
171 static int      lem_detach(device_t);
172 static int      lem_shutdown(device_t);
173 static int      lem_suspend(device_t);
174 static int      lem_resume(device_t);
175 static void     lem_start(struct ifnet *);
176 static void     lem_start_locked(struct ifnet *ifp);
177 static int      lem_ioctl(struct ifnet *, u_long, caddr_t);
178 static void     lem_init(void *);
179 static void     lem_init_locked(struct adapter *);
180 static void     lem_stop(void *);
181 static void     lem_media_status(struct ifnet *, struct ifmediareq *);
182 static int      lem_media_change(struct ifnet *);
183 static void     lem_identify_hardware(struct adapter *);
184 static int      lem_allocate_pci_resources(struct adapter *);
185 static int      lem_allocate_irq(struct adapter *adapter);
186 static void     lem_free_pci_resources(struct adapter *);
187 static void     lem_local_timer(void *);
188 static int      lem_hardware_init(struct adapter *);
189 static void     lem_setup_interface(device_t, struct adapter *);
190 static void     lem_setup_transmit_structures(struct adapter *);
191 static void     lem_initialize_transmit_unit(struct adapter *);
192 static int      lem_setup_receive_structures(struct adapter *);
193 static void     lem_initialize_receive_unit(struct adapter *);
194 static void     lem_enable_intr(struct adapter *);
195 static void     lem_disable_intr(struct adapter *);
196 static void     lem_free_transmit_structures(struct adapter *);
197 static void     lem_free_receive_structures(struct adapter *);
198 static void     lem_update_stats_counters(struct adapter *);
199 static void     lem_txeof(struct adapter *);
200 static void     lem_tx_purge(struct adapter *);
201 static int      lem_allocate_receive_structures(struct adapter *);
202 static int      lem_allocate_transmit_structures(struct adapter *);
203 static int      lem_rxeof(struct adapter *, int);
204 #ifndef __NO_STRICT_ALIGNMENT
205 static int      lem_fixup_rx(struct adapter *);
206 #endif
207 static void     lem_receive_checksum(struct adapter *, struct e1000_rx_desc *,
208                     struct mbuf *);
209 static void     lem_transmit_checksum_setup(struct adapter *, struct mbuf *,
210                     u32 *, u32 *);
211 static void     lem_set_promisc(struct adapter *);
212 static void     lem_disable_promisc(struct adapter *);
213 static void     lem_set_multi(struct adapter *);
214 static void     lem_print_hw_stats(struct adapter *);
215 static void     lem_update_link_status(struct adapter *);
216 static int      lem_get_buf(struct adapter *, int);
217 #if __FreeBSD_version >= 700029
218 static void     lem_register_vlan(void *, struct ifnet *, u16);
219 static void     lem_unregister_vlan(void *, struct ifnet *, u16);
220 static void     lem_setup_vlan_hw_support(struct adapter *);
221 #endif
222 static int      lem_xmit(struct adapter *, struct mbuf **);
223 static void     lem_smartspeed(struct adapter *);
224 static int      lem_82547_fifo_workaround(struct adapter *, int);
225 static void     lem_82547_update_fifo_head(struct adapter *, int);
226 static int      lem_82547_tx_fifo_reset(struct adapter *);
227 static void     lem_82547_move_tail(void *);
228 static int      lem_dma_malloc(struct adapter *, bus_size_t,
229                     struct em_dma_alloc *, int);
230 static void     lem_dma_free(struct adapter *, struct em_dma_alloc *);
231 static void     lem_print_debug_info(struct adapter *);
232 static void     lem_print_nvm_info(struct adapter *);
233 static int      lem_is_valid_ether_addr(u8 *);
234 static int      lem_sysctl_stats(SYSCTL_HANDLER_ARGS);
235 static int      lem_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
236 static u32      lem_fill_descriptors (bus_addr_t address, u32 length,
237                     PDESC_ARRAY desc_array);
238 static int      lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
239 static void     lem_add_int_delay_sysctl(struct adapter *, const char *,
240                     const char *, struct em_int_delay_info *, int, int);
241 /* Management and WOL Support */
242 static void     lem_init_manageability(struct adapter *);
243 static void     lem_release_manageability(struct adapter *);
244 static void     lem_get_hw_control(struct adapter *);
245 static void     lem_release_hw_control(struct adapter *);
246 static void     lem_get_wakeup(device_t);
247 static void     lem_enable_wakeup(device_t);
248 static int      lem_enable_phy_wakeup(struct adapter *);
249 static void     lem_led_func(void *, int);
250
251 #ifdef EM_LEGACY_IRQ
252 static void     lem_intr(void *);
253 #else /* FAST IRQ */
254 #if __FreeBSD_version < 700000
255 static void     lem_irq_fast(void *);
256 #else
257 static int      lem_irq_fast(void *);
258 #endif
259 static void     lem_handle_rxtx(void *context, int pending);
260 static void     lem_handle_link(void *context, int pending);
261 static void     lem_add_rx_process_limit(struct adapter *, const char *,
262                     const char *, int *, int);
263 #endif /* ~EM_LEGACY_IRQ */
264
265 #ifdef DEVICE_POLLING
266 static poll_handler_t lem_poll;
267 #endif /* POLLING */
268
269 /*********************************************************************
270  *  FreeBSD Device Interface Entry Points
271  *********************************************************************/
272
273 static device_method_t lem_methods[] = {
274         /* Device interface */
275         DEVMETHOD(device_probe, lem_probe),
276         DEVMETHOD(device_attach, lem_attach),
277         DEVMETHOD(device_detach, lem_detach),
278         DEVMETHOD(device_shutdown, lem_shutdown),
279         DEVMETHOD(device_suspend, lem_suspend),
280         DEVMETHOD(device_resume, lem_resume),
281         {0, 0}
282 };
283
284 static driver_t lem_driver = {
285         "em", lem_methods, sizeof(struct adapter),
286 };
287
288 extern devclass_t em_devclass;
289 DRIVER_MODULE(lem, pci, lem_driver, em_devclass, 0, 0);
290 MODULE_DEPEND(lem, pci, 1, 1, 1);
291 MODULE_DEPEND(lem, ether, 1, 1, 1);
292
293 /*********************************************************************
294  *  Tunable default values.
295  *********************************************************************/
296
297 #define EM_TICKS_TO_USECS(ticks)        ((1024 * (ticks) + 500) / 1000)
298 #define EM_USECS_TO_TICKS(usecs)        ((1000 * (usecs) + 512) / 1024)
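
/*
 * Illustrative note: the adapter's interrupt delay registers count in units
 * of 1.024 usecs, so these macros round-convert between register ticks and
 * microseconds (hence the 1024/1000 factors).  For example:
 *
 *   EM_TICKS_TO_USECS(64) = (1024 * 64 + 500) / 1000 = 66 usecs
 *   EM_USECS_TO_TICKS(66) = (1000 * 66 + 512) / 1024 = 64 ticks
 */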
299
300 static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
301 static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
302 static int lem_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
303 static int lem_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
304 static int lem_rxd = EM_DEFAULT_RXD;
305 static int lem_txd = EM_DEFAULT_TXD;
306 static int lem_smart_pwr_down = FALSE;
307
308 /* Controls whether promiscuous mode also shows bad packets */
309 static int lem_debug_sbp = FALSE;
310
311 TUNABLE_INT("hw.em.tx_int_delay", &lem_tx_int_delay_dflt);
312 TUNABLE_INT("hw.em.rx_int_delay", &lem_rx_int_delay_dflt);
313 TUNABLE_INT("hw.em.tx_abs_int_delay", &lem_tx_abs_int_delay_dflt);
314 TUNABLE_INT("hw.em.rx_abs_int_delay", &lem_rx_abs_int_delay_dflt);
315 TUNABLE_INT("hw.em.rxd", &lem_rxd);
316 TUNABLE_INT("hw.em.txd", &lem_txd);
317 TUNABLE_INT("hw.em.smart_pwr_down", &lem_smart_pwr_down);
318 TUNABLE_INT("hw.em.sbp", &lem_debug_sbp);
319
320 #ifndef EM_LEGACY_IRQ
321 /* How many packets rxeof tries to clean at a time */
322 static int lem_rx_process_limit = 100;
323 TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
324 #endif
325
326 /* Flow control setting - default to FULL */
327 static int lem_fc_setting = e1000_fc_full;
328 TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
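
/*
 * Usage sketch (illustrative): TUNABLE_INT values are read from the kernel
 * environment at load time, so they are normally set in /boot/loader.conf,
 * for example:
 *
 *   hw.em.rxd="1024"
 *   hw.em.txd="1024"
 *   hw.em.rx_int_delay="0"
 *
 * The per-interface sysctls created later in lem_attach() (for example
 * dev.em.0.rx_int_delay) can then adjust some of these values at runtime.
 */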
329
330 /*
331 ** Shadow VFTA table; this is needed because
332 ** the real VLAN filter table gets cleared during
333 ** a soft reset and the driver needs to be able
334 ** to repopulate it.
335 */
336 static u32 lem_shadow_vfta[EM_VFTA_SIZE];
337
338 /* Global used in WOL setup with multiport cards */
339 static int global_quad_port_a = 0;
340
341 /*********************************************************************
342  *  Device identification routine
343  *
344  *  lem_probe determines if the driver should be loaded on the
345  *  adapter, based on the PCI vendor/device ID of the adapter.
346  *
347  *  return BUS_PROBE_DEFAULT on success, positive on failure
348  *********************************************************************/
349
350 static int
351 lem_probe(device_t dev)
352 {
353         char            adapter_name[60];
354         u16             pci_vendor_id = 0;
355         u16             pci_device_id = 0;
356         u16             pci_subvendor_id = 0;
357         u16             pci_subdevice_id = 0;
358         em_vendor_info_t *ent;
359
360         INIT_DEBUGOUT("em_probe: begin");
361
362         pci_vendor_id = pci_get_vendor(dev);
363         if (pci_vendor_id != EM_VENDOR_ID)
364                 return (ENXIO);
365
366         pci_device_id = pci_get_device(dev);
367         pci_subvendor_id = pci_get_subvendor(dev);
368         pci_subdevice_id = pci_get_subdevice(dev);
369
370         ent = lem_vendor_info_array;
371         while (ent->vendor_id != 0) {
372                 if ((pci_vendor_id == ent->vendor_id) &&
373                     (pci_device_id == ent->device_id) &&
374
375                     ((pci_subvendor_id == ent->subvendor_id) ||
376                     (ent->subvendor_id == PCI_ANY_ID)) &&
377
378                     ((pci_subdevice_id == ent->subdevice_id) ||
379                     (ent->subdevice_id == PCI_ANY_ID))) {
380                         sprintf(adapter_name, "%s %s",
381                                 lem_strings[ent->index],
382                                 lem_driver_version);
383                         device_set_desc_copy(dev, adapter_name);
384                         return (BUS_PROBE_DEFAULT);
385                 }
386                 ent++;
387         }
388
389         return (ENXIO);
390 }
391
392 /*********************************************************************
393  *  Device initialization routine
394  *
395  *  The attach entry point is called when the driver is being loaded.
396  *  This routine identifies the type of hardware, allocates all resources
397  *  and initializes the hardware.
398  *
399  *  return 0 on success, positive on failure
400  *********************************************************************/
401
402 static int
403 lem_attach(device_t dev)
404 {
405         struct adapter  *adapter;
406         int             tsize, rsize;
407         int             error = 0;
408
409         INIT_DEBUGOUT("lem_attach: begin");
410
411         adapter = device_get_softc(dev);
412         adapter->dev = adapter->osdep.dev = dev;
413         EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
414         EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
415         EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
416
417         /* SYSCTL stuff */
418         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
419             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
420             OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
421             lem_sysctl_debug_info, "I", "Debug Information");
422
423         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
424             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
425             OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
426             lem_sysctl_stats, "I", "Statistics");
427
428         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
429         callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
430
431         /* Determine hardware and mac info */
432         lem_identify_hardware(adapter);
433
434         /* Setup PCI resources */
435         if (lem_allocate_pci_resources(adapter)) {
436                 device_printf(dev, "Allocation of PCI resources failed\n");
437                 error = ENXIO;
438                 goto err_pci;
439         }
440
441         /* Do Shared Code initialization */
442         if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
443                 device_printf(dev, "Setup of Shared code failed\n");
444                 error = ENXIO;
445                 goto err_pci;
446         }
447
448         e1000_get_bus_info(&adapter->hw);
449
450         /* Set up some sysctls for the tunable interrupt delays */
451         lem_add_int_delay_sysctl(adapter, "rx_int_delay",
452             "receive interrupt delay in usecs", &adapter->rx_int_delay,
453             E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
454         lem_add_int_delay_sysctl(adapter, "tx_int_delay",
455             "transmit interrupt delay in usecs", &adapter->tx_int_delay,
456             E1000_REGISTER(&adapter->hw, E1000_TIDV), lem_tx_int_delay_dflt);
457         if (adapter->hw.mac.type >= e1000_82540) {
458                 lem_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
459                     "receive interrupt delay limit in usecs",
460                     &adapter->rx_abs_int_delay,
461                     E1000_REGISTER(&adapter->hw, E1000_RADV),
462                     lem_rx_abs_int_delay_dflt);
463                 lem_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
464                     "transmit interrupt delay limit in usecs",
465                     &adapter->tx_abs_int_delay,
466                     E1000_REGISTER(&adapter->hw, E1000_TADV),
467                     lem_tx_abs_int_delay_dflt);
468         }
469
470 #ifndef EM_LEGACY_IRQ
471         /* Sysctls for limiting the amount of work done in the taskqueue */
472         lem_add_rx_process_limit(adapter, "rx_processing_limit",
473             "max number of rx packets to process", &adapter->rx_process_limit,
474             lem_rx_process_limit);
475 #endif
476
477         /*
478          * Validate the number of transmit and receive descriptors. The
479          * count must not exceed the hardware maximum, and the ring size
480          * in bytes must be a multiple of EM_DBA_ALIGN.
481          */
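        /*
         * Worked example (illustrative, assuming EM_DBA_ALIGN is 128 and a
         * legacy descriptor is 16 bytes): any descriptor count that is a
         * multiple of 8 yields an aligned ring, e.g. 256 descriptors give
         * 256 * 16 = 4096 bytes, which is a multiple of 128.
         */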
482         if (((lem_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
483             (adapter->hw.mac.type >= e1000_82544 && lem_txd > EM_MAX_TXD) ||
484             (adapter->hw.mac.type < e1000_82544 && lem_txd > EM_MAX_TXD_82543) ||
485             (lem_txd < EM_MIN_TXD)) {
486                 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
487                     EM_DEFAULT_TXD, lem_txd);
488                 adapter->num_tx_desc = EM_DEFAULT_TXD;
489         } else
490                 adapter->num_tx_desc = lem_txd;
491         if (((lem_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
492             (adapter->hw.mac.type >= e1000_82544 && lem_rxd > EM_MAX_RXD) ||
493             (adapter->hw.mac.type < e1000_82544 && lem_rxd > EM_MAX_RXD_82543) ||
494             (lem_rxd < EM_MIN_RXD)) {
495                 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
496                     EM_DEFAULT_RXD, lem_rxd);
497                 adapter->num_rx_desc = EM_DEFAULT_RXD;
498         } else
499                 adapter->num_rx_desc = lem_rxd;
500
501         adapter->hw.mac.autoneg = DO_AUTO_NEG;
502         adapter->hw.phy.autoneg_wait_to_complete = FALSE;
503         adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
504         adapter->rx_buffer_len = 2048;
505
506         e1000_init_script_state_82541(&adapter->hw, TRUE);
507         e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
508
509         /* Copper options */
510         if (adapter->hw.phy.media_type == e1000_media_type_copper) {
511                 adapter->hw.phy.mdix = AUTO_ALL_MODES;
512                 adapter->hw.phy.disable_polarity_correction = FALSE;
513                 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
514         }
515
516         /*
517          * Set the frame limits assuming
518          * standard ethernet sized frames.
519          */
520         adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
521         adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
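        /*
         * For illustration, assuming the usual values ETHERMTU = 1500,
         * ETHER_HDR_LEN = 14, ETHERNET_FCS_SIZE = 4 and ETH_ZLEN = 60:
         * max_frame_size becomes 1500 + 14 + 4 = 1518 bytes (the classic
         * maximum Ethernet frame) and min_frame_size becomes 60 + 4 = 64.
         */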
522
523         /*
524          * This controls when hardware reports transmit completion
525          * status.
526          */
527         adapter->hw.mac.report_tx_early = 1;
528
529         tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
530             EM_DBA_ALIGN);
531
532         /* Allocate Transmit Descriptor ring */
533         if (lem_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
534                 device_printf(dev, "Unable to allocate tx_desc memory\n");
535                 error = ENOMEM;
536                 goto err_tx_desc;
537         }
538         adapter->tx_desc_base = 
539             (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
540
541         rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
542             EM_DBA_ALIGN);
543
544         /* Allocate Receive Descriptor ring */
545         if (lem_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
546                 device_printf(dev, "Unable to allocate rx_desc memory\n");
547                 error = ENOMEM;
548                 goto err_rx_desc;
549         }
550         adapter->rx_desc_base =
551             (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
552
553         /*
554         ** Start from a known state; this is
555         ** important for reading the NVM and
556         ** the MAC address from it.
557         */
558         e1000_reset_hw(&adapter->hw);
559
560         /* Make sure we have a good EEPROM before we read from it */
561         if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
562                 /*
563                 ** Some PCI-E parts fail the first check due to
564                 ** the link being in a sleep state; call it again,
565                 ** and if it fails a second time it's a real issue.
566                 */
567                 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
568                         device_printf(dev,
569                             "The EEPROM Checksum Is Not Valid\n");
570                         error = EIO;
571                         goto err_hw_init;
572                 }
573         }
574
575         /* Copy the permanent MAC address out of the EEPROM */
576         if (e1000_read_mac_addr(&adapter->hw) < 0) {
577                 device_printf(dev, "EEPROM read error while reading MAC"
578                     " address\n");
579                 error = EIO;
580                 goto err_hw_init;
581         }
582
583         if (!lem_is_valid_ether_addr(adapter->hw.mac.addr)) {
584                 device_printf(dev, "Invalid MAC address\n");
585                 error = EIO;
586                 goto err_hw_init;
587         }
588
589         /* Initialize the hardware */
590         if (lem_hardware_init(adapter)) {
591                 device_printf(dev, "Unable to initialize the hardware\n");
592                 error = EIO;
593                 goto err_hw_init;
594         }
595
596         /* Allocate transmit descriptors and buffers */
597         if (lem_allocate_transmit_structures(adapter)) {
598                 device_printf(dev, "Could not setup transmit structures\n");
599                 error = ENOMEM;
600                 goto err_tx_struct;
601         }
602
603         /* Allocate receive descriptors and buffers */
604         if (lem_allocate_receive_structures(adapter)) {
605                 device_printf(dev, "Could not setup receive structures\n");
606                 error = ENOMEM;
607                 goto err_rx_struct;
608         }
609
610         /*
611         **  Do interrupt configuration
612         */
613         error = lem_allocate_irq(adapter);
614         if (error)
615                 goto err_rx_struct;
616
617         /*
618          * Get Wake-on-LAN and Management info for later use
619          */
620         lem_get_wakeup(dev);
621
622         /* Setup OS specific network interface */
623         lem_setup_interface(dev, adapter);
624
625         /* Initialize statistics */
626         lem_update_stats_counters(adapter);
627
628         adapter->hw.mac.get_link_status = 1;
629         lem_update_link_status(adapter);
630
631         /* Indicate SOL/IDER usage */
632         if (e1000_check_reset_block(&adapter->hw))
633                 device_printf(dev,
634                     "PHY reset is blocked due to SOL/IDER session.\n");
635
636         /* Do we need the workaround for the 82544 PCI-X adapter? */
637         if (adapter->hw.bus.type == e1000_bus_type_pcix &&
638             adapter->hw.mac.type == e1000_82544)
639                 adapter->pcix_82544 = TRUE;
640         else
641                 adapter->pcix_82544 = FALSE;
642
643 #if __FreeBSD_version >= 700029
644         /* Register for VLAN events */
645         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
646             lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
647         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
648             lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); 
649 #endif
650
651         /* Non-AMT based hardware can now take control from firmware */
652         if (adapter->has_manage && !adapter->has_amt)
653                 lem_get_hw_control(adapter);
654
655         /* Tell the stack that the interface is not active */
656         adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
657
658         adapter->led_dev = led_create(lem_led_func, adapter,
659             device_get_nameunit(dev));
660
661         INIT_DEBUGOUT("lem_attach: end");
662
663         return (0);
664
665 err_rx_struct:
666         lem_free_transmit_structures(adapter);
667 err_tx_struct:
668 err_hw_init:
669         lem_release_hw_control(adapter);
670         lem_dma_free(adapter, &adapter->rxdma);
671 err_rx_desc:
672         lem_dma_free(adapter, &adapter->txdma);
673 err_tx_desc:
674 err_pci:
675         lem_free_pci_resources(adapter);
676         EM_TX_LOCK_DESTROY(adapter);
677         EM_RX_LOCK_DESTROY(adapter);
678         EM_CORE_LOCK_DESTROY(adapter);
679
680         return (error);
681 }
682
683 /*********************************************************************
684  *  Device removal routine
685  *
686  *  The detach entry point is called when the driver is being removed.
687  *  This routine stops the adapter and deallocates all the resources
688  *  that were allocated for driver operation.
689  *
690  *  return 0 on success, positive on failure
691  *********************************************************************/
692
693 static int
694 lem_detach(device_t dev)
695 {
696         struct adapter  *adapter = device_get_softc(dev);
697         struct ifnet    *ifp = adapter->ifp;
698
699         INIT_DEBUGOUT("em_detach: begin");
700
701         /* Make sure VLANs are not using the driver */
702 #if __FreeBSD_version >= 700000
703         if (adapter->ifp->if_vlantrunk != NULL) {
704 #else
705         if (adapter->ifp->if_nvlans != 0) {
706 #endif   
707                 device_printf(dev,"Vlan in use, detach first\n");
708                 return (EBUSY);
709         }
710
711 #ifdef DEVICE_POLLING
712         if (ifp->if_capenable & IFCAP_POLLING)
713                 ether_poll_deregister(ifp);
714 #endif
715
716         if (adapter->led_dev != NULL)
717                 led_destroy(adapter->led_dev);
718
719         EM_CORE_LOCK(adapter);
720         EM_TX_LOCK(adapter);
721         adapter->in_detach = 1;
722         lem_stop(adapter);
723         e1000_phy_hw_reset(&adapter->hw);
724
725         lem_release_manageability(adapter);
726
727         EM_TX_UNLOCK(adapter);
728         EM_CORE_UNLOCK(adapter);
729
730 #if __FreeBSD_version >= 700029
731         /* Unregister VLAN events */
732         if (adapter->vlan_attach != NULL)
733                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
734         if (adapter->vlan_detach != NULL)
735                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); 
736 #endif
737
738         ether_ifdetach(adapter->ifp);
739         callout_drain(&adapter->timer);
740         callout_drain(&adapter->tx_fifo_timer);
741
742         lem_free_pci_resources(adapter);
743         bus_generic_detach(dev);
744         if_free(ifp);
745
746         lem_free_transmit_structures(adapter);
747         lem_free_receive_structures(adapter);
748
749         /* Free Transmit Descriptor ring */
750         if (adapter->tx_desc_base) {
751                 lem_dma_free(adapter, &adapter->txdma);
752                 adapter->tx_desc_base = NULL;
753         }
754
755         /* Free Receive Descriptor ring */
756         if (adapter->rx_desc_base) {
757                 lem_dma_free(adapter, &adapter->rxdma);
758                 adapter->rx_desc_base = NULL;
759         }
760
761         lem_release_hw_control(adapter);
762         EM_TX_LOCK_DESTROY(adapter);
763         EM_RX_LOCK_DESTROY(adapter);
764         EM_CORE_LOCK_DESTROY(adapter);
765
766         return (0);
767 }
768
769 /*********************************************************************
770  *
771  *  Shutdown entry point
772  *
773  **********************************************************************/
774
775 static int
776 lem_shutdown(device_t dev)
777 {
778         return lem_suspend(dev);
779 }
780
781 /*
782  * Suspend/resume device methods.
783  */
784 static int
785 lem_suspend(device_t dev)
786 {
787         struct adapter *adapter = device_get_softc(dev);
788
789         EM_CORE_LOCK(adapter);
790
791         lem_release_manageability(adapter);
792         lem_release_hw_control(adapter);
793         lem_enable_wakeup(dev);
794
795         EM_CORE_UNLOCK(adapter);
796
797         return bus_generic_suspend(dev);
798 }
799
800 static int
801 lem_resume(device_t dev)
802 {
803         struct adapter *adapter = device_get_softc(dev);
804         struct ifnet *ifp = adapter->ifp;
805
806         EM_CORE_LOCK(adapter);
807         lem_init_locked(adapter);
808         lem_init_manageability(adapter);
809         EM_CORE_UNLOCK(adapter);
810         lem_start(ifp);
811
812         return bus_generic_resume(dev);
813 }
814
815
816 static void
817 lem_start_locked(struct ifnet *ifp)
818 {
819         struct adapter  *adapter = ifp->if_softc;
820         struct mbuf     *m_head;
821
822         EM_TX_LOCK_ASSERT(adapter);
823
824         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
825             IFF_DRV_RUNNING)
826                 return;
827         if (!adapter->link_active)
828                 return;
829
830         while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
831
832                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
833                 if (m_head == NULL)
834                         break;
835                 /*
836                  *  Encapsulation can modify our pointer, and/or make it
837                  *  NULL on failure.  In that event, we can't requeue.
838                  */
839                 if (lem_xmit(adapter, &m_head)) {
840                         if (m_head == NULL)
841                                 break;
842                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
843                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
844                         break;
845                 }
846
847                 /* Send a copy of the frame to the BPF listener */
848                 ETHER_BPF_MTAP(ifp, m_head);
849
850                 /* Set timeout in case hardware has problems transmitting. */
851                 adapter->watchdog_check = TRUE;
852                 adapter->watchdog_time = ticks;
853         }
854         if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
855                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
856
857         return;
858 }
859
860 static void
861 lem_start(struct ifnet *ifp)
862 {
863         struct adapter *adapter = ifp->if_softc;
864
865         EM_TX_LOCK(adapter);
866         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
867                 lem_start_locked(ifp);
868         EM_TX_UNLOCK(adapter);
869 }
870
871 /*********************************************************************
872  *  Ioctl entry point
873  *
874  *  lem_ioctl is called when the user wants to configure the
875  *  interface.
876  *
877  *  return 0 on success, positive on failure
878  **********************************************************************/
879
880 static int
881 lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
882 {
883         struct adapter  *adapter = ifp->if_softc;
884         struct ifreq *ifr = (struct ifreq *)data;
885 #ifdef INET
886         struct ifaddr *ifa = (struct ifaddr *)data;
887 #endif
888         int error = 0;
889
890         if (adapter->in_detach)
891                 return (error);
892
893         switch (command) {
894         case SIOCSIFADDR:
895 #ifdef INET
896                 if (ifa->ifa_addr->sa_family == AF_INET) {
897                         /*
898                          * XXX
899                          * Since resetting hardware takes a very long time
900                          * and results in link renegotiation, we
901                          * initialize the hardware only when it is absolutely
902                          * required.
903                          */
904                         ifp->if_flags |= IFF_UP;
905                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
906                                 EM_CORE_LOCK(adapter);
907                                 lem_init_locked(adapter);
908                                 EM_CORE_UNLOCK(adapter);
909                         }
910                         arp_ifinit(ifp, ifa);
911                 } else
912 #endif
913                         error = ether_ioctl(ifp, command, data);
914                 break;
915         case SIOCSIFMTU:
916             {
917                 int max_frame_size;
918
919                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
920
921                 EM_CORE_LOCK(adapter);
922                 switch (adapter->hw.mac.type) {
923                 case e1000_82542:
924                         max_frame_size = ETHER_MAX_LEN;
925                         break;
926                 default:
927                         max_frame_size = MAX_JUMBO_FRAME_SIZE;
928                 }
929                 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
930                     ETHER_CRC_LEN) {
931                         EM_CORE_UNLOCK(adapter);
932                         error = EINVAL;
933                         break;
934                 }
935
936                 ifp->if_mtu = ifr->ifr_mtu;
937                 adapter->max_frame_size =
938                     ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
939                 lem_init_locked(adapter);
940                 EM_CORE_UNLOCK(adapter);
941                 break;
942             }
943         case SIOCSIFFLAGS:
944                 IOCTL_DEBUGOUT("ioctl rcv'd:\
945                     SIOCSIFFLAGS (Set Interface Flags)");
946                 EM_CORE_LOCK(adapter);
947                 if (ifp->if_flags & IFF_UP) {
948                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
949                                 if ((ifp->if_flags ^ adapter->if_flags) &
950                                     (IFF_PROMISC | IFF_ALLMULTI)) {
951                                         lem_disable_promisc(adapter);
952                                         lem_set_promisc(adapter);
953                                 }
954                         } else
955                                 lem_init_locked(adapter);
956                 } else
957                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
958                                 EM_TX_LOCK(adapter);
959                                 lem_stop(adapter);
960                                 EM_TX_UNLOCK(adapter);
961                         }
962                 adapter->if_flags = ifp->if_flags;
963                 EM_CORE_UNLOCK(adapter);
964                 break;
965         case SIOCADDMULTI:
966         case SIOCDELMULTI:
967                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
968                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
969                         EM_CORE_LOCK(adapter);
970                         lem_disable_intr(adapter);
971                         lem_set_multi(adapter);
972                         if (adapter->hw.mac.type == e1000_82542 && 
973                             adapter->hw.revision_id == E1000_REVISION_2) {
974                                 lem_initialize_receive_unit(adapter);
975                         }
976 #ifdef DEVICE_POLLING
977                         if (!(ifp->if_capenable & IFCAP_POLLING))
978 #endif
979                                 lem_enable_intr(adapter);
980                         EM_CORE_UNLOCK(adapter);
981                 }
982                 break;
983         case SIOCSIFMEDIA:
984                 /* Check SOL/IDER usage */
985                 EM_CORE_LOCK(adapter);
986                 if (e1000_check_reset_block(&adapter->hw)) {
987                         EM_CORE_UNLOCK(adapter);
988                         device_printf(adapter->dev, "Media change is"
989                             " blocked due to SOL/IDER session.\n");
990                         break;
991                 }
992                 EM_CORE_UNLOCK(adapter);
993         case SIOCGIFMEDIA:
994                 IOCTL_DEBUGOUT("ioctl rcv'd: \
995                     SIOCxIFMEDIA (Get/Set Interface Media)");
996                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
997                 break;
998         case SIOCSIFCAP:
999             {
1000                 int mask, reinit;
1001
1002                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1003                 reinit = 0;
1004                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1005 #ifdef DEVICE_POLLING
1006                 if (mask & IFCAP_POLLING) {
1007                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
1008                                 error = ether_poll_register(lem_poll, ifp);
1009                                 if (error)
1010                                         return (error);
1011                                 EM_CORE_LOCK(adapter);
1012                                 lem_disable_intr(adapter);
1013                                 ifp->if_capenable |= IFCAP_POLLING;
1014                                 EM_CORE_UNLOCK(adapter);
1015                         } else {
1016                                 error = ether_poll_deregister(ifp);
1017                                 /* Enable interrupt even in error case */
1018                                 EM_CORE_LOCK(adapter);
1019                                 lem_enable_intr(adapter);
1020                                 ifp->if_capenable &= ~IFCAP_POLLING;
1021                                 EM_CORE_UNLOCK(adapter);
1022                         }
1023                 }
1024 #endif
1025                 if (mask & IFCAP_HWCSUM) {
1026                         ifp->if_capenable ^= IFCAP_HWCSUM;
1027                         reinit = 1;
1028                 }
1029                 if (mask & IFCAP_VLAN_HWTAGGING) {
1030                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1031                         reinit = 1;
1032                 }
1033                 if ((mask & IFCAP_WOL) &&
1034                     (ifp->if_capabilities & IFCAP_WOL) != 0) {
1035                         if (mask & IFCAP_WOL_MCAST)
1036                                 ifp->if_capenable ^= IFCAP_WOL_MCAST;
1037                         if (mask & IFCAP_WOL_MAGIC)
1038                                 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1039                 }
1040                 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1041                         lem_init(adapter);
1042 #if __FreeBSD_version >= 700000
1043                 VLAN_CAPABILITIES(ifp);
1044 #endif
1045                 break;
1046             }
1047
1048         default:
1049                 error = ether_ioctl(ifp, command, data);
1050                 break;
1051         }
1052
1053         return (error);
1054 }
1055
1056
1057 /*********************************************************************
1058  *  Init entry point
1059  *
1060  *  This routine is used in two ways. It is used by the stack as the
1061  *  init entry point in the network interface structure. It is also used
1062  *  by the driver as a hw/sw initialization routine to get to a
1063  *  consistent state.
1066  **********************************************************************/
1067
1068 static void
1069 lem_init_locked(struct adapter *adapter)
1070 {
1071         struct ifnet    *ifp = adapter->ifp;
1072         device_t        dev = adapter->dev;
1073         u32             pba;
1074
1075         INIT_DEBUGOUT("lem_init: begin");
1076
1077         EM_CORE_LOCK_ASSERT(adapter);
1078
1079         EM_TX_LOCK(adapter);
1080         lem_stop(adapter);
1081         EM_TX_UNLOCK(adapter);
1082
1083         /*
1084          * Packet Buffer Allocation (PBA)
1085          * Writing PBA sets the receive portion of the buffer;
1086          * the remainder is used for the transmit buffer.
1087          *
1088          * Devices before the 82547 had a Packet Buffer of 64K.
1089          *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1090          * Beginning with the 82547 the buffer was reduced to 40K.
1091          *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1092          *   Note: the default does not leave enough room for a Jumbo Frame >10k.
1093          */
1094         switch (adapter->hw.mac.type) {
1095         case e1000_82547:
1096         case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1097                 if (adapter->max_frame_size > 8192)
1098                         pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1099                 else
1100                         pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
1101                 adapter->tx_fifo_head = 0;
1102                 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1103                 adapter->tx_fifo_size =
1104                     (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1105                 break;
1106         default:
1107                 /* Devices before 82547 had a Packet Buffer of 64K.   */
1108                 if (adapter->max_frame_size > 8192)
1109                         pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1110                 else
1111                         pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1112         }
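        /*
         * Worked example (illustrative, assuming E1000_PBA_40K = 40,
         * E1000_PBA_30K = 30 and EM_PBA_BYTES_SHIFT = 10, i.e. KB to
         * bytes): with pba = E1000_PBA_30K on an 82547, the TX FIFO used
         * by the workaround is (40 - 30) << 10 = 10240 bytes, matching
         * the 30K Rx / 10K Tx split described above.
         */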
1113
1114         INIT_DEBUGOUT1("lem_init: pba=%dK",pba);
1115         E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1116         
1117         /* Get the latest mac address, User can use a LAA */
1118         bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1119               ETHER_ADDR_LEN);
1120
1121         /* Put the address into the Receive Address Array */
1122         e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1123
1124         /* Initialize the hardware */
1125         if (lem_hardware_init(adapter)) {
1126                 device_printf(dev, "Unable to initialize the hardware\n");
1127                 return;
1128         }
1129         lem_update_link_status(adapter);
1130
1131         /* Setup VLAN support, basic and offload if available */
1132         E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1133
1134 #if __FreeBSD_version < 700029
1135         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1136                 u32 ctrl;
1137                 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1138                 ctrl |= E1000_CTRL_VME;
1139                 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1140         }
1141 #else
1142         /* Use real VLAN Filter support */
1143         lem_setup_vlan_hw_support(adapter);
1144 #endif
1145
1146         /* Set hardware offload abilities */
1147         ifp->if_hwassist = 0;
1148         if (adapter->hw.mac.type >= e1000_82543) {
1149                 if (ifp->if_capenable & IFCAP_TXCSUM)
1150                         ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1151         }
1152
1153         /* Configure for OS presence */
1154         lem_init_manageability(adapter);
1155
1156         /* Prepare transmit descriptors and buffers */
1157         lem_setup_transmit_structures(adapter);
1158         lem_initialize_transmit_unit(adapter);
1159
1160         /* Setup Multicast table */
1161         lem_set_multi(adapter);
1162
1163         /* Prepare receive descriptors and buffers */
1164         if (lem_setup_receive_structures(adapter)) {
1165                 device_printf(dev, "Could not setup receive structures\n");
1166                 EM_TX_LOCK(adapter);
1167                 lem_stop(adapter);
1168                 EM_TX_UNLOCK(adapter);
1169                 return;
1170         }
1171         lem_initialize_receive_unit(adapter);
1172
1173         /* Don't lose promiscuous settings */
1174         lem_set_promisc(adapter);
1175
1176         ifp->if_drv_flags |= IFF_DRV_RUNNING;
1177         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1178
1179         callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1180         e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1181
1182         /* MSI/X configuration for 82574 */
1183         if (adapter->hw.mac.type == e1000_82574) {
1184                 int tmp;
1185                 tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1186                 tmp |= E1000_CTRL_EXT_PBA_CLR;
1187                 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1188                 /*
1189                 ** Set the IVAR - interrupt vector routing.
1190                 ** Each nibble represents a vector: the high bit
1191                 ** is the enable bit and the other 3 bits are the
1192                 ** MSI-X table entry. We map RXQ0 to 0, TXQ0 to 1,
1193                 ** and Link (other) to 2, hence the magic number.
1194                 */
1195                 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
1196         }
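        /*
         * Decoding the constant for illustration: read nibble by nibble
         * from the low end, 0x800A0908 has 0x8 in nibble 0 (RXQ0 -> vector
         * 0, enabled), 0x9 in nibble 2 (TXQ0 -> vector 1, enabled) and 0xA
         * in nibble 4 (Link/other -> vector 2, enabled); the remaining
         * nibbles are clear apart from the top bit of the register.
         */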
1197
1198 #ifdef DEVICE_POLLING
1199         /*
1200          * Only enable interrupts if we are not polling; make sure
1201          * they are off otherwise.
1202          */
1203         if (ifp->if_capenable & IFCAP_POLLING)
1204                 lem_disable_intr(adapter);
1205         else
1206 #endif /* DEVICE_POLLING */
1207                 lem_enable_intr(adapter);
1208
1209         /* AMT based hardware can now take control from firmware */
1210         if (adapter->has_manage && adapter->has_amt)
1211                 lem_get_hw_control(adapter);
1212
1213         /* Don't reset the phy next time init gets called */
1214         adapter->hw.phy.reset_disable = TRUE;
1215 }
1216
1217 static void
1218 lem_init(void *arg)
1219 {
1220         struct adapter *adapter = arg;
1221
1222         EM_CORE_LOCK(adapter);
1223         lem_init_locked(adapter);
1224         EM_CORE_UNLOCK(adapter);
1225 }
1226
1227
1228 #ifdef DEVICE_POLLING
1229 /*********************************************************************
1230  *
1231  *  Legacy polling routine  
1232  *
1233  *********************************************************************/
1234 static int
1235 lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1236 {
1237         struct adapter *adapter = ifp->if_softc;
1238         u32             reg_icr, rx_done = 0;
1239
1240         EM_CORE_LOCK(adapter);
1241         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1242                 EM_CORE_UNLOCK(adapter);
1243                 return (rx_done);
1244         }
1245
1246         if (cmd == POLL_AND_CHECK_STATUS) {
1247                 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1248                 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1249                         callout_stop(&adapter->timer);
1250                         adapter->hw.mac.get_link_status = 1;
1251                         lem_update_link_status(adapter);
1252                         callout_reset(&adapter->timer, hz,
1253                             lem_local_timer, adapter);
1254                 }
1255         }
1256         EM_CORE_UNLOCK(adapter);
1257
1258         rx_done = lem_rxeof(adapter, count);
1259
1260         EM_TX_LOCK(adapter);
1261         lem_txeof(adapter);
1262         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1263                 lem_start_locked(ifp);
1264         EM_TX_UNLOCK(adapter);
1265         return (rx_done);
1266 }
1267 #endif /* DEVICE_POLLING */
1268
1269 #ifdef EM_LEGACY_IRQ 
1270 /*********************************************************************
1271  *
1272  *  Legacy Interrupt Service routine  
1273  *
1274  *********************************************************************/
1275
1276 static void
1277 lem_intr(void *arg)
1278 {
1279         struct adapter  *adapter = arg;
1280         struct ifnet    *ifp = adapter->ifp;
1281         u32             reg_icr;
1282
1283
1284         if (ifp->if_capenable & IFCAP_POLLING)
1285                 return;
1286
1287         EM_CORE_LOCK(adapter);
1288         reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1289         if (reg_icr & E1000_ICR_RXO)
1290                 adapter->rx_overruns++;
1291
1292         if ((reg_icr == 0xffffffff) || (reg_icr == 0))
1293                         goto out;
1294
1295         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1296                         goto out;
1297
1298         if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1299                 callout_stop(&adapter->timer);
1300                 adapter->hw.mac.get_link_status = 1;
1301                 lem_update_link_status(adapter);
1302                 /* Deal with TX cruft when link lost */
1303                 lem_tx_purge(adapter);
1304                 callout_reset(&adapter->timer, hz,
1305                     lem_local_timer, adapter);
1306                 goto out;
1307         }
1308
1309         EM_TX_LOCK(adapter);
1310         lem_txeof(adapter);
1311         lem_rxeof(adapter, -1);
1312         lem_txeof(adapter);
1313         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1314             !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1315                 lem_start_locked(ifp);
1316         EM_TX_UNLOCK(adapter);
1317
1318 out:
1319         EM_CORE_UNLOCK(adapter);
1320         return;
1321 }
1322
1323 #else /* EM_FAST_IRQ, then fast interrupt routines only */
1324
1325 static void
1326 lem_handle_link(void *context, int pending)
1327 {
1328         struct adapter  *adapter = context;
1329         struct ifnet *ifp = adapter->ifp;
1330
1331         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1332                 return;
1333
1334         EM_CORE_LOCK(adapter);
1335         callout_stop(&adapter->timer);
1336         lem_update_link_status(adapter);
1337         /* Deal with TX cruft when link lost */
1338         lem_tx_purge(adapter);
1339         callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
1340         EM_CORE_UNLOCK(adapter);
1341 }
1342
1343
1344 /* Combined RX/TX handler, used by Legacy and MSI */
1345 static void
1346 lem_handle_rxtx(void *context, int pending)
1347 {
1348         struct adapter  *adapter = context;
1349         struct ifnet    *ifp = adapter->ifp;
1350
1351
1352         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1353                 if (lem_rxeof(adapter, adapter->rx_process_limit) != 0)
1354                         taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1355                 EM_TX_LOCK(adapter);
1356                 lem_txeof(adapter);
1357                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1358                         lem_start_locked(ifp);
1359                 EM_TX_UNLOCK(adapter);
1360         }
1361
1362         lem_enable_intr(adapter);
1363 }
1364
1365 /*********************************************************************
1366  *
1367  *  Fast Legacy/MSI Combined Interrupt Service routine  
1368  *
1369  *********************************************************************/
1370 #if __FreeBSD_version < 700000
1371 #define FILTER_STRAY
1372 #define FILTER_HANDLED
1373 static void
1374 #else
1375 static int
1376 #endif
1377 lem_irq_fast(void *arg)
1378 {
1379         struct adapter  *adapter = arg;
1380         struct ifnet    *ifp;
1381         u32             reg_icr;
1382
1383         ifp = adapter->ifp;
1384
1385         reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1386
1387         /* Hot eject?  */
1388         if (reg_icr == 0xffffffff)
1389                 return FILTER_STRAY;
1390
1391         /* Definitely not our interrupt.  */
1392         if (reg_icr == 0x0)
1393                 return FILTER_STRAY;
1394
1395         /*
1396          * Mask interrupts until the taskqueue is finished running.  This is
1397          * cheap, just assume that it is needed.  This also works around the
1398          * MSI message reordering errata on certain systems.
1399          */
1400         lem_disable_intr(adapter);
1401         taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1402
1403         /* Link status change */
1404         if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1405                 adapter->hw.mac.get_link_status = 1;
1406                 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1407         }
1408
1409         if (reg_icr & E1000_ICR_RXO)
1410                 adapter->rx_overruns++;
1411         return FILTER_HANDLED;
1412 }
1413 #endif /* ~EM_LEGACY_IRQ */
1414
1415
1416 /*********************************************************************
1417  *
1418  *  Media Ioctl callback
1419  *
1420  *  This routine is called whenever the user queries the status of
1421  *  the interface using ifconfig.
1422  *
1423  **********************************************************************/
1424 static void
1425 lem_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1426 {
1427         struct adapter *adapter = ifp->if_softc;
1428         u_char fiber_type = IFM_1000_SX;
1429
1430         INIT_DEBUGOUT("lem_media_status: begin");
1431
1432         EM_CORE_LOCK(adapter);
1433         lem_update_link_status(adapter);
1434
1435         ifmr->ifm_status = IFM_AVALID;
1436         ifmr->ifm_active = IFM_ETHER;
1437
1438         if (!adapter->link_active) {
1439                 EM_CORE_UNLOCK(adapter);
1440                 return;
1441         }
1442
1443         ifmr->ifm_status |= IFM_ACTIVE;
1444
1445         if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1446             (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1447                 if (adapter->hw.mac.type == e1000_82545)
1448                         fiber_type = IFM_1000_LX;
1449                 ifmr->ifm_active |= fiber_type | IFM_FDX;
1450         } else {
1451                 switch (adapter->link_speed) {
1452                 case 10:
1453                         ifmr->ifm_active |= IFM_10_T;
1454                         break;
1455                 case 100:
1456                         ifmr->ifm_active |= IFM_100_TX;
1457                         break;
1458                 case 1000:
1459                         ifmr->ifm_active |= IFM_1000_T;
1460                         break;
1461                 }
1462                 if (adapter->link_duplex == FULL_DUPLEX)
1463                         ifmr->ifm_active |= IFM_FDX;
1464                 else
1465                         ifmr->ifm_active |= IFM_HDX;
1466         }
1467         EM_CORE_UNLOCK(adapter);
1468 }
1469
1470 /*********************************************************************
1471  *
1472  *  Media Ioctl callback
1473  *
1474  *  This routine is called when the user changes speed/duplex using
1475  *  media/mediopt option with ifconfig.
1476  *  the media/mediaopt options with ifconfig.
1477  **********************************************************************/
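/*
 * Usage sketch (hypothetical interface name; adapters handled by this
 * driver typically attach as em<N>):
 *
 *      # ifconfig em0 media 100baseTX mediaopt full-duplex    (forced)
 *      # ifconfig em0 media autoselect                        (autoneg)
 */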
1478 static int
1479 lem_media_change(struct ifnet *ifp)
1480 {
1481         struct adapter *adapter = ifp->if_softc;
1482         struct ifmedia  *ifm = &adapter->media;
1483
1484         INIT_DEBUGOUT("lem_media_change: begin");
1485
1486         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1487                 return (EINVAL);
1488
1489         EM_CORE_LOCK(adapter);
1490         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1491         case IFM_AUTO:
1492                 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1493                 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1494                 break;
1495         case IFM_1000_LX:
1496         case IFM_1000_SX:
1497         case IFM_1000_T:
1498                 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1499                 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1500                 break;
1501         case IFM_100_TX:
1502                 adapter->hw.mac.autoneg = FALSE;
1503                 adapter->hw.phy.autoneg_advertised = 0;
1504                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1505                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1506                 else
1507                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1508                 break;
1509         case IFM_10_T:
1510                 adapter->hw.mac.autoneg = FALSE;
1511                 adapter->hw.phy.autoneg_advertised = 0;
1512                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1513                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1514                 else
1515                         adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1516                 break;
1517         default:
1518                 device_printf(adapter->dev, "Unsupported media type\n");
1519         }
1520
1521         /* As the speed/duplex settings may have changed, we need
1522          * to reset the PHY.
1523          */
1524         adapter->hw.phy.reset_disable = FALSE;
1525
1526         lem_init_locked(adapter);
1527         EM_CORE_UNLOCK(adapter);
1528
1529         return (0);
1530 }
1531
1532 /*********************************************************************
1533  *
1534  *  This routine maps the mbufs to tx descriptors.
1535  *
1536  *  return 0 on success, positive on failure
1537  **********************************************************************/
1538
1539 static int
1540 lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
1541 {
1542         bus_dma_segment_t       segs[EM_MAX_SCATTER];
1543         bus_dmamap_t            map;
1544         struct em_buffer        *tx_buffer, *tx_buffer_mapped;
1545         struct e1000_tx_desc    *ctxd = NULL;
1546         struct mbuf             *m_head;
1547         u32                     txd_upper, txd_lower, txd_used, txd_saved;
1548         int                     error, nsegs, i, j, first, last = 0;
1549 #if __FreeBSD_version < 700000
1550         struct m_tag            *mtag;
1551 #endif
1552         m_head = *m_headp;
1553         txd_upper = txd_lower = txd_used = txd_saved = 0;
1554
1555         /*
1556          * Force a cleanup if number of TX descriptors
1557          * available hits the threshold
1558          */
1559         if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1560                 lem_txeof(adapter);
1561                 /* Do we now have at least the minimum free? */
1562                 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
1563                         adapter->no_tx_desc_avail1++;
1564                         return (ENOBUFS);
1565                 }
1566         }
1567
1568         /*
1569          * Map the packet for DMA
1570          *
1571          * Capture the first descriptor index;
1572          * this descriptor will record the index
1573          * of the EOP descriptor, which is the only
1574          * one that gets a DONE bit writeback.
1575          */
1576         first = adapter->next_avail_tx_desc;
1577         tx_buffer = &adapter->tx_buffer_area[first];
1578         tx_buffer_mapped = tx_buffer;
1579         map = tx_buffer->map;
1580
1581         error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1582             *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1583
1584         /*
1585          * There are two types of errors we can (try) to handle:
1586          * - EFBIG means the mbuf chain was too long and bus_dma ran
1587          *   out of segments.  Defragment the mbuf chain and try again.
1588          * - ENOMEM means bus_dma could not obtain enough bounce buffers
1589          *   at this point in time.  Defer sending and try again later.
1590          * All other errors, in particular EINVAL, are fatal and prevent the
1591          * mbuf chain from ever going through.  Drop it and report error.
1592          */
1593         if (error == EFBIG) {
1594                 struct mbuf *m;
1595
1596                 m = m_defrag(*m_headp, M_DONTWAIT);
1597                 if (m == NULL) {
1598                         adapter->mbuf_alloc_failed++;
1599                         m_freem(*m_headp);
1600                         *m_headp = NULL;
1601                         return (ENOBUFS);
1602                 }
1603                 *m_headp = m;
1604
1605                 /* Try it again */
1606                 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
1607                     *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1608
1609                 if (error) {
1610                         adapter->no_tx_dma_setup++;
1611                         m_freem(*m_headp);
1612                         *m_headp = NULL;
1613                         return (error);
1614                 }
1615         } else if (error != 0) {
1616                 adapter->no_tx_dma_setup++;
1617                 return (error);
1618         }
1619
1620         if (nsegs > (adapter->num_tx_desc_avail - 2)) {
1621                 adapter->no_tx_desc_avail2++;
1622                 bus_dmamap_unload(adapter->txtag, map);
1623                 return (ENOBUFS);
1624         }
1625         m_head = *m_headp;
1626
1627         /* Do hardware assists */
1628         if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
1629                 lem_transmit_checksum_setup(adapter,  m_head,
1630                     &txd_upper, &txd_lower);
1631
1632         i = adapter->next_avail_tx_desc;
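        /*
         * On the 82544 in PCI-X mode the per-segment split below may consume
         * extra descriptors, so remember the starting index in txd_saved;
         * the ENOBUFS path further down rolls next_avail_tx_desc back to it.
         */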
1633         if (adapter->pcix_82544) 
1634                 txd_saved = i;
1635
1636         /* Set up our transmit descriptors */
1637         for (j = 0; j < nsegs; j++) {
1638                 bus_size_t seg_len;
1639                 bus_addr_t seg_addr;
1640                 /* If adapter is 82544 and on PCIX bus */
1641                 if(adapter->pcix_82544) {
1642                         DESC_ARRAY      desc_array;
1643                         u32             array_elements, counter;
1644                         /*
1645                          * Check the Address and Length combination and
1646                          * split the data accordingly
1647                          */
1648                         array_elements = lem_fill_descriptors(segs[j].ds_addr,
1649                             segs[j].ds_len, &desc_array);
1650                         for (counter = 0; counter < array_elements; counter++) {
1651                                 if (txd_used == adapter->num_tx_desc_avail) {
1652                                         adapter->next_avail_tx_desc = txd_saved;
1653                                         adapter->no_tx_desc_avail2++;
1654                                         bus_dmamap_unload(adapter->txtag, map);
1655                                         return (ENOBUFS);
1656                                 }
1657                                 tx_buffer = &adapter->tx_buffer_area[i];
1658                                 ctxd = &adapter->tx_desc_base[i];
1659                                 ctxd->buffer_addr = htole64(
1660                                     desc_array.descriptor[counter].address);
1661                                 ctxd->lower.data = htole32(
1662                                     (adapter->txd_cmd | txd_lower | (u16)
1663                                     desc_array.descriptor[counter].length));
1664                                 ctxd->upper.data =
1665                                     htole32((txd_upper));
1666                                 last = i;
1667                                 if (++i == adapter->num_tx_desc)
1668                                          i = 0;
1669                                 tx_buffer->m_head = NULL;
1670                                 tx_buffer->next_eop = -1;
1671                                 txd_used++;
1672                         }
1673                 } else {
1674                         tx_buffer = &adapter->tx_buffer_area[i];
1675                         ctxd = &adapter->tx_desc_base[i];
1676                         seg_addr = segs[j].ds_addr;
1677                         seg_len  = segs[j].ds_len;
1678                         ctxd->buffer_addr = htole64(seg_addr);
1679                         ctxd->lower.data = htole32(
1680                             adapter->txd_cmd | txd_lower | seg_len);
1681                         ctxd->upper.data =
1682                             htole32(txd_upper);
1683                         last = i;
1684                         if (++i == adapter->num_tx_desc)
1685                                 i = 0;
1686                         tx_buffer->m_head = NULL;
1687                         tx_buffer->next_eop = -1;
1688                 }
1689         }
1690
1691         adapter->next_avail_tx_desc = i;
1692
1693         if (adapter->pcix_82544)
1694                 adapter->num_tx_desc_avail -= txd_used;
1695         else
1696                 adapter->num_tx_desc_avail -= nsegs;
1697
1698         /*
1699         ** Handle the VLAN tag; this is the
1700         ** biggest difference between
1701         ** FreeBSD 6.x and 7.x.
1702         */
1703 #if __FreeBSD_version < 700000
1704         /* Find out if we are in vlan mode. */
1705         mtag = VLAN_OUTPUT_TAG(ifp, m_head);
1706         if (mtag != NULL) {
1707                 ctxd->upper.fields.special =
1708                     htole16(VLAN_TAG_VALUE(mtag));
1709                 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
1710         }
1711 #else /* FreeBSD 7 */
1712         if (m_head->m_flags & M_VLANTAG) {
1713                 /* Set the vlan id. */
1714                 ctxd->upper.fields.special =
1715                     htole16(m_head->m_pkthdr.ether_vtag);
1716                 /* Tell hardware to add tag */
1717                 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
1718         }
1719 #endif
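        /*
         * Either way the effect is the same: the VLAN id lands in the
         * descriptor's "special" field and E1000_TXD_CMD_VLE asks the MAC to
         * insert the tag on the wire; only where the stack hands us the tag
         * differs (m_tag metadata on 6.x, m_pkthdr.ether_vtag on 7.x).
         */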
1720
1721         tx_buffer->m_head = m_head;
1722         tx_buffer_mapped->map = tx_buffer->map;
1723         tx_buffer->map = map;
1724         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1725
1726         /*
1727          * Last Descriptor of Packet
1728          * needs End Of Packet (EOP)
1729          * and Report Status (RS)
1730          */
1731         ctxd->lower.data |=
1732             htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1733         /*
1734          * Keep track in the first buffer which
1735          * descriptor will be written back
1736          */
1737         tx_buffer = &adapter->tx_buffer_area[first];
1738         tx_buffer->next_eop = last;
1739
1740         /*
1741          * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
1742          * that this frame is available to transmit.
1743          */
1744         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1745             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1746         if (adapter->hw.mac.type == e1000_82547 &&
1747             adapter->link_duplex == HALF_DUPLEX)
1748                 lem_82547_move_tail(adapter);
1749         else {
1750                 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1751                 if (adapter->hw.mac.type == e1000_82547)
1752                         lem_82547_update_fifo_head(adapter,
1753                             m_head->m_pkthdr.len);
1754         }
1755
1756         return (0);
1757 }
1758
1759 /*********************************************************************
1760  *
1761  * 82547 workaround to avoid a controller hang in a half-duplex environment.
1762  * The workaround is to avoid queuing a large packet that would span
1763  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1764  * in this case, and we do that only when the FIFO is quiescent.
1765  *
1766  **********************************************************************/
1767 static void
1768 lem_82547_move_tail(void *arg)
1769 {
1770         struct adapter *adapter = arg;
1771         struct e1000_tx_desc *tx_desc;
1772         u16     hw_tdt, sw_tdt, length = 0;
1773         bool    eop = 0;
1774
1775         EM_TX_LOCK_ASSERT(adapter);
1776
1777         hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
1778         sw_tdt = adapter->next_avail_tx_desc;
1779         
1780         while (hw_tdt != sw_tdt) {
1781                 tx_desc = &adapter->tx_desc_base[hw_tdt];
1782                 length += tx_desc->lower.flags.length;
1783                 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1784                 if (++hw_tdt == adapter->num_tx_desc)
1785                         hw_tdt = 0;
1786
1787                 if (eop) {
1788                         if (lem_82547_fifo_workaround(adapter, length)) {
1789                                 adapter->tx_fifo_wrk_cnt++;
1790                                 callout_reset(&adapter->tx_fifo_timer, 1,
1791                                         lem_82547_move_tail, adapter);
1792                                 break;
1793                         }
1794                         E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
1795                         lem_82547_update_fifo_head(adapter, length);
1796                         length = 0;
1797                 }
1798         }       
1799 }
1800
1801 static int
1802 lem_82547_fifo_workaround(struct adapter *adapter, int len)
1803 {       
1804         int fifo_space, fifo_pkt_len;
1805
1806         fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
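        /*
         * Illustrative numbers (assuming EM_FIFO_HDR is 16 bytes): a
         * 1514-byte frame needs roundup2(1514 + 16, 16) = 1536 bytes of
         * FIFO space.
         */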
1807
1808         if (adapter->link_duplex == HALF_DUPLEX) {
1809                 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1810
1811                 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1812                         if (lem_82547_tx_fifo_reset(adapter))
1813                                 return (0);
1814                         else
1815                                 return (1);
1816                 }
1817         }
1818
1819         return (0);
1820 }
1821
1822 static void
1823 lem_82547_update_fifo_head(struct adapter *adapter, int len)
1824 {
1825         int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1826         
1827         /* tx_fifo_head is always 16-byte aligned */
1828         adapter->tx_fifo_head += fifo_pkt_len;
1829         if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1830                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1831         }
1832 }
1833
1834
1835 static int
1836 lem_82547_tx_fifo_reset(struct adapter *adapter)
1837 {
1838         u32 tctl;
1839
1840         if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1841             E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1842             (E1000_READ_REG(&adapter->hw, E1000_TDFT) == 
1843             E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1844             (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1845             E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1846             (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
1847                 /* Disable TX unit */
1848                 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1849                 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1850                     tctl & ~E1000_TCTL_EN);
1851
1852                 /* Reset FIFO pointers */
1853                 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1854                     adapter->tx_head_addr);
1855                 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1856                     adapter->tx_head_addr);
1857                 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1858                     adapter->tx_head_addr);
1859                 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1860                     adapter->tx_head_addr);
1861
1862                 /* Re-enable TX unit */
1863                 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
1864                 E1000_WRITE_FLUSH(&adapter->hw);
1865
1866                 adapter->tx_fifo_head = 0;
1867                 adapter->tx_fifo_reset_cnt++;
1868
1869                 return (TRUE);
1870         }
1871         else {
1872                 return (FALSE);
1873         }
1874 }
1875
1876 static void
1877 lem_set_promisc(struct adapter *adapter)
1878 {
1879         struct ifnet    *ifp = adapter->ifp;
1880         u32             reg_rctl;
1881
1882         reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1883
1884         if (ifp->if_flags & IFF_PROMISC) {
1885                 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1886                 /* Turn this on if you want to see bad packets */
1887                 if (lem_debug_sbp)
1888                         reg_rctl |= E1000_RCTL_SBP;
1889                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1890         } else if (ifp->if_flags & IFF_ALLMULTI) {
1891                 reg_rctl |= E1000_RCTL_MPE;
1892                 reg_rctl &= ~E1000_RCTL_UPE;
1893                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1894         }
1895 }
1896
1897 static void
1898 lem_disable_promisc(struct adapter *adapter)
1899 {
1900         u32     reg_rctl;
1901
1902         reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1903
1904         reg_rctl &=  (~E1000_RCTL_UPE);
1905         reg_rctl &=  (~E1000_RCTL_MPE);
1906         reg_rctl &=  (~E1000_RCTL_SBP);
1907         E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1908 }
1909
1910
1911 /*********************************************************************
1912  *  Multicast Update
1913  *
1914  *  This routine is called whenever multicast address list is updated.
1915  *
1916  **********************************************************************/
1917
1918 static void
1919 lem_set_multi(struct adapter *adapter)
1920 {
1921         struct ifnet    *ifp = adapter->ifp;
1922         struct ifmultiaddr *ifma;
1923         u32 reg_rctl = 0;
1924         u8  *mta; /* Multicast array memory */
1925         int mcnt = 0;
1926
1927         IOCTL_DEBUGOUT("lem_set_multi: begin");
1928
1929         if (adapter->hw.mac.type == e1000_82542 && 
1930             adapter->hw.revision_id == E1000_REVISION_2) {
1931                 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1932                 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1933                         e1000_pci_clear_mwi(&adapter->hw);
1934                 reg_rctl |= E1000_RCTL_RST;
1935                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1936                 msec_delay(5);
1937         }
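        /*
         * The block above special-cases the 82542 rev 2: the receiver is
         * held in reset (E1000_RCTL_RST) and MWI is cleared while the
         * multicast table is rewritten; both are restored near the end of
         * this function once the new list has been programmed.
         */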
1938
1939         /* Allocate temporary memory to setup array */
1940         mta = malloc(sizeof(u8) *
1941             (ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES),
1942             M_DEVBUF, M_NOWAIT | M_ZERO);
1943         if (mta == NULL)
1944                 panic("lem_set_multi memory failure\n");
1945
1946 #if __FreeBSD_version < 800000
1947         IF_ADDR_LOCK(ifp);
1948 #else
1949         if_maddr_rlock(ifp);
1950 #endif
1951         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1952                 if (ifma->ifma_addr->sa_family != AF_LINK)
1953                         continue;
1954
1955                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1956                         break;
1957
1958                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1959                     &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
1960                 mcnt++;
1961         }
1962 #if __FreeBSD_version < 800000
1963         IF_ADDR_UNLOCK(ifp);
1964 #else
1965         if_maddr_runlock(ifp);
1966 #endif
1967         if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1968                 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1969                 reg_rctl |= E1000_RCTL_MPE;
1970                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1971         } else
1972                 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
1973
1974         if (adapter->hw.mac.type == e1000_82542 && 
1975             adapter->hw.revision_id == E1000_REVISION_2) {
1976                 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1977                 reg_rctl &= ~E1000_RCTL_RST;
1978                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1979                 msec_delay(5);
1980                 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1981                         e1000_pci_set_mwi(&adapter->hw);
1982         }
1983         free(mta, M_DEVBUF);
1984 }
1985
1986
1987 /*********************************************************************
1988  *  Timer routine
1989  *
1990  *  This routine checks for link status and updates statistics.
1991  *
1992  **********************************************************************/
1993
1994 static void
1995 lem_local_timer(void *arg)
1996 {
1997         struct adapter  *adapter = arg;
1998         struct ifnet    *ifp = adapter->ifp;
1999
2000         EM_CORE_LOCK_ASSERT(adapter);
2001
2002         taskqueue_enqueue(adapter->tq,
2003             &adapter->rxtx_task);
2004         lem_update_link_status(adapter);
2005         lem_update_stats_counters(adapter);
2006
2007         if (lem_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
2008                 lem_print_hw_stats(adapter);
2009
2010         lem_smartspeed(adapter);
2011
2012         /*
2013          * Check the watchdog: if too much time has passed since
2014          * the last TX descriptor was cleaned, the TX engine is
2015          * assumed to be hung and the adapter is reinitialized.
2016          */
2017         if ((adapter->watchdog_check == TRUE) &&
2018             (ticks - adapter->watchdog_time > EM_WATCHDOG))
2019                 goto hung;
2020
2021         callout_reset(&adapter->timer, hz, lem_local_timer, adapter);
2022         return;
2023 hung:
2024         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2025         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2026         adapter->watchdog_events++;
2027         lem_init_locked(adapter);
2028 }
2029
2030 static void
2031 lem_update_link_status(struct adapter *adapter)
2032 {
2033         struct e1000_hw *hw = &adapter->hw;
2034         struct ifnet *ifp = adapter->ifp;
2035         device_t dev = adapter->dev;
2036         u32 link_check = 0;
2037
2038         /* Get the cached link value or read phy for real */
2039         switch (hw->phy.media_type) {
2040         case e1000_media_type_copper:
2041                 if (hw->mac.get_link_status) {
2042                         /* Do the work to read phy */
2043                         e1000_check_for_link(hw);
2044                         link_check = !hw->mac.get_link_status;
2045                         if (link_check) /* ESB2 fix */
2046                                 e1000_cfg_on_link_up(hw);
2047                 } else
2048                         link_check = TRUE;
2049                 break;
2050         case e1000_media_type_fiber:
2051                 e1000_check_for_link(hw);
2052                 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2053                                  E1000_STATUS_LU);
2054                 break;
2055         case e1000_media_type_internal_serdes:
2056                 e1000_check_for_link(hw);
2057                 link_check = adapter->hw.mac.serdes_has_link;
2058                 break;
2059         default:
2060         case e1000_media_type_unknown:
2061                 break;
2062         }
2063
2064         /* Now check for a transition */
2065         if (link_check && (adapter->link_active == 0)) {
2066                 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2067                     &adapter->link_duplex);
2068                 if (bootverbose)
2069                         device_printf(dev, "Link is up %d Mbps %s\n",
2070                             adapter->link_speed,
2071                             ((adapter->link_duplex == FULL_DUPLEX) ?
2072                             "Full Duplex" : "Half Duplex"));
2073                 adapter->link_active = 1;
2074                 adapter->smartspeed = 0;
2075                 ifp->if_baudrate = adapter->link_speed * 1000000;
2076                 if_link_state_change(ifp, LINK_STATE_UP);
2077         } else if (!link_check && (adapter->link_active == 1)) {
2078                 ifp->if_baudrate = adapter->link_speed = 0;
2079                 adapter->link_duplex = 0;
2080                 if (bootverbose)
2081                         device_printf(dev, "Link is Down\n");
2082                 adapter->link_active = 0;
2083                 /* Link down, disable watchdog */
2084                 adapter->watchdog_check = FALSE;
2085                 if_link_state_change(ifp, LINK_STATE_DOWN);
2086         }
2087 }
2088
2089 /*********************************************************************
2090  *
2091  *  This routine disables all traffic on the adapter by issuing a
2092  *  global reset on the MAC and deallocates TX/RX buffers.
2093  *
2094  *  This routine should always be called with BOTH the CORE
2095  *  and TX locks.
2096  **********************************************************************/
2097
2098 static void
2099 lem_stop(void *arg)
2100 {
2101         struct adapter  *adapter = arg;
2102         struct ifnet    *ifp = adapter->ifp;
2103
2104         EM_CORE_LOCK_ASSERT(adapter);
2105         EM_TX_LOCK_ASSERT(adapter);
2106
2107         INIT_DEBUGOUT("lem_stop: begin");
2108
2109         lem_disable_intr(adapter);
2110         callout_stop(&adapter->timer);
2111         callout_stop(&adapter->tx_fifo_timer);
2112
2113         /* Tell the stack that the interface is no longer active */
2114         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2115
2116         e1000_reset_hw(&adapter->hw);
2117         if (adapter->hw.mac.type >= e1000_82544)
2118                 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2119
2120         e1000_led_off(&adapter->hw);
2121         e1000_cleanup_led(&adapter->hw);
2122 }
2123
2124
2125 /*********************************************************************
2126  *
2127  *  Determine hardware revision.
2128  *
2129  **********************************************************************/
2130 static void
2131 lem_identify_hardware(struct adapter *adapter)
2132 {
2133         device_t dev = adapter->dev;
2134
2135         /* Make sure our PCI config space has the necessary bits set */
2136         adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2137         if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2138             (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2139                 device_printf(dev, "Memory Access and/or Bus Master bits "
2140                     "were not set!\n");
2141                 adapter->hw.bus.pci_cmd_word |=
2142                 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2143                 pci_write_config(dev, PCIR_COMMAND,
2144                     adapter->hw.bus.pci_cmd_word, 2);
2145         }
2146
2147         /* Save off the information about this board */
2148         adapter->hw.vendor_id = pci_get_vendor(dev);
2149         adapter->hw.device_id = pci_get_device(dev);
2150         adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2151         adapter->hw.subsystem_vendor_id =
2152             pci_read_config(dev, PCIR_SUBVEND_0, 2);
2153         adapter->hw.subsystem_device_id =
2154             pci_read_config(dev, PCIR_SUBDEV_0, 2);
2155
2156         /* Do Shared Code Init and Setup */
2157         if (e1000_set_mac_type(&adapter->hw)) {
2158                 device_printf(dev, "Setup init failure\n");
2159                 return;
2160         }
2161 }
2162
2163 static int
2164 lem_allocate_pci_resources(struct adapter *adapter)
2165 {
2166         device_t        dev = adapter->dev;
2167         int             val, rid, error = E1000_SUCCESS;
2168
2169         rid = PCIR_BAR(0);
2170         adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2171             &rid, RF_ACTIVE);
2172         if (adapter->memory == NULL) {
2173                 device_printf(dev, "Unable to allocate bus resource: memory\n");
2174                 return (ENXIO);
2175         }
2176         adapter->osdep.mem_bus_space_tag =
2177             rman_get_bustag(adapter->memory);
2178         adapter->osdep.mem_bus_space_handle =
2179             rman_get_bushandle(adapter->memory);
2180         adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2181
2182         /* Only older adapters use IO mapping */
2183         if (adapter->hw.mac.type > e1000_82543) {
2184                 /* Figure out where our IO BAR is */
2185                 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2186                         val = pci_read_config(dev, rid, 4);
2187                         if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2188                                 adapter->io_rid = rid;
2189                                 break;
2190                         }
2191                         rid += 4;
2192                         /* check for 64bit BAR */
2193                         if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2194                                 rid += 4;
2195                 }
2196                 if (rid >= PCIR_CIS) {
2197                         device_printf(dev, "Unable to locate IO BAR\n");
2198                         return (ENXIO);
2199                 }
2200                 adapter->ioport = bus_alloc_resource_any(dev,
2201                     SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2202                 if (adapter->ioport == NULL) {
2203                         device_printf(dev, "Unable to allocate bus resource: "
2204                             "ioport\n");
2205                         return (ENXIO);
2206                 }
2207                 adapter->hw.io_base = 0;
2208                 adapter->osdep.io_bus_space_tag =
2209                     rman_get_bustag(adapter->ioport);
2210                 adapter->osdep.io_bus_space_handle =
2211                     rman_get_bushandle(adapter->ioport);
2212         }
2213
2214         adapter->hw.back = &adapter->osdep;
2215
2216         return (error);
2217 }
2218
2219 /*********************************************************************
2220  *
2221  *  Setup the Legacy or MSI Interrupt handler
2222  *
2223  **********************************************************************/
2224 int
2225 lem_allocate_irq(struct adapter *adapter)
2226 {
2227         device_t dev = adapter->dev;
2228         int error, rid = 0;
2229
2230         /* Manually turn off all interrupts */
2231         E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2232
2233         /* We allocate a single interrupt resource */
2234         adapter->res[0] = bus_alloc_resource_any(dev,
2235             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2236         if (adapter->res[0] == NULL) {
2237                 device_printf(dev, "Unable to allocate bus resource: "
2238                     "interrupt\n");
2239                 return (ENXIO);
2240         }
2241
2242 #ifdef EM_LEGACY_IRQ
2243         /* We do Legacy setup */
2244         if ((error = bus_setup_intr(dev, adapter->res[0],
2245 #if __FreeBSD_version > 700000
2246             INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
2247 #else /* 6.X */
2248             INTR_TYPE_NET | INTR_MPSAFE, lem_intr, adapter,
2249 #endif
2250             &adapter->tag[0])) != 0) {
2251                 device_printf(dev, "Failed to register interrupt handler");
2252                 return (error);
2253         }
2254
2255 #else /* FAST_IRQ */
2256         /*
2257          * Try allocating a fast interrupt and the associated deferred
2258          * processing contexts.
2259          */
2260         TASK_INIT(&adapter->rxtx_task, 0, lem_handle_rxtx, adapter);
2261         TASK_INIT(&adapter->link_task, 0, lem_handle_link, adapter);
2262         adapter->tq = taskqueue_create_fast("lem_taskq", M_NOWAIT,
2263             taskqueue_thread_enqueue, &adapter->tq);
2264         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2265             device_get_nameunit(adapter->dev));
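        /*
         * With this split, lem_irq_fast() runs as a filter in interrupt
         * context and does little more than read ICR and schedule work,
         * while the heavier RX/TX and link-change processing runs in the
         * taskqueue thread created here.
         */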
2266 #if __FreeBSD_version < 700000
2267         if ((error = bus_setup_intr(dev, adapter->res[0],
2268             INTR_TYPE_NET | INTR_FAST, lem_irq_fast, adapter,
2269 #else
2270         if ((error = bus_setup_intr(dev, adapter->res[0],
2271             INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
2272 #endif
2273             &adapter->tag[0])) != 0) {
2274                 device_printf(dev, "Failed to register fast interrupt "
2275                             "handler: %d\n", error);
2276                 taskqueue_free(adapter->tq);
2277                 adapter->tq = NULL;
2278                 return (error);
2279         }
2280 #endif  /* EM_LEGACY_IRQ */
2281         
2282         return (0);
2283 }
2284
2285
2286 static void
2287 lem_free_pci_resources(struct adapter *adapter)
2288 {
2289         device_t dev = adapter->dev;
2290
2291
2292         if (adapter->tag[0] != NULL) {
2293                 bus_teardown_intr(dev, adapter->res[0],
2294                     adapter->tag[0]);
2295                 adapter->tag[0] = NULL;
2296         }
2297
2298         if (adapter->res[0] != NULL) {
2299                 bus_release_resource(dev, SYS_RES_IRQ,
2300                     0, adapter->res[0]);
2301         }
2302
2303         if (adapter->memory != NULL)
2304                 bus_release_resource(dev, SYS_RES_MEMORY,
2305                     PCIR_BAR(0), adapter->memory);
2306
2307         if (adapter->ioport != NULL)
2308                 bus_release_resource(dev, SYS_RES_IOPORT,
2309                     adapter->io_rid, adapter->ioport);
2310 }
2311
2312
2313 /*********************************************************************
2314  *
2315  *  Initialize the hardware to a configuration
2316  *  as specified by the adapter structure.
2317  *
2318  **********************************************************************/
2319 static int
2320 lem_hardware_init(struct adapter *adapter)
2321 {
2322         device_t dev = adapter->dev;
2323         u16     rx_buffer_size;
2324
2325         INIT_DEBUGOUT("lem_hardware_init: begin");
2326
2327         /* Issue a global reset */
2328         e1000_reset_hw(&adapter->hw);
2329
2330         /* When hardware is reset, fifo_head is also reset */
2331         adapter->tx_fifo_head = 0;
2332
2333         /*
2334          * These parameters control the automatic generation (Tx) and
2335          * response (Rx) to Ethernet PAUSE frames.
2336          * - High water mark should allow for at least two frames to be
2337          *   received after sending an XOFF.
2338          * - Low water mark works best when it is very near the high water mark.
2339          *   This allows the receiver to restart by sending XON when it has
2340          *   drained a bit. Here we use an arbitrary value of 1500 which will
2341          *   restart after one full frame is pulled from the buffer. There
2342          *   could be several smaller frames in the buffer and if so they will
2343          *   not trigger the XON until their total number reduces the buffer
2344          *   by 1500.
2345          * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2346          */
2347         rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
2348             0xffff) << 10 );
2349
2350         adapter->hw.fc.high_water = rx_buffer_size -
2351             roundup2(adapter->max_frame_size, 1024);
2352         adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
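        /*
         * Illustrative numbers only (the PBA split is hardware dependent):
         * with a 48 KB receive packet buffer and a 1518-byte max frame,
         * high_water = 49152 - roundup2(1518, 1024) = 47104 and
         * low_water = 47104 - 1500 = 45604.
         */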
2353
2354         adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2355         adapter->hw.fc.send_xon = TRUE;
2356
2357         /* Set flow control, use the tunable value if sane */
2358         if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
2359                 adapter->hw.fc.requested_mode = lem_fc_setting;
2360         else
2361                 adapter->hw.fc.requested_mode = e1000_fc_none;
2362
2363         if (e1000_init_hw(&adapter->hw) < 0) {
2364                 device_printf(dev, "Hardware Initialization Failed\n");
2365                 return (EIO);
2366         }
2367
2368         e1000_check_for_link(&adapter->hw);
2369
2370         return (0);
2371 }
2372
2373 /*********************************************************************
2374  *
2375  *  Setup networking device structure and register an interface.
2376  *
2377  **********************************************************************/
2378 static void
2379 lem_setup_interface(device_t dev, struct adapter *adapter)
2380 {
2381         struct ifnet   *ifp;
2382
2383         INIT_DEBUGOUT("lem_setup_interface: begin");
2384
2385         ifp = adapter->ifp = if_alloc(IFT_ETHER);
2386         if (ifp == NULL)
2387                 panic("%s: can not if_alloc()", device_get_nameunit(dev));
2388         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2389         ifp->if_mtu = ETHERMTU;
2390         ifp->if_init =  lem_init;
2391         ifp->if_softc = adapter;
2392         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2393         ifp->if_ioctl = lem_ioctl;
2394         ifp->if_start = lem_start;
2395         IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2396         ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2397         IFQ_SET_READY(&ifp->if_snd);
2398
2399         ether_ifattach(ifp, adapter->hw.mac.addr);
2400
2401         ifp->if_capabilities = ifp->if_capenable = 0;
2402
2403         if (adapter->hw.mac.type >= e1000_82543) {
2404                 int version_cap;
2405 #if __FreeBSD_version < 700000
2406                 version_cap = IFCAP_HWCSUM;
2407 #else
2408                 version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2409 #endif
2410                 ifp->if_capabilities |= version_cap;
2411                 ifp->if_capenable |= version_cap;
2412         }
2413
2414         /*
2415          * Tell the upper layer(s) we support long frames.
2416          */
2417         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2418         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2419         ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2420
2421 #ifdef DEVICE_POLLING
2422         ifp->if_capabilities |= IFCAP_POLLING;
2423 #endif
2424
2425         /* Enable only WOL MAGIC by default */
2426         if (adapter->wol) {
2427                 ifp->if_capabilities |= IFCAP_WOL;
2428                 ifp->if_capenable |= IFCAP_WOL_MAGIC;
2429         }
2430                 
2431         /*
2432          * Specify the media types supported by this adapter and register
2433          * callbacks to update media and link information
2434          */
2435         ifmedia_init(&adapter->media, IFM_IMASK,
2436             lem_media_change, lem_media_status);
2437         if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2438             (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2439                 u_char fiber_type = IFM_1000_SX;        /* default type */
2440
2441                 if (adapter->hw.mac.type == e1000_82545)
2442                         fiber_type = IFM_1000_LX;
2443                 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 
2444                             0, NULL);
2445                 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2446         } else {
2447                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2448                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2449                             0, NULL);
2450                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2451                             0, NULL);
2452                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2453                             0, NULL);
2454                 if (adapter->hw.phy.type != e1000_phy_ife) {
2455                         ifmedia_add(&adapter->media,
2456                                 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2457                         ifmedia_add(&adapter->media,
2458                                 IFM_ETHER | IFM_1000_T, 0, NULL);
2459                 }
2460         }
2461         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2462         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2463 }
2464
2465
2466 /*********************************************************************
2467  *
2468  *  Workaround for SmartSpeed on 82541 and 82547 controllers
2469  *
2470  **********************************************************************/
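/*
 * In outline: if 1000BASE-T autonegotiation keeps failing with Master/Slave
 * configuration faults, temporarily clear manual Master/Slave control and
 * restart autonegotiation; after EM_SMARTSPEED_DOWNSHIFT attempts with no
 * link, turn it back on, and after EM_SMARTSPEED_MAX iterations start over.
 */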
2471 static void
2472 lem_smartspeed(struct adapter *adapter)
2473 {
2474         u16 phy_tmp;
2475
2476         if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2477             adapter->hw.mac.autoneg == 0 ||
2478             (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2479                 return;
2480
2481         if (adapter->smartspeed == 0) {
2482                 /* If Master/Slave config fault is asserted twice,
2483                  * we assume back-to-back */
2484                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2485                 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2486                         return;
2487                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2488                 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2489                         e1000_read_phy_reg(&adapter->hw,
2490                             PHY_1000T_CTRL, &phy_tmp);
2491                         if(phy_tmp & CR_1000T_MS_ENABLE) {
2492                                 phy_tmp &= ~CR_1000T_MS_ENABLE;
2493                                 e1000_write_phy_reg(&adapter->hw,
2494                                     PHY_1000T_CTRL, phy_tmp);
2495                                 adapter->smartspeed++;
2496                                 if(adapter->hw.mac.autoneg &&
2497                                    !e1000_copper_link_autoneg(&adapter->hw) &&
2498                                    !e1000_read_phy_reg(&adapter->hw,
2499                                     PHY_CONTROL, &phy_tmp)) {
2500                                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
2501                                                     MII_CR_RESTART_AUTO_NEG);
2502                                         e1000_write_phy_reg(&adapter->hw,
2503                                             PHY_CONTROL, phy_tmp);
2504                                 }
2505                         }
2506                 }
2507                 return;
2508         } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2509                 /* If still no link, perhaps using 2/3 pair cable */
2510                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2511                 phy_tmp |= CR_1000T_MS_ENABLE;
2512                 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2513                 if(adapter->hw.mac.autoneg &&
2514                    !e1000_copper_link_autoneg(&adapter->hw) &&
2515                    !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2516                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
2517                                     MII_CR_RESTART_AUTO_NEG);
2518                         e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2519                 }
2520         }
2521         /* Restart process after EM_SMARTSPEED_MAX iterations */
2522         if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2523                 adapter->smartspeed = 0;
2524 }
2525
2526
2527 /*
2528  * Manage DMA'able memory.
2529  */
2530 static void
2531 lem_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2532 {
2533         if (error)
2534                 return;
2535         *(bus_addr_t *) arg = segs[0].ds_addr;
2536 }
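/*
 * Note: lem_dma_malloc() below loads the map with BUS_DMA_NOWAIT, so
 * lem_dmamap_cb() runs before bus_dmamap_load() returns (or the load fails
 * outright); dma_paddr is therefore valid to check right after the call.
 */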
2537
2538 static int
2539 lem_dma_malloc(struct adapter *adapter, bus_size_t size,
2540         struct em_dma_alloc *dma, int mapflags)
2541 {
2542         int error;
2543
2544 #if __FreeBSD_version >= 700000
2545         error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
2546 #else
2547         error = bus_dma_tag_create(NULL,                 /* parent */
2548 #endif
2549                                 EM_DBA_ALIGN, 0,        /* alignment, bounds */
2550                                 BUS_SPACE_MAXADDR,      /* lowaddr */
2551                                 BUS_SPACE_MAXADDR,      /* highaddr */
2552                                 NULL, NULL,             /* filter, filterarg */
2553                                 size,                   /* maxsize */
2554                                 1,                      /* nsegments */
2555                                 size,                   /* maxsegsize */
2556                                 0,                      /* flags */
2557                                 NULL,                   /* lockfunc */
2558                                 NULL,                   /* lockarg */
2559                                 &dma->dma_tag);
2560         if (error) {
2561                 device_printf(adapter->dev,
2562                     "%s: bus_dma_tag_create failed: %d\n",
2563                     __func__, error);
2564                 goto fail_0;
2565         }
2566
2567         error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2568             BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
2569         if (error) {
2570                 device_printf(adapter->dev,
2571                     "%s: bus_dmamem_alloc(%ju) failed: %d\n",
2572                     __func__, (uintmax_t)size, error);
2573                 goto fail_2;
2574         }
2575
2576         dma->dma_paddr = 0;
2577         error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2578             size, lem_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
2579         if (error || dma->dma_paddr == 0) {
2580                 device_printf(adapter->dev,
2581                     "%s: bus_dmamap_load failed: %d\n",
2582                     __func__, error);
2583                 goto fail_3;
2584         }
2585
2586         return (0);
2587
2588 fail_3:
2589         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2590 fail_2:
2591         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2592         bus_dma_tag_destroy(dma->dma_tag);
2593 fail_0:
2594         dma->dma_map = NULL;
2595         dma->dma_tag = NULL;
2596
2597         return (error);
2598 }
2599
2600 static void
2601 lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2602 {
2603         if (dma->dma_tag == NULL)
2604                 return;
2605         if (dma->dma_map != NULL) {
2606                 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2607                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2608                 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2609                 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2610                 dma->dma_map = NULL;
2611         }
2612         bus_dma_tag_destroy(dma->dma_tag);
2613         dma->dma_tag = NULL;
2614 }
2615
2616
2617 /*********************************************************************
2618  *
2619  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2620  *  the information needed to transmit a packet on the wire.
2621  *
2622  **********************************************************************/
2623 static int
2624 lem_allocate_transmit_structures(struct adapter *adapter)
2625 {
2626         device_t dev = adapter->dev;
2627         struct em_buffer *tx_buffer;
2628         int error;
2629
2630         /*
2631          * Create DMA tags for tx descriptors
2632          */
2633 #if __FreeBSD_version >= 700000
2634         if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2635 #else
2636         if ((error = bus_dma_tag_create(NULL,            /* parent */
2637 #endif
2638                                 1, 0,                   /* alignment, bounds */
2639                                 BUS_SPACE_MAXADDR,      /* lowaddr */
2640                                 BUS_SPACE_MAXADDR,      /* highaddr */
2641                                 NULL, NULL,             /* filter, filterarg */
2642                                 EM_TSO_SIZE,            /* maxsize */
2643                                 EM_MAX_SCATTER,         /* nsegments */
2644                                 EM_TSO_SEG_SIZE,        /* maxsegsize */
2645                                 0,                      /* flags */
2646                                 NULL,           /* lockfunc */
2647                                 NULL,           /* lockarg */
2648                                 &adapter->txtag)) != 0) {
2649                 device_printf(dev, "Unable to allocate TX DMA tag\n");
2650                 goto fail;
2651         }
2652
2653         adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2654             adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2655         if (adapter->tx_buffer_area == NULL) {
2656                 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2657                 error = ENOMEM;
2658                 goto fail;
2659         }
2660
2661         /* Create the descriptor buffer dma maps */
2662         for (int i = 0; i < adapter->num_tx_desc; i++) {
2663                 tx_buffer = &adapter->tx_buffer_area[i];
2664                 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2665                 if (error != 0) {
2666                         device_printf(dev, "Unable to create TX DMA map\n");
2667                         goto fail;
2668                 }
2669                 tx_buffer->next_eop = -1;
2670         }
2671
2672         return (0);
2673 fail:
2674         lem_free_transmit_structures(adapter);
2675         return (error);
2676 }
2677
2678 /*********************************************************************
2679  *
2680  *  (Re)Initialize transmit structures.
2681  *
2682  **********************************************************************/
2683 static void
2684 lem_setup_transmit_structures(struct adapter *adapter)
2685 {
2686         struct em_buffer *tx_buffer;
2687
2688         /* Clear the old ring contents */
2689         bzero(adapter->tx_desc_base,
2690             (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2691
2692         /* Free any existing TX buffers */
2693         for (int i = 0; i < adapter->num_tx_desc; i++) {
2694                 tx_buffer = &adapter->tx_buffer_area[i];
2695                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2696                     BUS_DMASYNC_POSTWRITE);
2697                 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2698                 m_freem(tx_buffer->m_head);
2699                 tx_buffer->m_head = NULL;
2700                 tx_buffer->next_eop = -1;
2701         }
2702
2703         /* Reset state */
2704         adapter->next_avail_tx_desc = 0;
2705         adapter->next_tx_to_clean = 0;
2706         adapter->num_tx_desc_avail = adapter->num_tx_desc;
2707
2708         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2709             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2710
2711         return;
2712 }
2713
2714 /*********************************************************************
2715  *
2716  *  Enable transmit unit.
2717  *
2718  **********************************************************************/
2719 static void
2720 lem_initialize_transmit_unit(struct adapter *adapter)
2721 {
2722         u32     tctl, tipg = 0;
2723         u64     bus_addr;
2724
2725         INIT_DEBUGOUT("lem_initialize_transmit_unit: begin");
2726         /* Setup the Base and Length of the Tx Descriptor Ring */
2727         bus_addr = adapter->txdma.dma_paddr;
2728         E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2729             adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2730         E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2731             (u32)(bus_addr >> 32));
2732         E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2733             (u32)bus_addr);
2734         /* Setup the HW Tx Head and Tail descriptor pointers */
2735         E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2736         E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
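        /*
         * Head and tail both start at zero, i.e. an empty ring; lem_xmit()
         * (or lem_82547_move_tail() on the 82547) advances TDT as frames are
         * queued and the hardware advances TDH as it sends them.
         */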
2737
2738         HW_DEBUGOUT2("Base = %x, Length = %x\n",
2739             E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
2740             E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
2741
2742         /* Set the default values for the Tx Inter Packet Gap timer */
2743         switch (adapter->hw.mac.type) {
2744         case e1000_82542:
2745                 tipg = DEFAULT_82542_TIPG_IPGT;
2746                 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2747                 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2748                 break;
2749         default:
2750                 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2751                     (adapter->hw.phy.media_type ==
2752                     e1000_media_type_internal_serdes))
2753                         tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2754                 else
2755                         tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2756                 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2757                 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2758         }
2759
2760         E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2761         E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
2762         if (adapter->hw.mac.type >= e1000_82540)
2763                 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
2764                     adapter->tx_abs_int_delay.value);
2765
2766         /* Program the Transmit Control Register */
2767         tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2768         tctl &= ~E1000_TCTL_CT;
2769         tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2770                    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2771
2772         /* This write will effectively turn on the transmit unit. */
2773         E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2774
2775         /* Setup Transmit Descriptor Base Settings */   
2776         adapter->txd_cmd = E1000_TXD_CMD_IFCS;
2777
2778         if (adapter->tx_int_delay.value > 0)
2779                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2780 }
2781
2782 /*********************************************************************
2783  *
2784  *  Free all transmit related data structures.
2785  *
2786  **********************************************************************/
2787 static void
2788 lem_free_transmit_structures(struct adapter *adapter)
2789 {
2790         struct em_buffer *tx_buffer;
2791
2792         INIT_DEBUGOUT("free_transmit_structures: begin");
2793
2794         if (adapter->tx_buffer_area != NULL) {
2795                 for (int i = 0; i < adapter->num_tx_desc; i++) {
2796                         tx_buffer = &adapter->tx_buffer_area[i];
2797                         if (tx_buffer->m_head != NULL) {
2798                                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2799                                     BUS_DMASYNC_POSTWRITE);
2800                                 bus_dmamap_unload(adapter->txtag,
2801                                     tx_buffer->map);
2802                                 m_freem(tx_buffer->m_head);
2803                                 tx_buffer->m_head = NULL;
2804                         } else if (tx_buffer->map != NULL)
2805                                 bus_dmamap_unload(adapter->txtag,
2806                                     tx_buffer->map);
2807                         if (tx_buffer->map != NULL) {
2808                                 bus_dmamap_destroy(adapter->txtag,
2809                                     tx_buffer->map);
2810                                 tx_buffer->map = NULL;
2811                         }
2812                 }
2813         }
2814         if (adapter->tx_buffer_area != NULL) {
2815                 free(adapter->tx_buffer_area, M_DEVBUF);
2816                 adapter->tx_buffer_area = NULL;
2817         }
2818         if (adapter->txtag != NULL) {
2819                 bus_dma_tag_destroy(adapter->txtag);
2820                 adapter->txtag = NULL;
2821         }
2822 #if __FreeBSD_version >= 800000
2823         if (adapter->br != NULL)
2824                 buf_ring_free(adapter->br, M_DEVBUF);
2825 #endif
2826 }
2827
2828 /*********************************************************************
2829  *
2830  *  The offload context needs to be set when we transfer the first
2831  *  packet of a particular protocol (TCP/UDP). This routine has been
2832  *  enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
2833  *
2834  *  Added back the old method of keeping the current context type
2835  *  and not setting if unnecessary, as this is reported to be a
2836  *  big performance win.  -jfv
2837  **********************************************************************/
2838 static void
2839 lem_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
2840     u32 *txd_upper, u32 *txd_lower)
2841 {
2842         struct e1000_context_desc *TXD = NULL;
2843         struct em_buffer *tx_buffer;
2844         struct ether_vlan_header *eh;
2845         struct ip *ip = NULL;
2846         struct ip6_hdr *ip6;
2847         int curr_txd, ehdrlen;
2848         u32 cmd, hdr_len, ip_hlen;
2849         u16 etype;
2850         u8 ipproto;
2851
2852
2853         cmd = hdr_len = ipproto = 0;
2854         curr_txd = adapter->next_avail_tx_desc;
2855
2856         /*
2857          * Determine where frame payload starts.
2858          * Jump over vlan headers if already present,
2859          * helpful for QinQ too.
2860          */
2861         eh = mtod(mp, struct ether_vlan_header *);
2862         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2863                 etype = ntohs(eh->evl_proto);
2864                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2865         } else {
2866                 etype = ntohs(eh->evl_encap_proto);
2867                 ehdrlen = ETHER_HDR_LEN;
2868         }
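             /*
              * e.g. with a single 802.1Q tag present, ehdrlen is
              * 14 + 4 = 18 and etype is taken from the inner protocol field.
              */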
2869
2870         /*
2871          * We only support TCP/UDP for IPv4 and IPv6 for the moment.
2872          * TODO: Support SCTP too when it hits the tree.
2873          */
2874         switch (etype) {
2875         case ETHERTYPE_IP:
2876                 ip = (struct ip *)(mp->m_data + ehdrlen);
2877                 ip_hlen = ip->ip_hl << 2;
2878
2879                 /* Setup of IP header checksum. */
2880                 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
2881                         /*
2882                          * Start offset for header checksum calculation.
2883                          * End offset for header checksum calculation.
2884                          * Offset of place to put the checksum.
2885                          */
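                             /*
                              * e.g. an untagged IPv4 frame with a 20-byte IP
                              * header: ipcss = 14, ipcse = 34 and
                              * ipcso = 14 + 10 = 24, since
                              * offsetof(struct ip, ip_sum) is 10.
                              */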
2886                         TXD = (struct e1000_context_desc *)
2887                             &adapter->tx_desc_base[curr_txd];
2888                         TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2889                         TXD->lower_setup.ip_fields.ipcse =
2890                             htole16(ehdrlen + ip_hlen);
2891                         TXD->lower_setup.ip_fields.ipcso =
2892                             ehdrlen + offsetof(struct ip, ip_sum);
2893                         cmd |= E1000_TXD_CMD_IP;
2894                         *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2895                 }
2896
2897                 if (mp->m_len < ehdrlen + ip_hlen)
2898                         return; /* failure */
2899
2900                 hdr_len = ehdrlen + ip_hlen;
2901                 ipproto = ip->ip_p;
2902
2903                 break;
2904         case ETHERTYPE_IPV6:
2905                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2906                 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
2907
2908                 if (mp->m_len < ehdrlen + ip_hlen)
2909                         return; /* failure */
2910
2911                 /* IPv6 doesn't have a header checksum. */
2912
2913                 hdr_len = ehdrlen + ip_hlen;
2914                 ipproto = ip6->ip6_nxt;
2915
2916                 break;
2917         default:
2918                 *txd_upper = 0;
2919                 *txd_lower = 0;
2920                 return;
2921         }
2922
2923         switch (ipproto) {
2924         case IPPROTO_TCP:
2925                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2926                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2927                         *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2928                         /* no need for context if already set */
2929                         if (adapter->last_hw_offload == CSUM_TCP)
2930                                 return;
2931                         adapter->last_hw_offload = CSUM_TCP;
2932                         /*
2933                          * Start offset for payload checksum calculation.
2934                          * End offset for payload checksum calculation.
2935                          * Offset of place to put the checksum.
2936                          */
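                             /*
                              * e.g. IPv4 with a 20-byte header: tucss = 34
                              * and tucso = 34 + 16 = 50, since
                              * offsetof(struct tcphdr, th_sum) is 16.
                              */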
2937                         TXD = (struct e1000_context_desc *)
2938                             &adapter->tx_desc_base[curr_txd];
2939                         TXD->upper_setup.tcp_fields.tucss = hdr_len;
2940                         TXD->upper_setup.tcp_fields.tucse = htole16(0);
2941                         TXD->upper_setup.tcp_fields.tucso =
2942                             hdr_len + offsetof(struct tcphdr, th_sum);
2943                         cmd |= E1000_TXD_CMD_TCP;
2944                 }
2945                 break;
2946         case IPPROTO_UDP:
2947         {
2948                 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2949                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2950                         *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2951                         /* no need for context if already set */
2952                         if (adapter->last_hw_offload == CSUM_UDP)
2953                                 return;
2954                         adapter->last_hw_offload = CSUM_UDP;
2955                         /*
2956                          * Start offset for header checksum calculation.
2957                          * End offset for header checksum calculation.
2958                          * Offset of place to put the checksum.
2959                          */
2960                         TXD = (struct e1000_context_desc *)
2961                             &adapter->tx_desc_base[curr_txd];
2962                         TXD->upper_setup.tcp_fields.tucss = hdr_len;
2963                         TXD->upper_setup.tcp_fields.tucse = htole16(0);
2964                         TXD->upper_setup.tcp_fields.tucso =
2965                             hdr_len + offsetof(struct udphdr, uh_sum);
2966                 }
2967                 /* Fall Thru */
2968         }
2969         default:
2970                 break;
2971         }
2972
2973         TXD->tcp_seg_setup.data = htole32(0);
2974         TXD->cmd_and_length =
2975             htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
2976         tx_buffer = &adapter->tx_buffer_area[curr_txd];
2977         tx_buffer->m_head = NULL;
2978         tx_buffer->next_eop = -1;
2979
2980         if (++curr_txd == adapter->num_tx_desc)
2981                 curr_txd = 0;
2982
2983         adapter->num_tx_desc_avail--;
2984         adapter->next_avail_tx_desc = curr_txd;
2985 }
2986
2987
2988 /**********************************************************************
2989  *
2990  *  Examine each tx_buffer in the used queue. If the hardware is done
2991  *  processing the packet then free associated resources. The
2992  *  tx_buffer is put back on the free queue.
2993  *
2994  **********************************************************************/
2995 static void
2996 lem_txeof(struct adapter *adapter)
2997 {
2998         int first, last, done, num_avail;
2999         struct em_buffer *tx_buffer;
3000         struct e1000_tx_desc   *tx_desc, *eop_desc;
3001         struct ifnet   *ifp = adapter->ifp;
3002
3003         EM_TX_LOCK_ASSERT(adapter);
3004
3005         if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3006                 return;
3007
3008         num_avail = adapter->num_tx_desc_avail;
3009         first = adapter->next_tx_to_clean;
3010         tx_desc = &adapter->tx_desc_base[first];
3011         tx_buffer = &adapter->tx_buffer_area[first];
3012         last = tx_buffer->next_eop;
3013         eop_desc = &adapter->tx_desc_base[last];
3014
3015         /*
3016          * What this does is get the index of the
3017          * first descriptor AFTER the EOP of the 
3018          * first packet, that way we can do the
3019          * simple comparison on the inner while loop.
3020          */
3021         if (++last == adapter->num_tx_desc)
3022                 last = 0;
3023         done = last;
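             /*
              * e.g. with first = 10 and the packet's EOP at descriptor 12,
              * done becomes 13 and the inner loop below frees
              * descriptors 10 through 12.
              */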
3024
3025         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3026             BUS_DMASYNC_POSTREAD);
3027
3028         while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3029                 /* We clean the range of the packet */
3030                 while (first != done) {
3031                         tx_desc->upper.data = 0;
3032                         tx_desc->lower.data = 0;
3033                         tx_desc->buffer_addr = 0;
3034                         ++num_avail;
3035
3036                         if (tx_buffer->m_head) {
3037                                 ifp->if_opackets++;
3038                                 bus_dmamap_sync(adapter->txtag,
3039                                     tx_buffer->map,
3040                                     BUS_DMASYNC_POSTWRITE);
3041                                 bus_dmamap_unload(adapter->txtag,
3042                                     tx_buffer->map);
3043
3044                                 m_freem(tx_buffer->m_head);
3045                                 tx_buffer->m_head = NULL;
3046                         }
3047                         tx_buffer->next_eop = -1;
3048                         adapter->watchdog_time = ticks;
3049
3050                         if (++first == adapter->num_tx_desc)
3051                                 first = 0;
3052
3053                         tx_buffer = &adapter->tx_buffer_area[first];
3054                         tx_desc = &adapter->tx_desc_base[first];
3055                 }
3056                 /* See if we can continue to the next packet */
3057                 last = tx_buffer->next_eop;
3058                 if (last != -1) {
3059                         eop_desc = &adapter->tx_desc_base[last];
3060                         /* Get new done point */
3061                         if (++last == adapter->num_tx_desc) last = 0;
3062                         done = last;
3063                 } else
3064                         break;
3065         }
3066         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3067             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3068
3069         adapter->next_tx_to_clean = first;
3070
3071         /*
3072          * If we have enough room, clear IFF_DRV_OACTIVE to
3073          * tell the stack that it is OK to send packets.
3074          * If there are no pending descriptors, clear the watchdog.
3075          */
3076         if (num_avail > EM_TX_CLEANUP_THRESHOLD) {                
3077                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3078                 if (num_avail == adapter->num_tx_desc) {
3079                         adapter->watchdog_check = FALSE;
3080                         adapter->num_tx_desc_avail = num_avail;
3081                         return;
3082                 } 
3083         }
3084
3085         adapter->num_tx_desc_avail = num_avail;
3086         return;
3087 }
3088
3089 /*********************************************************************
3090  *
3091  *  When link is lost there is sometimes work still pending in the TX ring
3092  *  which may result in a watchdog; rather than allow that, we attempt a
3093  *  cleanup and then reinit here. Note that this has been
3094  *  seen mostly with fiber adapters.
3095  *
3096  **********************************************************************/
3097 static void
3098 lem_tx_purge(struct adapter *adapter)
3099 {
3100         if ((!adapter->link_active) && (adapter->watchdog_check)) {
3101                 EM_TX_LOCK(adapter);
3102                 lem_txeof(adapter);
3103                 EM_TX_UNLOCK(adapter);
3104                 if (adapter->watchdog_check) /* Still outstanding? */
3105                         lem_init_locked(adapter);
3106         }
3107 }
3108
3109 /*********************************************************************
3110  *
3111  *  Get a buffer from system mbuf buffer pool.
3112  *
3113  **********************************************************************/
3114 static int
3115 lem_get_buf(struct adapter *adapter, int i)
3116 {
3117         struct mbuf             *m;
3118         bus_dma_segment_t       segs[1];
3119         bus_dmamap_t            map;
3120         struct em_buffer        *rx_buffer;
3121         int                     error, nsegs;
3122
3123         m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3124         if (m == NULL) {
3125                 adapter->mbuf_cluster_failed++;
3126                 return (ENOBUFS);
3127         }
3128         m->m_len = m->m_pkthdr.len = MCLBYTES;
3129
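             /*
              * For standard frames, shift the data by ETHER_ALIGN (2) so the
              * IP header that follows the 14-byte ethernet header lands on
              * a 4-byte boundary.
              */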
3130         if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3131                 m_adj(m, ETHER_ALIGN);
3132
3133         /*
3134          * Using memory from the mbuf cluster pool, invoke the
3135          * bus_dma machinery to arrange the memory mapping.
3136          */
3137         error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
3138             adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
3139         if (error != 0) {
3140                 m_free(m);
3141                 return (error);
3142         }
3143
3144         /* If nsegs is wrong then the stack is corrupt. */
3145         KASSERT(nsegs == 1, ("Too many segments returned!"));
3146
3147         rx_buffer = &adapter->rx_buffer_area[i];
3148         if (rx_buffer->m_head != NULL)
3149                 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3150
3151         map = rx_buffer->map;
3152         rx_buffer->map = adapter->rx_sparemap;
3153         adapter->rx_sparemap = map;
3154         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
3155         rx_buffer->m_head = m;
3156
3157         adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
3158         return (0);
3159 }
3160
3161 /*********************************************************************
3162  *
3163  *  Allocate memory for rx_buffer structures. Since we use one
3164  *  rx_buffer per received packet, the maximum number of rx_buffer's
3165  *  that we'll need is equal to the number of receive descriptors
3166  *  that we've allocated.
3167  *
3168  **********************************************************************/
3169 static int
3170 lem_allocate_receive_structures(struct adapter *adapter)
3171 {
3172         device_t dev = adapter->dev;
3173         struct em_buffer *rx_buffer;
3174         int i, error;
3175
3176         adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
3177             adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3178         if (adapter->rx_buffer_area == NULL) {
3179                 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3180                 return (ENOMEM);
3181         }
3182
3183 #if __FreeBSD_version >= 700000
3184         error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3185 #else
3186         error = bus_dma_tag_create(NULL,                 /* parent */
3187 #endif
3188                                 1, 0,                   /* alignment, bounds */
3189                                 BUS_SPACE_MAXADDR,      /* lowaddr */
3190                                 BUS_SPACE_MAXADDR,      /* highaddr */
3191                                 NULL, NULL,             /* filter, filterarg */
3192                                 MCLBYTES,               /* maxsize */
3193                                 1,                      /* nsegments */
3194                                 MCLBYTES,               /* maxsegsize */
3195                                 0,                      /* flags */
3196                                 NULL,                   /* lockfunc */
3197                                 NULL,                   /* lockarg */
3198                                 &adapter->rxtag);
3199         if (error) {
3200                 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
3201                     __func__, error);
3202                 goto fail;
3203         }
3204
3205         /* Create the spare map (used by getbuf) */
3206         error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3207              &adapter->rx_sparemap);
3208         if (error) {
3209                 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3210                     __func__, error);
3211                 goto fail;
3212         }
3213
3214         rx_buffer = adapter->rx_buffer_area;
3215         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3216                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
3217                     &rx_buffer->map);
3218                 if (error) {
3219                         device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3220                             __func__, error);
3221                         goto fail;
3222                 }
3223         }
3224
3225         return (0);
3226
3227 fail:
3228         lem_free_receive_structures(adapter);
3229         return (error);
3230 }
3231
3232 /*********************************************************************
3233  *
3234  *  (Re)initialize receive structures.
3235  *
3236  **********************************************************************/
3237 static int
3238 lem_setup_receive_structures(struct adapter *adapter)
3239 {
3240         struct em_buffer *rx_buffer;
3241         int i, error;
3242
3243         /* Reset descriptor ring */
3244         bzero(adapter->rx_desc_base,
3245             (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
3246
3247         /* Free current RX buffers. */
3248         rx_buffer = adapter->rx_buffer_area;
3249         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3250                 if (rx_buffer->m_head != NULL) {
3251                         bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3252                             BUS_DMASYNC_POSTREAD);
3253                         bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3254                         m_freem(rx_buffer->m_head);
3255                         rx_buffer->m_head = NULL;
3256                 }
3257         }
3258
3259         /* Allocate new ones. */
3260         for (i = 0; i < adapter->num_rx_desc; i++) {
3261                 error = lem_get_buf(adapter, i);
3262                 if (error)
3263                         return (error);
3264         }
3265
3266         /* Setup our descriptor pointers */
3267         adapter->next_rx_desc_to_check = 0;
3268         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3269             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3270
3271         return (0);
3272 }
3273
3274 /*********************************************************************
3275  *
3276  *  Enable receive unit.
3277  *
3278  **********************************************************************/
3279 #define MAX_INTS_PER_SEC        8000
3280 #define DEFAULT_ITR          (1000000000/(MAX_INTS_PER_SEC * 256))
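     /* With MAX_INTS_PER_SEC = 8000 this is 1000000000 / (8000 * 256) ~= 488, in 256 ns units. */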
3281
3282 static void
3283 lem_initialize_receive_unit(struct adapter *adapter)
3284 {
3285         struct ifnet    *ifp = adapter->ifp;
3286         u64     bus_addr;
3287         u32     rctl, rxcsum;
3288
3289         INIT_DEBUGOUT("lem_initialize_receive_unit: begin");
3290
3291         /*
3292          * Make sure receives are disabled while setting
3293          * up the descriptor ring
3294          */
3295         rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3296         E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3297
3298         if (adapter->hw.mac.type >= e1000_82540) {
3299                 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
3300                     adapter->rx_abs_int_delay.value);
3301                 /*
3302                  * Set the interrupt throttling rate. Value is calculated
3303                  * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3304                  */
3305                 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
3306         }
3307
3308         /*
3309         ** When using MSIX interrupts we need to throttle
3310         ** using the EITR register (82574 only)
3311         */
3312         if (adapter->msix)
3313                 for (int i = 0; i < 4; i++)
3314                         E1000_WRITE_REG(&adapter->hw,
3315                             E1000_EITR_82574(i), DEFAULT_ITR);
3316
3317         /* Disable accelerated acknowledgement */
3318         if (adapter->hw.mac.type == e1000_82574)
3319                 E1000_WRITE_REG(&adapter->hw,
3320                     E1000_RFCTL, E1000_RFCTL_ACK_DIS);
3321
3322         /* Setup the Base and Length of the Rx Descriptor Ring */
3323         bus_addr = adapter->rxdma.dma_paddr;
3324         E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3325             adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3326         E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3327             (u32)(bus_addr >> 32));
3328         E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3329             (u32)bus_addr);
3330
3331         /* Setup the Receive Control Register */
3332         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3333         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3334                    E1000_RCTL_RDMTS_HALF |
3335                    (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3336
3337         /* Make sure VLAN Filters are off */
3338         rctl &= ~E1000_RCTL_VFE;
3339
3340         if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3341                 rctl |= E1000_RCTL_SBP;
3342         else
3343                 rctl &= ~E1000_RCTL_SBP;
3344
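             /*
              * Sizes above 2048 use the extended encoding: BSEX scales the
              * SZ field by 16 and LPE permits frames longer than 1522 bytes.
              */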
3345         switch (adapter->rx_buffer_len) {
3346         default:
3347         case 2048:
3348                 rctl |= E1000_RCTL_SZ_2048;
3349                 break;
3350         case 4096:
3351                 rctl |= E1000_RCTL_SZ_4096 |
3352                     E1000_RCTL_BSEX | E1000_RCTL_LPE;
3353                 break;
3354         case 8192:
3355                 rctl |= E1000_RCTL_SZ_8192 |
3356                     E1000_RCTL_BSEX | E1000_RCTL_LPE;
3357                 break;
3358         case 16384:
3359                 rctl |= E1000_RCTL_SZ_16384 |
3360                     E1000_RCTL_BSEX | E1000_RCTL_LPE;
3361                 break;
3362         }
3363
3364         if (ifp->if_mtu > ETHERMTU)
3365                 rctl |= E1000_RCTL_LPE;
3366         else
3367                 rctl &= ~E1000_RCTL_LPE;
3368
3369         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
3370         if ((adapter->hw.mac.type >= e1000_82543) &&
3371             (ifp->if_capenable & IFCAP_RXCSUM)) {
3372                 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3373                 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3374                 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3375         }
3376
3377         /* Enable Receives */
3378         E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
3379
3380         /*
3381          * Setup the HW Rx Head and
3382          * Tail Descriptor Pointers
3383          */
3384         E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
3385         E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
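             /*
              * Head starts at 0 and tail at the last descriptor; the
              * hardware stops filling when head reaches tail, so one slot
              * always remains unused.
              */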
3386
3387         return;
3388 }
3389
3390 /*********************************************************************
3391  *
3392  *  Free receive related data structures.
3393  *
3394  **********************************************************************/
3395 static void
3396 lem_free_receive_structures(struct adapter *adapter)
3397 {
3398         struct em_buffer *rx_buffer;
3399         int i;
3400
3401         INIT_DEBUGOUT("free_receive_structures: begin");
3402
3403         if (adapter->rx_sparemap) {
3404                 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3405                 adapter->rx_sparemap = NULL;
3406         }
3407
3408         /* Cleanup any existing buffers */
3409         if (adapter->rx_buffer_area != NULL) {
3410                 rx_buffer = adapter->rx_buffer_area;
3411                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
3412                         if (rx_buffer->m_head != NULL) {
3413                                 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
3414                                     BUS_DMASYNC_POSTREAD);
3415                                 bus_dmamap_unload(adapter->rxtag,
3416                                     rx_buffer->map);
3417                                 m_freem(rx_buffer->m_head);
3418                                 rx_buffer->m_head = NULL;
3419                         } else if (rx_buffer->map != NULL)
3420                                 bus_dmamap_unload(adapter->rxtag,
3421                                     rx_buffer->map);
3422                         if (rx_buffer->map != NULL) {
3423                                 bus_dmamap_destroy(adapter->rxtag,
3424                                     rx_buffer->map);
3425                                 rx_buffer->map = NULL;
3426                         }
3427                 }
3428         }
3429
3430         if (adapter->rx_buffer_area != NULL) {
3431                 free(adapter->rx_buffer_area, M_DEVBUF);
3432                 adapter->rx_buffer_area = NULL;
3433         }
3434
3435         if (adapter->rxtag != NULL) {
3436                 bus_dma_tag_destroy(adapter->rxtag);
3437                 adapter->rxtag = NULL;
3438         }
3439 }
3440
3441 /*********************************************************************
3442  *
3443  *  This routine executes in interrupt context. It replenishes
3444  *  the mbufs in the descriptor and sends data which has been
3445  *  dma'ed into host memory to upper layer.
3446  *
3447  *  We loop at most count times if count is > 0, or until done if
3448  *  count < 0.
3449  *  
3450  *  For polling we also now return the number of cleaned packets
3451  *********************************************************************/
3452 static int
3453 lem_rxeof(struct adapter *adapter, int count)
3454 {
3455         struct ifnet    *ifp = adapter->ifp;
3456         struct mbuf     *mp;
3457         u8              status, accept_frame = 0, eop = 0;
3458         u16             len, desc_len, prev_len_adj;
3459         int             i, rx_sent = 0;
3460         struct e1000_rx_desc   *current_desc;
3461
3462         EM_RX_LOCK(adapter);
3463         i = adapter->next_rx_desc_to_check;
3464         current_desc = &adapter->rx_desc_base[i];
3465         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3466             BUS_DMASYNC_POSTREAD);
3467
3468         if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
3469                 EM_RX_UNLOCK(adapter);
3470                 return (rx_sent);
3471         }
3472
3473         while ((current_desc->status & E1000_RXD_STAT_DD) &&
3474             (count != 0) &&
3475             (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3476                 struct mbuf *m = NULL;
3477
3478                 mp = adapter->rx_buffer_area[i].m_head;
3479                 /*
3480                  * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3481                  * needs to access the last received byte in the mbuf.
3482                  */
3483                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3484                     BUS_DMASYNC_POSTREAD);
3485
3486                 accept_frame = 1;
3487                 prev_len_adj = 0;
3488                 desc_len = le16toh(current_desc->length);
3489                 status = current_desc->status;
3490                 if (status & E1000_RXD_STAT_EOP) {
3491                         count--;
3492                         eop = 1;
3493                         if (desc_len < ETHER_CRC_LEN) {
3494                                 len = 0;
3495                                 prev_len_adj = ETHER_CRC_LEN - desc_len;
3496                         } else
3497                                 len = desc_len - ETHER_CRC_LEN;
3498                 } else {
3499                         eop = 0;
3500                         len = desc_len;
3501                 }
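                     /*
                      * e.g. if the last descriptor of a chain holds only 2
                      * bytes (all of them CRC), len is 0 and prev_len_adj
                      * is 2; the stray CRC bytes are later trimmed from the
                      * previous fragment.
                      */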
3502
3503                 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3504                         u8      last_byte;
3505                         u32     pkt_len = desc_len;
3506
3507                         if (adapter->fmp != NULL)
3508                                 pkt_len += adapter->fmp->m_pkthdr.len;
3509
3510                         last_byte = *(mtod(mp, caddr_t) + desc_len - 1);                        
3511                         if (TBI_ACCEPT(&adapter->hw, status,
3512                             current_desc->errors, pkt_len, last_byte,
3513                             adapter->min_frame_size, adapter->max_frame_size)) {
3514                                 e1000_tbi_adjust_stats_82543(&adapter->hw,
3515                                     &adapter->stats, pkt_len,
3516                                     adapter->hw.mac.addr,
3517                                     adapter->max_frame_size);
3518                                 if (len > 0)
3519                                         len--;
3520                         } else
3521                                 accept_frame = 0;
3522                 }
3523
3524                 if (accept_frame) {
3525                         if (lem_get_buf(adapter, i) != 0) {
3526                                 ifp->if_iqdrops++;
3527                                 goto discard;
3528                         }
3529
3530                         /* Assign correct length to the current fragment */
3531                         mp->m_len = len;
3532
3533                         if (adapter->fmp == NULL) {
3534                                 mp->m_pkthdr.len = len;
3535                                 adapter->fmp = mp; /* Store the first mbuf */
3536                                 adapter->lmp = mp;
3537                         } else {
3538                                 /* Chain mbuf's together */
3539                                 mp->m_flags &= ~M_PKTHDR;
3540                                 /*
3541                                  * Adjust length of previous mbuf in chain if
3542                                  * we received less than 4 bytes in the last
3543                                  * descriptor.
3544                                  */
3545                                 if (prev_len_adj > 0) {
3546                                         adapter->lmp->m_len -= prev_len_adj;
3547                                         adapter->fmp->m_pkthdr.len -=
3548                                             prev_len_adj;
3549                                 }
3550                                 adapter->lmp->m_next = mp;
3551                                 adapter->lmp = adapter->lmp->m_next;
3552                                 adapter->fmp->m_pkthdr.len += len;
3553                         }
3554
3555                         if (eop) {
3556                                 adapter->fmp->m_pkthdr.rcvif = ifp;
3557                                 ifp->if_ipackets++;
3558                                 lem_receive_checksum(adapter, current_desc,
3559                                     adapter->fmp);
3560 #ifndef __NO_STRICT_ALIGNMENT
3561                                 if (adapter->max_frame_size >
3562                                     (MCLBYTES - ETHER_ALIGN) &&
3563                                     lem_fixup_rx(adapter) != 0)
3564                                         goto skip;
3565 #endif
3566                                 if (status & E1000_RXD_STAT_VP) {
3567 #if __FreeBSD_version < 700000
3568                                         VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
3569                                             (le16toh(current_desc->special) &
3570                                             E1000_RXD_SPC_VLAN_MASK));
3571 #else
3572                                         adapter->fmp->m_pkthdr.ether_vtag =
3573                                             (le16toh(current_desc->special) &
3574                                             E1000_RXD_SPC_VLAN_MASK);
3575                                         adapter->fmp->m_flags |= M_VLANTAG;
3576 #endif
3577                                 }
3578 #ifndef __NO_STRICT_ALIGNMENT
3579 skip:
3580 #endif
3581                                 m = adapter->fmp;
3582                                 adapter->fmp = NULL;
3583                                 adapter->lmp = NULL;
3584                         }
3585                 } else {
3586                         ifp->if_ierrors++;
3587 discard:
3588                         /* Reuse loaded DMA map and just update mbuf chain */
3589                         mp = adapter->rx_buffer_area[i].m_head;
3590                         mp->m_len = mp->m_pkthdr.len = MCLBYTES;
3591                         mp->m_data = mp->m_ext.ext_buf;
3592                         mp->m_next = NULL;
3593                         if (adapter->max_frame_size <=
3594                             (MCLBYTES - ETHER_ALIGN))
3595                                 m_adj(mp, ETHER_ALIGN);
3596                         if (adapter->fmp != NULL) {
3597                                 m_freem(adapter->fmp);
3598                                 adapter->fmp = NULL;
3599                                 adapter->lmp = NULL;
3600                         }
3601                         m = NULL;
3602                 }
3603
3604                 /* Zero out the receive descriptors status. */
3605                 current_desc->status = 0;
3606                 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3607                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3608
3609                 /* Advance our pointers to the next descriptor. */
3610                 if (++i == adapter->num_rx_desc)
3611                         i = 0;
3612                 /* Call into the stack */
3613                 if (m != NULL) {
3614                         adapter->next_rx_desc_to_check = i;
3615                         EM_RX_UNLOCK(adapter);
3616                         (*ifp->if_input)(ifp, m);
3617                         EM_RX_LOCK(adapter);
3618                         rx_sent++;
3619                         i = adapter->next_rx_desc_to_check;
3620                 }
3621                 current_desc = &adapter->rx_desc_base[i];
3622         }
3623         adapter->next_rx_desc_to_check = i;
3624
3625         /* Advance the E1000's Receive Queue #0  "Tail Pointer". */
3626         if (--i < 0)
3627                 i = adapter->num_rx_desc - 1;
3628         E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3629         EM_RX_UNLOCK(adapter);
3630         return (rx_sent);
3631 }
3632
3633 #ifndef __NO_STRICT_ALIGNMENT
3634 /*
3635  * When jumbo frames are enabled we should realign the entire payload on
3636  * architectures with strict alignment. This is a serious design mistake in the
3637  * 8254x as it nullifies the benefit of DMA operations. The 8254x only allows
3638  * the RX buffer size to be 2048/4096/8192/16384. What we really want is
3639  * 2048 - ETHER_ALIGN to align its payload. On architectures without strict
3640  * alignment restrictions the 8254x still performs unaligned memory accesses,
3641  * which reduce performance too. To avoid copying over an entire frame to
3642  * align it, we allocate a new mbuf and copy the ethernet header to the new
3643  * mbuf. The new mbuf is prepended to the existing mbuf chain.
3644  *
3645  * Be aware that the best performance of the 8254x is achieved only when jumbo
3646  * frames are not used at all on architectures with strict alignment.
3647  */
3648 static int
3649 lem_fixup_rx(struct adapter *adapter)
3650 {
3651         struct mbuf *m, *n;
3652         int error;
3653
3654         error = 0;
3655         m = adapter->fmp;
3656         if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
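                     /*
                      * Room to spare in the cluster: slide the whole frame
                      * up by ETHER_HDR_LEN so the payload following the
                      * ethernet header becomes 4-byte aligned.
                      */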
3657                 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
3658                 m->m_data += ETHER_HDR_LEN;
3659         } else {
3660                 MGETHDR(n, M_DONTWAIT, MT_DATA);
3661                 if (n != NULL) {
3662                         bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
3663                         m->m_data += ETHER_HDR_LEN;
3664                         m->m_len -= ETHER_HDR_LEN;
3665                         n->m_len = ETHER_HDR_LEN;
3666                         M_MOVE_PKTHDR(n, m);
3667                         n->m_next = m;
3668                         adapter->fmp = n;
3669                 } else {
3670                         adapter->dropped_pkts++;
3671                         m_freem(adapter->fmp);
3672                         adapter->fmp = NULL;
3673                         error = ENOMEM;
3674                 }
3675         }
3676
3677         return (error);
3678 }
3679 #endif
3680
3681 /*********************************************************************
3682  *
3683  *  Verify that the hardware indicated that the checksum is valid.
3684  *  Inform the stack about the status of checksum so that stack
3685  *  doesn't spend time verifying the checksum.
3686  *
3687  *********************************************************************/
3688 static void
3689 lem_receive_checksum(struct adapter *adapter,
3690             struct e1000_rx_desc *rx_desc, struct mbuf *mp)
3691 {
3692         /* 82543 or newer only */
3693         if ((adapter->hw.mac.type < e1000_82543) ||
3694             /* Ignore Checksum bit is set */
3695             (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3696                 mp->m_pkthdr.csum_flags = 0;
3697                 return;
3698         }
3699
3700         if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3701                 /* Did it pass? */
3702                 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3703                         /* IP Checksum Good */
3704                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3705                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3706
3707                 } else {
3708                         mp->m_pkthdr.csum_flags = 0;
3709                 }
3710         }
3711
3712         if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3713                 /* Did it pass? */
3714                 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
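                             /*
                              * Hardware verified the TCP/UDP checksum; mark
                              * both data and pseudo-header as valid and hand
                              * the stack the conventional "good" value 0xffff.
                              */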
3715                         mp->m_pkthdr.csum_flags |=
3716                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3717                         mp->m_pkthdr.csum_data = htons(0xffff);
3718                 }
3719         }
3720 }
3721
3722 #if __FreeBSD_version >= 700029
3723 /*
3724  * This routine is run via an vlan
3725  * config EVENT
3726  */
3727 static void
3728 lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3729 {
3730         struct adapter  *adapter = ifp->if_softc;
3731         u32             index, bit;
3732
3733         if (ifp->if_softc != arg)   /* Not our event */
3734                 return;
3735
3736         if ((vtag == 0) || (vtag > 4095))       /* Invalid ID */
3737                 return;
3738
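             /*
              * The 4096 VLAN IDs map onto 128 32-bit VFTA words,
              * e.g. vtag 100 -> word 3 (100 >> 5), bit 4 (100 & 0x1F).
              */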
3739         index = (vtag >> 5) & 0x7F;
3740         bit = vtag & 0x1F;
3741         lem_shadow_vfta[index] |= (1 << bit);
3742         ++adapter->num_vlans;
3743         /* Re-init to load the changes */
3744         lem_init(adapter);
3745 }
3746
3747 /*
3748  * This routine is run via an vlan
3749  * unconfig EVENT
3750  */
3751 static void
3752 lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3753 {
3754         struct adapter  *adapter = ifp->if_softc;
3755         u32             index, bit;
3756
3757         if (ifp->if_softc != arg)
3758                 return;
3759
3760         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3761                 return;
3762
3763         index = (vtag >> 5) & 0x7F;
3764         bit = vtag & 0x1F;
3765         lem_shadow_vfta[index] &= ~(1 << bit);
3766         --adapter->num_vlans;
3767         /* Re-init to load the changes */
3768         lem_init(adapter);
3769 }
3770
3771 static void
3772 lem_setup_vlan_hw_support(struct adapter *adapter)
3773 {
3774         struct e1000_hw *hw = &adapter->hw;
3775         u32             reg;
3776
3777         /*
3778         ** We get here thru init_locked, meaning
3779         ** a soft reset, this has already cleared
3780         ** the VFTA and other state, so if there
3781         ** have been no vlan's registered do nothing.
3782         */
3783         if (adapter->num_vlans == 0)
3784                 return;
3785
3786         /*
3787         ** A soft reset zero's out the VFTA, so
3788         ** we need to repopulate it now.
3789         */
3790         for (int i = 0; i < EM_VFTA_SIZE; i++)
3791                 if (lem_shadow_vfta[i] != 0)
3792                         E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
3793                             i, lem_shadow_vfta[i]);
3794
3795         reg = E1000_READ_REG(hw, E1000_CTRL);
3796         reg |= E1000_CTRL_VME;
3797         E1000_WRITE_REG(hw, E1000_CTRL, reg);
3798
3799         /* Enable the Filter Table */
3800         reg = E1000_READ_REG(hw, E1000_RCTL);
3801         reg &= ~E1000_RCTL_CFIEN;
3802         reg |= E1000_RCTL_VFE;
3803         E1000_WRITE_REG(hw, E1000_RCTL, reg);
3804
3805         /* Update the frame size */
3806         E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
3807             adapter->max_frame_size + VLAN_TAG_SIZE);
3808 }
3809 #endif
3810
3811 static void
3812 lem_enable_intr(struct adapter *adapter)
3813 {
3814         struct e1000_hw *hw = &adapter->hw;
3815         u32 ims_mask = IMS_ENABLE_MASK;
3816
3817         if (adapter->msix) {
3818                 E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
3819                 ims_mask |= EM_MSIX_MASK;
3820         } 
3821         E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
3822 }
3823
3824 static void
3825 lem_disable_intr(struct adapter *adapter)
3826 {
3827         struct e1000_hw *hw = &adapter->hw;
3828
3829         if (adapter->msix)
3830                 E1000_WRITE_REG(hw, EM_EIAC, 0);
3831         E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
3832 }
3833
3834 /*
3835  * Bit of a misnomer, what this really means is
3836  * to enable OS management of the system... aka
3837  * to disable special hardware management features 
3838  */
3839 static void
3840 lem_init_manageability(struct adapter *adapter)
3841 {
3842         /* A shared code workaround */
3843         if (adapter->has_manage) {
3844                 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3845                 /* disable hardware interception of ARP */
3846                 manc &= ~(E1000_MANC_ARP_EN);
3847                 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3848         }
3849 }
3850
3851 /*
3852  * Give control back to hardware management
3853  * controller if there is one.
3854  */
3855 static void
3856 lem_release_manageability(struct adapter *adapter)
3857 {
3858         if (adapter->has_manage) {
3859                 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
3860
3861                 /* re-enable hardware interception of ARP */
3862                 manc |= E1000_MANC_ARP_EN;
3863                 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
3864         }
3865 }
3866
3867 /*
3868  * lem_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3869  * For ASF and Pass Through versions of f/w this means
3870  * that the driver is loaded. For AMT version type f/w
3871  * this means that the network i/f is open.
3872  */
3873 static void
3874 lem_get_hw_control(struct adapter *adapter)
3875 {
3876         u32 ctrl_ext;
3877
3878         ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3879         E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3880             ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3881         return;
3882 }
3883
3884 /*
3885  * lem_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3886  * For ASF and Pass Through versions of f/w this means that
3887  * the driver is no longer loaded. For AMT versions of the
3888  * f/w this means that the network i/f is closed.
3889  */
3890 static void
3891 lem_release_hw_control(struct adapter *adapter)
3892 {
3893         u32 ctrl_ext;
3894
3895         if (!adapter->has_manage)
3896                 return;
3897
3898         ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3899         E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3900             ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3901         return;
3902 }
3903
3904 static int
3905 lem_is_valid_ether_addr(u8 *addr)
3906 {
3907         char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3908
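             /* Reject multicast addresses (low bit of the first octet) and the all-zero address. */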
3909         if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3910                 return (FALSE);
3911         }
3912
3913         return (TRUE);
3914 }
3915
3916 /*
3917 ** Parse the interface capabilities with regard
3918 ** to both system management and wake-on-lan for
3919 ** later use.
3920 */
3921 static void
3922 lem_get_wakeup(device_t dev)
3923 {
3924         struct adapter  *adapter = device_get_softc(dev);
3925         u16             eeprom_data = 0, device_id, apme_mask;
3926
3927         adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
3928         apme_mask = EM_EEPROM_APME;
3929
3930         switch (adapter->hw.mac.type) {
3931         case e1000_82542:
3932         case e1000_82543:
3933                 break;
3934         case e1000_82544:
3935                 e1000_read_nvm(&adapter->hw,
3936                     NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
3937                 apme_mask = EM_82544_APME;
3938                 break;
3939         case e1000_82546:
3940         case e1000_82546_rev_3:
3941                 if (adapter->hw.bus.func == 1) {
3942                         e1000_read_nvm(&adapter->hw,
3943                             NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3944                         break;
3945                 } else
3946                         e1000_read_nvm(&adapter->hw,
3947                             NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3948                 break;
3949         default:
3950                 e1000_read_nvm(&adapter->hw,
3951                     NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3952                 break;
3953         }
3954         if (eeprom_data & apme_mask)
3955                 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
3956         /*
3957          * We have the eeprom settings, now apply the special cases
3958          * where the eeprom may be wrong or the board won't support
3959          * wake on lan on a particular port
3960          */
3961         device_id = pci_get_device(dev);
3962         switch (device_id) {
3963         case E1000_DEV_ID_82546GB_PCIE:
3964                 adapter->wol = 0;
3965                 break;
3966         case E1000_DEV_ID_82546EB_FIBER:
3967         case E1000_DEV_ID_82546GB_FIBER:
3968                 /* Wake events only supported on port A for dual fiber
3969                  * regardless of eeprom setting */
3970                 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
3971                     E1000_STATUS_FUNC_1)
3972                         adapter->wol = 0;
3973                 break;
3974         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
3975                 /* if quad port adapter, disable WoL on all but port A */
3976                 if (global_quad_port_a != 0)
3977                         adapter->wol = 0;
3978                 /* Reset for multiple quad port adapters */
3979                 if (++global_quad_port_a == 4)
3980                         global_quad_port_a = 0;
3981                 break;
3982         }
3983         return;
3984 }
3985
3986
3987 /*
3988  * Enable PCI Wake On Lan capability
3989  */
3990 static void
3991 lem_enable_wakeup(device_t dev)
3992 {
3993         struct adapter  *adapter = device_get_softc(dev);
3994         struct ifnet    *ifp = adapter->ifp;
3995         u32             pmc, ctrl, ctrl_ext, rctl;
3996         u16             status;
3997
3998         if (pci_find_extcap(dev, PCIY_PMG, &pmc) != 0)
3999                 return;
4000
4001         /* Advertise the wakeup capability */
4002         ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4003         ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
4004         E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4005         E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4006
4007         /* Keep the laser running on Fiber adapters */
4008         if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
4009             adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
4010                 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4011                 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4012                 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
4013         }
4014
4015         /*
4016         ** Determine type of Wakeup: note that wol
4017         ** is set with all bits on by default.
4018         */
4019         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
4020                 adapter->wol &= ~E1000_WUFC_MAG;
4021
4022         if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
4023                 adapter->wol &= ~E1000_WUFC_MC;
4024         else {
4025                 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4026                 rctl |= E1000_RCTL_MPE;
4027                 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4028         }
4029
4030         if (adapter->hw.mac.type == e1000_pchlan) {
4031                 if (lem_enable_phy_wakeup(adapter))
4032                         return;
4033         } else {
4034                 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4035                 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
4036         }
4037
4038
4039         /* Request PME */
4040         status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4041         status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4042         if (ifp->if_capenable & IFCAP_WOL)
4043                 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4044         pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
4045
4046         return;
4047 }
4048
4049 /*
4050 ** WOL in the newer chipset interfaces (pchlan)
4051 ** requires things to be copied into the PHY
4052 */
4053 static int
4054 lem_enable_phy_wakeup(struct adapter *adapter)
4055 {
4056         struct e1000_hw *hw = &adapter->hw;
4057         u32 mreg, ret = 0;
4058         u16 preg;
4059
4060         /* copy MAC RARs to PHY RARs */
4061         for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4062                 mreg = E1000_READ_REG(hw, E1000_RAL(i));
4063                 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
4064                 e1000_write_phy_reg(hw, BM_RAR_M(i),
4065                     (u16)((mreg >> 16) & 0xFFFF));
4066                 mreg = E1000_READ_REG(hw, E1000_RAH(i));
4067                 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
4068                 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
4069                     (u16)((mreg >> 16) & 0xFFFF));
4070         }
4071
4072         /* copy MAC MTA to PHY MTA */
4073         for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
4074                 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
4075                 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
4076                 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
4077                     (u16)((mreg >> 16) & 0xFFFF));
4078         }
4079
4080         /* configure PHY Rx Control register */
4081         e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
4082         mreg = E1000_READ_REG(hw, E1000_RCTL);
4083         if (mreg & E1000_RCTL_UPE)
4084                 preg |= BM_RCTL_UPE;
4085         if (mreg & E1000_RCTL_MPE)
4086                 preg |= BM_RCTL_MPE;
4087         preg &= ~(BM_RCTL_MO_MASK);
4088         if (mreg & E1000_RCTL_MO_3)
4089                 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
4090                                 << BM_RCTL_MO_SHIFT);
4091         if (mreg & E1000_RCTL_BAM)
4092                 preg |= BM_RCTL_BAM;
4093         if (mreg & E1000_RCTL_PMCF)
4094                 preg |= BM_RCTL_PMCF;
4095         mreg = E1000_READ_REG(hw, E1000_CTRL);
4096         if (mreg & E1000_CTRL_RFCE)
4097                 preg |= BM_RCTL_RFCE;
4098         e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
4099
4100         /* enable PHY wakeup in MAC register */
4101         E1000_WRITE_REG(hw, E1000_WUC,
4102             E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
4103         E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
4104
4105         /* configure and enable PHY wakeup in PHY registers */
4106         e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
4107         e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4108
4109         /* activate PHY wakeup */
4110         ret = hw->phy.ops.acquire(hw);
4111         if (ret) {
4112                 printf("Could not acquire PHY\n");
4113                 return ret;
4114         }
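        /*
         * BM_WUC_ENABLE_REG lives on PHY page 769 (BM_WUC_ENABLE_PAGE), so
         * the page is selected first and the raw MDIC accessors are used
         * for the read-modify-write of the wakeup enable bits below.
         */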
4115         e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4116                                  (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
4117         ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
4118         if (ret) {
4119                 printf("Could not read PHY page 769\n");
4120                 goto out;
4121         }
4122         preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
4123         ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
4124         if (ret)
4125                 printf("Could not set PHY Host Wakeup bit\n");
4126 out:
4127         hw->phy.ops.release(hw);
4128
4129         return ret;
4130 }
4131
4132 static void
4133 lem_led_func(void *arg, int onoff)
4134 {
4135         struct adapter  *adapter = arg;
4136
4137         EM_CORE_LOCK(adapter);
4138         if (onoff) {
4139                 e1000_setup_led(&adapter->hw);
4140                 e1000_led_on(&adapter->hw);
4141         } else {
4142                 e1000_led_off(&adapter->hw);
4143                 e1000_cleanup_led(&adapter->hw);
4144         }
4145         EM_CORE_UNLOCK(adapter);
4146 }
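
/*
 * lem_led_func() is the callback handed to the led(4) framework.  A minimal
 * registration sketch, assuming the attach/detach code does the equivalent
 * (names are illustrative, not verbatim from this file):
 *
 *      adapter->led_dev = led_create(lem_led_func, adapter,
 *          device_get_nameunit(dev));
 *      ...
 *      if (adapter->led_dev != NULL)
 *              led_destroy(adapter->led_dev);
 *
 * The LED can then be driven from userland through /dev/led/<name>.
 */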
4147
4148 /*********************************************************************
4149 * 82544 Coexistence issue workaround.
4150 *    There are 2 issues.
4151 *       1. Transmit Hang issue.
4152 *    To detect this issue, the following equation can be used:
4153 *         SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4154 *         If SUM[3:0] is between 1 and 4, we will have this issue.
4155 *
4156 *       2. DAC issue.
4157 *    To detect this issue, the same equation can be used:
4158 *         SIZE[3:0] + ADDR[2:0] = SUM[3:0].
4159 *         If SUM[3:0] is between 9 and c, we will have this issue.
4160 *
4161 *
4162 *    WORKAROUND:
4163 *         Make sure the ending address does not land
4164 *         on 1,2,3,4 (Hang) or 9,a,b,c (DAC).
4165 *
4166 *************************************************************************/
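/*
 * Worked example (illustrative values): a segment with ADDR[2:0] = 0x6 and
 * SIZE[3:0] = 0xE gives SUM = (0x6 + 0xE) & 0xF = 0x4, which falls in the
 * Hang range (1-4).  lem_fill_descriptors() below therefore splits the last
 * 4 bytes into a second descriptor; the first descriptor then ends on
 * SUM = 0x0, which is safe.
 */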
4167 static u32
4168 lem_fill_descriptors (bus_addr_t address, u32 length,
4169                 PDESC_ARRAY desc_array)
4170 {
4171         u32 safe_terminator;
4172
4173         /* The issue is sensitive to both length and address, */
4174         /* so handle short (<= 4 byte) segments first. */
4175         if (length <= 4) {
4176                 desc_array->descriptor[0].address = address;
4177                 desc_array->descriptor[0].length = length;
4178                 desc_array->elements = 1;
4179                 return (desc_array->elements);
4180         }
4181         safe_terminator = (u32)((((u32)address & 0x7) +
4182             (length & 0xF)) & 0xF);
4183         /* If it does not fall within 0x1-0x4 or 0x9-0xC, a single descriptor is safe; return. */
4184         if (safe_terminator == 0   ||
4185             (safe_terminator > 4   &&
4186             safe_terminator < 9)   ||
4187             (safe_terminator > 0xC &&
4188             safe_terminator <= 0xF)) {
4189                 desc_array->descriptor[0].address = address;
4190                 desc_array->descriptor[0].length = length;
4191                 desc_array->elements = 1;
4192                 return (desc_array->elements);
4193         }
4194
4195         desc_array->descriptor[0].address = address;
4196         desc_array->descriptor[0].length = length - 4;
4197         desc_array->descriptor[1].address = address + (length - 4);
4198         desc_array->descriptor[1].length = 4;
4199         desc_array->elements = 2;
4200         return (desc_array->elements);
4201 }
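
/*
 * Usage sketch: on an 82544 in PCI-X mode the transmit path is expected to
 * run each DMA segment through lem_fill_descriptors() and queue one hardware
 * TX descriptor per returned element.  A minimal sketch, assuming the usual
 * structure of the caller (lem_xmit()); names are illustrative:
 *
 *      DESC_ARRAY desc_array;
 *      u32 n, j;
 *
 *      n = lem_fill_descriptors(segs[i].ds_addr, segs[i].ds_len, &desc_array);
 *      for (j = 0; j < n; j++) {
 *              ... program one TX descriptor from
 *              desc_array.descriptor[j].address / .length ...
 *      }
 */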
4202
4203 /**********************************************************************
4204  *
4205  *  Update the board statistics counters.
4206  *
4207  **********************************************************************/
4208 static void
4209 lem_update_stats_counters(struct adapter *adapter)
4210 {
4211         struct ifnet   *ifp;
4212
4213         if(adapter->hw.phy.media_type == e1000_media_type_copper ||
4214            (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
4215                 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
4216                 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
4217         }
4218         adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
4219         adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
4220         adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
4221         adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
4222
4223         adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
4224         adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
4225         adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
4226         adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
4227         adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
4228         adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
4229         adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
4230         adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
4231         adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
4232         adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
4233         adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
4234         adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
4235         adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
4236         adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
4237         adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
4238         adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
4239         adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
4240         adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
4241         adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
4242         adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
4243
4244         /* For the 64-bit byte counters the low dword must be read first. */
4245         /* Both registers clear on the read of the high dword. */
4246
4247         adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
4248         adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);
4249
4250         adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
4251         adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
4252         adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
4253         adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
4254         adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
4255
4256         adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
4257         adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
4258
4259         adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
4260         adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
4261         adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
4262         adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
4263         adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
4264         adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
4265         adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
4266         adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
4267         adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
4268         adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
4269
4270         if (adapter->hw.mac.type >= e1000_82543) {
4271                 adapter->stats.algnerrc += 
4272                 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
4273                 adapter->stats.rxerrc += 
4274                 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
4275                 adapter->stats.tncrs += 
4276                 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
4277                 adapter->stats.cexterr += 
4278                 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
4279                 adapter->stats.tsctc += 
4280                 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
4281                 adapter->stats.tsctfc += 
4282                 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
4283         }
4284         ifp = adapter->ifp;
4285
4286         ifp->if_collisions = adapter->stats.colc;
4287
4288         /* Rx Errors */
4289         ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
4290             adapter->stats.crcerrs + adapter->stats.algnerrc +
4291             adapter->stats.ruc + adapter->stats.roc +
4292             adapter->stats.mpc + adapter->stats.cexterr;
4293
4294         /* Tx Errors */
4295         ifp->if_oerrors = adapter->stats.ecol +
4296             adapter->stats.latecol + adapter->watchdog_events;
4297 }
4298
4299
4300 /**********************************************************************
4301  *
4302  *  This routine is called only when lem_display_debug_stats is enabled.
4303  *  This routine provides a way to take a look at important statistics
4304  *  maintained by the driver and hardware.
4305  *
4306  **********************************************************************/
4307 static void
4308 lem_print_debug_info(struct adapter *adapter)
4309 {
4310         device_t dev = adapter->dev;
4311         u8 *hw_addr = adapter->hw.hw_addr;
4312
4313         device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
4314         device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
4315             E1000_READ_REG(&adapter->hw, E1000_CTRL),
4316             E1000_READ_REG(&adapter->hw, E1000_RCTL));
4317         device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
4318             ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),
4319             (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
4320         device_printf(dev, "Flow control watermarks high = %d low = %d\n",
4321             adapter->hw.fc.high_water,
4322             adapter->hw.fc.low_water);
4323         device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
4324             E1000_READ_REG(&adapter->hw, E1000_TIDV),
4325             E1000_READ_REG(&adapter->hw, E1000_TADV));
4326         device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
4327             E1000_READ_REG(&adapter->hw, E1000_RDTR),
4328             E1000_READ_REG(&adapter->hw, E1000_RADV));
4329         device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
4330             (long long)adapter->tx_fifo_wrk_cnt,
4331             (long long)adapter->tx_fifo_reset_cnt);
4332         device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
4333             E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
4334             E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
4335         device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
4336             E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
4337             E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
4338         device_printf(dev, "Num Tx descriptors avail = %d\n",
4339             adapter->num_tx_desc_avail);
4340         device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
4341             adapter->no_tx_desc_avail1);
4342         device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
4343             adapter->no_tx_desc_avail2);
4344         device_printf(dev, "Std mbuf failed = %ld\n",
4345             adapter->mbuf_alloc_failed);
4346         device_printf(dev, "Std mbuf cluster failed = %ld\n",
4347             adapter->mbuf_cluster_failed);
4348         device_printf(dev, "Driver dropped packets = %ld\n",
4349             adapter->dropped_pkts);
4350         device_printf(dev, "Driver tx dma failure in encap = %ld\n",
4351                 adapter->no_tx_dma_setup);
4352 }
4353
4354 static void
4355 lem_print_hw_stats(struct adapter *adapter)
4356 {
4357         device_t dev = adapter->dev;
4358
4359         device_printf(dev, "Excessive collisions = %lld\n",
4360             (long long)adapter->stats.ecol);
4361 #if     (DEBUG_HW > 0)  /* Don't output these errors normally */
4362         device_printf(dev, "Symbol errors = %lld\n",
4363             (long long)adapter->stats.symerrs);
4364 #endif
4365         device_printf(dev, "Sequence errors = %lld\n",
4366             (long long)adapter->stats.sec);
4367         device_printf(dev, "Defer count = %lld\n",
4368             (long long)adapter->stats.dc);
4369         device_printf(dev, "Missed Packets = %lld\n",
4370             (long long)adapter->stats.mpc);
4371         device_printf(dev, "Receive No Buffers = %lld\n",
4372             (long long)adapter->stats.rnbc);
4373         /* RLEC is inaccurate on some hardware, so calculate our own. */
4374         device_printf(dev, "Receive Length Errors = %lld\n",
4375             ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
4376         device_printf(dev, "Receive errors = %lld\n",
4377             (long long)adapter->stats.rxerrc);
4378         device_printf(dev, "Crc errors = %lld\n",
4379             (long long)adapter->stats.crcerrs);
4380         device_printf(dev, "Alignment errors = %lld\n",
4381             (long long)adapter->stats.algnerrc);
4382         device_printf(dev, "Collision/Carrier extension errors = %lld\n",
4383             (long long)adapter->stats.cexterr);
4384         device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
4385         device_printf(dev, "watchdog timeouts = %ld\n",
4386             adapter->watchdog_events);
4387         device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
4388             " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
4389             adapter->tx_irq , adapter->link_irq);
4390         device_printf(dev, "XON Rcvd = %lld\n",
4391             (long long)adapter->stats.xonrxc);
4392         device_printf(dev, "XON Xmtd = %lld\n",
4393             (long long)adapter->stats.xontxc);
4394         device_printf(dev, "XOFF Rcvd = %lld\n",
4395             (long long)adapter->stats.xoffrxc);
4396         device_printf(dev, "XOFF Xmtd = %lld\n",
4397             (long long)adapter->stats.xofftxc);
4398         device_printf(dev, "Good Packets Rcvd = %lld\n",
4399             (long long)adapter->stats.gprc);
4400         device_printf(dev, "Good Packets Xmtd = %lld\n",
4401             (long long)adapter->stats.gptc);
4402 }
4403
4404 /**********************************************************************
4405  *
4406  *  This routine provides a way to dump out the adapter eeprom,
4407  *  often a useful debug/service tool. This only dumps the first
4408  *  32 words; the data that matters lies within that range.
4409  *
4410  **********************************************************************/
4411 static void
4412 lem_print_nvm_info(struct adapter *adapter)
4413 {
4414         u16     eeprom_data;
4415         int     i, j, row = 0;
4416
4417         /* It's a bit crude, but it gets the job done */
4418         printf("\nInterface EEPROM Dump:\n");
4419         printf("Offset\n0x0000  ");
4420         for (i = 0, j = 0; i < 32; i++, j++) {
4421                 if (j == 8) { /* Make the offset block */
4422                         j = 0; ++row;
4423                         printf("\n0x00%x0  ",row);
4424                 }
4425                 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
4426                 printf("%04x ", eeprom_data);
4427         }
4428         printf("\n");
4429 }
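
/*
 * The dump prints eight 16-bit words per row, so the output looks roughly
 * like this (word values illustrative only):
 *
 *      Interface EEPROM Dump:
 *      Offset
 *      0x0000  xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx
 *      0x0010  xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx
 *      0x0020  xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx
 *      0x0030  xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx
 */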
4430
4431 static int
4432 lem_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
4433 {
4434         struct adapter *adapter;
4435         int error;
4436         int result;
4437
4438         result = -1;
4439         error = sysctl_handle_int(oidp, &result, 0, req);
4440
4441         if (error || !req->newptr)
4442                 return (error);
4443
4444         if (result == 1) {
4445                 adapter = (struct adapter *)arg1;
4446                 lem_print_debug_info(adapter);
4447         }
4448         /*
4449          * This value will cause a hex dump of the
4450          * first 32 16-bit words of the EEPROM to
4451          * the screen.
4452          */
4453         if (result == 2) {
4454                 adapter = (struct adapter *)arg1;
4455                 lem_print_nvm_info(adapter);
4456         }
4457
4458         return (error);
4459 }
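
/*
 * Assuming the attach code registers this handler as a per-device "debug"
 * sysctl (the node and device names below are assumptions, not taken from
 * this section), the dumps can be triggered from userland with, e.g.:
 *
 *      sysctl dev.em.0.debug=1        (print the debug info above)
 *      sysctl dev.em.0.debug=2        (hex-dump the first 32 EEPROM words)
 */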
4460
4461
4462 static int
4463 lem_sysctl_stats(SYSCTL_HANDLER_ARGS)
4464 {
4465         struct adapter *adapter;
4466         int error;
4467         int result;
4468
4469         result = -1;
4470         error = sysctl_handle_int(oidp, &result, 0, req);
4471
4472         if (error || !req->newptr)
4473                 return (error);
4474
4475         if (result == 1) {
4476                 adapter = (struct adapter *)arg1;
4477                 lem_print_hw_stats(adapter);
4478         }
4479
4480         return (error);
4481 }
4482
4483 static int
4484 lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
4485 {
4486         struct em_int_delay_info *info;
4487         struct adapter *adapter;
4488         u32 regval;
4489         int error;
4490         int usecs;
4491         int ticks;
4492
4493         info = (struct em_int_delay_info *)arg1;
4494         usecs = info->value;
4495         error = sysctl_handle_int(oidp, &usecs, 0, req);
4496         if (error != 0 || req->newptr == NULL)
4497                 return (error);
4498         if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
4499                 return (EINVAL);
4500         info->value = usecs;
4501         ticks = EM_USECS_TO_TICKS(usecs);
4502
4503         adapter = info->adapter;
4504         
4505         EM_CORE_LOCK(adapter);
4506         regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
4507         regval = (regval & ~0xffff) | (ticks & 0xffff);
4508         /* Handle a few special cases. */
4509         switch (info->offset) {
4510         case E1000_RDTR:
4511                 break;
4512         case E1000_TIDV:
4513                 if (ticks == 0) {
4514                         adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
4515                         /* Don't write 0 into the TIDV register. */
4516                         regval++;
4517                 } else
4518                         adapter->txd_cmd |= E1000_TXD_CMD_IDE;
4519                 break;
4520         }
4521         E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
4522         EM_CORE_UNLOCK(adapter);
4523         return (0);
4524 }
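
/*
 * The interrupt-delay registers handled here (RDTR/RADV/TIDV/TADV) count in
 * units of 1.024 microseconds, which is what EM_USECS_TO_TICKS() and
 * EM_TICKS_TO_USECS() convert between; only the low 16 bits of each register
 * hold the delay value, hence the 0xffff masking above.
 */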
4525
4526 static void
4527 lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
4528         const char *description, struct em_int_delay_info *info,
4529         int offset, int value)
4530 {
4531         info->adapter = adapter;
4532         info->offset = offset;
4533         info->value = value;
4534         SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
4535             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4536             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
4537             info, 0, lem_sysctl_int_delay, "I", description);
4538 }
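
/*
 * A minimal usage sketch; the attach code normally does the equivalent, but
 * the names and default shown here are illustrative, not verbatim:
 *
 *      lem_add_int_delay_sysctl(adapter, "rx_int_delay",
 *          "receive interrupt delay in usecs", &adapter->rx_int_delay,
 *          E1000_REGISTER(&adapter->hw, E1000_RDTR), lem_rx_int_delay_dflt);
 */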
4539
4540 #ifndef EM_LEGACY_IRQ
4541 static void
4542 lem_add_rx_process_limit(struct adapter *adapter, const char *name,
4543         const char *description, int *limit, int value)
4544 {
4545         *limit = value;
4546         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4547             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4548             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4549 }
4550 #endif
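
/*
 * The resulting read/write sysctl (typically registered from attach as
 * "rx_processing_limit"; the name is an assumption based on this driver's
 * conventions) caps how many received packets lem_rxeof() handles per pass
 * of the deferred RX/TX task.
 */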
4551
4552