1 /**************************************************************************
2
3 Copyright (c) 2001-2005, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /*$FreeBSD$*/
35
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39
40 #include <dev/em/if_em.h>
41
42 /*********************************************************************
43  *  Set this to one to display debug statistics                                                   
44  *********************************************************************/
45 int             em_display_debug_stats = 0;
46
47 /*********************************************************************
48  *  Driver version
49  *********************************************************************/
50
51 char em_driver_version[] = "Version - 3.2.18";
52
53
54 /*********************************************************************
55  *  PCI Device ID Table
56  *
57  *  Used by probe to select which devices the driver attaches to
58  *  Last field stores an index into em_strings
59  *  Last entry must be all 0s
60  *
61  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62  *********************************************************************/
63
64 static em_vendor_info_t em_vendor_info_array[] =
65 {
66         /* Intel(R) PRO/1000 Network Connection */
67         { 0x8086, E1000_DEV_ID_82540EM,             PCI_ANY_ID, PCI_ANY_ID, 0},
68         { 0x8086, E1000_DEV_ID_82540EM_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
69         { 0x8086, E1000_DEV_ID_82540EP,             PCI_ANY_ID, PCI_ANY_ID, 0},
70         { 0x8086, E1000_DEV_ID_82540EP_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
71         { 0x8086, E1000_DEV_ID_82540EP_LP,          PCI_ANY_ID, PCI_ANY_ID, 0},
72
73         { 0x8086, E1000_DEV_ID_82541EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
74         { 0x8086, E1000_DEV_ID_82541ER,             PCI_ANY_ID, PCI_ANY_ID, 0},
75         { 0x8086, E1000_DEV_ID_82541ER_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
76         { 0x8086, E1000_DEV_ID_82541EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
77         { 0x8086, E1000_DEV_ID_82541GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
78         { 0x8086, E1000_DEV_ID_82541GI_LF,          PCI_ANY_ID, PCI_ANY_ID, 0},
79         { 0x8086, E1000_DEV_ID_82541GI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
80
81         { 0x8086, E1000_DEV_ID_82542,               PCI_ANY_ID, PCI_ANY_ID, 0},
82
83         { 0x8086, E1000_DEV_ID_82543GC_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
84         { 0x8086, E1000_DEV_ID_82543GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
85
86         { 0x8086, E1000_DEV_ID_82544EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
87         { 0x8086, E1000_DEV_ID_82544EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
88         { 0x8086, E1000_DEV_ID_82544GC_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
89         { 0x8086, E1000_DEV_ID_82544GC_LOM,         PCI_ANY_ID, PCI_ANY_ID, 0},
90
91         { 0x8086, E1000_DEV_ID_82545EM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
92         { 0x8086, E1000_DEV_ID_82545EM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
93         { 0x8086, E1000_DEV_ID_82545GM_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
94         { 0x8086, E1000_DEV_ID_82545GM_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
95         { 0x8086, E1000_DEV_ID_82545GM_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
96
97         { 0x8086, E1000_DEV_ID_82546EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
98         { 0x8086, E1000_DEV_ID_82546EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
99         { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
100         { 0x8086, E1000_DEV_ID_82546GB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
101         { 0x8086, E1000_DEV_ID_82546GB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
102         { 0x8086, E1000_DEV_ID_82546GB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
103         { 0x8086, E1000_DEV_ID_82546GB_PCIE,        PCI_ANY_ID, PCI_ANY_ID, 0},
104         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
105
106         { 0x8086, E1000_DEV_ID_82547EI,             PCI_ANY_ID, PCI_ANY_ID, 0},
107         { 0x8086, E1000_DEV_ID_82547EI_MOBILE,      PCI_ANY_ID, PCI_ANY_ID, 0},
108         { 0x8086, E1000_DEV_ID_82547GI,             PCI_ANY_ID, PCI_ANY_ID, 0},
109
110         { 0x8086, E1000_DEV_ID_82571EB_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
111         { 0x8086, E1000_DEV_ID_82571EB_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
112         { 0x8086, E1000_DEV_ID_82571EB_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
113
114         { 0x8086, E1000_DEV_ID_82572EI_COPPER,      PCI_ANY_ID, PCI_ANY_ID, 0},
115         { 0x8086, E1000_DEV_ID_82572EI_FIBER,       PCI_ANY_ID, PCI_ANY_ID, 0},
116         { 0x8086, E1000_DEV_ID_82572EI_SERDES,      PCI_ANY_ID, PCI_ANY_ID, 0},
117
118         { 0x8086, E1000_DEV_ID_82573E,              PCI_ANY_ID, PCI_ANY_ID, 0},
119         { 0x8086, E1000_DEV_ID_82573E_IAMT,         PCI_ANY_ID, PCI_ANY_ID, 0},
120         { 0x8086, E1000_DEV_ID_82573L,              PCI_ANY_ID, PCI_ANY_ID, 0},
121
122         /* required last entry */
123         { 0, 0, 0, 0, 0}
124 };
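/*
 * Note: every entry above uses PCI_ANY_ID for the subvendor/subdevice
 * fields and string index 0, so em_probe() effectively matches on the
 * Intel vendor ID (0x8086) plus the device ID alone, and all supported
 * adapters share the single em_strings[] branding string below.
 */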
125
126 /*********************************************************************
127  *  Table of branding strings for all supported NICs.
128  *********************************************************************/
129
130 static char *em_strings[] = {
131         "Intel(R) PRO/1000 Network Connection"
132 };
133
134 /*********************************************************************
135  *  Function prototypes            
136  *********************************************************************/
137 static int  em_probe(device_t);
138 static int  em_attach(device_t);
139 static int  em_detach(device_t);
140 static int  em_shutdown(device_t);
141 static void em_intr(void *);
142 static void em_start(struct ifnet *);
143 static int  em_ioctl(struct ifnet *, u_long, caddr_t);
144 static void em_watchdog(struct ifnet *);
145 static void em_init(void *);
146 static void em_init_locked(struct adapter *);
147 static void em_stop(void *);
148 static void em_media_status(struct ifnet *, struct ifmediareq *);
149 static int  em_media_change(struct ifnet *);
150 static void em_identify_hardware(struct adapter *);
151 static int  em_allocate_pci_resources(struct adapter *);
152 static void em_free_pci_resources(struct adapter *);
153 static void em_local_timer(void *);
154 static int  em_hardware_init(struct adapter *);
155 static void em_setup_interface(device_t, struct adapter *);
156 static int  em_setup_transmit_structures(struct adapter *);
157 static void em_initialize_transmit_unit(struct adapter *);
158 static int  em_setup_receive_structures(struct adapter *);
159 static void em_initialize_receive_unit(struct adapter *);
160 static void em_enable_intr(struct adapter *);
161 static void em_disable_intr(struct adapter *);
162 static void em_free_transmit_structures(struct adapter *);
163 static void em_free_receive_structures(struct adapter *);
164 static void em_update_stats_counters(struct adapter *);
165 static void em_clean_transmit_interrupts(struct adapter *);
166 static int  em_allocate_receive_structures(struct adapter *);
167 static int  em_allocate_transmit_structures(struct adapter *);
168 static void em_process_receive_interrupts(struct adapter *, int);
169 static void em_receive_checksum(struct adapter *, 
170                                 struct em_rx_desc *,
171                                 struct mbuf *);
172 static void em_transmit_checksum_setup(struct adapter *,
173                                        struct mbuf *,
174                                        u_int32_t *,
175                                        u_int32_t *);
176 static void em_set_promisc(struct adapter *);
177 static void em_disable_promisc(struct adapter *);
178 static void em_set_multi(struct adapter *);
179 static void em_print_hw_stats(struct adapter *);
180 static void em_print_link_status(struct adapter *);
181 static int  em_get_buf(int i, struct adapter *,
182                        struct mbuf *);
183 static void em_enable_vlans(struct adapter *);
184 static void em_disable_vlans(struct adapter *);
185 static int  em_encap(struct adapter *, struct mbuf **);
186 static void em_smartspeed(struct adapter *);
187 static int  em_82547_fifo_workaround(struct adapter *, int);
188 static void em_82547_update_fifo_head(struct adapter *, int);
189 static int  em_82547_tx_fifo_reset(struct adapter *);
190 static void em_82547_move_tail(void *arg);
191 static void em_82547_move_tail_locked(struct adapter *);
192 static int  em_dma_malloc(struct adapter *, bus_size_t,
193                           struct em_dma_alloc *, int);
194 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
195 static void em_print_debug_info(struct adapter *);
196 static int  em_is_valid_ether_addr(u_int8_t *);
197 static int  em_sysctl_stats(SYSCTL_HANDLER_ARGS);
198 static int  em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
199 static u_int32_t em_fill_descriptors (bus_addr_t address, 
200                                       u_int32_t length, 
201                                       PDESC_ARRAY desc_array);
202 static int  em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
203 static void em_add_int_delay_sysctl(struct adapter *, const char *,
204                                     const char *, struct em_int_delay_info *,
205                                     int, int);
206 #ifdef DEVICE_POLLING
207 static poll_handler_t em_poll;
208 #endif
209
210 /*********************************************************************
211  *  FreeBSD Device Interface Entry Points                    
212  *********************************************************************/
213
214 static device_method_t em_methods[] = {
215         /* Device interface */
216         DEVMETHOD(device_probe, em_probe),
217         DEVMETHOD(device_attach, em_attach),
218         DEVMETHOD(device_detach, em_detach),
219         DEVMETHOD(device_shutdown, em_shutdown),
220         {0, 0}
221 };
222
223 static driver_t em_driver = {
224         "em", em_methods, sizeof(struct adapter ),
225 };
226
227 static devclass_t em_devclass;
228 DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
229 MODULE_DEPEND(em, pci, 1, 1, 1);
230 MODULE_DEPEND(em, ether, 1, 1, 1);
231
232 /*********************************************************************
233  *  Tunable default values.
234  *********************************************************************/
235
236 #define E1000_TICKS_TO_USECS(ticks)     ((1024 * (ticks) + 500) / 1000)
237 #define E1000_USECS_TO_TICKS(usecs)     ((1000 * (usecs) + 512) / 1024)
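/*
 * Illustrative arithmetic only: the interrupt delay registers count in
 * units of 1.024 usec (hence the 1024/1000 factors), which is what these
 * macros convert to and from with rounding, e.g.:
 *
 *   E1000_TICKS_TO_USECS(64) == (1024 * 64 + 500) / 1000 == 66
 *   E1000_USECS_TO_TICKS(66) == (1000 * 66 + 512) / 1024 == 64
 */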
238
239 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
240 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
241 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
242 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
243 static int em_rxd = EM_DEFAULT_RXD;
244 static int em_txd = EM_DEFAULT_TXD;
245
246 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
247 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
248 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
249 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
250 TUNABLE_INT("hw.em.rxd", &em_rxd);
251 TUNABLE_INT("hw.em.txd", &em_txd);
252
253 /*********************************************************************
254  *  Device identification routine
255  *
256  *  em_probe determines whether the driver should be loaded on the
257  *  adapter, based on the PCI vendor and device ID of the adapter.
258  *
259  *  return BUS_PROBE_DEFAULT on success, positive on failure
260  *********************************************************************/
261
262 static int
263 em_probe(device_t dev)
264 {
265         em_vendor_info_t *ent;
266
267         u_int16_t       pci_vendor_id = 0;
268         u_int16_t       pci_device_id = 0;
269         u_int16_t       pci_subvendor_id = 0;
270         u_int16_t       pci_subdevice_id = 0;
271         char            adapter_name[60];
272
273         INIT_DEBUGOUT("em_probe: begin");
274
275         pci_vendor_id = pci_get_vendor(dev);
276         if (pci_vendor_id != EM_VENDOR_ID)
277                 return(ENXIO);
278
279         pci_device_id = pci_get_device(dev);
280         pci_subvendor_id = pci_get_subvendor(dev);
281         pci_subdevice_id = pci_get_subdevice(dev);
282
283         ent = em_vendor_info_array;
284         while (ent->vendor_id != 0) {
285                 if ((pci_vendor_id == ent->vendor_id) &&
286                     (pci_device_id == ent->device_id) &&
287
288                     ((pci_subvendor_id == ent->subvendor_id) ||
289                      (ent->subvendor_id == PCI_ANY_ID)) &&
290
291                     ((pci_subdevice_id == ent->subdevice_id) ||
292                      (ent->subdevice_id == PCI_ANY_ID))) {
293                         sprintf(adapter_name, "%s %s", 
294                                 em_strings[ent->index], 
295                                 em_driver_version);
296                         device_set_desc_copy(dev, adapter_name);
297                         return(BUS_PROBE_DEFAULT);
298                 }
299                 ent++;
300         }
301
302         return(ENXIO);
303 }
304
305 /*********************************************************************
306  *  Device initialization routine
307  *
308  *  The attach entry point is called when the driver is being loaded.
309  *  This routine identifies the type of hardware, allocates all resources 
310  *  and initializes the hardware.     
311  *  
312  *  return 0 on success, positive on failure
313  *********************************************************************/
314
315 static int
316 em_attach(device_t dev)
317 {
318         struct adapter * adapter;
319         int             tsize, rsize;
320         int             error = 0;
321
322         INIT_DEBUGOUT("em_attach: begin");
323
324         /* Allocate, clear, and link in our adapter structure */
325         if (!(adapter = device_get_softc(dev))) {
326                 printf("em: adapter structure allocation failed\n");
327                 return(ENOMEM);
328         }
329         bzero(adapter, sizeof(struct adapter ));
330         adapter->dev = dev;
331         adapter->osdep.dev = dev;
332         adapter->unit = device_get_unit(dev);
333         EM_LOCK_INIT(adapter, device_get_nameunit(dev));
334
335         /* SYSCTL stuff */
336         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
337                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
338                         OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
339                         (void *)adapter, 0,
340                         em_sysctl_debug_info, "I", "Debug Information");
341         
342         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
343                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
344                         OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
345                         (void *)adapter, 0,
346                         em_sysctl_stats, "I", "Statistics");
347
348         callout_init(&adapter->timer, CALLOUT_MPSAFE);
349         callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE);
350
351         /* Determine hardware revision */
352         em_identify_hardware(adapter);
353
354         /* Set up some sysctls for the tunable interrupt delays */
355         em_add_int_delay_sysctl(adapter, "rx_int_delay",
356             "receive interrupt delay in usecs", &adapter->rx_int_delay,
357             E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
358         em_add_int_delay_sysctl(adapter, "tx_int_delay",
359             "transmit interrupt delay in usecs", &adapter->tx_int_delay,
360             E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
361         if (adapter->hw.mac_type >= em_82540) {
362                 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
363                     "receive interrupt delay limit in usecs",
364                     &adapter->rx_abs_int_delay,
365                     E1000_REG_OFFSET(&adapter->hw, RADV),
366                     em_rx_abs_int_delay_dflt);
367                 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
368                     "transmit interrupt delay limit in usecs",
369                     &adapter->tx_abs_int_delay,
370                     E1000_REG_OFFSET(&adapter->hw, TADV),
371                     em_tx_abs_int_delay_dflt);
372         }
373
374         /*
375          * Validate the number of transmit and receive descriptors. The
376          * count must stay within the hardware minimum and maximum, and
377          * the resulting ring size must be a multiple of E1000_DBA_ALIGN.
378          */
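        /*
         * Illustrative check, assuming the usual 16-byte descriptor size and
         * a 128-byte E1000_DBA_ALIGN: a ring of 256 descriptors occupies
         * 256 * 16 = 4096 bytes, which is a multiple of 128, so the count is
         * accepted; any count that is not a multiple of 8 would be rejected.
         */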
379         if (((em_txd * sizeof(struct em_tx_desc)) % E1000_DBA_ALIGN) != 0 ||
380             (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
381             (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
382             (em_txd < EM_MIN_TXD)) {
383                 printf("em%d: Using %d TX descriptors instead of %d!\n",
384                     adapter->unit, EM_DEFAULT_TXD, em_txd);
385                 adapter->num_tx_desc = EM_DEFAULT_TXD;
386         } else
387                 adapter->num_tx_desc = em_txd;
388         if (((em_rxd * sizeof(struct em_rx_desc)) % E1000_DBA_ALIGN) != 0 ||
389             (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
390             (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
391             (em_rxd < EM_MIN_RXD)) {
392                 printf("em%d: Using %d RX descriptors instead of %d!\n",
393                     adapter->unit, EM_DEFAULT_RXD, em_rxd);
394                 adapter->num_rx_desc = EM_DEFAULT_RXD;
395         } else
396                 adapter->num_rx_desc = em_rxd;
397
398         adapter->hw.autoneg = DO_AUTO_NEG;
399         adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
400         adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
401         adapter->hw.tbi_compatibility_en = TRUE;
402         adapter->rx_buffer_len = EM_RXBUFFER_2048;
403                         
404         adapter->hw.phy_init_script = 1;
405         adapter->hw.phy_reset_disable = FALSE;
406
407 #ifndef EM_MASTER_SLAVE
408         adapter->hw.master_slave = em_ms_hw_default;
409 #else
410         adapter->hw.master_slave = EM_MASTER_SLAVE;
411 #endif
412         /* 
413          * Set the max frame size assuming standard ethernet 
414          * sized frames 
415          */   
416         adapter->hw.max_frame_size = 
417                 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
418
419         adapter->hw.min_frame_size = 
420                 MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
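        /*
         * Illustrative numbers: with the standard ETHERMTU of 1500 bytes the
         * maximum frame works out to 1500 + 14 + 4 = 1518 bytes, and
         * (assuming the usual 60-byte MINIMUM_ETHERNET_PACKET_SIZE) the
         * minimum frame to 60 + 4 = 64 bytes.
         */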
421
422         /* 
423          * This controls when hardware reports transmit completion 
424          * status. 
425          */
426         adapter->hw.report_tx_early = 1;
427
428
429         if (em_allocate_pci_resources(adapter)) {
430                 printf("em%d: Allocation of PCI resources failed\n", 
431                        adapter->unit);
432                 error = ENXIO;
433                 goto err_pci;
434         }
435   
436         
437         /* Initialize eeprom parameters */
438         em_init_eeprom_params(&adapter->hw);
439
440         tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
441             E1000_DBA_ALIGN);
442
443         /* Allocate Transmit Descriptor ring */
444         if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
445                 printf("em%d: Unable to allocate tx_desc memory\n",
446                        adapter->unit);
447                 error = ENOMEM;
448                 goto err_tx_desc;
449         }
450         adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
451
452         rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
453             E1000_DBA_ALIGN);
454
455         /* Allocate Receive Descriptor ring */  
456         if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
457                 printf("em%d: Unable to allocate rx_desc memory\n",
458                         adapter->unit);
459                 error = ENOMEM;
460                 goto err_rx_desc;
461         }
462         adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
463
464         /* Initialize the hardware */
465         if (em_hardware_init(adapter)) {
466                 printf("em%d: Unable to initialize the hardware\n",
467                        adapter->unit);
468                 error = EIO;
469                 goto err_hw_init;
470         }
471
472         /* Copy the permanent MAC address out of the EEPROM */
473         if (em_read_mac_addr(&adapter->hw) < 0) {
474                 printf("em%d: EEPROM read error while reading mac address\n",
475                        adapter->unit);
476                 error = EIO;
477                 goto err_mac_addr;
478         }
479
480         if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
481                 printf("em%d: Invalid mac address\n", adapter->unit);
482                 error = EIO;
483                 goto err_mac_addr;
484         }
485
486         /* Setup OS specific network interface */
487         em_setup_interface(dev, adapter);
488
489         /* Initialize statistics */
490         em_clear_hw_cntrs(&adapter->hw);
491         em_update_stats_counters(adapter);
492         adapter->hw.get_link_status = 1;
493         em_check_for_link(&adapter->hw);
494
495         if (bootverbose) {
496                 /* Print the link status */
497                 if (adapter->link_active == 1) {
498                         em_get_speed_and_duplex(&adapter->hw,
499                             &adapter->link_speed, &adapter->link_duplex);
500                         printf("em%d:  Speed:%d Mbps  Duplex:%s\n",
501                                adapter->unit,
502                                adapter->link_speed,
503                                adapter->link_duplex == FULL_DUPLEX ? "Full" :
504                                 "Half");
505                 } else
506                         printf("em%d:  Speed:N/A  Duplex:N/A\n",
507                             adapter->unit);
508         }
509
510         /* Identify 82544 on PCIX */
511         em_get_bus_info(&adapter->hw);
512         if(adapter->hw.bus_type == em_bus_type_pcix &&
513            adapter->hw.mac_type == em_82544) {
514                 adapter->pcix_82544 = TRUE;
515         }
516         else {
517                 adapter->pcix_82544 = FALSE;
518         }
519         INIT_DEBUGOUT("em_attach: end");
520         return(0);
521
522 err_mac_addr:
523 err_hw_init:
524         em_dma_free(adapter, &adapter->rxdma);
525 err_rx_desc:
526         em_dma_free(adapter, &adapter->txdma);
527 err_tx_desc:
528 err_pci:
529         em_free_pci_resources(adapter);
530         EM_LOCK_DESTROY(adapter);
531         return(error);
532
533 }
534
535 /*********************************************************************
536  *  Device removal routine
537  *
538  *  The detach entry point is called when the driver is being removed.
539  *  This routine stops the adapter and deallocates all the resources
540  *  that were allocated for driver operation.
541  *  
542  *  return 0 on success, positive on failure
543  *********************************************************************/
544
545 static int
546 em_detach(device_t dev)
547 {
548         struct adapter * adapter = device_get_softc(dev);
549         struct ifnet   *ifp = adapter->ifp;
550
551         INIT_DEBUGOUT("em_detach: begin");
552
553 #ifdef DEVICE_POLLING
554         if (ifp->if_capenable & IFCAP_POLLING)
555                 ether_poll_deregister(ifp);
556 #endif
557
558         EM_LOCK(adapter);
559         adapter->in_detach = 1;
560         em_stop(adapter);
561         em_phy_hw_reset(&adapter->hw);
562         EM_UNLOCK(adapter);
563         ether_ifdetach(adapter->ifp);
564
565         em_free_pci_resources(adapter);
566         bus_generic_detach(dev);
567         if_free(ifp);
568
569         /* Free Transmit Descriptor ring */
570         if (adapter->tx_desc_base) {
571                 em_dma_free(adapter, &adapter->txdma);
572                 adapter->tx_desc_base = NULL;
573         }
574
575         /* Free Receive Descriptor ring */
576         if (adapter->rx_desc_base) {
577                 em_dma_free(adapter, &adapter->rxdma);
578                 adapter->rx_desc_base = NULL;
579         }
580
581         EM_LOCK_DESTROY(adapter);
582
583         return(0);
584 }
585
586 /*********************************************************************
587  *
588  *  Shutdown entry point
589  *
590  **********************************************************************/ 
591
592 static int
593 em_shutdown(device_t dev)
594 {
595         struct adapter *adapter = device_get_softc(dev);
596         EM_LOCK(adapter);
597         em_stop(adapter);
598         EM_UNLOCK(adapter);
599         return(0);
600 }
601
602
603 /*********************************************************************
604  *  Transmit entry point
605  *
606  *  em_start is called by the stack to initiate a transmit.
607  *  The driver will remain in this routine as long as there are
608  *  packets to transmit and transmit resources are available.
609  *  If resources are not available, the stack is notified and
610  *  the packet is requeued.
611  **********************************************************************/
612
613 static void
614 em_start_locked(struct ifnet *ifp)
615 {
616         struct mbuf    *m_head;
617         struct adapter *adapter = ifp->if_softc;
618
619         mtx_assert(&adapter->mtx, MA_OWNED);
620
621         if (!adapter->link_active)
622                 return;
623
624         while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
625
626                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
627                 
628                 if (m_head == NULL) break;
629
630                 /*
631                  * em_encap() can modify our pointer, and or make it NULL on
632                  * failure.  In that event, we can't requeue.
633                  */
634                 if (em_encap(adapter, &m_head)) { 
635                         if (m_head == NULL)
636                                 break;
637                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
638                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
639                         break;
640                 }
641
642                 /* Send a copy of the frame to the BPF listener */
643                 BPF_MTAP(ifp, m_head);
644         
645                 /* Set timeout in case hardware has problems transmitting */
646                 ifp->if_timer = EM_TX_TIMEOUT;
647         
648         }
649         return;
650 }
651
652 static void
653 em_start(struct ifnet *ifp)
654 {
655         struct adapter *adapter = ifp->if_softc;
656
657         EM_LOCK(adapter);
658         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
659                 em_start_locked(ifp);
660         EM_UNLOCK(adapter);
661         return;
662 }
663
664 /*********************************************************************
665  *  Ioctl entry point
666  *
667  *  em_ioctl is called when the user wants to configure the
668  *  interface.
669  *
670  *  return 0 on success, positive on failure
671  **********************************************************************/
672
673 static int
674 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
675 {
676         struct ifreq   *ifr = (struct ifreq *) data;
677         struct adapter * adapter = ifp->if_softc;
678         int error = 0;
679
680         if (adapter->in_detach) return(error);
681
682         switch (command) {
683         case SIOCSIFADDR:
684         case SIOCGIFADDR:
685                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
686                 ether_ioctl(ifp, command, data);
687                 break;
688         case SIOCSIFMTU:
689             {
690 #ifndef __NO_STRICT_ALIGNMENT
691                 if (ifr->ifr_mtu > ETHERMTU)
692                         /*
693                          * XXX
694                          * Due to the limitation of DMA engine, it needs fix-up
695                          * code for strict alignment architectures. Disable
696                          * jumbo frame until we have better solutions.
697                          */
698                         error = EINVAL;
699 #else
700                 int max_frame_size;
701
702                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
703
704                 switch (adapter->hw.mac_type) {
705                 case em_82571:
706                 case em_82572:
707                         max_frame_size = 10500;
708                         break;
709                 case em_82573:
710                         /* 82573 does not support jumbo frames. */
711                         max_frame_size = ETHER_MAX_LEN;
712                         break;
713                 default:
714                         max_frame_size = MAX_JUMBO_FRAME_SIZE;
715                 }
716                 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
717                     ETHER_CRC_LEN) {
718                         error = EINVAL;
719                         break;
720                 }
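                /*
                 * In other words the largest accepted MTU is max_frame_size
                 * minus the 14-byte Ethernet header and 4-byte CRC; e.g. on
                 * 82571/82572 that is 10500 - 18 = 10482 bytes, while the
                 * 82573 stays at the standard 1500-byte MTU
                 * (ETHER_MAX_LEN - 18).
                 */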
721
722                 EM_LOCK(adapter);
723                 ifp->if_mtu = ifr->ifr_mtu;
724                 adapter->hw.max_frame_size = 
725                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
726                 em_init_locked(adapter);
727                 EM_UNLOCK(adapter);
728 #endif
729                 break;
730             }
731         case SIOCSIFFLAGS:
732                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
733                 EM_LOCK(adapter);
734                 if (ifp->if_flags & IFF_UP) {
735                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
736                                 em_init_locked(adapter);
737                         }
738
739                         em_disable_promisc(adapter);
740                         em_set_promisc(adapter);
741                 } else {
742                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
743                                 em_stop(adapter);
744                         }
745                 }
746                 EM_UNLOCK(adapter);
747                 break;
748         case SIOCADDMULTI:
749         case SIOCDELMULTI:
750                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
751                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
752                         EM_LOCK(adapter);
753                         em_disable_intr(adapter);
754                         em_set_multi(adapter);
755                         if (adapter->hw.mac_type == em_82542_rev2_0) {
756                                 em_initialize_receive_unit(adapter);
757                         }
758 #ifdef DEVICE_POLLING
759                         if (!(ifp->if_capenable & IFCAP_POLLING))
760 #endif
761                                 em_enable_intr(adapter);
762                         EM_UNLOCK(adapter);
763                 }
764                 break;
765         case SIOCSIFMEDIA:
766         case SIOCGIFMEDIA:
767                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
768                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
769                 break;
770         case SIOCSIFCAP:
771             {
772                 int mask, reinit;
773
774                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
775                 reinit = 0;
776                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
777 #ifdef DEVICE_POLLING
778                 if (mask & IFCAP_POLLING) {
779                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
780                                 error = ether_poll_register(em_poll, ifp);
781                                 if (error)
782                                         return(error);
783                                 EM_LOCK(adapter);
784                                 em_disable_intr(adapter);
785                                 ifp->if_capenable |= IFCAP_POLLING;
786                                 EM_UNLOCK(adapter);
787                         } else {
788                                 error = ether_poll_deregister(ifp);
789                                 /* Enable interrupt even in error case */
790                                 EM_LOCK(adapter);
791                                 em_enable_intr(adapter);
792                                 ifp->if_capenable &= ~IFCAP_POLLING;
793                                 EM_UNLOCK(adapter);
794                         }
795                 }
796 #endif
797                 if (mask & IFCAP_HWCSUM) {
798                         ifp->if_capenable ^= IFCAP_HWCSUM;
799                         reinit = 1;
800                 }
801                 if (mask & IFCAP_VLAN_HWTAGGING) {
802                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
803                         reinit = 1;
804                 }
805                 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
806                         em_init(adapter);
807                 break;
808             }
809         default:
810                 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
811                 error = EINVAL;
812         }
813
814         return(error);
815 }
816
817 /*********************************************************************
818  *  Watchdog entry point
819  *
820  *  This routine is called whenever the hardware quits transmitting.
821  *
822  **********************************************************************/
823
824 static void
825 em_watchdog(struct ifnet *ifp)
826 {
827         struct adapter * adapter;
828         adapter = ifp->if_softc;
829
830         EM_LOCK(adapter);
831         /* If we are in this routine because of pause frames, then
832          * don't reset the hardware.
833          */
834         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
835                 ifp->if_timer = EM_TX_TIMEOUT;
836                 EM_UNLOCK(adapter);
837                 return;
838         }
839
840         if (em_check_for_link(&adapter->hw))
841                 printf("em%d: watchdog timeout -- resetting\n", adapter->unit);
842
843         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
844         adapter->watchdog_events++;
845
846         em_init_locked(adapter);
847         EM_UNLOCK(adapter);
848 }
849
850 /*********************************************************************
851  *  Init entry point
852  *
853  *  This routine is used in two ways: it is used by the stack as the
854  *  init entry point in the network interface structure, and it is
855  *  used by the driver as a hw/sw initialization routine to get the
856  *  adapter to a consistent state.
857  *
858  *  Both em_init() and em_init_locked() return void.
859  **********************************************************************/
860
861 static void
862 em_init_locked(struct adapter * adapter)
863 {
864         struct ifnet   *ifp;
865
866         uint32_t        pba;
867         ifp = adapter->ifp;
868
869         INIT_DEBUGOUT("em_init: begin");
870
871         mtx_assert(&adapter->mtx, MA_OWNED);
872
873         em_stop(adapter);
874
875         /*
876          * Packet Buffer Allocation (PBA)
877          * Writing PBA sets the receive portion of the buffer;
878          * the remainder is used for the transmit buffer.
879          */
880         switch (adapter->hw.mac_type) {
881         case em_82547:
882         case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
883                 if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
884                         pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
885                 else
886                         pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
887                 adapter->tx_fifo_head = 0;
888                 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
889                 adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
890                 break;
891         case em_82571: /* 82571: Total Packet Buffer is 48K */
892         case em_82572: /* 82572: Total Packet Buffer is 48K */
893                 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
894                 break;
895         case em_82573: /* 82573: Total Packet Buffer is 32K */
896                 /* Jumbo frames not supported */
897                 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
898                 break;
899         default:
900                 /* Devices before 82547 had a Packet Buffer of 64K.   */
901                 if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
902                         pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
903                 else
904                         pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
905         }
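        /*
         * Illustrative example of the split, assuming the usual constant
         * values (E1000_PBA_40K == 40, E1000_PBA_30K == 30, both in 1 KB
         * units, and EM_PBA_BYTES_SHIFT == 10): on an 82547 with a standard
         * frame size, pba gives 30 KB to Rx and tx_fifo_size becomes
         * (40 - 30) << 10 = 10240 bytes, i.e. the remaining 10 KB of the
         * 40 KB packet buffer serves as the Tx FIFO for the workaround.
         */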
906
907         INIT_DEBUGOUT1("em_init: pba=%dK",pba);
908         E1000_WRITE_REG(&adapter->hw, PBA, pba);
909         
910         /* Get the latest MAC address; the user may have configured a LAA */
911         bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr,
912               ETHER_ADDR_LEN);
913
914         /* Initialize the hardware */
915         if (em_hardware_init(adapter)) {
916                 printf("em%d: Unable to initialize the hardware\n", 
917                        adapter->unit);
918                 return;
919         }
920
921         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
922                 em_enable_vlans(adapter);
923
924         /* Prepare transmit descriptors and buffers */
925         if (em_setup_transmit_structures(adapter)) {
926                 printf("em%d: Could not setup transmit structures\n", 
927                        adapter->unit);
928                 em_stop(adapter); 
929                 return;
930         }
931         em_initialize_transmit_unit(adapter);
932
933         /* Setup Multicast table */
934         em_set_multi(adapter);
935
936         /* Prepare receive descriptors and buffers */
937         if (em_setup_receive_structures(adapter)) {
938                 printf("em%d: Could not setup receive structures\n", 
939                        adapter->unit);
940                 em_stop(adapter);
941                 return;
942         }
943         em_initialize_receive_unit(adapter);
944  
945         /* Don't lose promiscuous settings */
946         em_set_promisc(adapter);
947
948         ifp->if_drv_flags |= IFF_DRV_RUNNING;
949         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
950
951         if (adapter->hw.mac_type >= em_82543) {
952                 if (ifp->if_capenable & IFCAP_TXCSUM)
953                         ifp->if_hwassist = EM_CHECKSUM_FEATURES;
954                 else
955                         ifp->if_hwassist = 0;
956         }
957
958         callout_reset(&adapter->timer, hz, em_local_timer, adapter);
959         em_clear_hw_cntrs(&adapter->hw);
960 #ifdef DEVICE_POLLING
961         /*
962          * Only enable interrupts if we are not polling; make sure
963          * they are off otherwise.
964          */
965         if (ifp->if_capenable & IFCAP_POLLING)
966                 em_disable_intr(adapter);
967         else
968 #endif /* DEVICE_POLLING */
969                 em_enable_intr(adapter);
970
971         /* Don't reset the phy next time init gets called */
972         adapter->hw.phy_reset_disable = TRUE;
973         
974         return;
975 }
976
977 static void
978 em_init(void *arg)
979 {
980         struct adapter * adapter = arg;
981
982         EM_LOCK(adapter);
983         em_init_locked(adapter);
984         EM_UNLOCK(adapter);
985         return;
986 }
987
988
989 #ifdef DEVICE_POLLING
990 static void     
991 em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
992 {
993         struct adapter *adapter = ifp->if_softc;
994         u_int32_t reg_icr;
995
996         mtx_assert(&adapter->mtx, MA_OWNED);
997
998         if (cmd == POLL_AND_CHECK_STATUS) {
999                 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1000                 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1001                         callout_stop(&adapter->timer);
1002                         adapter->hw.get_link_status = 1;
1003                         em_check_for_link(&adapter->hw);
1004                         em_print_link_status(adapter);
1005                         callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1006                 }
1007         }
1008         em_process_receive_interrupts(adapter, count);
1009         em_clean_transmit_interrupts(adapter);
1010         
1011         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1012                 em_start_locked(ifp);
1013 }
1014         
1015 static void     
1016 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1017 {
1018         struct adapter *adapter = ifp->if_softc;
1019
1020         EM_LOCK(adapter);
1021         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1022                 em_poll_locked(ifp, cmd, count);
1023         EM_UNLOCK(adapter);
1024 }
1025 #endif /* DEVICE_POLLING */
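/*
 * With DEVICE_POLLING compiled in and IFCAP_POLLING enabled through
 * SIOCSIFCAP, the kernel's polling machinery calls em_poll() instead of
 * delivering interrupts; em_intr() then simply returns early for such
 * interfaces (see the IFCAP_POLLING check below).
 */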
1026
1027 /*********************************************************************
1028  *
1029  *  Interrupt Service routine  
1030  *
1031  **********************************************************************/
1032 static void
1033 em_intr(void *arg)
1034 {
1035         struct adapter  *adapter = arg;
1036         struct ifnet    *ifp;
1037         uint32_t        reg_icr;
1038         int             wantinit = 0;
1039
1040         EM_LOCK(adapter);
1041
1042         ifp = adapter->ifp;  
1043
1044 #ifdef DEVICE_POLLING
1045         if (ifp->if_capenable & IFCAP_POLLING) {
1046                 EM_UNLOCK(adapter);
1047                 return;
1048         }
1049 #endif /* DEVICE_POLLING */
1050
1051         for (;;) {
1052                 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1053                 if (adapter->hw.mac_type >= em_82571 &&
1054                     (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1055                         break;
1056                 else if (reg_icr == 0)
1057                         break;
1058
1059                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1060                         em_process_receive_interrupts(adapter, -1);
1061                         em_clean_transmit_interrupts(adapter);
1062                 }
1063                  
1064                 /* Link status change */
1065                 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1066                         callout_stop(&adapter->timer);
1067                         adapter->hw.get_link_status = 1;
1068                         em_check_for_link(&adapter->hw);
1069                         em_print_link_status(adapter);
1070                         callout_reset(&adapter->timer, hz, em_local_timer,
1071                             adapter);
1072                 }
1073
1074                 if (reg_icr & E1000_ICR_RXO) {
1075                         adapter->rx_overruns++;
1076                         wantinit = 1;
1077                 }
1078         }
1079 #if 0
1080         if (wantinit)
1081                 em_init_locked(adapter);
1082 #endif
1083         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1084             !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1085                 em_start_locked(ifp);
1086
1087         EM_UNLOCK(adapter);
1088         return;
1089 }
1090
1091
1092
1093 /*********************************************************************
1094  *
1095  *  Media Ioctl callback
1096  *
1097  *  This routine is called whenever the user queries the status of
1098  *  the interface using ifconfig.
1099  *
1100  **********************************************************************/
1101 static void
1102 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1103 {
1104         struct adapter * adapter = ifp->if_softc;
1105
1106         INIT_DEBUGOUT("em_media_status: begin");
1107
1108         em_check_for_link(&adapter->hw);
1109         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1110                 if (adapter->link_active == 0) {
1111                         em_get_speed_and_duplex(&adapter->hw, 
1112                                                 &adapter->link_speed, 
1113                                                 &adapter->link_duplex);
1114                         adapter->link_active = 1;
1115                 }
1116         } else {
1117                 if (adapter->link_active == 1) {
1118                         adapter->link_speed = 0;
1119                         adapter->link_duplex = 0;
1120                         adapter->link_active = 0;
1121                 }
1122         }
1123
1124         ifmr->ifm_status = IFM_AVALID;
1125         ifmr->ifm_active = IFM_ETHER;
1126
1127         if (!adapter->link_active)
1128                 return;
1129
1130         ifmr->ifm_status |= IFM_ACTIVE;
1131
1132         if (adapter->hw.media_type == em_media_type_fiber) {
1133                 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1134         } else {
1135                 switch (adapter->link_speed) {
1136                 case 10:
1137                         ifmr->ifm_active |= IFM_10_T;
1138                         break;
1139                 case 100:
1140                         ifmr->ifm_active |= IFM_100_TX;
1141                         break;
1142                 case 1000:
1143                         ifmr->ifm_active |= IFM_1000_T;
1144                         break;
1145                 }
1146                 if (adapter->link_duplex == FULL_DUPLEX)
1147                         ifmr->ifm_active |= IFM_FDX;
1148                 else
1149                         ifmr->ifm_active |= IFM_HDX;
1150         }
1151         return;
1152 }
1153
1154 /*********************************************************************
1155  *
1156  *  Media Ioctl callback
1157  *
1158  *  This routine is called when the user changes speed/duplex using
1159  *  the media/mediaopt options with ifconfig.
1160  *
1161  **********************************************************************/
1162 static int
1163 em_media_change(struct ifnet *ifp)
1164 {
1165         struct adapter * adapter = ifp->if_softc;
1166         struct ifmedia  *ifm = &adapter->media;
1167
1168         INIT_DEBUGOUT("em_media_change: begin");
1169
1170         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1171                 return(EINVAL);
1172
1173         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1174         case IFM_AUTO:
1175                 adapter->hw.autoneg = DO_AUTO_NEG;
1176                 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1177                 break;
1178         case IFM_1000_SX:
1179         case IFM_1000_T:
1180                 adapter->hw.autoneg = DO_AUTO_NEG;
1181                 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1182                 break;
1183         case IFM_100_TX:
1184                 adapter->hw.autoneg = FALSE;
1185                 adapter->hw.autoneg_advertised = 0;
1186                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1187                         adapter->hw.forced_speed_duplex = em_100_full;
1188                 else
1189                         adapter->hw.forced_speed_duplex = em_100_half;
1190                 break;
1191         case IFM_10_T:
1192                 adapter->hw.autoneg = FALSE;
1193                 adapter->hw.autoneg_advertised = 0;
1194                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1195                         adapter->hw.forced_speed_duplex = em_10_full;
1196                 else
1197                         adapter->hw.forced_speed_duplex = em_10_half;
1198                 break;
1199         default:
1200                 printf("em%d: Unsupported media type\n", adapter->unit);
1201         }
1202
1203         /* As the speed/duplex settings may have changed, we need to
1204          * reset the PHY.
1205          */
1206         adapter->hw.phy_reset_disable = FALSE;
1207
1208         em_init(adapter);
1209
1210         return(0);
1211 }
1212
1213 /*********************************************************************
1214  *
1215  *  This routine maps the mbufs to tx descriptors.
1216  *
1217  *  return 0 on success, positive on failure
1218  **********************************************************************/
1219 static int              
1220 em_encap(struct adapter *adapter, struct mbuf **m_headp)
1221 {
1222         u_int32_t       txd_upper;
1223         u_int32_t       txd_lower, txd_used = 0, txd_saved = 0;
1224         int             i, j, error = 0;
1225         bus_dmamap_t    map;
1226
1227         struct mbuf     *m_head;
1228
1229         /* For 82544 Workaround */
1230         DESC_ARRAY              desc_array;
1231         u_int32_t               array_elements;
1232         u_int32_t               counter;
1233         struct m_tag    *mtag;
1234         bus_dma_segment_t       segs[EM_MAX_SCATTER];
1235         int                     nsegs;
1236         struct em_buffer   *tx_buffer;
1237         struct em_tx_desc *current_tx_desc = NULL;
1238         struct ifnet   *ifp = adapter->ifp;
1239
1240         m_head = *m_headp;
1241
1242         /*
1243          * Force a cleanup if number of TX descriptors
1244          * available hits the threshold
1245          */
1246         if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1247                 em_clean_transmit_interrupts(adapter);
1248                 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1249                         adapter->no_tx_desc_avail1++;
1250                         return(ENOBUFS);
1251                 }
1252         }
1253
1254         /*
1255          * Map the packet for DMA.
1256          */
1257         tx_buffer = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
1258         error = bus_dmamap_load_mbuf_sg(adapter->txtag, tx_buffer->map, m_head,
1259             segs, &nsegs, BUS_DMA_NOWAIT);
1260         map = tx_buffer->map;
1261         if (error != 0) {
1262                 adapter->no_tx_dma_setup++;
1263                 return (error);
1264         }
1265         KASSERT(nsegs != 0, ("em_encap: empty packet"));
1266
1267         if (nsegs > adapter->num_tx_desc_avail) {
1268                 adapter->no_tx_desc_avail2++;
1269                 error = ENOBUFS;
1270                 goto encap_fail;
1271         }
1272
1273
1274         if (ifp->if_hwassist > 0) {
1275                 em_transmit_checksum_setup(adapter,  m_head,
1276                                            &txd_upper, &txd_lower);
1277         } else
1278                 txd_upper = txd_lower = 0;
1279
1280
1281         /* Find out if we are in vlan mode */
1282         mtag = VLAN_OUTPUT_TAG(ifp, m_head);
1283
1284         /*
1285          * When operating in promiscuous mode, hardware encapsulation for
1286          * packets is disabled.  This means we have to add the vlan
1287          * encapsulation in the driver, since it will have come down from the
1288          * VLAN layer with a tag instead of a VLAN header.
1289          */
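        /*
         * The code below inserts the 802.1Q encapsulation by hand: it saves
         * the original Ethernet header, prepends room at the front of the
         * mbuf chain, and rewrites the header as a struct ether_vlan_header
         * carrying the TPID (ETHERTYPE_VLAN) and the tag value taken from
         * the mbuf packet tag.
         */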
1290         if (mtag != NULL && adapter->em_insert_vlan_header) {
1291                 struct ether_vlan_header *evl;
1292                 struct ether_header eh;
1293
1294                 m_head = m_pullup(m_head, sizeof(eh));
1295                 if (m_head == NULL) {
1296                         *m_headp = NULL;
1297                         error = ENOBUFS;
1298                         goto encap_fail;
1299                 }
1300                 eh = *mtod(m_head, struct ether_header *);
1301                 M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
1302                 if (m_head == NULL) {
1303                         *m_headp = NULL;
1304                         error = ENOBUFS;
1305                         goto encap_fail;
1306                 }
1307                 m_head = m_pullup(m_head, sizeof(*evl));
1308                 if (m_head == NULL) {
1309                         *m_headp = NULL;
1310                         error = ENOBUFS;
1311                         goto encap_fail;
1312                 }
1313                 evl = mtod(m_head, struct ether_vlan_header *);
1314                 bcopy(&eh, evl, sizeof(*evl));
1315                 evl->evl_proto = evl->evl_encap_proto;
1316                 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1317                 evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
1318                 m_tag_delete(m_head, mtag);
1319                 mtag = NULL;
1320                 *m_headp = m_head;
1321         }
1322
1323         i = adapter->next_avail_tx_desc;
1324         if (adapter->pcix_82544) {
1325                 txd_saved = i;
1326                 txd_used = 0;
1327         }
1328         for (j = 0; j < nsegs; j++) {
1329                 /* If adapter is 82544 and on PCIX bus */
1330                 if(adapter->pcix_82544) {
1331                         /* 
1332                          * Check the Address and Length combination and 
1333                          * split the data accordingly 
1334                          */
1335                         array_elements = em_fill_descriptors(segs[j].ds_addr,
1336                             segs[j].ds_len, &desc_array);
1337                         for (counter = 0; counter < array_elements; counter++) {
1338                                 if (txd_used == adapter->num_tx_desc_avail) {
1339                                         adapter->next_avail_tx_desc = txd_saved;
1340                                         adapter->no_tx_desc_avail2++;
1341                                         error = ENOBUFS;
1342                                         goto encap_fail;
1343                                 }
1344                                 tx_buffer = &adapter->tx_buffer_area[i];
1345                                 current_tx_desc = &adapter->tx_desc_base[i];
1346                                 current_tx_desc->buffer_addr = htole64(
1347                                         desc_array.descriptor[counter].address);
1348                                 current_tx_desc->lower.data = htole32(
1349                                         (adapter->txd_cmd | txd_lower | 
1350                                          (u_int16_t)desc_array.descriptor[counter].length));
1351                                 current_tx_desc->upper.data = htole32((txd_upper));
1352                                 if (++i == adapter->num_tx_desc)
1353                                          i = 0;
1354
1355                                 tx_buffer->m_head = NULL;
1356                                 txd_used++;
1357                         }
1358                 } else {
1359                         tx_buffer = &adapter->tx_buffer_area[i];
1360                         current_tx_desc = &adapter->tx_desc_base[i];
1361
1362                         current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
1363                         current_tx_desc->lower.data = htole32(
1364                                 adapter->txd_cmd | txd_lower | segs[j].ds_len);
1365                         current_tx_desc->upper.data = htole32(txd_upper);
1366
1367                         if (++i == adapter->num_tx_desc)
1368                                 i = 0;
1369
1370                         tx_buffer->m_head = NULL;
1371                 }
1372         }
1373
1374         adapter->next_avail_tx_desc = i;
1375         if (adapter->pcix_82544) {
1376                 adapter->num_tx_desc_avail -= txd_used;
1377         }
1378         else {
1379                 adapter->num_tx_desc_avail -= nsegs;
1380         }
1381
1382         if (mtag != NULL) {
1383                 /* Set the vlan id */
1384                 current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));
1385
1386                 /* Tell hardware to add tag */
1387                 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1388         }
1389
1390         tx_buffer->m_head = m_head;
1391         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1392
1393         /*
1394          * Last Descriptor of Packet needs End Of Packet (EOP)
1395          */
1396         current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1397
1398         /*
1399          * Advance the Transmit Descriptor Tail (TDT); this tells the E1000
1400          * that this frame is available to transmit.
1401          */
1402         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1403             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
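             /*
              * On an 82547 running at half duplex the tail write may have to be
              * deferred so that no frame wraps the internal Tx FIFO; that case
              * is handled by em_82547_move_tail_locked() below.
              */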
1404         if (adapter->hw.mac_type == em_82547 &&
1405             adapter->link_duplex == HALF_DUPLEX) {
1406                 em_82547_move_tail_locked(adapter);
1407         } else {
1408                 E1000_WRITE_REG(&adapter->hw, TDT, i);
1409                 if (adapter->hw.mac_type == em_82547) {
1410                         em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
1411                 }
1412         }
1413
1414         return(0);
1415
1416 encap_fail:
1417         bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1418         return (error);
1419 }
1420
1421 /*********************************************************************
1422  *
1423  * 82547 workaround to avoid a controller hang in half-duplex environments.
1424  * The idea is to avoid queuing a packet that would wrap around the internal
1425  * Tx FIFO ring boundary; when that would happen, the FIFO pointers must be
1426  * reset, which is done only once the FIFO is quiescent.
1427  *
1428  **********************************************************************/
1429 static void
1430 em_82547_move_tail_locked(struct adapter *adapter)
1431 {
1432         uint16_t hw_tdt;
1433         uint16_t sw_tdt;
1434         struct em_tx_desc *tx_desc;
1435         uint16_t length = 0;
1436         boolean_t eop = 0;
1437
1438         EM_LOCK_ASSERT(adapter);
1439
1440         hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1441         sw_tdt = adapter->next_avail_tx_desc;
1442         
1443         while (hw_tdt != sw_tdt) {
1444                 tx_desc = &adapter->tx_desc_base[hw_tdt];
1445                 length += tx_desc->lower.flags.length;
1446                 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1447                 if(++hw_tdt == adapter->num_tx_desc)
1448                         hw_tdt = 0;
1449
1450                 if(eop) {
1451                         if (em_82547_fifo_workaround(adapter, length)) {
1452                                 adapter->tx_fifo_wrk_cnt++;
1453                                 callout_reset(&adapter->tx_fifo_timer, 1,
1454                                         em_82547_move_tail, adapter);
1455                                 break;
1456                         }
1457                         E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1458                         em_82547_update_fifo_head(adapter, length);
1459                         length = 0;
1460                 }
1461         }       
1462         return;
1463 }
1464
1465 static void
1466 em_82547_move_tail(void *arg)
1467 {
1468         struct adapter *adapter = arg;
1469
1470         EM_LOCK(adapter);
1471         em_82547_move_tail_locked(adapter);
1472         EM_UNLOCK(adapter);
1473 }
1474
1475 static int
1476 em_82547_fifo_workaround(struct adapter *adapter, int len)
1477 {       
1478         int fifo_space, fifo_pkt_len;
1479
1480         fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
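             /*
              * fifo_pkt_len is the space the frame occupies in the Tx FIFO,
              * rounded up to the FIFO granularity.  For example, assuming
              * EM_FIFO_HDR is 16 bytes, a 1514-byte frame becomes
              * roundup2(1514 + 16, 16) = 1536 bytes.
              */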
1481
1482         if (adapter->link_duplex == HALF_DUPLEX) {
1483                 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1484
1485                 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1486                         if (em_82547_tx_fifo_reset(adapter)) {
1487                                 return(0);
1488                         }
1489                         else {
1490                                 return(1);
1491                         }
1492                 }
1493         }
1494
1495         return(0);
1496 }
1497
1498 static void
1499 em_82547_update_fifo_head(struct adapter *adapter, int len)
1500 {
1501         int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1502         
1503         /* tx_fifo_head is always 16-byte aligned */
1504         adapter->tx_fifo_head += fifo_pkt_len;
1505         if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
1506                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1507         }
1508
1509         return;
1510 }
1511
1512
1513 static int
1514 em_82547_tx_fifo_reset(struct adapter *adapter)
1515 {       
1516         uint32_t tctl;
1517
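             /*
              * Resetting the FIFO pointers is only safe when the Tx unit is
              * completely idle: the descriptor ring head equals the tail, the
              * FIFO head/tail and their saved copies match, and no packet data
              * remains in the FIFO (TDFPC is zero).
              */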
1518         if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1519               E1000_READ_REG(&adapter->hw, TDH)) &&
1520              (E1000_READ_REG(&adapter->hw, TDFT) == 
1521               E1000_READ_REG(&adapter->hw, TDFH)) &&
1522              (E1000_READ_REG(&adapter->hw, TDFTS) ==
1523               E1000_READ_REG(&adapter->hw, TDFHS)) &&
1524              (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1525
1526                 /* Disable TX unit */
1527                 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1528                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1529
1530                 /* Reset FIFO pointers */
1531                 E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
1532                 E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
1533                 E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
1534                 E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
1535
1536                 /* Re-enable TX unit */
1537                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1538                 E1000_WRITE_FLUSH(&adapter->hw);
1539
1540                 adapter->tx_fifo_head = 0;
1541                 adapter->tx_fifo_reset_cnt++;
1542
1543                 return(TRUE);
1544         }
1545         else {
1546                 return(FALSE);
1547         }
1548 }
1549
1550 static void
1551 em_set_promisc(struct adapter * adapter)
1552 {
1553
1554         u_int32_t       reg_rctl;
1555         struct ifnet   *ifp = adapter->ifp;
1556
1557         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1558
1559         if (ifp->if_flags & IFF_PROMISC) {
1560                 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1561                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1562                 /* Disable VLAN stripping in promiscuous mode.
1563                  * This allows bridging of VLAN-tagged frames and also
1564                  * lets the VLAN tags be seen in tcpdump.
1565                  */
1566                 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1567                         em_disable_vlans(adapter);
1568                 adapter->em_insert_vlan_header = 1;
1569         } else if (ifp->if_flags & IFF_ALLMULTI) {
1570                 reg_rctl |= E1000_RCTL_MPE;
1571                 reg_rctl &= ~E1000_RCTL_UPE;
1572                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1573                 adapter->em_insert_vlan_header = 0;
1574         } else
1575                 adapter->em_insert_vlan_header = 0;
1576
1577         return;
1578 }
1579
1580 static void
1581 em_disable_promisc(struct adapter * adapter)
1582 {
1583         u_int32_t       reg_rctl;
1584         struct ifnet   *ifp = adapter->ifp;
1585
1586         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1587
1588         reg_rctl &=  (~E1000_RCTL_UPE);
1589         reg_rctl &=  (~E1000_RCTL_MPE);
1590         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1591
1592         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1593                 em_enable_vlans(adapter);
1594         adapter->em_insert_vlan_header = 0;
1595
1596         return;
1597 }
1598
1599
1600 /*********************************************************************
1601  *  Multicast Update
1602  *
1603  *  This routine is called whenever the multicast address list is updated.
1604  *
1605  **********************************************************************/
1606
1607 static void
1608 em_set_multi(struct adapter * adapter)
1609 {
1610         u_int32_t reg_rctl = 0;
1611         u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1612         struct ifmultiaddr  *ifma;
1613         int mcnt = 0;
1614         struct ifnet   *ifp = adapter->ifp;
1615     
1616         IOCTL_DEBUGOUT("em_set_multi: begin");
1617  
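             /*
              * The 82542 rev 2.0 requires the receiver to be placed in reset
              * (and MWI turned off) while the multicast table is rewritten;
              * it is taken back out of reset further below.
              */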
1618         if (adapter->hw.mac_type == em_82542_rev2_0) {
1619                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1620                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) { 
1621                         em_pci_clear_mwi(&adapter->hw);
1622                 }
1623                 reg_rctl |= E1000_RCTL_RST;
1624                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1625                 msec_delay(5);
1626         }
1627
1628         IF_ADDR_LOCK(ifp);
1629         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1630                 if (ifma->ifma_addr->sa_family != AF_LINK)
1631                         continue;
1632  
1633                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;
1634
1635                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1636                       &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1637                 mcnt++;
1638         }
1639         IF_ADDR_UNLOCK(ifp);
1640
1641         if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1642                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1643                 reg_rctl |= E1000_RCTL_MPE;
1644                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1645         } else
1646                 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1647
1648         if (adapter->hw.mac_type == em_82542_rev2_0) {
1649                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1650                 reg_rctl &= ~E1000_RCTL_RST;
1651                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1652                 msec_delay(5);
1653                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
1654                         em_pci_set_mwi(&adapter->hw);
1655                 }
1656         }
1657
1658         return;
1659 }
1660
1661
1662 /*********************************************************************
1663  *  Timer routine
1664  *
1665  *  This routine checks for link status and updates statistics.
1666  *
1667  **********************************************************************/
1668
1669 static void
1670 em_local_timer(void *arg)
1671 {
1672         struct ifnet   *ifp;
1673         struct adapter * adapter = arg;
1674         ifp = adapter->ifp;
1675
1676         EM_LOCK(adapter);
1677
1678         em_check_for_link(&adapter->hw);
1679         em_print_link_status(adapter);
1680         em_update_stats_counters(adapter);   
1681         if (em_display_debug_stats && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1682                 em_print_hw_stats(adapter);
1683         }
1684         em_smartspeed(adapter);
1685
1686         callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1687
1688         EM_UNLOCK(adapter);
1689         return;
1690 }
1691
1692 static void
1693 em_print_link_status(struct adapter * adapter)
1694 {
1695         struct ifnet *ifp = adapter->ifp;
1696
1697         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1698                 if (adapter->link_active == 0) {
1699                         em_get_speed_and_duplex(&adapter->hw, 
1700                                                 &adapter->link_speed, 
1701                                                 &adapter->link_duplex);
1702                         if (bootverbose)
1703                                 printf("em%d: Link is up %d Mbps %s\n",
1704                                        adapter->unit,
1705                                        adapter->link_speed,
1706                                        ((adapter->link_duplex == FULL_DUPLEX) ?
1707                                         "Full Duplex" : "Half Duplex"));
1708                         adapter->link_active = 1;
1709                         adapter->smartspeed = 0;
1710                         if_link_state_change(ifp, LINK_STATE_UP);
1711                 }
1712         } else {
1713                 if (adapter->link_active == 1) {
1714                         adapter->link_speed = 0;
1715                         adapter->link_duplex = 0;
1716                         if (bootverbose)
1717                                 printf("em%d: Link is Down\n", adapter->unit);
1718                         adapter->link_active = 0;
1719                         if_link_state_change(ifp, LINK_STATE_DOWN);
1720                 }
1721         }
1722
1723         return;
1724 }
1725
1726 /*********************************************************************
1727  *
1728  *  This routine disables all traffic on the adapter by issuing a
1729  *  global reset on the MAC and deallocates TX/RX buffers. 
1730  *
1731  **********************************************************************/
1732
1733 static void
1734 em_stop(void *arg)
1735 {
1736         struct ifnet   *ifp;
1737         struct adapter * adapter = arg;
1738         ifp = adapter->ifp;
1739
1740         mtx_assert(&adapter->mtx, MA_OWNED);
1741
1742         INIT_DEBUGOUT("em_stop: begin");
1743
1744         em_disable_intr(adapter);
1745         em_reset_hw(&adapter->hw);
1746         callout_stop(&adapter->timer);
1747         callout_stop(&adapter->tx_fifo_timer);
1748         em_free_transmit_structures(adapter);
1749         em_free_receive_structures(adapter);
1750
1751
1752         /* Tell the stack that the interface is no longer active */
1753         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1754
1755         return;
1756 }
1757
1758
1759 /*********************************************************************
1760  *
1761  *  Determine hardware revision.
1762  *
1763  **********************************************************************/
1764 static void
1765 em_identify_hardware(struct adapter * adapter)
1766 {
1767         device_t dev = adapter->dev;
1768
1769         /* Make sure bus mastering and memory access are enabled in PCI config space */
1770         adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1771         if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1772               (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1773                 printf("em%d: Memory Access and/or Bus Master bits were not set!\n", 
1774                        adapter->unit);
1775                 adapter->hw.pci_cmd_word |= 
1776                 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1777                 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1778         }
1779
1780         /* Save off the information about this board */
1781         adapter->hw.vendor_id = pci_get_vendor(dev);
1782         adapter->hw.device_id = pci_get_device(dev);
1783         adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1784         adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1785         adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1786
1787         /* Identify the MAC */
1788         if (em_set_mac_type(&adapter->hw))
1789                 printf("em%d: Unknown MAC Type\n", adapter->unit);
1790         
1791         if(adapter->hw.mac_type == em_82541 || 
1792            adapter->hw.mac_type == em_82541_rev_2 ||
1793            adapter->hw.mac_type == em_82547 || 
1794            adapter->hw.mac_type == em_82547_rev_2)
1795                 adapter->hw.phy_init_script = TRUE;
1796
1797         return;
1798 }
1799
1800 static int
1801 em_allocate_pci_resources(struct adapter * adapter)
1802 {
1803         int             val, rid;
1804         device_t        dev = adapter->dev;
1805
1806         rid = PCIR_BAR(0);
1807         adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1808                                                      &rid, RF_ACTIVE);
1809         if (!(adapter->res_memory)) {
1810                 printf("em%d: Unable to allocate bus resource: memory\n", 
1811                        adapter->unit);
1812                 return(ENXIO);
1813         }
1814         adapter->osdep.mem_bus_space_tag =
1815             rman_get_bustag(adapter->res_memory);
1816         adapter->osdep.mem_bus_space_handle =
1817             rman_get_bushandle(adapter->res_memory);
1818         adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
1819
1820
1821         if (adapter->hw.mac_type > em_82543) {
1822                 /* Figure out where our I/O BAR is */
1823                 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
1824                         val = pci_read_config(dev, rid, 4);
1825                         if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) {
1826                                 adapter->io_rid = rid;
1827                                 break;
1828                         }
1829                         rid += 4;
1830                         /* check for 64bit BAR */
1831                         if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT)
1832                                 rid += 4;
1833                 }
1834                 if (rid >= PCIR_CIS) {
1835                         printf("em%d: Unable to locate IO BAR\n", adapter->unit);
1836                         return (ENXIO);
1837                 }
1838                 adapter->res_ioport = bus_alloc_resource_any(dev, 
1839                                                              SYS_RES_IOPORT,
1840                                                              &adapter->io_rid,
1841                                                              RF_ACTIVE);
1842                 if (!(adapter->res_ioport)) {
1843                         printf("em%d: Unable to allocate bus resource: ioport\n",
1844                                adapter->unit);
1845                         return(ENXIO);  
1846                 }
1847                 adapter->hw.io_base = 0;
1848                 adapter->osdep.io_bus_space_tag =
1849                     rman_get_bustag(adapter->res_ioport);
1850                 adapter->osdep.io_bus_space_handle =
1851                     rman_get_bushandle(adapter->res_ioport);
1852         }
1853
1854         rid = 0x0;
1855         adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1856                                                         RF_SHAREABLE | 
1857                                                         RF_ACTIVE);
1858         if (!(adapter->res_interrupt)) {
1859                 printf("em%d: Unable to allocate bus resource: interrupt\n", 
1860                        adapter->unit);
1861                 return(ENXIO);
1862         }
1863         if (bus_setup_intr(dev, adapter->res_interrupt,
1864                            INTR_TYPE_NET | INTR_MPSAFE,
1865                            (void (*)(void *)) em_intr, adapter,
1866                            &adapter->int_handler_tag)) {
1867                 printf("em%d: Error registering interrupt handler!\n", 
1868                        adapter->unit);
1869                 return(ENXIO);
1870         }
1871
1872         adapter->hw.back = &adapter->osdep;
1873
1874         return(0);
1875 }
1876
1877 static void
1878 em_free_pci_resources(struct adapter * adapter)
1879 {
1880         device_t dev = adapter->dev;
1881
1882         if (adapter->res_interrupt != NULL) {
1883                 bus_teardown_intr(dev, adapter->res_interrupt, 
1884                                   adapter->int_handler_tag);
1885                 bus_release_resource(dev, SYS_RES_IRQ, 0, 
1886                                      adapter->res_interrupt);
1887         }
1888         if (adapter->res_memory != NULL) {
1889                 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 
1890                                      adapter->res_memory);
1891         }
1892
1893         if (adapter->res_ioport != NULL) {
1894                 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, 
1895                                      adapter->res_ioport);
1896         }
1897         return;
1898 }
1899
1900 /*********************************************************************
1901  *
1902  *  Initialize the hardware to a configuration as specified by the
1903  *  adapter structure. The controller is reset, the EEPROM is
1904  *  verified, the MAC address is set, then the shared initialization
1905  *  routines are called.
1906  *
1907  **********************************************************************/
1908 static int
1909 em_hardware_init(struct adapter * adapter)
1910 {
1911         uint16_t rx_buffer_size;
1912
1913         INIT_DEBUGOUT("em_hardware_init: begin");
1914         /* Issue a global reset */
1915         em_reset_hw(&adapter->hw);
1916
1917         /* When the hardware is reset, the Tx FIFO head is reset as well */
1918         adapter->tx_fifo_head = 0;
1919
1920         /* Make sure we have a good EEPROM before we read from it */
1921         if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1922                 printf("em%d: The EEPROM Checksum Is Not Valid\n",
1923                        adapter->unit);
1924                 return(EIO);
1925         }
1926
1927         if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1928                 printf("em%d: EEPROM read error while reading part number\n",
1929                        adapter->unit);
1930                 return(EIO);
1931         }
1932
1933         /*
1934          * These parameters control the automatic generation (Tx) of, and
1935          * response (Rx) to, Ethernet PAUSE frames.
1936          * - The high water mark should leave room for at least two frames
1937          *   to be received after an XOFF has been sent.
1938          * - The low water mark works best when it is very near the high water
1939          *   mark, so the receiver restarts (sends XON) as soon as it has drained
1940          *   a bit.  We use an arbitrary offset of 1500 bytes, which restarts the
1941          *   sender once roughly one full frame has been pulled from the buffer.
1942          *   Several smaller frames may be buffered instead; in that case XON is
1943          *   not sent until their combined size drains the buffer by 1500 bytes.
1944          * - The pause time is fairly long: 0x1000 quanta of 512 bit times each
1945          *   (about 2 ms at gigabit speed).
1946          */
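             /*
              * As a rough, hypothetical example: with a 48 KB Rx packet buffer
              * (rx_buffer_size = 49152) and a standard 1518-byte maximum frame,
              * fc_high_water = 49152 - roundup2(1518, 1024) = 49152 - 2048 = 47104
              * and fc_low_water = 47104 - 1500 = 45604.
              */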
1946         rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10 );
1947
1948         adapter->hw.fc_high_water = rx_buffer_size -
1949             roundup2(adapter->hw.max_frame_size, 1024);
1950         adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
1951         adapter->hw.fc_pause_time = 0x1000;
1952         adapter->hw.fc_send_xon = TRUE;
1953         adapter->hw.fc = em_fc_full;
1954
1955         if (em_init_hw(&adapter->hw) < 0) {
1956                 printf("em%d: Hardware Initialization Failed\n",
1957                        adapter->unit);
1958                 return(EIO);
1959         }
1960
1961         em_check_for_link(&adapter->hw);
1962         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1963                 adapter->link_active = 1;
1964         else
1965                 adapter->link_active = 0;
1966
1967         if (adapter->link_active) {
1968                 em_get_speed_and_duplex(&adapter->hw, 
1969                                         &adapter->link_speed, 
1970                                         &adapter->link_duplex);
1971         } else {
1972                 adapter->link_speed = 0;
1973                 adapter->link_duplex = 0;
1974         }
1975
1976         return(0);
1977 }
1978
1979 /*********************************************************************
1980  *
1981  *  Setup networking device structure and register an interface.
1982  *
1983  **********************************************************************/
1984 static void
1985 em_setup_interface(device_t dev, struct adapter * adapter)
1986 {
1987         struct ifnet   *ifp;
1988         INIT_DEBUGOUT("em_setup_interface: begin");
1989
1990         ifp = adapter->ifp = if_alloc(IFT_ETHER);
1991         if (ifp == NULL)
1992                 panic("%s: can not if_alloc()", device_get_nameunit(dev));
1993         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1994         ifp->if_mtu = ETHERMTU;
1995         ifp->if_baudrate = 1000000000;
1996         ifp->if_init =  em_init;
1997         ifp->if_softc = adapter;
1998         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1999         ifp->if_ioctl = em_ioctl;
2000         ifp->if_start = em_start;
2001         ifp->if_watchdog = em_watchdog;
2002         IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
2003         ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
2004         IFQ_SET_READY(&ifp->if_snd);
2005
2006         ether_ifattach(ifp, adapter->hw.mac_addr);
2007
2008         ifp->if_capabilities = ifp->if_capenable = 0;
2009
2010         if (adapter->hw.mac_type >= em_82543) {
2011                 ifp->if_capabilities |= IFCAP_HWCSUM;
2012                 ifp->if_capenable |= IFCAP_HWCSUM;
2013         }
2014
2015         /*
2016          * Tell the upper layer(s) we support long frames.
2017          */
2018         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2019         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2020         ifp->if_capenable |= IFCAP_VLAN_MTU;
2021
2022 #ifdef DEVICE_POLLING
2023         ifp->if_capabilities |= IFCAP_POLLING;
2024 #endif
2025
2026         /* 
2027          * Specify the media types supported by this adapter and register
2028          * callbacks to update media and link information
2029          */
2030         ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
2031                      em_media_status);
2032         if (adapter->hw.media_type == em_media_type_fiber) {
2033                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 
2034                             0, NULL);
2035                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 
2036                             0, NULL);
2037         } else {
2038                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2039                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 
2040                             0, NULL);
2041                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 
2042                             0, NULL);
2043                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 
2044                             0, NULL);
2045                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 
2046                             0, NULL);
2047                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2048         }
2049         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2050         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2051
2052         return;
2053 }
2054
2055
2056 /*********************************************************************
2057  *
2058  *  Workaround for SmartSpeed on 82541 and 82547 controllers
2059  *
2060  **********************************************************************/        
2061 static void
2062 em_smartspeed(struct adapter *adapter)
2063 {
2064         uint16_t phy_tmp;
2065  
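             /*
              * SmartSpeed runs from the one-second timer while a 1000BASE-T
              * link is down: it first tries clearing the manual master/slave
              * setting when repeated master/slave configuration faults are
              * seen, later re-enables it, and starts the whole sequence over
              * after EM_SMARTSPEED_MAX ticks.
              */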
2066         if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) || 
2067            !adapter->hw.autoneg || !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
2068                 return;
2069
2070         if(adapter->smartspeed == 0) {
2071                 /* If the master/slave configuration fault is asserted twice
2072                  * in a row, assume a back-to-back connection */
2073                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2074                 if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
2075                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2076                 if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2077                         em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
2078                                         &phy_tmp);
2079                         if(phy_tmp & CR_1000T_MS_ENABLE) {
2080                                 phy_tmp &= ~CR_1000T_MS_ENABLE;
2081                                 em_write_phy_reg(&adapter->hw,
2082                                                     PHY_1000T_CTRL, phy_tmp);
2083                                 adapter->smartspeed++;
2084                                 if(adapter->hw.autoneg &&
2085                                    !em_phy_setup_autoneg(&adapter->hw) &&
2086                                    !em_read_phy_reg(&adapter->hw, PHY_CTRL,
2087                                                        &phy_tmp)) {
2088                                         phy_tmp |= (MII_CR_AUTO_NEG_EN |  
2089                                                     MII_CR_RESTART_AUTO_NEG);
2090                                         em_write_phy_reg(&adapter->hw,
2091                                                          PHY_CTRL, phy_tmp);
2092                                 }
2093                         }
2094                 }
2095                 return;
2096         } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2097                 /* If there is still no link, we may be on a 2- or 3-pair cable */
2098                 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2099                 phy_tmp |= CR_1000T_MS_ENABLE;
2100                 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2101                 if(adapter->hw.autoneg &&
2102                    !em_phy_setup_autoneg(&adapter->hw) &&
2103                    !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
2104                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
2105                                     MII_CR_RESTART_AUTO_NEG);
2106                         em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
2107                 }
2108         }
2109         /* Restart process after EM_SMARTSPEED_MAX iterations */
2110         if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2111                 adapter->smartspeed = 0;
2112
2113         return;
2114 }
2115
2116
2117 /*
2118  * Manage DMA'able memory.
2119  */
2120 static void
2121 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2122 {
2123         if (error)
2124                 return;
2125         *(bus_addr_t *) arg = segs[0].ds_addr;
2126 }
2127
2128 static int
2129 em_dma_malloc(struct adapter *adapter, bus_size_t size,
2130         struct em_dma_alloc *dma, int mapflags)
2131 {
2132         int r;
2133          
2134         r = bus_dma_tag_create(NULL,                    /* parent */
2135                                E1000_DBA_ALIGN, 0,      /* alignment, bounds */
2136                                BUS_SPACE_MAXADDR,       /* lowaddr */
2137                                BUS_SPACE_MAXADDR,       /* highaddr */
2138                                NULL, NULL,              /* filter, filterarg */
2139                                size,                    /* maxsize */
2140                                1,                       /* nsegments */
2141                                size,                    /* maxsegsize */
2142                                BUS_DMA_ALLOCNOW,        /* flags */
2143                                NULL,                    /* lockfunc */
2144                                NULL,                    /* lockarg */
2145                                &dma->dma_tag);
2146         if (r != 0) {
2147                 printf("em%d: em_dma_malloc: bus_dma_tag_create failed; "
2148                         "error %u\n", adapter->unit, r);
2149                 goto fail_0;
2150         }
2151
2152         r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
2153                              BUS_DMA_NOWAIT, &dma->dma_map);
2154         if (r != 0) {
2155                 printf("em%d: em_dma_malloc: bus_dmamem_alloc failed; "
2156                         "size %ju, error %d\n", adapter->unit,
2157                         (uintmax_t)size, r);
2158                 goto fail_2;
2159         }
2160
2161         dma->dma_paddr = 0;
2162         r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2163                             size,
2164                             em_dmamap_cb,
2165                             &dma->dma_paddr,
2166                             mapflags | BUS_DMA_NOWAIT);
2167         if (r != 0 || dma->dma_paddr == 0) {
2168                 printf("em%d: em_dma_malloc: bus_dmamap_load failed; "
2169                         "error %u\n", adapter->unit, r);
2170                 goto fail_3;
2171         }
2172
2173         return (0);
2174
2175 fail_3:
2176         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2177 fail_2:
2178         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2179         bus_dma_tag_destroy(dma->dma_tag);
2180 fail_0:
2181         dma->dma_map = NULL;
2182         dma->dma_tag = NULL;
2183         return (r);
2184 }
2185
2186 static void
2187 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2188 {
2189         if (dma->dma_tag == NULL)
2190                 return;
2191         if (dma->dma_map != NULL) {
2192                 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2193                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2194                 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2195                 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2196                 dma->dma_map = NULL;
2197         }
2198         bus_dma_tag_destroy(dma->dma_tag);
2199         dma->dma_tag = NULL;
2200 }
2201
2202
2203 /*********************************************************************
2204  *
2205  *  Allocate memory for tx_buffer structures. The tx_buffer stores all 
2206  *  the information needed to transmit a packet on the wire. 
2207  *
2208  **********************************************************************/
2209 static int
2210 em_allocate_transmit_structures(struct adapter * adapter)
2211 {
2212         if (!(adapter->tx_buffer_area =
2213               (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2214                                              adapter->num_tx_desc, M_DEVBUF,
2215                                              M_NOWAIT))) {
2216                 printf("em%d: Unable to allocate tx_buffer memory\n", 
2217                        adapter->unit);
2218                 return ENOMEM;
2219         }
2220
2221         bzero(adapter->tx_buffer_area,
2222               sizeof(struct em_buffer) * adapter->num_tx_desc);
2223
2224         return 0;
2225 }
2226
2227 /*********************************************************************
2228  *
2229  *  Allocate and initialize transmit structures. 
2230  *
2231  **********************************************************************/
2232 static int
2233 em_setup_transmit_structures(struct adapter * adapter)
2234 {
2235         struct em_buffer *tx_buffer;
2236         bus_size_t size;
2237         int error, i;
2238
2239         /*
2240          * Setup DMA descriptor areas.
2241          */
2242         size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
2243         if ((error = bus_dma_tag_create(NULL,           /* parent */
2244                                1, 0,                    /* alignment, bounds */
2245                                BUS_SPACE_MAXADDR,       /* lowaddr */ 
2246                                BUS_SPACE_MAXADDR,       /* highaddr */
2247                                NULL, NULL,              /* filter, filterarg */
2248                                size,                    /* maxsize */
2249                                EM_MAX_SCATTER,          /* nsegments */
2250                                size,                    /* maxsegsize */
2251                                0,                       /* flags */ 
2252                                NULL,                    /* lockfunc */
2253                                NULL,                    /* lockarg */
2254                                &adapter->txtag)) != 0) {
2255                 printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit);
2256                 goto fail;
2257         }
2258
2259         if ((error = em_allocate_transmit_structures(adapter)) != 0)
2260                 goto fail;
2261
2262         bzero((void *) adapter->tx_desc_base,
2263               (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
2264         tx_buffer = adapter->tx_buffer_area;
2265         for (i = 0; i < adapter->num_tx_desc; i++) {
2266                 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2267                 if (error != 0) {
2268                         printf("em%d: Unable to create TX DMA map\n",
2269                             adapter->unit);
2270                         goto fail;
2271                 }
2272                 tx_buffer++;
2273         }
2274
2275         adapter->next_avail_tx_desc = 0;
2276         adapter->oldest_used_tx_desc = 0;
2277
2278         /* Set number of descriptors available */
2279         adapter->num_tx_desc_avail = adapter->num_tx_desc;
2280
2281         /* Set checksum context */
2282         adapter->active_checksum_context = OFFLOAD_NONE;
2283         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2284             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2285
2286         return (0);
2287
2288 fail:
2289         em_free_transmit_structures(adapter);
2290         return (error);
2291 }
2292
2293 /*********************************************************************
2294  *
2295  *  Enable transmit unit.
2296  *
2297  **********************************************************************/
2298 static void
2299 em_initialize_transmit_unit(struct adapter * adapter)
2300 {
2301         u_int32_t       reg_tctl;
2302         u_int32_t       reg_tipg = 0;
2303         u_int64_t       bus_addr;
2304
2305         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2306         /* Setup the Base and Length of the Tx Descriptor Ring */
2307         bus_addr = adapter->txdma.dma_paddr;
2308         E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
2309         E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
2310         E1000_WRITE_REG(&adapter->hw, TDLEN, 
2311                         adapter->num_tx_desc *
2312                         sizeof(struct em_tx_desc));
2313
2314         /* Setup the HW Tx Head and Tail descriptor pointers */
2315         E1000_WRITE_REG(&adapter->hw, TDH, 0);
2316         E1000_WRITE_REG(&adapter->hw, TDT, 0);
2317
2318
2319         HW_DEBUGOUT2("Base = %x, Length = %x\n", 
2320                      E1000_READ_REG(&adapter->hw, TDBAL),
2321                      E1000_READ_REG(&adapter->hw, TDLEN));
2322
2323         /* Set the default values for the Tx Inter Packet Gap timer */
2324         switch (adapter->hw.mac_type) {
2325         case em_82542_rev2_0:
2326         case em_82542_rev2_1:
2327                 reg_tipg = DEFAULT_82542_TIPG_IPGT;
2328                 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2329                 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2330                 break;
2331         default:
2332                 if (adapter->hw.media_type == em_media_type_fiber)
2333                         reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2334                 else
2335                         reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2336                 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2337                 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2338         }
2339
2340         E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
2341         E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
2342         if(adapter->hw.mac_type >= em_82540)
2343                 E1000_WRITE_REG(&adapter->hw, TADV,
2344                     adapter->tx_abs_int_delay.value);
2345
2346         /* Program the Transmit Control Register */
2347         reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2348                    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2349         if (adapter->hw.mac_type >= em_82571)
2350                 reg_tctl |= E1000_TCTL_MULR;
2351         if (adapter->link_duplex == 1) {
2352                 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2353         } else {
2354                 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2355         }
2356         E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
2357
2358         /* Setup Transmit Descriptor Settings for this adapter */   
2359         adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
2360
2361         if (adapter->tx_int_delay.value > 0)
2362                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2363
2364         return;
2365 }
2366
2367 /*********************************************************************
2368  *
2369  *  Free all transmit related data structures.
2370  *
2371  **********************************************************************/
2372 static void
2373 em_free_transmit_structures(struct adapter * adapter)
2374 {
2375         struct em_buffer   *tx_buffer;
2376         int             i;
2377
2378         INIT_DEBUGOUT("free_transmit_structures: begin");
2379
2380         if (adapter->tx_buffer_area != NULL) {
2381                 tx_buffer = adapter->tx_buffer_area;
2382                 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2383                         if (tx_buffer->m_head != NULL) {
2384                                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2385                                     BUS_DMASYNC_POSTWRITE);
2386                                 bus_dmamap_unload(adapter->txtag,
2387                                     tx_buffer->map);
2388                                 m_freem(tx_buffer->m_head);
2389                                 tx_buffer->m_head = NULL;
2390                         } else if (tx_buffer->map != NULL)
2391                                 bus_dmamap_unload(adapter->txtag,
2392                                     tx_buffer->map);
2393                         if (tx_buffer->map != NULL) {
2394                                 bus_dmamap_destroy(adapter->txtag,
2395                                     tx_buffer->map);
2396                                 tx_buffer->map = NULL;
2397                         }
2398                 }
2399         }
2400         if (adapter->tx_buffer_area != NULL) {
2401                 free(adapter->tx_buffer_area, M_DEVBUF);
2402                 adapter->tx_buffer_area = NULL;
2403         }
2404         if (adapter->txtag != NULL) {
2405                 bus_dma_tag_destroy(adapter->txtag);
2406                 adapter->txtag = NULL;
2407         }
2408         return;
2409 }
2410
2411 /*********************************************************************
2412  *
2413  *  The offload context needs to be set when we transfer the first
2414  *  packet of a particular protocol (TCP/UDP). We change the
2415  *  context only if the protocol type changes.
2416  *
2417  **********************************************************************/
2418 static void
2419 em_transmit_checksum_setup(struct adapter * adapter,
2420                            struct mbuf *mp,
2421                            u_int32_t *txd_upper,
2422                            u_int32_t *txd_lower) 
2423 {
2424         struct em_context_desc *TXD;
2425         struct em_buffer *tx_buffer;
2426         int curr_txd;
2427
2428         if (mp->m_pkthdr.csum_flags) {
2429
2430                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2431                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2432                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2433                         if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2434                                 return;
2435                         else
2436                                 adapter->active_checksum_context = OFFLOAD_TCP_IP;
2437
2438                 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2439                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2440                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2441                         if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2442                                 return;
2443                         else
2444                                 adapter->active_checksum_context = OFFLOAD_UDP_IP;
2445                 } else {
2446                         *txd_upper = 0;
2447                         *txd_lower = 0;
2448                         return;
2449                 }
2450         } else {
2451                 *txd_upper = 0;
2452                 *txd_lower = 0;
2453                 return;
2454         }
2455
2456         /* If we reach this point, the checksum offload context
2457          * needs to be reset.
2458          */
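             /*
              * A context descriptor is written into the ring describing where
              * the IP and TCP/UDP checksums live: ipcss/ipcso/ipcse give the
              * start, offset and end of the IP checksum region, and
              * tucss/tucso/tucse do the same for the TCP or UDP checksum.
              */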
2459         curr_txd = adapter->next_avail_tx_desc;
2460         tx_buffer = &adapter->tx_buffer_area[curr_txd];
2461         TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2462
2463         TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2464         TXD->lower_setup.ip_fields.ipcso = 
2465                 ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2466         TXD->lower_setup.ip_fields.ipcse = 
2467                 htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2468
2469         TXD->upper_setup.tcp_fields.tucss = 
2470                 ETHER_HDR_LEN + sizeof(struct ip);
2471         TXD->upper_setup.tcp_fields.tucse = htole16(0);
2472
2473         if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2474                 TXD->upper_setup.tcp_fields.tucso = 
2475                         ETHER_HDR_LEN + sizeof(struct ip) + 
2476                         offsetof(struct tcphdr, th_sum);
2477         } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2478                 TXD->upper_setup.tcp_fields.tucso = 
2479                         ETHER_HDR_LEN + sizeof(struct ip) + 
2480                         offsetof(struct udphdr, uh_sum);
2481         }
2482
2483         TXD->tcp_seg_setup.data = htole32(0);
2484         TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2485
2486         tx_buffer->m_head = NULL;
2487
2488         if (++curr_txd == adapter->num_tx_desc)
2489                 curr_txd = 0;
2490
2491         adapter->num_tx_desc_avail--;
2492         adapter->next_avail_tx_desc = curr_txd;
2493
2494         return;
2495 }
2496
2497 /**********************************************************************
2498  *
2499  *  Examine each tx_buffer in the used queue. If the hardware is done
2500  *  processing the packet then free associated resources. The
2501  *  tx_buffer is put back on the free queue.
2502  *
2503  **********************************************************************/
2504 static void
2505 em_clean_transmit_interrupts(struct adapter * adapter)
2506 {
2507         int i, num_avail;
2508         struct em_buffer *tx_buffer;
2509         struct em_tx_desc   *tx_desc;
2510         struct ifnet   *ifp = adapter->ifp;
2511
2512         mtx_assert(&adapter->mtx, MA_OWNED);
2513
2514         if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2515                 return;
2516
2517         num_avail = adapter->num_tx_desc_avail;
2518         i = adapter->oldest_used_tx_desc;
2519
2520         tx_buffer = &adapter->tx_buffer_area[i];
2521         tx_desc = &adapter->tx_desc_base[i];
2522
2523         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2524             BUS_DMASYNC_POSTREAD);
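             /*
              * Starting from the oldest used descriptor, reclaim every
              * descriptor the hardware has marked done (the DD status bit is
              * set), freeing the associated mbuf and DMA map along the way.
              */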
2525         while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2526
2527                 tx_desc->upper.data = 0;
2528                 num_avail++;
2529
2530                 if (tx_buffer->m_head) {
2531                         ifp->if_opackets++;
2532                         bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2533                             BUS_DMASYNC_POSTWRITE);
2534                         bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2535
2536                         m_freem(tx_buffer->m_head);
2537                         tx_buffer->m_head = NULL;
2538                 }
2539
2540                 if (++i == adapter->num_tx_desc)
2541                         i = 0;
2542
2543                 tx_buffer = &adapter->tx_buffer_area[i];
2544                 tx_desc = &adapter->tx_desc_base[i];
2545         }
2546         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2547             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2548
2549         adapter->oldest_used_tx_desc = i;
2550
2551         /*
2552          * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
2553          * that it is OK to send packets.
2554          * If there are no pending descriptors, clear the timeout. Otherwise,
2555          * if some descriptors have been freed, restart the timeout.
2556          */
2557         if (num_avail > EM_TX_CLEANUP_THRESHOLD) {                
2558                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2559                 if (num_avail == adapter->num_tx_desc)
2560                         ifp->if_timer = 0;
2561                 else if (num_avail == adapter->num_tx_desc_avail)
2562                         ifp->if_timer = EM_TX_TIMEOUT;
2563         }
2564         adapter->num_tx_desc_avail = num_avail;
2565         return;
2566 }
2567
2568 /*********************************************************************
2569  *
2570  *  Get a buffer from system mbuf buffer pool.
2571  *
2572  **********************************************************************/
2573 static int
2574 em_get_buf(int i, struct adapter *adapter,
2575            struct mbuf *nmp)
2576 {
2577         struct mbuf    *mp = nmp;
2578         struct em_buffer *rx_buffer;
2579         struct ifnet   *ifp;
2580         bus_dma_segment_t segs[1];
2581         int error, nsegs;
2582
2583         ifp = adapter->ifp;
2584
2585         if (mp == NULL) {
2586                 mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2587                 if (mp == NULL) {
2588                         adapter->mbuf_cluster_failed++;
2589                         return(ENOBUFS);
2590                 }
2591                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2592         } else {
2593                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2594                 mp->m_data = mp->m_ext.ext_buf;
2595                 mp->m_next = NULL;
2596         }
2597
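             /*
              * For standard MTUs, shift the payload by ETHER_ALIGN (2 bytes)
              * so that the IP header following the 14-byte Ethernet header
              * ends up 32-bit aligned.
              */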
2598         if (ifp->if_mtu <= ETHERMTU) {
2599                 m_adj(mp, ETHER_ALIGN);
2600         }
2601
2602         rx_buffer = &adapter->rx_buffer_area[i];
2603
2604         /*
2605          * Using memory from the mbuf cluster pool, invoke the
2606          * bus_dma machinery to arrange the memory mapping.
2607          */
2608         error = bus_dmamap_load_mbuf_sg(adapter->rxtag, rx_buffer->map,
2609             mp, segs, &nsegs, 0);
2610         if (error != 0) {
2611                 m_free(mp);
2612                 return(error);
2613         }
2614         /* If nsegs is wrong then the stack is corrupt */
2615         KASSERT(nsegs == 1, ("Too many segments returned!"));
2616         rx_buffer->m_head = mp;
2617         adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
2618         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2619
2620         return(0);
2621 }
2622
2623 /*********************************************************************
2624  *
2625  *  Allocate memory for rx_buffer structures. Since we use one 
2626  *  rx_buffer per received packet, the maximum number of rx_buffer's 
2627  *  that we'll need is equal to the number of receive descriptors 
2628  *  that we've allocated.
2629  *
2630  **********************************************************************/
2631 static int
2632 em_allocate_receive_structures(struct adapter * adapter)
2633 {
2634         int             i, error;
2635         struct em_buffer *rx_buffer;
2636
2637         if (!(adapter->rx_buffer_area =
2638               (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2639                                           adapter->num_rx_desc, M_DEVBUF,
2640                                           M_NOWAIT))) {
2641                 printf("em%d: Unable to allocate rx_buffer memory\n",
2642                        adapter->unit);
2643                 return(ENOMEM);
2644         }
2645
2646         bzero(adapter->rx_buffer_area,
2647               sizeof(struct em_buffer) * adapter->num_rx_desc);
2648
2649         error = bus_dma_tag_create(NULL,                /* parent */
2650                                1, 0,                    /* alignment, bounds */
2651                                BUS_SPACE_MAXADDR,       /* lowaddr */
2652                                BUS_SPACE_MAXADDR,       /* highaddr */
2653                                NULL, NULL,              /* filter, filterarg */
2654                                MCLBYTES,                /* maxsize */
2655                                1,                       /* nsegments */
2656                                MCLBYTES,                /* maxsegsize */
2657                                BUS_DMA_ALLOCNOW,        /* flags */
2658                                NULL,                    /* lockfunc */
2659                                NULL,                    /* lockarg */
2660                                &adapter->rxtag);
2661         if (error != 0) {
2662                 printf("em%d: em_allocate_receive_structures: "
2663                         "bus_dma_tag_create failed; error %u\n",
2664                        adapter->unit, error);
2665                 goto fail;
2666         }
2667
2668         rx_buffer = adapter->rx_buffer_area;
2669         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2670                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2671                                           &rx_buffer->map);
2672                 if (error != 0) {
2673                         printf("em%d: em_allocate_receive_structures: "
2674                                 "bus_dmamap_create failed; error %u\n",
2675                                 adapter->unit, error);
2676                         goto fail;
2677                 }
2678         }
2679
2680         for (i = 0; i < adapter->num_rx_desc; i++) {
2681                 error = em_get_buf(i, adapter, NULL);
2682                 if (error != 0)
2683                         goto fail;
2684         }
2685         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2686             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2687
2688         return(0);
2689
2690 fail:
2691         em_free_receive_structures(adapter);
2692         return (error);
2693 }
2694
2695 /*********************************************************************
2696  *
2697  *  Allocate and initialize receive structures.
2698  *  
2699  **********************************************************************/
2700 static int
2701 em_setup_receive_structures(struct adapter * adapter)
2702 {
2703         bzero((void *) adapter->rx_desc_base,
2704               (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2705
2706         if (em_allocate_receive_structures(adapter))
2707                 return ENOMEM;
2708
2709         /* Setup our descriptor pointers */
2710         adapter->next_rx_desc_to_check = 0;
2711         return(0);
2712 }
2713
2714 /*********************************************************************
2715  *
2716  *  Enable receive unit.
2717  *  
2718  **********************************************************************/
2719 static void
2720 em_initialize_receive_unit(struct adapter * adapter)
2721 {
2722         u_int32_t       reg_rctl;
2723         u_int32_t       reg_rxcsum;
2724         struct ifnet    *ifp;
2725         u_int64_t       bus_addr;
2726
2727         INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2728         ifp = adapter->ifp;
2729
2730         /* Make sure receives are disabled while setting up the descriptor ring */
2731         E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2732
2733         /* Set the Receive Delay Timer Register */
2734         E1000_WRITE_REG(&adapter->hw, RDTR, 
2735                         adapter->rx_int_delay.value | E1000_RDT_FPDB);
2736
2737         if(adapter->hw.mac_type >= em_82540) {
2738                 E1000_WRITE_REG(&adapter->hw, RADV,
2739                     adapter->rx_abs_int_delay.value);
2740
2741                 /* Set the interrupt throttling rate.  Value is calculated
2742                  * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
2743 #define MAX_INTS_PER_SEC        8000
2744 #define DEFAULT_ITR             1000000000/(MAX_INTS_PER_SEC * 256)
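                /*
                 * With these defaults the value works out to
                 * 1000000000 / (8000 * 256) = ~488.  The ITR register counts
                 * in 256 ns units, so this enforces roughly a 125 us minimum
                 * spacing between interrupts, i.e. at most about 8000
                 * interrupts per second.
                 */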
2745                 E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
2746         }       
2747
2748         /* Setup the Base and Length of the Rx Descriptor Ring */
2749         bus_addr = adapter->rxdma.dma_paddr;
2750         E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr);
2751         E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
2752         E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2753                         sizeof(struct em_rx_desc));
2754
2755         /* Setup the HW Rx Head and Tail Descriptor Pointers */
2756         E1000_WRITE_REG(&adapter->hw, RDH, 0);
2757         E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2758
2759         /* Setup the Receive Control Register */
2760         reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2761                    E1000_RCTL_RDMTS_HALF |
2762                    (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2763
2764         if (adapter->hw.tbi_compatibility_on == TRUE)
2765                 reg_rctl |= E1000_RCTL_SBP;
2766
2767
2768         switch (adapter->rx_buffer_len) {
2769         default:
2770         case EM_RXBUFFER_2048:
2771                 reg_rctl |= E1000_RCTL_SZ_2048;
2772                 break;
2773         case EM_RXBUFFER_4096:
2774                 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2775                 break;            
2776         case EM_RXBUFFER_8192:
2777                 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2778                 break;
2779         case EM_RXBUFFER_16384:
2780                 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2781                 break;
2782         }
2783
2784         if (ifp->if_mtu > ETHERMTU)
2785                 reg_rctl |= E1000_RCTL_LPE;
2786
2787         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2788         if ((adapter->hw.mac_type >= em_82543) && 
2789             (ifp->if_capenable & IFCAP_RXCSUM)) {
2790                 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2791                 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2792                 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2793         }
2794
2795         /* Enable Receives */
2796         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2797
2798         return;
2799 }
2800
2801 /*********************************************************************
2802  *
2803  *  Free receive related data structures.
2804  *
2805  **********************************************************************/
2806 static void
2807 em_free_receive_structures(struct adapter *adapter)
2808 {
2809         struct em_buffer   *rx_buffer;
2810         int             i;
2811
2812         INIT_DEBUGOUT("free_receive_structures: begin");
2813
2814         if (adapter->rx_buffer_area != NULL) {
2815                 rx_buffer = adapter->rx_buffer_area;
2816                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2817                         if (rx_buffer->m_head != NULL) {
2818                                 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
2819                                     BUS_DMASYNC_POSTREAD);
2820                                 bus_dmamap_unload(adapter->rxtag,
2821                                     rx_buffer->map);
2822                                 m_freem(rx_buffer->m_head);
2823                                 rx_buffer->m_head = NULL;
2824                         } else if (rx_buffer->map != NULL)
2825                                 bus_dmamap_unload(adapter->rxtag,
2826                                     rx_buffer->map);
2827                         if (rx_buffer->map != NULL) {
2828                                 bus_dmamap_destroy(adapter->rxtag,
2829                                     rx_buffer->map);
2830                                 rx_buffer->map = NULL;
2831                         }
2832                 }
2833         }
2834         if (adapter->rx_buffer_area != NULL) {
2835                 free(adapter->rx_buffer_area, M_DEVBUF);
2836                 adapter->rx_buffer_area = NULL;
2837         }
2838         if (adapter->rxtag != NULL) {
2839                 bus_dma_tag_destroy(adapter->rxtag);
2840                 adapter->rxtag = NULL;
2841         }
2842         return;
2843 }
2844
2845 /*********************************************************************
2846  *
2847  *  This routine executes in interrupt context. It replenishes
2848  *  the mbufs in the descriptor ring and passes frames that have been
2849  *  DMA'ed into host memory up to the network stack.
2850  *
2851  *  We loop at most count times if count is > 0, or until done if
2852  *  count < 0.
2853  *
2854  *********************************************************************/
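/*
 * Elsewhere in this driver the legacy interrupt handler typically calls this
 * with count = -1 (drain the ring completely), while the DEVICE_POLLING path
 * passes its poll budget so at most that many complete frames are handed up
 * per call.
 */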
2855 static void
2856 em_process_receive_interrupts(struct adapter * adapter, int count)
2857 {
2858         struct ifnet        *ifp;
2859         struct mbuf         *mp;
2860         u_int8_t            accept_frame = 0;
2861         u_int8_t            eop = 0;
2862         u_int16_t           len, desc_len, prev_len_adj;
2863         int                 i;
2864
2865         /* Pointer to the receive descriptor being examined. */
2866         struct em_rx_desc   *current_desc;
2867
2868         mtx_assert(&adapter->mtx, MA_OWNED);
2869
2870         ifp = adapter->ifp;
2871         i = adapter->next_rx_desc_to_check;
2872         current_desc = &adapter->rx_desc_base[i];
2873         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2874             BUS_DMASYNC_POSTREAD);
2875
2876         if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2877                 return;
2878         }
2879
2880         while ((current_desc->status & E1000_RXD_STAT_DD) &&
2881                     (count != 0) &&
2882                     (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2883                 struct mbuf *m = NULL;
2884
2885                 mp = adapter->rx_buffer_area[i].m_head;
2886                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2887                     BUS_DMASYNC_POSTREAD);
2888                 bus_dmamap_unload(adapter->rxtag,
2889                     adapter->rx_buffer_area[i].map);
2890
2891                 accept_frame = 1;
2892                 prev_len_adj = 0;
2893                 desc_len = le16toh(current_desc->length);
2894                 if (current_desc->status & E1000_RXD_STAT_EOP) {
2895                         count--;
2896                         eop = 1;
2897                         if (desc_len < ETHER_CRC_LEN) {
2898                                 len = 0;
2899                                 prev_len_adj = ETHER_CRC_LEN - desc_len;
2900                         }
2901                         else {
2902                                 len = desc_len - ETHER_CRC_LEN;
2903                         }
2904                 } else {
2905                         eop = 0;
2906                         len = desc_len;
2907                 }
2908
2909                 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2910                         u_int8_t            last_byte;
2911                         u_int32_t           pkt_len = desc_len;
2912
2913                         if (adapter->fmp != NULL)
2914                                 pkt_len += adapter->fmp->m_pkthdr.len; 
2915  
2916                         last_byte = *(mtod(mp, caddr_t) + desc_len - 1);                        
2917
2918                         if (TBI_ACCEPT(&adapter->hw, current_desc->status, 
2919                                        current_desc->errors, 
2920                                        pkt_len, last_byte)) {
2921                                 em_tbi_adjust_stats(&adapter->hw, 
2922                                                     &adapter->stats, 
2923                                                     pkt_len, 
2924                                                     adapter->hw.mac_addr);
2925                                 if (len > 0) len--;
2926                         } 
2927                         else {
2928                                 accept_frame = 0;
2929                         }
2930                 }
2931
2932                 if (accept_frame) {
2933
2934                         if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
2935                                 adapter->dropped_pkts++;
2936                                 em_get_buf(i, adapter, mp);
2937                                 if (adapter->fmp != NULL) 
2938                                         m_freem(adapter->fmp);
2939                                 adapter->fmp = NULL;
2940                                 adapter->lmp = NULL;
2941                                 break;
2942                         }
2943
2944                         /* Assign correct length to the current fragment */
2945                         mp->m_len = len;
2946
2947                         if (adapter->fmp == NULL) {
2948                                 mp->m_pkthdr.len = len;
2949                                 adapter->fmp = mp;       /* Store the first mbuf */
2950                                 adapter->lmp = mp;
2951                         } else {
2952                                 /* Chain mbuf's together */
2953                                 mp->m_flags &= ~M_PKTHDR;
2954                                 /* 
2955                                  * Adjust length of previous mbuf in chain if we 
2956                                  * received less than 4 bytes in the last descriptor.
2957                                  */
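                                /*
                                 * E.g. if the last descriptor held only 2
                                 * bytes, those bytes are part of the 4-byte
                                 * CRC: len was set to 0 above and
                                 * prev_len_adj = 4 - 2 = 2 bytes are trimmed
                                 * from the previous fragment here.
                                 */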
2958                                 if (prev_len_adj > 0) {
2959                                         adapter->lmp->m_len -= prev_len_adj;
2960                                         adapter->fmp->m_pkthdr.len -= prev_len_adj;
2961                                 }
2962                                 adapter->lmp->m_next = mp;
2963                                 adapter->lmp = adapter->lmp->m_next;
2964                                 adapter->fmp->m_pkthdr.len += len;
2965                         }
2966
2967                         if (eop) {
2968                                 adapter->fmp->m_pkthdr.rcvif = ifp;
2969                                 ifp->if_ipackets++;
2970                                 em_receive_checksum(adapter, current_desc,
2971                                                     adapter->fmp);
2972                                 if (current_desc->status & E1000_RXD_STAT_VP)
2973                                         VLAN_INPUT_TAG(ifp, adapter->fmp,
2974                                             (le16toh(current_desc->special) &
2975                                             E1000_RXD_SPC_VLAN_MASK),
2976                                             adapter->fmp = NULL);
2977
2978                                 m = adapter->fmp;
2979                                 adapter->fmp = NULL;
2980                                 adapter->lmp = NULL;
2981                         }
2982                 } else {
2983                         adapter->dropped_pkts++;
2984                         em_get_buf(i, adapter, mp);
2985                         if (adapter->fmp != NULL) 
2986                                 m_freem(adapter->fmp);
2987                         adapter->fmp = NULL;
2988                         adapter->lmp = NULL;
2989                 }
2990
2991                 /* Zero out the receive descriptor's status. */
2992                 current_desc->status = 0;
2993                 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2994                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2995  
2996                 /* Advance the E1000's Receive Queue #0  "Tail Pointer". */
2997                 E1000_WRITE_REG(&adapter->hw, RDT, i);
2998
2999                 /* Advance our pointers to the next descriptor */
3000                 if (++i == adapter->num_rx_desc)
3001                         i = 0;
3002                 if (m != NULL) {
3003                         adapter->next_rx_desc_to_check = i;
3004                         EM_UNLOCK(adapter);
3005                         (*ifp->if_input)(ifp, m);
3006                         EM_LOCK(adapter);
3007                         i = adapter->next_rx_desc_to_check;
3008                 }
3009                 current_desc = &adapter->rx_desc_base[i];
3010         }
3011         adapter->next_rx_desc_to_check = i;
3012         return;
3013 }
3014
3015 /*********************************************************************
3016  *
3017  *  Verify that the hardware indicated that the checksum is valid. 
3018  *  Inform the stack about the status of the checksum so that the
3019  *  stack does not spend time verifying it again in software.
3020  *
3021  *********************************************************************/
3022 static void
3023 em_receive_checksum(struct adapter *adapter,
3024                     struct em_rx_desc *rx_desc,
3025                     struct mbuf *mp)
3026 {
3027         /* 82543 or newer only */
3028         if ((adapter->hw.mac_type < em_82543) ||
3029             /* Ignore Checksum bit is set */
3030             (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3031                 mp->m_pkthdr.csum_flags = 0;
3032                 return;
3033         }
3034
3035         if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3036                 /* Did it pass? */
3037                 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3038                         /* IP Checksum Good */
3039                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3040                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3041
3042                 } else {
3043                         mp->m_pkthdr.csum_flags = 0;
3044                 }
3045         }
3046
3047         if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3048                 /* Did it pass? */        
3049                 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3050                         mp->m_pkthdr.csum_flags |= 
3051                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3052                         mp->m_pkthdr.csum_data = htons(0xffff);
3053                 }
3054         }
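        /*
         * Note: CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data = 0xffff is
         * the conventional way of telling the stack that the TCP/UDP checksum
         * (including the pseudo-header) has already been verified, so the
         * protocol input routines skip their own software check.
         */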
3055
3056         return;
3057 }
3058
3059
3060 static void 
3061 em_enable_vlans(struct adapter *adapter)
3062 {
3063         uint32_t ctrl;
3064
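        /*
         * Program the VLAN EtherType (0x8100) and set CTRL.VME, which turns
         * on the MAC's 802.1Q support (in particular, tag stripping on
         * received frames).
         */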
3065         E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
3066
3067         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3068         ctrl |= E1000_CTRL_VME; 
3069         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3070
3071         return;
3072 }
3073
3074 static void
3075 em_disable_vlans(struct adapter *adapter)
3076 {
3077         uint32_t ctrl;
3078
3079         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3080         ctrl &= ~E1000_CTRL_VME;
3081         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3082
3083         return;
3084 }
3085
3086 static void
3087 em_enable_intr(struct adapter * adapter)
3088 {
3089         E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
3090         return;
3091 }
3092
3093 static void
3094 em_disable_intr(struct adapter *adapter)
3095 {
3096         /*
3097          * The first version of the 82542 had an errata where, when link was forced,
3098          * it would stay up even if the cable was disconnected.  Sequence errors
3099          * were used to detect the disconnect, and the driver would then unforce the
3100          * link.  That code lives in the ISR, so for this to work correctly the
3101          * Sequence error interrupt had to be enabled all the time.
3102          */
3103
3104         if (adapter->hw.mac_type == em_82542_rev2_0)
3105             E1000_WRITE_REG(&adapter->hw, IMC,
3106                 (0xffffffff & ~E1000_IMC_RXSEQ));
3107         else
3108             E1000_WRITE_REG(&adapter->hw, IMC,
3109                 0xffffffff);
3110         return;
3111 }
3112
3113 static int
3114 em_is_valid_ether_addr(u_int8_t *addr)
3115 {
3116         char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3117                                 
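        /*
         * Reject multicast/broadcast addresses (I/G bit set in the first
         * octet) and the all-zero address; either usually indicates a missing
         * or corrupt EEPROM image.
         */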
3118         if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
3119                 return (FALSE);
3120         }
3121
3122         return(TRUE);
3123 }
3124
3125 void 
3126 em_write_pci_cfg(struct em_hw *hw,
3127                       uint32_t reg,
3128                       uint16_t *value)
3129 {
3130         pci_write_config(((struct em_osdep *)hw->back)->dev, reg, 
3131                          *value, 2);
3132 }
3133
3134 void 
3135 em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
3136                      uint16_t *value)
3137 {
3138         *value = pci_read_config(((struct em_osdep *)hw->back)->dev,
3139                                  reg, 2);
3140         return;
3141 }
3142
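/*
 * em_pci_set_mwi() and em_pci_clear_mwi() toggle the Memory Write and
 * Invalidate bit in the PCI command register; the shared MAC-layer code uses
 * them around operations that require MWI to be temporarily disabled.
 */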
3143 void
3144 em_pci_set_mwi(struct em_hw *hw)
3145 {
3146         pci_write_config(((struct em_osdep *)hw->back)->dev,
3147                          PCIR_COMMAND,
3148                          (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3149         return;
3150 }
3151
3152 void
3153 em_pci_clear_mwi(struct em_hw *hw)
3154 {
3155         pci_write_config(((struct em_osdep *)hw->back)->dev,
3156                          PCIR_COMMAND,
3157                          (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3158         return;
3159 }
3160
3161 /*********************************************************************
3162 * 82544 Coexistence issue workaround.
3163 *    There are two issues:
3164 *       1. Transmit hang issue.
3165 *          Detect it with the equation
3166 *          SIZE[3:0] + ADDR[2:0] = SUM[3:0];
3167 *          if SUM[3:0] is between 1 and 4, this issue can occur.
3168 *
3169 *       2. DAC issue.
3170 *          Detect it with the same equation
3171 *          SIZE[3:0] + ADDR[2:0] = SUM[3:0];
3172 *          if SUM[3:0] is between 9 and c, this issue can occur.
3173 *
3174 *    WORKAROUND:
3175 *          Make sure the buffer's ending address is not 1, 2, 3, 4 (hang)
3176 *          or 9, a, b, c (DAC); if it would be, split the last 4 bytes
3177 *          into a separate descriptor.
3178 *********************************************************************/
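/*
 * Worked example: a buffer starting at an 8-byte-aligned address
 * (ADDR[2:0] = 0) with length 0x42 gives SUM[3:0] = 0x0 + 0x2 = 0x2, which
 * falls in the hang range, so em_fill_descriptors() returns two entries: one
 * of length 0x3e and a final 4-byte descriptor.  A buffer with ADDR[2:0] = 3
 * and length 0x62 gives SUM[3:0] = 0x5, which is safe, so a single
 * descriptor is returned.
 */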
3179 static u_int32_t
3180 em_fill_descriptors (bus_addr_t address,
3181                               u_int32_t length,
3182                               PDESC_ARRAY desc_array)
3183 {
3184         /* The issue is sensitive to both length and address, */
3185         /* so check the length and the ending address first. */
3186         u_int32_t safe_terminator;
3187         if (length <= 4) {
3188                 desc_array->descriptor[0].address = address;
3189                 desc_array->descriptor[0].length = length;
3190                 desc_array->elements = 1;
3191                 return desc_array->elements;
3192         }
3193         safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3194         /* If the terminator does not fall in 0x1-0x4 or 0x9-0xC, no split is needed. */
3195         if (safe_terminator == 0 ||
3196             (safe_terminator > 4 &&
3197              safe_terminator < 9) ||
3198             (safe_terminator > 0xC &&
3199              safe_terminator <= 0xF)) {
3200                 desc_array->descriptor[0].address = address;
3201                 desc_array->descriptor[0].length = length;
3202                 desc_array->elements = 1;
3203                 return desc_array->elements;
3204         }
3205          
3206         desc_array->descriptor[0].address = address;
3207         desc_array->descriptor[0].length = length - 4;
3208         desc_array->descriptor[1].address = address + (length - 4);
3209         desc_array->descriptor[1].length = 4;
3210         desc_array->elements = 2;
3211         return desc_array->elements;
3212 }
3213
3214 /**********************************************************************
3215  *
3216  *  Update the board statistics counters. 
3217  *
3218  **********************************************************************/
3219 static void
3220 em_update_stats_counters(struct adapter *adapter)
3221 {
3222         struct ifnet   *ifp;
3223
3224         if(adapter->hw.media_type == em_media_type_copper ||
3225            (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
3226                 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
3227                 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
3228         }
3229         adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
3230         adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
3231         adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
3232         adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
3233
3234         adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
3235         adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
3236         adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
3237         adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
3238         adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
3239         adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
3240         adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
3241         adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
3242         adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
3243         adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
3244         adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
3245         adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
3246         adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
3247         adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
3248         adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
3249         adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
3250         adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
3251         adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
3252         adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
3253         adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
3254
3255         /* For the 64-bit byte counters the low dword must be read first. */
3256         /* Both registers clear on the read of the high dword */
3257
3258         adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL); 
3259         adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
3260         adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
3261         adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
3262
3263         adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
3264         adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
3265         adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
3266         adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
3267         adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
3268
3269         adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
3270         adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
3271         adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
3272         adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
3273
3274         adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
3275         adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
3276         adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
3277         adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
3278         adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
3279         adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
3280         adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
3281         adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
3282         adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
3283         adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
3284
3285         if (adapter->hw.mac_type >= em_82543) {
3286                 adapter->stats.algnerrc +=
3287                     E1000_READ_REG(&adapter->hw, ALGNERRC);
3288                 adapter->stats.rxerrc +=
3289                     E1000_READ_REG(&adapter->hw, RXERRC);
3290                 adapter->stats.tncrs +=
3291                     E1000_READ_REG(&adapter->hw, TNCRS);
3292                 adapter->stats.cexterr +=
3293                     E1000_READ_REG(&adapter->hw, CEXTERR);
3294                 adapter->stats.tsctc +=
3295                     E1000_READ_REG(&adapter->hw, TSCTC);
3296                 adapter->stats.tsctfc +=
3297                     E1000_READ_REG(&adapter->hw, TSCTFC);
3298         }
3299         ifp = adapter->ifp;
3300
3301         ifp->if_collisions = adapter->stats.colc;
3302
3303         /* Rx Errors */
3304         ifp->if_ierrors =
3305             adapter->dropped_pkts +
3306             adapter->stats.rxerrc +
3307             adapter->stats.crcerrs +
3308             adapter->stats.algnerrc +
3309             adapter->stats.rlec +
3310             adapter->stats.mpc + adapter->stats.cexterr;
3311
3312         /* Tx Errors */
3313         ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
3314             adapter->watchdog_events;
3315
3316 }
3317
3318
3319 /**********************************************************************
3320  *
3321  *  This routine is called only when em_display_debug_stats is enabled.
3322  *  This routine provides a way to take a look at important statistics
3323  *  maintained by the driver and hardware.
3324  *
3325  **********************************************************************/
3326 static void
3327 em_print_debug_info(struct adapter *adapter)
3328 {
3329         int unit = adapter->unit;
3330         uint8_t *hw_addr = adapter->hw.hw_addr;
3331  
3332         printf("em%d: Adapter hardware address = %p \n", unit, hw_addr);
3333         printf("em%d: CTRL = 0x%x RCTL = 0x%x \n", unit, 
3334             E1000_READ_REG(&adapter->hw, CTRL),
3335             E1000_READ_REG(&adapter->hw, RCTL)); 
3336         printf("em%d: Packet buffer = Tx=%dk Rx=%dk \n", unit, 
3337             ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),
3338             (E1000_READ_REG(&adapter->hw, PBA) & 0xffff) );
3339         printf("em%d: Flow control watermarks high = %d low = %d\n", unit, 
3340             adapter->hw.fc_high_water,
3341             adapter->hw.fc_low_water);
3342         printf("em%d: tx_int_delay = %d, tx_abs_int_delay = %d\n", unit, 
3343             E1000_READ_REG(&adapter->hw, TIDV),
3344             E1000_READ_REG(&adapter->hw, TADV));
3345         printf("em%d: rx_int_delay = %d, rx_abs_int_delay = %d\n", unit, 
3346             E1000_READ_REG(&adapter->hw, RDTR),
3347             E1000_READ_REG(&adapter->hw, RADV));
3348         printf("em%d: fifo workaround = %lld, fifo_reset_count = %lld\n",
3349             unit, (long long)adapter->tx_fifo_wrk_cnt, 
3350             (long long)adapter->tx_fifo_reset_cnt);
3351         printf("em%d: hw tdh = %d, hw tdt = %d\n", unit,
3352             E1000_READ_REG(&adapter->hw, TDH),
3353             E1000_READ_REG(&adapter->hw, TDT));
3354         printf("em%d: Num Tx descriptors avail = %d\n", unit,
3355             adapter->num_tx_desc_avail);
3356         printf("em%d: Tx Descriptors not avail1 = %ld\n", unit,
3357             adapter->no_tx_desc_avail1);
3358         printf("em%d: Tx Descriptors not avail2 = %ld\n", unit,
3359             adapter->no_tx_desc_avail2);
3360         printf("em%d: Std mbuf failed = %ld\n", unit,
3361             adapter->mbuf_alloc_failed);
3362         printf("em%d: Std mbuf cluster failed = %ld\n", unit,
3363             adapter->mbuf_cluster_failed);
3364         printf("em%d: Driver dropped packets = %ld\n", unit,
3365             adapter->dropped_pkts);
3366
3367         return;
3368 }
3369
3370 static void
3371 em_print_hw_stats(struct adapter *adapter)
3372 {
3373         int unit = adapter->unit;
3374
3375         printf("em%d: Excessive collisions = %lld\n", unit,
3376                (long long)adapter->stats.ecol);
3377         printf("em%d: Symbol errors = %lld\n", unit,
3378                (long long)adapter->stats.symerrs);
3379         printf("em%d: Sequence errors = %lld\n", unit,
3380                (long long)adapter->stats.sec);
3381         printf("em%d: Defer count = %lld\n", unit,
3382                (long long)adapter->stats.dc);
3383
3384         printf("em%d: Missed Packets = %lld\n", unit,
3385                (long long)adapter->stats.mpc);
3386         printf("em%d: Receive No Buffers = %lld\n", unit,
3387                (long long)adapter->stats.rnbc);
3388         printf("em%d: Receive length errors = %lld\n", unit,
3389                (long long)adapter->stats.rlec);
3390         printf("em%d: Receive errors = %lld\n", unit,
3391                (long long)adapter->stats.rxerrc);
3392         printf("em%d: Crc errors = %lld\n", unit,
3393                (long long)adapter->stats.crcerrs);
3394         printf("em%d: Alignment errors = %lld\n", unit,
3395                (long long)adapter->stats.algnerrc);
3396         printf("em%d: Carrier extension errors = %lld\n", unit,
3397                (long long)adapter->stats.cexterr);
3398         printf("em%d: RX overruns = %ld\n", unit, adapter->rx_overruns);
3399         printf("em%d: watchdog timeouts = %ld\n", unit,
3400                 adapter->watchdog_events);
3401
3402         printf("em%d: XON Rcvd = %lld\n", unit,
3403                (long long)adapter->stats.xonrxc);
3404         printf("em%d: XON Xmtd = %lld\n", unit,
3405                (long long)adapter->stats.xontxc);
3406         printf("em%d: XOFF Rcvd = %lld\n", unit,
3407                (long long)adapter->stats.xoffrxc);
3408         printf("em%d: XOFF Xmtd = %lld\n", unit,
3409                (long long)adapter->stats.xofftxc);
3410
3411         printf("em%d: Good Packets Rcvd = %lld\n", unit,
3412                (long long)adapter->stats.gprc);
3413         printf("em%d: Good Packets Xmtd = %lld\n", unit,
3414                (long long)adapter->stats.gptc);
3415
3416         return;
3417 }
3418
3419 static int
3420 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3421 {
3422         int error;
3423         int result;
3424         struct adapter *adapter;
3425
3426         result = -1;
3427         error = sysctl_handle_int(oidp, &result, 0, req);
3428
3429         if (error || !req->newptr)
3430                 return (error);
3431
3432         if (result == 1) {
3433                 adapter = (struct adapter *)arg1;
3434                 em_print_debug_info(adapter);
3435         }
3436
3437         return error;
3438 }
3439
3440
3441 static int
3442 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3443 {
3444         int error;
3445         int result;
3446         struct adapter *adapter;
3447
3448         result = -1;
3449         error = sysctl_handle_int(oidp, &result, 0, req);
3450
3451         if (error || !req->newptr)
3452                 return (error);
3453
3454         if (result == 1) {
3455                 adapter = (struct adapter *)arg1;
3456                 em_print_hw_stats(adapter);
3457         }
3458
3459         return error;
3460 }
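/*
 * Both handlers above are write-to-trigger: writing 1 to the sysctl node they
 * are attached to (registered elsewhere in the driver, e.g. something like
 * "dev.em.0.stats") dumps the corresponding report to the console, and the
 * node always reads back as -1 so it can simply be written again.
 */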
3461
3462 static int
3463 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3464 {
3465         struct em_int_delay_info *info;
3466         struct adapter *adapter;
3467         u_int32_t regval;
3468         int error;
3469         int usecs;
3470         int ticks;
3471
3472         info = (struct em_int_delay_info *)arg1;
3473         usecs = info->value;
3474         error = sysctl_handle_int(oidp, &usecs, 0, req);
3475         if (error != 0 || req->newptr == NULL)
3476                 return error;
3477         if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3478                 return EINVAL;
3479         info->value = usecs;
3480         ticks = E1000_USECS_TO_TICKS(usecs);
3481
3482         adapter = info->adapter;
3483         
3484         EM_LOCK(adapter);
3485         regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3486         regval = (regval & ~0xffff) | (ticks & 0xffff);
3487         /* Handle a few special cases. */
3488         switch (info->offset) {
3489         case E1000_RDTR:
3490         case E1000_82542_RDTR:
3491                 regval |= E1000_RDT_FPDB;
3492                 break;
3493         case E1000_TIDV:
3494         case E1000_82542_TIDV:
3495                 if (ticks == 0) {
3496                         adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3497                         /* Don't write 0 into the TIDV register. */
3498                         regval++;
3499                 } else
3500                         adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3501                 break;
3502         }
3503         E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3504         EM_UNLOCK(adapter);
3505         return 0;
3506 }
3507
3508 static void
3509 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3510     const char *description, struct em_int_delay_info *info,
3511     int offset, int value)
3512 {
3513         info->adapter = adapter;
3514         info->offset = offset;
3515         info->value = value;
3516         SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
3517             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3518             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3519             info, 0, em_sysctl_int_delay, "I", description);
3520 }
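/*
 * The interrupt-delay OIDs registered here land under the device's sysctl
 * tree, so, assuming unit 0 and a node named "rx_int_delay", the receive
 * delay could be retuned at runtime with something like:
 *
 *      sysctl dev.em.0.rx_int_delay=32
 *
 * em_sysctl_int_delay() then converts the microsecond value to register ticks
 * and rewrites the corresponding delay register under the adapter lock.
 */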