/* FreeBSD: sys/dev/e1000/e1000_ich8lan.c */
1 /******************************************************************************
2
3   Copyright (c) 2001-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 /* 82562G 10/100 Network Connection
36  * 82562G-2 10/100 Network Connection
37  * 82562GT 10/100 Network Connection
38  * 82562GT-2 10/100 Network Connection
39  * 82562V 10/100 Network Connection
40  * 82562V-2 10/100 Network Connection
41  * 82566DC-2 Gigabit Network Connection
42  * 82566DC Gigabit Network Connection
43  * 82566DM-2 Gigabit Network Connection
44  * 82566DM Gigabit Network Connection
45  * 82566MC Gigabit Network Connection
46  * 82566MM Gigabit Network Connection
47  * 82567LM Gigabit Network Connection
48  * 82567LF Gigabit Network Connection
49  * 82567V Gigabit Network Connection
50  * 82567LM-2 Gigabit Network Connection
51  * 82567LF-2 Gigabit Network Connection
52  * 82567V-2 Gigabit Network Connection
53  * 82567LF-3 Gigabit Network Connection
54  * 82567LM-3 Gigabit Network Connection
55  * 82567LM-4 Gigabit Network Connection
56  * 82577LM Gigabit Network Connection
57  * 82577LC Gigabit Network Connection
58  * 82578DM Gigabit Network Connection
59  * 82578DC Gigabit Network Connection
60  * 82579LM Gigabit Network Connection
61  * 82579V Gigabit Network Connection
62  * Ethernet Connection I217-LM
63  * Ethernet Connection I217-V
64  * Ethernet Connection I218-V
65  * Ethernet Connection I218-LM
66  * Ethernet Connection (2) I218-LM
67  * Ethernet Connection (2) I218-V
68  * Ethernet Connection (3) I218-LM
69  * Ethernet Connection (3) I218-V
70  */
71
72 #include "e1000_api.h"
73
74 static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 static int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 static int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
84                                               u8 *mc_addr_list,
85                                               u32 mc_addr_count);
86 static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
87 static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
88 static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
89 static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
90                                             bool active);
91 static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
92                                             bool active);
93 static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
94                                    u16 words, u16 *data);
95 static s32  e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
96                                u16 *data);
97 static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98                                     u16 words, u16 *data);
99 static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 static s32  e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
102 static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
103                                             u16 *data);
104 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
105 static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
106 static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
107 static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
108 static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
109 static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
110 static s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
111 static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
112                                            u16 *speed, u16 *duplex);
113 static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
114 static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
115 static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
116 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
117 static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
118 static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
119 static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
120 static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
121 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
122 static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
123 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
124 static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
125 static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
126                                           u32 offset, u8 *data);
127 static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
128                                           u8 size, u16 *data);
129 static s32  e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
130                                             u32 *data);
131 static s32  e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
132                                            u32 offset, u32 *data);
133 static s32  e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
134                                              u32 offset, u32 data);
135 static s32  e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
136                                                   u32 offset, u32 dword);
137 static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
138                                           u32 offset, u16 *data);
139 static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
140                                                  u32 offset, u8 byte);
141 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
142 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
143 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
144 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
145 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
146 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
147 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
148
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1; /* bit 0 Flash Cycle Done */
		u16 flcerr:1; /* bit 1 Flash Cycle Error */
		u16 dael:1; /* bit 2 Direct Access error Log */
		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
		u16 reserved1:2; /* bit 7:6 Reserved */
		u16 reserved2:6; /* bit 13:8 Reserved */
		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval; /* whole 16-bit register value for read/write as one unit */
};
165
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;   /* 0 Flash Cycle Go */
		u16 flcycle:2;   /* 2:1 Flash Cycle */
		u16 reserved:5;   /* 7:3 Reserved  */
		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
		u16 flockdn:6;   /* 15:10 Reserved (field name is historical;
				  * these bits are not a lock-down control in
				  * this register) */
	} hsf_ctrl;
	u16 regval; /* whole 16-bit register value for read/write as one unit */
};
178
/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra:8; /* 0:7 GbE region Read Access */
		u32 grwa:8; /* 8:15 GbE region Write Access */
		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	/* NOTE(review): the bitfields above span 32 bits but regval is only
	 * u16, so regval cannot alias the full structure.  This matches the
	 * upstream shared code; confirm regval is never used to load/store
	 * the whole FRACC register before changing it.
	 */
	u16 regval;
};
189
190 /**
191  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
192  *  @hw: pointer to the HW structure
193  *
194  *  Test access to the PHY registers by reading the PHY ID registers.  If
195  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
196  *  otherwise assume the read PHY ID is correct if it is valid.
197  *
198  *  Assumes the sw/fw/hw semaphore is already acquired.
199  **/
200 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
201 {
202         u16 phy_reg = 0;
203         u32 phy_id = 0;
204         s32 ret_val = 0;
205         u16 retry_count;
206         u32 mac_reg = 0;
207
208         for (retry_count = 0; retry_count < 2; retry_count++) {
209                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
210                 if (ret_val || (phy_reg == 0xFFFF))
211                         continue;
212                 phy_id = (u32)(phy_reg << 16);
213
214                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
215                 if (ret_val || (phy_reg == 0xFFFF)) {
216                         phy_id = 0;
217                         continue;
218                 }
219                 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
220                 break;
221         }
222
223         if (hw->phy.id) {
224                 if  (hw->phy.id == phy_id)
225                         goto out;
226         } else if (phy_id) {
227                 hw->phy.id = phy_id;
228                 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
229                 goto out;
230         }
231
232         /* In case the PHY needs to be in mdio slow mode,
233          * set slow mode and try to get the PHY id again.
234          */
235         if (hw->mac.type < e1000_pch_lpt) {
236                 hw->phy.ops.release(hw);
237                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
238                 if (!ret_val)
239                         ret_val = e1000_get_phy_id(hw);
240                 hw->phy.ops.acquire(hw);
241         }
242
243         if (ret_val)
244                 return FALSE;
245 out:
246         if (hw->mac.type >= e1000_pch_lpt) {
247                 /* Only unforce SMBus if ME is not active */
248                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
249                     E1000_ICH_FWSM_FW_VALID)) {
250                         /* Unforce SMBus mode in PHY */
251                         hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
252                         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
253                         hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
254
255                         /* Unforce SMBus mode in MAC */
256                         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
257                         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
258                         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
259                 }
260         }
261
262         return TRUE;
263 }
264
/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the PHY to a quiescent state when necessary.
 *  The write-flush/delay sequence below is timing-sensitive; do not
 *  reorder the register accesses.
 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
	u32 mac_reg;

	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

	/* Set Phy Config Counter to 50msec */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

	/* Toggle LANPHYPC Value bit: drive the pin low under software
	 * override, hold it for 1ms, then release the override.
	 */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);
	msec_delay(1);
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);

	if (hw->mac.type < e1000_pch_lpt) {
		/* Older parts: fixed settle time. */
		msec_delay(50);
	} else {
		u16 count = 20;

		/* LPT and newer: poll CTRL_EXT.LPCD every 5ms, up to
		 * ~100ms, then allow an extra 30ms to settle.
		 */
		do {
			msec_delay(5);
		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
			   E1000_CTRL_EXT_LPCD) && count--);

		msec_delay(30);
	}
}
308
/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 *
 *  The switch below intentionally falls through from newer to older MAC
 *  types: each case first tries the cheapest way to reach the PHY for that
 *  generation and, failing that, drops into the progressively heavier
 *  recovery steps shared with older parts (ending in a LANPHYPC toggle).
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

	/* It is not possible to be certain of the current state of ULP
	 * so forcibly disable it.
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
	e1000_disable_ulp_lpt_lp(hw, TRUE);

	/* ret_val is guaranteed assigned here; every later path either
	 * keeps this value or overwrites it.
	 */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		DEBUGOUT("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
	case e1000_pch_spt:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		/* Wait 50 milliseconds for MAC to finish any retries
		 * that it might be trying to perform from previous
		 * attempts to acknowledge any phy read requests.
		 */
		 msec_delay(50);

		/* fall-through */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* fall-through */
	case e1000_pchlan:
		/* On original PCH, an active ME means the interconnect is
		 * already managed; nothing more to do.
		 */
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
			ret_val = -E1000_ERR_PHY;
			break;
		}

		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			ret_val = -E1000_ERR_PHY;
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);
	if (!ret_val) {

		/* Check to see if able to reset PHY.  Print error if not */
		if (hw->phy.ops.check_reset_block(hw)) {
			ERROR_REPORT("Reset blocked by ME\n");
			goto out;
		}

		/* Reset the PHY before any access to it.  Doing so, ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000_phy_hw_reset_generic(hw);
		if (ret_val)
			goto out;

		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, then
		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 *  the PHY is in.
		 */
		ret_val = hw->phy.ops.check_reset_block(hw);
		if (ret_val)
			ERROR_REPORT("ME blocked access to PHY after reset\n");
	}

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		msec_delay(10);
		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
	}

	return ret_val;
}
442
/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers for
 *  PCH (82577/82578) and newer parts, run the PHY-access workarounds,
 *  then identify the PHY and hook up type-specific operations.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_params_pchlan");

	phy->addr               = 1;
	phy->reset_delay_us     = 100;

	phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
	phy->ops.set_page       = e1000_set_page_igp;
	phy->ops.read_reg       = e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
	phy->ops.release        = e1000_release_swflag_ich8lan;
	phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg      = e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
	phy->ops.power_up       = e1000_power_up_phy_copper;
	phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;

	phy->id = e1000_phy_unknown;

	/* May set phy->id as a side effect when it probes the PHY. */
	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val)
		return ret_val;

	/* The default: label is deliberately FIRST here: older MAC types
	 * try a normal ID read and, if the result is invalid, fall through
	 * into the slow-mdio retry shared with pch2lan/lpt/spt.
	 */
	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
		default:
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
				break;
			/* fall-through */
		case e1000_pch2lan:
		case e1000_pch_lpt:
		case e1000_pch_spt:
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			if (ret_val)
				return ret_val;
			ret_val = e1000_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			break;
		}
	phy->type = e1000_get_phy_type_from_id(phy->id);

	/* Hook up the operations specific to the identified PHY type. */
	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
	case e1000_phy_i217:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
			e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.get_info = e1000_get_phy_info_m88;
		break;
	default:
		/* Unknown PHY type is an error for this family. */
		ret_val = -E1000_ERR_PHY;
		break;
	}

	return ret_val;
}
532
533 /**
534  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
535  *  @hw: pointer to the HW structure
536  *
537  *  Initialize family-specific PHY parameters and function pointers.
538  **/
539 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
540 {
541         struct e1000_phy_info *phy = &hw->phy;
542         s32 ret_val;
543         u16 i = 0;
544
545         DEBUGFUNC("e1000_init_phy_params_ich8lan");
546
547         phy->addr               = 1;
548         phy->reset_delay_us     = 100;
549
550         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
551         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
552         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
553         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
554         phy->ops.read_reg       = e1000_read_phy_reg_igp;
555         phy->ops.release        = e1000_release_swflag_ich8lan;
556         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
557         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
558         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
559         phy->ops.write_reg      = e1000_write_phy_reg_igp;
560         phy->ops.power_up       = e1000_power_up_phy_copper;
561         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
562
563         /* We may need to do this twice - once for IGP and if that fails,
564          * we'll set BM func pointers and try again
565          */
566         ret_val = e1000_determine_phy_address(hw);
567         if (ret_val) {
568                 phy->ops.write_reg = e1000_write_phy_reg_bm;
569                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
570                 ret_val = e1000_determine_phy_address(hw);
571                 if (ret_val) {
572                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
573                         return ret_val;
574                 }
575         }
576
577         phy->id = 0;
578         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
579                (i++ < 100)) {
580                 msec_delay(1);
581                 ret_val = e1000_get_phy_id(hw);
582                 if (ret_val)
583                         return ret_val;
584         }
585
586         /* Verify phy id */
587         switch (phy->id) {
588         case IGP03E1000_E_PHY_ID:
589                 phy->type = e1000_phy_igp_3;
590                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
591                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
592                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
593                 phy->ops.get_info = e1000_get_phy_info_igp;
594                 phy->ops.check_polarity = e1000_check_polarity_igp;
595                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
596                 break;
597         case IFE_E_PHY_ID:
598         case IFE_PLUS_E_PHY_ID:
599         case IFE_C_E_PHY_ID:
600                 phy->type = e1000_phy_ife;
601                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
602                 phy->ops.get_info = e1000_get_phy_info_ife;
603                 phy->ops.check_polarity = e1000_check_polarity_ife;
604                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
605                 break;
606         case BME1000_E_PHY_ID:
607                 phy->type = e1000_phy_bm;
608                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
609                 phy->ops.read_reg = e1000_read_phy_reg_bm;
610                 phy->ops.write_reg = e1000_write_phy_reg_bm;
611                 phy->ops.commit = e1000_phy_sw_reset_generic;
612                 phy->ops.get_info = e1000_get_phy_info_m88;
613                 phy->ops.check_polarity = e1000_check_polarity_m88;
614                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
615                 break;
616         default:
617                 return -E1000_ERR_PHY;
618                 break;
619         }
620
621         return E1000_SUCCESS;
622 }
623
/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function
 *  pointers.  Computes the flash base address and per-bank size (in
 *  16-bit words), clears the shadow RAM, initializes the NVM mutexes,
 *  and selects SPT-specific or ICH8-generic read/update routines.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	u16 i;
	u32 nvm_size;

	DEBUGFUNC("e1000_init_nvm_params_ich8lan");

	nvm->type = e1000_nvm_flash_sw;

	if (hw->mac.type >= e1000_pch_spt) {
		/* in SPT, gfpreg doesn't exist. NVM size is taken from the
		 * STRAP register. This is because in SPT the GbE Flash region
		 * is no longer accessed through the flash registers. Instead,
		 * the mechanism has changed, and the Flash region access
		 * registers are now implemented in GbE memory space.
		 */
		nvm->flash_base_addr = 0;
		/* Size field is STRAP bits 5:1; each unit is one
		 * NVM_SIZE_MULTIPLIER of bytes.
		 */
		nvm_size =
		    (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
		    * NVM_SIZE_MULTIPLIER;
		/* Two banks share the total size... */
		nvm->flash_bank_size = nvm_size / 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
		/* Set the base address for flash register access */
		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
	} else {
		/* Can't read flash registers if register set isn't mapped. */
		if (!hw->flash_address) {
			DEBUGOUT("ERROR: Flash registers not mapped\n");
			return -E1000_ERR_CONFIG;
		}

		gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
		 * Add 1 to sector_end_addr since this sector is included in
		 * the overall size.
		 */
		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

		/* flash_base_addr is byte-aligned */
		nvm->flash_base_addr = sector_base_addr
				       << FLASH_SECTOR_ADDR_SHIFT;

		/* find total size of the NVM, then cut in half since the total
		 * size represents two separate NVM banks.
		 */
		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
					<< FLASH_SECTOR_ADDR_SHIFT);
		nvm->flash_bank_size /= 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
	}

	nvm->word_size = E1000_SHADOW_RAM_WORDS;

	/* Clear shadow ram */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = FALSE;
		dev_spec->shadow_ram[i].value    = 0xFFFF;
	}

	E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
	E1000_MUTEX_INIT(&dev_spec->swflag_mutex);

	/* Function Pointers */
	nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
	nvm->ops.release        = e1000_release_nvm_ich8lan;
	if (hw->mac.type >= e1000_pch_spt) {
		/* SPT+ accesses flash through GbE memory space. */
		nvm->ops.read   = e1000_read_nvm_spt;
		nvm->ops.update = e1000_update_nvm_checksum_spt;
	} else {
		nvm->ops.read   = e1000_read_nvm_ich8lan;
		nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
	}
	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
	nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
	nvm->ops.write          = e1000_write_nvm_ich8lan;

	return E1000_SUCCESS;
}
716
717 /**
718  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
719  *  @hw: pointer to the HW structure
720  *
721  *  Initialize family-specific MAC parameters and function
722  *  pointers.
723  **/
724 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
725 {
726         struct e1000_mac_info *mac = &hw->mac;
727
728         DEBUGFUNC("e1000_init_mac_params_ich8lan");
729
730         /* Set media type function pointer */
731         hw->phy.media_type = e1000_media_type_copper;
732
733         /* Set mta register count */
734         mac->mta_reg_count = 32;
735         /* Set rar entry count */
736         mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
737         if (mac->type == e1000_ich8lan)
738                 mac->rar_entry_count--;
739         /* Set if part includes ASF firmware */
740         mac->asf_firmware_present = TRUE;
741         /* FWSM register */
742         mac->has_fwsm = TRUE;
743         /* ARC subsystem not supported */
744         mac->arc_subsystem_valid = FALSE;
745         /* Adaptive IFS supported */
746         mac->adaptive_ifs = TRUE;
747
748         /* Function pointers */
749
750         /* bus type/speed/width */
751         mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
752         /* function id */
753         mac->ops.set_lan_id = e1000_set_lan_id_single_port;
754         /* reset */
755         mac->ops.reset_hw = e1000_reset_hw_ich8lan;
756         /* hw initialization */
757         mac->ops.init_hw = e1000_init_hw_ich8lan;
758         /* link setup */
759         mac->ops.setup_link = e1000_setup_link_ich8lan;
760         /* physical interface setup */
761         mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
762         /* check for link */
763         mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
764         /* link info */
765         mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
766         /* multicast address update */
767         mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
768         /* clear hardware counters */
769         mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
770
771         /* LED and other operations */
772         switch (mac->type) {
773         case e1000_ich8lan:
774         case e1000_ich9lan:
775         case e1000_ich10lan:
776                 /* check management mode */
777                 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
778                 /* ID LED init */
779                 mac->ops.id_led_init = e1000_id_led_init_generic;
780                 /* blink LED */
781                 mac->ops.blink_led = e1000_blink_led_generic;
782                 /* setup LED */
783                 mac->ops.setup_led = e1000_setup_led_generic;
784                 /* cleanup LED */
785                 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
786                 /* turn on/off LED */
787                 mac->ops.led_on = e1000_led_on_ich8lan;
788                 mac->ops.led_off = e1000_led_off_ich8lan;
789                 break;
790         case e1000_pch2lan:
791                 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
792                 mac->ops.rar_set = e1000_rar_set_pch2lan;
793                 /* fall-through */
794         case e1000_pch_lpt:
795         case e1000_pch_spt:
796                 /* multicast address update for pch2 */
797                 mac->ops.update_mc_addr_list =
798                         e1000_update_mc_addr_list_pch2lan;
799                 /* fall-through */
800         case e1000_pchlan:
801                 /* check management mode */
802                 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
803                 /* ID LED init */
804                 mac->ops.id_led_init = e1000_id_led_init_pchlan;
805                 /* setup LED */
806                 mac->ops.setup_led = e1000_setup_led_pchlan;
807                 /* cleanup LED */
808                 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
809                 /* turn on/off LED */
810                 mac->ops.led_on = e1000_led_on_pchlan;
811                 mac->ops.led_off = e1000_led_off_pchlan;
812                 break;
813         default:
814                 break;
815         }
816
817         if (mac->type >= e1000_pch_lpt) {
818                 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
819                 mac->ops.rar_set = e1000_rar_set_pch_lpt;
820                 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
821                 mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
822         }
823
824         /* Enable PCS Lock-loss workaround for ICH8 */
825         if (mac->type == e1000_ich8lan)
826                 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
827
828         return E1000_SUCCESS;
829 }
830
831 /**
832  *  __e1000_access_emi_reg_locked - Read/write EMI register
833  *  @hw: pointer to the HW structure
834  *  @addr: EMI address to program
835  *  @data: pointer to value to read/write from/to the EMI address
836  *  @read: boolean flag to indicate read or write
837  *
838  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
839  **/
840 static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
841                                          u16 *data, bool read)
842 {
843         s32 ret_val;
844
845         DEBUGFUNC("__e1000_access_emi_reg_locked");
846
847         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
848         if (ret_val)
849                 return ret_val;
850
851         if (read)
852                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
853                                                       data);
854         else
855                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
856                                                        *data);
857
858         return ret_val;
859 }
860
861 /**
862  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
863  *  @hw: pointer to the HW structure
864  *  @addr: EMI address to program
865  *  @data: value to be read from the EMI address
866  *
867  *  Assumes the SW/FW/HW Semaphore is already acquired.
868  **/
869 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
870 {
871         DEBUGFUNC("e1000_read_emi_reg_locked");
872
873         return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
874 }
875
876 /**
877  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
878  *  @hw: pointer to the HW structure
879  *  @addr: EMI address to program
880  *  @data: value to be written to the EMI address
881  *
882  *  Assumes the SW/FW/HW Semaphore is already acquired.
883  **/
884 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
885 {
886         DEBUGFUNC("e1000_read_emi_reg_locked");
887
888         return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
889 }
890
891 /**
892  *  e1000_set_eee_pchlan - Enable/disable EEE support
893  *  @hw: pointer to the HW structure
894  *
895  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
896  *  the link and the EEE capabilities of the link partner.  The LPI Control
897  *  register bits will remain set only if/when link is up.
898  *
899  *  EEE LPI must not be asserted earlier than one second after link is up.
900  *  On 82579, EEE LPI should not be enabled until such time otherwise there
901  *  can be link issues with some switches.  Other devices can have EEE LPI
902  *  enabled immediately upon link up since they have a timer in hardware which
903  *  prevents LPI from being asserted too early.
904  **/
905 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
906 {
907         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
908         s32 ret_val;
909         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
910
911         DEBUGFUNC("e1000_set_eee_pchlan");
912
913         switch (hw->phy.type) {
914         case e1000_phy_82579:
915                 lpa = I82579_EEE_LP_ABILITY;
916                 pcs_status = I82579_EEE_PCS_STATUS;
917                 adv_addr = I82579_EEE_ADVERTISEMENT;
918                 break;
919         case e1000_phy_i217:
920                 lpa = I217_EEE_LP_ABILITY;
921                 pcs_status = I217_EEE_PCS_STATUS;
922                 adv_addr = I217_EEE_ADVERTISEMENT;
923                 break;
924         default:
925                 return E1000_SUCCESS;
926         }
927
928         ret_val = hw->phy.ops.acquire(hw);
929         if (ret_val)
930                 return ret_val;
931
932         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
933         if (ret_val)
934                 goto release;
935
936         /* Clear bits that enable EEE in various speeds */
937         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
938
939         /* Enable EEE if not disabled by user */
940         if (!dev_spec->eee_disable) {
941                 /* Save off link partner's EEE ability */
942                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
943                                                     &dev_spec->eee_lp_ability);
944                 if (ret_val)
945                         goto release;
946
947                 /* Read EEE advertisement */
948                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
949                 if (ret_val)
950                         goto release;
951
952                 /* Enable EEE only for speeds in which the link partner is
953                  * EEE capable and for which we advertise EEE.
954                  */
955                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
956                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
957
958                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
959                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
960                         if (data & NWAY_LPAR_100TX_FD_CAPS)
961                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
962                         else
963                                 /* EEE is not supported in 100Half, so ignore
964                                  * partner's EEE in 100 ability if full-duplex
965                                  * is not advertised.
966                                  */
967                                 dev_spec->eee_lp_ability &=
968                                     ~I82579_EEE_100_SUPPORTED;
969                 }
970         }
971
972         if (hw->phy.type == e1000_phy_82579) {
973                 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
974                                                     &data);
975                 if (ret_val)
976                         goto release;
977
978                 data &= ~I82579_LPI_100_PLL_SHUT;
979                 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
980                                                      data);
981         }
982
983         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
984         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
985         if (ret_val)
986                 goto release;
987
988         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
989 release:
990         hw->phy.ops.release(hw);
991
992         return ret_val;
993 }
994
995 /**
996  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
997  *  @hw:   pointer to the HW structure
998  *  @link: link up bool flag
999  *
1000  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
1001  *  preventing further DMA write requests.  Workaround the issue by disabling
1002  *  the de-assertion of the clock request when in 1Gpbs mode.
1003  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
1004  *  speeds in order to avoid Tx hangs.
1005  **/
1006 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
1007 {
1008         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1009         u32 status = E1000_READ_REG(hw, E1000_STATUS);
1010         s32 ret_val = E1000_SUCCESS;
1011         u16 reg;
1012
1013         if (link && (status & E1000_STATUS_SPEED_1000)) {
1014                 ret_val = hw->phy.ops.acquire(hw);
1015                 if (ret_val)
1016                         return ret_val;
1017
1018                 ret_val =
1019                     e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1020                                                &reg);
1021                 if (ret_val)
1022                         goto release;
1023
1024                 ret_val =
1025                     e1000_write_kmrn_reg_locked(hw,
1026                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
1027                                                 reg &
1028                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
1029                 if (ret_val)
1030                         goto release;
1031
1032                 usec_delay(10);
1033
1034                 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1035                                 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1036
1037                 ret_val =
1038                     e1000_write_kmrn_reg_locked(hw,
1039                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
1040                                                 reg);
1041 release:
1042                 hw->phy.ops.release(hw);
1043         } else {
1044                 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1045                 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1046
1047                 if ((hw->phy.revision > 5) || !link ||
1048                     ((status & E1000_STATUS_SPEED_100) &&
1049                      (status & E1000_STATUS_FD)))
1050                         goto update_fextnvm6;
1051
1052                 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1053                 if (ret_val)
1054                         return ret_val;
1055
1056                 /* Clear link status transmit timeout */
1057                 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1058
1059                 if (status & E1000_STATUS_SPEED_100) {
1060                         /* Set inband Tx timeout to 5x10us for 100Half */
1061                         reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1062
1063                         /* Do not extend the K1 entry latency for 100Half */
1064                         fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1065                 } else {
1066                         /* Set inband Tx timeout to 50x10us for 10Full/Half */
1067                         reg |= 50 <<
1068                                I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1069
1070                         /* Extend the K1 entry latency for 10 Mbps */
1071                         fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1072                 }
1073
1074                 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1075                 if (ret_val)
1076                         return ret_val;
1077
1078 update_fextnvm6:
1079                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1080         }
1081
1082         return ret_val;
1083 }
1084
1085 static u64 e1000_ltr2ns(u16 ltr)
1086 {
1087         u32 value, scale;
1088
1089         /* Determine the latency in nsec based on the LTR value & scale */
1090         value = ltr & E1000_LTRV_VALUE_MASK;
1091         scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;
1092
1093         return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR));
1094 }
1095
1096 /**
1097  *  e1000_platform_pm_pch_lpt - Set platform power management values
1098  *  @hw: pointer to the HW structure
1099  *  @link: bool indicating link status
1100  *
1101  *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
1102  *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
1103  *  when link is up (which must not exceed the maximum latency supported
1104  *  by the platform), otherwise specify there is no LTR requirement.
1105  *  Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop
1106  *  latencies in the LTR Extended Capability Structure in the PCIe Extended
1107  *  Capability register set, on this device LTR is set by writing the
1108  *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
1109  *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
1110  *  message to the PMC.
1111  *
1112  *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
1113  *  high-water mark.
1114  **/
1115 static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1116 {
1117         u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1118                   link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1119         u16 lat_enc = 0;        /* latency encoded */
1120         s32 obff_hwm = 0;
1121
1122         DEBUGFUNC("e1000_platform_pm_pch_lpt");
1123
1124         if (link) {
1125                 u16 speed, duplex, scale = 0;
1126                 u16 max_snoop, max_nosnoop;
1127                 u16 max_ltr_enc;        /* max LTR latency encoded */
1128                 s64 lat_ns;
1129                 s64 value;
1130                 u32 rxa;
1131
1132                 if (!hw->mac.max_frame_size) {
1133                         DEBUGOUT("max_frame_size not set.\n");
1134                         return -E1000_ERR_CONFIG;
1135                 }
1136
1137                 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1138                 if (!speed) {
1139                         DEBUGOUT("Speed not set.\n");
1140                         return -E1000_ERR_CONFIG;
1141                 }
1142
1143                 /* Rx Packet Buffer Allocation size (KB) */
1144                 rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;
1145
1146                 /* Determine the maximum latency tolerated by the device.
1147                  *
1148                  * Per the PCIe spec, the tolerated latencies are encoded as
1149                  * a 3-bit encoded scale (only 0-5 are valid) multiplied by
1150                  * a 10-bit value (0-1023) to provide a range from 1 ns to
1151                  * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
1152                  * 1=2^5ns, 2=2^10ns,...5=2^25ns.
1153                  */
1154                 lat_ns = ((s64)rxa * 1024 -
1155                           (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
1156                 if (lat_ns < 0)
1157                         lat_ns = 0;
1158                 else
1159                         lat_ns /= speed;
1160                 value = lat_ns;
1161
1162                 while (value > E1000_LTRV_VALUE_MASK) {
1163                         scale++;
1164                         value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
1165                 }
1166                 if (scale > E1000_LTRV_SCALE_MAX) {
1167                         DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
1168                         return -E1000_ERR_CONFIG;
1169                 }
1170                 lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
1171
1172                 /* Determine the maximum latency tolerated by the platform */
1173                 e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
1174                 e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1175                 max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);
1176
1177                 if (lat_enc > max_ltr_enc) {
1178                         lat_enc = max_ltr_enc;
1179                         lat_ns = e1000_ltr2ns(max_ltr_enc);
1180                 }
1181
1182                 if (lat_ns) {
1183                         lat_ns *= speed * 1000;
1184                         lat_ns /= 8;
1185                         lat_ns /= 1000000000;
1186                         obff_hwm = (s32)(rxa - lat_ns);
1187                 }
1188                 if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
1189                         DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
1190                         return -E1000_ERR_CONFIG;
1191                 }
1192         }
1193
1194         /* Set Snoop and No-Snoop latencies the same */
1195         reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1196         E1000_WRITE_REG(hw, E1000_LTRV, reg);
1197
1198         /* Set OBFF high water mark */
1199         reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
1200         reg |= obff_hwm;
1201         E1000_WRITE_REG(hw, E1000_SVT, reg);
1202
1203         /* Enable OBFF */
1204         reg = E1000_READ_REG(hw, E1000_SVCR);
1205         reg |= E1000_SVCR_OFF_EN;
1206         /* Always unblock interrupts to the CPU even when the system is
1207          * in OBFF mode. This ensures that small round-robin traffic
1208          * (like ping) does not get dropped or experience long latency.
1209          */
1210         reg |= E1000_SVCR_OFF_MASKINT;
1211         E1000_WRITE_REG(hw, E1000_SVCR, reg);
1212
1213         return E1000_SUCCESS;
1214 }
1215
1216 /**
1217  *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
1218  *  @hw: pointer to the HW structure
1219  *  @itr: interrupt throttling rate
1220  *
1221  *  Configure OBFF with the updated interrupt rate.
1222  **/
1223 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
1224 {
1225         u32 svcr;
1226         s32 timer;
1227
1228         DEBUGFUNC("e1000_set_obff_timer_pch_lpt");
1229
1230         /* Convert ITR value into microseconds for OBFF timer */
1231         timer = itr & E1000_ITR_MASK;
1232         timer = (timer * E1000_ITR_MULT) / 1000;
1233
1234         if ((timer < 0) || (timer > E1000_ITR_MASK)) {
1235                 DEBUGOUT1("Invalid OBFF timer %d\n", timer);
1236                 return -E1000_ERR_CONFIG;
1237         }
1238
1239         svcr = E1000_READ_REG(hw, E1000_SVCR);
1240         svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
1241         svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
1242         E1000_WRITE_REG(hw, E1000_SVCR, svcr);
1243
1244         return E1000_SUCCESS;
1245 }
1246
1247 /**
1248  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1249  *  @hw: pointer to the HW structure
1250  *  @to_sx: boolean indicating a system power state transition to Sx
1251  *
1252  *  When link is down, configure ULP mode to significantly reduce the power
1253  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1254  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1255  *  system, configure the ULP mode by software.
1256  */
1257 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1258 {
1259         u32 mac_reg;
1260         s32 ret_val = E1000_SUCCESS;
1261         u16 phy_reg;
1262         u16 oem_reg = 0;
1263
1264         if ((hw->mac.type < e1000_pch_lpt) ||
1265             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1266             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1267             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1268             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1269             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1270                 return 0;
1271
1272         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1273                 /* Request ME configure ULP mode in the PHY */
1274                 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1275                 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1276                 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1277
1278                 goto out;
1279         }
1280
1281         if (!to_sx) {
1282                 int i = 0;
1283
1284                 /* Poll up to 5 seconds for Cable Disconnected indication */
1285                 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1286                          E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1287                         /* Bail if link is re-acquired */
1288                         if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1289                                 return -E1000_ERR_PHY;
1290
1291                         if (i++ == 100)
1292                                 break;
1293
1294                         msec_delay(50);
1295                 }
1296                 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1297                          (E1000_READ_REG(hw, E1000_FEXT) &
1298                           E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1299                          i * 50);
1300         }
1301
1302         ret_val = hw->phy.ops.acquire(hw);
1303         if (ret_val)
1304                 goto out;
1305
1306         /* Force SMBus mode in PHY */
1307         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1308         if (ret_val)
1309                 goto release;
1310         phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1311         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1312
1313         /* Force SMBus mode in MAC */
1314         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1315         mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1316         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1317
1318         /* Si workaround for ULP entry flow on i127/rev6 h/w.  Enable
1319          * LPLU and disable Gig speed when entering ULP
1320          */
1321         if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1322                 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1323                                                        &oem_reg);
1324                 if (ret_val)
1325                         goto release;
1326
1327                 phy_reg = oem_reg;
1328                 phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1329
1330                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1331                                                         phy_reg);
1332
1333                 if (ret_val)
1334                         goto release;
1335         }
1336
1337         /* Set Inband ULP Exit, Reset to SMBus mode and
1338          * Disable SMBus Release on PERST# in PHY
1339          */
1340         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1341         if (ret_val)
1342                 goto release;
1343         phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1344                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1345         if (to_sx) {
1346                 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1347                         phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1348                 else
1349                         phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1350
1351                 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1352                 phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1353         } else {
1354                 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1355                 phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1356                 phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1357         }
1358         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1359
1360         /* Set Disable SMBus Release on PERST# in MAC */
1361         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1362         mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1363         E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1364
1365         /* Commit ULP changes in PHY by starting auto ULP configuration */
1366         phy_reg |= I218_ULP_CONFIG1_START;
1367         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1368
1369         if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1370             to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1371                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1372                                                         oem_reg);
1373                 if (ret_val)
1374                         goto release;
1375         }
1376
1377 release:
1378         hw->phy.ops.release(hw);
1379 out:
1380         if (ret_val)
1381                 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1382         else
1383                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1384
1385         return ret_val;
1386 }
1387
1388 /**
1389  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1390  *  @hw: pointer to the HW structure
1391  *  @force: boolean indicating whether or not to force disabling ULP
1392  *
1393  *  Un-configure ULP mode when link is up, the system is transitioned from
1394  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1395  *  system, poll for an indication from ME that ULP has been un-configured.
1396  *  If not on an ME enabled system, un-configure the ULP mode by software.
1397  *
1398  *  During nominal operation, this function is called when link is acquired
1399  *  to disable ULP mode (force=FALSE); otherwise, for example when unloading
1400  *  the driver or during Sx->S0 transitions, this is called with force=TRUE
1401  *  to forcibly disable ULP.
1402  */
s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
{
        s32 ret_val = E1000_SUCCESS;
        u32 mac_reg;
        u16 phy_reg;
        int i = 0;

        /* ULP only applies to LPT-and-newer MACs; the listed I217/I218
         * device IDs are excluded, and there is nothing to do if the
         * driver already recorded ULP as off.
         */
        if ((hw->mac.type < e1000_pch_lpt) ||
            (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
            (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
            (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
            (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
            (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
                return 0;

        /* With valid firmware (ME) present, ask ME to un-configure ULP
         * instead of touching the PHY directly.
         */
        if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
                if (force) {
                        /* Request ME un-configure ULP mode in the PHY */
                        mac_reg = E1000_READ_REG(hw, E1000_H2ME);
                        mac_reg &= ~E1000_H2ME_ULP;
                        mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
                        E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
                }

                /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
                while (E1000_READ_REG(hw, E1000_FWSM) &
                       E1000_FWSM_ULP_CFG_DONE) {
                        if (i++ == 30) {
                                /* ME never acknowledged; report PHY error */
                                ret_val = -E1000_ERR_PHY;
                                goto out;
                        }

                        msec_delay(10);
                }
                DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);

                if (force) {
                        /* Done forcing; drop the ENFORCE_SETTINGS request */
                        mac_reg = E1000_READ_REG(hw, E1000_H2ME);
                        mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
                        E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
                } else {
                        /* Clear H2ME.ULP after ME ULP configuration */
                        mac_reg = E1000_READ_REG(hw, E1000_H2ME);
                        mac_reg &= ~E1000_H2ME_ULP;
                        E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
                }

                goto out;
        }

        /* No ME: un-configure ULP by software via direct PHY/MAC access. */
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                goto out;

        if (force)
                /* Toggle LANPHYPC Value bit */
                e1000_toggle_lanphypc_pch_lpt(hw);

        /* Unforce SMBus mode in PHY */
        ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
        if (ret_val) {
                /* The MAC might be in PCIe mode, so temporarily force to
                 * SMBus mode in order to access the PHY.
                 */
                mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
                E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

                msec_delay(50);

                /* Retry the PHY read now that SMBus mode is forced */
                ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
                                                       &phy_reg);
                if (ret_val)
                        goto release;
        }
        phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
        e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

        /* Unforce SMBus mode in MAC */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

        /* When ULP mode was previously entered, K1 was disabled by the
         * hardware.  Re-Enable K1 in the PHY when exiting ULP.
         */
        ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
        if (ret_val)
                goto release;
        phy_reg |= HV_PM_CTRL_K1_ENABLE;
        e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);

        /* Clear ULP enabled configuration */
        ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
        if (ret_val)
                goto release;
        phy_reg &= ~(I218_ULP_CONFIG1_IND |
                     I218_ULP_CONFIG1_STICKY_ULP |
                     I218_ULP_CONFIG1_RESET_TO_SMBUS |
                     I218_ULP_CONFIG1_WOL_HOST |
                     I218_ULP_CONFIG1_INBAND_EXIT |
                     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
                     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

        /* Commit ULP changes by starting auto ULP configuration */
        phy_reg |= I218_ULP_CONFIG1_START;
        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

        /* Clear Disable SMBus Release on PERST# in MAC */
        mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
        mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
        E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

release:
        hw->phy.ops.release(hw);
        if (force) {
                /* Forced disable ends with a full PHY reset */
                hw->phy.ops.reset(hw);
                msec_delay(50);
        }
out:
        if (ret_val)
                DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
        else
                hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;

        return ret_val;
}
1532
1533 /**
1534  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1535  *  @hw: pointer to the HW structure
1536  *
1537  *  Checks to see of the link status of the hardware has changed.  If a
1538  *  change in link status has been detected, then we read the PHY registers
1539  *  to get the current speed/duplex if link exists.
1540  **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
        struct e1000_mac_info *mac = &hw->mac;
        s32 ret_val, tipg_reg = 0;
        u16 emi_addr, emi_val = 0;
        bool link;
        u16 phy_reg;

        DEBUGFUNC("e1000_check_for_copper_link_ich8lan");

        /* We only want to go out to the PHY registers to see if Auto-Neg
         * has completed and/or if our link status has changed.  The
         * get_link_status flag is set upon receiving a Link Status
         * Change or Rx Sequence Error interrupt.
         */
        if (!mac->get_link_status)
                return E1000_SUCCESS;

        /* First we want to see if the MII Status Register reports
         * link.  If so, then we want to get the current speed/duplex
         * of the PHY.
         */
        ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
        if (ret_val)
                return ret_val;

        if (hw->mac.type == e1000_pchlan) {
                /* PCH-specific K1 gigabit workaround tracks link state */
                ret_val = e1000_k1_gig_workaround_hv(hw, link);
                if (ret_val)
                        return ret_val;
        }

        /* When connected at 10Mbps half-duplex, some parts are excessively
         * aggressive resulting in many collisions. To avoid this, increase
         * the IPG and reduce Rx latency in the PHY.
         */
        if ((hw->mac.type >= e1000_pch2lan) && link) {
                u16 speed, duplex;

                e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
                tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
                tipg_reg &= ~E1000_TIPG_IPGT_MASK;

                if (duplex == HALF_DUPLEX && speed == SPEED_10) {
                        /* Maximum IPG at 10 half-duplex */
                        tipg_reg |= 0xFF;
                        /* Reduce Rx latency in analog PHY */
                        emi_val = 0;
                } else if (hw->mac.type >= e1000_pch_spt &&
                           duplex == FULL_DUPLEX && speed != SPEED_1000) {
                        /* SPT and newer at 10/100 full-duplex */
                        tipg_reg |= 0xC;
                        emi_val = 1;
                } else {
                        /* Roll back the default values */
                        tipg_reg |= 0x08;
                        emi_val = 1;
                }

                E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);

                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
                        return ret_val;

                /* The Rx-latency EMI register address differs between
                 * 82579 (pch2lan) and I217-class parts.
                 */
                if (hw->mac.type == e1000_pch2lan)
                        emi_addr = I82579_RX_CONFIG;
                else
                        emi_addr = I217_RX_CONFIG;
                ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);

                if (hw->mac.type >= e1000_pch_lpt) {
                        u16 phy_reg;

                        /* Select PLL clock gate setting for the negotiated
                         * speed; 0x3E8/0xFA are hardware-specified values —
                         * presumably from the Intel datasheet, confirm there.
                         */
                        hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
                                                    &phy_reg);
                        phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
                        if (speed == SPEED_100 || speed == SPEED_10)
                                phy_reg |= 0x3E8;
                        else
                                phy_reg |= 0xFA;
                        hw->phy.ops.write_reg_locked(hw,
                                                     I217_PLL_CLOCK_GATE_REG,
                                                     phy_reg);

                        if (speed == SPEED_1000) {
                                /* Request K1 clock at gigabit speed */
                                hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
                                                            &phy_reg);

                                phy_reg |= HV_PM_CTRL_K1_CLK_REQ;

                                hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
                                                             phy_reg);
                        }
                }
                hw->phy.ops.release(hw);

                if (ret_val)
                        return ret_val;

                if (hw->mac.type >= e1000_pch_spt) {
                        u16 data;
                        u16 ptr_gap;

                        if (speed == SPEED_1000) {
                                ret_val = hw->phy.ops.acquire(hw);
                                if (ret_val)
                                        return ret_val;

                                ret_val = hw->phy.ops.read_reg_locked(hw,
                                                              PHY_REG(776, 20),
                                                              &data);
                                if (ret_val) {
                                        hw->phy.ops.release(hw);
                                        return ret_val;
                                }

                                /* Pointer gap lives in bits 11:2; enforce a
                                 * minimum of 0x18 at gigabit speed.
                                 */
                                ptr_gap = (data & (0x3FF << 2)) >> 2;
                                if (ptr_gap < 0x18) {
                                        data &= ~(0x3FF << 2);
                                        data |= (0x18 << 2);
                                        ret_val =
                                                hw->phy.ops.write_reg_locked(hw,
                                                        PHY_REG(776, 20), data);
                                }
                                hw->phy.ops.release(hw);
                                if (ret_val)
                                        return ret_val;
                        } else {
                                ret_val = hw->phy.ops.acquire(hw);
                                if (ret_val)
                                        return ret_val;

                                /* Fixed value for non-gigabit speeds */
                                ret_val = hw->phy.ops.write_reg_locked(hw,
                                                             PHY_REG(776, 20),
                                                             0xC023);
                                hw->phy.ops.release(hw);
                                if (ret_val)
                                        return ret_val;

                        }
                }
        }

        /* I217 Packet Loss issue:
         * ensure that FEXTNVM4 Beacon Duration is set correctly
         * on power up.
         * Set the Beacon Duration for I217 to 8 usec
         */
        if (hw->mac.type >= e1000_pch_lpt) {
                u32 mac_reg;

                mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
                mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
                mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
                E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
        }

        /* Work-around I218 hang issue */
        if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
            (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
            (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
            (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
                ret_val = e1000_k1_workaround_lpt_lp(hw, link);
                if (ret_val)
                        return ret_val;
        }
        if (hw->mac.type >= e1000_pch_lpt) {
                /* Set platform power management values for
                 * Latency Tolerance Reporting (LTR)
                 * Optimized Buffer Flush/Fill (OBFF)
                 */
                ret_val = e1000_platform_pm_pch_lpt(hw, link);
                if (ret_val)
                        return ret_val;
        }

        /* Clear link partner's EEE ability */
        hw->dev_spec.ich8lan.eee_lp_ability = 0;

        if (hw->mac.type >= e1000_pch_lpt) {
                u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

                if (hw->mac.type == e1000_pch_spt) {
                        /* FEXTNVM6 K1-off workaround - for SPT only */
                        u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);

                        if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
                                fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
                        else
                                fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
                }

                /* Driver configuration may force K1-off disabled */
                if (hw->dev_spec.ich8lan.disable_k1_off == TRUE)
                        fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;

                E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
        }

        if (!link)
                return E1000_SUCCESS; /* No link detected */

        mac->get_link_status = FALSE;

        switch (hw->mac.type) {
        case e1000_pch2lan:
                ret_val = e1000_k1_workaround_lv(hw);
                if (ret_val)
                        return ret_val;
                /* fall-thru */
        case e1000_pchlan:
                if (hw->phy.type == e1000_phy_82578) {
                        ret_val = e1000_link_stall_workaround_hv(hw);
                        if (ret_val)
                                return ret_val;
                }

                /* Workaround for PCHx parts in half-duplex:
                 * Set the number of preambles removed from the packet
                 * when it is passed from the PHY to the MAC to prevent
                 * the MAC from misinterpreting the packet type.
                 */
                hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
                phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

                /* STATUS.FD clear means half-duplex */
                if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
                    E1000_STATUS_FD)
                        phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

                hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
                break;
        default:
                break;
        }

        /* Check if there was DownShift, must be checked
         * immediately after link-up
         */
        e1000_check_downshift_generic(hw);

        /* Enable/Disable EEE after link up */
        if (hw->phy.type > e1000_phy_82579) {
                ret_val = e1000_set_eee_pchlan(hw);
                if (ret_val)
                        return ret_val;
        }

        /* If we are forcing speed/duplex, then we simply return since
         * we have already determined whether we have link or not.
         */
        if (!mac->autoneg)
                return -E1000_ERR_CONFIG;

        /* Auto-Neg is enabled.  Auto Speed Detection takes care
         * of MAC speed/duplex configuration.  So we only need to
         * configure Collision Distance in the MAC.
         */
        mac->ops.config_collision_dist(hw);

        /* Configure Flow Control now that Auto-Neg has completed.
         * First, we need to restore the desired flow control
         * settings because we may have had to re-autoneg with a
         * different link partner.
         */
        ret_val = e1000_config_fc_after_link_up_generic(hw);
        if (ret_val)
                DEBUGOUT("Error configuring flow control\n");

        return ret_val;
}
1810
1811 /**
1812  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1813  *  @hw: pointer to the HW structure
1814  *
1815  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1816  **/
1817 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1818 {
1819         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1820
1821         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1822         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1823         switch (hw->mac.type) {
1824         case e1000_ich8lan:
1825         case e1000_ich9lan:
1826         case e1000_ich10lan:
1827                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1828                 break;
1829         case e1000_pchlan:
1830         case e1000_pch2lan:
1831         case e1000_pch_lpt:
1832         case e1000_pch_spt:
1833                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1834                 break;
1835         default:
1836                 break;
1837         }
1838 }
1839
1840 /**
1841  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1842  *  @hw: pointer to the HW structure
1843  *
1844  *  Acquires the mutex for performing NVM operations.
1845  **/
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
{
        DEBUGFUNC("e1000_acquire_nvm_ich8lan");

        /* NVM access on ICH8-family parts is serialized with a host
         * mutex only; no hardware semaphore is involved, so this
         * always succeeds once the mutex is held.
         */
        E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);

        return E1000_SUCCESS;
}
1854
1855 /**
1856  *  e1000_release_nvm_ich8lan - Release NVM mutex
1857  *  @hw: pointer to the HW structure
1858  *
1859  *  Releases the mutex used while performing NVM operations.
1860  **/
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
        DEBUGFUNC("e1000_release_nvm_ich8lan");

        /* Counterpart to e1000_acquire_nvm_ich8lan(): drop the host
         * mutex that serializes NVM access.
         */
        E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);

        return;
}
1869
1870 /**
1871  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1872  *  @hw: pointer to the HW structure
1873  *
1874  *  Acquires the software control flag for performing PHY and select
1875  *  MAC CSR accesses.
1876  **/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
        u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
        s32 ret_val = E1000_SUCCESS;

        DEBUGFUNC("e1000_acquire_swflag_ich8lan");

        /* Serialize software contexts on this host first. */
        E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);

        /* Wait (1 msec per attempt) for any current software owner to
         * clear the hardware SWFLAG bit in EXTCNF_CTRL.
         */
        while (timeout) {
                extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
                if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
                        break;

                msec_delay_irq(1);
                timeout--;
        }

        if (!timeout) {
                DEBUGOUT("SW has already locked the resource.\n");
                ret_val = -E1000_ERR_CONFIG;
                goto out;
        }

        timeout = SW_FLAG_TIMEOUT;

        /* Request ownership by setting SWFLAG, then poll until the bit
         * reads back set; firmware/hardware may hold it off.
         */
        extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
        E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);

        while (timeout) {
                extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
                if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
                        break;

                msec_delay_irq(1);
                timeout--;
        }

        if (!timeout) {
                DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
                          E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
                /* Back out our request so FW/HW are not blocked */
                extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
                E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
                ret_val = -E1000_ERR_CONFIG;
                goto out;
        }

out:
        /* On any failure also drop the host mutex taken above */
        if (ret_val)
                E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);

        return ret_val;
}
1930
1931 /**
1932  *  e1000_release_swflag_ich8lan - Release software control flag
1933  *  @hw: pointer to the HW structure
1934  *
1935  *  Releases the software control flag for performing PHY and select
1936  *  MAC CSR accesses.
1937  **/
1938 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1939 {
1940         u32 extcnf_ctrl;
1941
1942         DEBUGFUNC("e1000_release_swflag_ich8lan");
1943
1944         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1945
1946         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1947                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1948                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1949         } else {
1950                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1951         }
1952
1953         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1954
1955         return;
1956 }
1957
1958 /**
1959  *  e1000_check_mng_mode_ich8lan - Checks management mode
1960  *  @hw: pointer to the HW structure
1961  *
1962  *  This checks if the adapter has any manageability enabled.
1963  *  This is a function pointer entry point only called by read/write
1964  *  routines for the PHY and NVM parts.
1965  **/
1966 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1967 {
1968         u32 fwsm;
1969
1970         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1971
1972         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1973
1974         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1975                ((fwsm & E1000_FWSM_MODE_MASK) ==
1976                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1977 }
1978
1979 /**
1980  *  e1000_check_mng_mode_pchlan - Checks management mode
1981  *  @hw: pointer to the HW structure
1982  *
1983  *  This checks if the adapter has iAMT enabled.
1984  *  This is a function pointer entry point only called by read/write
1985  *  routines for the PHY and NVM parts.
1986  **/
1987 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1988 {
1989         u32 fwsm;
1990
1991         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1992
1993         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1994
1995         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1996                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1997 }
1998
1999 /**
2000  *  e1000_rar_set_pch2lan - Set receive address register
2001  *  @hw: pointer to the HW structure
2002  *  @addr: pointer to the receive address
2003  *  @index: receive address array register
2004  *
2005  *  Sets the receive address array register at index to the address passed
2006  *  in by addr.  For 82579, RAR[0] is the base address register that is to
2007  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
2008  *  Use SHRA[0-3] in place of those reserved for ME.
2009  **/
2010 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
2011 {
2012         u32 rar_low, rar_high;
2013
2014         DEBUGFUNC("e1000_rar_set_pch2lan");
2015
2016         /* HW expects these in little endian so we reverse the byte order
2017          * from network order (big endian) to little endian
2018          */
2019         rar_low = ((u32) addr[0] |
2020                    ((u32) addr[1] << 8) |
2021                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2022
2023         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2024
2025         /* If MAC address zero, no need to set the AV bit */
2026         if (rar_low || rar_high)
2027                 rar_high |= E1000_RAH_AV;
2028
2029         if (index == 0) {
2030                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2031                 E1000_WRITE_FLUSH(hw);
2032                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2033                 E1000_WRITE_FLUSH(hw);
2034                 return E1000_SUCCESS;
2035         }
2036
2037         /* RAR[1-6] are owned by manageability.  Skip those and program the
2038          * next address into the SHRA register array.
2039          */
2040         if (index < (u32) (hw->mac.rar_entry_count)) {
2041                 s32 ret_val;
2042
2043                 ret_val = e1000_acquire_swflag_ich8lan(hw);
2044                 if (ret_val)
2045                         goto out;
2046
2047                 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
2048                 E1000_WRITE_FLUSH(hw);
2049                 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
2050                 E1000_WRITE_FLUSH(hw);
2051
2052                 e1000_release_swflag_ich8lan(hw);
2053
2054                 /* verify the register updates */
2055                 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2056                     (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2057                         return E1000_SUCCESS;
2058
2059                 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2060                          (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2061         }
2062
2063 out:
2064         DEBUGOUT1("Failed to write receive address at index %d\n", index);
2065         return -E1000_ERR_CONFIG;
2066 }
2067
2068 /**
2069  *  e1000_rar_set_pch_lpt - Set receive address registers
2070  *  @hw: pointer to the HW structure
2071  *  @addr: pointer to the receive address
2072  *  @index: receive address array register
2073  *
2074  *  Sets the receive address register array at index to the address passed
2075  *  in by addr. For LPT, RAR[0] is the base address register that is to
2076  *  contain the MAC address. SHRA[0-10] are the shared receive address
2077  *  registers that are shared between the Host and manageability engine (ME).
2078  **/
static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
        u32 rar_low, rar_high;
        u32 wlock_mac;

        DEBUGFUNC("e1000_rar_set_pch_lpt");

        /* HW expects these in little endian so we reverse the byte order
         * from network order (big endian) to little endian
         */
        rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
                   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

        rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

        /* If MAC address zero, no need to set the AV bit */
        if (rar_low || rar_high)
                rar_high |= E1000_RAH_AV;

        /* RAR[0] holds the primary MAC address and is written directly */
        if (index == 0) {
                E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
                E1000_WRITE_FLUSH(hw);
                E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
                E1000_WRITE_FLUSH(hw);
                return E1000_SUCCESS;
        }

        /* The manageability engine (ME) can lock certain SHRAR registers that
         * it is using - those registers are unavailable for use.
         */
        if (index < hw->mac.rar_entry_count) {
                /* FWSM.WLOCK_MAC: 0 = nothing locked, 1 = all SHRAR
                 * locked, otherwise only indices <= WLOCK_MAC may be
                 * written (see the checks below).
                 */
                wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
                            E1000_FWSM_WLOCK_MAC_MASK;
                wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;

                /* Check if all SHRAR registers are locked */
                if (wlock_mac == 1)
                        goto out;

                if ((wlock_mac == 0) || (index <= wlock_mac)) {
                        s32 ret_val;

                        /* SHRA writes must hold the SW/FW/HW semaphore */
                        ret_val = e1000_acquire_swflag_ich8lan(hw);

                        if (ret_val)
                                goto out;

                        E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
                                        rar_low);
                        E1000_WRITE_FLUSH(hw);
                        E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
                                        rar_high);
                        E1000_WRITE_FLUSH(hw);

                        e1000_release_swflag_ich8lan(hw);

                        /* verify the register updates */
                        if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
                            (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
                                return E1000_SUCCESS;
                }
        }

out:
        DEBUGOUT1("Failed to write receive address at index %d\n", index);
        return -E1000_ERR_CONFIG;
}
2146
2147 /**
2148  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2149  *  @hw: pointer to the HW structure
2150  *  @mc_addr_list: array of multicast addresses to program
2151  *  @mc_addr_count: number of multicast addresses to program
2152  *
2153  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2154  *  The caller must have a packed mc_addr_list of multicast addresses.
2155  **/
2156 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2157                                               u8 *mc_addr_list,
2158                                               u32 mc_addr_count)
2159 {
2160         u16 phy_reg = 0;
2161         int i;
2162         s32 ret_val;
2163
2164         DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2165
2166         e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2167
2168         ret_val = hw->phy.ops.acquire(hw);
2169         if (ret_val)
2170                 return;
2171
2172         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2173         if (ret_val)
2174                 goto release;
2175
2176         for (i = 0; i < hw->mac.mta_reg_count; i++) {
2177                 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2178                                            (u16)(hw->mac.mta_shadow[i] &
2179                                                  0xFFFF));
2180                 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2181                                            (u16)((hw->mac.mta_shadow[i] >> 16) &
2182                                                  0xFFFF));
2183         }
2184
2185         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2186
2187 release:
2188         hw->phy.ops.release(hw);
2189 }
2190
2191 /**
2192  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2193  *  @hw: pointer to the HW structure
2194  *
2195  *  Checks if firmware is blocking the reset of the PHY.
2196  *  This is a function pointer entry point only called by
2197  *  reset routines.
2198  **/
2199 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2200 {
2201         u32 fwsm;
2202         bool blocked = FALSE;
2203         int i = 0;
2204
2205         DEBUGFUNC("e1000_check_reset_block_ich8lan");
2206
2207         do {
2208                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2209                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2210                         blocked = TRUE;
2211                         msec_delay(10);
2212                         continue;
2213                 }
2214                 blocked = FALSE;
2215         } while (blocked && (i++ < 30));
2216         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2217 }
2218
2219 /**
2220  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2221  *  @hw: pointer to the HW structure
2222  *
2223  *  Assumes semaphore already acquired.
2224  *
2225  **/
static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
{
	u16 phy_data;
	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
	/* SMT frequency setting is strapped in the STRAP register */
	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
		E1000_STRAP_SMT_FREQ_SHIFT;
	s32 ret_val;

	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;

	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
	if (ret_val)
		return ret_val;

	/* Replace the SMBus address field with the strapped value, and
	 * mark the address valid with PEC enabled.
	 */
	phy_data &= ~HV_SMB_ADDR_MASK;
	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;

	if (hw->phy.type == e1000_phy_i217) {
		/* Restore SMBus frequency */
		if (freq--) {
			/* A strap value of 0 is unsupported; any nonzero
			 * value is written to the PHY as (freq - 1), split
			 * across the low/high frequency bits.
			 */
			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
			phy_data |= (freq & (1 << 0)) <<
				HV_SMB_ADDR_FREQ_LOW_SHIFT;
			phy_data |= (freq & (1 << 1)) <<
				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
		} else {
			DEBUGOUT("Unsupported SMB frequency in PHY\n");
		}
	}

	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
}
2259
2260 /**
2261  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2262  *  @hw:   pointer to the HW structure
2263  *
2264  *  SW should configure the LCD from the NVM extended configuration region
2265  *  as a workaround for certain parts.
2266  **/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
	s32 ret_val = E1000_SUCCESS;
	u16 word_addr, reg_data, reg_addr, phy_page = 0;

	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");

	/* Initialize the PHY from the NVM on ICH platforms.  This
	 * is needed due to an issue where the NVM configuration is
	 * not properly autoloaded after power transitions.
	 * Therefore, after each PHY reset, we will load the
	 * configuration data out of the NVM manually.
	 */
	switch (hw->mac.type) {
	case e1000_ich8lan:
		/* On ICH8 only the IGP3 PHY needs this workaround */
		if (phy->type != e1000_phy_igp_3)
			return ret_val;

		/* The AMT/C ICH8 variants use a different SW-config bit */
		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
			break;
		}
		/* Fall-thru */
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
		break;
	default:
		return ret_val;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Only proceed when the NVM says SW is to do the configuration */
	data = E1000_READ_REG(hw, E1000_FEXTNVM);
	if (!(data & sw_cfg_mask))
		goto release;

	/* Make sure HW does not configure LCD from PHY
	 * extended configuration before SW configuration
	 */
	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
	if ((hw->mac.type < e1000_pch2lan) &&
	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
			goto release;

	/* An empty extended-config region means nothing to load */
	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
	if (!cnf_size)
		goto release;

	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

	if (((hw->mac.type == e1000_pchlan) &&
	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
	    (hw->mac.type > e1000_pchlan)) {
		/* HW configures the SMBus address and LEDs when the
		 * OEM and LCD Write Enable bits are set in the NVM.
		 * When both NVM bits are cleared, SW will configure
		 * them instead.
		 */
		ret_val = e1000_write_smbus_addr(hw);
		if (ret_val)
			goto release;

		/* Mirror the MAC LED configuration into the PHY */
		data = E1000_READ_REG(hw, E1000_LEDCTL);
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
							(u16)data);
		if (ret_val)
			goto release;
	}

	/* Configure LCD from extended configuration region. */

	/* cnf_base_addr is in DWORD */
	word_addr = (u16)(cnf_base_addr << 1);

	/* Each entry in the region is a (data, address) word pair */
	for (i = 0; i < cnf_size; i++) {
		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
					   &reg_data);
		if (ret_val)
			goto release;

		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
					   1, &reg_addr);
		if (ret_val)
			goto release;

		/* Save off the PHY page for future writes. */
		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
			phy_page = reg_data;
			continue;
		}

		/* Combine the saved page with the in-page register offset */
		reg_addr &= PHY_REG_MASK;
		reg_addr |= phy_page;

		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
						    reg_data);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
	return ret_val;
}
2382
2383 /**
2384  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2385  *  @hw:   pointer to the HW structure
2386  *  @link: link up bool flag
2387  *
2388  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2389  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
2390  *  If link is down, the function will restore the default K1 setting located
2391  *  in the NVM.
2392  **/
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
{
	s32 ret_val = E1000_SUCCESS;
	u16 status_reg = 0;
	/* Default K1 state comes from the NVM */
	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;

	DEBUGFUNC("e1000_k1_gig_workaround_hv");

	/* Workaround applies only to PCH (82577/82578) parts */
	if (hw->mac.type != e1000_pchlan)
		return E1000_SUCCESS;

	/* Wrap the whole flow with the sw flag */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
	if (link) {
		if (hw->phy.type == e1000_phy_82578) {
			/* 82578 reports resolved speed in BM_CS_STATUS */
			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			status_reg &= (BM_CS_STATUS_LINK_UP |
				       BM_CS_STATUS_RESOLVED |
				       BM_CS_STATUS_SPEED_MASK);

			if (status_reg == (BM_CS_STATUS_LINK_UP |
					   BM_CS_STATUS_RESOLVED |
					   BM_CS_STATUS_SPEED_1000))
				k1_enable = FALSE;
		}

		if (hw->phy.type == e1000_phy_82577) {
			/* 82577 reports resolved speed in HV_M_STATUS */
			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			status_reg &= (HV_M_STATUS_LINK_UP |
				       HV_M_STATUS_AUTONEG_COMPLETE |
				       HV_M_STATUS_SPEED_MASK);

			if (status_reg == (HV_M_STATUS_LINK_UP |
					   HV_M_STATUS_AUTONEG_COMPLETE |
					   HV_M_STATUS_SPEED_1000))
				k1_enable = FALSE;
		}

		/* Link stall fix for link up */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x0100);
		if (ret_val)
			goto release;

	} else {
		/* Link stall fix for link down */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x4100);
		if (ret_val)
			goto release;
	}

	/* Apply the chosen K1 state to the hardware */
	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2464
2465 /**
2466  *  e1000_configure_k1_ich8lan - Configure K1 power state
2467  *  @hw: pointer to the HW structure
 *  @k1_enable: K1 state to configure
2469  *
2470  *  Configure the K1 power state based on the provided parameter.
2471  *  Assumes semaphore already acquired.
2472  *
2473  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2474  **/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
	s32 ret_val;
	u32 ctrl_reg = 0;
	u32 ctrl_ext = 0;
	u32 reg = 0;
	u16 kmrn_reg = 0;

	DEBUGFUNC("e1000_configure_k1_ich8lan");

	/* Read-modify-write the K1 enable bit in the KMRN K1 config */
	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					     &kmrn_reg);
	if (ret_val)
		return ret_val;

	if (k1_enable)
		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;

	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					      kmrn_reg);
	if (ret_val)
		return ret_val;

	usec_delay(20);

	/* Save current CTRL/CTRL_EXT so they can be restored below */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);

	/* Temporarily force speed (clearing 1000/100 speed bits) with
	 * speed-bypass so the new K1 setting takes effect.
	 */
	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	reg |= E1000_CTRL_FRCSPD;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
	E1000_WRITE_FLUSH(hw);
	usec_delay(20);

	/* Restore the original register values */
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);
	usec_delay(20);

	return E1000_SUCCESS;
}
2518
2519 /**
2520  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2521  *  @hw:       pointer to the HW structure
2522  *  @d0_state: boolean if entering d0 or d3 device state
2523  *
2524  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2525  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2526  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
2527  **/
static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
{
	s32 ret_val = 0;
	u32 mac_reg;
	u16 oem_reg;

	DEBUGFUNC("e1000_oem_bits_config_ich8lan");

	/* OEM bits only exist on PCH (82577) and newer parts */
	if (hw->mac.type < e1000_pchlan)
		return ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* On PCH, HW owns the OEM bits when the OEM Write Enable bit is
	 * set in NVM; SW must not interfere in that case.
	 */
	if (hw->mac.type == e1000_pchlan) {
		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
			goto release;
	}

	/* Nothing to do unless the SW Config bit is set in NVM */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
		goto release;

	/* Mirror the MAC's PHY_CTRL Gbe-disable/LPLU settings into the
	 * PHY's OEM bits register.
	 */
	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);

	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
	if (ret_val)
		goto release;

	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);

	if (d0_state) {
		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	} else {
		/* Outside D0, the non-D0a variants of the bits count too */
		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
		    E1000_PHY_CTRL_NOND0A_LPLU))
			oem_reg |= HV_OEM_BITS_LPLU;
	}

	/* Set Restart auto-neg to activate the bits */
	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
	    !hw->phy.ops.check_reset_block(hw))
		oem_reg |= HV_OEM_BITS_RESTART_AN;

	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2589
2590
2591 /**
2592  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2593  *  @hw:   pointer to the HW structure
2594  **/
2595 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2596 {
2597         s32 ret_val;
2598         u16 data;
2599
2600         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2601
2602         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2603         if (ret_val)
2604                 return ret_val;
2605
2606         data |= HV_KMRN_MDIO_SLOW;
2607
2608         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2609
2610         return ret_val;
2611 }
2612
2613 /**
2614  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2615  *  done after every PHY reset.
2616  **/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 phy_data;

	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");

	/* These workarounds apply only to PCH (82577/82578) parts */
	if (hw->mac.type != e1000_pchlan)
		return E1000_SUCCESS;

	/* Set MDIO slow mode before any other MDIO access */
	if (hw->phy.type == e1000_phy_82577) {
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (ret_val)
			return ret_val;
	}

	/* Early PHY revisions need preamble adjustments */
	if (((hw->phy.type == e1000_phy_82577) &&
	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
		/* Disable generation of early preamble */
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
		if (ret_val)
			return ret_val;

		/* Preamble tuning for SSC */
		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
						0xA204);
		if (ret_val)
			return ret_val;
	}

	if (hw->phy.type == e1000_phy_82578) {
		/* Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register.
		 */
		if (hw->phy.revision < 2) {
			e1000_phy_sw_reset_generic(hw);
			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
							0x3140);
		}
	}

	/* Select page 0 */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	hw->phy.addr = 1;
	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
	hw->phy.ops.release(hw);
	if (ret_val)
		return ret_val;

	/* Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
	if (ret_val)
		return ret_val;

	/* Workaround for link disconnects on a busy hub in half duplex */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;
	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
	if (ret_val)
		goto release;
	/* Clear the upper byte of the port general configuration */
	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
					       phy_data & 0x00FF);
	if (ret_val)
		goto release;

	/* set MSE higher to enable link to stay up when noise is high */
	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2697
2698 /**
2699  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2700  *  @hw:   pointer to the HW structure
2701  **/
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
{
	u32 mac_reg;
	u16 i, phy_reg = 0;
	s32 ret_val;

	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;
	/* The wakeup registers require BM wakeup-register access mode */
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
		/* Low 32 address bits -> two 16-bit PHY registers */
		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
					   (u16)((mac_reg >> 16) & 0xFFFF));

		/* High 16 address bits plus the Address Valid bit */
		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
					   (u16)((mac_reg & E1000_RAH_AV)
						 >> 16));
	}

	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}
2738
2739 static u32 e1000_calc_rx_da_crc(u8 mac[])
2740 {
2741         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2742         u32 i, j, mask, crc;
2743
2744         DEBUGFUNC("e1000_calc_rx_da_crc");
2745
2746         crc = 0xffffffff;
2747         for (i = 0; i < 6; i++) {
2748                 crc = crc ^ mac[i];
2749                 for (j = 8; j > 0; j--) {
2750                         mask = (crc & 1) * (-1);
2751                         crc = (crc >> 1) ^ (poly & mask);
2752                 }
2753         }
2754         return ~crc;
2755 }
2756
2757 /**
2758  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2759  *  with 82579 PHY
2760  *  @hw: pointer to the HW structure
2761  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2762  **/
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
{
	s32 ret_val = E1000_SUCCESS;
	u16 phy_reg, data;
	u32 mac_reg;
	u16 i;

	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");

	/* Only 82579 (pch2lan) and newer need this workaround */
	if (hw->mac.type < e1000_pch2lan)
		return E1000_SUCCESS;

	/* disable Rx path while enabling/disabling workaround */
	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
					phy_reg | (1 << 14));
	if (ret_val)
		return ret_val;

	if (enable) {
		/* Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC
		 */
		for (i = 0; i < hw->mac.rar_entry_count; i++) {
			u8 mac_addr[ETH_ADDR_LEN] = {0};
			u32 addr_high, addr_low;

			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
			/* Skip entries without the Address Valid bit */
			if (!(addr_high & E1000_RAH_AV))
				continue;
			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
			/* Unpack the address bytes from the RAL/RAH pair */
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);

			/* Seed the MAC with this address's CRC */
			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
					e1000_calc_rx_da_crc(mac_addr));
		}

		/* Write Rx addresses to the PHY */
		e1000_copy_rx_addrs_to_phy_ich8lan(hw);

		/* Enable jumbo frame workaround in the MAC */
		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);

		/* Have the MAC strip the Ethernet CRC on receive */
		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
		mac_reg |= E1000_RCTL_SECRC;
		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);

		/* Set bit 0 of the KMRN control register */
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						&data);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data | (1 << 0));
		if (ret_val)
			return ret_val;
		/* Program the KMRN half-duplex control field (bits 8-11) */
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						&data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Enable jumbo frame workaround in the PHY */
		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		data |= (0x37 << 5);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
		data &= ~(1 << 13);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		/* Widen the Tx pointer gap (bits 2-11) for jumbos */
		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (E1000_TX_PTR_GAP << 2);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
		if (ret_val)
			return ret_val;
		/* Set bit 10 of the PHY power-management control register */
		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
						(1 << 10));
		if (ret_val)
			return ret_val;
	} else {
		/* Write MAC register values back to h/w defaults */
		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
		mac_reg &= ~(0xF << 14);
		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);

		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
		mac_reg &= ~E1000_RCTL_SECRC;
		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);

		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						&data);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data & ~(1 << 0));
		if (ret_val)
			return ret_val;
		ret_val = e1000_read_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						&data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000_write_kmrn_reg_generic(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Write PHY register values back to h/w defaults */
		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
		data |= (1 << 13);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x8 << 2);
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
		if (ret_val)
			return ret_val;
		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
						~(1 << 10));
		if (ret_val)
			return ret_val;
	}

	/* re-enable Rx path after enabling/disabling workaround */
	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
				     ~(1 << 14));
}
2931
2932 /**
2933  *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2934  *  done after every PHY reset.
2935  **/
2936 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2937 {
2938         s32 ret_val = E1000_SUCCESS;
2939
2940         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2941
2942         if (hw->mac.type != e1000_pch2lan)
2943                 return E1000_SUCCESS;
2944
2945         /* Set MDIO slow mode before any other MDIO access */
2946         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2947         if (ret_val)
2948                 return ret_val;
2949
2950         ret_val = hw->phy.ops.acquire(hw);
2951         if (ret_val)
2952                 return ret_val;
2953         /* set MSE higher to enable link to stay up when noise is high */
2954         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2955         if (ret_val)
2956                 goto release;
2957         /* drop link after 5 times MSE threshold was reached */
2958         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2959 release:
2960         hw->phy.ops.release(hw);
2961
2962         return ret_val;
2963 }
2964
2965 /**
 *  e1000_k1_workaround_lv - K1 Si workaround
2967  *  @hw:   pointer to the HW structure
2968  *
2969  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
2970  *  Disable K1 for 1000 and 100 speeds
2971  **/
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 status_reg = 0;

	DEBUGFUNC("e1000_k1_workaround_lv");

	/* Workaround applies only to 82579 (pch2lan) parts */
	if (hw->mac.type != e1000_pch2lan)
		return E1000_SUCCESS;

	/* Set K1 beacon duration based on 10Mbs speed */
	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
	if (ret_val)
		return ret_val;

	/* Act only when link is up and auto-negotiation has completed */
	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
		if (status_reg &
		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			u16 pm_phy_reg;

			/* LV 1G/100 Packet drop issue wa  */
			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
						       &pm_phy_reg);
			if (ret_val)
				return ret_val;
			/* Disable K1 at 1000/100 Mbps */
			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
							pm_phy_reg);
			if (ret_val)
				return ret_val;
		} else {
			u32 mac_reg;

			/* At 10 Mbps, set the K1 beacon duration to 16us */
			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
		}
	}

	return ret_val;
}
3014
3015 /**
3016  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
3017  *  @hw:   pointer to the HW structure
3018  *  @gate: boolean set to TRUE to gate, FALSE to ungate
3019  *
3020  *  Gate/ungate the automatic PHY configuration via hardware; perform
3021  *  the configuration via software instead.
3022  **/
3023 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
3024 {
3025         u32 extcnf_ctrl;
3026
3027         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
3028
3029         if (hw->mac.type < e1000_pch2lan)
3030                 return;
3031
3032         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
3033
3034         if (gate)
3035                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3036         else
3037                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3038
3039         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
3040 }
3041
3042 /**
3043  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
3044  *  @hw: pointer to the HW structure
3045  *
3046  *  Check the appropriate indication the MAC has finished configuring the
3047  *  PHY after a software reset.
3048  **/
3049 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3050 {
3051         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3052
3053         DEBUGFUNC("e1000_lan_init_done_ich8lan");
3054
3055         /* Wait for basic configuration completes before proceeding */
3056         do {
3057                 data = E1000_READ_REG(hw, E1000_STATUS);
3058                 data &= E1000_STATUS_LAN_INIT_DONE;
3059                 usec_delay(100);
3060         } while ((!data) && --loop);
3061
3062         /* If basic configuration is incomplete before the above loop
3063          * count reaches 0, loading the configuration from NVM will
3064          * leave the PHY in a bad state possibly resulting in no link.
3065          */
3066         if (loop == 0)
3067                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3068
3069         /* Clear the Init Done bit for the next init event */
3070         data = E1000_READ_REG(hw, E1000_STATUS);
3071         data &= ~E1000_STATUS_LAN_INIT_DONE;
3072         E1000_WRITE_REG(hw, E1000_STATUS, data);
3073 }
3074
3075 /**
3076  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3077  *  @hw: pointer to the HW structure
3078  **/
3079 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3080 {
3081         s32 ret_val = E1000_SUCCESS;
3082         u16 reg;
3083
3084         DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3085
3086         if (hw->phy.ops.check_reset_block(hw))
3087                 return E1000_SUCCESS;
3088
3089         /* Allow time for h/w to get to quiescent state after reset */
3090         msec_delay(10);
3091
3092         /* Perform any necessary post-reset workarounds */
3093         switch (hw->mac.type) {
3094         case e1000_pchlan:
3095                 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3096                 if (ret_val)
3097                         return ret_val;
3098                 break;
3099         case e1000_pch2lan:
3100                 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3101                 if (ret_val)
3102                         return ret_val;
3103                 break;
3104         default:
3105                 break;
3106         }
3107
3108         /* Clear the host wakeup bit after lcd reset */
3109         if (hw->mac.type >= e1000_pchlan) {
3110                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3111                 reg &= ~BM_WUC_HOST_WU_BIT;
3112                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3113         }
3114
3115         /* Configure the LCD with the extended configuration region in NVM */
3116         ret_val = e1000_sw_lcd_config_ich8lan(hw);
3117         if (ret_val)
3118                 return ret_val;
3119
3120         /* Configure the LCD with the OEM bits in NVM */
3121         ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
3122
3123         if (hw->mac.type == e1000_pch2lan) {
3124                 /* Ungate automatic PHY configuration on non-managed 82579 */
3125                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
3126                     E1000_ICH_FWSM_FW_VALID)) {
3127                         msec_delay(10);
3128                         e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
3129                 }
3130
3131                 /* Set EEE LPI Update Timer to 200usec */
3132                 ret_val = hw->phy.ops.acquire(hw);
3133                 if (ret_val)
3134                         return ret_val;
3135                 ret_val = e1000_write_emi_reg_locked(hw,
3136                                                      I82579_LPI_UPDATE_TIMER,
3137                                                      0x1387);
3138                 hw->phy.ops.release(hw);
3139         }
3140
3141         return ret_val;
3142 }
3143
3144 /**
3145  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3146  *  @hw: pointer to the HW structure
3147  *
3148  *  Resets the PHY
3149  *  This is a function pointer entry point called by drivers
3150  *  or other shared routines.
3151  **/
3152 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3153 {
3154         s32 ret_val = E1000_SUCCESS;
3155
3156         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3157
3158         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
3159         if ((hw->mac.type == e1000_pch2lan) &&
3160             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3161                 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3162
3163         ret_val = e1000_phy_hw_reset_generic(hw);
3164         if (ret_val)
3165                 return ret_val;
3166
3167         return e1000_post_phy_reset_ich8lan(hw);
3168 }
3169
3170 /**
3171  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3172  *  @hw: pointer to the HW structure
3173  *  @active: TRUE to enable LPLU, FALSE to disable
3174  *
3175  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
3176  *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
3177  *  the phy speed. This function will manually set the LPLU bit and restart
3178  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
3179  *  since it configures the same bit.
3180  **/
3181 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3182 {
3183         s32 ret_val;
3184         u16 oem_reg;
3185
3186         DEBUGFUNC("e1000_set_lplu_state_pchlan");
3187         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3188         if (ret_val)
3189                 return ret_val;
3190
3191         if (active)
3192                 oem_reg |= HV_OEM_BITS_LPLU;
3193         else
3194                 oem_reg &= ~HV_OEM_BITS_LPLU;
3195
3196         if (!hw->phy.ops.check_reset_block(hw))
3197                 oem_reg |= HV_OEM_BITS_RESTART_AN;
3198
3199         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3200 }
3201
3202 /**
3203  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3204  *  @hw: pointer to the HW structure
3205  *  @active: TRUE to enable LPLU, FALSE to disable
3206  *
3207  *  Sets the LPLU D0 state according to the active flag.  When
3208  *  activating LPLU this function also disables smart speed
3209  *  and vice versa.  LPLU will not be activated unless the
3210  *  device autonegotiation advertisement meets standards of
3211  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3212  *  This is a function pointer entry point only called by
3213  *  PHY setup routines.
3214  **/
3215 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3216 {
3217         struct e1000_phy_info *phy = &hw->phy;
3218         u32 phy_ctrl;
3219         s32 ret_val = E1000_SUCCESS;
3220         u16 data;
3221
3222         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3223
3224         if (phy->type == e1000_phy_ife)
3225                 return E1000_SUCCESS;
3226
3227         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3228
3229         if (active) {
3230                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3231                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3232
3233                 if (phy->type != e1000_phy_igp_3)
3234                         return E1000_SUCCESS;
3235
3236                 /* Call gig speed drop workaround on LPLU before accessing
3237                  * any PHY registers
3238                  */
3239                 if (hw->mac.type == e1000_ich8lan)
3240                         e1000_gig_downshift_workaround_ich8lan(hw);
3241
3242                 /* When LPLU is enabled, we should disable SmartSpeed */
3243                 ret_val = phy->ops.read_reg(hw,
3244                                             IGP01E1000_PHY_PORT_CONFIG,
3245                                             &data);
3246                 if (ret_val)
3247                         return ret_val;
3248                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3249                 ret_val = phy->ops.write_reg(hw,
3250                                              IGP01E1000_PHY_PORT_CONFIG,
3251                                              data);
3252                 if (ret_val)
3253                         return ret_val;
3254         } else {
3255                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3256                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3257
3258                 if (phy->type != e1000_phy_igp_3)
3259                         return E1000_SUCCESS;
3260
3261                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3262                  * during Dx states where the power conservation is most
3263                  * important.  During driver activity we should enable
3264                  * SmartSpeed, so performance is maintained.
3265                  */
3266                 if (phy->smart_speed == e1000_smart_speed_on) {
3267                         ret_val = phy->ops.read_reg(hw,
3268                                                     IGP01E1000_PHY_PORT_CONFIG,
3269                                                     &data);
3270                         if (ret_val)
3271                                 return ret_val;
3272
3273                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3274                         ret_val = phy->ops.write_reg(hw,
3275                                                      IGP01E1000_PHY_PORT_CONFIG,
3276                                                      data);
3277                         if (ret_val)
3278                                 return ret_val;
3279                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3280                         ret_val = phy->ops.read_reg(hw,
3281                                                     IGP01E1000_PHY_PORT_CONFIG,
3282                                                     &data);
3283                         if (ret_val)
3284                                 return ret_val;
3285
3286                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3287                         ret_val = phy->ops.write_reg(hw,
3288                                                      IGP01E1000_PHY_PORT_CONFIG,
3289                                                      data);
3290                         if (ret_val)
3291                                 return ret_val;
3292                 }
3293         }
3294
3295         return E1000_SUCCESS;
3296 }
3297
3298 /**
3299  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3300  *  @hw: pointer to the HW structure
3301  *  @active: TRUE to enable LPLU, FALSE to disable
3302  *
3303  *  Sets the LPLU D3 state according to the active flag.  When
3304  *  activating LPLU this function also disables smart speed
3305  *  and vice versa.  LPLU will not be activated unless the
3306  *  device autonegotiation advertisement meets standards of
3307  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3308  *  This is a function pointer entry point only called by
3309  *  PHY setup routines.
3310  **/
3311 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3312 {
3313         struct e1000_phy_info *phy = &hw->phy;
3314         u32 phy_ctrl;
3315         s32 ret_val = E1000_SUCCESS;
3316         u16 data;
3317
3318         DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3319
3320         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3321
3322         if (!active) {
3323                 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3324                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3325
3326                 if (phy->type != e1000_phy_igp_3)
3327                         return E1000_SUCCESS;
3328
3329                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3330                  * during Dx states where the power conservation is most
3331                  * important.  During driver activity we should enable
3332                  * SmartSpeed, so performance is maintained.
3333                  */
3334                 if (phy->smart_speed == e1000_smart_speed_on) {
3335                         ret_val = phy->ops.read_reg(hw,
3336                                                     IGP01E1000_PHY_PORT_CONFIG,
3337                                                     &data);
3338                         if (ret_val)
3339                                 return ret_val;
3340
3341                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3342                         ret_val = phy->ops.write_reg(hw,
3343                                                      IGP01E1000_PHY_PORT_CONFIG,
3344                                                      data);
3345                         if (ret_val)
3346                                 return ret_val;
3347                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3348                         ret_val = phy->ops.read_reg(hw,
3349                                                     IGP01E1000_PHY_PORT_CONFIG,
3350                                                     &data);
3351                         if (ret_val)
3352                                 return ret_val;
3353
3354                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3355                         ret_val = phy->ops.write_reg(hw,
3356                                                      IGP01E1000_PHY_PORT_CONFIG,
3357                                                      data);
3358                         if (ret_val)
3359                                 return ret_val;
3360                 }
3361         } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3362                    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3363                    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3364                 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3365                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3366
3367                 if (phy->type != e1000_phy_igp_3)
3368                         return E1000_SUCCESS;
3369
3370                 /* Call gig speed drop workaround on LPLU before accessing
3371                  * any PHY registers
3372                  */
3373                 if (hw->mac.type == e1000_ich8lan)
3374                         e1000_gig_downshift_workaround_ich8lan(hw);
3375
3376                 /* When LPLU is enabled, we should disable SmartSpeed */
3377                 ret_val = phy->ops.read_reg(hw,
3378                                             IGP01E1000_PHY_PORT_CONFIG,
3379                                             &data);
3380                 if (ret_val)
3381                         return ret_val;
3382
3383                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3384                 ret_val = phy->ops.write_reg(hw,
3385                                              IGP01E1000_PHY_PORT_CONFIG,
3386                                              data);
3387         }
3388
3389         return ret_val;
3390 }
3391
/**
 *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
 *  @hw: pointer to the HW structure
 *  @bank:  pointer to the variable that returns the active bank
 *
 *  Reads signature byte from the NVM using the flash access registers.
 *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
 **/
static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
{
	u32 eecd;
	struct e1000_nvm_info *nvm = &hw->nvm;
	/* Default (non-SPT) offsets are byte-addressed, hence * sizeof(u16) */
	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
	u32 nvm_dword = 0;
	u8 sig_byte = 0;
	s32 ret_val;

	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");

	switch (hw->mac.type) {
	case e1000_pch_spt:
		/* SPT flash is accessed a dword at a time, so the offsets
		 * are word-addressed rather than byte-addressed.
		 */
		bank1_offset = nvm->flash_bank_size;
		act_offset = E1000_ICH_NVM_SIG_WORD;

		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
							 &nvm_dword);
		if (ret_val)
			return ret_val;
		/* Signature lives in the high byte of the low word */
		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return E1000_SUCCESS;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
							 bank1_offset,
							 &nvm_dword);
		if (ret_val)
			return ret_val;
		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return E1000_SUCCESS;
		}

		DEBUGOUT("ERROR: No valid NVM bank present\n");
		return -E1000_ERR_NVM;
	case e1000_ich8lan:
	case e1000_ich9lan:
		/* On ICH8/9 the EECD register may report the valid bank
		 * directly; fall back to reading the flash signature when
		 * the SEC1VAL indication is not usable.
		 */
		eecd = E1000_READ_REG(hw, E1000_EECD);
		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
		    E1000_EECD_SEC1VAL_VALID_MASK) {
			if (eecd & E1000_EECD_SEC1VAL)
				*bank = 1;
			else
				*bank = 0;

			return E1000_SUCCESS;
		}
		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
		/* fall-thru */
	default:
		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return E1000_SUCCESS;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
							bank1_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return E1000_SUCCESS;
		}

		DEBUGOUT("ERROR: No valid NVM bank present\n");
		return -E1000_ERR_NVM;
	}
}
3492
3493 /**
3494  *  e1000_read_nvm_spt - NVM access for SPT
3495  *  @hw: pointer to the HW structure
3496  *  @offset: The offset (in bytes) of the word(s) to read.
3497  *  @words: Size of data to read in words.
3498  *  @data: pointer to the word(s) to read at offset.
3499  *
3500  *  Reads a word(s) from the NVM
3501  **/
3502 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3503                               u16 *data)
3504 {
3505         struct e1000_nvm_info *nvm = &hw->nvm;
3506         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3507         u32 act_offset;
3508         s32 ret_val = E1000_SUCCESS;
3509         u32 bank = 0;
3510         u32 dword = 0;
3511         u16 offset_to_read;
3512         u16 i;
3513
3514         DEBUGFUNC("e1000_read_nvm_spt");
3515
3516         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3517             (words == 0)) {
3518                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3519                 ret_val = -E1000_ERR_NVM;
3520                 goto out;
3521         }
3522
3523         nvm->ops.acquire(hw);
3524
3525         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3526         if (ret_val != E1000_SUCCESS) {
3527                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3528                 bank = 0;
3529         }
3530
3531         act_offset = (bank) ? nvm->flash_bank_size : 0;
3532         act_offset += offset;
3533
3534         ret_val = E1000_SUCCESS;
3535
3536         for (i = 0; i < words; i += 2) {
3537                 if (words - i == 1) {
3538                         if (dev_spec->shadow_ram[offset+i].modified) {
3539                                 data[i] = dev_spec->shadow_ram[offset+i].value;
3540                         } else {
3541                                 offset_to_read = act_offset + i -
3542                                                  ((act_offset + i) % 2);
3543                                 ret_val =
3544                                    e1000_read_flash_dword_ich8lan(hw,
3545                                                                  offset_to_read,
3546                                                                  &dword);
3547                                 if (ret_val)
3548                                         break;
3549                                 if ((act_offset + i) % 2 == 0)
3550                                         data[i] = (u16)(dword & 0xFFFF);
3551                                 else
3552                                         data[i] = (u16)((dword >> 16) & 0xFFFF);
3553                         }
3554                 } else {
3555                         offset_to_read = act_offset + i;
3556                         if (!(dev_spec->shadow_ram[offset+i].modified) ||
3557                             !(dev_spec->shadow_ram[offset+i+1].modified)) {
3558                                 ret_val =
3559                                    e1000_read_flash_dword_ich8lan(hw,
3560                                                                  offset_to_read,
3561                                                                  &dword);
3562                                 if (ret_val)
3563                                         break;
3564                         }
3565                         if (dev_spec->shadow_ram[offset+i].modified)
3566                                 data[i] = dev_spec->shadow_ram[offset+i].value;
3567                         else
3568                                 data[i] = (u16) (dword & 0xFFFF);
3569                         if (dev_spec->shadow_ram[offset+i].modified)
3570                                 data[i+1] =
3571                                    dev_spec->shadow_ram[offset+i+1].value;
3572                         else
3573                                 data[i+1] = (u16) (dword >> 16 & 0xFFFF);
3574                 }
3575         }
3576
3577         nvm->ops.release(hw);
3578
3579 out:
3580         if (ret_val)
3581                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3582
3583         return ret_val;
3584 }
3585
3586 /**
3587  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3588  *  @hw: pointer to the HW structure
3589  *  @offset: The offset (in bytes) of the word(s) to read.
3590  *  @words: Size of data to read in words
3591  *  @data: Pointer to the word(s) to read at offset.
3592  *
3593  *  Reads a word(s) from the NVM using the flash access registers.
3594  **/
3595 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3596                                   u16 *data)
3597 {
3598         struct e1000_nvm_info *nvm = &hw->nvm;
3599         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3600         u32 act_offset;
3601         s32 ret_val = E1000_SUCCESS;
3602         u32 bank = 0;
3603         u16 i, word;
3604
3605         DEBUGFUNC("e1000_read_nvm_ich8lan");
3606
3607         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3608             (words == 0)) {
3609                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3610                 ret_val = -E1000_ERR_NVM;
3611                 goto out;
3612         }
3613
3614         nvm->ops.acquire(hw);
3615
3616         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3617         if (ret_val != E1000_SUCCESS) {
3618                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3619                 bank = 0;
3620         }
3621
3622         act_offset = (bank) ? nvm->flash_bank_size : 0;
3623         act_offset += offset;
3624
3625         ret_val = E1000_SUCCESS;
3626         for (i = 0; i < words; i++) {
3627                 if (dev_spec->shadow_ram[offset+i].modified) {
3628                         data[i] = dev_spec->shadow_ram[offset+i].value;
3629                 } else {
3630                         ret_val = e1000_read_flash_word_ich8lan(hw,
3631                                                                 act_offset + i,
3632                                                                 &word);
3633                         if (ret_val)
3634                                 break;
3635                         data[i] = word;
3636                 }
3637         }
3638
3639         nvm->ops.release(hw);
3640
3641 out:
3642         if (ret_val)
3643                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3644
3645         return ret_val;
3646 }
3647
/**
 *  e1000_flash_cycle_init_ich8lan - Initialize flash
 *  @hw: pointer to the HW structure
 *
 *  This function does initial flash setup so that a new read/write/erase cycle
 *  can be started.
 **/
static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;

	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");

	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

	/* Check if the flash descriptor is valid */
	if (!hsfsts.hsf_status.fldesvalid) {
		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
		return -E1000_ERR_NVM;
	}

	/* Clear FCERR and DAEL in hw status by writing 1 */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;
	/* On SPT and later, HSFSTS must be written as a 32-bit access;
	 * mask to the low 16 bits so the adjacent register is untouched.
	 */
	if (hw->mac.type >= e1000_pch_spt)
		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
				      hsfsts.regval & 0xFFFF);
	else
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);

	/* Either we should have a hardware SPI cycle in progress
	 * bit to check against, in order to start a new cycle or
	 * FDONE bit should be changed in the hardware so that it
	 * is 1 after hardware reset, which can then be used as an
	 * indication whether a cycle is in progress or has been
	 * completed.
	 */

	if (!hsfsts.hsf_status.flcinprog) {
		/* There is no cycle running at present,
		 * so we can start a cycle.
		 * Begin by setting Flash Cycle Done.
		 */
		hsfsts.hsf_status.flcdone = 1;
		if (hw->mac.type >= e1000_pch_spt)
			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
					      hsfsts.regval & 0xFFFF);
		else
			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
						hsfsts.regval);
		ret_val = E1000_SUCCESS;
	} else {
		s32 i;

		/* Otherwise poll for sometime so the current
		 * cycle has a chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
							      ICH_FLASH_HSFSTS);
			if (!hsfsts.hsf_status.flcinprog) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(1);
		}
		if (ret_val == E1000_SUCCESS) {
			/* Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts.hsf_status.flcdone = 1;
			if (hw->mac.type >= e1000_pch_spt)
				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
						      hsfsts.regval & 0xFFFF);
			else
				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
							hsfsts.regval);
		} else {
			DEBUGOUT("Flash controller busy, cannot get access\n");
		}
	}

	return ret_val;
}
3733
3734 /**
3735  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3736  *  @hw: pointer to the HW structure
3737  *  @timeout: maximum time to wait for completion
3738  *
3739  *  This function starts a flash cycle and waits for its completion.
3740  **/
3741 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3742 {
3743         union ich8_hws_flash_ctrl hsflctl;
3744         union ich8_hws_flash_status hsfsts;
3745         u32 i = 0;
3746
3747         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3748
3749         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3750         if (hw->mac.type >= e1000_pch_spt)
3751                 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3752         else
3753                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3754         hsflctl.hsf_ctrl.flcgo = 1;
3755
3756         if (hw->mac.type >= e1000_pch_spt)
3757                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3758                                       hsflctl.regval << 16);
3759         else
3760                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3761
3762         /* wait till FDONE bit is set to 1 */
3763         do {
3764                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3765                 if (hsfsts.hsf_status.flcdone)
3766                         break;
3767                 usec_delay(1);
3768         } while (i++ < timeout);
3769
3770         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3771                 return E1000_SUCCESS;
3772
3773         return -E1000_ERR_NVM;
3774 }
3775
3776 /**
3777  *  e1000_read_flash_dword_ich8lan - Read dword from flash
3778  *  @hw: pointer to the HW structure
3779  *  @offset: offset to data location
3780  *  @data: pointer to the location for storing the data
3781  *
3782  *  Reads the flash dword at offset into data.  Offset is converted
3783  *  to bytes before read.
3784  **/
3785 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3786                                           u32 *data)
3787 {
3788         DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3789
3790         if (!data)
3791                 return -E1000_ERR_NVM;
3792
3793         /* Must convert word offset into bytes. */
3794         offset <<= 1;
3795
3796         return e1000_read_flash_data32_ich8lan(hw, offset, data);
3797 }
3798
3799 /**
3800  *  e1000_read_flash_word_ich8lan - Read word from flash
3801  *  @hw: pointer to the HW structure
3802  *  @offset: offset to data location
3803  *  @data: pointer to the location for storing the data
3804  *
3805  *  Reads the flash word at offset into data.  Offset is converted
3806  *  to bytes before read.
3807  **/
3808 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3809                                          u16 *data)
3810 {
3811         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3812
3813         if (!data)
3814                 return -E1000_ERR_NVM;
3815
3816         /* Must convert offset into bytes. */
3817         offset <<= 1;
3818
3819         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3820 }
3821
3822 /**
3823  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3824  *  @hw: pointer to the HW structure
3825  *  @offset: The offset of the byte to read.
3826  *  @data: Pointer to a byte to store the value read.
3827  *
3828  *  Reads a single byte from the NVM using the flash access registers.
3829  **/
3830 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3831                                          u8 *data)
3832 {
3833         s32 ret_val;
3834         u16 word = 0;
3835
3836         /* In SPT, only 32 bits access is supported,
3837          * so this function should not be called.
3838          */
3839         if (hw->mac.type >= e1000_pch_spt)
3840                 return -E1000_ERR_NVM;
3841         else
3842                 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3843
3844         if (ret_val)
3845                 return ret_val;
3846
3847         *data = (u8)word;
3848
3849         return E1000_SUCCESS;
3850 }
3851
3852 /**
3853  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3854  *  @hw: pointer to the HW structure
3855  *  @offset: The offset (in bytes) of the byte or word to read.
3856  *  @size: Size of data to read, 1=byte 2=word
3857  *  @data: Pointer to the word to store the value read.
3858  *
3859  *  Reads a byte or word from the NVM using the flash access registers.
3860  **/
3861 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3862                                          u8 size, u16 *data)
3863 {
3864         union ich8_hws_flash_status hsfsts;
3865         union ich8_hws_flash_ctrl hsflctl;
3866         u32 flash_linear_addr;
3867         u32 flash_data = 0;
3868         s32 ret_val = -E1000_ERR_NVM;
3869         u8 count = 0;
3870
3871         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3872
3873         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3874                 return -E1000_ERR_NVM;
3875         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3876                              hw->nvm.flash_base_addr);
3877
3878         do {
3879                 usec_delay(1);
3880                 /* Steps */
3881                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3882                 if (ret_val != E1000_SUCCESS)
3883                         break;
3884                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3885
3886                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3887                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3888                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3889                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3890                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3891
3892                 ret_val = e1000_flash_cycle_ich8lan(hw,
3893                                                 ICH_FLASH_READ_COMMAND_TIMEOUT);
3894
3895                 /* Check if FCERR is set to 1, if set to 1, clear it
3896                  * and try the whole sequence a few more times, else
3897                  * read in (shift in) the Flash Data0, the order is
3898                  * least significant byte first msb to lsb
3899                  */
3900                 if (ret_val == E1000_SUCCESS) {
3901                         flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3902                         if (size == 1)
3903                                 *data = (u8)(flash_data & 0x000000FF);
3904                         else if (size == 2)
3905                                 *data = (u16)(flash_data & 0x0000FFFF);
3906                         break;
3907                 } else {
3908                         /* If we've gotten here, then things are probably
3909                          * completely hosed, but if the error condition is
3910                          * detected, it won't hurt to give it another try...
3911                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3912                          */
3913                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3914                                                               ICH_FLASH_HSFSTS);
3915                         if (hsfsts.hsf_status.flcerr) {
3916                                 /* Repeat for some time before giving up. */
3917                                 continue;
3918                         } else if (!hsfsts.hsf_status.flcdone) {
3919                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3920                                 break;
3921                         }
3922                 }
3923         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3924
3925         return ret_val;
3926 }
3927
3928 /**
3929  *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3930  *  @hw: pointer to the HW structure
3931  *  @offset: The offset (in bytes) of the dword to read.
3932  *  @data: Pointer to the dword to store the value read.
3933  *
3934  *  Reads a byte or word from the NVM using the flash access registers.
3935  **/
3936 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3937                                            u32 *data)
3938 {
3939         union ich8_hws_flash_status hsfsts;
3940         union ich8_hws_flash_ctrl hsflctl;
3941         u32 flash_linear_addr;
3942         s32 ret_val = -E1000_ERR_NVM;
3943         u8 count = 0;
3944
3945         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3946
3947                 if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3948                     hw->mac.type < e1000_pch_spt)
3949                         return -E1000_ERR_NVM;
3950         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3951                              hw->nvm.flash_base_addr);
3952
3953         do {
3954                 usec_delay(1);
3955                 /* Steps */
3956                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3957                 if (ret_val != E1000_SUCCESS)
3958                         break;
3959                 /* In SPT, This register is in Lan memory space, not flash.
3960                  * Therefore, only 32 bit access is supported
3961                  */
3962                 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3963
3964                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3965                 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3966                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3967                 /* In SPT, This register is in Lan memory space, not flash.
3968                  * Therefore, only 32 bit access is supported
3969                  */
3970                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3971                                       (u32)hsflctl.regval << 16);
3972                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3973
3974                 ret_val = e1000_flash_cycle_ich8lan(hw,
3975                                                 ICH_FLASH_READ_COMMAND_TIMEOUT);
3976
3977                 /* Check if FCERR is set to 1, if set to 1, clear it
3978                  * and try the whole sequence a few more times, else
3979                  * read in (shift in) the Flash Data0, the order is
3980                  * least significant byte first msb to lsb
3981                  */
3982                 if (ret_val == E1000_SUCCESS) {
3983                         *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3984                         break;
3985                 } else {
3986                         /* If we've gotten here, then things are probably
3987                          * completely hosed, but if the error condition is
3988                          * detected, it won't hurt to give it another try...
3989                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3990                          */
3991                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3992                                                               ICH_FLASH_HSFSTS);
3993                         if (hsfsts.hsf_status.flcerr) {
3994                                 /* Repeat for some time before giving up. */
3995                                 continue;
3996                         } else if (!hsfsts.hsf_status.flcdone) {
3997                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3998                                 break;
3999                         }
4000                 }
4001         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4002
4003         return ret_val;
4004 }
4005
4006 /**
4007  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
4008  *  @hw: pointer to the HW structure
4009  *  @offset: The offset (in bytes) of the word(s) to write.
4010  *  @words: Size of data to write in words
4011  *  @data: Pointer to the word(s) to write at offset.
4012  *
4013  *  Writes a byte or word to the NVM using the flash access registers.
4014  **/
4015 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
4016                                    u16 *data)
4017 {
4018         struct e1000_nvm_info *nvm = &hw->nvm;
4019         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4020         u16 i;
4021
4022         DEBUGFUNC("e1000_write_nvm_ich8lan");
4023
4024         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
4025             (words == 0)) {
4026                 DEBUGOUT("nvm parameter(s) out of bounds\n");
4027                 return -E1000_ERR_NVM;
4028         }
4029
4030         nvm->ops.acquire(hw);
4031
4032         for (i = 0; i < words; i++) {
4033                 dev_spec->shadow_ram[offset+i].modified = TRUE;
4034                 dev_spec->shadow_ram[offset+i].value = data[i];
4035         }
4036
4037         nvm->ops.release(hw);
4038
4039         return E1000_SUCCESS;
4040 }
4041
4042 /**
4043  *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
4044  *  @hw: pointer to the HW structure
4045  *
4046  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4047  *  which writes the checksum to the shadow ram.  The changes in the shadow
4048  *  ram are then committed to the EEPROM by processing each bank at a time
4049  *  checking for the modified bit and writing only the pending changes.
4050  *  After a successful commit, the shadow ram is cleared and is ready for
4051  *  future writes.
4052  **/
4053 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4054 {
4055         struct e1000_nvm_info *nvm = &hw->nvm;
4056         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4057         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4058         s32 ret_val;
4059         u32 dword = 0;
4060
4061         DEBUGFUNC("e1000_update_nvm_checksum_spt");
4062
4063         ret_val = e1000_update_nvm_checksum_generic(hw);
4064         if (ret_val)
4065                 goto out;
4066
4067         if (nvm->type != e1000_nvm_flash_sw)
4068                 goto out;
4069
4070         nvm->ops.acquire(hw);
4071
4072         /* We're writing to the opposite bank so if we're on bank 1,
4073          * write to bank 0 etc.  We also need to erase the segment that
4074          * is going to be written
4075          */
4076         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4077         if (ret_val != E1000_SUCCESS) {
4078                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4079                 bank = 0;
4080         }
4081
4082         if (bank == 0) {
4083                 new_bank_offset = nvm->flash_bank_size;
4084                 old_bank_offset = 0;
4085                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4086                 if (ret_val)
4087                         goto release;
4088         } else {
4089                 old_bank_offset = nvm->flash_bank_size;
4090                 new_bank_offset = 0;
4091                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4092                 if (ret_val)
4093                         goto release;
4094         }
4095         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4096                 /* Determine whether to write the value stored
4097                  * in the other NVM bank or a modified value stored
4098                  * in the shadow RAM
4099                  */
4100                 ret_val = e1000_read_flash_dword_ich8lan(hw,
4101                                                          i + old_bank_offset,
4102                                                          &dword);
4103
4104                 if (dev_spec->shadow_ram[i].modified) {
4105                         dword &= 0xffff0000;
4106                         dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4107                 }
4108                 if (dev_spec->shadow_ram[i + 1].modified) {
4109                         dword &= 0x0000ffff;
4110                         dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4111                                   << 16);
4112                 }
4113                 if (ret_val)
4114                         break;
4115
4116                 /* If the word is 0x13, then make sure the signature bits
4117                  * (15:14) are 11b until the commit has completed.
4118                  * This will allow us to write 10b which indicates the
4119                  * signature is valid.  We want to do this after the write
4120                  * has completed so that we don't mark the segment valid
4121                  * while the write is still in progress
4122                  */
4123                 if (i == E1000_ICH_NVM_SIG_WORD - 1)
4124                         dword |= E1000_ICH_NVM_SIG_MASK << 16;
4125
4126                 /* Convert offset to bytes. */
4127                 act_offset = (i + new_bank_offset) << 1;
4128
4129                 usec_delay(100);
4130
4131                 /* Write the data to the new bank. Offset in words*/
4132                 act_offset = i + new_bank_offset;
4133                 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4134                                                                 dword);
4135                 if (ret_val)
4136                         break;
4137          }
4138
4139         /* Don't bother writing the segment valid bits if sector
4140          * programming failed.
4141          */
4142         if (ret_val) {
4143                 DEBUGOUT("Flash commit failed.\n");
4144                 goto release;
4145         }
4146
4147         /* Finally validate the new segment by setting bit 15:14
4148          * to 10b in word 0x13 , this can be done without an
4149          * erase as well since these bits are 11 to start with
4150          * and we need to change bit 14 to 0b
4151          */
4152         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4153
4154         /*offset in words but we read dword*/
4155         --act_offset;
4156         ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4157
4158         if (ret_val)
4159                 goto release;
4160
4161         dword &= 0xBFFFFFFF;
4162         ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4163
4164         if (ret_val)
4165                 goto release;
4166
4167         /* And invalidate the previously valid segment by setting
4168          * its signature word (0x13) high_byte to 0b. This can be
4169          * done without an erase because flash erase sets all bits
4170          * to 1's. We can write 1's to 0's without an erase
4171          */
4172         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4173
4174         /* offset in words but we read dword*/
4175         act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4176         ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4177
4178         if (ret_val)
4179                 goto release;
4180
4181         dword &= 0x00FFFFFF;
4182         ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4183
4184         if (ret_val)
4185                 goto release;
4186
4187         /* Great!  Everything worked, we can now clear the cached entries. */
4188         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4189                 dev_spec->shadow_ram[i].modified = FALSE;
4190                 dev_spec->shadow_ram[i].value = 0xFFFF;
4191         }
4192
4193 release:
4194         nvm->ops.release(hw);
4195
4196         /* Reload the EEPROM, or else modifications will not appear
4197          * until after the next adapter reset.
4198          */
4199         if (!ret_val) {
4200                 nvm->ops.reload(hw);
4201                 msec_delay(10);
4202         }
4203
4204 out:
4205         if (ret_val)
4206                 DEBUGOUT1("NVM update error: %d\n", ret_val);
4207
4208         return ret_val;
4209 }
4210
4211 /**
4212  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4213  *  @hw: pointer to the HW structure
4214  *
4215  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4216  *  which writes the checksum to the shadow ram.  The changes in the shadow
4217  *  ram are then committed to the EEPROM by processing each bank at a time
4218  *  checking for the modified bit and writing only the pending changes.
4219  *  After a successful commit, the shadow ram is cleared and is ready for
4220  *  future writes.
4221  **/
4222 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4223 {
4224         struct e1000_nvm_info *nvm = &hw->nvm;
4225         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4226         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4227         s32 ret_val;
4228         u16 data = 0;
4229
4230         DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4231
4232         ret_val = e1000_update_nvm_checksum_generic(hw);
4233         if (ret_val)
4234                 goto out;
4235
4236         if (nvm->type != e1000_nvm_flash_sw)
4237                 goto out;
4238
4239         nvm->ops.acquire(hw);
4240
4241         /* We're writing to the opposite bank so if we're on bank 1,
4242          * write to bank 0 etc.  We also need to erase the segment that
4243          * is going to be written
4244          */
4245         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4246         if (ret_val != E1000_SUCCESS) {
4247                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4248                 bank = 0;
4249         }
4250
4251         if (bank == 0) {
4252                 new_bank_offset = nvm->flash_bank_size;
4253                 old_bank_offset = 0;
4254                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4255                 if (ret_val)
4256                         goto release;
4257         } else {
4258                 old_bank_offset = nvm->flash_bank_size;
4259                 new_bank_offset = 0;
4260                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4261                 if (ret_val)
4262                         goto release;
4263         }
4264         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4265                 if (dev_spec->shadow_ram[i].modified) {
4266                         data = dev_spec->shadow_ram[i].value;
4267                 } else {
4268                         ret_val = e1000_read_flash_word_ich8lan(hw, i +
4269                                                                 old_bank_offset,
4270                                                                 &data);
4271                         if (ret_val)
4272                                 break;
4273                 }
4274                 /* If the word is 0x13, then make sure the signature bits
4275                  * (15:14) are 11b until the commit has completed.
4276                  * This will allow us to write 10b which indicates the
4277                  * signature is valid.  We want to do this after the write
4278                  * has completed so that we don't mark the segment valid
4279                  * while the write is still in progress
4280                  */
4281                 if (i == E1000_ICH_NVM_SIG_WORD)
4282                         data |= E1000_ICH_NVM_SIG_MASK;
4283
4284                 /* Convert offset to bytes. */
4285                 act_offset = (i + new_bank_offset) << 1;
4286
4287                 usec_delay(100);
4288
4289                 /* Write the bytes to the new bank. */
4290                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4291                                                                act_offset,
4292                                                                (u8)data);
4293                 if (ret_val)
4294                         break;
4295
4296                 usec_delay(100);
4297                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4298                                                           act_offset + 1,
4299                                                           (u8)(data >> 8));
4300                 if (ret_val)
4301                         break;
4302          }
4303
4304         /* Don't bother writing the segment valid bits if sector
4305          * programming failed.
4306          */
4307         if (ret_val) {
4308                 DEBUGOUT("Flash commit failed.\n");
4309                 goto release;
4310         }
4311
4312         /* Finally validate the new segment by setting bit 15:14
4313          * to 10b in word 0x13 , this can be done without an
4314          * erase as well since these bits are 11 to start with
4315          * and we need to change bit 14 to 0b
4316          */
4317         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4318         ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4319         if (ret_val)
4320                 goto release;
4321
4322         data &= 0xBFFF;
4323         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4324                                                        (u8)(data >> 8));
4325         if (ret_val)
4326                 goto release;
4327
4328         /* And invalidate the previously valid segment by setting
4329          * its signature word (0x13) high_byte to 0b. This can be
4330          * done without an erase because flash erase sets all bits
4331          * to 1's. We can write 1's to 0's without an erase
4332          */
4333         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4334
4335         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4336
4337         if (ret_val)
4338                 goto release;
4339
4340         /* Great!  Everything worked, we can now clear the cached entries. */
4341         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4342                 dev_spec->shadow_ram[i].modified = FALSE;
4343                 dev_spec->shadow_ram[i].value = 0xFFFF;
4344         }
4345
4346 release:
4347         nvm->ops.release(hw);
4348
4349         /* Reload the EEPROM, or else modifications will not appear
4350          * until after the next adapter reset.
4351          */
4352         if (!ret_val) {
4353                 nvm->ops.reload(hw);
4354                 msec_delay(10);
4355         }
4356
4357 out:
4358         if (ret_val)
4359                 DEBUGOUT1("NVM update error: %d\n", ret_val);
4360
4361         return ret_val;
4362 }
4363
4364 /**
4365  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4366  *  @hw: pointer to the HW structure
4367  *
4368  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4369  *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
4370  *  calculated, in which case we need to calculate the checksum and set bit 6.
4371  **/
4372 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4373 {
4374         s32 ret_val;
4375         u16 data;
4376         u16 word;
4377         u16 valid_csum_mask;
4378
4379         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4380
4381         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4382          * the checksum needs to be fixed.  This bit is an indication that
4383          * the NVM was prepared by OEM software and did not calculate
4384          * the checksum...a likely scenario.
4385          */
4386         switch (hw->mac.type) {
4387         case e1000_pch_lpt:
4388         case e1000_pch_spt:
4389                 word = NVM_COMPAT;
4390                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4391                 break;
4392         default:
4393                 word = NVM_FUTURE_INIT_WORD1;
4394                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4395                 break;
4396         }
4397
4398         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4399         if (ret_val)
4400                 return ret_val;
4401
4402         if (!(data & valid_csum_mask)) {
4403                 data |= valid_csum_mask;
4404                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4405                 if (ret_val)
4406                         return ret_val;
4407                 ret_val = hw->nvm.ops.update(hw);
4408                 if (ret_val)
4409                         return ret_val;
4410         }
4411
4412         return e1000_validate_nvm_checksum_generic(hw);
4413 }
4414
4415 /**
4416  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4417  *  @hw: pointer to the HW structure
4418  *  @offset: The offset (in bytes) of the byte/word to read.
4419  *  @size: Size of data to read, 1=byte 2=word
4420  *  @data: The byte(s) to write to the NVM.
4421  *
4422  *  Writes one/two bytes to the NVM using the flash access registers.
4423  **/
4424 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4425                                           u8 size, u16 data)
4426 {
4427         union ich8_hws_flash_status hsfsts;
4428         union ich8_hws_flash_ctrl hsflctl;
4429         u32 flash_linear_addr;
4430         u32 flash_data = 0;
4431         s32 ret_val;
4432         u8 count = 0;
4433
4434         DEBUGFUNC("e1000_write_ich8_data");
4435
4436         if (hw->mac.type >= e1000_pch_spt) {
4437                 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4438                         return -E1000_ERR_NVM;
4439         } else {
4440                 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4441                         return -E1000_ERR_NVM;
4442         }
4443
4444         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4445                              hw->nvm.flash_base_addr);
4446
4447         do {
4448                 usec_delay(1);
4449                 /* Steps */
4450                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4451                 if (ret_val != E1000_SUCCESS)
4452                         break;
4453                 /* In SPT, This register is in Lan memory space, not
4454                  * flash.  Therefore, only 32 bit access is supported
4455                  */
4456                 if (hw->mac.type >= e1000_pch_spt)
4457                         hsflctl.regval =
4458                             E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4459                 else
4460                         hsflctl.regval =
4461                             E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4462
4463                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4464                 hsflctl.hsf_ctrl.fldbcount = size - 1;
4465                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4466                 /* In SPT, This register is in Lan memory space,
4467                  * not flash.  Therefore, only 32 bit access is
4468                  * supported
4469                  */
4470                 if (hw->mac.type >= e1000_pch_spt)
4471                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4472                                               hsflctl.regval << 16);
4473                 else
4474                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4475                                                 hsflctl.regval);
4476
4477                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4478
4479                 if (size == 1)
4480                         flash_data = (u32)data & 0x00FF;
4481                 else
4482                         flash_data = (u32)data;
4483
4484                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4485
4486                 /* check if FCERR is set to 1 , if set to 1, clear it
4487                  * and try the whole sequence a few more times else done
4488                  */
4489                 ret_val =
4490                     e1000_flash_cycle_ich8lan(hw,
4491                                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4492                 if (ret_val == E1000_SUCCESS)
4493                         break;
4494
4495                 /* If we're here, then things are most likely
4496                  * completely hosed, but if the error condition
4497                  * is detected, it won't hurt to give it another
4498                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4499                  */
4500                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4501                 if (hsfsts.hsf_status.flcerr)
4502                         /* Repeat for some time before giving up. */
4503                         continue;
4504                 if (!hsfsts.hsf_status.flcdone) {
4505                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4506                         break;
4507                 }
4508         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4509
4510         return ret_val;
4511 }
4512
/**
 *  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the dword to write.
 *  @data: The dword (4 bytes) to write to the NVM.
 *
 *  Writes a single dword to the NVM using the flash access registers.
 *  Retries the flash cycle up to ICH_FLASH_CYCLE_REPEAT_COUNT times on a
 *  flash cycle error (FCERR) before giving up.
 **/
static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
					    u32 data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	s32 ret_val;
	u8 count = 0;

	DEBUGFUNC("e1000_write_flash_data32_ich8lan");

	/* NOTE(review): the offset is validated only for SPT and later;
	 * presumably this dword path is only reached on such parts - confirm
	 * against callers.
	 */
	if (hw->mac.type >= e1000_pch_spt) {
		if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
			return -E1000_ERR_NVM;
	}
	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);
	do {
		usec_delay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != E1000_SUCCESS)
			break;

		/* In SPT, This register is in Lan memory space, not
		 * flash.  Therefore, only 32 bit access is supported
		 */
		if (hw->mac.type >= e1000_pch_spt)
			hsflctl.regval = E1000_READ_FLASH_REG(hw,
							      ICH_FLASH_HSFSTS)
					 >> 16;
		else
			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
							      ICH_FLASH_HSFCTL);

		/* The byte-count field encodes size - 1; program a dword. */
		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;

		/* In SPT, This register is in Lan memory space,
		 * not flash.  Therefore, only 32 bit access is
		 * supported
		 */
		if (hw->mac.type >= e1000_pch_spt)
			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
					      hsflctl.regval << 16);
		else
			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
						hsflctl.regval);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);

		/* check if FCERR is set to 1 , if set to 1, clear it
		 * and try the whole sequence a few more times else done
		 */
		ret_val = e1000_flash_cycle_ich8lan(hw,
					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);

		if (ret_val == E1000_SUCCESS)
			break;

		/* If we're here, then things are most likely
		 * completely hosed, but if the error condition
		 * is detected, it won't hurt to give it another
		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
		 */
		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

		if (hsfsts.hsf_status.flcerr)
			/* Repeat for some time before giving up. */
			continue;
		if (!hsfsts.hsf_status.flcdone) {
			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
			break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
4601
4602 /**
4603  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4604  *  @hw: pointer to the HW structure
4605  *  @offset: The index of the byte to read.
4606  *  @data: The byte to write to the NVM.
4607  *
4608  *  Writes a single byte to the NVM using the flash access registers.
4609  **/
4610 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4611                                           u8 data)
4612 {
4613         u16 word = (u16)data;
4614
4615         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4616
4617         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4618 }
4619
4620 /**
4621 *  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4622 *  @hw: pointer to the HW structure
4623 *  @offset: The offset of the word to write.
4624 *  @dword: The dword to write to the NVM.
4625 *
4626 *  Writes a single dword to the NVM using the flash access registers.
4627 *  Goes through a retry algorithm before giving up.
4628 **/
4629 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4630                                                  u32 offset, u32 dword)
4631 {
4632         s32 ret_val;
4633         u16 program_retries;
4634
4635         DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4636
4637         /* Must convert word offset into bytes. */
4638         offset <<= 1;
4639
4640         ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4641
4642         if (!ret_val)
4643                 return ret_val;
4644         for (program_retries = 0; program_retries < 100; program_retries++) {
4645                 DEBUGOUT2("Retrying Byte %8.8X at offset %u\n", dword, offset);
4646                 usec_delay(100);
4647                 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4648                 if (ret_val == E1000_SUCCESS)
4649                         break;
4650         }
4651         if (program_retries == 100)
4652                 return -E1000_ERR_NVM;
4653
4654         return E1000_SUCCESS;
4655 }
4656
4657 /**
4658  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4659  *  @hw: pointer to the HW structure
4660  *  @offset: The offset of the byte to write.
4661  *  @byte: The byte to write to the NVM.
4662  *
4663  *  Writes a single byte to the NVM using the flash access registers.
4664  *  Goes through a retry algorithm before giving up.
4665  **/
4666 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4667                                                 u32 offset, u8 byte)
4668 {
4669         s32 ret_val;
4670         u16 program_retries;
4671
4672         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4673
4674         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4675         if (!ret_val)
4676                 return ret_val;
4677
4678         for (program_retries = 0; program_retries < 100; program_retries++) {
4679                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4680                 usec_delay(100);
4681                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4682                 if (ret_val == E1000_SUCCESS)
4683                         break;
4684         }
4685         if (program_retries == 100)
4686                 return -E1000_ERR_NVM;
4687
4688         return E1000_SUCCESS;
4689 }
4690
4691 /**
4692  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4693  *  @hw: pointer to the HW structure
4694  *  @bank: 0 for first bank, 1 for second bank, etc.
4695  *
4696  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4697  *  bank N is 4096 * N + flash_reg_addr.
4698  **/
4699 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4700 {
4701         struct e1000_nvm_info *nvm = &hw->nvm;
4702         union ich8_hws_flash_status hsfsts;
4703         union ich8_hws_flash_ctrl hsflctl;
4704         u32 flash_linear_addr;
4705         /* bank size is in 16bit words - adjust to bytes */
4706         u32 flash_bank_size = nvm->flash_bank_size * 2;
4707         s32 ret_val;
4708         s32 count = 0;
4709         s32 j, iteration, sector_size;
4710
4711         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4712
4713         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4714
4715         /* Determine HW Sector size: Read BERASE bits of hw flash status
4716          * register
4717          * 00: The Hw sector is 256 bytes, hence we need to erase 16
4718          *     consecutive sectors.  The start index for the nth Hw sector
4719          *     can be calculated as = bank * 4096 + n * 256
4720          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4721          *     The start index for the nth Hw sector can be calculated
4722          *     as = bank * 4096
4723          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4724          *     (ich9 only, otherwise error condition)
4725          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4726          */
4727         switch (hsfsts.hsf_status.berasesz) {
4728         case 0:
4729                 /* Hw sector size 256 */
4730                 sector_size = ICH_FLASH_SEG_SIZE_256;
4731                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4732                 break;
4733         case 1:
4734                 sector_size = ICH_FLASH_SEG_SIZE_4K;
4735                 iteration = 1;
4736                 break;
4737         case 2:
4738                 sector_size = ICH_FLASH_SEG_SIZE_8K;
4739                 iteration = 1;
4740                 break;
4741         case 3:
4742                 sector_size = ICH_FLASH_SEG_SIZE_64K;
4743                 iteration = 1;
4744                 break;
4745         default:
4746                 return -E1000_ERR_NVM;
4747         }
4748
4749         /* Start with the base address, then add the sector offset. */
4750         flash_linear_addr = hw->nvm.flash_base_addr;
4751         flash_linear_addr += (bank) ? flash_bank_size : 0;
4752
4753         for (j = 0; j < iteration; j++) {
4754                 do {
4755                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4756
4757                         /* Steps */
4758                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
4759                         if (ret_val)
4760                                 return ret_val;
4761
4762                         /* Write a value 11 (block Erase) in Flash
4763                          * Cycle field in hw flash control
4764                          */
4765                         if (hw->mac.type >= e1000_pch_spt)
4766                                 hsflctl.regval =
4767                                     E1000_READ_FLASH_REG(hw,
4768                                                          ICH_FLASH_HSFSTS)>>16;
4769                         else
4770                                 hsflctl.regval =
4771                                     E1000_READ_FLASH_REG16(hw,
4772                                                            ICH_FLASH_HSFCTL);
4773
4774                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4775                         if (hw->mac.type >= e1000_pch_spt)
4776                                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4777                                                       hsflctl.regval << 16);
4778                         else
4779                                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4780                                                         hsflctl.regval);
4781
4782                         /* Write the last 24 bits of an index within the
4783                          * block into Flash Linear address field in Flash
4784                          * Address.
4785                          */
4786                         flash_linear_addr += (j * sector_size);
4787                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4788                                               flash_linear_addr);
4789
4790                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4791                         if (ret_val == E1000_SUCCESS)
4792                                 break;
4793
4794                         /* Check if FCERR is set to 1.  If 1,
4795                          * clear it and try the whole sequence
4796                          * a few more times else Done
4797                          */
4798                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4799                                                       ICH_FLASH_HSFSTS);
4800                         if (hsfsts.hsf_status.flcerr)
4801                                 /* repeat for some time before giving up */
4802                                 continue;
4803                         else if (!hsfsts.hsf_status.flcdone)
4804                                 return ret_val;
4805                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4806         }
4807
4808         return E1000_SUCCESS;
4809 }
4810
4811 /**
4812  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4813  *  @hw: pointer to the HW structure
4814  *  @data: Pointer to the LED settings
4815  *
4816  *  Reads the LED default settings from the NVM to data.  If the NVM LED
4817  *  settings is all 0's or F's, set the LED default to a valid LED default
4818  *  setting.
4819  **/
4820 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4821 {
4822         s32 ret_val;
4823
4824         DEBUGFUNC("e1000_valid_led_default_ich8lan");
4825
4826         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4827         if (ret_val) {
4828                 DEBUGOUT("NVM Read Error\n");
4829                 return ret_val;
4830         }
4831
4832         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4833                 *data = ID_LED_DEFAULT_ICH8LAN;
4834
4835         return E1000_SUCCESS;
4836 }
4837
/**
 *  e1000_id_led_init_pchlan - store LED configurations
 *  @hw: pointer to the HW structure
 *
 *  PCH does not control LEDs via the LEDCTL register, rather it uses
 *  the PHY LED configuration register.
 *
 *  PCH also does not have an "always on" or "always off" mode which
 *  complicates the ID feature.  Instead of using the "on" mode to indicate
 *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
 *  use "link_up" mode.  The LEDs will still ID on request if there is no
 *  link based on logic in e1000_led_[on|off]_pchlan().
 **/
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	/* "on" maps to link-up mode; "off" is link-up with output inverted */
	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
	u16 data, i, temp, shift;

	DEBUGFUNC("e1000_id_led_init_pchlan");

	/* Get default ID LED modes */
	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		return ret_val;

	/* Start both blink patterns from the current LEDCTL contents. */
	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	/* The NVM word packs one 4-bit mode per LED (hence i << 2), while
	 * each PHY LED config field is 5 bits wide (hence i * 5).
	 */
	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
		shift = (i * 5);
		/* ledctl_mode1: state of this LED while "LED1" is asserted */
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode1 |= (ledctl_on << shift);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode1 |= (ledctl_off << shift);
			break;
		default:
			/* Do nothing */
			break;
		}
		/* ledctl_mode2: state of this LED while "LED2" is asserted */
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode2 |= (ledctl_on << shift);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode2 |= (ledctl_off << shift);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

	return E1000_SUCCESS;
}
4911
4912 /**
4913  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4914  *  @hw: pointer to the HW structure
4915  *
4916  *  ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4917  *  register, so the bus width is hard coded.
4918  **/
4919 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4920 {
4921         struct e1000_bus_info *bus = &hw->bus;
4922         s32 ret_val;
4923
4924         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4925
4926         ret_val = e1000_get_bus_info_pcie_generic(hw);
4927
4928         /* ICH devices are "PCI Express"-ish.  They have
4929          * a configuration space, but do not contain
4930          * PCI Express Capability registers, so bus width
4931          * must be hardcoded.
4932          */
4933         if (bus->width == e1000_bus_width_unknown)
4934                 bus->width = e1000_bus_width_pcie_x1;
4935
4936         return ret_val;
4937 }
4938
/**
 *  e1000_reset_hw_ich8lan - Reset the hardware
 *  @hw: pointer to the HW structure
 *
 *  Does a full reset of the hardware which includes a reset of the PHY and
 *  MAC.  The sequence is order-sensitive: bus-master disable and Tx/Rx
 *  quiesce first, then the global (and optionally PHY) reset, then
 *  post-reset PHY configuration.
 **/
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 kum_cfg;
	u32 ctrl, reg;
	s32 ret_val;

	DEBUGFUNC("e1000_reset_hw_ich8lan");

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);

	/* Disable the Transmit and Receive units.  Then delay to allow
	 * any pending transactions to complete before we hit the MAC
	 * with the global reset.
	 */
	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	msec_delay(10);

	/* Workaround for ICH8 bit corruption issue in FIFO memory */
	if (hw->mac.type == e1000_ich8lan) {
		/* Set Tx and Rx buffer allocation to 8k apiece. */
		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
		/* Set Packet Buffer Size to 16k. */
		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
	}

	if (hw->mac.type == e1000_pchlan) {
		/* Save the NVM K1 bit setting so it survives the reset */
		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
		if (ret_val)
			return ret_val;

		if (kum_cfg & E1000_NVM_K1_ENABLE)
			dev_spec->nvm_k1_enabled = TRUE;
		else
			dev_spec->nvm_k1_enabled = FALSE;
	}

	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	if (!hw->phy.ops.check_reset_block(hw)) {
		/* Full-chip reset requires MAC and PHY reset at the same
		 * time to make sure the interface between MAC and the
		 * external PHY is reset.
		 */
		ctrl |= E1000_CTRL_PHY_RST;

		/* Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if ((hw->mac.type == e1000_pch2lan) &&
		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
	}
	/* The reset is issued whether or not the software flag was acquired;
	 * the mutex below is released only when acquisition succeeded.
	 */
	ret_val = e1000_acquire_swflag_ich8lan(hw);
	DEBUGOUT("Issuing a global reset to ich8lan\n");
	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
	/* cannot issue a flush here because it hangs the hardware */
	msec_delay(20);

	/* Set Phy Config Counter to 50msec */
	if (hw->mac.type == e1000_pch2lan) {
		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
	}

	if (!ret_val)
		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);

	if (ctrl & E1000_CTRL_PHY_RST) {
		/* PHY was reset along with the MAC: wait for its
		 * configuration to finish, then run post-reset fixups.
		 */
		ret_val = hw->phy.ops.get_cfg_done(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1000_post_phy_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* For PCH, this write will make sure that any noise
	 * will be detected as a CRC error and be dropped rather than show up
	 * as a bad packet to the DMA engine.
	 */
	if (hw->mac.type == e1000_pchlan)
		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);

	/* Mask all interrupts again and clear any pending events (ICR is
	 * clear-on-read).
	 */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_READ_REG(hw, E1000_ICR);

	reg = E1000_READ_REG(hw, E1000_KABGTXD);
	reg |= E1000_KABGTXD_BGSQLBIAS;
	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);

	return E1000_SUCCESS;
}
5054
/**
 *  e1000_init_hw_ich8lan - Initialize the hardware
 *  @hw: pointer to the HW structure
 *
 *  Prepares the hardware for transmit and receive by doing the following:
 *   - initialize hardware bits
 *   - initialize LED identification
 *   - setup receive address registers
 *   - setup flow control
 *   - setup transmit descriptors
 *   - clear statistics
 *
 *  Returns the result of setup_link (or of an intermediate PHY reset on
 *  82578); LED-init failure is logged but not treated as fatal.
 **/
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 ctrl_ext, txdctl, snoop;
	s32 ret_val;
	u16 i;

	DEBUGFUNC("e1000_init_hw_ich8lan");

	e1000_initialize_hw_bits_ich8lan(hw);

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	/* An error is not fatal and we should not stop init due to this */
	if (ret_val)
		DEBUGOUT("Error initializing identification LED\n");

	/* Setup the receive address. */
	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
	 * the ME.  Disable wakeup by clearing the host wakeup bit.
	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
	 */
	if (hw->phy.type == e1000_phy_82578) {
		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
		i &= ~BM_WUC_HOST_WU_BIT;
		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
		ret_val = e1000_phy_hw_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);

	/* Set the transmit descriptor write-back policy for both queues */
	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
		  E1000_TXDCTL_FULL_TX_DESC_WB);
	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
		  E1000_TXDCTL_FULL_TX_DESC_WB);
	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);

	/* ICH8 has opposite polarity of no_snoop bits.
	 * By default, we should use snoop behavior.
	 */
	if (mac->type == e1000_ich8lan)
		snoop = PCIE_ICH8_SNOOP_ALL;
	else
		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
	e1000_set_pcie_no_snoop_generic(hw, snoop);

	/* Set the RO_DIS bit in CTRL_EXT (presumably disables PCIe relaxed
	 * ordering - per macro name).
	 */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_ich8lan(hw);

	return ret_val;
}
5144
5145 /**
5146  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5147  *  @hw: pointer to the HW structure
5148  *
5149  *  Sets/Clears required hardware bits necessary for correctly setting up the
5150  *  hardware for transmit and receive.
5151  **/
5152 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5153 {
5154         u32 reg;
5155
5156         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5157
5158         /* Extended Device Control */
5159         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5160         reg |= (1 << 22);
5161         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
5162         if (hw->mac.type >= e1000_pchlan)
5163                 reg |= E1000_CTRL_EXT_PHYPDEN;
5164         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5165
5166         /* Transmit Descriptor Control 0 */
5167         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5168         reg |= (1 << 22);
5169         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5170
5171         /* Transmit Descriptor Control 1 */
5172         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5173         reg |= (1 << 22);
5174         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5175
5176         /* Transmit Arbitration Control 0 */
5177         reg = E1000_READ_REG(hw, E1000_TARC(0));
5178         if (hw->mac.type == e1000_ich8lan)
5179                 reg |= (1 << 28) | (1 << 29);
5180         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5181         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5182
5183         /* Transmit Arbitration Control 1 */
5184         reg = E1000_READ_REG(hw, E1000_TARC(1));
5185         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5186                 reg &= ~(1 << 28);
5187         else
5188                 reg |= (1 << 28);
5189         reg |= (1 << 24) | (1 << 26) | (1 << 30);
5190         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5191
5192         /* Device Status */
5193         if (hw->mac.type == e1000_ich8lan) {
5194                 reg = E1000_READ_REG(hw, E1000_STATUS);
5195                 reg &= ~(1 << 31);
5196                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
5197         }
5198
5199         /* work-around descriptor data corruption issue during nfs v2 udp
5200          * traffic, just disable the nfs filtering capability
5201          */
5202         reg = E1000_READ_REG(hw, E1000_RFCTL);
5203         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5204
5205         /* Disable IPv6 extension header parsing because some malformed
5206          * IPv6 headers can hang the Rx.
5207          */
5208         if (hw->mac.type == e1000_ich8lan)
5209                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5210         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5211
5212         /* Enable ECC on Lynxpoint */
5213         if (hw->mac.type >= e1000_pch_lpt) {
5214                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5215                 reg |= E1000_PBECCSTS_ECC_ENABLE;
5216                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5217
5218                 reg = E1000_READ_REG(hw, E1000_CTRL);
5219                 reg |= E1000_CTRL_MEHE;
5220                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
5221         }
5222
5223         return;
5224 }
5225
5226 /**
5227  *  e1000_setup_link_ich8lan - Setup flow control and link settings
5228  *  @hw: pointer to the HW structure
5229  *
5230  *  Determines which flow control settings to use, then configures flow
5231  *  control.  Calls the appropriate media-specific link configuration
5232  *  function.  Assuming the adapter has a valid link partner, a valid link
5233  *  should be established.  Assumes the hardware has previously been reset
5234  *  and the transmitter and receiver are not enabled.
5235  **/
static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
{
        s32 ret_val;

        DEBUGFUNC("e1000_setup_link_ich8lan");

        /* If PHY resets are blocked, firmware owns link setup; report
         * success and do nothing.
         */
        if (hw->phy.ops.check_reset_block(hw))
                return E1000_SUCCESS;

        /* ICH parts do not have a word in the NVM to determine
         * the default flow control setting, so we explicitly
         * set it to full.
         */
        if (hw->fc.requested_mode == e1000_fc_default)
                hw->fc.requested_mode = e1000_fc_full;

        /* Save off the requested flow control mode for use later.  Depending
         * on the link partner's capabilities, we may or may not use this mode.
         */
        hw->fc.current_mode = hw->fc.requested_mode;

        DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
                hw->fc.current_mode);

        /* Continue to configure the copper link. */
        ret_val = hw->mac.ops.setup_physical_interface(hw);
        if (ret_val)
                return ret_val;

        /* Program the flow control pause timer value into the MAC. */
        E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
        if ((hw->phy.type == e1000_phy_82578) ||
            (hw->phy.type == e1000_phy_82579) ||
            (hw->phy.type == e1000_phy_i217) ||
            (hw->phy.type == e1000_phy_82577)) {
                /* PCH-family PHYs also take the refresh time in the MAC and
                 * a mirror of the pause time in a PHY register.
                 */
                E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);

                ret_val = hw->phy.ops.write_reg(hw,
                                             PHY_REG(BM_PORT_CTRL_PAGE, 27),
                                             hw->fc.pause_time);
                if (ret_val)
                        return ret_val;
        }

        return e1000_set_fc_watermarks_generic(hw);
}
5281
5282 /**
5283  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5284  *  @hw: pointer to the HW structure
5285  *
5286  *  Configures the kumeran interface to the PHY to wait the appropriate time
5287  *  when polling the PHY, then call the generic setup_copper_link to finish
5288  *  configuring the copper link.
5289  **/
static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
{
        u32 ctrl;
        s32 ret_val;
        u16 reg_data;

        DEBUGFUNC("e1000_setup_copper_link_ich8lan");

        /* Set Link Up (SLU) but clear the force-speed/force-duplex bits so
         * speed and duplex still come from auto-negotiation.
         */
        ctrl = E1000_READ_REG(hw, E1000_CTRL);
        ctrl |= E1000_CTRL_SLU;
        ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
        E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

        /* Set the mac to wait the maximum time between each iteration
         * and increase the max iterations when polling the phy;
         * this fixes erroneous timeouts at 10Mbps.
         */
        ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
                                               0xFFFF);
        if (ret_val)
                return ret_val;
        /* Read-modify-write the Kumeran inband parameter register,
         * setting its low six bits.
         */
        ret_val = e1000_read_kmrn_reg_generic(hw,
                                              E1000_KMRNCTRLSTA_INBAND_PARAM,
                                              &reg_data);
        if (ret_val)
                return ret_val;
        reg_data |= 0x3F;
        ret_val = e1000_write_kmrn_reg_generic(hw,
                                               E1000_KMRNCTRLSTA_INBAND_PARAM,
                                               reg_data);
        if (ret_val)
                return ret_val;

        /* Dispatch to the PHY-family-specific copper link setup. */
        switch (hw->phy.type) {
        case e1000_phy_igp_3:
                ret_val = e1000_copper_link_setup_igp(hw);
                if (ret_val)
                        return ret_val;
                break;
        case e1000_phy_bm:
        case e1000_phy_82578:
                ret_val = e1000_copper_link_setup_m88(hw);
                if (ret_val)
                        return ret_val;
                break;
        case e1000_phy_82577:
        case e1000_phy_82579:
                ret_val = e1000_copper_link_setup_82577(hw);
                if (ret_val)
                        return ret_val;
                break;
        case e1000_phy_ife:
                /* No common helper for IFE; program MDI/MDI-X directly from
                 * hw->phy.mdix: 1 forces MDI, 2 forces MDI-X, 0/other = auto.
                 */
                ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
                                               &reg_data);
                if (ret_val)
                        return ret_val;

                reg_data &= ~IFE_PMC_AUTO_MDIX;

                switch (hw->phy.mdix) {
                case 1:
                        reg_data &= ~IFE_PMC_FORCE_MDIX;
                        break;
                case 2:
                        reg_data |= IFE_PMC_FORCE_MDIX;
                        break;
                case 0:
                default:
                        reg_data |= IFE_PMC_AUTO_MDIX;
                        break;
                }
                ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
                                                reg_data);
                if (ret_val)
                        return ret_val;
                break;
        default:
                break;
        }

        return e1000_setup_copper_link_generic(hw);
}
5372
5373 /**
5374  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5375  *  @hw: pointer to the HW structure
5376  *
5377  *  Calls the PHY specific link setup function and then calls the
5378  *  generic setup_copper_link to finish configuring the link for
5379  *  Lynxpoint PCH devices
5380  **/
5381 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5382 {
5383         u32 ctrl;
5384         s32 ret_val;
5385
5386         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5387
5388         ctrl = E1000_READ_REG(hw, E1000_CTRL);
5389         ctrl |= E1000_CTRL_SLU;
5390         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5391         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5392
5393         ret_val = e1000_copper_link_setup_82577(hw);
5394         if (ret_val)
5395                 return ret_val;
5396
5397         return e1000_setup_copper_link_generic(hw);
5398 }
5399
5400 /**
5401  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5402  *  @hw: pointer to the HW structure
5403  *  @speed: pointer to store current link speed
5404  *  @duplex: pointer to store the current link duplex
5405  *
5406  *  Calls the generic get_speed_and_duplex to retrieve the current link
5407  *  information and then calls the Kumeran lock loss workaround for links at
5408  *  gigabit speeds.
5409  **/
5410 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5411                                           u16 *duplex)
5412 {
5413         s32 ret_val;
5414
5415         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5416
5417         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5418         if (ret_val)
5419                 return ret_val;
5420
5421         if ((hw->mac.type == e1000_ich8lan) &&
5422             (hw->phy.type == e1000_phy_igp_3) &&
5423             (*speed == SPEED_1000)) {
5424                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5425         }
5426
5427         return ret_val;
5428 }
5429
5430 /**
5431  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5432  *  @hw: pointer to the HW structure
5433  *
5434  *  Work-around for 82566 Kumeran PCS lock loss:
5435  *  On link status change (i.e. PCI reset, speed change) and link is up and
5436  *  speed is gigabit-
5437  *    0) if workaround is optionally disabled do nothing
5438  *    1) wait 1ms for Kumeran link to come up
5439  *    2) check Kumeran Diagnostic register PCS lock loss bit
5440  *    3) if not set the link is locked (all is good), otherwise...
5441  *    4) reset the PHY
5442  *    5) repeat up to 10 times
5443  *  Note: this is only called for IGP3 copper when speed is 1gb.
5444  **/
5445 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5446 {
5447         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5448         u32 phy_ctrl;
5449         s32 ret_val;
5450         u16 i, data;
5451         bool link;
5452
5453         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5454
5455         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5456                 return E1000_SUCCESS;
5457
5458         /* Make sure link is up before proceeding.  If not just return.
5459          * Attempting this while link is negotiating fouled up link
5460          * stability
5461          */
5462         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5463         if (!link)
5464                 return E1000_SUCCESS;
5465
5466         for (i = 0; i < 10; i++) {
5467                 /* read once to clear */
5468                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5469                 if (ret_val)
5470                         return ret_val;
5471                 /* and again to get new status */
5472                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5473                 if (ret_val)
5474                         return ret_val;
5475
5476                 /* check for PCS lock */
5477                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5478                         return E1000_SUCCESS;
5479
5480                 /* Issue PHY reset */
5481                 hw->phy.ops.reset(hw);
5482                 msec_delay_irq(5);
5483         }
5484         /* Disable GigE link negotiation */
5485         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5486         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5487                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5488         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5489
5490         /* Call gig speed drop workaround on Gig disable before accessing
5491          * any PHY registers
5492          */
5493         e1000_gig_downshift_workaround_ich8lan(hw);
5494
5495         /* unable to acquire PCS lock */
5496         return -E1000_ERR_PHY;
5497 }
5498
5499 /**
5500  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5501  *  @hw: pointer to the HW structure
5502  *  @state: boolean value used to set the current Kumeran workaround state
5503  *
5504  *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
5505  *  /disabled - FALSE).
5506  **/
5507 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5508                                                  bool state)
5509 {
5510         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5511
5512         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5513
5514         if (hw->mac.type != e1000_ich8lan) {
5515                 DEBUGOUT("Workaround applies to ICH8 only.\n");
5516                 return;
5517         }
5518
5519         dev_spec->kmrn_lock_loss_workaround_enabled = state;
5520
5521         return;
5522 }
5523
5524 /**
 *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5526  *  @hw: pointer to the HW structure
5527  *
5528  *  Workaround for 82566 power-down on D3 entry:
5529  *    1) disable gigabit link
5530  *    2) write VR power-down enable
5531  *    3) read it back
5532  *  Continue if successful, else issue LCD reset and repeat
5533  **/
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
        u32 reg;
        u16 data;
        u8  retry = 0;

        DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");

        /* Workaround only applies to the IGP3 (82566) PHY. */
        if (hw->phy.type != e1000_phy_igp_3)
                return;

        /* Try the workaround twice (if needed) */
        do {
                /* Disable link */
                reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
                reg |= (E1000_PHY_CTRL_GBE_DISABLE |
                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
                E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);

                /* Call gig speed drop workaround on Gig disable before
                 * accessing any PHY registers
                 */
                if (hw->mac.type == e1000_ich8lan)
                        e1000_gig_downshift_workaround_ich8lan(hw);

                /* Write VR power-down enable; clear the mode mask first,
                 * then write shutdown mode.  Return values of these PHY ops
                 * are deliberately ignored; the read-back below is the check.
                 */
                hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
                data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
                hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
                                      data | IGP3_VR_CTRL_MODE_SHUTDOWN);

                /* Read it back and test; stop if shutdown mode stuck or if
                 * this was already the retry pass.
                 */
                hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
                data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
                if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
                        break;

                /* Issue PHY reset and repeat at most one more time */
                reg = E1000_READ_REG(hw, E1000_CTRL);
                E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
                retry++;
        } while (retry);
}
5577
5578 /**
5579  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5580  *  @hw: pointer to the HW structure
5581  *
5582  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
5583  *  LPLU, Gig disable, MDIC PHY reset):
5584  *    1) Set Kumeran Near-end loopback
5585  *    2) Clear Kumeran Near-end loopback
5586  *  Should only be called for ICH8[m] devices with any 1G Phy.
5587  **/
void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
{
        s32 ret_val;
        u16 reg_data;

        DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");

        /* Applies only to ICH8 parts, and never to the IFE PHY. */
        if ((hw->mac.type != e1000_ich8lan) ||
            (hw->phy.type == e1000_phy_ife))
                return;

        /* Pulse Kumeran near-end loopback: read-modify-write to set it... */
        ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
                                              &reg_data);
        if (ret_val)
                return;
        reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
        ret_val = e1000_write_kmrn_reg_generic(hw,
                                               E1000_KMRNCTRLSTA_DIAG_OFFSET,
                                               reg_data);
        if (ret_val)
                return;
        /* ...then clear it again.  Final write is best-effort (unchecked). */
        reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
        e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
                                     reg_data);
}
5613
5614 /**
5615  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5616  *  @hw: pointer to the HW structure
5617  *
5618  *  During S0 to Sx transition, it is possible the link remains at gig
5619  *  instead of negotiating to a lower speed.  Before going to Sx, set
5620  *  'Gig Disable' to force link speed negotiation to a lower speed based on
5621  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5622  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5623  *  needs to be written.
5624  *  Parts that support (and are linked to a partner which support) EEE in
5625  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5626  *  than 10Mbps w/o EEE.
5627  **/
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
{
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
        u32 phy_ctrl;
        s32 ret_val;

        DEBUGFUNC("e1000_suspend_workarounds_ich8lan");

        /* Accumulate PHY_CTRL changes locally; the register is written
         * back once at the 'out' label below.
         */
        phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
        phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;

        if (hw->phy.type == e1000_phy_i217) {
                u16 phy_reg, device_id = hw->device_id;

                if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
                    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
                    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
                    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
                    (hw->mac.type >= e1000_pch_spt)) {
                        /* Stop requesting the PLL clock before entering Sx. */
                        u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

                        E1000_WRITE_REG(hw, E1000_FEXTNVM6,
                                        fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
                }

                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
                        goto out;

                if (!dev_spec->eee_disable) {
                        u16 eee_advert;

                        ret_val =
                            e1000_read_emi_reg_locked(hw,
                                                      I217_EEE_ADVERTISEMENT,
                                                      &eee_advert);
                        if (ret_val)
                                goto release;

                        /* Disable LPLU if both link partners support 100BaseT
                         * EEE and 100Full is advertised on both ends of the
                         * link, and enable Auto Enable LPI since there will
                         * be no driver to enable LPI while in Sx.
                         */
                        if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
                            (dev_spec->eee_lp_ability &
                             I82579_EEE_100_SUPPORTED) &&
                            (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
                                phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
                                              E1000_PHY_CTRL_NOND0A_LPLU);

                                /* Set Auto Enable LPI after link up */
                                hw->phy.ops.read_reg_locked(hw,
                                                            I217_LPI_GPIO_CTRL,
                                                            &phy_reg);
                                phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
                                hw->phy.ops.write_reg_locked(hw,
                                                             I217_LPI_GPIO_CTRL,
                                                             phy_reg);
                        }
                }

                /* For i217 Intel Rapid Start Technology support,
                 * when the system is going into Sx and no manageability engine
                 * is present, the driver must configure proxy to reset only on
                 * power good.  LPI (Low Power Idle) state must also reset only
                 * on power good, as well as the MTA (Multicast table array).
                 * The SMBus release must also be disabled on LCD reset.
                 */
                if (!(E1000_READ_REG(hw, E1000_FWSM) &
                      E1000_ICH_FWSM_FW_VALID)) {
                        /* Enable proxy to reset only on power good. */
                        hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
                                                    &phy_reg);
                        phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
                        hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
                                                     phy_reg);

                        /* Set bit enable LPI (EEE) to reset only on
                         * power good.
                         */
                        hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
                        phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
                        hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);

                        /* Disable the SMB release on LCD reset. */
                        hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
                        phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
                        hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
                }

                /* Enable MTA to reset for Intel Rapid Start Technology
                 * Support
                 */
                hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
                phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
                hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);

release:
                hw->phy.ops.release(hw);
        }
out:
        /* Commit the accumulated PHY_CTRL bits (GbE disable, LPLU). */
        E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

        if (hw->mac.type == e1000_ich8lan)
                e1000_gig_downshift_workaround_ich8lan(hw);

        if (hw->mac.type >= e1000_pchlan) {
                e1000_oem_bits_config_ich8lan(hw, FALSE);

                /* Reset PHY to activate OEM bits on 82577/8 */
                if (hw->mac.type == e1000_pchlan)
                        e1000_phy_hw_reset_generic(hw);

                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
                        return;
                e1000_write_smbus_addr(hw);
                hw->phy.ops.release(hw);
        }

        return;
}
5751
5752 /**
5753  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5754  *  @hw: pointer to the HW structure
5755  *
5756  *  During Sx to S0 transitions on non-managed devices or managed devices
5757  *  on which PHY resets are not blocked, if the PHY registers cannot be
5758  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
5759  *  the PHY.
5760  *  On i217, setup Intel Rapid Start Technology.
5761  **/
u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
{
        /* NOTE(review): return type is u32 but s32 error codes (possibly
         * negative) are returned through it - callers appear to treat this
         * as a status word; confirm before changing.
         */
        s32 ret_val;

        DEBUGFUNC("e1000_resume_workarounds_pchlan");
        /* Nothing to do on parts older than pch2lan. */
        if (hw->mac.type < e1000_pch2lan)
                return E1000_SUCCESS;

        ret_val = e1000_init_phy_workarounds_pchlan(hw);
        if (ret_val) {
                DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
                return ret_val;
        }

        /* For i217 Intel Rapid Start Technology support when the system
         * is transitioning from Sx and no manageability engine is present
         * configure SMBus to restore on reset, disable proxy, and enable
         * the reset on MTA (Multicast table array).
         */
        if (hw->phy.type == e1000_phy_i217) {
                u16 phy_reg;

                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val) {
                        DEBUGOUT("Failed to setup iRST\n");
                        return ret_val;
                }

                /* Clear Auto Enable LPI after link up */
                hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
                phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
                hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);

                if (!(E1000_READ_REG(hw, E1000_FWSM) &
                    E1000_ICH_FWSM_FW_VALID)) {
                        /* Restore clear on SMB if no manageability engine
                         * is present
                         */
                        ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
                                                              &phy_reg);
                        if (ret_val)
                                goto release;
                        phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
                        hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);

                        /* Disable Proxy */
                        hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
                }
                /* Enable reset on MTA */
                ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
                                                      &phy_reg);
                if (ret_val)
                        goto release;
                phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
                hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
release:
                /* Common exit: log any error, always release the PHY. */
                if (ret_val)
                        DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
                hw->phy.ops.release(hw);
                return ret_val;
        }
        return E1000_SUCCESS;
}
5825
5826 /**
5827  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5828  *  @hw: pointer to the HW structure
5829  *
5830  *  Return the LED back to the default configuration.
5831  **/
5832 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5833 {
5834         DEBUGFUNC("e1000_cleanup_led_ich8lan");
5835
5836         if (hw->phy.type == e1000_phy_ife)
5837                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5838                                              0);
5839
5840         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5841         return E1000_SUCCESS;
5842 }
5843
5844 /**
5845  *  e1000_led_on_ich8lan - Turn LEDs on
5846  *  @hw: pointer to the HW structure
5847  *
5848  *  Turn on the LEDs.
5849  **/
5850 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5851 {
5852         DEBUGFUNC("e1000_led_on_ich8lan");
5853
5854         if (hw->phy.type == e1000_phy_ife)
5855                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5856                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5857
5858         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5859         return E1000_SUCCESS;
5860 }
5861
5862 /**
5863  *  e1000_led_off_ich8lan - Turn LEDs off
5864  *  @hw: pointer to the HW structure
5865  *
5866  *  Turn off the LEDs.
5867  **/
5868 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5869 {
5870         DEBUGFUNC("e1000_led_off_ich8lan");
5871
5872         if (hw->phy.type == e1000_phy_ife)
5873                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5874                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5875
5876         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5877         return E1000_SUCCESS;
5878 }
5879
5880 /**
5881  *  e1000_setup_led_pchlan - Configures SW controllable LED
5882  *  @hw: pointer to the HW structure
5883  *
5884  *  This prepares the SW controllable LED for use.
5885  **/
5886 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5887 {
5888         DEBUGFUNC("e1000_setup_led_pchlan");
5889
5890         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5891                                      (u16)hw->mac.ledctl_mode1);
5892 }
5893
5894 /**
5895  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5896  *  @hw: pointer to the HW structure
5897  *
5898  *  Return the LED back to the default configuration.
5899  **/
5900 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5901 {
5902         DEBUGFUNC("e1000_cleanup_led_pchlan");
5903
5904         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5905                                      (u16)hw->mac.ledctl_default);
5906 }
5907
5908 /**
5909  *  e1000_led_on_pchlan - Turn LEDs on
5910  *  @hw: pointer to the HW structure
5911  *
5912  *  Turn on the LEDs.
5913  **/
5914 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5915 {
5916         u16 data = (u16)hw->mac.ledctl_mode2;
5917         u32 i, led;
5918
5919         DEBUGFUNC("e1000_led_on_pchlan");
5920
5921         /* If no link, then turn LED on by setting the invert bit
5922          * for each LED that's mode is "link_up" in ledctl_mode2.
5923          */
5924         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5925                 for (i = 0; i < 3; i++) {
5926                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5927                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5928                             E1000_LEDCTL_MODE_LINK_UP)
5929                                 continue;
5930                         if (led & E1000_PHY_LED0_IVRT)
5931                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5932                         else
5933                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5934                 }
5935         }
5936
5937         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5938 }
5939
5940 /**
5941  *  e1000_led_off_pchlan - Turn LEDs off
5942  *  @hw: pointer to the HW structure
5943  *
5944  *  Turn off the LEDs.
5945  **/
5946 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5947 {
5948         u16 data = (u16)hw->mac.ledctl_mode1;
5949         u32 i, led;
5950
5951         DEBUGFUNC("e1000_led_off_pchlan");
5952
5953         /* If no link, then turn LED off by clearing the invert bit
5954          * for each LED that's mode is "link_up" in ledctl_mode1.
5955          */
5956         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5957                 for (i = 0; i < 3; i++) {
5958                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5959                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5960                             E1000_LEDCTL_MODE_LINK_UP)
5961                                 continue;
5962                         if (led & E1000_PHY_LED0_IVRT)
5963                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5964                         else
5965                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5966                 }
5967         }
5968
5969         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5970 }
5971
5972 /**
5973  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5974  *  @hw: pointer to the HW structure
5975  *
5976  *  Read appropriate register for the config done bit for completion status
5977  *  and configure the PHY through s/w for EEPROM-less parts.
5978  *
5979  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5980  *  config done bit, so only an error is logged and continues.  If we were
5981  *  to return with error, EEPROM-less silicon would not be able to be reset
5982  *  or change link.
5983  **/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
        s32 ret_val = E1000_SUCCESS;
        u32 bank = 0;
        u32 status;

        DEBUGFUNC("e1000_get_cfg_done_ich8lan");

        /* Generic config-done polling first; its result is ignored here
         * because EEPROM-less parts are expected to time out (see header).
         */
        e1000_get_cfg_done_generic(hw);

        /* Wait for indication from h/w that it has completed basic config */
        if (hw->mac.type >= e1000_ich10lan) {
                e1000_lan_init_done_ich8lan(hw);
        } else {
                ret_val = e1000_get_auto_rd_done_generic(hw);
                if (ret_val) {
                        /* When auto config read does not complete, do not
                         * return with an error. This can happen in situations
                         * where there is no eeprom and prevents getting link.
                         */
                        DEBUGOUT("Auto Read Done did not complete\n");
                        ret_val = E1000_SUCCESS;
                }
        }

        /* Clear PHY Reset Asserted bit */
        status = E1000_READ_REG(hw, E1000_STATUS);
        if (status & E1000_STATUS_PHYRA)
                E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
        else
                DEBUGOUT("PHY Reset Asserted not set - needs delay\n");

        /* If EEPROM is not marked present, init the IGP 3 PHY manually */
        if (hw->mac.type <= e1000_ich9lan) {
                if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
                    (hw->phy.type == e1000_phy_igp_3)) {
                        e1000_phy_init_script_igp3(hw);
                }
        } else {
                /* ICH10 and newer detect a missing EEPROM via the NVM bank
                 * check and report a config error instead.
                 */
                if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
                        /* Maybe we should do a basic PHY config */
                        DEBUGOUT("EEPROM not present\n");
                        ret_val = -E1000_ERR_CONFIG;
                }
        }

        return ret_val;
}
6032
6033 /**
6034  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
6035  * @hw: pointer to the HW structure
6036  *
6037  * In the case of a PHY power down to save power, or to turn off link during a
6038  * driver unload, or wake on lan is not enabled, remove the link.
6039  **/
6040 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
6041 {
6042         /* If the management interface is not enabled, then power down */
6043         if (!(hw->mac.ops.check_mng_mode(hw) ||
6044               hw->phy.ops.check_reset_block(hw)))
6045                 e1000_power_down_phy_copper(hw);
6046
6047         return;
6048 }
6049
6050 /**
6051  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
6052  *  @hw: pointer to the HW structure
6053  *
6054  *  Clears hardware counters specific to the silicon family and calls
6055  *  clear_hw_cntrs_generic to clear all general purpose counters.
6056  **/
6057 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
6058 {
6059         u16 phy_data;
6060         s32 ret_val;
6061
6062         DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
6063
6064         e1000_clear_hw_cntrs_base_generic(hw);
6065
6066         E1000_READ_REG(hw, E1000_ALGNERRC);
6067         E1000_READ_REG(hw, E1000_RXERRC);
6068         E1000_READ_REG(hw, E1000_TNCRS);
6069         E1000_READ_REG(hw, E1000_CEXTERR);
6070         E1000_READ_REG(hw, E1000_TSCTC);
6071         E1000_READ_REG(hw, E1000_TSCTFC);
6072
6073         E1000_READ_REG(hw, E1000_MGTPRC);
6074         E1000_READ_REG(hw, E1000_MGTPDC);
6075         E1000_READ_REG(hw, E1000_MGTPTC);
6076
6077         E1000_READ_REG(hw, E1000_IAC);
6078         E1000_READ_REG(hw, E1000_ICRXOC);
6079
6080         /* Clear PHY statistics registers */
6081         if ((hw->phy.type == e1000_phy_82578) ||
6082             (hw->phy.type == e1000_phy_82579) ||
6083             (hw->phy.type == e1000_phy_i217) ||
6084             (hw->phy.type == e1000_phy_82577)) {
6085                 ret_val = hw->phy.ops.acquire(hw);
6086                 if (ret_val)
6087                         return;
6088                 ret_val = hw->phy.ops.set_page(hw,
6089                                                HV_STATS_PAGE << IGP_PAGE_SHIFT);
6090                 if (ret_val)
6091                         goto release;
6092                 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
6093                 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
6094                 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
6095                 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
6096                 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
6097                 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
6098                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
6099                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
6100                 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
6101                 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
6102                 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
6103                 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
6104                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
6105                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
6106 release:
6107                 hw->phy.ops.release(hw);
6108         }
6109 }
6110