/*
 * Source: FreeBSD stable/10 — sys/dev/e1000/e1000_ich8lan.c (MFC r359968).
 */
1 /******************************************************************************
2
3   Copyright (c) 2001-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 /* 82562G 10/100 Network Connection
36  * 82562G-2 10/100 Network Connection
37  * 82562GT 10/100 Network Connection
38  * 82562GT-2 10/100 Network Connection
39  * 82562V 10/100 Network Connection
40  * 82562V-2 10/100 Network Connection
41  * 82566DC-2 Gigabit Network Connection
42  * 82566DC Gigabit Network Connection
43  * 82566DM-2 Gigabit Network Connection
44  * 82566DM Gigabit Network Connection
45  * 82566MC Gigabit Network Connection
46  * 82566MM Gigabit Network Connection
47  * 82567LM Gigabit Network Connection
48  * 82567LF Gigabit Network Connection
49  * 82567V Gigabit Network Connection
50  * 82567LM-2 Gigabit Network Connection
51  * 82567LF-2 Gigabit Network Connection
52  * 82567V-2 Gigabit Network Connection
53  * 82567LF-3 Gigabit Network Connection
54  * 82567LM-3 Gigabit Network Connection
55  * 82567LM-4 Gigabit Network Connection
56  * 82577LM Gigabit Network Connection
57  * 82577LC Gigabit Network Connection
58  * 82578DM Gigabit Network Connection
59  * 82578DC Gigabit Network Connection
60  * 82579LM Gigabit Network Connection
61  * 82579V Gigabit Network Connection
62  * Ethernet Connection I217-LM
63  * Ethernet Connection I217-V
64  * Ethernet Connection I218-V
65  * Ethernet Connection I218-LM
66  * Ethernet Connection (2) I218-LM
67  * Ethernet Connection (2) I218-V
68  * Ethernet Connection (3) I218-LM
69  * Ethernet Connection (3) I218-V
70  */
71
72 #include "e1000_api.h"
73
/* Forward declarations for the ICH8/PCH-family helpers defined later in this
 * file.  They are installed into the hw->phy/nvm/mac ops tables by the
 * e1000_init_*_params_ich8lan()/pchlan() initialization routines below.
 */
static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
                                              u8 *mc_addr_list,
                                              u32 mc_addr_count);
static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
                                            bool active);
static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
                                            bool active);
static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
                                   u16 words, u16 *data);
static s32  e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
                               u16 *data);
static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
                                    u16 words, u16 *data);
static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
                                            u16 *data);
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
                                           u16 *speed, u16 *duplex);
static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
                                          u32 offset, u8 *data);
static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                                          u8 size, u16 *data);
static s32  e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
                                            u32 *data);
static s32  e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
                                           u32 offset, u32 *data);
static s32  e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
                                             u32 offset, u32 data);
static s32  e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
                                                  u32 offset, u32 dword);
static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
                                          u32 offset, u16 *data);
static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
                                                 u32 offset, u8 byte);
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
148
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
        struct ich8_hsfsts {
                u16 flcdone:1; /* bit 0 Flash Cycle Done */
                u16 flcerr:1; /* bit 1 Flash Cycle Error */
                u16 dael:1; /* bit 2 Direct Access error Log */
                u16 berasesz:2; /* bit 4:3 Sector Erase Size */
                u16 flcinprog:1; /* bit 5 flash cycle in Progress */
                u16 reserved1:2; /* bit 7:6 Reserved */
                u16 reserved2:6; /* bit 13:8 Reserved */
                u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
                u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
        } hsf_status;
        u16 regval; /* raw 16-bit register image; aliases the bitfields */
};
165
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
        struct ich8_hsflctl {
                u16 flcgo:1;   /* 0 Flash Cycle Go */
                u16 flcycle:2;   /* 2:1 Flash Cycle (read/write/erase) */
                u16 reserved:5;   /* 7:3 Reserved  */
                u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
                /* NOTE(review): bits 15:10 are reserved per the comment, but
                 * the field is named flockdn — presumably historical; the
                 * driver does not appear to rely on these bits here.
                 */
                u16 flockdn:6;   /* 15:10 Reserved */
        } hsf_ctrl;
        u16 regval; /* raw 16-bit register image; aliases the bitfields */
};
178
/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
        struct ich8_flracc {
                u32 grra:8; /* 0:7 GbE region Read Access */
                u32 grwa:8; /* 8:15 GbE region Write Access */
                u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
                u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
        } hsf_flregacc;
        /* NOTE(review): regval is u16 while the bitfields above span 32 bits,
         * so regval aliases only the low half (grra/grwa on little-endian).
         * This matches the upstream Intel shared code — confirm before
         * changing the width.
         */
        u16 regval;
};
189
/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 *  otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Returns TRUE if the PHY responded with a usable ID, FALSE otherwise.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
        u16 phy_reg = 0;
        u32 phy_id = 0;
        s32 ret_val = 0;
        u16 retry_count;
        u32 mac_reg = 0;

        /* Try up to twice to assemble a PHY ID from PHY_ID1/PHY_ID2.
         * A read error or an all-ones (0xFFFF) value means the register
         * access failed, so restart the attempt.
         */
        for (retry_count = 0; retry_count < 2; retry_count++) {
                ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
                if (ret_val || (phy_reg == 0xFFFF))
                        continue;
                phy_id = (u32)(phy_reg << 16);

                ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
                if (ret_val || (phy_reg == 0xFFFF)) {
                        phy_id = 0;
                        continue;
                }
                phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
                break;
        }

        /* Known ID (resume path): only a match counts as accessible.
         * Unknown ID: adopt whatever valid ID was just read.
         */
        if (hw->phy.id) {
                if  (hw->phy.id == phy_id)
                        goto out;
        } else if (phy_id) {
                hw->phy.id = phy_id;
                hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
                goto out;
        }

        /* In case the PHY needs to be in mdio slow mode,
         * set slow mode and try to get the PHY id again.
         */
        if (hw->mac.type < e1000_pch_lpt) {
                /* Drop the semaphore around the helpers, which acquire it
                 * themselves, then re-take it to restore the caller's state.
                 */
                hw->phy.ops.release(hw);
                ret_val = e1000_set_mdio_slow_mode_hv(hw);
                if (!ret_val)
                        ret_val = e1000_get_phy_id(hw);
                hw->phy.ops.acquire(hw);
        }

        if (ret_val)
                return FALSE;
out:
        if (hw->mac.type >= e1000_pch_lpt) {
                /* Only unforce SMBus if ME is not active */
                if (!(E1000_READ_REG(hw, E1000_FWSM) &
                    E1000_ICH_FWSM_FW_VALID)) {
                        /* Unforce SMBus mode in PHY */
                        hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
                        phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
                        hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

                        /* Unforce SMBus mode in MAC */
                        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
                        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
                }
        }

        return TRUE;
}
264
/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the PHY to a quiescent state when necessary.
 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
        u32 mac_reg;

        DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

        /* Set Phy Config Counter to 50msec */
        mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
        mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
        mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
        E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

        /* Toggle LANPHYPC Value bit: assert override with the value driven
         * low, flush, hold for 1 ms, then release the override.
         */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL);
        mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
        mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
        E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
        E1000_WRITE_FLUSH(hw);
        msec_delay(1);
        mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
        E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
        E1000_WRITE_FLUSH(hw);

        if (hw->mac.type < e1000_pch_lpt) {
                /* Older parts: fixed 50 ms settle time */
                msec_delay(50);
        } else {
                /* LPT and newer: poll CTRL_EXT.LPCD (up to ~100 ms in 5 ms
                 * steps), then allow an additional 30 ms to settle.
                 */
                u16 count = 20;

                do {
                        msec_delay(5);
                } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
                           E1000_CTRL_EXT_LPCD) && count--);

                msec_delay(30);
        }
}
308
/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.  Ensures the MAC-PHY interconnect is out of SMBus
 *  mode, power-cycles the PHY via LANPHYPC when needed, and leaves the
 *  PHY reset into a known good state.
 *
 *  Returns E1000_SUCCESS, or -E1000_ERR_PHY if the PHY remains
 *  inaccessible or the required reset is blocked by ME.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
        u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
        s32 ret_val;

        DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

        /* Gate automatic PHY configuration by hardware on managed and
         * non-managed 82579 and newer adapters.
         */
        e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

        /* It is not possible to be certain of the current state of ULP
         * so forcibly disable it.
         */
        hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
        e1000_disable_ulp_lpt_lp(hw, TRUE);

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val) {
                DEBUGOUT("Failed to initialize PHY flow\n");
                goto out;
        }

        /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
         * inaccessible and resetting the PHY is not blocked, toggle the
         * LANPHYPC Value bit to force the interconnect to PCIe mode.
         *
         * Each case deliberately falls through to the next, escalating the
         * recovery steps for successively older MAC types.
         */
        switch (hw->mac.type) {
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
                if (e1000_phy_is_accessible_pchlan(hw))
                        break;

                /* Before toggling LANPHYPC, see if PHY is accessible by
                 * forcing MAC to SMBus mode first.
                 */
                mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
                E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

                /* Wait 50 milliseconds for MAC to finish any retries
                 * that it might be trying to perform from previous
                 * attempts to acknowledge any phy read requests.
                 */
                msec_delay(50);

                /* fall-through */
        case e1000_pch2lan:
                if (e1000_phy_is_accessible_pchlan(hw))
                        break;

                /* fall-through */
        case e1000_pchlan:
                /* On pchlan with valid ME firmware the PHY is managed;
                 * nothing more to do.
                 */
                if ((hw->mac.type == e1000_pchlan) &&
                    (fwsm & E1000_ICH_FWSM_FW_VALID))
                        break;

                if (hw->phy.ops.check_reset_block(hw)) {
                        DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
                        ret_val = -E1000_ERR_PHY;
                        break;
                }

                /* Toggle LANPHYPC Value bit */
                e1000_toggle_lanphypc_pch_lpt(hw);
                if (hw->mac.type >= e1000_pch_lpt) {
                        if (e1000_phy_is_accessible_pchlan(hw))
                                break;

                        /* Toggling LANPHYPC brings the PHY out of SMBus mode
                         * so ensure that the MAC is also out of SMBus mode
                         */
                        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
                        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

                        if (e1000_phy_is_accessible_pchlan(hw))
                                break;

                        ret_val = -E1000_ERR_PHY;
                }
                break;
        default:
                break;
        }

        hw->phy.ops.release(hw);
        if (!ret_val) {

                /* Check to see if able to reset PHY.  Print error if not */
                if (hw->phy.ops.check_reset_block(hw)) {
                        ERROR_REPORT("Reset blocked by ME\n");
                        goto out;
                }

                /* Reset the PHY before any access to it.  Doing so, ensures
                 * that the PHY is in a known good state before we read/write
                 * PHY registers.  The generic reset is sufficient here,
                 * because we haven't determined the PHY type yet.
                 */
                ret_val = e1000_phy_hw_reset_generic(hw);
                if (ret_val)
                        goto out;

                /* On a successful reset, possibly need to wait for the PHY
                 * to quiesce to an accessible state before returning control
                 * to the calling function.  If the PHY does not quiesce, then
                 * return E1000E_BLK_PHY_RESET, as this is the condition that
                 *  the PHY is in.
                 */
                ret_val = hw->phy.ops.check_reset_block(hw);
                if (ret_val)
                        ERROR_REPORT("ME blocked access to PHY after reset\n");
        }

out:
        /* Ungate automatic PHY configuration on non-managed 82579 */
        if ((hw->mac.type == e1000_pch2lan) &&
            !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
                msec_delay(10);
                e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
        }

        return ret_val;
}
443
/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers
 *  for PCH (82577/82578 and newer) parts, run the PHY init
 *  workarounds, identify the PHY, and wire the PHY-type-specific ops.
 *
 *  Returns E1000_SUCCESS, or -E1000_ERR_PHY for an unrecognized PHY.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
        struct e1000_phy_info *phy = &hw->phy;
        s32 ret_val;

        DEBUGFUNC("e1000_init_phy_params_pchlan");

        phy->addr               = 1;
        phy->reset_delay_us     = 100;

        phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
        phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
        phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
        phy->ops.set_page       = e1000_set_page_igp;
        phy->ops.read_reg       = e1000_read_phy_reg_hv;
        phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
        phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
        phy->ops.release        = e1000_release_swflag_ich8lan;
        phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
        phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
        phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
        phy->ops.write_reg      = e1000_write_phy_reg_hv;
        phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
        phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
        phy->ops.power_up       = e1000_power_up_phy_copper;
        phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
        phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;

        phy->id = e1000_phy_unknown;

        /* May set phy->id as a side effect of probing accessibility */
        ret_val = e1000_init_phy_workarounds_pchlan(hw);
        if (ret_val)
                return ret_val;

        /* If the workarounds did not identify the PHY, retry here.
         * Note the switch intentionally falls through from default:
         * into the pch2lan-and-newer cases when the plain ID read
         * yields an invalid ID, escalating to mdio slow mode.
         */
        if (phy->id == e1000_phy_unknown)
                switch (hw->mac.type) {
                default:
                        ret_val = e1000_get_phy_id(hw);
                        if (ret_val)
                                return ret_val;
                        if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
                                break;
                        /* fall-through */
                case e1000_pch2lan:
                case e1000_pch_lpt:
                case e1000_pch_spt:
                case e1000_pch_cnp:
                        /* In case the PHY needs to be in mdio slow mode,
                         * set slow mode and try to get the PHY id again.
                         */
                        ret_val = e1000_set_mdio_slow_mode_hv(hw);
                        if (ret_val)
                                return ret_val;
                        ret_val = e1000_get_phy_id(hw);
                        if (ret_val)
                                return ret_val;
                        break;
                }
        phy->type = e1000_get_phy_type_from_id(phy->id);

        /* Wire the PHY-type-specific operations */
        switch (phy->type) {
        case e1000_phy_82577:
        case e1000_phy_82579:
        case e1000_phy_i217:
                phy->ops.check_polarity = e1000_check_polarity_82577;
                phy->ops.force_speed_duplex =
                        e1000_phy_force_speed_duplex_82577;
                phy->ops.get_cable_length = e1000_get_cable_length_82577;
                phy->ops.get_info = e1000_get_phy_info_82577;
                phy->ops.commit = e1000_phy_sw_reset_generic;
                break;
        case e1000_phy_82578:
                phy->ops.check_polarity = e1000_check_polarity_m88;
                phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
                phy->ops.get_cable_length = e1000_get_cable_length_m88;
                phy->ops.get_info = e1000_get_phy_info_m88;
                break;
        default:
                ret_val = -E1000_ERR_PHY;
                break;
        }

        return ret_val;
}
534
535 /**
536  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
537  *  @hw: pointer to the HW structure
538  *
539  *  Initialize family-specific PHY parameters and function pointers.
540  **/
541 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
542 {
543         struct e1000_phy_info *phy = &hw->phy;
544         s32 ret_val;
545         u16 i = 0;
546
547         DEBUGFUNC("e1000_init_phy_params_ich8lan");
548
549         phy->addr               = 1;
550         phy->reset_delay_us     = 100;
551
552         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
553         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
554         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
555         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
556         phy->ops.read_reg       = e1000_read_phy_reg_igp;
557         phy->ops.release        = e1000_release_swflag_ich8lan;
558         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
559         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
560         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
561         phy->ops.write_reg      = e1000_write_phy_reg_igp;
562         phy->ops.power_up       = e1000_power_up_phy_copper;
563         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
564
565         /* We may need to do this twice - once for IGP and if that fails,
566          * we'll set BM func pointers and try again
567          */
568         ret_val = e1000_determine_phy_address(hw);
569         if (ret_val) {
570                 phy->ops.write_reg = e1000_write_phy_reg_bm;
571                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
572                 ret_val = e1000_determine_phy_address(hw);
573                 if (ret_val) {
574                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
575                         return ret_val;
576                 }
577         }
578
579         phy->id = 0;
580         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
581                (i++ < 100)) {
582                 msec_delay(1);
583                 ret_val = e1000_get_phy_id(hw);
584                 if (ret_val)
585                         return ret_val;
586         }
587
588         /* Verify phy id */
589         switch (phy->id) {
590         case IGP03E1000_E_PHY_ID:
591                 phy->type = e1000_phy_igp_3;
592                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
593                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
594                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
595                 phy->ops.get_info = e1000_get_phy_info_igp;
596                 phy->ops.check_polarity = e1000_check_polarity_igp;
597                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
598                 break;
599         case IFE_E_PHY_ID:
600         case IFE_PLUS_E_PHY_ID:
601         case IFE_C_E_PHY_ID:
602                 phy->type = e1000_phy_ife;
603                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
604                 phy->ops.get_info = e1000_get_phy_info_ife;
605                 phy->ops.check_polarity = e1000_check_polarity_ife;
606                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
607                 break;
608         case BME1000_E_PHY_ID:
609                 phy->type = e1000_phy_bm;
610                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
611                 phy->ops.read_reg = e1000_read_phy_reg_bm;
612                 phy->ops.write_reg = e1000_write_phy_reg_bm;
613                 phy->ops.commit = e1000_phy_sw_reset_generic;
614                 phy->ops.get_info = e1000_get_phy_info_m88;
615                 phy->ops.check_polarity = e1000_check_polarity_m88;
616                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
617                 break;
618         default:
619                 return -E1000_ERR_PHY;
620                 break;
621         }
622
623         return E1000_SUCCESS;
624 }
625
/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function
 *  pointers: compute the flash base address and per-bank size
 *  (from STRAP on SPT+ parts, from GFPREG otherwise), reset the
 *  shadow RAM, initialize the NVM mutexes, and wire the NVM ops.
 *
 *  Returns E1000_SUCCESS, or -E1000_ERR_CONFIG when the flash
 *  register window is not mapped on pre-SPT parts.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
        struct e1000_nvm_info *nvm = &hw->nvm;
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
        u32 gfpreg, sector_base_addr, sector_end_addr;
        u16 i;
        u32 nvm_size;

        DEBUGFUNC("e1000_init_nvm_params_ich8lan");

        nvm->type = e1000_nvm_flash_sw;

        if (hw->mac.type >= e1000_pch_spt) {
                /* in SPT, gfpreg doesn't exist. NVM size is taken from the
                 * STRAP register. This is because in SPT the GbE Flash region
                 * is no longer accessed through the flash registers. Instead,
                 * the mechanism has changed, and the Flash region access
                 * registers are now implemented in GbE memory space.
                 */
                nvm->flash_base_addr = 0;
                /* STRAP bits 5:1 encode the size in NVM_SIZE_MULTIPLIER
                 * units, biased by one.
                 */
                nvm_size =
                    (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
                    * NVM_SIZE_MULTIPLIER;
                /* Half the flash holds each of the two NVM banks */
                nvm->flash_bank_size = nvm_size / 2;
                /* Adjust to word count */
                nvm->flash_bank_size /= sizeof(u16);
                /* Set the base address for flash register access */
                hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
        } else {
                /* Can't read flash registers if register set isn't mapped. */
                if (!hw->flash_address) {
                        DEBUGOUT("ERROR: Flash registers not mapped\n");
                        return -E1000_ERR_CONFIG;
                }

                gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

                /* sector_X_addr is a "sector"-aligned address (4096 bytes)
                 * Add 1 to sector_end_addr since this sector is included in
                 * the overall size.
                 */
                sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
                sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

                /* flash_base_addr is byte-aligned */
                nvm->flash_base_addr = sector_base_addr
                                       << FLASH_SECTOR_ADDR_SHIFT;

                /* find total size of the NVM, then cut in half since the total
                 * size represents two separate NVM banks.
                 */
                nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
                                        << FLASH_SECTOR_ADDR_SHIFT);
                nvm->flash_bank_size /= 2;
                /* Adjust to word count */
                nvm->flash_bank_size /= sizeof(u16);
        }

        nvm->word_size = E1000_SHADOW_RAM_WORDS;

        /* Clear shadow ram */
        for (i = 0; i < nvm->word_size; i++) {
                dev_spec->shadow_ram[i].modified = FALSE;
                dev_spec->shadow_ram[i].value    = 0xFFFF;
        }

        E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
        E1000_MUTEX_INIT(&dev_spec->swflag_mutex);

        /* Function Pointers */
        nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
        nvm->ops.release        = e1000_release_nvm_ich8lan;
        if (hw->mac.type >= e1000_pch_spt) {
                /* SPT+ uses the memory-space flash access variants */
                nvm->ops.read   = e1000_read_nvm_spt;
                nvm->ops.update = e1000_update_nvm_checksum_spt;
        } else {
                nvm->ops.read   = e1000_read_nvm_ich8lan;
                nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
        }
        nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
        nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
        nvm->ops.write          = e1000_write_nvm_ich8lan;

        return E1000_SUCCESS;
}
718
719 /**
720  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
721  *  @hw: pointer to the HW structure
722  *
723  *  Initialize family-specific MAC parameters and function
724  *  pointers.
725  **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
        struct e1000_mac_info *mac = &hw->mac;

        DEBUGFUNC("e1000_init_mac_params_ich8lan");

        /* Set media type function pointer */
        hw->phy.media_type = e1000_media_type_copper;

        /* Set mta register count */
        mac->mta_reg_count = 32;
        /* Set rar entry count */
        mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
        if (mac->type == e1000_ich8lan)
                /* ICH8 exposes one less receive address register */
                mac->rar_entry_count--;
        /* Set if part includes ASF firmware */
        mac->asf_firmware_present = TRUE;
        /* FWSM register */
        mac->has_fwsm = TRUE;
        /* ARC subsystem not supported */
        mac->arc_subsystem_valid = FALSE;
        /* Adaptive IFS supported */
        mac->adaptive_ifs = TRUE;

        /* Function pointers */

        /* bus type/speed/width */
        mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
        /* function id */
        mac->ops.set_lan_id = e1000_set_lan_id_single_port;
        /* reset */
        mac->ops.reset_hw = e1000_reset_hw_ich8lan;
        /* hw initialization */
        mac->ops.init_hw = e1000_init_hw_ich8lan;
        /* link setup */
        mac->ops.setup_link = e1000_setup_link_ich8lan;
        /* physical interface setup */
        mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
        /* check for link */
        mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
        /* link info */
        mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
        /* multicast address update */
        mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
        /* clear hardware counters */
        mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

        /* LED and other operations.  Note the deliberate fall-throughs:
         * pch2lan inherits the lpt/spt/cnp assignments, which in turn
         * inherit the pchlan assignments.
         */
        switch (mac->type) {
        case e1000_ich8lan:
        case e1000_ich9lan:
        case e1000_ich10lan:
                /* check management mode */
                mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
                /* ID LED init */
                mac->ops.id_led_init = e1000_id_led_init_generic;
                /* blink LED */
                mac->ops.blink_led = e1000_blink_led_generic;
                /* setup LED */
                mac->ops.setup_led = e1000_setup_led_generic;
                /* cleanup LED */
                mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
                /* turn on/off LED */
                mac->ops.led_on = e1000_led_on_ich8lan;
                mac->ops.led_off = e1000_led_off_ich8lan;
                break;
        case e1000_pch2lan:
                mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
                mac->ops.rar_set = e1000_rar_set_pch2lan;
                /* fall-through */
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
                /* multicast address update for pch2 */
                mac->ops.update_mc_addr_list =
                        e1000_update_mc_addr_list_pch2lan;
                /* fall-through */
        case e1000_pchlan:
                /* check management mode */
                mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
                /* ID LED init */
                mac->ops.id_led_init = e1000_id_led_init_pchlan;
                /* setup LED */
                mac->ops.setup_led = e1000_setup_led_pchlan;
                /* cleanup LED */
                mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
                /* turn on/off LED */
                mac->ops.led_on = e1000_led_on_pchlan;
                mac->ops.led_off = e1000_led_off_pchlan;
                break;
        default:
                break;
        }

        /* LPT and newer parts use a larger RAR table and LPT-specific
         * copper-link/OBFF ops; this intentionally overrides the defaults
         * assigned above.
         */
        if (mac->type >= e1000_pch_lpt) {
                mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
                mac->ops.rar_set = e1000_rar_set_pch_lpt;
                mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
                mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
        }

        /* Enable PCS Lock-loss workaround for ICH8 */
        if (mac->type == e1000_ich8lan)
                e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);

        return E1000_SUCCESS;
}
833
834 /**
835  *  __e1000_access_emi_reg_locked - Read/write EMI register
836  *  @hw: pointer to the HW structure
 *  @address: EMI address to program
838  *  @data: pointer to value to read/write from/to the EMI address
839  *  @read: boolean flag to indicate read or write
840  *
841  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
842  **/
843 static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
844                                          u16 *data, bool read)
845 {
846         s32 ret_val;
847
848         DEBUGFUNC("__e1000_access_emi_reg_locked");
849
850         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
851         if (ret_val)
852                 return ret_val;
853
854         if (read)
855                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
856                                                       data);
857         else
858                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
859                                                        *data);
860
861         return ret_val;
862 }
863
864 /**
865  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
866  *  @hw: pointer to the HW structure
867  *  @addr: EMI address to program
 *  @data: pointer to storage for the value read from the EMI address
869  *
870  *  Assumes the SW/FW/HW Semaphore is already acquired.
871  **/
872 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
873 {
874         DEBUGFUNC("e1000_read_emi_reg_locked");
875
876         return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
877 }
878
879 /**
880  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
881  *  @hw: pointer to the HW structure
882  *  @addr: EMI address to program
883  *  @data: value to be written to the EMI address
884  *
885  *  Assumes the SW/FW/HW Semaphore is already acquired.
886  **/
887 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
888 {
889         DEBUGFUNC("e1000_read_emi_reg_locked");
890
891         return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
892 }
893
894 /**
895  *  e1000_set_eee_pchlan - Enable/disable EEE support
896  *  @hw: pointer to the HW structure
897  *
898  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
899  *  the link and the EEE capabilities of the link partner.  The LPI Control
900  *  register bits will remain set only if/when link is up.
901  *
902  *  EEE LPI must not be asserted earlier than one second after link is up.
903  *  On 82579, EEE LPI should not be enabled until such time otherwise there
904  *  can be link issues with some switches.  Other devices can have EEE LPI
905  *  enabled immediately upon link up since they have a timer in hardware which
906  *  prevents LPI from being asserted too early.
907  **/
908 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
909 {
910         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
911         s32 ret_val;
912         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
913
914         DEBUGFUNC("e1000_set_eee_pchlan");
915
916         switch (hw->phy.type) {
917         case e1000_phy_82579:
918                 lpa = I82579_EEE_LP_ABILITY;
919                 pcs_status = I82579_EEE_PCS_STATUS;
920                 adv_addr = I82579_EEE_ADVERTISEMENT;
921                 break;
922         case e1000_phy_i217:
923                 lpa = I217_EEE_LP_ABILITY;
924                 pcs_status = I217_EEE_PCS_STATUS;
925                 adv_addr = I217_EEE_ADVERTISEMENT;
926                 break;
927         default:
928                 return E1000_SUCCESS;
929         }
930
931         ret_val = hw->phy.ops.acquire(hw);
932         if (ret_val)
933                 return ret_val;
934
935         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
936         if (ret_val)
937                 goto release;
938
939         /* Clear bits that enable EEE in various speeds */
940         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
941
942         /* Enable EEE if not disabled by user */
943         if (!dev_spec->eee_disable) {
944                 /* Save off link partner's EEE ability */
945                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
946                                                     &dev_spec->eee_lp_ability);
947                 if (ret_val)
948                         goto release;
949
950                 /* Read EEE advertisement */
951                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
952                 if (ret_val)
953                         goto release;
954
955                 /* Enable EEE only for speeds in which the link partner is
956                  * EEE capable and for which we advertise EEE.
957                  */
958                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
959                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
960
961                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
962                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
963                         if (data & NWAY_LPAR_100TX_FD_CAPS)
964                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
965                         else
966                                 /* EEE is not supported in 100Half, so ignore
967                                  * partner's EEE in 100 ability if full-duplex
968                                  * is not advertised.
969                                  */
970                                 dev_spec->eee_lp_ability &=
971                                     ~I82579_EEE_100_SUPPORTED;
972                 }
973         }
974
975         if (hw->phy.type == e1000_phy_82579) {
976                 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
977                                                     &data);
978                 if (ret_val)
979                         goto release;
980
981                 data &= ~I82579_LPI_100_PLL_SHUT;
982                 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
983                                                      data);
984         }
985
986         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
987         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
988         if (ret_val)
989                 goto release;
990
991         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
992 release:
993         hw->phy.ops.release(hw);
994
995         return ret_val;
996 }
997
998 /**
999  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
1000  *  @hw:   pointer to the HW structure
1001  *  @link: link up bool flag
1002  *
1003  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
1004  *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
1006  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
1007  *  speeds in order to avoid Tx hangs.
1008  **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
        u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
        u32 status = E1000_READ_REG(hw, E1000_STATUS);
        s32 ret_val = E1000_SUCCESS;
        u16 reg;

        if (link && (status & E1000_STATUS_SPEED_1000)) {
                /* Link up at 1Gbps: disable K1, force the PLL clock request
                 * via FEXTNVM6, then restore the saved K1 configuration.
                 */
                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
                        return ret_val;

                /* Save the current K1 configuration */
                ret_val =
                    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
                                               &reg);
                if (ret_val)
                        goto release;

                /* Disable K1 before touching the clock request bit */
                ret_val =
                    e1000_write_kmrn_reg_locked(hw,
                                                E1000_KMRNCTRLSTA_K1_CONFIG,
                                                reg &
                                                ~E1000_KMRNCTRLSTA_K1_ENABLE);
                if (ret_val)
                        goto release;

                /* brief settle delay per the workaround sequence */
                usec_delay(10);

                E1000_WRITE_REG(hw, E1000_FEXTNVM6,
                                fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

                /* Restore the saved K1 configuration */
                ret_val =
                    e1000_write_kmrn_reg_locked(hw,
                                                E1000_KMRNCTRLSTA_K1_CONFIG,
                                                reg);
release:
                hw->phy.ops.release(hw);
        } else {
                /* clear FEXTNVM6 bit 8 on link down or 10/100 */
                fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

                /* The inband Tx timeout tuning below is only applied for
                 * PHY revisions <= 5 when the link is up at 10Mbps or
                 * 100Half; otherwise just write back FEXTNVM6.
                 */
                if ((hw->phy.revision > 5) || !link ||
                    ((status & E1000_STATUS_SPEED_100) &&
                     (status & E1000_STATUS_FD)))
                        goto update_fextnvm6;

                ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
                if (ret_val)
                        return ret_val;

                /* Clear link status transmit timeout */
                reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

                if (status & E1000_STATUS_SPEED_100) {
                        /* Set inband Tx timeout to 5x10us for 100Half */
                        reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

                        /* Do not extend the K1 entry latency for 100Half */
                        fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
                } else {
                        /* Set inband Tx timeout to 50x10us for 10Full/Half */
                        reg |= 50 <<
                               I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

                        /* Extend the K1 entry latency for 10 Mbps */
                        fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
                }

                ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
                if (ret_val)
                        return ret_val;

update_fextnvm6:
                E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
        }

        return ret_val;
}
1087
1088 static u64 e1000_ltr2ns(u16 ltr)
1089 {
1090         u32 value, scale;
1091
1092         /* Determine the latency in nsec based on the LTR value & scale */
1093         value = ltr & E1000_LTRV_VALUE_MASK;
1094         scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;
1095
1096         return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR));
1097 }
1098
1099 /**
1100  *  e1000_platform_pm_pch_lpt - Set platform power management values
1101  *  @hw: pointer to the HW structure
1102  *  @link: bool indicating link status
1103  *
1104  *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
1105  *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
1106  *  when link is up (which must not exceed the maximum latency supported
1107  *  by the platform), otherwise specify there is no LTR requirement.
1108  *  Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop
1109  *  latencies in the LTR Extended Capability Structure in the PCIe Extended
1110  *  Capability register set, on this device LTR is set by writing the
1111  *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
1112  *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
1113  *  message to the PMC.
1114  *
1115  *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
1116  *  high-water mark.
1117  **/
static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
        /* NOTE(review): if E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT
         * equals 31, left-shifting the promoted (int) bool into the sign
         * bit is formally undefined; casting link to u32 first would be
         * safer — verify the shift values in the defines header.
         */
        u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
                  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
        u16 lat_enc = 0;        /* latency encoded */
        s32 obff_hwm = 0;

        DEBUGFUNC("e1000_platform_pm_pch_lpt");

        if (link) {
                u16 speed, duplex, scale = 0;
                u16 max_snoop, max_nosnoop;
                u16 max_ltr_enc;        /* max LTR latency encoded */
                s64 lat_ns;
                s64 value;
                u32 rxa;

                if (!hw->mac.max_frame_size) {
                        DEBUGOUT("max_frame_size not set.\n");
                        return -E1000_ERR_CONFIG;
                }

                hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
                if (!speed) {
                        DEBUGOUT("Speed not set.\n");
                        return -E1000_ERR_CONFIG;
                }

                /* Rx Packet Buffer Allocation size (KB) */
                rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;

                /* Determine the maximum latency tolerated by the device.
                 *
                 * Per the PCIe spec, the tolerated latencies are encoded as
                 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
                 * a 10-bit value (0-1023) to provide a range from 1 ns to
                 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
                 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
                 */
                lat_ns = ((s64)rxa * 1024 -
                          (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
                if (lat_ns < 0)
                        lat_ns = 0;
                else
                        lat_ns /= speed;
                value = lat_ns;

                /* Normalize into the 10-bit value / 3-bit scale encoding */
                while (value > E1000_LTRV_VALUE_MASK) {
                        scale++;
                        value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
                }
                if (scale > E1000_LTRV_SCALE_MAX) {
                        DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
                        return -E1000_ERR_CONFIG;
                }
                lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);

                /* Determine the maximum latency tolerated by the platform */
                e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
                e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
                max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);

                /* Clamp the device latency to the platform maximum */
                if (lat_enc > max_ltr_enc) {
                        lat_enc = max_ltr_enc;
                        lat_ns = e1000_ltr2ns(max_ltr_enc);
                }

                /* NOTE(review): this appears to convert the latency back
                 * into KB buffered at the link speed to derive the OBFF
                 * high-water mark — confirm the units against rxa (KB).
                 */
                if (lat_ns) {
                        lat_ns *= speed * 1000;
                        lat_ns /= 8;
                        lat_ns /= 1000000000;
                        obff_hwm = (s32)(rxa - lat_ns);
                }
                if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
                        DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
                        return -E1000_ERR_CONFIG;
                }
        }

        /* Set Snoop and No-Snoop latencies the same */
        reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
        E1000_WRITE_REG(hw, E1000_LTRV, reg);

        /* Set OBFF high water mark */
        reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
        reg |= obff_hwm;
        E1000_WRITE_REG(hw, E1000_SVT, reg);

        /* Enable OBFF */
        reg = E1000_READ_REG(hw, E1000_SVCR);
        reg |= E1000_SVCR_OFF_EN;
        /* Always unblock interrupts to the CPU even when the system is
         * in OBFF mode. This ensures that small round-robin traffic
         * (like ping) does not get dropped or experience long latency.
         */
        reg |= E1000_SVCR_OFF_MASKINT;
        E1000_WRITE_REG(hw, E1000_SVCR, reg);

        return E1000_SUCCESS;
}
1218
1219 /**
1220  *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
1221  *  @hw: pointer to the HW structure
1222  *  @itr: interrupt throttling rate
1223  *
1224  *  Configure OBFF with the updated interrupt rate.
1225  **/
1226 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
1227 {
1228         u32 svcr;
1229         s32 timer;
1230
1231         DEBUGFUNC("e1000_set_obff_timer_pch_lpt");
1232
1233         /* Convert ITR value into microseconds for OBFF timer */
1234         timer = itr & E1000_ITR_MASK;
1235         timer = (timer * E1000_ITR_MULT) / 1000;
1236
1237         if ((timer < 0) || (timer > E1000_ITR_MASK)) {
1238                 DEBUGOUT1("Invalid OBFF timer %d\n", timer);
1239                 return -E1000_ERR_CONFIG;
1240         }
1241
1242         svcr = E1000_READ_REG(hw, E1000_SVCR);
1243         svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
1244         svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
1245         E1000_WRITE_REG(hw, E1000_SVCR, svcr);
1246
1247         return E1000_SUCCESS;
1248 }
1249
1250 /**
1251  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1252  *  @hw: pointer to the HW structure
1253  *  @to_sx: boolean indicating a system power state transition to Sx
1254  *
1255  *  When link is down, configure ULP mode to significantly reduce the power
1256  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1257  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1258  *  system, configure the ULP mode by software.
1259  */
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
        u32 mac_reg;
        s32 ret_val = E1000_SUCCESS;
        u16 phy_reg;
        u16 oem_reg = 0;

        /* ULP applies only to LPT and newer parts; the listed device IDs
         * are excluded, and entering ULP twice is a no-op.
         */
        if ((hw->mac.type < e1000_pch_lpt) ||
            (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
            (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
            (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
            (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
            (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
                return 0;

        if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
                /* Request ME configure ULP mode in the PHY */
                mac_reg = E1000_READ_REG(hw, E1000_H2ME);
                mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
                E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

                goto out;
        }

        if (!to_sx) {
                int i = 0;

                /* Poll up to 5 seconds for Cable Disconnected indication
                 * (100 iterations x 50 msec).
                 */
                while (!(E1000_READ_REG(hw, E1000_FEXT) &
                         E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
                        /* Bail if link is re-acquired */
                        if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
                                return -E1000_ERR_PHY;

                        if (i++ == 100)
                                break;

                        msec_delay(50);
                }
                DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
                         (E1000_READ_REG(hw, E1000_FEXT) &
                          E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
                         i * 50);
        }

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                goto out;

        /* Force SMBus mode in PHY */
        ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
        if (ret_val)
                goto release;
        phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
        e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

        /* Force SMBus mode in MAC */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
        mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

        /* Si workaround for ULP entry flow on i217/rev6 h/w.  Enable
         * LPLU and disable Gig speed when entering ULP
         */
        if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
                /* Save HV_OEM_BITS so they can be restored further below */
                ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
                                                       &oem_reg);
                if (ret_val)
                        goto release;

                phy_reg = oem_reg;
                phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;

                ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
                                                        phy_reg);

                if (ret_val)
                        goto release;
        }

        /* Set Inband ULP Exit, Reset to SMBus mode and
         * Disable SMBus Release on PERST# in PHY
         */
        ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
        if (ret_val)
                goto release;
        phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
                    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
        if (to_sx) {
                /* Going to Sx: wake on link change only if WoL asks for it */
                if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
                        phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
                else
                        phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;

                phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
                phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
        } else {
                phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
                phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
                phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
        }
        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

        /* Set Disable SMBus Release on PERST# in MAC */
        mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
        mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
        E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);

        /* Commit ULP changes in PHY by starting auto ULP configuration */
        phy_reg |= I218_ULP_CONFIG1_START;
        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

        /* Restore the OEM bits saved above if link is still up */
        if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
            to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
                ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
                                                        oem_reg);
                if (ret_val)
                        goto release;
        }

release:
        hw->phy.ops.release(hw);
out:
        if (ret_val)
                DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
        else
                hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;

        return ret_val;
}
1390
1391 /**
1392  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1393  *  @hw: pointer to the HW structure
1394  *  @force: boolean indicating whether or not to force disabling ULP
1395  *
1396  *  Un-configure ULP mode when link is up, the system is transitioned from
1397  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1398  *  system, poll for an indication from ME that ULP has been un-configured.
1399  *  If not on an ME enabled system, un-configure the ULP mode by software.
1400  *
1401  *  During nominal operation, this function is called when link is acquired
1402  *  to disable ULP mode (force=FALSE); otherwise, for example when unloading
1403  *  the driver or during Sx->S0 transitions, this is called with force=TRUE
1404  *  to forcibly disable ULP.
1405  */
1406 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1407 {
1408         s32 ret_val = E1000_SUCCESS;
1409         u32 mac_reg;
1410         u16 phy_reg;
1411         int i = 0;
1412
1413         if ((hw->mac.type < e1000_pch_lpt) ||
1414             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1415             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1416             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1417             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1418             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1419                 return 0;
1420
1421         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1422                 if (force) {
1423                         /* Request ME un-configure ULP mode in the PHY */
1424                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1425                         mac_reg &= ~E1000_H2ME_ULP;
1426                         mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1427                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1428                 }
1429
1430                 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
1431                 while (E1000_READ_REG(hw, E1000_FWSM) &
1432                        E1000_FWSM_ULP_CFG_DONE) {
1433                         if (i++ == 30) {
1434                                 ret_val = -E1000_ERR_PHY;
1435                                 goto out;
1436                         }
1437
1438                         msec_delay(10);
1439                 }
1440                 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1441
1442                 if (force) {
1443                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1444                         mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1445                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1446                 } else {
1447                         /* Clear H2ME.ULP after ME ULP configuration */
1448                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1449                         mac_reg &= ~E1000_H2ME_ULP;
1450                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1451                 }
1452
1453                 goto out;
1454         }
1455
1456         ret_val = hw->phy.ops.acquire(hw);
1457         if (ret_val)
1458                 goto out;
1459
1460         if (force)
1461                 /* Toggle LANPHYPC Value bit */
1462                 e1000_toggle_lanphypc_pch_lpt(hw);
1463
1464         /* Unforce SMBus mode in PHY */
1465         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1466         if (ret_val) {
1467                 /* The MAC might be in PCIe mode, so temporarily force to
1468                  * SMBus mode in order to access the PHY.
1469                  */
1470                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1471                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1472                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1473
1474                 msec_delay(50);
1475
1476                 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1477                                                        &phy_reg);
1478                 if (ret_val)
1479                         goto release;
1480         }
1481         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1482         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1483
1484         /* Unforce SMBus mode in MAC */
1485         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1486         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1487         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1488
1489         /* When ULP mode was previously entered, K1 was disabled by the
1490          * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1491          */
1492         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1493         if (ret_val)
1494                 goto release;
1495         phy_reg |= HV_PM_CTRL_K1_ENABLE;
1496         e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1497
1498         /* Clear ULP enabled configuration */
1499         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1500         if (ret_val)
1501                 goto release;
1502                 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1503                              I218_ULP_CONFIG1_STICKY_ULP |
1504                              I218_ULP_CONFIG1_RESET_TO_SMBUS |
1505                              I218_ULP_CONFIG1_WOL_HOST |
1506                              I218_ULP_CONFIG1_INBAND_EXIT |
1507                              I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1508                              I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1509                              I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1510                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1511
1512                 /* Commit ULP changes by starting auto ULP configuration */
1513                 phy_reg |= I218_ULP_CONFIG1_START;
1514                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1515
1516                 /* Clear Disable SMBus Release on PERST# in MAC */
1517                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1518                 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1519                 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1520
1521 release:
1522         hw->phy.ops.release(hw);
1523         if (force) {
1524                 hw->phy.ops.reset(hw);
1525                 msec_delay(50);
1526         }
1527 out:
1528         if (ret_val)
1529                 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1530         else
1531                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1532
1533         return ret_val;
1534 }
1535
1536 /**
1537  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1538  *  @hw: pointer to the HW structure
1539  *
1540  *  Checks to see of the link status of the hardware has changed.  If a
1541  *  change in link status has been detected, then we read the PHY registers
1542  *  to get the current speed/duplex if link exists.
1543  **/
1544 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1545 {
1546         struct e1000_mac_info *mac = &hw->mac;
1547         s32 ret_val, tipg_reg = 0;
1548         u16 emi_addr, emi_val = 0;
1549         bool link;
1550         u16 phy_reg;
1551
1552         DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1553
1554         /* We only want to go out to the PHY registers to see if Auto-Neg
1555          * has completed and/or if our link status has changed.  The
1556          * get_link_status flag is set upon receiving a Link Status
1557          * Change or Rx Sequence Error interrupt.
1558          */
1559         if (!mac->get_link_status)
1560                 return E1000_SUCCESS;
1561
1562                 /* First we want to see if the MII Status Register reports
1563                  * link.  If so, then we want to get the current speed/duplex
1564                  * of the PHY.
1565                  */
1566                 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1567                 if (ret_val)
1568                         return ret_val;
1569
1570         if (hw->mac.type == e1000_pchlan) {
1571                 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1572                 if (ret_val)
1573                         return ret_val;
1574         }
1575
1576         /* When connected at 10Mbps half-duplex, some parts are excessively
1577          * aggressive resulting in many collisions. To avoid this, increase
1578          * the IPG and reduce Rx latency in the PHY.
1579          */
1580         if ((hw->mac.type >= e1000_pch2lan) && link) {
1581                 u16 speed, duplex;
1582
1583                 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1584                 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1585                 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1586
1587                 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1588                         tipg_reg |= 0xFF;
1589                         /* Reduce Rx latency in analog PHY */
1590                         emi_val = 0;
1591                 } else if (hw->mac.type >= e1000_pch_spt &&
1592                            duplex == FULL_DUPLEX && speed != SPEED_1000) {
1593                         tipg_reg |= 0xC;
1594                         emi_val = 1;
1595                 } else {
1596                         /* Roll back the default values */
1597                         tipg_reg |= 0x08;
1598                         emi_val = 1;
1599                 }
1600
1601                 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1602
1603                 ret_val = hw->phy.ops.acquire(hw);
1604                 if (ret_val)
1605                         return ret_val;
1606
1607                 if (hw->mac.type == e1000_pch2lan)
1608                         emi_addr = I82579_RX_CONFIG;
1609                 else
1610                         emi_addr = I217_RX_CONFIG;
1611                 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1612
1613
1614                 if (hw->mac.type >= e1000_pch_lpt) {
1615                         u16 phy_reg;
1616
1617                         hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1618                                                     &phy_reg);
1619                         phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1620                         if (speed == SPEED_100 || speed == SPEED_10)
1621                                 phy_reg |= 0x3E8;
1622                         else
1623                                 phy_reg |= 0xFA;
1624                         hw->phy.ops.write_reg_locked(hw,
1625                                                      I217_PLL_CLOCK_GATE_REG,
1626                                                      phy_reg);
1627
1628                         if (speed == SPEED_1000) {
1629                                 hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1630                                                             &phy_reg);
1631
1632                                 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1633
1634                                 hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1635                                                              phy_reg);
1636                                 }
1637                  }
1638                 hw->phy.ops.release(hw);
1639
1640                 if (ret_val)
1641                         return ret_val;
1642
1643                 if (hw->mac.type >= e1000_pch_spt) {
1644                         u16 data;
1645                         u16 ptr_gap;
1646
1647                         if (speed == SPEED_1000) {
1648                                 ret_val = hw->phy.ops.acquire(hw);
1649                                 if (ret_val)
1650                                         return ret_val;
1651
1652                                 ret_val = hw->phy.ops.read_reg_locked(hw,
1653                                                               PHY_REG(776, 20),
1654                                                               &data);
1655                                 if (ret_val) {
1656                                         hw->phy.ops.release(hw);
1657                                         return ret_val;
1658                                 }
1659
1660                                 ptr_gap = (data & (0x3FF << 2)) >> 2;
1661                                 if (ptr_gap < 0x18) {
1662                                         data &= ~(0x3FF << 2);
1663                                         data |= (0x18 << 2);
1664                                         ret_val =
1665                                                 hw->phy.ops.write_reg_locked(hw,
1666                                                         PHY_REG(776, 20), data);
1667                                 }
1668                                 hw->phy.ops.release(hw);
1669                                 if (ret_val)
1670                                         return ret_val;
1671                         } else {
1672                                 ret_val = hw->phy.ops.acquire(hw);
1673                                 if (ret_val)
1674                                         return ret_val;
1675
1676                                 ret_val = hw->phy.ops.write_reg_locked(hw,
1677                                                              PHY_REG(776, 20),
1678                                                              0xC023);
1679                                 hw->phy.ops.release(hw);
1680                                 if (ret_val)
1681                                         return ret_val;
1682
1683                         }
1684                 }
1685         }
1686
1687         /* I217 Packet Loss issue:
1688          * ensure that FEXTNVM4 Beacon Duration is set correctly
1689          * on power up.
1690          * Set the Beacon Duration for I217 to 8 usec
1691          */
1692         if (hw->mac.type >= e1000_pch_lpt) {
1693                 u32 mac_reg;
1694
1695                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1696                 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1697                 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1698                 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1699         }
1700
1701         /* Work-around I218 hang issue */
1702         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1703             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1704             (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1705             (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1706                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1707                 if (ret_val)
1708                         return ret_val;
1709         }
1710         if (hw->mac.type >= e1000_pch_lpt) {
1711                 /* Set platform power management values for
1712                  * Latency Tolerance Reporting (LTR)
1713                  * Optimized Buffer Flush/Fill (OBFF)
1714                  */
1715                 ret_val = e1000_platform_pm_pch_lpt(hw, link);
1716                 if (ret_val)
1717                         return ret_val;
1718         }
1719
1720         /* Clear link partner's EEE ability */
1721         hw->dev_spec.ich8lan.eee_lp_ability = 0;
1722
1723         if (hw->mac.type >= e1000_pch_lpt) {
1724                 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1725
1726                 if (hw->mac.type == e1000_pch_spt) {
1727                         /* FEXTNVM6 K1-off workaround - for SPT only */
1728                         u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1729
1730                         if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1731                                 fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1732                         else
1733                                 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1734                 }
1735
1736                 if (hw->dev_spec.ich8lan.disable_k1_off == TRUE)
1737                         fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1738
1739                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1740         }
1741
1742         if (!link)
1743                 return E1000_SUCCESS; /* No link detected */
1744
1745         mac->get_link_status = FALSE;
1746
1747         switch (hw->mac.type) {
1748         case e1000_pch2lan:
1749                 ret_val = e1000_k1_workaround_lv(hw);
1750                 if (ret_val)
1751                         return ret_val;
1752                 /* fall-thru */
1753         case e1000_pchlan:
1754                 if (hw->phy.type == e1000_phy_82578) {
1755                         ret_val = e1000_link_stall_workaround_hv(hw);
1756                         if (ret_val)
1757                                 return ret_val;
1758                 }
1759
1760                 /* Workaround for PCHx parts in half-duplex:
1761                  * Set the number of preambles removed from the packet
1762                  * when it is passed from the PHY to the MAC to prevent
1763                  * the MAC from misinterpreting the packet type.
1764                  */
1765                 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1766                 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1767
1768                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1769                     E1000_STATUS_FD)
1770                         phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1771
1772                 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1773                 break;
1774         default:
1775                 break;
1776         }
1777
1778         /* Check if there was DownShift, must be checked
1779          * immediately after link-up
1780          */
1781         e1000_check_downshift_generic(hw);
1782
1783         /* Enable/Disable EEE after link up */
1784         if (hw->phy.type > e1000_phy_82579) {
1785                 ret_val = e1000_set_eee_pchlan(hw);
1786                 if (ret_val)
1787                         return ret_val;
1788         }
1789
1790         /* If we are forcing speed/duplex, then we simply return since
1791          * we have already determined whether we have link or not.
1792          */
1793         if (!mac->autoneg)
1794                 return -E1000_ERR_CONFIG;
1795
1796         /* Auto-Neg is enabled.  Auto Speed Detection takes care
1797          * of MAC speed/duplex configuration.  So we only need to
1798          * configure Collision Distance in the MAC.
1799          */
1800         mac->ops.config_collision_dist(hw);
1801
1802         /* Configure Flow Control now that Auto-Neg has completed.
1803          * First, we need to restore the desired flow control
1804          * settings because we may have had to re-autoneg with a
1805          * different link partner.
1806          */
1807         ret_val = e1000_config_fc_after_link_up_generic(hw);
1808         if (ret_val)
1809                 DEBUGOUT("Error configuring flow control\n");
1810
1811         return ret_val;
1812 }
1813
1814 /**
1815  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1816  *  @hw: pointer to the HW structure
1817  *
1818  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1819  **/
1820 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1821 {
1822         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1823
1824         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1825         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1826         switch (hw->mac.type) {
1827         case e1000_ich8lan:
1828         case e1000_ich9lan:
1829         case e1000_ich10lan:
1830                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1831                 break;
1832         case e1000_pchlan:
1833         case e1000_pch2lan:
1834         case e1000_pch_lpt:
1835         case e1000_pch_spt:
1836         case e1000_pch_cnp:
1837                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1838                 break;
1839         default:
1840                 break;
1841         }
1842 }
1843
1844 /**
1845  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1846  *  @hw: pointer to the HW structure
1847  *
1848  *  Acquires the mutex for performing NVM operations.
1849  **/
1850 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1851 {
1852         DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1853
1854         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1855
1856         return E1000_SUCCESS;
1857 }
1858
1859 /**
1860  *  e1000_release_nvm_ich8lan - Release NVM mutex
1861  *  @hw: pointer to the HW structure
1862  *
1863  *  Releases the mutex used while performing NVM operations.
1864  **/
1865 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1866 {
1867         DEBUGFUNC("e1000_release_nvm_ich8lan");
1868
1869         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1870
1871         return;
1872 }
1873
1874 /**
1875  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1876  *  @hw: pointer to the HW structure
1877  *
1878  *  Acquires the software control flag for performing PHY and select
1879  *  MAC CSR accesses.
1880  **/
1881 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1882 {
1883         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1884         s32 ret_val = E1000_SUCCESS;
1885
1886         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1887
1888         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1889
1890         while (timeout) {
1891                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1892                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1893                         break;
1894
1895                 msec_delay_irq(1);
1896                 timeout--;
1897         }
1898
1899         if (!timeout) {
1900                 DEBUGOUT("SW has already locked the resource.\n");
1901                 ret_val = -E1000_ERR_CONFIG;
1902                 goto out;
1903         }
1904
1905         timeout = SW_FLAG_TIMEOUT;
1906
1907         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1908         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1909
1910         while (timeout) {
1911                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1912                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1913                         break;
1914
1915                 msec_delay_irq(1);
1916                 timeout--;
1917         }
1918
1919         if (!timeout) {
1920                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1921                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1922                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1923                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1924                 ret_val = -E1000_ERR_CONFIG;
1925                 goto out;
1926         }
1927
1928 out:
1929         if (ret_val)
1930                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1931
1932         return ret_val;
1933 }
1934
1935 /**
1936  *  e1000_release_swflag_ich8lan - Release software control flag
1937  *  @hw: pointer to the HW structure
1938  *
1939  *  Releases the software control flag for performing PHY and select
1940  *  MAC CSR accesses.
1941  **/
1942 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1943 {
1944         u32 extcnf_ctrl;
1945
1946         DEBUGFUNC("e1000_release_swflag_ich8lan");
1947
1948         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1949
1950         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1951                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1952                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1953         } else {
1954                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1955         }
1956
1957         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1958
1959         return;
1960 }
1961
1962 /**
1963  *  e1000_check_mng_mode_ich8lan - Checks management mode
1964  *  @hw: pointer to the HW structure
1965  *
1966  *  This checks if the adapter has any manageability enabled.
1967  *  This is a function pointer entry point only called by read/write
1968  *  routines for the PHY and NVM parts.
1969  **/
1970 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1971 {
1972         u32 fwsm;
1973
1974         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1975
1976         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1977
1978         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1979                ((fwsm & E1000_FWSM_MODE_MASK) ==
1980                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1981 }
1982
1983 /**
1984  *  e1000_check_mng_mode_pchlan - Checks management mode
1985  *  @hw: pointer to the HW structure
1986  *
1987  *  This checks if the adapter has iAMT enabled.
1988  *  This is a function pointer entry point only called by read/write
1989  *  routines for the PHY and NVM parts.
1990  **/
1991 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1992 {
1993         u32 fwsm;
1994
1995         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1996
1997         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1998
1999         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
2000                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
2001 }
2002
2003 /**
2004  *  e1000_rar_set_pch2lan - Set receive address register
2005  *  @hw: pointer to the HW structure
2006  *  @addr: pointer to the receive address
2007  *  @index: receive address array register
2008  *
2009  *  Sets the receive address array register at index to the address passed
2010  *  in by addr.  For 82579, RAR[0] is the base address register that is to
2011  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
2012  *  Use SHRA[0-3] in place of those reserved for ME.
2013  **/
2014 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
2015 {
2016         u32 rar_low, rar_high;
2017
2018         DEBUGFUNC("e1000_rar_set_pch2lan");
2019
2020         /* HW expects these in little endian so we reverse the byte order
2021          * from network order (big endian) to little endian
2022          */
2023         rar_low = ((u32) addr[0] |
2024                    ((u32) addr[1] << 8) |
2025                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2026
2027         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2028
2029         /* If MAC address zero, no need to set the AV bit */
2030         if (rar_low || rar_high)
2031                 rar_high |= E1000_RAH_AV;
2032
2033         if (index == 0) {
2034                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2035                 E1000_WRITE_FLUSH(hw);
2036                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2037                 E1000_WRITE_FLUSH(hw);
2038                 return E1000_SUCCESS;
2039         }
2040
2041         /* RAR[1-6] are owned by manageability.  Skip those and program the
2042          * next address into the SHRA register array.
2043          */
2044         if (index < (u32) (hw->mac.rar_entry_count)) {
2045                 s32 ret_val;
2046
2047                 ret_val = e1000_acquire_swflag_ich8lan(hw);
2048                 if (ret_val)
2049                         goto out;
2050
2051                 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
2052                 E1000_WRITE_FLUSH(hw);
2053                 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
2054                 E1000_WRITE_FLUSH(hw);
2055
2056                 e1000_release_swflag_ich8lan(hw);
2057
2058                 /* verify the register updates */
2059                 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2060                     (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2061                         return E1000_SUCCESS;
2062
2063                 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2064                          (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2065         }
2066
2067 out:
2068         DEBUGOUT1("Failed to write receive address at index %d\n", index);
2069         return -E1000_ERR_CONFIG;
2070 }
2071
2072 /**
2073  *  e1000_rar_set_pch_lpt - Set receive address registers
2074  *  @hw: pointer to the HW structure
2075  *  @addr: pointer to the receive address
2076  *  @index: receive address array register
2077  *
2078  *  Sets the receive address register array at index to the address passed
2079  *  in by addr. For LPT, RAR[0] is the base address register that is to
2080  *  contain the MAC address. SHRA[0-10] are the shared receive address
2081  *  registers that are shared between the Host and manageability engine (ME).
2082  **/
2083 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
2084 {
2085         u32 rar_low, rar_high;
2086         u32 wlock_mac;
2087
2088         DEBUGFUNC("e1000_rar_set_pch_lpt");
2089
2090         /* HW expects these in little endian so we reverse the byte order
2091          * from network order (big endian) to little endian
2092          */
2093         rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
2094                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2095
2096         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2097
2098         /* If MAC address zero, no need to set the AV bit */
2099         if (rar_low || rar_high)
2100                 rar_high |= E1000_RAH_AV;
2101
2102         if (index == 0) {
2103                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2104                 E1000_WRITE_FLUSH(hw);
2105                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2106                 E1000_WRITE_FLUSH(hw);
2107                 return E1000_SUCCESS;
2108         }
2109
2110         /* The manageability engine (ME) can lock certain SHRAR registers that
2111          * it is using - those registers are unavailable for use.
2112          */
2113         if (index < hw->mac.rar_entry_count) {
2114                 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2115                             E1000_FWSM_WLOCK_MAC_MASK;
2116                 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2117
2118                 /* Check if all SHRAR registers are locked */
2119                 if (wlock_mac == 1)
2120                         goto out;
2121
2122                 if ((wlock_mac == 0) || (index <= wlock_mac)) {
2123                         s32 ret_val;
2124
2125                         ret_val = e1000_acquire_swflag_ich8lan(hw);
2126
2127                         if (ret_val)
2128                                 goto out;
2129
2130                         E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2131                                         rar_low);
2132                         E1000_WRITE_FLUSH(hw);
2133                         E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2134                                         rar_high);
2135                         E1000_WRITE_FLUSH(hw);
2136
2137                         e1000_release_swflag_ich8lan(hw);
2138
2139                         /* verify the register updates */
2140                         if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2141                             (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2142                                 return E1000_SUCCESS;
2143                 }
2144         }
2145
2146 out:
2147         DEBUGOUT1("Failed to write receive address at index %d\n", index);
2148         return -E1000_ERR_CONFIG;
2149 }
2150
2151 /**
2152  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2153  *  @hw: pointer to the HW structure
2154  *  @mc_addr_list: array of multicast addresses to program
2155  *  @mc_addr_count: number of multicast addresses to program
2156  *
2157  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2158  *  The caller must have a packed mc_addr_list of multicast addresses.
2159  **/
2160 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2161                                               u8 *mc_addr_list,
2162                                               u32 mc_addr_count)
2163 {
2164         u16 phy_reg = 0;
2165         int i;
2166         s32 ret_val;
2167
2168         DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2169
2170         e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2171
2172         ret_val = hw->phy.ops.acquire(hw);
2173         if (ret_val)
2174                 return;
2175
2176         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2177         if (ret_val)
2178                 goto release;
2179
2180         for (i = 0; i < hw->mac.mta_reg_count; i++) {
2181                 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2182                                            (u16)(hw->mac.mta_shadow[i] &
2183                                                  0xFFFF));
2184                 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2185                                            (u16)((hw->mac.mta_shadow[i] >> 16) &
2186                                                  0xFFFF));
2187         }
2188
2189         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2190
2191 release:
2192         hw->phy.ops.release(hw);
2193 }
2194
2195 /**
2196  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2197  *  @hw: pointer to the HW structure
2198  *
2199  *  Checks if firmware is blocking the reset of the PHY.
2200  *  This is a function pointer entry point only called by
2201  *  reset routines.
2202  **/
2203 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2204 {
2205         u32 fwsm;
2206         bool blocked = FALSE;
2207         int i = 0;
2208
2209         DEBUGFUNC("e1000_check_reset_block_ich8lan");
2210
2211         do {
2212                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2213                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2214                         blocked = TRUE;
2215                         msec_delay(10);
2216                         continue;
2217                 }
2218                 blocked = FALSE;
2219         } while (blocked && (i++ < 30));
2220         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2221 }
2222
2223 /**
2224  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2225  *  @hw: pointer to the HW structure
2226  *
2227  *  Assumes semaphore already acquired.
2228  *
2229  **/
2230 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2231 {
2232         u16 phy_data;
2233         u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2234         u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2235                 E1000_STRAP_SMT_FREQ_SHIFT;
2236         s32 ret_val;
2237
2238         strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2239
2240         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2241         if (ret_val)
2242                 return ret_val;
2243
2244         phy_data &= ~HV_SMB_ADDR_MASK;
2245         phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2246         phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2247
2248         if (hw->phy.type == e1000_phy_i217) {
2249                 /* Restore SMBus frequency */
2250                 if (freq--) {
2251                         phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2252                         phy_data |= (freq & (1 << 0)) <<
2253                                 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2254                         phy_data |= (freq & (1 << 1)) <<
2255                                 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2256                 } else {
2257                         DEBUGOUT("Unsupported SMB frequency in PHY\n");
2258                 }
2259         }
2260
2261         return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2262 }
2263
2264 /**
2265  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2266  *  @hw:   pointer to the HW structure
2267  *
2268  *  SW should configure the LCD from the NVM extended configuration region
2269  *  as a workaround for certain parts.
2270  **/
2271 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2272 {
2273         struct e1000_phy_info *phy = &hw->phy;
2274         u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2275         s32 ret_val = E1000_SUCCESS;
2276         u16 word_addr, reg_data, reg_addr, phy_page = 0;
2277
2278         DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2279
2280         /* Initialize the PHY from the NVM on ICH platforms.  This
2281          * is needed due to an issue where the NVM configuration is
2282          * not properly autoloaded after power transitions.
2283          * Therefore, after each PHY reset, we will load the
2284          * configuration data out of the NVM manually.
2285          */
2286         switch (hw->mac.type) {
2287         case e1000_ich8lan:
2288                 if (phy->type != e1000_phy_igp_3)
2289                         return ret_val;
2290
2291                 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2292                     (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2293                         sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2294                         break;
2295                 }
2296                 /* Fall-thru */
2297         case e1000_pchlan:
2298         case e1000_pch2lan:
2299         case e1000_pch_lpt:
2300         case e1000_pch_spt:
2301         case e1000_pch_cnp:
2302                 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2303                 break;
2304         default:
2305                 return ret_val;
2306         }
2307
2308         ret_val = hw->phy.ops.acquire(hw);
2309         if (ret_val)
2310                 return ret_val;
2311
2312         data = E1000_READ_REG(hw, E1000_FEXTNVM);
2313         if (!(data & sw_cfg_mask))
2314                 goto release;
2315
2316         /* Make sure HW does not configure LCD from PHY
2317          * extended configuration before SW configuration
2318          */
2319         data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2320         if ((hw->mac.type < e1000_pch2lan) &&
2321             (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2322                         goto release;
2323
2324         cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2325         cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2326         cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2327         if (!cnf_size)
2328                 goto release;
2329
2330         cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2331         cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2332
2333         if (((hw->mac.type == e1000_pchlan) &&
2334              !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2335             (hw->mac.type > e1000_pchlan)) {
2336                 /* HW configures the SMBus address and LEDs when the
2337                  * OEM and LCD Write Enable bits are set in the NVM.
2338                  * When both NVM bits are cleared, SW will configure
2339                  * them instead.
2340                  */
2341                 ret_val = e1000_write_smbus_addr(hw);
2342                 if (ret_val)
2343                         goto release;
2344
2345                 data = E1000_READ_REG(hw, E1000_LEDCTL);
2346                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2347                                                         (u16)data);
2348                 if (ret_val)
2349                         goto release;
2350         }
2351
2352         /* Configure LCD from extended configuration region. */
2353
2354         /* cnf_base_addr is in DWORD */
2355         word_addr = (u16)(cnf_base_addr << 1);
2356
2357         for (i = 0; i < cnf_size; i++) {
2358                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2359                                            &reg_data);
2360                 if (ret_val)
2361                         goto release;
2362
2363                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2364                                            1, &reg_addr);
2365                 if (ret_val)
2366                         goto release;
2367
2368                 /* Save off the PHY page for future writes. */
2369                 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2370                         phy_page = reg_data;
2371                         continue;
2372                 }
2373
2374                 reg_addr &= PHY_REG_MASK;
2375                 reg_addr |= phy_page;
2376
2377                 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2378                                                     reg_data);
2379                 if (ret_val)
2380                         goto release;
2381         }
2382
2383 release:
2384         hw->phy.ops.release(hw);
2385         return ret_val;
2386 }
2387
2388 /**
2389  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2390  *  @hw:   pointer to the HW structure
2391  *  @link: link up bool flag
2392  *
2393  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2394  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
2395  *  If link is down, the function will restore the default K1 setting located
2396  *  in the NVM.
2397  **/
2398 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2399 {
2400         s32 ret_val = E1000_SUCCESS;
2401         u16 status_reg = 0;
2402         bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2403
2404         DEBUGFUNC("e1000_k1_gig_workaround_hv");
2405
2406         if (hw->mac.type != e1000_pchlan)
2407                 return E1000_SUCCESS;
2408
2409         /* Wrap the whole flow with the sw flag */
2410         ret_val = hw->phy.ops.acquire(hw);
2411         if (ret_val)
2412                 return ret_val;
2413
2414         /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2415         if (link) {
2416                 if (hw->phy.type == e1000_phy_82578) {
2417                         ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2418                                                               &status_reg);
2419                         if (ret_val)
2420                                 goto release;
2421
2422                         status_reg &= (BM_CS_STATUS_LINK_UP |
2423                                        BM_CS_STATUS_RESOLVED |
2424                                        BM_CS_STATUS_SPEED_MASK);
2425
2426                         if (status_reg == (BM_CS_STATUS_LINK_UP |
2427                                            BM_CS_STATUS_RESOLVED |
2428                                            BM_CS_STATUS_SPEED_1000))
2429                                 k1_enable = FALSE;
2430                 }
2431
2432                 if (hw->phy.type == e1000_phy_82577) {
2433                         ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2434                                                               &status_reg);
2435                         if (ret_val)
2436                                 goto release;
2437
2438                         status_reg &= (HV_M_STATUS_LINK_UP |
2439                                        HV_M_STATUS_AUTONEG_COMPLETE |
2440                                        HV_M_STATUS_SPEED_MASK);
2441
2442                         if (status_reg == (HV_M_STATUS_LINK_UP |
2443                                            HV_M_STATUS_AUTONEG_COMPLETE |
2444                                            HV_M_STATUS_SPEED_1000))
2445                                 k1_enable = FALSE;
2446                 }
2447
2448                 /* Link stall fix for link up */
2449                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2450                                                        0x0100);
2451                 if (ret_val)
2452                         goto release;
2453
2454         } else {
2455                 /* Link stall fix for link down */
2456                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2457                                                        0x4100);
2458                 if (ret_val)
2459                         goto release;
2460         }
2461
2462         ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2463
2464 release:
2465         hw->phy.ops.release(hw);
2466
2467         return ret_val;
2468 }
2469
2470 /**
2471  *  e1000_configure_k1_ich8lan - Configure K1 power state
2472  *  @hw: pointer to the HW structure
2473  *  @enable: K1 state to configure
2474  *
2475  *  Configure the K1 power state based on the provided parameter.
2476  *  Assumes semaphore already acquired.
2477  *
2478  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2479  **/
2480 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2481 {
2482         s32 ret_val;
2483         u32 ctrl_reg = 0;
2484         u32 ctrl_ext = 0;
2485         u32 reg = 0;
2486         u16 kmrn_reg = 0;
2487
2488         DEBUGFUNC("e1000_configure_k1_ich8lan");
2489
2490         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2491                                              &kmrn_reg);
2492         if (ret_val)
2493                 return ret_val;
2494
2495         if (k1_enable)
2496                 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2497         else
2498                 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2499
2500         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2501                                               kmrn_reg);
2502         if (ret_val)
2503                 return ret_val;
2504
2505         usec_delay(20);
2506         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2507         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2508
2509         reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2510         reg |= E1000_CTRL_FRCSPD;
2511         E1000_WRITE_REG(hw, E1000_CTRL, reg);
2512
2513         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2514         E1000_WRITE_FLUSH(hw);
2515         usec_delay(20);
2516         E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2517         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2518         E1000_WRITE_FLUSH(hw);
2519         usec_delay(20);
2520
2521         return E1000_SUCCESS;
2522 }
2523
2524 /**
2525  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2526  *  @hw:       pointer to the HW structure
2527  *  @d0_state: boolean if entering d0 or d3 device state
2528  *
2529  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2530  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2531  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
2532  **/
2533 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2534 {
2535         s32 ret_val = 0;
2536         u32 mac_reg;
2537         u16 oem_reg;
2538
2539         DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2540
2541         if (hw->mac.type < e1000_pchlan)
2542                 return ret_val;
2543
2544         ret_val = hw->phy.ops.acquire(hw);
2545         if (ret_val)
2546                 return ret_val;
2547
2548         if (hw->mac.type == e1000_pchlan) {
2549                 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2550                 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2551                         goto release;
2552         }
2553
2554         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2555         if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2556                 goto release;
2557
2558         mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2559
2560         ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2561         if (ret_val)
2562                 goto release;
2563
2564         oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2565
2566         if (d0_state) {
2567                 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2568                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2569
2570                 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2571                         oem_reg |= HV_OEM_BITS_LPLU;
2572         } else {
2573                 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2574                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2575                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2576
2577                 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2578                     E1000_PHY_CTRL_NOND0A_LPLU))
2579                         oem_reg |= HV_OEM_BITS_LPLU;
2580         }
2581
2582         /* Set Restart auto-neg to activate the bits */
2583         if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2584             !hw->phy.ops.check_reset_block(hw))
2585                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2586
2587         ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2588
2589 release:
2590         hw->phy.ops.release(hw);
2591
2592         return ret_val;
2593 }
2594
2595
2596 /**
2597  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2598  *  @hw:   pointer to the HW structure
2599  **/
2600 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2601 {
2602         s32 ret_val;
2603         u16 data;
2604
2605         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2606
2607         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2608         if (ret_val)
2609                 return ret_val;
2610
2611         data |= HV_KMRN_MDIO_SLOW;
2612
2613         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2614
2615         return ret_val;
2616 }
2617
2618 /**
2619  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2620  *  done after every PHY reset.
2621  **/
2622 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2623 {
2624         s32 ret_val = E1000_SUCCESS;
2625         u16 phy_data;
2626
2627         DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2628
2629         if (hw->mac.type != e1000_pchlan)
2630                 return E1000_SUCCESS;
2631
2632         /* Set MDIO slow mode before any other MDIO access */
2633         if (hw->phy.type == e1000_phy_82577) {
2634                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2635                 if (ret_val)
2636                         return ret_val;
2637         }
2638
2639         if (((hw->phy.type == e1000_phy_82577) &&
2640              ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2641             ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2642                 /* Disable generation of early preamble */
2643                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2644                 if (ret_val)
2645                         return ret_val;
2646
2647                 /* Preamble tuning for SSC */
2648                 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2649                                                 0xA204);
2650                 if (ret_val)
2651                         return ret_val;
2652         }
2653
2654         if (hw->phy.type == e1000_phy_82578) {
2655                 /* Return registers to default by doing a soft reset then
2656                  * writing 0x3140 to the control register.
2657                  */
2658                 if (hw->phy.revision < 2) {
2659                         e1000_phy_sw_reset_generic(hw);
2660                         ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2661                                                         0x3140);
2662                         if (ret_val)
2663                                 return ret_val;
2664                 }
2665         }
2666
2667         /* Select page 0 */
2668         ret_val = hw->phy.ops.acquire(hw);
2669         if (ret_val)
2670                 return ret_val;
2671
2672         hw->phy.addr = 1;
2673         ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2674         hw->phy.ops.release(hw);
2675         if (ret_val)
2676                 return ret_val;
2677
2678         /* Configure the K1 Si workaround during phy reset assuming there is
2679          * link so that it disables K1 if link is in 1Gbps.
2680          */
2681         ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2682         if (ret_val)
2683                 return ret_val;
2684
2685         /* Workaround for link disconnects on a busy hub in half duplex */
2686         ret_val = hw->phy.ops.acquire(hw);
2687         if (ret_val)
2688                 return ret_val;
2689         ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2690         if (ret_val)
2691                 goto release;
2692         ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2693                                                phy_data & 0x00FF);
2694         if (ret_val)
2695                 goto release;
2696
2697         /* set MSE higher to enable link to stay up when noise is high */
2698         ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2699 release:
2700         hw->phy.ops.release(hw);
2701
2702         return ret_val;
2703 }
2704
2705 /**
2706  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2707  *  @hw:   pointer to the HW structure
2708  **/
2709 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2710 {
2711         u32 mac_reg;
2712         u16 i, phy_reg = 0;
2713         s32 ret_val;
2714
2715         DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2716
2717         ret_val = hw->phy.ops.acquire(hw);
2718         if (ret_val)
2719                 return;
2720         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2721         if (ret_val)
2722                 goto release;
2723
2724         /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2725         for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2726                 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2727                 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2728                                            (u16)(mac_reg & 0xFFFF));
2729                 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2730                                            (u16)((mac_reg >> 16) & 0xFFFF));
2731
2732                 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2733                 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2734                                            (u16)(mac_reg & 0xFFFF));
2735                 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2736                                            (u16)((mac_reg & E1000_RAH_AV)
2737                                                  >> 16));
2738         }
2739
2740         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2741
2742 release:
2743         hw->phy.ops.release(hw);
2744 }
2745
2746 static u32 e1000_calc_rx_da_crc(u8 mac[])
2747 {
2748         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2749         u32 i, j, mask, crc;
2750
2751         DEBUGFUNC("e1000_calc_rx_da_crc");
2752
2753         crc = 0xffffffff;
2754         for (i = 0; i < 6; i++) {
2755                 crc = crc ^ mac[i];
2756                 for (j = 8; j > 0; j--) {
2757                         mask = (crc & 1) * (-1);
2758                         crc = (crc >> 1) ^ (poly & mask);
2759                 }
2760         }
2761         return ~crc;
2762 }
2763
2764 /**
2765  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2766  *  with 82579 PHY
2767  *  @hw: pointer to the HW structure
2768  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2769  **/
2770 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2771 {
2772         s32 ret_val = E1000_SUCCESS;
2773         u16 phy_reg, data;
2774         u32 mac_reg;
2775         u16 i;
2776
2777         DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2778
2779         if (hw->mac.type < e1000_pch2lan)
2780                 return E1000_SUCCESS;
2781
2782         /* disable Rx path while enabling/disabling workaround */
2783         hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2784         ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2785                                         phy_reg | (1 << 14));
2786         if (ret_val)
2787                 return ret_val;
2788
2789         if (enable) {
2790                 /* Write Rx addresses (rar_entry_count for RAL/H, and
2791                  * SHRAL/H) and initial CRC values to the MAC
2792                  */
2793                 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2794                         u8 mac_addr[ETH_ADDR_LEN] = {0};
2795                         u32 addr_high, addr_low;
2796
2797                         addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2798                         if (!(addr_high & E1000_RAH_AV))
2799                                 continue;
2800                         addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2801                         mac_addr[0] = (addr_low & 0xFF);
2802                         mac_addr[1] = ((addr_low >> 8) & 0xFF);
2803                         mac_addr[2] = ((addr_low >> 16) & 0xFF);
2804                         mac_addr[3] = ((addr_low >> 24) & 0xFF);
2805                         mac_addr[4] = (addr_high & 0xFF);
2806                         mac_addr[5] = ((addr_high >> 8) & 0xFF);
2807
2808                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2809                                         e1000_calc_rx_da_crc(mac_addr));
2810                 }
2811
2812                 /* Write Rx addresses to the PHY */
2813                 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2814
2815                 /* Enable jumbo frame workaround in the MAC */
2816                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2817                 mac_reg &= ~(1 << 14);
2818                 mac_reg |= (7 << 15);
2819                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2820
2821                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2822                 mac_reg |= E1000_RCTL_SECRC;
2823                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2824
2825                 ret_val = e1000_read_kmrn_reg_generic(hw,
2826                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2827                                                 &data);
2828                 if (ret_val)
2829                         return ret_val;
2830                 ret_val = e1000_write_kmrn_reg_generic(hw,
2831                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2832                                                 data | (1 << 0));
2833                 if (ret_val)
2834                         return ret_val;
2835                 ret_val = e1000_read_kmrn_reg_generic(hw,
2836                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2837                                                 &data);
2838                 if (ret_val)
2839                         return ret_val;
2840                 data &= ~(0xF << 8);
2841                 data |= (0xB << 8);
2842                 ret_val = e1000_write_kmrn_reg_generic(hw,
2843                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2844                                                 data);
2845                 if (ret_val)
2846                         return ret_val;
2847
2848                 /* Enable jumbo frame workaround in the PHY */
2849                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2850                 data &= ~(0x7F << 5);
2851                 data |= (0x37 << 5);
2852                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2853                 if (ret_val)
2854                         return ret_val;
2855                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2856                 data &= ~(1 << 13);
2857                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2858                 if (ret_val)
2859                         return ret_val;
2860                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2861                 data &= ~(0x3FF << 2);
2862                 data |= (E1000_TX_PTR_GAP << 2);
2863                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2864                 if (ret_val)
2865                         return ret_val;
2866                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2867                 if (ret_val)
2868                         return ret_val;
2869                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2870                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2871                                                 (1 << 10));
2872                 if (ret_val)
2873                         return ret_val;
2874         } else {
2875                 /* Write MAC register values back to h/w defaults */
2876                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2877                 mac_reg &= ~(0xF << 14);
2878                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2879
2880                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2881                 mac_reg &= ~E1000_RCTL_SECRC;
2882                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2883
2884                 ret_val = e1000_read_kmrn_reg_generic(hw,
2885                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2886                                                 &data);
2887                 if (ret_val)
2888                         return ret_val;
2889                 ret_val = e1000_write_kmrn_reg_generic(hw,
2890                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2891                                                 data & ~(1 << 0));
2892                 if (ret_val)
2893                         return ret_val;
2894                 ret_val = e1000_read_kmrn_reg_generic(hw,
2895                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2896                                                 &data);
2897                 if (ret_val)
2898                         return ret_val;
2899                 data &= ~(0xF << 8);
2900                 data |= (0xB << 8);
2901                 ret_val = e1000_write_kmrn_reg_generic(hw,
2902                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2903                                                 data);
2904                 if (ret_val)
2905                         return ret_val;
2906
2907                 /* Write PHY register values back to h/w defaults */
2908                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2909                 data &= ~(0x7F << 5);
2910                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2911                 if (ret_val)
2912                         return ret_val;
2913                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2914                 data |= (1 << 13);
2915                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2916                 if (ret_val)
2917                         return ret_val;
2918                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2919                 data &= ~(0x3FF << 2);
2920                 data |= (0x8 << 2);
2921                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2922                 if (ret_val)
2923                         return ret_val;
2924                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2925                 if (ret_val)
2926                         return ret_val;
2927                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2928                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2929                                                 ~(1 << 10));
2930                 if (ret_val)
2931                         return ret_val;
2932         }
2933
2934         /* re-enable Rx path after enabling/disabling workaround */
2935         return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2936                                      ~(1 << 14));
2937 }
2938
2939 /**
2940  *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2941  *  done after every PHY reset.
2942  **/
2943 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2944 {
2945         s32 ret_val = E1000_SUCCESS;
2946
2947         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2948
2949         if (hw->mac.type != e1000_pch2lan)
2950                 return E1000_SUCCESS;
2951
2952         /* Set MDIO slow mode before any other MDIO access */
2953         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2954         if (ret_val)
2955                 return ret_val;
2956
2957         ret_val = hw->phy.ops.acquire(hw);
2958         if (ret_val)
2959                 return ret_val;
2960         /* set MSE higher to enable link to stay up when noise is high */
2961         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2962         if (ret_val)
2963                 goto release;
2964         /* drop link after 5 times MSE threshold was reached */
2965         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2966 release:
2967         hw->phy.ops.release(hw);
2968
2969         return ret_val;
2970 }
2971
2972 /**
2973  *  e1000_k1_gig_workaround_lv - K1 Si workaround
2974  *  @hw:   pointer to the HW structure
2975  *
2976  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
2977  *  Disable K1 for 1000 and 100 speeds
2978  **/
2979 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2980 {
2981         s32 ret_val = E1000_SUCCESS;
2982         u16 status_reg = 0;
2983
2984         DEBUGFUNC("e1000_k1_workaround_lv");
2985
2986         if (hw->mac.type != e1000_pch2lan)
2987                 return E1000_SUCCESS;
2988
2989         /* Set K1 beacon duration based on 10Mbs speed */
2990         ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2991         if (ret_val)
2992                 return ret_val;
2993
2994         if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2995             == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2996                 if (status_reg &
2997                     (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2998                         u16 pm_phy_reg;
2999
3000                         /* LV 1G/100 Packet drop issue wa  */
3001                         ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
3002                                                        &pm_phy_reg);
3003                         if (ret_val)
3004                                 return ret_val;
3005                         pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
3006                         ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
3007                                                         pm_phy_reg);
3008                         if (ret_val)
3009                                 return ret_val;
3010                 } else {
3011                         u32 mac_reg;
3012                         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
3013                         mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
3014                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
3015                         E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
3016                 }
3017         }
3018
3019         return ret_val;
3020 }
3021
3022 /**
3023  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
3024  *  @hw:   pointer to the HW structure
3025  *  @gate: boolean set to TRUE to gate, FALSE to ungate
3026  *
3027  *  Gate/ungate the automatic PHY configuration via hardware; perform
3028  *  the configuration via software instead.
3029  **/
3030 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
3031 {
3032         u32 extcnf_ctrl;
3033
3034         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
3035
3036         if (hw->mac.type < e1000_pch2lan)
3037                 return;
3038
3039         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
3040
3041         if (gate)
3042                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3043         else
3044                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3045
3046         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
3047 }
3048
3049 /**
3050  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
3051  *  @hw: pointer to the HW structure
3052  *
3053  *  Check the appropriate indication the MAC has finished configuring the
3054  *  PHY after a software reset.
3055  **/
3056 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3057 {
3058         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3059
3060         DEBUGFUNC("e1000_lan_init_done_ich8lan");
3061
3062         /* Wait for basic configuration completes before proceeding */
3063         do {
3064                 data = E1000_READ_REG(hw, E1000_STATUS);
3065                 data &= E1000_STATUS_LAN_INIT_DONE;
3066                 usec_delay(100);
3067         } while ((!data) && --loop);
3068
3069         /* If basic configuration is incomplete before the above loop
3070          * count reaches 0, loading the configuration from NVM will
3071          * leave the PHY in a bad state possibly resulting in no link.
3072          */
3073         if (loop == 0)
3074                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3075
3076         /* Clear the Init Done bit for the next init event */
3077         data = E1000_READ_REG(hw, E1000_STATUS);
3078         data &= ~E1000_STATUS_LAN_INIT_DONE;
3079         E1000_WRITE_REG(hw, E1000_STATUS, data);
3080 }
3081
/**
 *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Runs the MAC-type-specific PHY workarounds, clears the host wakeup bit,
 *  then re-applies the LCD extended-configuration and OEM bits from NVM.
 *  Returns E1000_SUCCESS or the error code of the first failing step.
 **/
static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 reg;

	DEBUGFUNC("e1000_post_phy_reset_ich8lan");

	/* Nothing to do while PHY resets are blocked */
	if (hw->phy.ops.check_reset_block(hw))
		return E1000_SUCCESS;

	/* Allow time for h/w to get to quiescent state after reset */
	msec_delay(10);

	/* Perform any necessary post-reset workarounds */
	switch (hw->mac.type) {
	case e1000_pchlan:
		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_pch2lan:
		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	/* Clear the host wakeup bit after lcd reset */
	if (hw->mac.type >= e1000_pchlan) {
		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
		reg &= ~BM_WUC_HOST_WU_BIT;
		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
	}

	/* Configure the LCD with the extended configuration region in NVM */
	ret_val = e1000_sw_lcd_config_ich8lan(hw);
	if (ret_val)
		return ret_val;

	/* Configure the LCD with the OEM bits in NVM */
	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);

	if (hw->mac.type == e1000_pch2lan) {
		/* Ungate automatic PHY configuration on non-managed 82579 */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		    E1000_ICH_FWSM_FW_VALID)) {
			msec_delay(10);
			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
		}

		/* Set EEE LPI Update Timer to 200usec */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_emi_reg_locked(hw,
						     I82579_LPI_UPDATE_TIMER,
						     0x1387);
		hw->phy.ops.release(hw);
	}

	return ret_val;
}
3150
3151 /**
3152  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3153  *  @hw: pointer to the HW structure
3154  *
3155  *  Resets the PHY
3156  *  This is a function pointer entry point called by drivers
3157  *  or other shared routines.
3158  **/
3159 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3160 {
3161         s32 ret_val = E1000_SUCCESS;
3162
3163         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3164
3165         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
3166         if ((hw->mac.type == e1000_pch2lan) &&
3167             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3168                 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3169
3170         ret_val = e1000_phy_hw_reset_generic(hw);
3171         if (ret_val)
3172                 return ret_val;
3173
3174         return e1000_post_phy_reset_ich8lan(hw);
3175 }
3176
3177 /**
3178  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3179  *  @hw: pointer to the HW structure
3180  *  @active: TRUE to enable LPLU, FALSE to disable
3181  *
3182  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
3183  *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
3184  *  the phy speed. This function will manually set the LPLU bit and restart
3185  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
3186  *  since it configures the same bit.
3187  **/
3188 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3189 {
3190         s32 ret_val;
3191         u16 oem_reg;
3192
3193         DEBUGFUNC("e1000_set_lplu_state_pchlan");
3194         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3195         if (ret_val)
3196                 return ret_val;
3197
3198         if (active)
3199                 oem_reg |= HV_OEM_BITS_LPLU;
3200         else
3201                 oem_reg &= ~HV_OEM_BITS_LPLU;
3202
3203         if (!hw->phy.ops.check_reset_block(hw))
3204                 oem_reg |= HV_OEM_BITS_RESTART_AN;
3205
3206         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3207 }
3208
3209 /**
3210  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3211  *  @hw: pointer to the HW structure
3212  *  @active: TRUE to enable LPLU, FALSE to disable
3213  *
3214  *  Sets the LPLU D0 state according to the active flag.  When
3215  *  activating LPLU this function also disables smart speed
3216  *  and vice versa.  LPLU will not be activated unless the
3217  *  device autonegotiation advertisement meets standards of
3218  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3219  *  This is a function pointer entry point only called by
3220  *  PHY setup routines.
3221  **/
3222 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3223 {
3224         struct e1000_phy_info *phy = &hw->phy;
3225         u32 phy_ctrl;
3226         s32 ret_val = E1000_SUCCESS;
3227         u16 data;
3228
3229         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3230
3231         if (phy->type == e1000_phy_ife)
3232                 return E1000_SUCCESS;
3233
3234         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3235
3236         if (active) {
3237                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3238                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3239
3240                 if (phy->type != e1000_phy_igp_3)
3241                         return E1000_SUCCESS;
3242
3243                 /* Call gig speed drop workaround on LPLU before accessing
3244                  * any PHY registers
3245                  */
3246                 if (hw->mac.type == e1000_ich8lan)
3247                         e1000_gig_downshift_workaround_ich8lan(hw);
3248
3249                 /* When LPLU is enabled, we should disable SmartSpeed */
3250                 ret_val = phy->ops.read_reg(hw,
3251                                             IGP01E1000_PHY_PORT_CONFIG,
3252                                             &data);
3253                 if (ret_val)
3254                         return ret_val;
3255                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3256                 ret_val = phy->ops.write_reg(hw,
3257                                              IGP01E1000_PHY_PORT_CONFIG,
3258                                              data);
3259                 if (ret_val)
3260                         return ret_val;
3261         } else {
3262                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3263                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3264
3265                 if (phy->type != e1000_phy_igp_3)
3266                         return E1000_SUCCESS;
3267
3268                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3269                  * during Dx states where the power conservation is most
3270                  * important.  During driver activity we should enable
3271                  * SmartSpeed, so performance is maintained.
3272                  */
3273                 if (phy->smart_speed == e1000_smart_speed_on) {
3274                         ret_val = phy->ops.read_reg(hw,
3275                                                     IGP01E1000_PHY_PORT_CONFIG,
3276                                                     &data);
3277                         if (ret_val)
3278                                 return ret_val;
3279
3280                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3281                         ret_val = phy->ops.write_reg(hw,
3282                                                      IGP01E1000_PHY_PORT_CONFIG,
3283                                                      data);
3284                         if (ret_val)
3285                                 return ret_val;
3286                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3287                         ret_val = phy->ops.read_reg(hw,
3288                                                     IGP01E1000_PHY_PORT_CONFIG,
3289                                                     &data);
3290                         if (ret_val)
3291                                 return ret_val;
3292
3293                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3294                         ret_val = phy->ops.write_reg(hw,
3295                                                      IGP01E1000_PHY_PORT_CONFIG,
3296                                                      data);
3297                         if (ret_val)
3298                                 return ret_val;
3299                 }
3300         }
3301
3302         return E1000_SUCCESS;
3303 }
3304
/**
 *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
 *  @hw: pointer to the HW structure
 *  @active: TRUE to enable LPLU, FALSE to disable
 *
 *  Sets the LPLU D3 state according to the active flag.  When
 *  activating LPLU this function also disables smart speed
 *  and vice versa.  LPLU will not be activated unless the
 *  device autonegotiation advertisement meets standards of
 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
 *  This is a function pointer entry point only called by
 *  PHY setup routines.
 **/
static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 phy_ctrl;
	s32 ret_val = E1000_SUCCESS;
	u16 data;

	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");

	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);

	if (!active) {
		/* Disabling LPLU: clear the non-D0a LPLU bit in the MAC */
		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

		/* SmartSpeed handling below applies only to IGP3 PHYs */
		if (phy->type != e1000_phy_igp_3)
			return E1000_SUCCESS;

		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = phy->ops.read_reg(hw,
						    IGP01E1000_PHY_PORT_CONFIG,
						    &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
						     IGP01E1000_PHY_PORT_CONFIG,
						     data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = phy->ops.read_reg(hw,
						    IGP01E1000_PHY_PORT_CONFIG,
						    &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = phy->ops.write_reg(hw,
						     IGP01E1000_PHY_PORT_CONFIG,
						     data);
			if (ret_val)
				return ret_val;
		}
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		/* Enable LPLU only when advertising 10, 10/100, or
		 * 10/100/1000 at all duplexes.
		 */
		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

		if (phy->type != e1000_phy_igp_3)
			return E1000_SUCCESS;

		/* Call gig speed drop workaround on LPLU before accessing
		 * any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = phy->ops.read_reg(hw,
					    IGP01E1000_PHY_PORT_CONFIG,
					    &data);
		if (ret_val)
			return ret_val;

		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = phy->ops.write_reg(hw,
					     IGP01E1000_PHY_PORT_CONFIG,
					     data);
	}

	return ret_val;
}
3398
/**
 *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
 *  @hw: pointer to the HW structure
 *  @bank:  pointer to the variable that returns the active bank
 *
 *  Reads signature byte from the NVM using the flash access registers.
 *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
 *  Returns E1000_SUCCESS with *bank set, or -E1000_ERR_NVM when neither
 *  bank carries a valid signature.
 **/
static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
{
	u32 eecd;
	struct e1000_nvm_info *nvm = &hw->nvm;
	/* Default (byte-access) offsets: bank 1 starts one bank's worth of
	 * bytes past bank 0, and the signature byte is the high byte of the
	 * signature word.
	 */
	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
	u32 nvm_dword = 0;
	u8 sig_byte = 0;
	s32 ret_val;

	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");

	switch (hw->mac.type) {
	case e1000_pch_spt:
	case e1000_pch_cnp:
		/* SPT/CNP read the signature via 32-bit flash accesses, so
		 * the offsets are not scaled to bytes here.
		 */
		bank1_offset = nvm->flash_bank_size;
		act_offset = E1000_ICH_NVM_SIG_WORD;

		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
							 &nvm_dword);
		if (ret_val)
			return ret_val;
		/* Signature lives in the high byte of the low word */
		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return E1000_SUCCESS;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
							 bank1_offset,
							 &nvm_dword);
		if (ret_val)
			return ret_val;
		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return E1000_SUCCESS;
		}

		DEBUGOUT("ERROR: No valid NVM bank present\n");
		return -E1000_ERR_NVM;
	case e1000_ich8lan:
	case e1000_ich9lan:
		/* ICH8/ICH9 can report the valid bank directly via EECD */
		eecd = E1000_READ_REG(hw, E1000_EECD);
		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
		    E1000_EECD_SEC1VAL_VALID_MASK) {
			if (eecd & E1000_EECD_SEC1VAL)
				*bank = 1;
			else
				*bank = 0;

			return E1000_SUCCESS;
		}
		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
		/* fall-thru */
	default:
		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return E1000_SUCCESS;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
							bank1_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return E1000_SUCCESS;
		}

		DEBUGOUT("ERROR: No valid NVM bank present\n");
		return -E1000_ERR_NVM;
	}
}
3500
3501 /**
3502  *  e1000_read_nvm_spt - NVM access for SPT
3503  *  @hw: pointer to the HW structure
3504  *  @offset: The offset (in bytes) of the word(s) to read.
3505  *  @words: Size of data to read in words.
3506  *  @data: pointer to the word(s) to read at offset.
3507  *
3508  *  Reads a word(s) from the NVM
3509  **/
3510 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3511                               u16 *data)
3512 {
3513         struct e1000_nvm_info *nvm = &hw->nvm;
3514         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3515         u32 act_offset;
3516         s32 ret_val = E1000_SUCCESS;
3517         u32 bank = 0;
3518         u32 dword = 0;
3519         u16 offset_to_read;
3520         u16 i;
3521
3522         DEBUGFUNC("e1000_read_nvm_spt");
3523
3524         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3525             (words == 0)) {
3526                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3527                 ret_val = -E1000_ERR_NVM;
3528                 goto out;
3529         }
3530
3531         nvm->ops.acquire(hw);
3532
3533         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3534         if (ret_val != E1000_SUCCESS) {
3535                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3536                 bank = 0;
3537         }
3538
3539         act_offset = (bank) ? nvm->flash_bank_size : 0;
3540         act_offset += offset;
3541
3542         ret_val = E1000_SUCCESS;
3543
3544         for (i = 0; i < words; i += 2) {
3545                 if (words - i == 1) {
3546                         if (dev_spec->shadow_ram[offset+i].modified) {
3547                                 data[i] = dev_spec->shadow_ram[offset+i].value;
3548                         } else {
3549                                 offset_to_read = act_offset + i -
3550                                                  ((act_offset + i) % 2);
3551                                 ret_val =
3552                                    e1000_read_flash_dword_ich8lan(hw,
3553                                                                  offset_to_read,
3554                                                                  &dword);
3555                                 if (ret_val)
3556                                         break;
3557                                 if ((act_offset + i) % 2 == 0)
3558                                         data[i] = (u16)(dword & 0xFFFF);
3559                                 else
3560                                         data[i] = (u16)((dword >> 16) & 0xFFFF);
3561                         }
3562                 } else {
3563                         offset_to_read = act_offset + i;
3564                         if (!(dev_spec->shadow_ram[offset+i].modified) ||
3565                             !(dev_spec->shadow_ram[offset+i+1].modified)) {
3566                                 ret_val =
3567                                    e1000_read_flash_dword_ich8lan(hw,
3568                                                                  offset_to_read,
3569                                                                  &dword);
3570                                 if (ret_val)
3571                                         break;
3572                         }
3573                         if (dev_spec->shadow_ram[offset+i].modified)
3574                                 data[i] = dev_spec->shadow_ram[offset+i].value;
3575                         else
3576                                 data[i] = (u16) (dword & 0xFFFF);
3577                         if (dev_spec->shadow_ram[offset+i].modified)
3578                                 data[i+1] =
3579                                    dev_spec->shadow_ram[offset+i+1].value;
3580                         else
3581                                 data[i+1] = (u16) (dword >> 16 & 0xFFFF);
3582                 }
3583         }
3584
3585         nvm->ops.release(hw);
3586
3587 out:
3588         if (ret_val)
3589                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3590
3591         return ret_val;
3592 }
3593
3594 /**
3595  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3596  *  @hw: pointer to the HW structure
3597  *  @offset: The offset (in bytes) of the word(s) to read.
3598  *  @words: Size of data to read in words
3599  *  @data: Pointer to the word(s) to read at offset.
3600  *
3601  *  Reads a word(s) from the NVM using the flash access registers.
3602  **/
3603 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3604                                   u16 *data)
3605 {
3606         struct e1000_nvm_info *nvm = &hw->nvm;
3607         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3608         u32 act_offset;
3609         s32 ret_val = E1000_SUCCESS;
3610         u32 bank = 0;
3611         u16 i, word;
3612
3613         DEBUGFUNC("e1000_read_nvm_ich8lan");
3614
3615         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3616             (words == 0)) {
3617                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3618                 ret_val = -E1000_ERR_NVM;
3619                 goto out;
3620         }
3621
3622         nvm->ops.acquire(hw);
3623
3624         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3625         if (ret_val != E1000_SUCCESS) {
3626                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3627                 bank = 0;
3628         }
3629
3630         act_offset = (bank) ? nvm->flash_bank_size : 0;
3631         act_offset += offset;
3632
3633         ret_val = E1000_SUCCESS;
3634         for (i = 0; i < words; i++) {
3635                 if (dev_spec->shadow_ram[offset+i].modified) {
3636                         data[i] = dev_spec->shadow_ram[offset+i].value;
3637                 } else {
3638                         ret_val = e1000_read_flash_word_ich8lan(hw,
3639                                                                 act_offset + i,
3640                                                                 &word);
3641                         if (ret_val)
3642                                 break;
3643                         data[i] = word;
3644                 }
3645         }
3646
3647         nvm->ops.release(hw);
3648
3649 out:
3650         if (ret_val)
3651                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3652
3653         return ret_val;
3654 }
3655
/**
 *  e1000_flash_cycle_init_ich8lan - Initialize flash
 *  @hw: pointer to the HW structure
 *
 *  This function does initial flash setup so that a new read/write/erase cycle
 *  can be started.  Returns E1000_SUCCESS when the controller is ready for a
 *  new cycle, -E1000_ERR_NVM otherwise.
 **/
static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;

	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");

	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

	/* Check if the flash descriptor is valid */
	if (!hsfsts.hsf_status.fldesvalid) {
		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
		return -E1000_ERR_NVM;
	}

	/* Clear FCERR and DAEL in hw status by writing 1 */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;
	/* SPT and newer write HSFSTS as the low 16 bits of a 32-bit
	 * register; older parts use a 16-bit write.
	 */
	if (hw->mac.type >= e1000_pch_spt)
		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
				      hsfsts.regval & 0xFFFF);
	else
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);

	/* Either we should have a hardware SPI cycle in progress
	 * bit to check against, in order to start a new cycle or
	 * FDONE bit should be changed in the hardware so that it
	 * is 1 after hardware reset, which can then be used as an
	 * indication whether a cycle is in progress or has been
	 * completed.
	 */

	if (!hsfsts.hsf_status.flcinprog) {
		/* There is no cycle running at present,
		 * so we can start a cycle.
		 * Begin by setting Flash Cycle Done.
		 */
		hsfsts.hsf_status.flcdone = 1;
		if (hw->mac.type >= e1000_pch_spt)
			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
					      hsfsts.regval & 0xFFFF);
		else
			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
						hsfsts.regval);
		ret_val = E1000_SUCCESS;
	} else {
		s32 i;

		/* Otherwise poll for sometime so the current
		 * cycle has a chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
							      ICH_FLASH_HSFSTS);
			if (!hsfsts.hsf_status.flcinprog) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(1);
		}
		if (ret_val == E1000_SUCCESS) {
			/* Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts.hsf_status.flcdone = 1;
			if (hw->mac.type >= e1000_pch_spt)
				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
						      hsfsts.regval & 0xFFFF);
			else
				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
							hsfsts.regval);
		} else {
			DEBUGOUT("Flash controller busy, cannot get access\n");
		}
	}

	return ret_val;
}
3741
3742 /**
3743  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3744  *  @hw: pointer to the HW structure
3745  *  @timeout: maximum time to wait for completion
3746  *
3747  *  This function starts a flash cycle and waits for its completion.
3748  **/
3749 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3750 {
3751         union ich8_hws_flash_ctrl hsflctl;
3752         union ich8_hws_flash_status hsfsts;
3753         u32 i = 0;
3754
3755         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3756
3757         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3758         if (hw->mac.type >= e1000_pch_spt)
3759                 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3760         else
3761                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3762         hsflctl.hsf_ctrl.flcgo = 1;
3763
3764         if (hw->mac.type >= e1000_pch_spt)
3765                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3766                                       hsflctl.regval << 16);
3767         else
3768                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3769
3770         /* wait till FDONE bit is set to 1 */
3771         do {
3772                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3773                 if (hsfsts.hsf_status.flcdone)
3774                         break;
3775                 usec_delay(1);
3776         } while (i++ < timeout);
3777
3778         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3779                 return E1000_SUCCESS;
3780
3781         return -E1000_ERR_NVM;
3782 }
3783
3784 /**
3785  *  e1000_read_flash_dword_ich8lan - Read dword from flash
3786  *  @hw: pointer to the HW structure
3787  *  @offset: offset to data location
3788  *  @data: pointer to the location for storing the data
3789  *
3790  *  Reads the flash dword at offset into data.  Offset is converted
3791  *  to bytes before read.
3792  **/
3793 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3794                                           u32 *data)
3795 {
3796         DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3797
3798         if (!data)
3799                 return -E1000_ERR_NVM;
3800
3801         /* Must convert word offset into bytes. */
3802         offset <<= 1;
3803
3804         return e1000_read_flash_data32_ich8lan(hw, offset, data);
3805 }
3806
3807 /**
3808  *  e1000_read_flash_word_ich8lan - Read word from flash
3809  *  @hw: pointer to the HW structure
3810  *  @offset: offset to data location
3811  *  @data: pointer to the location for storing the data
3812  *
3813  *  Reads the flash word at offset into data.  Offset is converted
3814  *  to bytes before read.
3815  **/
3816 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3817                                          u16 *data)
3818 {
3819         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3820
3821         if (!data)
3822                 return -E1000_ERR_NVM;
3823
3824         /* Must convert offset into bytes. */
3825         offset <<= 1;
3826
3827         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3828 }
3829
3830 /**
3831  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3832  *  @hw: pointer to the HW structure
3833  *  @offset: The offset of the byte to read.
3834  *  @data: Pointer to a byte to store the value read.
3835  *
3836  *  Reads a single byte from the NVM using the flash access registers.
3837  **/
3838 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3839                                          u8 *data)
3840 {
3841         s32 ret_val;
3842         u16 word = 0;
3843
3844         /* In SPT, only 32 bits access is supported,
3845          * so this function should not be called.
3846          */
3847         if (hw->mac.type >= e1000_pch_spt)
3848                 return -E1000_ERR_NVM;
3849         else
3850                 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3851
3852         if (ret_val)
3853                 return ret_val;
3854
3855         *data = (u8)word;
3856
3857         return E1000_SUCCESS;
3858 }
3859
3860 /**
3861  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3862  *  @hw: pointer to the HW structure
3863  *  @offset: The offset (in bytes) of the byte or word to read.
3864  *  @size: Size of data to read, 1=byte 2=word
3865  *  @data: Pointer to the word to store the value read.
3866  *
3867  *  Reads a byte or word from the NVM using the flash access registers.
3868  **/
3869 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3870                                          u8 size, u16 *data)
3871 {
3872         union ich8_hws_flash_status hsfsts;
3873         union ich8_hws_flash_ctrl hsflctl;
3874         u32 flash_linear_addr;
3875         u32 flash_data = 0;
3876         s32 ret_val = -E1000_ERR_NVM;
3877         u8 count = 0;
3878
3879         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3880
3881         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3882                 return -E1000_ERR_NVM;
3883         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3884                              hw->nvm.flash_base_addr);
3885
3886         do {
3887                 usec_delay(1);
3888                 /* Steps */
3889                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3890                 if (ret_val != E1000_SUCCESS)
3891                         break;
3892                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3893
3894                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3895                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3896                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3897                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3898                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3899
3900                 ret_val = e1000_flash_cycle_ich8lan(hw,
3901                                                 ICH_FLASH_READ_COMMAND_TIMEOUT);
3902
3903                 /* Check if FCERR is set to 1, if set to 1, clear it
3904                  * and try the whole sequence a few more times, else
3905                  * read in (shift in) the Flash Data0, the order is
3906                  * least significant byte first msb to lsb
3907                  */
3908                 if (ret_val == E1000_SUCCESS) {
3909                         flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3910                         if (size == 1)
3911                                 *data = (u8)(flash_data & 0x000000FF);
3912                         else if (size == 2)
3913                                 *data = (u16)(flash_data & 0x0000FFFF);
3914                         break;
3915                 } else {
3916                         /* If we've gotten here, then things are probably
3917                          * completely hosed, but if the error condition is
3918                          * detected, it won't hurt to give it another try...
3919                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3920                          */
3921                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3922                                                               ICH_FLASH_HSFSTS);
3923                         if (hsfsts.hsf_status.flcerr) {
3924                                 /* Repeat for some time before giving up. */
3925                                 continue;
3926                         } else if (!hsfsts.hsf_status.flcdone) {
3927                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3928                                 break;
3929                         }
3930                 }
3931         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3932
3933         return ret_val;
3934 }
3935
3936 /**
3937  *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3938  *  @hw: pointer to the HW structure
3939  *  @offset: The offset (in bytes) of the dword to read.
3940  *  @data: Pointer to the dword to store the value read.
3941  *
3942  *  Reads a byte or word from the NVM using the flash access registers.
3943  **/
3944 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3945                                            u32 *data)
3946 {
3947         union ich8_hws_flash_status hsfsts;
3948         union ich8_hws_flash_ctrl hsflctl;
3949         u32 flash_linear_addr;
3950         s32 ret_val = -E1000_ERR_NVM;
3951         u8 count = 0;
3952
3953         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3954
3955                 if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3956                     hw->mac.type < e1000_pch_spt)
3957                         return -E1000_ERR_NVM;
3958         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3959                              hw->nvm.flash_base_addr);
3960
3961         do {
3962                 usec_delay(1);
3963                 /* Steps */
3964                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3965                 if (ret_val != E1000_SUCCESS)
3966                         break;
3967                 /* In SPT, This register is in Lan memory space, not flash.
3968                  * Therefore, only 32 bit access is supported
3969                  */
3970                 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3971
3972                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3973                 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3974                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3975                 /* In SPT, This register is in Lan memory space, not flash.
3976                  * Therefore, only 32 bit access is supported
3977                  */
3978                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3979                                       (u32)hsflctl.regval << 16);
3980                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3981
3982                 ret_val = e1000_flash_cycle_ich8lan(hw,
3983                                                 ICH_FLASH_READ_COMMAND_TIMEOUT);
3984
3985                 /* Check if FCERR is set to 1, if set to 1, clear it
3986                  * and try the whole sequence a few more times, else
3987                  * read in (shift in) the Flash Data0, the order is
3988                  * least significant byte first msb to lsb
3989                  */
3990                 if (ret_val == E1000_SUCCESS) {
3991                         *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3992                         break;
3993                 } else {
3994                         /* If we've gotten here, then things are probably
3995                          * completely hosed, but if the error condition is
3996                          * detected, it won't hurt to give it another try...
3997                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3998                          */
3999                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4000                                                               ICH_FLASH_HSFSTS);
4001                         if (hsfsts.hsf_status.flcerr) {
4002                                 /* Repeat for some time before giving up. */
4003                                 continue;
4004                         } else if (!hsfsts.hsf_status.flcdone) {
4005                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4006                                 break;
4007                         }
4008                 }
4009         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4010
4011         return ret_val;
4012 }
4013
4014 /**
4015  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
4016  *  @hw: pointer to the HW structure
4017  *  @offset: The offset (in bytes) of the word(s) to write.
4018  *  @words: Size of data to write in words
4019  *  @data: Pointer to the word(s) to write at offset.
4020  *
4021  *  Writes a byte or word to the NVM using the flash access registers.
4022  **/
4023 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
4024                                    u16 *data)
4025 {
4026         struct e1000_nvm_info *nvm = &hw->nvm;
4027         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4028         u16 i;
4029
4030         DEBUGFUNC("e1000_write_nvm_ich8lan");
4031
4032         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
4033             (words == 0)) {
4034                 DEBUGOUT("nvm parameter(s) out of bounds\n");
4035                 return -E1000_ERR_NVM;
4036         }
4037
4038         nvm->ops.acquire(hw);
4039
4040         for (i = 0; i < words; i++) {
4041                 dev_spec->shadow_ram[offset+i].modified = TRUE;
4042                 dev_spec->shadow_ram[offset+i].value = data[i];
4043         }
4044
4045         nvm->ops.release(hw);
4046
4047         return E1000_SUCCESS;
4048 }
4049
4050 /**
4051  *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
4052  *  @hw: pointer to the HW structure
4053  *
4054  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4055  *  which writes the checksum to the shadow ram.  The changes in the shadow
4056  *  ram are then committed to the EEPROM by processing each bank at a time
4057  *  checking for the modified bit and writing only the pending changes.
4058  *  After a successful commit, the shadow ram is cleared and is ready for
4059  *  future writes.
4060  **/
4061 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4062 {
4063         struct e1000_nvm_info *nvm = &hw->nvm;
4064         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4065         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4066         s32 ret_val;
4067         u32 dword = 0;
4068
4069         DEBUGFUNC("e1000_update_nvm_checksum_spt");
4070
4071         ret_val = e1000_update_nvm_checksum_generic(hw);
4072         if (ret_val)
4073                 goto out;
4074
4075         if (nvm->type != e1000_nvm_flash_sw)
4076                 goto out;
4077
4078         nvm->ops.acquire(hw);
4079
4080         /* We're writing to the opposite bank so if we're on bank 1,
4081          * write to bank 0 etc.  We also need to erase the segment that
4082          * is going to be written
4083          */
4084         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4085         if (ret_val != E1000_SUCCESS) {
4086                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4087                 bank = 0;
4088         }
4089
4090         if (bank == 0) {
4091                 new_bank_offset = nvm->flash_bank_size;
4092                 old_bank_offset = 0;
4093                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4094                 if (ret_val)
4095                         goto release;
4096         } else {
4097                 old_bank_offset = nvm->flash_bank_size;
4098                 new_bank_offset = 0;
4099                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4100                 if (ret_val)
4101                         goto release;
4102         }
4103         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4104                 /* Determine whether to write the value stored
4105                  * in the other NVM bank or a modified value stored
4106                  * in the shadow RAM
4107                  */
4108                 ret_val = e1000_read_flash_dword_ich8lan(hw,
4109                                                          i + old_bank_offset,
4110                                                          &dword);
4111
4112                 if (dev_spec->shadow_ram[i].modified) {
4113                         dword &= 0xffff0000;
4114                         dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4115                 }
4116                 if (dev_spec->shadow_ram[i + 1].modified) {
4117                         dword &= 0x0000ffff;
4118                         dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4119                                   << 16);
4120                 }
4121                 if (ret_val)
4122                         break;
4123
4124                 /* If the word is 0x13, then make sure the signature bits
4125                  * (15:14) are 11b until the commit has completed.
4126                  * This will allow us to write 10b which indicates the
4127                  * signature is valid.  We want to do this after the write
4128                  * has completed so that we don't mark the segment valid
4129                  * while the write is still in progress
4130                  */
4131                 if (i == E1000_ICH_NVM_SIG_WORD - 1)
4132                         dword |= E1000_ICH_NVM_SIG_MASK << 16;
4133
4134                 /* Convert offset to bytes. */
4135                 act_offset = (i + new_bank_offset) << 1;
4136
4137                 usec_delay(100);
4138
4139                 /* Write the data to the new bank. Offset in words*/
4140                 act_offset = i + new_bank_offset;
4141                 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4142                                                                 dword);
4143                 if (ret_val)
4144                         break;
4145          }
4146
4147         /* Don't bother writing the segment valid bits if sector
4148          * programming failed.
4149          */
4150         if (ret_val) {
4151                 DEBUGOUT("Flash commit failed.\n");
4152                 goto release;
4153         }
4154
4155         /* Finally validate the new segment by setting bit 15:14
4156          * to 10b in word 0x13 , this can be done without an
4157          * erase as well since these bits are 11 to start with
4158          * and we need to change bit 14 to 0b
4159          */
4160         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4161
4162         /*offset in words but we read dword*/
4163         --act_offset;
4164         ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4165
4166         if (ret_val)
4167                 goto release;
4168
4169         dword &= 0xBFFFFFFF;
4170         ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4171
4172         if (ret_val)
4173                 goto release;
4174
4175         /* And invalidate the previously valid segment by setting
4176          * its signature word (0x13) high_byte to 0b. This can be
4177          * done without an erase because flash erase sets all bits
4178          * to 1's. We can write 1's to 0's without an erase
4179          */
4180         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4181
4182         /* offset in words but we read dword*/
4183         act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4184         ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4185
4186         if (ret_val)
4187                 goto release;
4188
4189         dword &= 0x00FFFFFF;
4190         ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4191
4192         if (ret_val)
4193                 goto release;
4194
4195         /* Great!  Everything worked, we can now clear the cached entries. */
4196         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4197                 dev_spec->shadow_ram[i].modified = FALSE;
4198                 dev_spec->shadow_ram[i].value = 0xFFFF;
4199         }
4200
4201 release:
4202         nvm->ops.release(hw);
4203
4204         /* Reload the EEPROM, or else modifications will not appear
4205          * until after the next adapter reset.
4206          */
4207         if (!ret_val) {
4208                 nvm->ops.reload(hw);
4209                 msec_delay(10);
4210         }
4211
4212 out:
4213         if (ret_val)
4214                 DEBUGOUT1("NVM update error: %d\n", ret_val);
4215
4216         return ret_val;
4217 }
4218
4219 /**
4220  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4221  *  @hw: pointer to the HW structure
4222  *
4223  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4224  *  which writes the checksum to the shadow ram.  The changes in the shadow
4225  *  ram are then committed to the EEPROM by processing each bank at a time
4226  *  checking for the modified bit and writing only the pending changes.
4227  *  After a successful commit, the shadow ram is cleared and is ready for
4228  *  future writes.
4229  **/
4230 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4231 {
4232         struct e1000_nvm_info *nvm = &hw->nvm;
4233         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4234         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4235         s32 ret_val;
4236         u16 data = 0;
4237
4238         DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4239
4240         ret_val = e1000_update_nvm_checksum_generic(hw);
4241         if (ret_val)
4242                 goto out;
4243
4244         if (nvm->type != e1000_nvm_flash_sw)
4245                 goto out;
4246
4247         nvm->ops.acquire(hw);
4248
4249         /* We're writing to the opposite bank so if we're on bank 1,
4250          * write to bank 0 etc.  We also need to erase the segment that
4251          * is going to be written
4252          */
4253         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4254         if (ret_val != E1000_SUCCESS) {
4255                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4256                 bank = 0;
4257         }
4258
4259         if (bank == 0) {
4260                 new_bank_offset = nvm->flash_bank_size;
4261                 old_bank_offset = 0;
4262                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4263                 if (ret_val)
4264                         goto release;
4265         } else {
4266                 old_bank_offset = nvm->flash_bank_size;
4267                 new_bank_offset = 0;
4268                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4269                 if (ret_val)
4270                         goto release;
4271         }
4272         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4273                 if (dev_spec->shadow_ram[i].modified) {
4274                         data = dev_spec->shadow_ram[i].value;
4275                 } else {
4276                         ret_val = e1000_read_flash_word_ich8lan(hw, i +
4277                                                                 old_bank_offset,
4278                                                                 &data);
4279                         if (ret_val)
4280                                 break;
4281                 }
4282                 /* If the word is 0x13, then make sure the signature bits
4283                  * (15:14) are 11b until the commit has completed.
4284                  * This will allow us to write 10b which indicates the
4285                  * signature is valid.  We want to do this after the write
4286                  * has completed so that we don't mark the segment valid
4287                  * while the write is still in progress
4288                  */
4289                 if (i == E1000_ICH_NVM_SIG_WORD)
4290                         data |= E1000_ICH_NVM_SIG_MASK;
4291
4292                 /* Convert offset to bytes. */
4293                 act_offset = (i + new_bank_offset) << 1;
4294
4295                 usec_delay(100);
4296
4297                 /* Write the bytes to the new bank. */
4298                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4299                                                                act_offset,
4300                                                                (u8)data);
4301                 if (ret_val)
4302                         break;
4303
4304                 usec_delay(100);
4305                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4306                                                           act_offset + 1,
4307                                                           (u8)(data >> 8));
4308                 if (ret_val)
4309                         break;
4310          }
4311
4312         /* Don't bother writing the segment valid bits if sector
4313          * programming failed.
4314          */
4315         if (ret_val) {
4316                 DEBUGOUT("Flash commit failed.\n");
4317                 goto release;
4318         }
4319
4320         /* Finally validate the new segment by setting bit 15:14
4321          * to 10b in word 0x13 , this can be done without an
4322          * erase as well since these bits are 11 to start with
4323          * and we need to change bit 14 to 0b
4324          */
4325         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4326         ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4327         if (ret_val)
4328                 goto release;
4329
4330         data &= 0xBFFF;
4331         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4332                                                        (u8)(data >> 8));
4333         if (ret_val)
4334                 goto release;
4335
4336         /* And invalidate the previously valid segment by setting
4337          * its signature word (0x13) high_byte to 0b. This can be
4338          * done without an erase because flash erase sets all bits
4339          * to 1's. We can write 1's to 0's without an erase
4340          */
4341         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4342
4343         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4344
4345         if (ret_val)
4346                 goto release;
4347
4348         /* Great!  Everything worked, we can now clear the cached entries. */
4349         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4350                 dev_spec->shadow_ram[i].modified = FALSE;
4351                 dev_spec->shadow_ram[i].value = 0xFFFF;
4352         }
4353
4354 release:
4355         nvm->ops.release(hw);
4356
4357         /* Reload the EEPROM, or else modifications will not appear
4358          * until after the next adapter reset.
4359          */
4360         if (!ret_val) {
4361                 nvm->ops.reload(hw);
4362                 msec_delay(10);
4363         }
4364
4365 out:
4366         if (ret_val)
4367                 DEBUGOUT1("NVM update error: %d\n", ret_val);
4368
4369         return ret_val;
4370 }
4371
4372 /**
4373  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4374  *  @hw: pointer to the HW structure
4375  *
4376  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4377  *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
4378  *  calculated, in which case we need to calculate the checksum and set bit 6.
4379  **/
4380 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4381 {
4382         s32 ret_val;
4383         u16 data;
4384         u16 word;
4385         u16 valid_csum_mask;
4386
4387         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4388
4389         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4390          * the checksum needs to be fixed.  This bit is an indication that
4391          * the NVM was prepared by OEM software and did not calculate
4392          * the checksum...a likely scenario.
4393          */
4394         switch (hw->mac.type) {
4395         case e1000_pch_lpt:
4396         case e1000_pch_spt:
4397         case e1000_pch_cnp:
4398                 word = NVM_COMPAT;
4399                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4400                 break;
4401         default:
4402                 word = NVM_FUTURE_INIT_WORD1;
4403                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4404                 break;
4405         }
4406
4407         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4408         if (ret_val)
4409                 return ret_val;
4410
4411         if (!(data & valid_csum_mask)) {
4412                 data |= valid_csum_mask;
4413                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4414                 if (ret_val)
4415                         return ret_val;
4416                 ret_val = hw->nvm.ops.update(hw);
4417                 if (ret_val)
4418                         return ret_val;
4419         }
4420
4421         return e1000_validate_nvm_checksum_generic(hw);
4422 }
4423
4424 /**
4425  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4426  *  @hw: pointer to the HW structure
4427  *  @offset: The offset (in bytes) of the byte/word to read.
4428  *  @size: Size of data to read, 1=byte 2=word
4429  *  @data: The byte(s) to write to the NVM.
4430  *
4431  *  Writes one/two bytes to the NVM using the flash access registers.
4432  **/
static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 size, u16 data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val;
	u8 count = 0;

	DEBUGFUNC("e1000_write_ich8_data");

	/* Validate the request.  SPT and newer access the flash through
	 * LAN memory space and only allow 4-byte cycles on this path;
	 * older parts accept 1- or 2-byte writes.
	 * NOTE(review): on the SPT path @data is only 16 bits wide, so a
	 * size==4 request can carry at most the low word - callers that
	 * need a full dword appear to use
	 * e1000_write_flash_data32_ich8lan() instead; confirm at call sites.
	 */
	if (hw->mac.type >= e1000_pch_spt) {
		if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
			return -E1000_ERR_NVM;
	} else {
		if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
			return -E1000_ERR_NVM;
	}

	/* Translate the byte offset into a linear flash address. */
	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);

	do {
		usec_delay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != E1000_SUCCESS)
			break;
		/* In SPT, This register is in Lan memory space, not
		 * flash.  Therefore, only 32 bit access is supported
		 * (HSFCTL lives in the upper 16 bits of HSFSTS).
		 */
		if (hw->mac.type >= e1000_pch_spt)
			hsflctl.regval =
			    E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
		else
			hsflctl.regval =
			    E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);

		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
		/* In SPT, This register is in Lan memory space,
		 * not flash.  Therefore, only 32 bit access is
		 * supported
		 */
		if (hw->mac.type >= e1000_pch_spt)
			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
					      hsflctl.regval << 16);
		else
			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
						hsflctl.regval);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		/* Load the data to be written into FDATA0. */
		if (size == 1)
			flash_data = (u32)data & 0x00FF;
		else
			flash_data = (u32)data;

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);

		/* check if FCERR is set to 1 , if set to 1, clear it
		 * and try the whole sequence a few more times else done
		 */
		ret_val =
		    e1000_flash_cycle_ich8lan(hw,
					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
		if (ret_val == E1000_SUCCESS)
			break;

		/* If we're here, then things are most likely
		 * completely hosed, but if the error condition
		 * is detected, it won't hurt to give it another
		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
		 */
		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcerr)
			/* Repeat for some time before giving up. */
			continue;
		if (!hsfsts.hsf_status.flcdone) {
			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
			break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
4521
/**
 *  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the dword to write.
 *  @data: The 4 bytes to write to the NVM.
 *
 *  Writes one dword to the NVM using the flash access registers.
 *  Returns E1000_SUCCESS on success, -E1000_ERR_NVM otherwise.
 **/
static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
					    u32 data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	s32 ret_val;
	u8 count = 0;

	DEBUGFUNC("e1000_write_flash_data32_ich8lan");

	/* NOTE(review): the offset bounds check is only applied on SPT and
	 * newer; pre-SPT callers reach the masked address computation
	 * unchecked - presumably this path is SPT-only, verify callers.
	 */
	if (hw->mac.type >= e1000_pch_spt) {
		if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
			return -E1000_ERR_NVM;
	}
	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);
	do {
		usec_delay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val != E1000_SUCCESS)
			break;

		/* In SPT, This register is in Lan memory space, not
		 * flash.  Therefore, only 32 bit access is supported
		 * (HSFCTL lives in the upper 16 bits of HSFSTS).
		 */
		if (hw->mac.type >= e1000_pch_spt)
			hsflctl.regval = E1000_READ_FLASH_REG(hw,
							      ICH_FLASH_HSFSTS)
					 >> 16;
		else
			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
							      ICH_FLASH_HSFCTL);

		/* fldbcount is (byte count - 1); program a 4-byte write. */
		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;

		/* In SPT, This register is in Lan memory space,
		 * not flash.  Therefore, only 32 bit access is
		 * supported
		 */
		if (hw->mac.type >= e1000_pch_spt)
			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
					      hsflctl.regval << 16);
		else
			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
						hsflctl.regval);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);

		/* check if FCERR is set to 1 , if set to 1, clear it
		 * and try the whole sequence a few more times else done
		 */
		ret_val = e1000_flash_cycle_ich8lan(hw,
					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);

		if (ret_val == E1000_SUCCESS)
			break;

		/* If we're here, then things are most likely
		 * completely hosed, but if the error condition
		 * is detected, it won't hurt to give it another
		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
		 */
		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);

		if (hsfsts.hsf_status.flcerr)
			/* Repeat for some time before giving up. */
			continue;
		if (!hsfsts.hsf_status.flcdone) {
			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
			break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
4610
4611 /**
4612  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4613  *  @hw: pointer to the HW structure
4614  *  @offset: The index of the byte to read.
4615  *  @data: The byte to write to the NVM.
4616  *
4617  *  Writes a single byte to the NVM using the flash access registers.
4618  **/
4619 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4620                                           u8 data)
4621 {
4622         u16 word = (u16)data;
4623
4624         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4625
4626         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4627 }
4628
4629 /**
4630 *  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4631 *  @hw: pointer to the HW structure
4632 *  @offset: The offset of the word to write.
4633 *  @dword: The dword to write to the NVM.
4634 *
4635 *  Writes a single dword to the NVM using the flash access registers.
4636 *  Goes through a retry algorithm before giving up.
4637 **/
4638 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4639                                                  u32 offset, u32 dword)
4640 {
4641         s32 ret_val;
4642         u16 program_retries;
4643
4644         DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4645
4646         /* Must convert word offset into bytes. */
4647         offset <<= 1;
4648
4649         ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4650
4651         if (!ret_val)
4652                 return ret_val;
4653         for (program_retries = 0; program_retries < 100; program_retries++) {
4654                 DEBUGOUT2("Retrying Byte %8.8X at offset %u\n", dword, offset);
4655                 usec_delay(100);
4656                 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4657                 if (ret_val == E1000_SUCCESS)
4658                         break;
4659         }
4660         if (program_retries == 100)
4661                 return -E1000_ERR_NVM;
4662
4663         return E1000_SUCCESS;
4664 }
4665
4666 /**
4667  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4668  *  @hw: pointer to the HW structure
4669  *  @offset: The offset of the byte to write.
4670  *  @byte: The byte to write to the NVM.
4671  *
4672  *  Writes a single byte to the NVM using the flash access registers.
4673  *  Goes through a retry algorithm before giving up.
4674  **/
4675 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4676                                                 u32 offset, u8 byte)
4677 {
4678         s32 ret_val;
4679         u16 program_retries;
4680
4681         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4682
4683         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4684         if (!ret_val)
4685                 return ret_val;
4686
4687         for (program_retries = 0; program_retries < 100; program_retries++) {
4688                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4689                 usec_delay(100);
4690                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4691                 if (ret_val == E1000_SUCCESS)
4692                         break;
4693         }
4694         if (program_retries == 100)
4695                 return -E1000_ERR_NVM;
4696
4697         return E1000_SUCCESS;
4698 }
4699
4700 /**
4701  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4702  *  @hw: pointer to the HW structure
4703  *  @bank: 0 for first bank, 1 for second bank, etc.
4704  *
4705  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4706  *  bank N is 4096 * N + flash_reg_addr.
4707  **/
4708 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4709 {
4710         struct e1000_nvm_info *nvm = &hw->nvm;
4711         union ich8_hws_flash_status hsfsts;
4712         union ich8_hws_flash_ctrl hsflctl;
4713         u32 flash_linear_addr;
4714         /* bank size is in 16bit words - adjust to bytes */
4715         u32 flash_bank_size = nvm->flash_bank_size * 2;
4716         s32 ret_val;
4717         s32 count = 0;
4718         s32 j, iteration, sector_size;
4719
4720         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4721
4722         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4723
4724         /* Determine HW Sector size: Read BERASE bits of hw flash status
4725          * register
4726          * 00: The Hw sector is 256 bytes, hence we need to erase 16
4727          *     consecutive sectors.  The start index for the nth Hw sector
4728          *     can be calculated as = bank * 4096 + n * 256
4729          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4730          *     The start index for the nth Hw sector can be calculated
4731          *     as = bank * 4096
4732          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4733          *     (ich9 only, otherwise error condition)
4734          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4735          */
4736         switch (hsfsts.hsf_status.berasesz) {
4737         case 0:
4738                 /* Hw sector size 256 */
4739                 sector_size = ICH_FLASH_SEG_SIZE_256;
4740                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4741                 break;
4742         case 1:
4743                 sector_size = ICH_FLASH_SEG_SIZE_4K;
4744                 iteration = 1;
4745                 break;
4746         case 2:
4747                 sector_size = ICH_FLASH_SEG_SIZE_8K;
4748                 iteration = 1;
4749                 break;
4750         case 3:
4751                 sector_size = ICH_FLASH_SEG_SIZE_64K;
4752                 iteration = 1;
4753                 break;
4754         default:
4755                 return -E1000_ERR_NVM;
4756         }
4757
4758         /* Start with the base address, then add the sector offset. */
4759         flash_linear_addr = hw->nvm.flash_base_addr;
4760         flash_linear_addr += (bank) ? flash_bank_size : 0;
4761
4762         for (j = 0; j < iteration; j++) {
4763                 do {
4764                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4765
4766                         /* Steps */
4767                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
4768                         if (ret_val)
4769                                 return ret_val;
4770
4771                         /* Write a value 11 (block Erase) in Flash
4772                          * Cycle field in hw flash control
4773                          */
4774                         if (hw->mac.type >= e1000_pch_spt)
4775                                 hsflctl.regval =
4776                                     E1000_READ_FLASH_REG(hw,
4777                                                          ICH_FLASH_HSFSTS)>>16;
4778                         else
4779                                 hsflctl.regval =
4780                                     E1000_READ_FLASH_REG16(hw,
4781                                                            ICH_FLASH_HSFCTL);
4782
4783                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4784                         if (hw->mac.type >= e1000_pch_spt)
4785                                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4786                                                       hsflctl.regval << 16);
4787                         else
4788                                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4789                                                         hsflctl.regval);
4790
4791                         /* Write the last 24 bits of an index within the
4792                          * block into Flash Linear address field in Flash
4793                          * Address.
4794                          */
4795                         flash_linear_addr += (j * sector_size);
4796                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4797                                               flash_linear_addr);
4798
4799                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4800                         if (ret_val == E1000_SUCCESS)
4801                                 break;
4802
4803                         /* Check if FCERR is set to 1.  If 1,
4804                          * clear it and try the whole sequence
4805                          * a few more times else Done
4806                          */
4807                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4808                                                       ICH_FLASH_HSFSTS);
4809                         if (hsfsts.hsf_status.flcerr)
4810                                 /* repeat for some time before giving up */
4811                                 continue;
4812                         else if (!hsfsts.hsf_status.flcdone)
4813                                 return ret_val;
4814                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4815         }
4816
4817         return E1000_SUCCESS;
4818 }
4819
4820 /**
4821  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4822  *  @hw: pointer to the HW structure
4823  *  @data: Pointer to the LED settings
4824  *
4825  *  Reads the LED default settings from the NVM to data.  If the NVM LED
4826  *  settings is all 0's or F's, set the LED default to a valid LED default
4827  *  setting.
4828  **/
4829 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4830 {
4831         s32 ret_val;
4832
4833         DEBUGFUNC("e1000_valid_led_default_ich8lan");
4834
4835         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4836         if (ret_val) {
4837                 DEBUGOUT("NVM Read Error\n");
4838                 return ret_val;
4839         }
4840
4841         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4842                 *data = ID_LED_DEFAULT_ICH8LAN;
4843
4844         return E1000_SUCCESS;
4845 }
4846
4847 /**
4848  *  e1000_id_led_init_pchlan - store LED configurations
4849  *  @hw: pointer to the HW structure
4850  *
4851  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4852  *  the PHY LED configuration register.
4853  *
4854  *  PCH also does not have an "always on" or "always off" mode which
4855  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4856  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4857  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4858  *  link based on logic in e1000_led_[on|off]_pchlan().
4859  **/
4860 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4861 {
4862         struct e1000_mac_info *mac = &hw->mac;
4863         s32 ret_val;
4864         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4865         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4866         u16 data, i, temp, shift;
4867
4868         DEBUGFUNC("e1000_id_led_init_pchlan");
4869
4870         /* Get default ID LED modes */
4871         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4872         if (ret_val)
4873                 return ret_val;
4874
4875         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4876         mac->ledctl_mode1 = mac->ledctl_default;
4877         mac->ledctl_mode2 = mac->ledctl_default;
4878
4879         for (i = 0; i < 4; i++) {
4880                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4881                 shift = (i * 5);
4882                 switch (temp) {
4883                 case ID_LED_ON1_DEF2:
4884                 case ID_LED_ON1_ON2:
4885                 case ID_LED_ON1_OFF2:
4886                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4887                         mac->ledctl_mode1 |= (ledctl_on << shift);
4888                         break;
4889                 case ID_LED_OFF1_DEF2:
4890                 case ID_LED_OFF1_ON2:
4891                 case ID_LED_OFF1_OFF2:
4892                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4893                         mac->ledctl_mode1 |= (ledctl_off << shift);
4894                         break;
4895                 default:
4896                         /* Do nothing */
4897                         break;
4898                 }
4899                 switch (temp) {
4900                 case ID_LED_DEF1_ON2:
4901                 case ID_LED_ON1_ON2:
4902                 case ID_LED_OFF1_ON2:
4903                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4904                         mac->ledctl_mode2 |= (ledctl_on << shift);
4905                         break;
4906                 case ID_LED_DEF1_OFF2:
4907                 case ID_LED_ON1_OFF2:
4908                 case ID_LED_OFF1_OFF2:
4909                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4910                         mac->ledctl_mode2 |= (ledctl_off << shift);
4911                         break;
4912                 default:
4913                         /* Do nothing */
4914                         break;
4915                 }
4916         }
4917
4918         return E1000_SUCCESS;
4919 }
4920
4921 /**
4922  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4923  *  @hw: pointer to the HW structure
4924  *
4925  *  ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4926  *  register, so the the bus width is hard coded.
4927  **/
4928 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4929 {
4930         struct e1000_bus_info *bus = &hw->bus;
4931         s32 ret_val;
4932
4933         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4934
4935         ret_val = e1000_get_bus_info_pcie_generic(hw);
4936
4937         /* ICH devices are "PCI Express"-ish.  They have
4938          * a configuration space, but do not contain
4939          * PCI Express Capability registers, so bus width
4940          * must be hardcoded.
4941          */
4942         if (bus->width == e1000_bus_width_unknown)
4943                 bus->width = e1000_bus_width_pcie_x1;
4944
4945         return ret_val;
4946 }
4947
/**
 *  e1000_reset_hw_ich8lan - Reset the hardware
 *  @hw: pointer to the HW structure
 *
 *  Does a full reset of the hardware which includes a reset of the PHY and
 *  MAC.  Returns E1000_SUCCESS, or a negative error code if the NVM read,
 *  PHY configuration wait, or post-PHY-reset workarounds fail.
 **/
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 kum_cfg;
	u32 ctrl, reg;
	s32 ret_val;

	DEBUGFUNC("e1000_reset_hw_ich8lan");

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 * A failure here is logged but deliberately not fatal.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);

	/* Disable the Transmit and Receive units.  Then delay to allow
	 * any pending transactions to complete before we hit the MAC
	 * with the global reset.
	 */
	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	msec_delay(10);

	/* Workaround for ICH8 bit corruption issue in FIFO memory */
	if (hw->mac.type == e1000_ich8lan) {
		/* Set Tx and Rx buffer allocation to 8k apiece. */
		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
		/* Set Packet Buffer Size to 16k. */
		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
	}

	if (hw->mac.type == e1000_pchlan) {
		/* Save the NVM K1 bit setting so it survives the reset. */
		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
		if (ret_val)
			return ret_val;

		if (kum_cfg & E1000_NVM_K1_ENABLE)
			dev_spec->nvm_k1_enabled = TRUE;
		else
			dev_spec->nvm_k1_enabled = FALSE;
	}

	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	if (!hw->phy.ops.check_reset_block(hw)) {
		/* Full-chip reset requires MAC and PHY reset at the same
		 * time to make sure the interface between MAC and the
		 * external PHY is reset.
		 */
		ctrl |= E1000_CTRL_PHY_RST;

		/* Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if ((hw->mac.type == e1000_pch2lan) &&
		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
	}
	/* Acquire the SW flag before issuing the global reset; the result
	 * decides below whether the flag mutex must be released.
	 */
	ret_val = e1000_acquire_swflag_ich8lan(hw);
	DEBUGOUT("Issuing a global reset to ich8lan\n");
	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
	/* cannot issue a flush here because it hangs the hardware */
	msec_delay(20);

	/* Set Phy Config Counter to 50msec */
	if (hw->mac.type == e1000_pch2lan) {
		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
	}

	/* Only release the SW flag mutex if the acquire succeeded. */
	if (!ret_val)
		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);

	/* If the PHY was reset together with the MAC, wait for the PHY
	 * configuration to finish and run the post-reset workarounds.
	 */
	if (ctrl & E1000_CTRL_PHY_RST) {
		ret_val = hw->phy.ops.get_cfg_done(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1000_post_phy_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* For PCH, this write will make sure that any noise
	 * will be detected as a CRC error and be dropped rather than show up
	 * as a bad packet to the DMA engine.
	 */
	if (hw->mac.type == e1000_pchlan)
		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);

	/* Mask and clear any reset-generated interrupts. */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_READ_REG(hw, E1000_ICR);

	reg = E1000_READ_REG(hw, E1000_KABGTXD);
	reg |= E1000_KABGTXD_BGSQLBIAS;
	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);

	return E1000_SUCCESS;
}
5063
5064 /**
5065  *  e1000_init_hw_ich8lan - Initialize the hardware
5066  *  @hw: pointer to the HW structure
5067  *
5068  *  Prepares the hardware for transmit and receive by doing the following:
5069  *   - initialize hardware bits
5070  *   - initialize LED identification
5071  *   - setup receive address registers
5072  *   - setup flow control
5073  *   - setup transmit descriptors
5074  *   - clear statistics
5075  **/
5076 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
5077 {
5078         struct e1000_mac_info *mac = &hw->mac;
5079         u32 ctrl_ext, txdctl, snoop;
5080         s32 ret_val;
5081         u16 i;
5082
5083         DEBUGFUNC("e1000_init_hw_ich8lan");
5084
5085         e1000_initialize_hw_bits_ich8lan(hw);
5086
5087         /* Initialize identification LED */
5088         ret_val = mac->ops.id_led_init(hw);
5089         /* An error is not fatal and we should not stop init due to this */
5090         if (ret_val)
5091                 DEBUGOUT("Error initializing identification LED\n");
5092
5093         /* Setup the receive address. */
5094         e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
5095
5096         /* Zero out the Multicast HASH table */
5097         DEBUGOUT("Zeroing the MTA\n");
5098         for (i = 0; i < mac->mta_reg_count; i++)
5099                 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
5100
5101         /* The 82578 Rx buffer will stall if wakeup is enabled in host and
5102          * the ME.  Disable wakeup by clearing the host wakeup bit.
5103          * Reset the phy after disabling host wakeup to reset the Rx buffer.
5104          */
5105         if (hw->phy.type == e1000_phy_82578) {
5106                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
5107                 i &= ~BM_WUC_HOST_WU_BIT;
5108                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
5109                 ret_val = e1000_phy_hw_reset_ich8lan(hw);
5110                 if (ret_val)
5111                         return ret_val;
5112         }
5113
5114         /* Setup link and flow control */
5115         ret_val = mac->ops.setup_link(hw);
5116
5117         /* Set the transmit descriptor write-back policy for both queues */
5118         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
5119         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5120                   E1000_TXDCTL_FULL_TX_DESC_WB);
5121         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5122                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5123         E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
5124         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
5125         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5126                   E1000_TXDCTL_FULL_TX_DESC_WB);
5127         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5128                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5129         E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
5130
5131         /* ICH8 has opposite polarity of no_snoop bits.
5132          * By default, we should use snoop behavior.
5133          */
5134         if (mac->type == e1000_ich8lan)
5135                 snoop = PCIE_ICH8_SNOOP_ALL;
5136         else
5137                 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
5138         e1000_set_pcie_no_snoop_generic(hw, snoop);
5139
5140         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5141         ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
5142         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5143
5144         /* Clear all of the statistics registers (clear on read).  It is
5145          * important that we do this after we have tried to establish link
5146          * because the symbol error count will increment wildly if there
5147          * is no link.
5148          */
5149         e1000_clear_hw_cntrs_ich8lan(hw);
5150
5151         return ret_val;
5152 }
5153
5154 /**
5155  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5156  *  @hw: pointer to the HW structure
5157  *
5158  *  Sets/Clears required hardware bits necessary for correctly setting up the
5159  *  hardware for transmit and receive.
5160  **/
5161 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5162 {
5163         u32 reg;
5164
5165         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5166
5167         /* Extended Device Control */
5168         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5169         reg |= (1 << 22);
5170         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
5171         if (hw->mac.type >= e1000_pchlan)
5172                 reg |= E1000_CTRL_EXT_PHYPDEN;
5173         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5174
5175         /* Transmit Descriptor Control 0 */
5176         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5177         reg |= (1 << 22);
5178         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5179
5180         /* Transmit Descriptor Control 1 */
5181         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5182         reg |= (1 << 22);
5183         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5184
5185         /* Transmit Arbitration Control 0 */
5186         reg = E1000_READ_REG(hw, E1000_TARC(0));
5187         if (hw->mac.type == e1000_ich8lan)
5188                 reg |= (1 << 28) | (1 << 29);
5189         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5190         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5191
5192         /* Transmit Arbitration Control 1 */
5193         reg = E1000_READ_REG(hw, E1000_TARC(1));
5194         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5195                 reg &= ~(1 << 28);
5196         else
5197                 reg |= (1 << 28);
5198         reg |= (1 << 24) | (1 << 26) | (1 << 30);
5199         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5200
5201         /* Device Status */
5202         if (hw->mac.type == e1000_ich8lan) {
5203                 reg = E1000_READ_REG(hw, E1000_STATUS);
5204                 reg &= ~(1U << 31);
5205                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
5206         }
5207
5208         /* work-around descriptor data corruption issue during nfs v2 udp
5209          * traffic, just disable the nfs filtering capability
5210          */
5211         reg = E1000_READ_REG(hw, E1000_RFCTL);
5212         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5213
5214         /* Disable IPv6 extension header parsing because some malformed
5215          * IPv6 headers can hang the Rx.
5216          */
5217         if (hw->mac.type == e1000_ich8lan)
5218                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5219         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5220
5221         /* Enable ECC on Lynxpoint */
5222         if (hw->mac.type >= e1000_pch_lpt) {
5223                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5224                 reg |= E1000_PBECCSTS_ECC_ENABLE;
5225                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5226
5227                 reg = E1000_READ_REG(hw, E1000_CTRL);
5228                 reg |= E1000_CTRL_MEHE;
5229                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
5230         }
5231
5232         return;
5233 }
5234
/**
 *  e1000_setup_link_ich8lan - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 *
 *  Returns E1000_SUCCESS, or a negative error code from the PHY/MAC ops.
 **/
static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_setup_link_ich8lan");

	/* If PHY resets are blocked there is nothing safe to configure;
	 * report success so callers do not treat this as a failure.
	 */
	if (hw->phy.ops.check_reset_block(hw))
		return E1000_SUCCESS;

	/* ICH parts do not have a word in the NVM to determine
	 * the default flow control setting, so we explicitly
	 * set it to full.
	 */
	if (hw->fc.requested_mode == e1000_fc_default)
		hw->fc.requested_mode = e1000_fc_full;

	/* Save off the requested flow control mode for use later.  Depending
	 * on the link partner's capabilities, we may or may not use this mode.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
		hw->fc.current_mode);

	/* Continue to configure the copper link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		return ret_val;

	/* Write the pause time into the flow control transmit timer value
	 * register.
	 */
	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
	/* On the PCH-class PHYs the refresh time and pause time must also
	 * be mirrored into MAC/PHY side registers (port control page reg 27).
	 */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);

		ret_val = hw->phy.ops.write_reg(hw,
					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
					     hw->fc.pause_time);
		if (ret_val)
			return ret_val;
	}

	return e1000_set_fc_watermarks_generic(hw);
}
5290
/**
 *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
 *  @hw: pointer to the HW structure
 *
 *  Configures the kumeran interface to the PHY to wait the appropriate time
 *  when polling the PHY, then call the generic setup_copper_link to finish
 *  configuring the copper link.
 *
 *  Returns E1000_SUCCESS, or a negative error code from the Kumeran/PHY
 *  accessors.
 **/
static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u16 reg_data;

	DEBUGFUNC("e1000_setup_copper_link_ich8lan");

	/* Set link up and clear the force-speed/force-duplex bits so
	 * speed and duplex are resolved by the PHY, not forced by the MAC.
	 */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/* Set the mac to wait the maximum time between each iteration
	 * and increase the max iterations when polling the phy;
	 * this fixes erroneous timeouts at 10Mbps.
	 */
	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
					       0xFFFF);
	if (ret_val)
		return ret_val;
	/* Read-modify-write the Kumeran inband parameter register,
	 * setting the low six bits.
	 */
	ret_val = e1000_read_kmrn_reg_generic(hw,
					      E1000_KMRNCTRLSTA_INBAND_PARAM,
					      &reg_data);
	if (ret_val)
		return ret_val;
	reg_data |= 0x3F;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_INBAND_PARAM,
					       reg_data);
	if (ret_val)
		return ret_val;

	/* Run the PHY-family-specific setup before the generic copper
	 * link configuration.
	 */
	switch (hw->phy.type) {
	case e1000_phy_igp_3:
		ret_val = e1000_copper_link_setup_igp(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_bm:
	case e1000_phy_82578:
		ret_val = e1000_copper_link_setup_m88(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_82577:
	case e1000_phy_82579:
		ret_val = e1000_copper_link_setup_82577(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_ife:
		/* Translate the requested MDI setting (hw->phy.mdix:
		 * 1 = force MDI, 2 = force MDI-X, 0/other = auto) into
		 * the IFE PHY's MDI-X control register.
		 */
		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
					       &reg_data);
		if (ret_val)
			return ret_val;

		reg_data &= ~IFE_PMC_AUTO_MDIX;

		switch (hw->phy.mdix) {
		case 1:
			reg_data &= ~IFE_PMC_FORCE_MDIX;
			break;
		case 2:
			reg_data |= IFE_PMC_FORCE_MDIX;
			break;
		case 0:
		default:
			reg_data |= IFE_PMC_AUTO_MDIX;
			break;
		}
		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
						reg_data);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	return e1000_setup_copper_link_generic(hw);
}
5381
5382 /**
5383  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5384  *  @hw: pointer to the HW structure
5385  *
5386  *  Calls the PHY specific link setup function and then calls the
5387  *  generic setup_copper_link to finish configuring the link for
5388  *  Lynxpoint PCH devices
5389  **/
5390 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5391 {
5392         u32 ctrl;
5393         s32 ret_val;
5394
5395         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5396
5397         ctrl = E1000_READ_REG(hw, E1000_CTRL);
5398         ctrl |= E1000_CTRL_SLU;
5399         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5400         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5401
5402         ret_val = e1000_copper_link_setup_82577(hw);
5403         if (ret_val)
5404                 return ret_val;
5405
5406         return e1000_setup_copper_link_generic(hw);
5407 }
5408
5409 /**
5410  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5411  *  @hw: pointer to the HW structure
5412  *  @speed: pointer to store current link speed
5413  *  @duplex: pointer to store the current link duplex
5414  *
5415  *  Calls the generic get_speed_and_duplex to retrieve the current link
5416  *  information and then calls the Kumeran lock loss workaround for links at
5417  *  gigabit speeds.
5418  **/
5419 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5420                                           u16 *duplex)
5421 {
5422         s32 ret_val;
5423
5424         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5425
5426         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5427         if (ret_val)
5428                 return ret_val;
5429
5430         if ((hw->mac.type == e1000_ich8lan) &&
5431             (hw->phy.type == e1000_phy_igp_3) &&
5432             (*speed == SPEED_1000)) {
5433                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5434         }
5435
5436         return ret_val;
5437 }
5438
5439 /**
5440  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5441  *  @hw: pointer to the HW structure
5442  *
5443  *  Work-around for 82566 Kumeran PCS lock loss:
5444  *  On link status change (i.e. PCI reset, speed change) and link is up and
5445  *  speed is gigabit-
5446  *    0) if workaround is optionally disabled do nothing
5447  *    1) wait 1ms for Kumeran link to come up
5448  *    2) check Kumeran Diagnostic register PCS lock loss bit
5449  *    3) if not set the link is locked (all is good), otherwise...
5450  *    4) reset the PHY
5451  *    5) repeat up to 10 times
5452  *  Note: this is only called for IGP3 copper when speed is 1gb.
5453  **/
5454 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5455 {
5456         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5457         u32 phy_ctrl;
5458         s32 ret_val;
5459         u16 i, data;
5460         bool link;
5461
5462         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5463
5464         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5465                 return E1000_SUCCESS;
5466
5467         /* Make sure link is up before proceeding.  If not just return.
5468          * Attempting this while link is negotiating fouled up link
5469          * stability
5470          */
5471         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5472         if (!link)
5473                 return E1000_SUCCESS;
5474
5475         for (i = 0; i < 10; i++) {
5476                 /* read once to clear */
5477                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5478                 if (ret_val)
5479                         return ret_val;
5480                 /* and again to get new status */
5481                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5482                 if (ret_val)
5483                         return ret_val;
5484
5485                 /* check for PCS lock */
5486                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5487                         return E1000_SUCCESS;
5488
5489                 /* Issue PHY reset */
5490                 hw->phy.ops.reset(hw);
5491                 msec_delay_irq(5);
5492         }
5493         /* Disable GigE link negotiation */
5494         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5495         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5496                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5497         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5498
5499         /* Call gig speed drop workaround on Gig disable before accessing
5500          * any PHY registers
5501          */
5502         e1000_gig_downshift_workaround_ich8lan(hw);
5503
5504         /* unable to acquire PCS lock */
5505         return -E1000_ERR_PHY;
5506 }
5507
5508 /**
5509  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5510  *  @hw: pointer to the HW structure
5511  *  @state: boolean value used to set the current Kumeran workaround state
5512  *
5513  *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
5514  *  /disabled - FALSE).
5515  **/
5516 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5517                                                  bool state)
5518 {
5519         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5520
5521         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5522
5523         if (hw->mac.type != e1000_ich8lan) {
5524                 DEBUGOUT("Workaround applies to ICH8 only.\n");
5525                 return;
5526         }
5527
5528         dev_spec->kmrn_lock_loss_workaround_enabled = state;
5529
5530         return;
5531 }
5532
/**
 *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
 *  @hw: pointer to the HW structure
 *
 *  Workaround for 82566 power-down on D3 entry:
 *    1) disable gigabit link
 *    2) write VR power-down enable
 *    3) read it back
 *  Continue if successful, else issue LCD reset and repeat
 **/
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	u32 reg;
	u16 data;
	u8  retry = 0;

	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");

	/* Applies only to the IGP3 PHY. */
	if (hw->phy.type != e1000_phy_igp_3)
		return;

	/* Try the workaround twice (if needed) */
	do {
		/* Disable link */
		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);

		/* Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* Write VR power-down enable.
		 * NOTE(review): PHY read/write return values are ignored
		 * here; on an access failure 'data' may hold a stale value.
		 * Presumably best-effort by design since the read-back
		 * below gates success -- confirm before changing.
		 */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);

		/* Read it back and test */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
			break;

		/* Issue PHY reset and repeat at most one more time */
		reg = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
		retry++;
	} while (retry);
}
5586
/**
 *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
 *  @hw: pointer to the HW structure
 *
 *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
 *  LPLU, Gig disable, MDIC PHY reset):
 *    1) Set Kumeran Near-end loopback
 *    2) Clear Kumeran Near-end loopback
 *  Should only be called for ICH8[m] devices with any 1G Phy.
 **/
void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 reg_data;

	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");

	/* Only ICH8 MACs need this; the IFE PHY is excluded (per the
	 * header note, the workaround is for 1G PHYs).
	 */
	if ((hw->mac.type != e1000_ich8lan) ||
	    (hw->phy.type == e1000_phy_ife))
		return;

	/* Pulse the near-end loopback bit in the Kumeran diagnostic
	 * register: set it, then clear it.  Errors are silently dropped;
	 * this is a void best-effort workaround.
	 */
	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
					      &reg_data);
	if (ret_val)
		return;
	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
					       reg_data);
	if (ret_val)
		return;
	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
				     reg_data);
}
5622
/**
 *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
 *  @hw: pointer to the HW structure
 *
 *  During S0 to Sx transition, it is possible the link remains at gig
 *  instead of negotiating to a lower speed.  Before going to Sx, set
 *  'Gig Disable' to force link speed negotiation to a lower speed based on
 *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
 *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
 *  needs to be written.
 *  Parts that support (and are linked to a partner which support) EEE in
 *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
 *  than 10Mbps w/o EEE.
 **/
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");

	/* Accumulate PHY_CTRL changes in a local; the register is written
	 * exactly once, at the 'out' label below.
	 */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;

	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg, device_id = hw->device_id;

		/* On the listed I218 parts and SPT or newer MACs, stop
		 * requesting the PLL clock before entering Sx.
		 */
		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
		    (hw->mac.type >= e1000_pch_spt)) {
			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
		}

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;

		if (!dev_spec->eee_disable) {
			u16 eee_advert;

			ret_val =
			    e1000_read_emi_reg_locked(hw,
						      I217_EEE_ADVERTISEMENT,
						      &eee_advert);
			if (ret_val)
				goto release;

			/* Disable LPLU if both link partners support 100BaseT
			 * EEE and 100Full is advertised on both ends of the
			 * link, and enable Auto Enable LPI since there will
			 * be no driver to enable LPI while in Sx.
			 */
			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
			    (dev_spec->eee_lp_ability &
			     I82579_EEE_100_SUPPORTED) &&
			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
					      E1000_PHY_CTRL_NOND0A_LPLU);

				/* Set Auto Enable LPI after link up */
				hw->phy.ops.read_reg_locked(hw,
							    I217_LPI_GPIO_CTRL,
							    &phy_reg);
				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
				hw->phy.ops.write_reg_locked(hw,
							     I217_LPI_GPIO_CTRL,
							     phy_reg);
			}
		}

		/* For i217 Intel Rapid Start Technology support,
		 * when the system is going into Sx and no manageability engine
		 * is present, the driver must configure proxy to reset only on
		 * power good.  LPI (Low Power Idle) state must also reset only
		 * on power good, as well as the MTA (Multicast table array).
		 * The SMBus release must also be disabled on LCD reset.
		 */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Enable proxy to reset only on power good. */
			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
						    &phy_reg);
			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
						     phy_reg);

			/* Set bit enable LPI (EEE) to reset only on
			 * power good.
			 */
			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);

			/* Disable the SMB release on LCD reset. */
			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
		}

		/* Enable MTA to reset for Intel Rapid Start Technology
		 * Support
		 */
		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);

release:
		hw->phy.ops.release(hw);
	}
out:
	/* Commit the accumulated PHY_CTRL value (GbE disable set above,
	 * LPLU bits possibly cleared in the EEE path).
	 */
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	if (hw->mac.type == e1000_ich8lan)
		e1000_gig_downshift_workaround_ich8lan(hw);

	if (hw->mac.type >= e1000_pchlan) {
		e1000_oem_bits_config_ich8lan(hw, FALSE);

		/* Reset PHY to activate OEM bits on 82577/8 */
		if (hw->mac.type == e1000_pchlan)
			e1000_phy_hw_reset_generic(hw);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		e1000_write_smbus_addr(hw);
		hw->phy.ops.release(hw);
	}

	return;
}
5760
/**
 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
 *  @hw: pointer to the HW structure
 *
 *  During Sx to S0 transitions on non-managed devices or managed devices
 *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
 *  the PHY.
 *  On i217, setup Intel Rapid Start Technology.
 *
 *  NOTE(review): the declared return type is u32 while the values
 *  actually returned are s32 status codes (E1000_SUCCESS or negative
 *  errors) -- callers presumably treat it as a status; confirm before
 *  changing the signature.
 **/
u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_resume_workarounds_pchlan");
	/* Only PCH2 and newer parts need these workarounds. */
	if (hw->mac.type < e1000_pch2lan)
		return E1000_SUCCESS;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val) {
		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
		return ret_val;
	}

	/* For i217 Intel Rapid Start Technology support when the system
	 * is transitioning from Sx and no manageability engine is present
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val) {
			DEBUGOUT("Failed to setup iRST\n");
			return ret_val;
		}

		/* Clear Auto Enable LPI after link up */
		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);

		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		    E1000_ICH_FWSM_FW_VALID)) {
			/* Restore clear on SMB if no manageability engine
			 * is present
			 */
			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
							      &phy_reg);
			if (ret_val)
				goto release;
			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);

			/* Disable Proxy */
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
		}
		/* Enable reset on MTA */
		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
						      &phy_reg);
		if (ret_val)
			goto release;
		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
release:
		/* The PHY semaphore is released on both success and error
		 * paths; log the error, if any, before returning it.
		 */
		if (ret_val)
			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
		hw->phy.ops.release(hw);
		return ret_val;
	}
	return E1000_SUCCESS;
}
5834
5835 /**
5836  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5837  *  @hw: pointer to the HW structure
5838  *
5839  *  Return the LED back to the default configuration.
5840  **/
5841 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5842 {
5843         DEBUGFUNC("e1000_cleanup_led_ich8lan");
5844
5845         if (hw->phy.type == e1000_phy_ife)
5846                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5847                                              0);
5848
5849         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5850         return E1000_SUCCESS;
5851 }
5852
5853 /**
5854  *  e1000_led_on_ich8lan - Turn LEDs on
5855  *  @hw: pointer to the HW structure
5856  *
5857  *  Turn on the LEDs.
5858  **/
5859 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5860 {
5861         DEBUGFUNC("e1000_led_on_ich8lan");
5862
5863         if (hw->phy.type == e1000_phy_ife)
5864                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5865                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5866
5867         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5868         return E1000_SUCCESS;
5869 }
5870
5871 /**
5872  *  e1000_led_off_ich8lan - Turn LEDs off
5873  *  @hw: pointer to the HW structure
5874  *
5875  *  Turn off the LEDs.
5876  **/
5877 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5878 {
5879         DEBUGFUNC("e1000_led_off_ich8lan");
5880
5881         if (hw->phy.type == e1000_phy_ife)
5882                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5883                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5884
5885         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5886         return E1000_SUCCESS;
5887 }
5888
5889 /**
5890  *  e1000_setup_led_pchlan - Configures SW controllable LED
5891  *  @hw: pointer to the HW structure
5892  *
5893  *  This prepares the SW controllable LED for use.
5894  **/
5895 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5896 {
5897         DEBUGFUNC("e1000_setup_led_pchlan");
5898
5899         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5900                                      (u16)hw->mac.ledctl_mode1);
5901 }
5902
5903 /**
5904  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5905  *  @hw: pointer to the HW structure
5906  *
5907  *  Return the LED back to the default configuration.
5908  **/
5909 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5910 {
5911         DEBUGFUNC("e1000_cleanup_led_pchlan");
5912
5913         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5914                                      (u16)hw->mac.ledctl_default);
5915 }
5916
5917 /**
5918  *  e1000_led_on_pchlan - Turn LEDs on
5919  *  @hw: pointer to the HW structure
5920  *
5921  *  Turn on the LEDs.
5922  **/
5923 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5924 {
5925         u16 data = (u16)hw->mac.ledctl_mode2;
5926         u32 i, led;
5927
5928         DEBUGFUNC("e1000_led_on_pchlan");
5929
5930         /* If no link, then turn LED on by setting the invert bit
5931          * for each LED that's mode is "link_up" in ledctl_mode2.
5932          */
5933         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5934                 for (i = 0; i < 3; i++) {
5935                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5936                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5937                             E1000_LEDCTL_MODE_LINK_UP)
5938                                 continue;
5939                         if (led & E1000_PHY_LED0_IVRT)
5940                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5941                         else
5942                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5943                 }
5944         }
5945
5946         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5947 }
5948
5949 /**
5950  *  e1000_led_off_pchlan - Turn LEDs off
5951  *  @hw: pointer to the HW structure
5952  *
5953  *  Turn off the LEDs.
5954  **/
5955 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5956 {
5957         u16 data = (u16)hw->mac.ledctl_mode1;
5958         u32 i, led;
5959
5960         DEBUGFUNC("e1000_led_off_pchlan");
5961
5962         /* If no link, then turn LED off by clearing the invert bit
5963          * for each LED that's mode is "link_up" in ledctl_mode1.
5964          */
5965         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5966                 for (i = 0; i < 3; i++) {
5967                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5968                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5969                             E1000_LEDCTL_MODE_LINK_UP)
5970                                 continue;
5971                         if (led & E1000_PHY_LED0_IVRT)
5972                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5973                         else
5974                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5975                 }
5976         }
5977
5978         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5979 }
5980
5981 /**
5982  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5983  *  @hw: pointer to the HW structure
5984  *
5985  *  Read appropriate register for the config done bit for completion status
5986  *  and configure the PHY through s/w for EEPROM-less parts.
5987  *
5988  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5989  *  config done bit, so only an error is logged and continues.  If we were
5990  *  to return with error, EEPROM-less silicon would not be able to be reset
5991  *  or change link.
5992  **/
5993 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5994 {
5995         s32 ret_val = E1000_SUCCESS;
5996         u32 bank = 0;
5997         u32 status;
5998
5999         DEBUGFUNC("e1000_get_cfg_done_ich8lan");
6000
6001         e1000_get_cfg_done_generic(hw);
6002
6003         /* Wait for indication from h/w that it has completed basic config */
6004         if (hw->mac.type >= e1000_ich10lan) {
6005                 e1000_lan_init_done_ich8lan(hw);
6006         } else {
6007                 ret_val = e1000_get_auto_rd_done_generic(hw);
6008                 if (ret_val) {
6009                         /* When auto config read does not complete, do not
6010                          * return with an error. This can happen in situations
6011                          * where there is no eeprom and prevents getting link.
6012                          */
6013                         DEBUGOUT("Auto Read Done did not complete\n");
6014                         ret_val = E1000_SUCCESS;
6015                 }
6016         }
6017
6018         /* Clear PHY Reset Asserted bit */
6019         status = E1000_READ_REG(hw, E1000_STATUS);
6020         if (status & E1000_STATUS_PHYRA)
6021                 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
6022         else
6023                 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
6024
6025         /* If EEPROM is not marked present, init the IGP 3 PHY manually */
6026         if (hw->mac.type <= e1000_ich9lan) {
6027                 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
6028                     (hw->phy.type == e1000_phy_igp_3)) {
6029                         e1000_phy_init_script_igp3(hw);
6030                 }
6031         } else {
6032                 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
6033                         /* Maybe we should do a basic PHY config */
6034                         DEBUGOUT("EEPROM not present\n");
6035                         ret_val = -E1000_ERR_CONFIG;
6036                 }
6037         }
6038
6039         return ret_val;
6040 }
6041
6042 /**
6043  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
6044  * @hw: pointer to the HW structure
6045  *
6046  * In the case of a PHY power down to save power, or to turn off link during a
6047  * driver unload, or wake on lan is not enabled, remove the link.
6048  **/
6049 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
6050 {
6051         /* If the management interface is not enabled, then power down */
6052         if (!(hw->mac.ops.check_mng_mode(hw) ||
6053               hw->phy.ops.check_reset_block(hw)))
6054                 e1000_power_down_phy_copper(hw);
6055
6056         return;
6057 }
6058
6059 /**
6060  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
6061  *  @hw: pointer to the HW structure
6062  *
6063  *  Clears hardware counters specific to the silicon family and calls
6064  *  clear_hw_cntrs_generic to clear all general purpose counters.
6065  **/
6066 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
6067 {
6068         u16 phy_data;
6069         s32 ret_val;
6070
6071         DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
6072
6073         e1000_clear_hw_cntrs_base_generic(hw);
6074
6075         E1000_READ_REG(hw, E1000_ALGNERRC);
6076         E1000_READ_REG(hw, E1000_RXERRC);
6077         E1000_READ_REG(hw, E1000_TNCRS);
6078         E1000_READ_REG(hw, E1000_CEXTERR);
6079         E1000_READ_REG(hw, E1000_TSCTC);
6080         E1000_READ_REG(hw, E1000_TSCTFC);
6081
6082         E1000_READ_REG(hw, E1000_MGTPRC);
6083         E1000_READ_REG(hw, E1000_MGTPDC);
6084         E1000_READ_REG(hw, E1000_MGTPTC);
6085
6086         E1000_READ_REG(hw, E1000_IAC);
6087         E1000_READ_REG(hw, E1000_ICRXOC);
6088
6089         /* Clear PHY statistics registers */
6090         if ((hw->phy.type == e1000_phy_82578) ||
6091             (hw->phy.type == e1000_phy_82579) ||
6092             (hw->phy.type == e1000_phy_i217) ||
6093             (hw->phy.type == e1000_phy_82577)) {
6094                 ret_val = hw->phy.ops.acquire(hw);
6095                 if (ret_val)
6096                         return;
6097                 ret_val = hw->phy.ops.set_page(hw,
6098                                                HV_STATS_PAGE << IGP_PAGE_SHIFT);
6099                 if (ret_val)
6100                         goto release;
6101                 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
6102                 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
6103                 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
6104                 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
6105                 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
6106                 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
6107                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
6108                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
6109                 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
6110                 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
6111                 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
6112                 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
6113                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
6114                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
6115 release:
6116                 hw->phy.ops.release(hw);
6117         }
6118 }
6119