1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 /* 82562G 10/100 Network Connection
36 * 82562G-2 10/100 Network Connection
37 * 82562GT 10/100 Network Connection
38 * 82562GT-2 10/100 Network Connection
39 * 82562V 10/100 Network Connection
40 * 82562V-2 10/100 Network Connection
41 * 82566DC-2 Gigabit Network Connection
42 * 82566DC Gigabit Network Connection
43 * 82566DM-2 Gigabit Network Connection
44 * 82566DM Gigabit Network Connection
45 * 82566MC Gigabit Network Connection
46 * 82566MM Gigabit Network Connection
47 * 82567LM Gigabit Network Connection
48 * 82567LF Gigabit Network Connection
49 * 82567V Gigabit Network Connection
50 * 82567LM-2 Gigabit Network Connection
51 * 82567LF-2 Gigabit Network Connection
52 * 82567V-2 Gigabit Network Connection
53 * 82567LF-3 Gigabit Network Connection
54 * 82567LM-3 Gigabit Network Connection
55 * 82567LM-4 Gigabit Network Connection
56 * 82577LM Gigabit Network Connection
57 * 82577LC Gigabit Network Connection
58 * 82578DM Gigabit Network Connection
59 * 82578DC Gigabit Network Connection
60 * 82579LM Gigabit Network Connection
61 * 82579V Gigabit Network Connection
62 * Ethernet Connection I217-LM
63 * Ethernet Connection I217-V
64 * Ethernet Connection I218-V
65 * Ethernet Connection I218-LM
66 * Ethernet Connection (2) I218-LM
67 * Ethernet Connection (2) I218-V
68 * Ethernet Connection (3) I218-LM
69 * Ethernet Connection (3) I218-V
72 #include "e1000_api.h"
74 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
86 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
87 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
88 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
89 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
91 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
93 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
94 u16 words, u16 *data);
95 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
97 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98 u16 words, u16 *data);
99 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
102 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
104 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
105 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
106 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
107 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
108 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
109 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
110 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
111 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
112 u16 *speed, u16 *duplex);
113 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
114 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
115 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
116 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
117 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
118 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
119 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
120 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
121 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
122 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
123 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
124 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
125 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
126 u32 offset, u8 *data);
127 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
129 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
131 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
132 u32 offset, u32 *data);
133 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
134 u32 offset, u32 data);
135 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
136 u32 offset, u32 dword);
137 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
138 u32 offset, u16 *data);
139 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
140 u32 offset, u8 byte);
141 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
142 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
143 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
144 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
145 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
146 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
147 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
149 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
150 /* Offset 04h HSFSTS */
/* NOTE(review): this listing has lines elided (the struct wrapper/regval and
 * closing braces of the union are not visible); only comments edited here.
 */
151 union ich8_hws_flash_status {
153 u16 flcdone:1; /* bit 0 Flash Cycle Done */
154 u16 flcerr:1; /* bit 1 Flash Cycle Error */
155 u16 dael:1; /* bit 2 Direct Access error Log */
156 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
157 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
158 u16 reserved1:2; /* bit 7:6 Reserved */
159 u16 reserved2:6; /* bit 13:8 Reserved */
160 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
161 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
166 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
167 /* Offset 06h FLCTL */
/* NOTE(review): closing braces / regval member of this union are elided from
 * the listing; only comments edited here.
 */
168 union ich8_hws_flash_ctrl {
169 struct ich8_hsflctl {
170 u16 flcgo:1; /* 0 Flash Cycle Go */
171 u16 flcycle:2; /* 2:1 Flash Cycle */
172 u16 reserved:5; /* 7:3 Reserved */
173 u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
174 u16 flockdn:6; /* 15:10 Reserved */ /* NOTE(review): field named flockdn but commented Reserved -- confirm vs datasheet */
179 /* ICH Flash Region Access Permissions */
/* NOTE(review): struct wrapper and closing braces elided from this listing. */
180 union ich8_hws_flash_regacc {
182 u32 grra:8; /* 0:7 GbE region Read Access */
183 u32 grwa:8; /* 8:15 GbE region Write Access */
184 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
185 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
/* NOTE(review): several source lines (braces, local declarations, retry
 * bookkeeping) are elided from this listing; code left byte-identical.
 */
191 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
192 * @hw: pointer to the HW structure
194 * Test access to the PHY registers by reading the PHY ID registers.  If
195 * the PHY ID is already known (e.g. resume path) compare it with known ID,
196 * otherwise assume the read PHY ID is correct if it is valid.
198 * Assumes the sw/fw/hw semaphore is already acquired.
200 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
/* Read PHY_ID1/PHY_ID2 up to twice; 0xFFFF indicates a failed MDIO read. */
208 for (retry_count = 0; retry_count < 2; retry_count++) {
209 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
210 if (ret_val || (phy_reg == 0xFFFF))
212 phy_id = (u32)(phy_reg << 16);
214 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
215 if (ret_val || (phy_reg == 0xFFFF)) {
219 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
224 if (hw->phy.id == phy_id)
228 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
232 /* In case the PHY needs to be in mdio slow mode,
233 * set slow mode and try to get the PHY id again.
235 if (hw->mac.type < e1000_pch_lpt) {
236 hw->phy.ops.release(hw);
237 ret_val = e1000_set_mdio_slow_mode_hv(hw);
239 ret_val = e1000_get_phy_id(hw);
240 hw->phy.ops.acquire(hw);
246 if (hw->mac.type >= e1000_pch_lpt) {
247 /* Only unforce SMBus if ME is not active */
248 if (!(E1000_READ_REG(hw, E1000_FWSM) &
249 E1000_ICH_FWSM_FW_VALID)) {
250 /* Unforce SMBus mode in PHY */
251 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
252 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
253 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
255 /* Unforce SMBus mode in MAC */
256 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
257 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
258 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
/* NOTE(review): delay statements and braces between the register writes are
 * elided from this listing; code left byte-identical.
 */
266 * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
267 * @hw: pointer to the HW structure
269 * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
270 * used to reset the PHY to a quiescent state when necessary.
272 static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
276 DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
278 /* Set Phy Config Counter to 50msec */
279 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
280 mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
281 mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
282 E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
284 /* Toggle LANPHYPC Value bit: assert OVERRIDE with VALUE low, flush,
 * then deassert OVERRIDE (second write below) to complete the pulse. */
285 mac_reg = E1000_READ_REG(hw, E1000_CTRL);
286 mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
287 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
288 E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
289 E1000_WRITE_FLUSH(hw);
291 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
292 E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
293 E1000_WRITE_FLUSH(hw);
295 if (hw->mac.type < e1000_pch_lpt) {
/* On LPT and newer: poll CTRL_EXT.LPCD with a bounded count. */
302 } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
303 E1000_CTRL_EXT_LPCD) && count--);
/* NOTE(review): case labels, gotos, delay calls and braces are elided from
 * this listing; code left byte-identical.  Control flow here is highly
 * order-sensitive (semaphore acquire/release around the SMBus/LANPHYPC
 * recovery) -- do not reorder.
 */
310 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
311 * @hw: pointer to the HW structure
313 * Workarounds/flow necessary for PHY initialization during driver load
316 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
318 u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
321 DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
323 /* Gate automatic PHY configuration by hardware on managed and
324 * non-managed 82579 and newer adapters.
326 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
328 /* It is not possible to be certain of the current state of ULP
329 * so forcibly disable it.
331 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
332 e1000_disable_ulp_lpt_lp(hw, TRUE);
334 ret_val = hw->phy.ops.acquire(hw);
336 DEBUGOUT("Failed to initialize PHY flow\n");
340 /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
341 * inaccessible and resetting the PHY is not blocked, toggle the
342 * LANPHYPC Value bit to force the interconnect to PCIe mode.
344 switch (hw->mac.type) {
347 if (e1000_phy_is_accessible_pchlan(hw))
350 /* Before toggling LANPHYPC, see if PHY is accessible by
351 * forcing MAC to SMBus mode first.
353 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
354 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
355 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
357 /* Wait 50 milliseconds for MAC to finish any retries
358 * that it might be trying to perform from previous
359 * attempts to acknowledge any phy read requests.
365 if (e1000_phy_is_accessible_pchlan(hw))
370 if ((hw->mac.type == e1000_pchlan) &&
371 (fwsm & E1000_ICH_FWSM_FW_VALID))
374 if (hw->phy.ops.check_reset_block(hw)) {
375 DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
376 ret_val = -E1000_ERR_PHY;
380 /* Toggle LANPHYPC Value bit */
381 e1000_toggle_lanphypc_pch_lpt(hw);
382 if (hw->mac.type >= e1000_pch_lpt) {
383 if (e1000_phy_is_accessible_pchlan(hw))
386 /* Toggling LANPHYPC brings the PHY out of SMBus mode
387 * so ensure that the MAC is also out of SMBus mode
389 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
390 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
391 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
393 if (e1000_phy_is_accessible_pchlan(hw))
396 ret_val = -E1000_ERR_PHY;
403 hw->phy.ops.release(hw);
406 /* Check to see if able to reset PHY.  Print error if not */
407 if (hw->phy.ops.check_reset_block(hw)) {
408 ERROR_REPORT("Reset blocked by ME\n");
412 /* Reset the PHY before any access to it.  Doing so, ensures
413 * that the PHY is in a known good state before we read/write
414 * PHY registers.  The generic reset is sufficient here,
415 * because we haven't determined the PHY type yet.
417 ret_val = e1000_phy_hw_reset_generic(hw);
421 /* On a successful reset, possibly need to wait for the PHY
422 * to quiesce to an accessible state before returning control
423 * to the calling function.  If the PHY does not quiesce, then
424 * return E1000E_BLK_PHY_RESET, as this is the condition that
427 ret_val = hw->phy.ops.check_reset_block(hw);
429 ERROR_REPORT("ME blocked access to PHY after reset\n");
433 /* Ungate automatic PHY configuration on non-managed 82579 */
434 if ((hw->mac.type == e1000_pch2lan) &&
435 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
437 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
/* NOTE(review): braces, case labels, gotos and error checks are elided from
 * this listing; code left byte-identical.
 */
444 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
445 * @hw: pointer to the HW structure
447 * Initialize family-specific PHY parameters and function pointers.
449 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
451 struct e1000_phy_info *phy = &hw->phy;
454 DEBUGFUNC("e1000_init_phy_params_pchlan");
457 phy->reset_delay_us = 100;
459 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
460 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
461 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
462 phy->ops.set_page = e1000_set_page_igp;
463 phy->ops.read_reg = e1000_read_phy_reg_hv;
464 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
465 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
466 phy->ops.release = e1000_release_swflag_ich8lan;
467 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
/* PCH uses the same LPLU setter for both D0 and D3 states. */
468 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
469 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
470 phy->ops.write_reg = e1000_write_phy_reg_hv;
471 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
472 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
473 phy->ops.power_up = e1000_power_up_phy_copper;
474 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
475 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
477 phy->id = e1000_phy_unknown;
479 ret_val = e1000_init_phy_workarounds_pchlan(hw);
/* The workarounds above may have already identified the PHY. */
483 if (phy->id == e1000_phy_unknown)
484 switch (hw->mac.type) {
486 ret_val = e1000_get_phy_id(hw);
489 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
495 /* In case the PHY needs to be in mdio slow mode,
496 * set slow mode and try to get the PHY id again.
498 ret_val = e1000_set_mdio_slow_mode_hv(hw);
501 ret_val = e1000_get_phy_id(hw);
506 phy->type = e1000_get_phy_type_from_id(phy->id);
509 case e1000_phy_82577:
510 case e1000_phy_82579:
512 phy->ops.check_polarity = e1000_check_polarity_82577;
513 phy->ops.force_speed_duplex =
514 e1000_phy_force_speed_duplex_82577;
515 phy->ops.get_cable_length = e1000_get_cable_length_82577;
516 phy->ops.get_info = e1000_get_phy_info_82577;
517 phy->ops.commit = e1000_phy_sw_reset_generic;
519 case e1000_phy_82578:
520 phy->ops.check_polarity = e1000_check_polarity_m88;
521 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
522 phy->ops.get_cable_length = e1000_get_cable_length_m88;
523 phy->ops.get_info = e1000_get_phy_info_m88;
526 ret_val = -E1000_ERR_PHY;
/* NOTE(review): braces, retry-loop body and some case labels are elided from
 * this listing; code left byte-identical.
 */
534 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
535 * @hw: pointer to the HW structure
537 * Initialize family-specific PHY parameters and function pointers.
539 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
541 struct e1000_phy_info *phy = &hw->phy;
545 DEBUGFUNC("e1000_init_phy_params_ich8lan");
548 phy->reset_delay_us = 100;
550 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
551 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
552 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
553 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
554 phy->ops.read_reg = e1000_read_phy_reg_igp;
555 phy->ops.release = e1000_release_swflag_ich8lan;
556 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
557 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
558 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
559 phy->ops.write_reg = e1000_write_phy_reg_igp;
560 phy->ops.power_up = e1000_power_up_phy_copper;
561 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
563 /* We may need to do this twice - once for IGP and if that fails,
564 * we'll set BM func pointers and try again
566 ret_val = e1000_determine_phy_address(hw);
/* First (IGP) attempt failed: retry with BM register accessors. */
568 phy->ops.write_reg = e1000_write_phy_reg_bm;
569 phy->ops.read_reg = e1000_read_phy_reg_bm;
570 ret_val = e1000_determine_phy_address(hw);
572 DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
578 while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
581 ret_val = e1000_get_phy_id(hw);
/* Dispatch per-PHY-model ops based on the discovered PHY ID. */
588 case IGP03E1000_E_PHY_ID:
589 phy->type = e1000_phy_igp_3;
590 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
591 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
592 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
593 phy->ops.get_info = e1000_get_phy_info_igp;
594 phy->ops.check_polarity = e1000_check_polarity_igp;
595 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
598 case IFE_PLUS_E_PHY_ID:
600 phy->type = e1000_phy_ife;
601 phy->autoneg_mask = E1000_ALL_NOT_GIG;
602 phy->ops.get_info = e1000_get_phy_info_ife;
603 phy->ops.check_polarity = e1000_check_polarity_ife;
604 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
606 case BME1000_E_PHY_ID:
607 phy->type = e1000_phy_bm;
608 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
609 phy->ops.read_reg = e1000_read_phy_reg_bm;
610 phy->ops.write_reg = e1000_write_phy_reg_bm;
611 phy->ops.commit = e1000_phy_sw_reset_generic;
612 phy->ops.get_info = e1000_get_phy_info_m88;
613 phy->ops.check_polarity = e1000_check_polarity_m88;
614 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
617 return -E1000_ERR_PHY;
621 return E1000_SUCCESS;
/* NOTE(review): local declarations (nvm_size, i), braces and else branches
 * are elided from this listing; code left byte-identical.
 */
625 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
626 * @hw: pointer to the HW structure
628 * Initialize family-specific NVM parameters and function
631 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
633 struct e1000_nvm_info *nvm = &hw->nvm;
634 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
635 u32 gfpreg, sector_base_addr, sector_end_addr;
639 DEBUGFUNC("e1000_init_nvm_params_ich8lan");
641 nvm->type = e1000_nvm_flash_sw;
643 if (hw->mac.type >= e1000_pch_spt) {
644 /* in SPT, gfpreg doesn't exist. NVM size is taken from the
645 * STRAP register. This is because in SPT the GbE Flash region
646 * is no longer accessed through the flash registers. Instead,
647 * the mechanism has changed, and the Flash region access
648 * registers are now implemented in GbE memory space.
650 nvm->flash_base_addr = 0;
652 (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
653 * NVM_SIZE_MULTIPLIER;
/* Two equal NVM banks: half the total size each. */
654 nvm->flash_bank_size = nvm_size / 2;
655 /* Adjust to word count */
656 nvm->flash_bank_size /= sizeof(u16);
657 /* Set the base address for flash register access */
658 hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
660 /* Can't read flash registers if register set isn't mapped. */
661 if (!hw->flash_address) {
662 DEBUGOUT("ERROR: Flash registers not mapped\n");
663 return -E1000_ERR_CONFIG;
666 gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
668 /* sector_X_addr is a "sector"-aligned address (4096 bytes)
669 * Add 1 to sector_end_addr since this sector is included in
672 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
673 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
675 /* flash_base_addr is byte-aligned */
676 nvm->flash_base_addr = sector_base_addr
677 << FLASH_SECTOR_ADDR_SHIFT;
679 /* find total size of the NVM, then cut in half since the total
680 * size represents two separate NVM banks.
682 nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
683 << FLASH_SECTOR_ADDR_SHIFT);
684 nvm->flash_bank_size /= 2;
685 /* Adjust to word count */
686 nvm->flash_bank_size /= sizeof(u16);
689 nvm->word_size = E1000_SHADOW_RAM_WORDS;
691 /* Clear shadow ram */
692 for (i = 0; i < nvm->word_size; i++) {
693 dev_spec->shadow_ram[i].modified = FALSE;
694 dev_spec->shadow_ram[i].value = 0xFFFF;
697 E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
698 E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
700 /* Function Pointers */
701 nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
702 nvm->ops.release = e1000_release_nvm_ich8lan;
/* SPT and newer use the memory-space flash access variants. */
703 if (hw->mac.type >= e1000_pch_spt) {
704 nvm->ops.read = e1000_read_nvm_spt;
705 nvm->ops.update = e1000_update_nvm_checksum_spt;
707 nvm->ops.read = e1000_read_nvm_ich8lan;
708 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
710 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
711 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
712 nvm->ops.write = e1000_write_nvm_ich8lan;
714 return E1000_SUCCESS;
/* NOTE(review): the switch statement on mac->type, its case labels and
 * braces are elided from this listing (the pch2/pch LED and RAR overrides
 * below clearly belong to such a switch); code left byte-identical.
 */
718 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
719 * @hw: pointer to the HW structure
721 * Initialize family-specific MAC parameters and function
724 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
726 struct e1000_mac_info *mac = &hw->mac;
728 DEBUGFUNC("e1000_init_mac_params_ich8lan");
730 /* Set media type function pointer */
731 hw->phy.media_type = e1000_media_type_copper;
733 /* Set mta register count */
734 mac->mta_reg_count = 32;
735 /* Set rar entry count */
736 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
/* ICH8 has one fewer usable receive-address register. */
737 if (mac->type == e1000_ich8lan)
738 mac->rar_entry_count--;
739 /* Set if part includes ASF firmware */
740 mac->asf_firmware_present = TRUE;
742 mac->has_fwsm = TRUE;
743 /* ARC subsystem not supported */
744 mac->arc_subsystem_valid = FALSE;
745 /* Adaptive IFS supported */
746 mac->adaptive_ifs = TRUE;
748 /* Function pointers */
750 /* bus type/speed/width */
751 mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
753 mac->ops.set_lan_id = e1000_set_lan_id_single_port;
755 mac->ops.reset_hw = e1000_reset_hw_ich8lan;
756 /* hw initialization */
757 mac->ops.init_hw = e1000_init_hw_ich8lan;
759 mac->ops.setup_link = e1000_setup_link_ich8lan;
760 /* physical interface setup */
761 mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
763 mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
765 mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
766 /* multicast address update */
767 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
768 /* clear hardware counters */
769 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
771 /* LED and other operations */
776 /* check management mode */
777 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
779 mac->ops.id_led_init = e1000_id_led_init_generic;
781 mac->ops.blink_led = e1000_blink_led_generic;
783 mac->ops.setup_led = e1000_setup_led_generic;
785 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
786 /* turn on/off LED */
787 mac->ops.led_on = e1000_led_on_ich8lan;
788 mac->ops.led_off = e1000_led_off_ich8lan;
/* pch2lan override: larger RAR table and its own rar_set. */
791 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
792 mac->ops.rar_set = e1000_rar_set_pch2lan;
796 /* multicast address update for pch2 */
797 mac->ops.update_mc_addr_list =
798 e1000_update_mc_addr_list_pch2lan;
801 /* check management mode */
802 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
804 mac->ops.id_led_init = e1000_id_led_init_pchlan;
806 mac->ops.setup_led = e1000_setup_led_pchlan;
808 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
809 /* turn on/off LED */
810 mac->ops.led_on = e1000_led_on_pchlan;
811 mac->ops.led_off = e1000_led_off_pchlan;
/* LPT and newer override RAR count and the physical-interface setup. */
817 if (mac->type >= e1000_pch_lpt) {
818 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
819 mac->ops.rar_set = e1000_rar_set_pch_lpt;
820 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
821 mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
824 /* Enable PCS Lock-loss workaround for ICH8 */
825 if (mac->type == e1000_ich8lan)
826 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
828 return E1000_SUCCESS;
/* NOTE(review): braces, error checks and the trailing return are elided
 * from this listing; code left byte-identical.
 */
832 * __e1000_access_emi_reg_locked - Read/write EMI register
833 * @hw: pointer to the HW structure
834 * @address: EMI address to program
835 * @data: pointer to value to read/write from/to the EMI address
836 * @read: boolean flag to indicate read or write
838 * This helper function assumes the SW/FW/HW Semaphore is already acquired.
840 static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
841 u16 *data, bool read)
845 DEBUGFUNC("__e1000_access_emi_reg_locked");
/* Program the EMI address first, then read or write the EMI data reg. */
847 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
852 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
855 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
862 * e1000_read_emi_reg_locked - Read Extended Management Interface register
863 * @hw: pointer to the HW structure
864 * @addr: EMI address to program
865 * @data: pointer to location to store the value read from the EMI address
867 * Assumes the SW/FW/HW Semaphore is already acquired.
869 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
871 DEBUGFUNC("e1000_read_emi_reg_locked");
873 return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
877 * e1000_write_emi_reg_locked - Write Extended Management Interface register
878 * @hw: pointer to the HW structure
879 * @addr: EMI address to program
880 * @data: value to be written to the EMI address
882 * Assumes the SW/FW/HW Semaphore is already acquired.
884 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
/* Fixed copy-paste: trace tag previously said "e1000_read_emi_reg_locked". */
886 DEBUGFUNC("e1000_write_emi_reg_locked");
888 return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
/* NOTE(review): case labels (I217 path), gotos to the release label and
 * braces are elided from this listing; code left byte-identical.  The
 * acquire/release pair around the register updates must stay balanced.
 */
892 * e1000_set_eee_pchlan - Enable/disable EEE support
893 * @hw: pointer to the HW structure
895 * Enable/disable EEE based on setting in dev_spec structure, the duplex of
896 * the link and the EEE capabilities of the link partner.  The LPI Control
897 * register bits will remain set only if/when link is up.
899 * EEE LPI must not be asserted earlier than one second after link is up.
900 * On 82579, EEE LPI should not be enabled until such time otherwise there
901 * can be link issues with some switches.  Other devices can have EEE LPI
902 * enabled immediately upon link up since they have a timer in hardware which
903 * prevents LPI from being asserted too early.
905 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
907 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
909 u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
911 DEBUGFUNC("e1000_set_eee_pchlan");
/* Select the EMI register offsets for this PHY generation. */
913 switch (hw->phy.type) {
914 case e1000_phy_82579:
915 lpa = I82579_EEE_LP_ABILITY;
916 pcs_status = I82579_EEE_PCS_STATUS;
917 adv_addr = I82579_EEE_ADVERTISEMENT;
920 lpa = I217_EEE_LP_ABILITY;
921 pcs_status = I217_EEE_PCS_STATUS;
922 adv_addr = I217_EEE_ADVERTISEMENT;
925 return E1000_SUCCESS;
928 ret_val = hw->phy.ops.acquire(hw);
932 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
936 /* Clear bits that enable EEE in various speeds */
937 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
939 /* Enable EEE if not disabled by user */
940 if (!dev_spec->eee_disable) {
941 /* Save off link partner's EEE ability */
942 ret_val = e1000_read_emi_reg_locked(hw, lpa,
943 &dev_spec->eee_lp_ability);
947 /* Read EEE advertisement */
948 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
952 /* Enable EEE only for speeds in which the link partner is
953 * EEE capable and for which we advertise EEE.
955 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
956 lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
958 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
959 hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
960 if (data & NWAY_LPAR_100TX_FD_CAPS)
961 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
963 /* EEE is not supported in 100Half, so ignore
964 * partner's EEE in 100 ability if full-duplex
967 dev_spec->eee_lp_ability &=
968 ~I82579_EEE_100_SUPPORTED;
972 if (hw->phy.type == e1000_phy_82579) {
973 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
978 data &= ~I82579_LPI_100_PLL_SHUT;
979 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
983 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
984 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
988 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
990 hw->phy.ops.release(hw);
/* NOTE(review): braces, error-check lines and the update_fextnvm6 label are
 * elided from this listing; only the encoding defect below was repaired.
 */
996 * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
997 * @hw: pointer to the HW structure
998 * @link: link up bool flag
1000 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
1001 * preventing further DMA write requests.  Workaround the issue by disabling
1002 * the de-assertion of the clock request when in 1Gbps mode.
1003 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
1004 * speeds in order to avoid Tx hangs.
1006 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
1008 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1009 u32 status = E1000_READ_REG(hw, E1000_STATUS);
1010 s32 ret_val = E1000_SUCCESS;
1013 if (link && (status & E1000_STATUS_SPEED_1000)) {
1014 ret_val = hw->phy.ops.acquire(hw);
1019 e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1025 e1000_write_kmrn_reg_locked(hw,
1026 E1000_KMRNCTRLSTA_K1_CONFIG,
1028 ~E1000_KMRNCTRLSTA_K1_ENABLE);
1034 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1035 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1038 e1000_write_kmrn_reg_locked(hw,
1039 E1000_KMRNCTRLSTA_K1_CONFIG,
1042 hw->phy.ops.release(hw);
1044 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1045 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1047 if ((hw->phy.revision > 5) || !link ||
1048 ((status & E1000_STATUS_SPEED_100) &&
1049 (status & E1000_STATUS_FD)))
1050 goto update_fextnvm6;
/* Fixed mojibake: "&reg" had been corrupted to the (R) character. */
1052 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1056 /* Clear link status transmit timeout */
1057 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1059 if (status & E1000_STATUS_SPEED_100) {
1060 /* Set inband Tx timeout to 5x10us for 100Half */
1061 reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1063 /* Do not extend the K1 entry latency for 100Half */
1064 fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1066 /* Set inband Tx timeout to 50x10us for 10Full/Half */
1068 I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1070 /* Extend the K1 entry latency for 10 Mbps */
1071 fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1074 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1079 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
/* e1000_ltr2ns - decode an LTR (Latency Tolerance Reporting) value
 * into nanoseconds: a 10-bit value scaled by 2^(5*scale) per the
 * PCIe LTR encoding (scale field 0..5).
 */
1085 static u64 e1000_ltr2ns(u16 ltr)
1089 /* Determine the latency in nsec based on the LTR value & scale */
1090 value = ltr & E1000_LTRV_VALUE_MASK;
1091 scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;
/* NOTE(review): declarations of 'value'/'scale' are outside this view;
 * the shift below should be done in 64-bit to avoid overflow for large
 * scale values - confirm 'value' is u64 (scale*FACTOR can reach 25).
 */
1093 return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR));
1097 * e1000_platform_pm_pch_lpt - Set platform power management values
1098 * @hw: pointer to the HW structure
1099 * @link: bool indicating link status
1101 * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
1102 * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
1103 * when link is up (which must not exceed the maximum latency supported
1104 * by the platform), otherwise specify there is no LTR requirement.
1105 * Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop
1106 * latencies in the LTR Extended Capability Structure in the PCIe Extended
1107 * Capability register set, on this device LTR is set by writing the
1108 * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
1109 * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
1110 * message to the PMC.
1112 * Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
1115 static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
/* Seed LTRV with the requirement bits (snoop + no-snoop) only when
 * link is up, plus the SEND bit to push the value via IOSF-SB.
 */
1117 u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1118 link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1119 u16 lat_enc = 0; /* latency encoded */
1122 DEBUGFUNC("e1000_platform_pm_pch_lpt");
1125 u16 speed, duplex, scale = 0;
1126 u16 max_snoop, max_nosnoop;
1127 u16 max_ltr_enc; /* max LTR latency encoded */
1132 if (!hw->mac.max_frame_size) {
1133 DEBUGOUT("max_frame_size not set.\n");
1134 return -E1000_ERR_CONFIG;
1137 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1139 DEBUGOUT("Speed not set.\n");
1140 return -E1000_ERR_CONFIG;
1143 /* Rx Packet Buffer Allocation size (KB) */
1144 rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;
1146 /* Determine the maximum latency tolerated by the device.
1148 * Per the PCIe spec, the tolerated latencies are encoded as
1149 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
1150 * a 10-bit value (0-1023) to provide a range from 1 ns to
1151 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
1152 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
1154 lat_ns = ((s64)rxa * 1024 -
1155 (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
/* Normalize: divide by 2^5 per iteration, bumping scale, until the
 * value fits the 10-bit LTR value field.
 */
1162 while (value > E1000_LTRV_VALUE_MASK) {
1164 value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
1166 if (scale > E1000_LTRV_SCALE_MAX) {
1167 DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
1168 return -E1000_ERR_CONFIG;
1170 lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
1172 /* Determine the maximum latency tolerated by the platform */
1173 e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
1174 e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1175 max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);
/* NOTE(review): this compares *encoded* LTR values; because the scale
 * field sits in the high bits, encoded ordering does not always match
 * the ordering of the decoded latencies - verify against the PCIe LTR
 * encoding before relying on this clamp.
 */
1177 if (lat_enc > max_ltr_enc) {
1178 lat_enc = max_ltr_enc;
1179 lat_ns = e1000_ltr2ns(max_ltr_enc);
1183 lat_ns *= speed * 1000;
1185 lat_ns /= 1000000000;
1186 obff_hwm = (s32)(rxa - lat_ns);
1188 if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
1189 DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
1190 return -E1000_ERR_CONFIG;
1194 /* Set Snoop and No-Snoop latencies the same */
1195 reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1196 E1000_WRITE_REG(hw, E1000_LTRV, reg);
1198 /* Set OBFF high water mark */
1199 reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
1201 E1000_WRITE_REG(hw, E1000_SVT, reg);
/* Enable OBFF in the System Vector Control Register */
1204 reg = E1000_READ_REG(hw, E1000_SVCR);
1205 reg |= E1000_SVCR_OFF_EN;
1206 /* Always unblock interrupts to the CPU even when the system is
1207 * in OBFF mode. This ensures that small round-robin traffic
1208 * (like ping) does not get dropped or experience long latency.
1210 reg |= E1000_SVCR_OFF_MASKINT;
1211 E1000_WRITE_REG(hw, E1000_SVCR, reg);
1213 return E1000_SUCCESS;
1217 * e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
1218 * @hw: pointer to the HW structure
1219 * @itr: interrupt throttling rate
1221 * Configure OBFF with the updated interrupt rate.
1223 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
1228 DEBUGFUNC("e1000_set_obff_timer_pch_lpt");
1230 /* Convert ITR value into microseconds for OBFF timer */
1231 timer = itr & E1000_ITR_MASK;
1232 timer = (timer * E1000_ITR_MULT) / 1000;
/* NOTE(review): the declaration of 'timer' is outside this view; if it
 * is unsigned (u32), the 'timer < 0' test below can never be true -
 * confirm the declared type.
 */
1234 if ((timer < 0) || (timer > E1000_ITR_MASK)) {
1235 DEBUGOUT1("Invalid OBFF timer %d\n", timer);
1236 return -E1000_ERR_CONFIG;
/* Replace only the OBFF timer field of SVCR, preserving other bits */
1239 svcr = E1000_READ_REG(hw, E1000_SVCR);
1240 svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
1241 svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
1242 E1000_WRITE_REG(hw, E1000_SVCR, svcr);
1244 return E1000_SUCCESS;
1248 * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1249 * @hw: pointer to the HW structure
1250 * @to_sx: boolean indicating a system power state transition to Sx
1252 * When link is down, configure ULP mode to significantly reduce the power
1253 * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
1254 * ME firmware to start the ULP configuration. If not on an ME enabled
1255 * system, configure the ULP mode by software.
1257 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1260 s32 ret_val = E1000_SUCCESS;
/* ULP only applies to LPT-LP and later parts; the listed I217/I218
 * device IDs are the non-LP variants that do not support it. Also
 * bail if ULP is already on.
 */
1264 if ((hw->mac.type < e1000_pch_lpt) ||
1265 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1266 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1267 (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1268 (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1269 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1272 if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1273 /* Request ME configure ULP mode in the PHY */
1274 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1275 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1276 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1284 /* Poll up to 5 seconds for Cable Disconnected indication */
1285 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1286 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1287 /* Bail if link is re-acquired */
1288 if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1289 return -E1000_ERR_PHY;
1296 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1297 (E1000_READ_REG(hw, E1000_FEXT) &
1298 E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
/* Software ULP configuration path (no ME) starts here */
1302 ret_val = hw->phy.ops.acquire(hw);
1306 /* Force SMBus mode in PHY */
1307 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1310 phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1311 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1313 /* Force SMBus mode in MAC */
1314 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1315 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1316 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1318 /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
1319 * LPLU and disable Gig speed when entering ULP
1321 if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1322 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1328 phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1330 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1337 /* Set Inband ULP Exit, Reset to SMBus mode and
1338 * Disable SMBus Release on PERST# in PHY
1340 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1343 phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1344 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
/* Honor WoL link-change filter when choosing host wake behavior */
1346 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1347 phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1349 phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1351 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1352 phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1354 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1355 phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1356 phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1358 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1360 /* Set Disable SMBus Release on PERST# in MAC */
1361 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1362 mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1363 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1365 /* Commit ULP changes in PHY by starting auto ULP configuration */
1366 phy_reg |= I218_ULP_CONFIG1_START;
1367 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
/* i217/rev6 Sx-entry with link still up needs an extra OEM-bits write */
1369 if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1370 to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1371 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1378 hw->phy.ops.release(hw);
1381 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1383 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1389 * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1390 * @hw: pointer to the HW structure
1391 * @force: boolean indicating whether or not to force disabling ULP
1393 * Un-configure ULP mode when link is up, the system is transitioned from
1394 * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled
1395 * system, poll for an indication from ME that ULP has been un-configured.
1396 * If not on an ME enabled system, un-configure the ULP mode by software.
1398 * During nominal operation, this function is called when link is acquired
1399 * to disable ULP mode (force=FALSE); otherwise, for example when unloading
1400 * the driver or during Sx->S0 transitions, this is called with force=TRUE
1401 * to forcibly disable ULP.
1403 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1405 s32 ret_val = E1000_SUCCESS;
/* Same eligibility gate as the enable path, except we also bail when
 * ULP is already off.
 */
1410 if ((hw->mac.type < e1000_pch_lpt) ||
1411 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1412 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1413 (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1414 (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1415 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1418 if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1420 /* Request ME un-configure ULP mode in the PHY */
1421 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1422 mac_reg &= ~E1000_H2ME_ULP;
1423 mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1424 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1427 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
1428 while (E1000_READ_REG(hw, E1000_FWSM) &
1429 E1000_FWSM_ULP_CFG_DONE) {
1431 ret_val = -E1000_ERR_PHY;
1437 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
/* Drop ENFORCE_SETTINGS first, then clear H2ME.ULP separately */
1440 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1441 mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1442 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1444 /* Clear H2ME.ULP after ME ULP configuration */
1445 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1446 mac_reg &= ~E1000_H2ME_ULP;
1447 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
/* Software ULP exit path (no ME) starts here */
1453 ret_val = hw->phy.ops.acquire(hw);
1458 /* Toggle LANPHYPC Value bit */
1459 e1000_toggle_lanphypc_pch_lpt(hw);
1461 /* Unforce SMBus mode in PHY */
1462 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1464 /* The MAC might be in PCIe mode, so temporarily force to
1465 * SMBus mode in order to access the PHY.
1467 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1468 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1469 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
/* Retry the PHY read now that SMBus mode is forced in the MAC */
1473 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1478 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1479 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1481 /* Unforce SMBus mode in MAC */
1482 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1483 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1484 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1486 /* When ULP mode was previously entered, K1 was disabled by the
1487 * hardware. Re-Enable K1 in the PHY when exiting ULP.
1489 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1492 phy_reg |= HV_PM_CTRL_K1_ENABLE;
1493 e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1495 /* Clear ULP enabled configuration */
1496 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1499 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1500 I218_ULP_CONFIG1_STICKY_ULP |
1501 I218_ULP_CONFIG1_RESET_TO_SMBUS |
1502 I218_ULP_CONFIG1_WOL_HOST |
1503 I218_ULP_CONFIG1_INBAND_EXIT |
1504 I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1505 I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1506 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1507 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1509 /* Commit ULP changes by starting auto ULP configuration */
1510 phy_reg |= I218_ULP_CONFIG1_START;
1511 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1513 /* Clear Disable SMBus Release on PERST# in MAC */
1514 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1515 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1516 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1519 hw->phy.ops.release(hw);
/* Full PHY reset after a forced exit from ULP */
1521 hw->phy.ops.reset(hw);
1526 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1528 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1534 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1535 * @hw: pointer to the HW structure
1537 * Checks to see of the link status of the hardware has changed. If a
1538 * change in link status has been detected, then we read the PHY registers
1539 * to get the current speed/duplex if link exists.
1541 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1543 struct e1000_mac_info *mac = &hw->mac;
1544 s32 ret_val, tipg_reg = 0;
1545 u16 emi_addr, emi_val = 0;
1549 DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1551 /* We only want to go out to the PHY registers to see if Auto-Neg
1552 * has completed and/or if our link status has changed. The
1553 * get_link_status flag is set upon receiving a Link Status
1554 * Change or Rx Sequence Error interrupt.
1556 if (!mac->get_link_status)
1557 return E1000_SUCCESS;
1559 /* First we want to see if the MII Status Register reports
1560 * link. If so, then we want to get the current speed/duplex
1563 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link)
1567 if (hw->mac.type == e1000_pchlan) {
1568 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1573 /* When connected at 10Mbps half-duplex, some parts are excessively
1574 * aggressive resulting in many collisions. To avoid this, increase
1575 * the IPG and reduce Rx latency in the PHY.
1577 if ((hw->mac.type >= e1000_pch2lan) && link) {
1580 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1581 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1582 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1584 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1586 /* Reduce Rx latency in analog PHY */
1588 } else if (hw->mac.type >= e1000_pch_spt &&
1589 duplex == FULL_DUPLEX && speed != SPEED_1000) {
1593 /* Roll back the default values */
1598 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1600 ret_val = hw->phy.ops.acquire(hw);
/* The EMI Rx-latency register address differs between 82579 and I217 */
1604 if (hw->mac.type == e1000_pch2lan)
1605 emi_addr = I82579_RX_CONFIG;
1607 emi_addr = I217_RX_CONFIG;
1608 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1611 if (hw->mac.type >= e1000_pch_lpt) {
1614 hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1616 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1617 if (speed == SPEED_100 || speed == SPEED_10)
1621 hw->phy.ops.write_reg_locked(hw,
1622 I217_PLL_CLOCK_GATE_REG,
1625 if (speed == SPEED_1000) {
1626 hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1629 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1631 hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1635 hw->phy.ops.release(hw);
1640 if (hw->mac.type >= e1000_pch_spt) {
/* SPT+: widen the PHY FIFO pointer gap at 1Gbps if it is too small */
1644 if (speed == SPEED_1000) {
1645 ret_val = hw->phy.ops.acquire(hw);
1649 ret_val = hw->phy.ops.read_reg_locked(hw,
1653 hw->phy.ops.release(hw);
1657 ptr_gap = (data & (0x3FF << 2)) >> 2;
1658 if (ptr_gap < 0x18) {
1659 data &= ~(0x3FF << 2);
1660 data |= (0x18 << 2);
1662 hw->phy.ops.write_reg_locked(hw,
1663 PHY_REG(776, 20), data);
1665 hw->phy.ops.release(hw);
1669 ret_val = hw->phy.ops.acquire(hw);
1673 ret_val = hw->phy.ops.write_reg_locked(hw,
1676 hw->phy.ops.release(hw);
1684 /* I217 Packet Loss issue:
1685 * ensure that FEXTNVM4 Beacon Duration is set correctly
1687 * Set the Beacon Duration for I217 to 8 usec
1689 if (hw->mac.type >= e1000_pch_lpt) {
1692 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1693 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1694 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1695 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1698 /* Work-around I218 hang issue */
1699 if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1700 (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1701 (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1702 (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1703 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1707 if (hw->mac.type >= e1000_pch_lpt) {
1708 /* Set platform power management values for
1709 * Latency Tolerance Reporting (LTR)
1710 * Optimized Buffer Flush/Fill (OBFF)
1712 ret_val = e1000_platform_pm_pch_lpt(hw, link);
1717 /* Clear link partner's EEE ability */
1718 hw->dev_spec.ich8lan.eee_lp_ability = 0;
1720 if (hw->mac.type >= e1000_pch_lpt) {
1721 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1723 if (hw->mac.type == e1000_pch_spt) {
1724 /* FEXTNVM6 K1-off workaround - for SPT only */
1725 u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1727 if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1728 fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1730 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
/* Explicit user/dev_spec override always wins over the SPT default */
1733 if (hw->dev_spec.ich8lan.disable_k1_off == TRUE)
1734 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1736 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1740 return E1000_SUCCESS; /* No link detected */
1742 mac->get_link_status = FALSE;
/* Link is up from here on: apply per-MAC-generation workarounds */
1744 switch (hw->mac.type) {
1746 ret_val = e1000_k1_workaround_lv(hw);
1751 if (hw->phy.type == e1000_phy_82578) {
1752 ret_val = e1000_link_stall_workaround_hv(hw);
1757 /* Workaround for PCHx parts in half-duplex:
1758 * Set the number of preambles removed from the packet
1759 * when it is passed from the PHY to the MAC to prevent
1760 * the MAC from misinterpreting the packet type.
1762 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1763 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1765 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1767 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1769 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1775 /* Check if there was DownShift, must be checked
1776 * immediately after link-up
1778 e1000_check_downshift_generic(hw);
1780 /* Enable/Disable EEE after link up */
1781 if (hw->phy.type > e1000_phy_82579) {
1782 ret_val = e1000_set_eee_pchlan(hw);
1787 /* If we are forcing speed/duplex, then we simply return since
1788 * we have already determined whether we have link or not.
1791 return -E1000_ERR_CONFIG;
1793 /* Auto-Neg is enabled. Auto Speed Detection takes care
1794 * of MAC speed/duplex configuration. So we only need to
1795 * configure Collision Distance in the MAC.
1797 mac->ops.config_collision_dist(hw);
1799 /* Configure Flow Control now that Auto-Neg has completed.
1800 * First, we need to restore the desired flow control
1801 * settings because we may have had to re-autoneg with a
1802 * different link partner.
1804 ret_val = e1000_config_fc_after_link_up_generic(hw);
1806 DEBUGOUT("Error configuring flow control\n");
1812 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1813 * @hw: pointer to the HW structure
1815 * Initialize family-specific function pointers for PHY, MAC, and NVM.
1817 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1819 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1821 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1822 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
/* PHY init differs by MAC generation: ICH-era parts use the ich8lan
 * PHY param init, PCH-era parts use the pchlan variant.
 */
1823 switch (hw->mac.type) {
1826 case e1000_ich10lan:
1827 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1833 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1841 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1842 * @hw: pointer to the HW structure
1844 * Acquires the mutex for performing NVM operations.
1846 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1848 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
/* Blocking lock; always succeeds, hence the unconditional SUCCESS */
1850 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1852 return E1000_SUCCESS;
1856 * e1000_release_nvm_ich8lan - Release NVM mutex
1857 * @hw: pointer to the HW structure
1859 * Releases the mutex used while performing NVM operations.
1861 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1863 DEBUGFUNC("e1000_release_nvm_ich8lan");
/* Must pair with a prior e1000_acquire_nvm_ich8lan() call */
1865 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1871 * e1000_acquire_swflag_ich8lan - Acquire software control flag
1872 * @hw: pointer to the HW structure
1874 * Acquires the software control flag for performing PHY and select
1877 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1879 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1880 s32 ret_val = E1000_SUCCESS;
1882 DEBUGFUNC("e1000_acquire_swflag_ich8lan");
/* Local mutex serializes software; the EXTCNF_CTRL.SWFLAG bit
 * arbitrates against firmware/hardware for the shared resource.
 */
1884 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1887 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1888 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1896 DEBUGOUT("SW has already locked the resource.\n");
1897 ret_val = -E1000_ERR_CONFIG;
1901 timeout = SW_FLAG_TIMEOUT;
/* Request the flag, then poll until hardware reflects our ownership */
1903 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1904 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1907 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1908 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
/* Timed out: undo the request so FW/HW are not blocked */
1916 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1917 E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1918 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1919 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1920 ret_val = -E1000_ERR_CONFIG;
/* On any failure the local mutex is dropped before returning */
1926 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1932 * e1000_release_swflag_ich8lan - Release software control flag
1933 * @hw: pointer to the HW structure
1935 * Releases the software control flag for performing PHY and select
1938 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1942 DEBUGFUNC("e1000_release_swflag_ich8lan");
1944 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
/* Only clear the flag if we still own it; a cleared flag here means
 * something else released it underneath us - log, don't write.
 */
1946 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1947 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1948 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1950 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1953 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1959 * e1000_check_mng_mode_ich8lan - Checks management mode
1960 * @hw: pointer to the HW structure
1962 * This checks if the adapter has any manageability enabled.
1963 * This is a function pointer entry point only called by read/write
1964 * routines for the PHY and NVM parts.
1966 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1970 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1972 fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* ICH variant: requires the FW mode field to equal IAMT mode exactly */
1974 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1975 ((fwsm & E1000_FWSM_MODE_MASK) ==
1976 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1980 * e1000_check_mng_mode_pchlan - Checks management mode
1981 * @hw: pointer to the HW structure
1983 * This checks if the adapter has iAMT enabled.
1984 * This is a function pointer entry point only called by read/write
1985 * routines for the PHY and NVM parts.
1987 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1991 DEBUGFUNC("e1000_check_mng_mode_pchlan");
1993 fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* PCH variant: any overlap with the IAMT mode bits counts (bitwise
 * AND here, vs. exact equality in the ICH variant above).
 */
1995 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1996 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
2000 * e1000_rar_set_pch2lan - Set receive address register
2001 * @hw: pointer to the HW structure
2002 * @addr: pointer to the receive address
2003 * @index: receive address array register
2005 * Sets the receive address array register at index to the address passed
2006 * in by addr. For 82579, RAR[0] is the base address register that is to
2007 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
2008 * Use SHRA[0-3] in place of those reserved for ME.
2010 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
2012 u32 rar_low, rar_high;
2014 DEBUGFUNC("e1000_rar_set_pch2lan");
2016 /* HW expects these in little endian so we reverse the byte order
2017 * from network order (big endian) to little endian
2019 rar_low = ((u32) addr[0] |
2020 ((u32) addr[1] << 8) |
2021 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2023 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2025 /* If MAC address zero, no need to set the AV bit */
2026 if (rar_low || rar_high)
2027 rar_high |= E1000_RAH_AV;
/* index 0 path (RAR[0]): write directly, flushing between halves */
2030 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2031 E1000_WRITE_FLUSH(hw);
2032 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2033 E1000_WRITE_FLUSH(hw);
2034 return E1000_SUCCESS;
2037 /* RAR[1-6] are owned by manageability. Skip those and program the
2038 * next address into the SHRA register array.
2040 if (index < (u32) (hw->mac.rar_entry_count)) {
/* SHRA access is arbitrated with FW via the software control flag */
2043 ret_val = e1000_acquire_swflag_ich8lan(hw);
2047 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
2048 E1000_WRITE_FLUSH(hw);
2049 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
2050 E1000_WRITE_FLUSH(hw);
2052 e1000_release_swflag_ich8lan(hw);
2054 /* verify the register updates */
2055 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2056 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2057 return E1000_SUCCESS;
2059 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2060 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2064 DEBUGOUT1("Failed to write receive address at index %d\n", index);
2065 return -E1000_ERR_CONFIG;
2069 * e1000_rar_set_pch_lpt - Set receive address registers
2070 * @hw: pointer to the HW structure
2071 * @addr: pointer to the receive address
2072 * @index: receive address array register
2074 * Sets the receive address register array at index to the address passed
2075 * in by addr. For LPT, RAR[0] is the base address register that is to
2076 * contain the MAC address. SHRA[0-10] are the shared receive address
2077 * registers that are shared between the Host and manageability engine (ME).
2079 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
2081 u32 rar_low, rar_high;
2084 DEBUGFUNC("e1000_rar_set_pch_lpt");
2086 /* HW expects these in little endian so we reverse the byte order
2087 * from network order (big endian) to little endian
2089 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
2090 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2092 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2094 /* If MAC address zero, no need to set the AV bit */
2095 if (rar_low || rar_high)
2096 rar_high |= E1000_RAH_AV;
/* index 0 path (RAR[0]): write directly, flushing between halves */
2099 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2100 E1000_WRITE_FLUSH(hw);
2101 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2102 E1000_WRITE_FLUSH(hw);
2103 return E1000_SUCCESS;
2106 /* The manageability engine (ME) can lock certain SHRAR registers that
2107 * it is using - those registers are unavailable for use.
2109 if (index < hw->mac.rar_entry_count) {
2110 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2111 E1000_FWSM_WLOCK_MAC_MASK;
2112 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2114 /* Check if all SHRAR registers are locked */
/* wlock_mac == 0 means no lock; otherwise only SHRA[0..wlock_mac-1]
 * are writable (hence index <= wlock_mac with the -1 offset below).
 */
2118 if ((wlock_mac == 0) || (index <= wlock_mac)) {
2121 ret_val = e1000_acquire_swflag_ich8lan(hw);
2126 E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2128 E1000_WRITE_FLUSH(hw);
2129 E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2131 E1000_WRITE_FLUSH(hw);
2133 e1000_release_swflag_ich8lan(hw);
2135 /* verify the register updates */
2136 if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2137 (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2138 return E1000_SUCCESS;
2143 DEBUGOUT1("Failed to write receive address at index %d\n", index);
2144 return -E1000_ERR_CONFIG;
2148 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2149 * @hw: pointer to the HW structure
2150 * @mc_addr_list: array of multicast addresses to program
2151 * @mc_addr_count: number of multicast addresses to program
2153 * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2154 * The caller must have a packed mc_addr_list of multicast addresses.
2156 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2164 DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
/* Program the MAC's MTA first, then mirror the shadow into the PHY's
 * BM_MTA wakeup registers (each 32-bit entry split into two 16-bit
 * PHY writes: low half, then high half).
 */
2166 e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2168 ret_val = hw->phy.ops.acquire(hw);
2172 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2176 for (i = 0; i < hw->mac.mta_reg_count; i++) {
2177 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2178 (u16)(hw->mac.mta_shadow[i] &
2180 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2181 (u16)((hw->mac.mta_shadow[i] >> 16) &
2185 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2188 hw->phy.ops.release(hw);
2192 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2193 * @hw: pointer to the HW structure
2195 * Checks if firmware is blocking the reset of the PHY.
2196 * This is a function pointer entry point only called by
2199 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2202 bool blocked = FALSE;
2205 DEBUGFUNC("e1000_check_reset_block_ich8lan");
/* Re-sample FWSM.RSPCIPHY up to ~30 times; FW may clear the block */
2208 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2209 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2215 } while (blocked && (i++ < 30));
2216 return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2220 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2221 * @hw: pointer to the HW structure
2223 * Assumes semaphore already acquired.
2226 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2229 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2230 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2231 E1000_STRAP_SMT_FREQ_SHIFT;
2234 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2236 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
/* Copy the strapped SMBus address into the PHY and mark it valid */
2240 phy_data &= ~HV_SMB_ADDR_MASK;
2241 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2242 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2244 if (hw->phy.type == e1000_phy_i217) {
2245 /* Restore SMBus frequency */
/* Split the 2-bit strapped frequency across the PHY's low/high bits */
2247 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2248 phy_data |= (freq & (1 << 0)) <<
2249 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2250 phy_data |= (freq & (1 << 1)) <<
2251 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2253 DEBUGOUT("Unsupported SMB frequency in PHY\n");
2257 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2261 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2262 * @hw: pointer to the HW structure
2264 * SW should configure the LCD from the NVM extended configuration region
2265 * as a workaround for certain parts.
2267 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2269 struct e1000_phy_info *phy = &hw->phy;
2270 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2271 s32 ret_val = E1000_SUCCESS;
2272 u16 word_addr, reg_data, reg_addr, phy_page = 0;
2274 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2276 /* Initialize the PHY from the NVM on ICH platforms. This
2277 * is needed due to an issue where the NVM configuration is
2278 * not properly autoloaded after power transitions.
2279 * Therefore, after each PHY reset, we will load the
2280 * configuration data out of the NVM manually.
2282 switch (hw->mac.type) {
2284 if (phy->type != e1000_phy_igp_3)
/* Only these ICH8 SKUs (and ICH8-M via the default mask below)
 * require the SW-driven LCD configuration.
 */
2287 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2288 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2289 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2297 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2303 ret_val = hw->phy.ops.acquire(hw);
2307 data = E1000_READ_REG(hw, E1000_FEXTNVM);
2308 if (!(data & sw_cfg_mask))
2311 /* Make sure HW does not configure LCD from PHY
2312 * extended configuration before SW configuration
2314 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2315 if ((hw->mac.type < e1000_pch2lan) &&
2316 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2319 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2320 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2321 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2325 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2326 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2328 if (((hw->mac.type == e1000_pchlan) &&
2329 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2330 (hw->mac.type > e1000_pchlan)) {
2331 /* HW configures the SMBus address and LEDs when the
2332 * OEM and LCD Write Enable bits are set in the NVM.
2333 * When both NVM bits are cleared, SW will configure
2336 ret_val = e1000_write_smbus_addr(hw);
2340 data = E1000_READ_REG(hw, E1000_LEDCTL);
2341 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2347 /* Configure LCD from extended configuration region. */
2349 /* cnf_base_addr is in DWORD */
2350 word_addr = (u16)(cnf_base_addr << 1);
/* Each config entry is a (data, address) word pair in the NVM */
2352 for (i = 0; i < cnf_size; i++) {
2353 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2358 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2363 /* Save off the PHY page for future writes. */
2364 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2365 phy_page = reg_data;
2369 reg_addr &= PHY_REG_MASK;
2370 reg_addr |= phy_page;
2372 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2379 hw->phy.ops.release(hw);
/* NOTE(review): lossy extract — error-check lines between reads/writes are
 * missing; code kept byte-identical, comments only added. */
2384 * e1000_k1_gig_workaround_hv - K1 Si workaround
2385 * @hw: pointer to the HW structure
2386 * @link: link up bool flag
2388 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2389 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
2390 * If link is down, the function will restore the default K1 setting located
2393 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2395 s32 ret_val = E1000_SUCCESS;
2397 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2399 DEBUGFUNC("e1000_k1_gig_workaround_hv");
/* Workaround applies to first-generation PCH (82577/82578 PHYs) only. */
2401 if (hw->mac.type != e1000_pchlan)
2402 return E1000_SUCCESS;
2404 /* Wrap the whole flow with the sw flag */
2405 ret_val = hw->phy.ops.acquire(hw);
2409 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2411 if (hw->phy.type == e1000_phy_82578) {
2412 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
/* Keep only the link-up / resolved / speed bits before comparing. */
2417 status_reg &= (BM_CS_STATUS_LINK_UP |
2418 BM_CS_STATUS_RESOLVED |
2419 BM_CS_STATUS_SPEED_MASK);
2421 if (status_reg == (BM_CS_STATUS_LINK_UP |
2422 BM_CS_STATUS_RESOLVED |
2423 BM_CS_STATUS_SPEED_1000))
2427 if (hw->phy.type == e1000_phy_82577) {
2428 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2433 status_reg &= (HV_M_STATUS_LINK_UP |
2434 HV_M_STATUS_AUTONEG_COMPLETE |
2435 HV_M_STATUS_SPEED_MASK);
2437 if (status_reg == (HV_M_STATUS_LINK_UP |
2438 HV_M_STATUS_AUTONEG_COMPLETE |
2439 HV_M_STATUS_SPEED_1000))
2443 /* Link stall fix for link up */
2444 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2450 /* Link stall fix for link down */
2451 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2457 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2460 hw->phy.ops.release(hw);
/* NOTE(review): lossy extract — local declarations and error checks are
 * missing; code kept byte-identical, comments only added. */
2466 * e1000_configure_k1_ich8lan - Configure K1 power state
2467 * @hw: pointer to the HW structure
2468 * @enable: K1 state to configure
2470 * Configure the K1 power state based on the provided parameter.
2471 * Assumes semaphore already acquired.
2473 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2475 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2483 DEBUGFUNC("e1000_configure_k1_ich8lan");
/* Read-modify-write the KMRN K1 config bit per the k1_enable flag. */
2485 ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2491 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2493 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2495 ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
/* Temporarily force speed/bypass so the K1 change takes effect, then
 * restore the original CTRL/CTRL_EXT values. */
2501 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2502 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2504 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2505 reg |= E1000_CTRL_FRCSPD;
2506 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2508 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2509 E1000_WRITE_FLUSH(hw);
2511 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2512 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2513 E1000_WRITE_FLUSH(hw);
2516 return E1000_SUCCESS;
/* NOTE(review): lossy extract — declarations, braces and release/return
 * plumbing are missing; code kept byte-identical, comments only added. */
2520 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2521 * @hw: pointer to the HW structure
2522 * @d0_state: boolean if entering d0 or d3 device state
2524 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2525 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
2526 * in NVM determines whether HW should configure LPLU and Gbe Disable.
2528 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2534 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2536 if (hw->mac.type < e1000_pchlan)
2539 ret_val = hw->phy.ops.acquire(hw)
/* On first-gen PCH, skip when HW owns OEM config or SW config is off. */
2543 if (hw->mac.type == e1000_pchlan) {
2544 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2545 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2549 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2550 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2553 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2555 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2559 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
/* D0 path uses the D0A bits; the other path also honors non-D0A bits. */
2562 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2563 oem_reg |= HV_OEM_BITS_GBE_DIS;
2565 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2566 oem_reg |= HV_OEM_BITS_LPLU;
2568 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2569 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2570 oem_reg |= HV_OEM_BITS_GBE_DIS;
2572 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2573 E1000_PHY_CTRL_NOND0A_LPLU))
2574 oem_reg |= HV_OEM_BITS_LPLU;
2577 /* Set Restart auto-neg to activate the bits */
2578 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2579 !hw->phy.ops.check_reset_block(hw))
2580 oem_reg |= HV_OEM_BITS_RESTART_AN;
2582 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2585 hw->phy.ops.release(hw);
/* NOTE(review): lossy extract — declarations/braces missing; code kept
 * byte-identical, comments only added. Sets the HV_KMRN slow-MDIO bit so
 * subsequent MDIO accesses to the PHY are reliable. */
2592 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2593 * @hw: pointer to the HW structure
2595 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2600 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2602 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2606 data |= HV_KMRN_MDIO_SLOW;
2608 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
/* NOTE(review): lossy extract — error checks and some statements are
 * missing; code kept byte-identical, comments only added. */
2614 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2615 * done after every PHY reset.
2617 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2619 s32 ret_val = E1000_SUCCESS;
2622 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2624 if (hw->mac.type != e1000_pchlan)
2625 return E1000_SUCCESS;
2627 /* Set MDIO slow mode before any other MDIO access */
2628 if (hw->phy.type == e1000_phy_82577) {
2629 ret_val = e1000_set_mdio_slow_mode_hv(hw);
/* Early-silicon revisions (82577 rev 1/2, 82578 rev 1) need preamble
 * adjustments. */
2634 if (((hw->phy.type == e1000_phy_82577) &&
2635 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2636 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2637 /* Disable generation of early preamble */
2638 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2642 /* Preamble tuning for SSC */
2643 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2649 if (hw->phy.type == e1000_phy_82578) {
2650 /* Return registers to default by doing a soft reset then
2651 * writing 0x3140 to the control register.
2653 if (hw->phy.revision < 2) {
2654 e1000_phy_sw_reset_generic(hw);
2655 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2661 ret_val = hw->phy.ops.acquire(hw);
/* Reset page select to 0 via raw MDIC access while holding the lock. */
2666 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2667 hw->phy.ops.release(hw);
2671 /* Configure the K1 Si workaround during phy reset assuming there is
2672 * link so that it disables K1 if link is in 1Gbps.
2674 ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2678 /* Workaround for link disconnects on a busy hub in half duplex */
2679 ret_val = hw->phy.ops.acquire(hw);
2682 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2685 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2690 /* set MSE higher to enable link to stay up when noise is high */
2691 ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2693 hw->phy.ops.release(hw);
/* NOTE(review): lossy extract — declarations/labels missing; code kept
 * byte-identical, comments only added. */
2699 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2700 * @hw: pointer to the HW structure
2702 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2708 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2710 ret_val = hw->phy.ops.acquire(hw);
/* Gain access to the BM wakeup register page before mirroring RAL/RAH. */
2713 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2717 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2718 for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2719 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
/* Low 32 bits of the address are split across two 16-bit PHY regs. */
2720 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2721 (u16)(mac_reg & 0xFFFF));
2722 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2723 (u16)((mac_reg >> 16) & 0xFFFF));
2725 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2726 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2727 (u16)(mac_reg & 0xFFFF));
2728 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2729 (u16)((mac_reg & E1000_RAH_AV)
2733 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2736 hw->phy.ops.release(hw);
/* Reflected CRC-32 (IEEE 802.3 polynomial) over a 6-byte MAC address.
 * NOTE(review): lossy extract — the CRC accumulator init, per-byte XOR and
 * final return lines are missing here; code kept byte-identical. */
2739 static u32 e1000_calc_rx_da_crc(u8 mac[])
2741 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
2742 u32 i, j, mask, crc;
2744 DEBUGFUNC("e1000_calc_rx_da_crc");
2747 for (i = 0; i < 6; i++) {
2749 for (j = 8; j > 0; j--) {
/* mask is all-ones when the LSB is set, selecting the polynomial. */
2750 mask = (crc & 1) * (-1);
2751 crc = (crc >> 1) ^ (poly & mask);
/* NOTE(review): lossy extract — error checks, `continue`s, and some data
 * manipulation lines are missing; code kept byte-identical, comments only
 * added. Enable path programs jumbo-friendly MAC/PHY settings; disable path
 * restores hardware defaults. */
2758 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2760 * @hw: pointer to the HW structure
2761 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
2763 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2765 s32 ret_val = E1000_SUCCESS;
2770 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2772 if (hw->mac.type < e1000_pch2lan)
2773 return E1000_SUCCESS;
2775 /* disable Rx path while enabling/disabling workaround */
2776 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2777 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2778 phy_reg | (1 << 14));
2783 /* Write Rx addresses (rar_entry_count for RAL/H, and
2784 * SHRAL/H) and initial CRC values to the MAC
2786 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2787 u8 mac_addr[ETH_ADDR_LEN] = {0};
2788 u32 addr_high, addr_low;
/* Only valid (AV-bit) receive-address entries get a CRC seed. */
2790 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2791 if (!(addr_high & E1000_RAH_AV))
2793 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2794 mac_addr[0] = (addr_low & 0xFF);
2795 mac_addr[1] = ((addr_low >> 8) & 0xFF);
2796 mac_addr[2] = ((addr_low >> 16) & 0xFF);
2797 mac_addr[3] = ((addr_low >> 24) & 0xFF);
2798 mac_addr[4] = (addr_high & 0xFF);
2799 mac_addr[5] = ((addr_high >> 8) & 0xFF);
2801 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2802 e1000_calc_rx_da_crc(mac_addr));
2805 /* Write Rx addresses to the PHY */
2806 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2808 /* Enable jumbo frame workaround in the MAC */
2809 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2810 mac_reg &= ~(1 << 14);
2811 mac_reg |= (7 << 15);
2812 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
/* Strip Ethernet CRC in the MAC while the workaround is active. */
2814 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2815 mac_reg |= E1000_RCTL_SECRC;
2816 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2818 ret_val = e1000_read_kmrn_reg_generic(hw,
2819 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2823 ret_val = e1000_write_kmrn_reg_generic(hw,
2824 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2828 ret_val = e1000_read_kmrn_reg_generic(hw,
2829 E1000_KMRNCTRLSTA_HD_CTRL,
2833 data &= ~(0xF << 8);
2835 ret_val = e1000_write_kmrn_reg_generic(hw,
2836 E1000_KMRNCTRLSTA_HD_CTRL,
2841 /* Enable jumbo frame workaround in the PHY */
2842 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2843 data &= ~(0x7F << 5);
2844 data |= (0x37 << 5);
2845 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2848 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2850 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2853 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2854 data &= ~(0x3FF << 2);
2855 data |= (E1000_TX_PTR_GAP << 2);
2856 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2859 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2862 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2863 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2868 /* Write MAC register values back to h/w defaults */
2869 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2870 mac_reg &= ~(0xF << 14);
2871 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2873 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2874 mac_reg &= ~E1000_RCTL_SECRC;
2875 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2877 ret_val = e1000_read_kmrn_reg_generic(hw,
2878 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2882 ret_val = e1000_write_kmrn_reg_generic(hw,
2883 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2887 ret_val = e1000_read_kmrn_reg_generic(hw,
2888 E1000_KMRNCTRLSTA_HD_CTRL,
2892 data &= ~(0xF << 8);
2894 ret_val = e1000_write_kmrn_reg_generic(hw,
2895 E1000_KMRNCTRLSTA_HD_CTRL,
2900 /* Write PHY register values back to h/w defaults */
2901 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2902 data &= ~(0x7F << 5);
2903 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2906 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2908 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2911 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2912 data &= ~(0x3FF << 2);
2914 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2917 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2920 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2921 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2927 /* re-enable Rx path after enabling/disabling workaround */
2928 return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
/* NOTE(review): lossy extract — error checks missing; code kept
 * byte-identical, comments only added. 82579 (pch2lan) post-reset PHY
 * workarounds: slow MDIO first, then EMI noise-tolerance tuning. */
2933 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2934 * done after every PHY reset.
2936 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2938 s32 ret_val = E1000_SUCCESS;
2940 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2942 if (hw->mac.type != e1000_pch2lan)
2943 return E1000_SUCCESS;
2945 /* Set MDIO slow mode before any other MDIO access */
2946 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2950 ret_val = hw->phy.ops.acquire(hw);
2953 /* set MSE higher to enable link to stay up when noise is high */
2954 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2957 /* drop link after 5 times MSE threshold was reached */
2958 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2960 hw->phy.ops.release(hw);
/* NOTE(review): lossy extract — declarations, braces and error checks are
 * missing; code kept byte-identical, comments only added. */
2966 * e1000_k1_gig_workaround_lv - K1 Si workaround
2967 * @hw: pointer to the HW structure
2969 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
2970 * Disable K1 for 1000 and 100 speeds
2972 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2974 s32 ret_val = E1000_SUCCESS;
2977 DEBUGFUNC("e1000_k1_workaround_lv");
2979 if (hw->mac.type != e1000_pch2lan)
2980 return E1000_SUCCESS;
2982 /* Set K1 beacon duration based on 10Mbs speed */
2983 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
/* Act only once link is up and autoneg has completed. */
2987 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2988 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2990 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2993 /* LV 1G/100 Packet drop issue wa */
2994 ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2998 pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2999 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
/* 10Mbps path: widen the K1 beacon duration instead of disabling K1. */
3005 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
3006 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
3007 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
3008 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
/* NOTE(review): lossy extract — early-return and if/else lines missing;
 * code kept byte-identical, comments only added. */
3016 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
3017 * @hw: pointer to the HW structure
3018 * @gate: boolean set to TRUE to gate, FALSE to ungate
3020 * Gate/ungate the automatic PHY configuration via hardware; perform
3021 * the configuration via software instead.
3023 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
3027 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
3029 if (hw->mac.type < e1000_pch2lan)
/* Read-modify-write the GATE_PHY_CFG bit per the `gate` flag. */
3032 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
3035 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3037 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3039 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
/* NOTE(review): lossy extract — the `do {` opener and delay line are
 * missing; code kept byte-identical, comments only added. */
3043 * e1000_lan_init_done_ich8lan - Check for PHY config completion
3044 * @hw: pointer to the HW structure
3046 * Check the appropriate indication the MAC has finished configuring the
3047 * PHY after a software reset.
3049 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3051 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3053 DEBUGFUNC("e1000_lan_init_done_ich8lan");
3055 /* Wait for basic configuration completes before proceeding */
/* Poll STATUS.LAN_INIT_DONE until set or the loop counter expires. */
3057 data = E1000_READ_REG(hw, E1000_STATUS);
3058 data &= E1000_STATUS_LAN_INIT_DONE;
3060 } while ((!data) && --loop);
3062 /* If basic configuration is incomplete before the above loop
3063 * count reaches 0, loading the configuration from NVM will
3064 * leave the PHY in a bad state possibly resulting in no link.
3067 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3069 /* Clear the Init Done bit for the next init event */
3070 data = E1000_READ_REG(hw, E1000_STATUS);
3071 data &= ~E1000_STATUS_LAN_INIT_DONE;
3072 E1000_WRITE_REG(hw, E1000_STATUS, data);
/* NOTE(review): lossy extract — switch-case labels, braces and error checks
 * are missing. Fix applied: line 3110 contained mojibake `®` (an HTML
 * `&reg;` entity rendered as a character) where the argument `&reg` belongs;
 * restored to `&reg)` so the read_reg call passes the address of `reg`. */
3076 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3077 * @hw: pointer to the HW structure
3079 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3081 s32 ret_val = E1000_SUCCESS;
3084 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3086 if (hw->phy.ops.check_reset_block(hw))
3087 return E1000_SUCCESS;
3089 /* Allow time for h/w to get to quiescent state after reset */
3092 /* Perform any necessary post-reset workarounds */
3093 switch (hw->mac.type) {
3095 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3100 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3108 /* Clear the host wakeup bit after lcd reset */
3109 if (hw->mac.type >= e1000_pchlan) {
3110 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3111 reg &= ~BM_WUC_HOST_WU_BIT;
3112 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3115 /* Configure the LCD with the extended configuration region in NVM */
3116 ret_val = e1000_sw_lcd_config_ich8lan(hw);
3120 /* Configure the LCD with the OEM bits in NVM */
3121 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
3123 if (hw->mac.type == e1000_pch2lan) {
3124 /* Ungate automatic PHY configuration on non-managed 82579 */
3125 if (!(E1000_READ_REG(hw, E1000_FWSM) &
3126 E1000_ICH_FWSM_FW_VALID)) {
3128 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
3131 /* Set EEE LPI Update Timer to 200usec */
3132 ret_val = hw->phy.ops.acquire(hw);
3135 ret_val = e1000_write_emi_reg_locked(hw,
3136 I82579_LPI_UPDATE_TIMER,
3138 hw->phy.ops.release(hw);
/* NOTE(review): lossy extract — error check after the generic reset is
 * missing; code kept byte-identical, comments only added. */
3145 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3146 * @hw: pointer to the HW structure
3149 * This is a function pointer entry point called by drivers
3150 * or other shared routines.
3152 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3154 s32 ret_val = E1000_SUCCESS;
3156 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3158 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
3159 if ((hw->mac.type == e1000_pch2lan) &&
3160 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3161 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3163 ret_val = e1000_phy_hw_reset_generic(hw);
/* Finish with the ICH8-specific post-reset sequence. */
3167 return e1000_post_phy_reset_ich8lan(hw);
/* NOTE(review): lossy extract — declarations and the if/else around the
 * LPLU bit are missing; code kept byte-identical, comments only added. */
3171 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3172 * @hw: pointer to the HW structure
3173 * @active: TRUE to enable LPLU, FALSE to disable
3175 * Sets the LPLU state according to the active flag. For PCH, if OEM write
3176 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
3177 * the phy speed. This function will manually set the LPLU bit and restart
3178 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
3179 * since it configures the same bit.
3181 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3186 DEBUGFUNC("e1000_set_lplu_state_pchlan");
3187 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3192 oem_reg |= HV_OEM_BITS_LPLU;
3194 oem_reg &= ~HV_OEM_BITS_LPLU;
/* Only restart autoneg when no manageability reset block is active. */
3196 if (!hw->phy.ops.check_reset_block(hw))
3197 oem_reg |= HV_OEM_BITS_RESTART_AN;
3199 return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
/* NOTE(review): lossy extract — the active/inactive branch structure and
 * error checks are incomplete; code kept byte-identical, comments only
 * added. */
3203 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3204 * @hw: pointer to the HW structure
3205 * @active: TRUE to enable LPLU, FALSE to disable
3207 * Sets the LPLU D0 state according to the active flag. When
3208 * activating LPLU this function also disables smart speed
3209 * and vice versa. LPLU will not be activated unless the
3210 * device autonegotiation advertisement meets standards of
3211 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3212 * This is a function pointer entry point only called by
3213 * PHY setup routines.
3215 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3217 struct e1000_phy_info *phy = &hw->phy;
3219 s32 ret_val = E1000_SUCCESS;
3222 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3224 if (phy->type == e1000_phy_ife)
3225 return E1000_SUCCESS;
3227 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
/* Activation path: set D0A LPLU in the MAC, then disable SmartSpeed. */
3230 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3231 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3233 if (phy->type != e1000_phy_igp_3)
3234 return E1000_SUCCESS;
3236 /* Call gig speed drop workaround on LPLU before accessing
3239 if (hw->mac.type == e1000_ich8lan)
3240 e1000_gig_downshift_workaround_ich8lan(hw);
3242 /* When LPLU is enabled, we should disable SmartSpeed */
3243 ret_val = phy->ops.read_reg(hw,
3244 IGP01E1000_PHY_PORT_CONFIG,
3248 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3249 ret_val = phy->ops.write_reg(hw,
3250 IGP01E1000_PHY_PORT_CONFIG,
/* Deactivation path: clear D0A LPLU, then re-apply SmartSpeed policy. */
3255 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3256 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3258 if (phy->type != e1000_phy_igp_3)
3259 return E1000_SUCCESS;
3261 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
3262 * during Dx states where the power conservation is most
3263 * important. During driver activity we should enable
3264 * SmartSpeed, so performance is maintained.
3266 if (phy->smart_speed == e1000_smart_speed_on) {
3267 ret_val = phy->ops.read_reg(hw,
3268 IGP01E1000_PHY_PORT_CONFIG,
3273 data |= IGP01E1000_PSCFR_SMART_SPEED;
3274 ret_val = phy->ops.write_reg(hw,
3275 IGP01E1000_PHY_PORT_CONFIG,
3279 } else if (phy->smart_speed == e1000_smart_speed_off) {
3280 ret_val = phy->ops.read_reg(hw,
3281 IGP01E1000_PHY_PORT_CONFIG,
3286 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3287 ret_val = phy->ops.write_reg(hw,
3288 IGP01E1000_PHY_PORT_CONFIG,
3295 return E1000_SUCCESS;
/* NOTE(review): lossy extract — branch structure and error checks are
 * incomplete; code kept byte-identical, comments only added. */
3299 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3300 * @hw: pointer to the HW structure
3301 * @active: TRUE to enable LPLU, FALSE to disable
3303 * Sets the LPLU D3 state according to the active flag. When
3304 * activating LPLU this function also disables smart speed
3305 * and vice versa. LPLU will not be activated unless the
3306 * device autonegotiation advertisement meets standards of
3307 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3308 * This is a function pointer entry point only called by
3309 * PHY setup routines.
3311 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3313 struct e1000_phy_info *phy = &hw->phy;
3315 s32 ret_val = E1000_SUCCESS;
3318 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3320 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
/* Deactivation path: clear non-D0A LPLU, re-apply SmartSpeed policy. */
3323 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3324 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3326 if (phy->type != e1000_phy_igp_3)
3327 return E1000_SUCCESS;
3329 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
3330 * during Dx states where the power conservation is most
3331 * important. During driver activity we should enable
3332 * SmartSpeed, so performance is maintained.
3334 if (phy->smart_speed == e1000_smart_speed_on) {
3335 ret_val = phy->ops.read_reg(hw,
3336 IGP01E1000_PHY_PORT_CONFIG,
3341 data |= IGP01E1000_PSCFR_SMART_SPEED;
3342 ret_val = phy->ops.write_reg(hw,
3343 IGP01E1000_PHY_PORT_CONFIG,
3347 } else if (phy->smart_speed == e1000_smart_speed_off) {
3348 ret_val = phy->ops.read_reg(hw,
3349 IGP01E1000_PHY_PORT_CONFIG,
3354 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3355 ret_val = phy->ops.write_reg(hw,
3356 IGP01E1000_PHY_PORT_CONFIG,
/* Activation path: only engage D3 LPLU when advertising a non-gig-capable
 * (or all-speed) combination, per the header comment above. */
3361 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3362 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3363 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3364 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3365 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3367 if (phy->type != e1000_phy_igp_3)
3368 return E1000_SUCCESS;
3370 /* Call gig speed drop workaround on LPLU before accessing
3373 if (hw->mac.type == e1000_ich8lan)
3374 e1000_gig_downshift_workaround_ich8lan(hw);
3376 /* When LPLU is enabled, we should disable SmartSpeed */
3377 ret_val = phy->ops.read_reg(hw,
3378 IGP01E1000_PHY_PORT_CONFIG,
3383 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3384 ret_val = phy->ops.write_reg(hw,
3385 IGP01E1000_PHY_PORT_CONFIG,
/* NOTE(review): lossy extract — switch-case labels, `*bank = ...`
 * assignments and error checks are missing; code kept byte-identical,
 * comments only added. */
3393 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3394 * @hw: pointer to the HW structure
3395 * @bank: pointer to the variable that returns the active bank
3397 * Reads signature byte from the NVM using the flash access registers.
3398 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3400 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3403 struct e1000_nvm_info *nvm = &hw->nvm;
3404 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3405 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3410 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3412 switch (hw->mac.type) {
/* SPT+ path: dword-granular flash access, word-based offsets. */
3414 bank1_offset = nvm->flash_bank_size;
3415 act_offset = E1000_ICH_NVM_SIG_WORD;
3417 /* set bank to 0 in case flash read fails */
3421 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3425 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3426 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3427 E1000_ICH_NVM_SIG_VALUE) {
3429 return E1000_SUCCESS;
3433 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3438 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3439 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3440 E1000_ICH_NVM_SIG_VALUE) {
3442 return E1000_SUCCESS;
3445 DEBUGOUT("ERROR: No valid NVM bank present\n");
3446 return -E1000_ERR_NVM;
/* Older parts: try EECD's SEC1VAL indication first, then fall back to
 * byte-level flash signature reads. */
3449 eecd = E1000_READ_REG(hw, E1000_EECD);
3450 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3451 E1000_EECD_SEC1VAL_VALID_MASK) {
3452 if (eecd & E1000_EECD_SEC1VAL)
3457 return E1000_SUCCESS;
3459 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3462 /* set bank to 0 in case flash read fails */
3466 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3470 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3471 E1000_ICH_NVM_SIG_VALUE) {
3473 return E1000_SUCCESS;
3477 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3482 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3483 E1000_ICH_NVM_SIG_VALUE) {
3485 return E1000_SUCCESS;
3488 DEBUGOUT("ERROR: No valid NVM bank present\n");
3489 return -E1000_ERR_NVM;
/* NOTE(review): lossy extract — declarations, braces and error paths are
 * missing; code kept byte-identical, comments only added. */
3494 * e1000_read_nvm_spt - NVM access for SPT
3495 * @hw: pointer to the HW structure
3496 * @offset: The offset (in bytes) of the word(s) to read.
3497 * @words: Size of data to read in words.
3498 * @data: pointer to the word(s) to read at offset.
3500 * Reads a word(s) from the NVM
3502 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3505 struct e1000_nvm_info *nvm = &hw->nvm;
3506 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3508 s32 ret_val = E1000_SUCCESS;
3514 DEBUGFUNC("e1000_read_nvm_spt");
3516 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3518 DEBUGOUT("nvm parameter(s) out of bounds\n");
3519 ret_val = -E1000_ERR_NVM;
3523 nvm->ops.acquire(hw);
3525 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3526 if (ret_val != E1000_SUCCESS) {
3527 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3531 act_offset = (bank) ? nvm->flash_bank_size : 0;
3532 act_offset += offset;
3534 ret_val = E1000_SUCCESS;
/* SPT flash is dword-granular: read two words per cycle, preferring any
 * modified shadow-RAM values over the flash contents. */
3536 for (i = 0; i < words; i += 2) {
3537 if (words - i == 1) {
3538 if (dev_spec->shadow_ram[offset+i].modified) {
3539 data[i] = dev_spec->shadow_ram[offset+i].value;
/* Odd trailing word: read the aligned dword and pick the half. */
3541 offset_to_read = act_offset + i -
3542 ((act_offset + i) % 2);
3544 e1000_read_flash_dword_ich8lan(hw,
3549 if ((act_offset + i) % 2 == 0)
3550 data[i] = (u16)(dword & 0xFFFF);
3552 data[i] = (u16)((dword >> 16) & 0xFFFF);
3555 offset_to_read = act_offset + i;
3556 if (!(dev_spec->shadow_ram[offset+i].modified) ||
3557 !(dev_spec->shadow_ram[offset+i+1].modified)) {
3559 e1000_read_flash_dword_ich8lan(hw,
3565 if (dev_spec->shadow_ram[offset+i].modified)
3566 data[i] = dev_spec->shadow_ram[offset+i].value;
3568 data[i] = (u16) (dword & 0xFFFF);
3569 if (dev_spec->shadow_ram[offset+i].modified)
3571 dev_spec->shadow_ram[offset+i+1].value;
3573 data[i+1] = (u16) (dword >> 16 & 0xFFFF);
3577 nvm->ops.release(hw);
3581 DEBUGOUT1("NVM read error: %d\n", ret_val);
/* NOTE(review): lossy extract — declarations and error paths are missing;
 * code kept byte-identical, comments only added. */
3587 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
3588 * @hw: pointer to the HW structure
3589 * @offset: The offset (in bytes) of the word(s) to read.
3590 * @words: Size of data to read in words
3591 * @data: Pointer to the word(s) to read at offset.
3593 * Reads a word(s) from the NVM using the flash access registers.
3595 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3598 struct e1000_nvm_info *nvm = &hw->nvm;
3599 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3601 s32 ret_val = E1000_SUCCESS;
3605 DEBUGFUNC("e1000_read_nvm_ich8lan");
3607 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3609 DEBUGOUT("nvm parameter(s) out of bounds\n");
3610 ret_val = -E1000_ERR_NVM;
3614 nvm->ops.acquire(hw);
3616 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3617 if (ret_val != E1000_SUCCESS) {
3618 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3622 act_offset = (bank) ? nvm->flash_bank_size : 0;
3623 act_offset += offset;
3625 ret_val = E1000_SUCCESS;
/* Prefer modified shadow-RAM values; otherwise read each word from flash. */
3626 for (i = 0; i < words; i++) {
3627 if (dev_spec->shadow_ram[offset+i].modified) {
3628 data[i] = dev_spec->shadow_ram[offset+i].value;
3630 ret_val = e1000_read_flash_word_ich8lan(hw,
3639 nvm->ops.release(hw);
3643 DEBUGOUT1("NVM read error: %d\n", ret_val);
/* NOTE(review): lossy extract — loop-variable declaration, delay, and some
 * braces are missing; code kept byte-identical, comments only added. */
3649 * e1000_flash_cycle_init_ich8lan - Initialize flash
3650 * @hw: pointer to the HW structure
3652 * This function does initial flash setup so that a new read/write/erase cycle
3655 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3657 union ich8_hws_flash_status hsfsts;
3658 s32 ret_val = -E1000_ERR_NVM;
3660 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3662 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3664 /* Check if the flash descriptor is valid */
3665 if (!hsfsts.hsf_status.fldesvalid) {
3666 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
3667 return -E1000_ERR_NVM;
3670 /* Clear FCERR and DAEL in hw status by writing 1 */
3671 hsfsts.hsf_status.flcerr = 1;
3672 hsfsts.hsf_status.dael = 1;
/* SPT+ exposes HSFSTS via a 32-bit register; older parts use 16-bit. */
3673 if (hw->mac.type >= e1000_pch_spt)
3674 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3675 hsfsts.regval & 0xFFFF);
3677 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3679 /* Either we should have a hardware SPI cycle in progress
3680 * bit to check against, in order to start a new cycle or
3681 * FDONE bit should be changed in the hardware so that it
3682 * is 1 after hardware reset, which can then be used as an
3683 * indication whether a cycle is in progress or has been
3687 if (!hsfsts.hsf_status.flcinprog) {
3688 /* There is no cycle running at present,
3689 * so we can start a cycle.
3690 * Begin by setting Flash Cycle Done.
3692 hsfsts.hsf_status.flcdone = 1;
3693 if (hw->mac.type >= e1000_pch_spt)
3694 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3695 hsfsts.regval & 0xFFFF);
3697 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3699 ret_val = E1000_SUCCESS;
3703 /* Otherwise poll for sometime so the current
3704 * cycle has a chance to end before giving up.
3706 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3707 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3709 if (!hsfsts.hsf_status.flcinprog) {
3710 ret_val = E1000_SUCCESS;
3715 if (ret_val == E1000_SUCCESS) {
3716 /* Successful in waiting for previous cycle to timeout,
3717 * now set the Flash Cycle Done.
3719 hsfsts.hsf_status.flcdone = 1;
3720 if (hw->mac.type >= e1000_pch_spt)
3721 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3722 hsfsts.regval & 0xFFFF);
3724 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3727 DEBUGOUT("Flash controller busy, cannot get access\n");
3735 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3736 * @hw: pointer to the HW structure
3737 * @timeout: maximum time to wait for completion
3739 * This function starts a flash cycle and waits for its completion.
3741 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3743 union ich8_hws_flash_ctrl hsflctl;
3744 union ich8_hws_flash_status hsfsts;
3747 DEBUGFUNC("e1000_flash_cycle_ich8lan");
3749 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
/* On SPT+, HSFCTL occupies the upper 16 bits of the 32-bit HSFSTS
 * register, hence the >>16 on read and <<16 on write.
 */
3750 if (hw->mac.type >= e1000_pch_spt)
3751 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3753 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3754 hsflctl.hsf_ctrl.flcgo = 1;
3756 if (hw->mac.type >= e1000_pch_spt)
3757 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3758 hsflctl.regval << 16);
3760 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3762 /* wait till FDONE bit is set to 1 */
3764 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3765 if (hsfsts.hsf_status.flcdone)
3768 } while (i++ < timeout);
/* Success only if the cycle completed without a flash cycle error. */
3770 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3771 return E1000_SUCCESS;
3773 return -E1000_ERR_NVM;
3777 * e1000_read_flash_dword_ich8lan - Read dword from flash
3778 * @hw: pointer to the HW structure
3779 * @offset: offset to data location
3780 * @data: pointer to the location for storing the data
3782 * Reads the flash dword at offset into data. Offset is converted
3783 * to bytes before read.
3785 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3788 DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3791 return -E1000_ERR_NVM;
3793 /* Must convert word offset into bytes. */
/* Delegates to the 32-bit flash-data reader (SPT-only access path). */
3796 return e1000_read_flash_data32_ich8lan(hw, offset, data);
3800 * e1000_read_flash_word_ich8lan - Read word from flash
3801 * @hw: pointer to the HW structure
3802 * @offset: offset to data location
3803 * @data: pointer to the location for storing the data
3805 * Reads the flash word at offset into data. Offset is converted
3806 * to bytes before read.
3808 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3811 DEBUGFUNC("e1000_read_flash_word_ich8lan");
3814 return -E1000_ERR_NVM;
3816 /* Must convert offset into bytes. */
/* size = 2: read one 16-bit word via the generic byte/word reader. */
3819 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3823 * e1000_read_flash_byte_ich8lan - Read byte from flash
3824 * @hw: pointer to the HW structure
3825 * @offset: The offset of the byte to read.
3826 * @data: Pointer to a byte to store the value read.
3828 * Reads a single byte from the NVM using the flash access registers.
3830 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3836 /* In SPT, only 32 bits access is supported,
3837 * so this function should not be called.
3839 if (hw->mac.type >= e1000_pch_spt)
3840 return -E1000_ERR_NVM;
/* Read one byte (size = 1) through the generic reader into a u16. */
3842 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3849 return E1000_SUCCESS;
3853 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
3854 * @hw: pointer to the HW structure
3855 * @offset: The offset (in bytes) of the byte or word to read.
3856 * @size: Size of data to read, 1=byte 2=word
3857 * @data: Pointer to the word to store the value read.
3859 * Reads a byte or word from the NVM using the flash access registers.
3861 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3864 union ich8_hws_flash_status hsfsts;
3865 union ich8_hws_flash_ctrl hsflctl;
3866 u32 flash_linear_addr;
3868 s32 ret_val = -E1000_ERR_NVM;
3871 DEBUGFUNC("e1000_read_flash_data_ich8lan");
/* Only 1- or 2-byte accesses are legal here; offset must fit in the
 * flash linear address field.
 */
3873 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3874 return -E1000_ERR_NVM;
3875 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3876 hw->nvm.flash_base_addr);
3881 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3882 if (ret_val != E1000_SUCCESS)
3884 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3886 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3887 hsflctl.hsf_ctrl.fldbcount = size - 1;
3888 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3889 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3890 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3892 ret_val = e1000_flash_cycle_ich8lan(hw,
3893 ICH_FLASH_READ_COMMAND_TIMEOUT);
3895 /* Check if FCERR is set to 1, if set to 1, clear it
3896 * and try the whole sequence a few more times, else
3897 * read in (shift in) the Flash Data0, the order is
3898 * least significant byte first msb to lsb
3900 if (ret_val == E1000_SUCCESS) {
3901 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3903 *data = (u8)(flash_data & 0x000000FF);
3905 *data = (u16)(flash_data & 0x0000FFFF);
3908 /* If we've gotten here, then things are probably
3909 * completely hosed, but if the error condition is
3910 * detected, it won't hurt to give it another try...
3911 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3913 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3915 if (hsfsts.hsf_status.flcerr) {
3916 /* Repeat for some time before giving up. */
3918 } else if (!hsfsts.hsf_status.flcdone) {
3919 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3923 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3929 * e1000_read_flash_data32_ich8lan - Read dword from NVM
3930 * @hw: pointer to the HW structure
3931 * @offset: The offset (in bytes) of the dword to read.
3932 * @data: Pointer to the dword to store the value read.
3934 * Reads a byte or word from the NVM using the flash access registers.
3936 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3939 union ich8_hws_flash_status hsfsts;
3940 union ich8_hws_flash_ctrl hsflctl;
3941 u32 flash_linear_addr;
3942 s32 ret_val = -E1000_ERR_NVM;
3945 DEBUGFUNC("e1000_read_flash_data_ich8lan");
/* Only valid on SPT and later; pre-SPT parts use the 16-bit path. */
3947 if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3948 hw->mac.type < e1000_pch_spt)
3949 return -E1000_ERR_NVM;
3950 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3951 hw->nvm.flash_base_addr);
3956 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3957 if (ret_val != E1000_SUCCESS)
3959 /* In SPT, This register is in Lan memory space, not flash.
3960 * Therefore, only 32 bit access is supported
3962 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3964 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
/* fldbcount = 3 encodes a 4-byte (dword) transfer. */
3965 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3966 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3967 /* In SPT, This register is in Lan memory space, not flash.
3968 * Therefore, only 32 bit access is supported
3970 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3971 (u32)hsflctl.regval << 16);
3972 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3974 ret_val = e1000_flash_cycle_ich8lan(hw,
3975 ICH_FLASH_READ_COMMAND_TIMEOUT);
3977 /* Check if FCERR is set to 1, if set to 1, clear it
3978 * and try the whole sequence a few more times, else
3979 * read in (shift in) the Flash Data0, the order is
3980 * least significant byte first msb to lsb
3982 if (ret_val == E1000_SUCCESS) {
3983 *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3986 /* If we've gotten here, then things are probably
3987 * completely hosed, but if the error condition is
3988 * detected, it won't hurt to give it another try...
3989 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3991 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3993 if (hsfsts.hsf_status.flcerr) {
3994 /* Repeat for some time before giving up. */
3996 } else if (!hsfsts.hsf_status.flcdone) {
3997 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4001 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4007 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
4008 * @hw: pointer to the HW structure
4009 * @offset: The offset (in bytes) of the word(s) to write.
4010 * @words: Size of data to write in words
4011 * @data: Pointer to the word(s) to write at offset.
4013 * Writes a byte or word to the NVM using the flash access registers.
4015 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
4018 struct e1000_nvm_info *nvm = &hw->nvm;
4019 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4022 DEBUGFUNC("e1000_write_nvm_ich8lan");
4024 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
4026 DEBUGOUT("nvm parameter(s) out of bounds\n");
4027 return -E1000_ERR_NVM;
4030 nvm->ops.acquire(hw);
/* Writes only update the shadow RAM cache; the flash itself is
 * committed later by update_nvm_checksum (see below).
 */
4032 for (i = 0; i < words; i++) {
4033 dev_spec->shadow_ram[offset+i].modified = TRUE;
4034 dev_spec->shadow_ram[offset+i].value = data[i];
4037 nvm->ops.release(hw);
4039 return E1000_SUCCESS;
4043 * e1000_update_nvm_checksum_spt - Update the checksum for NVM
4044 * @hw: pointer to the HW structure
4046 * The NVM checksum is updated by calling the generic update_nvm_checksum,
4047 * which writes the checksum to the shadow ram. The changes in the shadow
4048 * ram are then committed to the EEPROM by processing each bank at a time
4049 * checking for the modified bit and writing only the pending changes.
4050 * After a successful commit, the shadow ram is cleared and is ready for
4053 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4055 struct e1000_nvm_info *nvm = &hw->nvm;
4056 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4057 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4061 DEBUGFUNC("e1000_update_nvm_checksum_spt");
4063 ret_val = e1000_update_nvm_checksum_generic(hw);
4067 if (nvm->type != e1000_nvm_flash_sw)
4070 nvm->ops.acquire(hw);
4072 /* We're writing to the opposite bank so if we're on bank 1,
4073 * write to bank 0 etc. We also need to erase the segment that
4074 * is going to be written
4076 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4077 if (ret_val != E1000_SUCCESS) {
4078 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4083 new_bank_offset = nvm->flash_bank_size;
4084 old_bank_offset = 0;
4085 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4089 old_bank_offset = nvm->flash_bank_size;
4090 new_bank_offset = 0;
4091 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
/* SPT commits two shadow words per iteration as one dword (i += 2). */
4095 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4096 /* Determine whether to write the value stored
4097 * in the other NVM bank or a modified value stored
4100 ret_val = e1000_read_flash_dword_ich8lan(hw,
4101 i + old_bank_offset,
/* Merge modified shadow words into the low/high halves of the dword. */
4104 if (dev_spec->shadow_ram[i].modified) {
4105 dword &= 0xffff0000;
4106 dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4108 if (dev_spec->shadow_ram[i + 1].modified) {
4109 dword &= 0x0000ffff;
4110 dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4116 /* If the word is 0x13, then make sure the signature bits
4117 * (15:14) are 11b until the commit has completed.
4118 * This will allow us to write 10b which indicates the
4119 * signature is valid. We want to do this after the write
4120 * has completed so that we don't mark the segment valid
4121 * while the write is still in progress
4123 if (i == E1000_ICH_NVM_SIG_WORD - 1)
4124 dword |= E1000_ICH_NVM_SIG_MASK << 16;
4126 /* Convert offset to bytes. */
4127 act_offset = (i + new_bank_offset) << 1;
4131 /* Write the data to the new bank. Offset in words*/
4132 act_offset = i + new_bank_offset;
4133 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4139 /* Don't bother writing the segment valid bits if sector
4140 * programming failed.
4143 DEBUGOUT("Flash commit failed.\n");
4147 /* Finally validate the new segment by setting bit 15:14
4148 * to 10b in word 0x13 , this can be done without an
4149 * erase as well since these bits are 11 to start with
4150 * and we need to change bit 14 to 0b
4152 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4154 /*offset in words but we read dword*/
4156 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4161 dword &= 0xBFFFFFFF;
4162 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4167 /* And invalidate the previously valid segment by setting
4168 * its signature word (0x13) high_byte to 0b. This can be
4169 * done without an erase because flash erase sets all bits
4170 * to 1's. We can write 1's to 0's without an erase
4172 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4174 /* offset in words but we read dword*/
4175 act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4176 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4181 dword &= 0x00FFFFFF;
4182 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4187 /* Great! Everything worked, we can now clear the cached entries. */
4188 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4189 dev_spec->shadow_ram[i].modified = FALSE;
4190 dev_spec->shadow_ram[i].value = 0xFFFF;
4194 nvm->ops.release(hw);
4196 /* Reload the EEPROM, or else modifications will not appear
4197 * until after the next adapter reset.
4200 nvm->ops.reload(hw);
4206 DEBUGOUT1("NVM update error: %d\n", ret_val);
4212 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4213 * @hw: pointer to the HW structure
4215 * The NVM checksum is updated by calling the generic update_nvm_checksum,
4216 * which writes the checksum to the shadow ram. The changes in the shadow
4217 * ram are then committed to the EEPROM by processing each bank at a time
4218 * checking for the modified bit and writing only the pending changes.
4219 * After a successful commit, the shadow ram is cleared and is ready for
4222 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4224 struct e1000_nvm_info *nvm = &hw->nvm;
4225 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4226 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4230 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4232 ret_val = e1000_update_nvm_checksum_generic(hw);
4236 if (nvm->type != e1000_nvm_flash_sw)
4239 nvm->ops.acquire(hw);
4241 /* We're writing to the opposite bank so if we're on bank 1,
4242 * write to bank 0 etc. We also need to erase the segment that
4243 * is going to be written
4245 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4246 if (ret_val != E1000_SUCCESS) {
4247 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4252 new_bank_offset = nvm->flash_bank_size;
4253 old_bank_offset = 0;
4254 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4258 old_bank_offset = nvm->flash_bank_size;
4259 new_bank_offset = 0;
4260 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
/* Pre-SPT path commits one shadow word at a time (byte writes below). */
4264 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4265 if (dev_spec->shadow_ram[i].modified) {
4266 data = dev_spec->shadow_ram[i].value;
4268 ret_val = e1000_read_flash_word_ich8lan(hw, i +
4274 /* If the word is 0x13, then make sure the signature bits
4275 * (15:14) are 11b until the commit has completed.
4276 * This will allow us to write 10b which indicates the
4277 * signature is valid. We want to do this after the write
4278 * has completed so that we don't mark the segment valid
4279 * while the write is still in progress
4281 if (i == E1000_ICH_NVM_SIG_WORD)
4282 data |= E1000_ICH_NVM_SIG_MASK;
4284 /* Convert offset to bytes. */
4285 act_offset = (i + new_bank_offset) << 1;
4289 /* Write the bytes to the new bank. */
4290 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4297 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4304 /* Don't bother writing the segment valid bits if sector
4305 * programming failed.
4308 DEBUGOUT("Flash commit failed.\n");
4312 /* Finally validate the new segment by setting bit 15:14
4313 * to 10b in word 0x13 , this can be done without an
4314 * erase as well since these bits are 11 to start with
4315 * and we need to change bit 14 to 0b
4317 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4318 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4323 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4328 /* And invalidate the previously valid segment by setting
4329 * its signature word (0x13) high_byte to 0b. This can be
4330 * done without an erase because flash erase sets all bits
4331 * to 1's. We can write 1's to 0's without an erase
4333 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4335 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4340 /* Great! Everything worked, we can now clear the cached entries. */
4341 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4342 dev_spec->shadow_ram[i].modified = FALSE;
4343 dev_spec->shadow_ram[i].value = 0xFFFF;
4347 nvm->ops.release(hw);
4349 /* Reload the EEPROM, or else modifications will not appear
4350 * until after the next adapter reset.
4353 nvm->ops.reload(hw);
4359 DEBUGOUT1("NVM update error: %d\n", ret_val);
4365 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4366 * @hw: pointer to the HW structure
4368 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4369 * If the bit is 0, that the EEPROM had been modified, but the checksum was not
4370 * calculated, in which case we need to calculate the checksum and set bit 6.
4372 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4377 u16 valid_csum_mask;
4379 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4381 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
4382 * the checksum needs to be fixed. This bit is an indication that
4383 * the NVM was prepared by OEM software and did not calculate
4384 * the checksum...a likely scenario.
4386 switch (hw->mac.type) {
/* Word/mask selection depends on MAC generation (older parts use
 * NVM_COMPAT, newer ones NVM_FUTURE_INIT_WORD1).
 */
4390 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4393 word = NVM_FUTURE_INIT_WORD1;
4394 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4398 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
/* If the valid-checksum bit is clear, set it, write the word back and
 * commit via the update op before running the generic validation.
 */
4402 if (!(data & valid_csum_mask)) {
4403 data |= valid_csum_mask;
4404 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4407 ret_val = hw->nvm.ops.update(hw);
4412 return e1000_validate_nvm_checksum_generic(hw);
4416 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4417 * @hw: pointer to the HW structure
4418 * @offset: The offset (in bytes) of the byte/word to read.
4419 * @size: Size of data to read, 1=byte 2=word
4420 * @data: The byte(s) to write to the NVM.
4422 * Writes one/two bytes to the NVM using the flash access registers.
4424 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4427 union ich8_hws_flash_status hsfsts;
4428 union ich8_hws_flash_ctrl hsflctl;
4429 u32 flash_linear_addr;
4434 DEBUGFUNC("e1000_write_ich8_data");
/* SPT+ only supports 4-byte transfers; older parts accept 1 or 2. */
4436 if (hw->mac.type >= e1000_pch_spt) {
4437 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4438 return -E1000_ERR_NVM;
4440 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4441 return -E1000_ERR_NVM;
4444 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4445 hw->nvm.flash_base_addr);
4450 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4451 if (ret_val != E1000_SUCCESS)
4453 /* In SPT, This register is in Lan memory space, not
4454 * flash. Therefore, only 32 bit access is supported
4456 if (hw->mac.type >= e1000_pch_spt)
4458 E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4461 E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4463 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4464 hsflctl.hsf_ctrl.fldbcount = size - 1;
4465 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4466 /* In SPT, This register is in Lan memory space,
4467 * not flash. Therefore, only 32 bit access is
4470 if (hw->mac.type >= e1000_pch_spt)
4471 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4472 hsflctl.regval << 16);
4474 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4477 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr)
4480 flash_data = (u32)data & 0x00FF;
4482 flash_data = (u32)data;
4484 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4486 /* check if FCERR is set to 1 , if set to 1, clear it
4487 * and try the whole sequence a few more times else done
4490 e1000_flash_cycle_ich8lan(hw,
4491 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4492 if (ret_val == E1000_SUCCESS)
4495 /* If we're here, then things are most likely
4496 * completely hosed, but if the error condition
4497 * is detected, it won't hurt to give it another
4498 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4500 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4501 if (hsfsts.hsf_status.flcerr)
4502 /* Repeat for some time before giving up. */
4504 if (!hsfsts.hsf_status.flcdone) {
4505 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4508 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4514 * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4515 * @hw: pointer to the HW structure
4516 * @offset: The offset (in bytes) of the dwords to read.
4517 * @data: The 4 bytes to write to the NVM.
4519 * Writes one/two/four bytes to the NVM using the flash access registers.
4521 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4524 union ich8_hws_flash_status hsfsts;
4525 union ich8_hws_flash_ctrl hsflctl;
4526 u32 flash_linear_addr;
4530 DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4532 if (hw->mac.type >= e1000_pch_spt) {
4533 if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4534 return -E1000_ERR_NVM;
4536 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4537 hw->nvm.flash_base_addr);
4541 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4542 if (ret_val != E1000_SUCCESS)
4545 /* In SPT, This register is in Lan memory space, not
4546 * flash. Therefore, only 32 bit access is supported
4548 if (hw->mac.type >= e1000_pch_spt)
4549 hsflctl.regval = E1000_READ_FLASH_REG(hw,
4553 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
/* fldbcount = 3 encodes a 4-byte (dword) transfer. */
4556 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4557 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4559 /* In SPT, This register is in Lan memory space,
4560 * not flash. Therefore, only 32 bit access is
4563 if (hw->mac.type >= e1000_pch_spt)
4564 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4565 hsflctl.regval << 16);
4567 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4570 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4572 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4574 /* check if FCERR is set to 1 , if set to 1, clear it
4575 * and try the whole sequence a few more times else done
4577 ret_val = e1000_flash_cycle_ich8lan(hw,
4578 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4580 if (ret_val == E1000_SUCCESS)
4583 /* If we're here, then things are most likely
4584 * completely hosed, but if the error condition
4585 * is detected, it won't hurt to give it another
4586 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4588 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4590 if (hsfsts.hsf_status.flcerr)
4591 /* Repeat for some time before giving up. */
4593 if (!hsfsts.hsf_status.flcdone) {
4594 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4597 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4603 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4604 * @hw: pointer to the HW structure
4605 * @offset: The index of the byte to read.
4606 * @data: The byte to write to the NVM.
4608 * Writes a single byte to the NVM using the flash access registers.
4610 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4613 u16 word = (u16)data;
4615 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
/* Thin wrapper: one-byte write via the generic data writer (size = 1). */
4617 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4621 * e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4622 * @hw: pointer to the HW structure
4623 * @offset: The offset of the word to write.
4624 * @dword: The dword to write to the NVM.
4626 * Writes a single dword to the NVM using the flash access registers.
4627 * Goes through a retry algorithm before giving up.
4629 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4630 u32 offset, u32 dword)
4633 u16 program_retries;
4635 DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4637 /* Must convert word offset into bytes. */
4640 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
/* On failure, retry the same dword up to 100 times before giving up. */
4644 for (program_retries = 0; program_retries < 100; program_retries++) {
4645 DEBUGOUT2("Retrying Byte %8.8X at offset %u\n", dword, offset);
4647 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4648 if (ret_val == E1000_SUCCESS)
4651 if (program_retries == 100)
4652 return -E1000_ERR_NVM;
4654 return E1000_SUCCESS;
4658 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4659 * @hw: pointer to the HW structure
4660 * @offset: The offset of the byte to write.
4661 * @byte: The byte to write to the NVM.
4663 * Writes a single byte to the NVM using the flash access registers.
4664 * Goes through a retry algorithm before giving up.
4666 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4667 u32 offset, u8 byte)
4670 u16 program_retries;
4672 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4674 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
/* On failure, retry the same byte up to 100 times before giving up. */
4678 for (program_retries = 0; program_retries < 100; program_retries++) {
4679 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4681 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4682 if (ret_val == E1000_SUCCESS)
4685 if (program_retries == 100)
4686 return -E1000_ERR_NVM;
4688 return E1000_SUCCESS;
4692 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4693 * @hw: pointer to the HW structure
4694 * @bank: 0 for first bank, 1 for second bank, etc.
4696 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4697 * bank N is 4096 * N + flash_reg_addr.
4699 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4701 struct e1000_nvm_info *nvm = &hw->nvm;
4702 union ich8_hws_flash_status hsfsts;
4703 union ich8_hws_flash_ctrl hsflctl;
4704 u32 flash_linear_addr;
4705 /* bank size is in 16bit words - adjust to bytes */
4706 u32 flash_bank_size = nvm->flash_bank_size * 2;
4709 s32 j, iteration, sector_size;
4711 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4713 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4715 /* Determine HW Sector size: Read BERASE bits of hw flash status
4717 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4718 * consecutive sectors. The start index for the nth Hw sector
4719 * can be calculated as = bank * 4096 + n * 256
4720 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4721 * The start index for the nth Hw sector can be calculated
4723 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4724 * (ich9 only, otherwise error condition)
4725 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4727 switch (hsfsts.hsf_status.berasesz) {
4729 /* Hw sector size 256 */
4730 sector_size = ICH_FLASH_SEG_SIZE_256;
4731 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4734 sector_size = ICH_FLASH_SEG_SIZE_4K;
4738 sector_size = ICH_FLASH_SEG_SIZE_8K;
4742 sector_size = ICH_FLASH_SEG_SIZE_64K;
4746 return -E1000_ERR_NVM;
4749 /* Start with the base address, then add the sector offset. */
4750 flash_linear_addr = hw->nvm.flash_base_addr;
4751 flash_linear_addr += (bank) ? flash_bank_size : 0;
4753 for (j = 0; j < iteration; j++) {
4755 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4758 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4762 /* Write a value 11 (block Erase) in Flash
4763 * Cycle field in hw flash control
4765 if (hw->mac.type >= e1000_pch_spt)
4767 E1000_READ_FLASH_REG(hw,
4768 ICH_FLASH_HSFSTS)>>16;
4771 E1000_READ_FLASH_REG16(hw,
4774 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4775 if (hw->mac.type >= e1000_pch_spt)
4776 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4777 hsflctl.regval << 16);
4779 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4782 /* Write the last 24 bits of an index within the
4783 * block into Flash Linear address field in Flash
4786 flash_linear_addr += (j * sector_size);
4787 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4790 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4791 if (ret_val == E1000_SUCCESS)
4794 /* Check if FCERR is set to 1. If 1,
4795 * clear it and try the whole sequence
4796 * a few more times else Done
4798 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4800 if (hsfsts.hsf_status.flcerr)
4801 /* repeat for some time before giving up */
4803 else if (!hsfsts.hsf_status.flcdone)
4805 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4808 return E1000_SUCCESS;
4812 * e1000_valid_led_default_ich8lan - Set the default LED settings
4813 * @hw: pointer to the HW structure
4814 * @data: Pointer to the LED settings
4816 * Reads the LED default settings from the NVM to data. If the NVM LED
4817 * settings is all 0's or F's, set the LED default to a valid LED default
4820 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4824 DEBUGFUNC("e1000_valid_led_default_ich8lan");
4826 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4828 DEBUGOUT("NVM Read Error\n");
/* Treat the all-zeros / all-ones reserved patterns as "unprogrammed"
 * and substitute the ICH8LAN default LED configuration.
 */
4832 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4833 *data = ID_LED_DEFAULT_ICH8LAN;
4835 return E1000_SUCCESS;
4839 * e1000_id_led_init_pchlan - store LED configurations
4840 * @hw: pointer to the HW structure
4842 * PCH does not control LEDs via the LEDCTL register, rather it uses
4843 * the PHY LED configuration register.
4845 * PCH also does not have an "always on" or "always off" mode which
4846 * complicates the ID feature. Instead of using the "on" mode to indicate
4847 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4848 * use "link_up" mode. The LEDs will still ID on request if there is no
4849 * link based on logic in e1000_led_[on|off]_pchlan().
4851 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4853 struct e1000_mac_info *mac = &hw->mac;
4855 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
/* "off" = link_up mode with the LED output inverted via LED0_IVRT. */
4856 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4857 u16 data, i, temp, shift;
4859 DEBUGFUNC("e1000_id_led_init_pchlan");
4861 /* Get default ID LED modes */
4862 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4866 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4867 mac->ledctl_mode1 = mac->ledctl_default;
4868 mac->ledctl_mode2 = mac->ledctl_default;
/* Each of the 4 LEDs has a 4-bit mode field in the NVM word. */
4870 for (i = 0; i < 4; i++) {
4871 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4874 case ID_LED_ON1_DEF2:
4875 case ID_LED_ON1_ON2:
4876 case ID_LED_ON1_OFF2:
4877 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4878 mac->ledctl_mode1 |= (ledctl_on << shift);
4880 case ID_LED_OFF1_DEF2:
4881 case ID_LED_OFF1_ON2:
4882 case ID_LED_OFF1_OFF2:
4883 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4884 mac->ledctl_mode1 |= (ledctl_off << shift);
4891 case ID_LED_DEF1_ON2:
4892 case ID_LED_ON1_ON2:
4893 case ID_LED_OFF1_ON2:
4894 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4895 mac->ledctl_mode2 |= (ledctl_on << shift);
4897 case ID_LED_DEF1_OFF2:
4898 case ID_LED_ON1_OFF2:
4899 case ID_LED_OFF1_OFF2:
4900 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4901 mac->ledctl_mode2 |= (ledctl_off << shift);
4909 return E1000_SUCCESS;
/* NOTE(review): numbered listing — elided lines include the function
 * braces, ret_val's declaration, and the final 'return ret_val'
 * (presumably; confirm against the full source).
 */
4913 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4914 * @hw: pointer to the HW structure
4916 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4917 * register, so the bus width is hard coded.
4919 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4921 struct e1000_bus_info *bus = &hw->bus;
4924 DEBUGFUNC("e1000_get_bus_info_ich8lan");
/* Let the generic PCIe probe fill in what it can first */
4926 ret_val = e1000_get_bus_info_pcie_generic(hw);
4928 /* ICH devices are "PCI Express"-ish. They have
4929 * a configuration space, but do not contain
4930 * PCI Express Capability registers, so bus width
4931 * must be hardcoded.
/* Only override when the generic probe could not determine the width */
4933 if (bus->width == e1000_bus_width_unknown)
4934 bus->width = e1000_bus_width_pcie_x1;
/* NOTE(review): numbered listing — elided lines include the declarations
 * of ret_val, ctrl, reg and kum_cfg, the error-check branches after
 * several calls, and delay/brace lines; not compilable as-is.
 */
4940 * e1000_reset_hw_ich8lan - Reset the hardware
4941 * @hw: pointer to the HW structure
4943 * Does a full reset of the hardware which includes a reset of the PHY and
4946 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4948 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4953 DEBUGFUNC("e1000_reset_hw_ich8lan");
4955 /* Prevent the PCI-E bus from sticking if there is no TLP connection
4956 * on the last TLP read/write transaction when MAC is reset.
4958 ret_val = e1000_disable_pcie_master_generic(hw);
/* Failure is logged but the reset proceeds anyway */
4960 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4962 DEBUGOUT("Masking off all interrupts\n");
4963 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4965 /* Disable the Transmit and Receive units. Then delay to allow
4966 * any pending transactions to complete before we hit the MAC
4967 * with the global reset.
4969 E1000_WRITE_REG(hw, E1000_RCTL, 0);
4970 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4971 E1000_WRITE_FLUSH(hw);
/* NOTE(review): the delay mentioned above is on an elided line */
4975 /* Workaround for ICH8 bit corruption issue in FIFO memory */
4976 if (hw->mac.type == e1000_ich8lan) {
4977 /* Set Tx and Rx buffer allocation to 8k apiece. */
4978 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4979 /* Set Packet Buffer Size to 16k. */
4980 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4983 if (hw->mac.type == e1000_pchlan) {
4984 /* Save the NVM K1 bit setting*/
4985 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
/* Remember the K1 power-saving setting so it can be restored post-reset */
4989 if (kum_cfg & E1000_NVM_K1_ENABLE)
4990 dev_spec->nvm_k1_enabled = TRUE;
4992 dev_spec->nvm_k1_enabled = FALSE;
4995 ctrl = E1000_READ_REG(hw, E1000_CTRL);
4997 if (!hw->phy.ops.check_reset_block(hw)) {
4998 /* Full-chip reset requires MAC and PHY reset at the same
4999 * time to make sure the interface between MAC and the
5000 * external PHY is reset.
5002 ctrl |= E1000_CTRL_PHY_RST;
5004 /* Gate automatic PHY configuration by hardware on
5007 if ((hw->mac.type == e1000_pch2lan) &&
5008 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
5009 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
/* Hold the SW flag across the reset; released via the mutex unlock below */
5011 ret_val = e1000_acquire_swflag_ich8lan(hw);
5012 DEBUGOUT("Issuing a global reset to ich8lan\n");
/* Writing CTRL.RST (optionally with PHY_RST) triggers the global reset */
5013 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
5014 /* cannot issue a flush here because it hangs the hardware */
5017 /* Set Phy Config Counter to 50msec */
5018 if (hw->mac.type == e1000_pch2lan) {
5019 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
5020 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
5021 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
5022 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
5026 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
/* If the PHY was reset together with the MAC, wait for config done and
 * run the post-PHY-reset fixups
 */
5028 if (ctrl & E1000_CTRL_PHY_RST) {
5029 ret_val = hw->phy.ops.get_cfg_done(hw);
5033 ret_val = e1000_post_phy_reset_ich8lan(hw);
5038 /* For PCH, this write will make sure that any noise
5039 * will be detected as a CRC error and be dropped rather than show up
5040 * as a bad packet to the DMA engine.
5042 if (hw->mac.type == e1000_pchlan)
5043 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
/* Mask and clear any interrupts the reset may have raised */
5045 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
5046 E1000_READ_REG(hw, E1000_ICR);
5048 reg = E1000_READ_REG(hw, E1000_KABGTXD);
5049 reg |= E1000_KABGTXD_BGSQLBIAS;
5050 E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
5052 return E1000_SUCCESS;
/* NOTE(review): numbered listing — elided lines include braces,
 * the declarations of ret_val and i, 'else' lines, and the final
 * 'return ret_val' (presumably; confirm against the full source).
 */
5056 * e1000_init_hw_ich8lan - Initialize the hardware
5057 * @hw: pointer to the HW structure
5059 * Prepares the hardware for transmit and receive by doing the following:
5060 * - initialize hardware bits
5061 * - initialize LED identification
5062 * - setup receive address registers
5063 * - setup flow control
5064 * - setup transmit descriptors
5065 * - clear statistics
5067 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
5069 struct e1000_mac_info *mac = &hw->mac;
5070 u32 ctrl_ext, txdctl, snoop;
5074 DEBUGFUNC("e1000_init_hw_ich8lan");
5076 e1000_initialize_hw_bits_ich8lan(hw);
5078 /* Initialize identification LED */
5079 ret_val = mac->ops.id_led_init(hw);
5080 /* An error is not fatal and we should not stop init due to this */
5082 DEBUGOUT("Error initializing identification LED\n");
5084 /* Setup the receive address. */
5085 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
5087 /* Zero out the Multicast HASH table */
5088 DEBUGOUT("Zeroing the MTA\n");
5089 for (i = 0; i < mac->mta_reg_count; i++)
5090 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
5092 /* The 82578 Rx buffer will stall if wakeup is enabled in host and
5093 * the ME. Disable wakeup by clearing the host wakeup bit.
5094 * Reset the phy after disabling host wakeup to reset the Rx buffer.
/* NOTE(review): 'i' is reused here as a u16 scratch for the PHY register
 * read-modify-write
 */
5096 if (hw->phy.type == e1000_phy_82578) {
5097 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
5098 i &= ~BM_WUC_HOST_WU_BIT;
5099 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
5100 ret_val = e1000_phy_hw_reset_ich8lan(hw);
5105 /* Setup link and flow control */
5106 ret_val = mac->ops.setup_link(hw);
5108 /* Set the transmit descriptor write-back policy for both queues */
/* Queue 0: full descriptor write-back + max prefetch thresholds */
5109 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
5110 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5111 E1000_TXDCTL_FULL_TX_DESC_WB);
5112 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5113 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5114 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
/* Queue 1: identical policy */
5115 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
5116 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5117 E1000_TXDCTL_FULL_TX_DESC_WB);
5118 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5119 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5120 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
5122 /* ICH8 has opposite polarity of no_snoop bits.
5123 * By default, we should use snoop behavior.
5125 if (mac->type == e1000_ich8lan)
5126 snoop = PCIE_ICH8_SNOOP_ALL;
5128 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
5129 e1000_set_pcie_no_snoop_generic(hw, snoop);
/* Disable relaxed ordering */
5131 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5132 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
5133 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5135 /* Clear all of the statistics registers (clear on read). It is
5136 * important that we do this after we have tried to establish link
5137 * because the symbol error count will increment wildly if there
5140 e1000_clear_hw_cntrs_ich8lan(hw);
/* NOTE(review): numbered listing — elided lines include the declaration
 * of reg, several of the bit-set expressions (e.g. lines 5168, 5173,
 * 5186-5188, 5195), and braces; not compilable as-is.
 */
5146 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5147 * @hw: pointer to the HW structure
5149 * Sets/Clears required hardware bits necessary for correctly setting up the
5150 * hardware for transmit and receive.
5152 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5156 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5158 /* Extended Device Control */
5159 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5161 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
5162 if (hw->mac.type >= e1000_pchlan)
5163 reg |= E1000_CTRL_EXT_PHYPDEN;
5164 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5166 /* Transmit Descriptor Control 0 */
5167 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5169 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5171 /* Transmit Descriptor Control 1 */
5172 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5174 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5176 /* Transmit Arbitration Control 0 */
5177 reg = E1000_READ_REG(hw, E1000_TARC(0));
5178 if (hw->mac.type == e1000_ich8lan)
5179 reg |= (1 << 28) | (1 << 29);
5180 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5181 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5183 /* Transmit Arbitration Control 1 */
5184 reg = E1000_READ_REG(hw, E1000_TARC(1));
/* NOTE(review): the then/else bit choices for the MULR test are elided */
5185 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5189 reg |= (1 << 24) | (1 << 26) | (1 << 30);
5190 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
/* NOTE(review): STATUS bit manipulation between read and write is elided */
5193 if (hw->mac.type == e1000_ich8lan) {
5194 reg = E1000_READ_REG(hw, E1000_STATUS);
5196 E1000_WRITE_REG(hw, E1000_STATUS, reg);
5199 /* work-around descriptor data corruption issue during nfs v2 udp
5200 * traffic, just disable the nfs filtering capability
5202 reg = E1000_READ_REG(hw, E1000_RFCTL);
5203 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5205 /* Disable IPv6 extension header parsing because some malformed
5206 * IPv6 headers can hang the Rx.
5208 if (hw->mac.type == e1000_ich8lan)
5209 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5210 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5212 /* Enable ECC on Lynxpoint */
5213 if (hw->mac.type >= e1000_pch_lpt) {
5214 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5215 reg |= E1000_PBECCSTS_ECC_ENABLE;
5216 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
/* Also enable the MEHE (memory error handling) bit in CTRL */
5218 reg = E1000_READ_REG(hw, E1000_CTRL);
5219 reg |= E1000_CTRL_MEHE;
5220 E1000_WRITE_REG(hw, E1000_CTRL, reg);
/* NOTE(review): numbered listing — elided lines include ret_val's
 * declaration, the error check after setup_physical_interface, and the
 * third argument of the PHY write at line 5272.
 */
5227 * e1000_setup_link_ich8lan - Setup flow control and link settings
5228 * @hw: pointer to the HW structure
5230 * Determines which flow control settings to use, then configures flow
5231 * control. Calls the appropriate media-specific link configuration
5232 * function. Assuming the adapter has a valid link partner, a valid link
5233 * should be established. Assumes the hardware has previously been reset
5234 * and the transmitter and receiver are not enabled.
5236 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5240 DEBUGFUNC("e1000_setup_link_ich8lan");
/* If PHY resets are blocked (e.g. by manageability), skip link setup */
5242 if (hw->phy.ops.check_reset_block(hw))
5243 return E1000_SUCCESS;
5245 /* ICH parts do not have a word in the NVM to determine
5246 * the default flow control setting, so we explicitly
5249 if (hw->fc.requested_mode == e1000_fc_default)
5250 hw->fc.requested_mode = e1000_fc_full;
5252 /* Save off the requested flow control mode for use later. Depending
5253 * on the link partner's capabilities, we may or may not use this mode.
5255 hw->fc.current_mode = hw->fc.requested_mode;
5257 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5258 hw->fc.current_mode);
5260 /* Continue to configure the copper link. */
5261 ret_val = hw->mac.ops.setup_physical_interface(hw);
5265 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
/* PCH-family PHYs also need the refresh time and a PHY-side register
 * (port control page, reg 27) programmed for flow control
 */
5266 if ((hw->phy.type == e1000_phy_82578) ||
5267 (hw->phy.type == e1000_phy_82579) ||
5268 (hw->phy.type == e1000_phy_i217) ||
5269 (hw->phy.type == e1000_phy_82577)) {
5270 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5272 ret_val = hw->phy.ops.write_reg(hw,
5273 PHY_REG(BM_PORT_CTRL_PAGE, 27),
/* Finish by programming the flow-control watermarks */
5279 return e1000_set_fc_watermarks_generic(hw);
/* NOTE(review): numbered listing — elided lines include the declarations
 * of ctrl, ret_val and reg_data, the values written to the Kumeran
 * registers, error checks, the 'case e1000_phy_ife:' label for the MDIX
 * handling, and the switch-case labels for hw->phy.mdix.
 */
5283 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5284 * @hw: pointer to the HW structure
5286 * Configures the kumeran interface to the PHY to wait the appropriate time
5287 * when polling the PHY, then call the generic setup_copper_link to finish
5288 * configuring the copper link.
5290 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5296 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
/* Force link up at MAC level; speed/duplex forcing is cleared so the
 * PHY's negotiated result is used
 */
5298 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5299 ctrl |= E1000_CTRL_SLU;
5300 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5301 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5303 /* Set the mac to wait the maximum time between each iteration
5304 * and increase the max iterations when polling the phy;
5305 * this fixes erroneous timeouts at 10Mbps.
5307 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
5311 ret_val = e1000_read_kmrn_reg_generic(hw,
5312 E1000_KMRNCTRLSTA_INBAND_PARAM,
5317 ret_val = e1000_write_kmrn_reg_generic(hw,
5318 E1000_KMRNCTRLSTA_INBAND_PARAM,
/* Run the PHY-specific copper setup for the attached PHY type */
5323 switch (hw->phy.type) {
5324 case e1000_phy_igp_3:
5325 ret_val = e1000_copper_link_setup_igp(hw);
5330 case e1000_phy_82578:
5331 ret_val = e1000_copper_link_setup_m88(hw);
5335 case e1000_phy_82577:
5336 case e1000_phy_82579:
5337 ret_val = e1000_copper_link_setup_82577(hw);
/* NOTE(review): this MDIX read-modify-write targets IFE_PHY_MDIX_CONTROL,
 * so it presumably belongs to an elided 'case e1000_phy_ife:' branch —
 * confirm against the full source.
 */
5342 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
5347 reg_data &= ~IFE_PMC_AUTO_MDIX;
5349 switch (hw->phy.mdix) {
5351 reg_data &= ~IFE_PMC_FORCE_MDIX;
5354 reg_data |= IFE_PMC_FORCE_MDIX;
5358 reg_data |= IFE_PMC_AUTO_MDIX;
5361 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
/* Hand off to the generic copper-link setup to complete configuration */
5370 return e1000_setup_copper_link_generic(hw);
/* NOTE(review): numbered listing — elided lines include the declarations
 * of ctrl and ret_val and the error check after the 82577 setup call.
 */
5374 * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5375 * @hw: pointer to the HW structure
5377 * Calls the PHY specific link setup function and then calls the
5378 * generic setup_copper_link to finish configuring the link for
5379 * Lynxpoint PCH devices
5381 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5386 DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
/* Force link up at MAC level and let the PHY determine speed/duplex */
5388 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5389 ctrl |= E1000_CTRL_SLU;
5390 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5391 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5393 ret_val = e1000_copper_link_setup_82577(hw);
5397 return e1000_setup_copper_link_generic(hw);
/* NOTE(review): numbered listing — elided lines include the @duplex
 * parameter on the signature's continuation line, ret_val's declaration,
 * and the final return.
 */
5401 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5402 * @hw: pointer to the HW structure
5403 * @speed: pointer to store current link speed
5404 * @duplex: pointer to store the current link duplex
5406 * Calls the generic get_speed_and_duplex to retrieve the current link
5407 * information and then calls the Kumeran lock loss workaround for links at
5410 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5415 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5417 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
/* Only ICH8 with the IGP3 PHY at gigabit needs the Kumeran workaround */
5421 if ((hw->mac.type == e1000_ich8lan) &&
5422 (hw->phy.type == e1000_phy_igp_3) &&
5423 (*speed == SPEED_1000)) {
5424 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
/* NOTE(review): numbered listing — elided lines include the declarations
 * of ret_val, data, i, link and phy_ctrl, step 4 of the kdoc list,
 * per-read error checks, and the delay inside the retry loop.
 */
5431 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5432 * @hw: pointer to the HW structure
5434 * Work-around for 82566 Kumeran PCS lock loss:
5435 * On link status change (i.e. PCI reset, speed change) and link is up and
5437 * 0) if workaround is optionally disabled do nothing
5438 * 1) wait 1ms for Kumeran link to come up
5439 * 2) check Kumeran Diagnostic register PCS lock loss bit
5440 * 3) if not set the link is locked (all is good), otherwise...
5442 * 5) repeat up to 10 times
5443 * Note: this is only called for IGP3 copper when speed is 1gb.
5445 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5447 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5453 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
/* Step 0: bail out if the workaround is disabled for this device */
5455 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5456 return E1000_SUCCESS;
5458 /* Make sure link is up before proceeding. If not just return.
5459 * Attempting this while link is negotiating fouled up link
5462 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5464 return E1000_SUCCESS;
5466 for (i = 0; i < 10; i++) {
5467 /* read once to clear */
5468 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5471 /* and again to get new status */
5472 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5476 /* check for PCS lock */
5477 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5478 return E1000_SUCCESS;
5480 /* Issue PHY reset */
5481 hw->phy.ops.reset(hw);
/* All retries exhausted: give up on gigabit and disable GbE negotiation */
5484 /* Disable GigE link negotiation */
5485 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5486 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5487 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5488 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5490 /* Call gig speed drop workaround on Gig disable before accessing
5493 e1000_gig_downshift_workaround_ich8lan(hw);
5495 /* unable to acquire PCS lock */
5496 return -E1000_ERR_PHY;
/* NOTE(review): numbered listing — the 'state' parameter on the
 * signature's continuation line, braces, and the early 'return' in the
 * non-ICH8 branch are elided.
 */
5500 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5501 * @hw: pointer to the HW structure
5502 * @state: boolean value used to set the current Kumeran workaround state
5504 * If ICH8, set the current Kumeran workaround state (enabled - TRUE
5505 * /disabled - FALSE).
5507 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5510 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5512 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
/* The lock-loss workaround only applies to the ICH8 MAC */
5514 if (hw->mac.type != e1000_ich8lan) {
5515 DEBUGOUT("Workaround applies to ICH8 only.\n");
5519 dev_spec->kmrn_lock_loss_workaround_enabled = state;
/* NOTE(review): numbered listing — elided lines include the declarations
 * of reg, data and retry, the retry loop header ('do {' / 'while'
 * presumably — the code reads 'retry' and says "repeat at most one more
 * time"), and the early return / delays.
 */
5525 * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5526 * @hw: pointer to the HW structure
5528 * Workaround for 82566 power-down on D3 entry:
5529 * 1) disable gigabit link
5530 * 2) write VR power-down enable
5532 * Continue if successful, else issue LCD reset and repeat
5534 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5540 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
/* Workaround is specific to the IGP3 PHY */
5542 if (hw->phy.type != e1000_phy_igp_3)
5545 /* Try the workaround twice (if needed) */
5548 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5549 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5550 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5551 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5553 /* Call gig speed drop workaround on Gig disable before
5554 * accessing any PHY registers
5556 if (hw->mac.type == e1000_ich8lan)
5557 e1000_gig_downshift_workaround_ich8lan(hw);
5559 /* Write VR power-down enable */
5560 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5561 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5562 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5563 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5565 /* Read it back and test */
5566 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5567 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
/* Success (shutdown mode latched) or second attempt already made: stop */
5568 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5571 /* Issue PHY reset and repeat at most one more time */
5572 reg = E1000_READ_REG(hw, E1000_CTRL);
5573 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
/* NOTE(review): numbered listing — elided lines include the declarations
 * of ret_val and reg_data, the error-check branches after each Kumeran
 * access, and the final write's data argument line.
 */
5579 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5580 * @hw: pointer to the HW structure
5582 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
5583 * LPLU, Gig disable, MDIC PHY reset):
5584 * 1) Set Kumeran Near-end loopback
5585 * 2) Clear Kumeran Near-end loopback
5586 * Should only be called for ICH8[m] devices with any 1G Phy.
5588 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5593 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
/* Only ICH8, and never the 10/100-only IFE PHY */
5595 if ((hw->mac.type != e1000_ich8lan) ||
5596 (hw->phy.type == e1000_phy_ife))
5599 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
/* Step 1: set near-end loopback */
5603 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5604 ret_val = e1000_write_kmrn_reg_generic(hw,
5605 E1000_KMRNCTRLSTA_DIAG_OFFSET,
/* Step 2: clear near-end loopback again */
5609 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5610 e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
/* NOTE(review): numbered listing — elided lines include the declarations
 * of phy_ctrl, ret_val and eee_advert, many error-check/brace lines, and
 * several register-argument continuation lines; not compilable as-is.
 */
5615 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5616 * @hw: pointer to the HW structure
5618 * During S0 to Sx transition, it is possible the link remains at gig
5619 * instead of negotiating to a lower speed. Before going to Sx, set
5620 * 'Gig Disable' to force link speed negotiation to a lower speed based on
5621 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
5622 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5623 * needs to be written.
5624 * Parts that support (and are linked to a partner which support) EEE in
5625 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5626 * than 10Mbps w/o EEE.
5628 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5630 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5634 DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
/* Force gigabit off for the duration of Sx (written back at line 5730) */
5636 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5637 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5639 if (hw->phy.type == e1000_phy_i217) {
5640 u16 phy_reg, device_id = hw->device_id;
/* I218/SPT parts: stop requesting the PLL clock before Sx */
5642 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5643 (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5644 (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5645 (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5646 (hw->mac.type >= e1000_pch_spt)) {
5647 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5649 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5650 fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5653 ret_val = hw->phy.ops.acquire(hw);
5657 if (!dev_spec->eee_disable) {
5661 e1000_read_emi_reg_locked(hw,
5662 I217_EEE_ADVERTISEMENT,
5667 /* Disable LPLU if both link partners support 100BaseT
5668 * EEE and 100Full is advertised on both ends of the
5669 * link, and enable Auto Enable LPI since there will
5670 * be no driver to enable LPI while in Sx.
5672 if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5673 (dev_spec->eee_lp_ability &
5674 I82579_EEE_100_SUPPORTED) &&
5675 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5676 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5677 E1000_PHY_CTRL_NOND0A_LPLU);
5679 /* Set Auto Enable LPI after link up */
5680 hw->phy.ops.read_reg_locked(hw,
5683 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5684 hw->phy.ops.write_reg_locked(hw,
5690 /* For i217 Intel Rapid Start Technology support,
5691 * when the system is going into Sx and no manageability engine
5692 * is present, the driver must configure proxy to reset only on
5693 * power good. LPI (Low Power Idle) state must also reset only
5694 * on power good, as well as the MTA (Multicast table array).
5695 * The SMBus release must also be disabled on LCD reset.
5697 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5698 E1000_ICH_FWSM_FW_VALID)) {
5699 /* Enable proxy to reset only on power good. */
5700 hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5702 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5703 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5706 /* Set bit enable LPI (EEE) to reset only on
5709 hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5710 phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5711 hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5713 /* Disable the SMB release on LCD reset. */
5714 hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5715 phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5716 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5719 /* Enable MTA to reset for Intel Rapid Start Technology
5722 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5723 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5724 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
/* Release pairs with the acquire at line 5653 */
5727 hw->phy.ops.release(hw);
/* Commit the accumulated GbE-disable/LPLU decision to the MAC */
5730 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5732 if (hw->mac.type == e1000_ich8lan)
5733 e1000_gig_downshift_workaround_ich8lan(hw);
5735 if (hw->mac.type >= e1000_pchlan) {
5736 e1000_oem_bits_config_ich8lan(hw, FALSE);
5738 /* Reset PHY to activate OEM bits on 82577/8 */
5739 if (hw->mac.type == e1000_pchlan)
5740 e1000_phy_hw_reset_generic(hw);
5742 ret_val = hw->phy.ops.acquire(hw);
5745 e1000_write_smbus_addr(hw);
5746 hw->phy.ops.release(hw);
/* NOTE(review): numbered listing — elided lines include the declarations
 * of ret_val and phy_reg, a 'goto release'-style error path (the
 * DEBUGOUT1 at line 5819 plus release at 5820 suggest a shared error
 * label — confirm against the full source), and braces.
 */
5753 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5754 * @hw: pointer to the HW structure
5756 * During Sx to S0 transitions on non-managed devices or managed devices
5757 * on which PHY resets are not blocked, if the PHY registers cannot be
5758 * accessed properly by the s/w toggle the LANPHYPC value to power cycle
5760 * On i217, setup Intel Rapid Start Technology.
5762 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5766 DEBUGFUNC("e1000_resume_workarounds_pchlan");
/* Only PCH2 and newer need these resume-time workarounds */
5767 if (hw->mac.type < e1000_pch2lan)
5768 return E1000_SUCCESS;
5770 ret_val = e1000_init_phy_workarounds_pchlan(hw);
5772 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5776 /* For i217 Intel Rapid Start Technology support when the system
5777 * is transitioning from Sx and no manageability engine is present
5778 * configure SMBus to restore on reset, disable proxy, and enable
5779 * the reset on MTA (Multicast table array).
5781 if (hw->phy.type == e1000_phy_i217) {
5784 ret_val = hw->phy.ops.acquire(hw);
5786 DEBUGOUT("Failed to setup iRST\n");
5790 /* Clear Auto Enable LPI after link up */
5791 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5792 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5793 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5795 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5796 E1000_ICH_FWSM_FW_VALID)) {
5797 /* Restore clear on SMB if no manageability engine
5800 ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5804 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5805 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
/* Disable the proxy set up for Sx (undoes the suspend-time config) */
5808 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5810 /* Enable reset on MTA */
5811 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5815 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5816 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5819 DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5820 hw->phy.ops.release(hw);
5823 return E1000_SUCCESS;
/* NOTE(review): numbered listing — braces and the data argument of the
 * IFE write (continuation line 5838) are elided.
 */
5827 * e1000_cleanup_led_ich8lan - Restore the default LED operation
5828 * @hw: pointer to the HW structure
5830 * Return the LED back to the default configuration.
5832 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5834 DEBUGFUNC("e1000_cleanup_led_ich8lan");
/* IFE PHY: LEDs are controlled through a PHY register, not LEDCTL */
5836 if (hw->phy.type == e1000_phy_ife)
5837 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
/* All other PHYs: restore the saved MAC LEDCTL default */
5840 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5841 return E1000_SUCCESS;
/* NOTE(review): numbered listing — braces and part of the kdoc body
 * (line 5848) are elided.
 */
5845 * e1000_led_on_ich8lan - Turn LEDs on
5846 * @hw: pointer to the HW structure
5850 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5852 DEBUGFUNC("e1000_led_on_ich8lan");
/* IFE PHY: drive LEDs on via the PHY's probe-mode control register */
5854 if (hw->phy.type == e1000_phy_ife)
5855 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5856 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON))
/* All other PHYs: ledctl_mode2 holds the "LEDs on" configuration */
5858 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5859 return E1000_SUCCESS;
/* NOTE(review): numbered listing — function braces are elided. */
5863 * e1000_led_off_ich8lan - Turn LEDs off
5864 * @hw: pointer to the HW structure
5866 * Turn off the LEDs.
5868 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5870 DEBUGFUNC("e1000_led_off_ich8lan");
/* IFE PHY: drive LEDs off via the PHY's probe-mode control register */
5872 if (hw->phy.type == e1000_phy_ife)
5873 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5874 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
/* All other PHYs: ledctl_mode1 holds the "LEDs off" configuration */
5876 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5877 return E1000_SUCCESS;
/* NOTE(review): numbered listing — function braces are elided. */
5881 * e1000_setup_led_pchlan - Configures SW controllable LED
5882 * @hw: pointer to the HW structure
5884 * This prepares the SW controllable LED for use.
5886 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5888 DEBUGFUNC("e1000_setup_led_pchlan");
/* PCH LEDs live behind the PHY's HV_LED_CONFIG register, not LEDCTL;
 * only the low 16 bits of ledctl_mode1 are meaningful here
 */
5890 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5891 (u16)hw->mac.ledctl_mode1);
/* NOTE(review): numbered listing — function braces are elided. */
5895 * e1000_cleanup_led_pchlan - Restore the default LED operation
5896 * @hw: pointer to the HW structure
5898 * Return the LED back to the default configuration.
5900 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5902 DEBUGFUNC("e1000_cleanup_led_pchlan");
/* Write the saved default back through the PHY's HV_LED_CONFIG register */
5904 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5905 (u16)hw->mac.ledctl_default);
/* NOTE(review): numbered listing — elided lines include the declarations
 * of i and led, the 'continue' for non-link_up LEDs (after line 5928),
 * the 'else' at line 5932, and braces.
 */
5909 * e1000_led_on_pchlan - Turn LEDs on
5910 * @hw: pointer to the HW structure
5914 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5916 u16 data = (u16)hw->mac.ledctl_mode2;
5919 DEBUGFUNC("e1000_led_on_pchlan");
5921 /* If no link, then turn LED on by setting the invert bit
5922 * for each LED that's mode is "link_up" in ledctl_mode2.
5924 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
/* Three LEDs, 5 bits of configuration each */
5925 for (i = 0; i < 3; i++) {
5926 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5927 if ((led & E1000_PHY_LED0_MODE_MASK) !=
5928 E1000_LEDCTL_MODE_LINK_UP)
/* Toggle the invert bit so a link_up-mode LED lights with no link */
5930 if (led & E1000_PHY_LED0_IVRT)
5931 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5933 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5937 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
/* NOTE(review): numbered listing — elided lines include the declarations
 * of i and led, the 'continue' for non-link_up LEDs (after line 5960),
 * the 'else' at line 5964, and braces. Mirror image of
 * e1000_led_on_pchlan but starting from ledctl_mode1.
 */
5941 * e1000_led_off_pchlan - Turn LEDs off
5942 * @hw: pointer to the HW structure
5944 * Turn off the LEDs.
5946 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5948 u16 data = (u16)hw->mac.ledctl_mode1;
5951 DEBUGFUNC("e1000_led_off_pchlan");
5953 /* If no link, then turn LED off by clearing the invert bit
5954 * for each LED that's mode is "link_up" in ledctl_mode1.
5956 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
/* Three LEDs, 5 bits of configuration each */
5957 for (i = 0; i < 3; i++) {
5958 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5959 if ((led & E1000_PHY_LED0_MODE_MASK) !=
5960 E1000_LEDCTL_MODE_LINK_UP)
/* Toggle the invert bit so a link_up-mode LED goes dark with no link */
5962 if (led & E1000_PHY_LED0_IVRT)
5963 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5965 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5969 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
/* NOTE(review): numbered listing — elided lines include the declarations
 * of status and bank, 'else' lines (e.g. pairing with the PHYRA test at
 * line 6011), braces, and the final 'return ret_val' (presumably).
 */
5973 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5974 * @hw: pointer to the HW structure
5976 * Read appropriate register for the config done bit for completion status
5977 * and configure the PHY through s/w for EEPROM-less parts.
5979 * NOTE: some silicon which is EEPROM-less will fail trying to read the
5980 * config done bit, so only an error is logged and continues. If we were
5981 * to return with error, EEPROM-less silicon would not be able to be reset
5984 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5986 s32 ret_val = E1000_SUCCESS;
5990 DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5992 e1000_get_cfg_done_generic(hw);
5994 /* Wait for indication from h/w that it has completed basic config */
5995 if (hw->mac.type >= e1000_ich10lan) {
5996 e1000_lan_init_done_ich8lan(hw);
5998 ret_val = e1000_get_auto_rd_done_generic(hw);
6000 /* When auto config read does not complete, do not
6001 * return with an error. This can happen in situations
6002 * where there is no eeprom and prevents getting link.
6004 DEBUGOUT("Auto Read Done did not complete\n");
6005 ret_val = E1000_SUCCESS;
6009 /* Clear PHY Reset Asserted bit */
6010 status = E1000_READ_REG(hw, E1000_STATUS);
6011 if (status & E1000_STATUS_PHYRA)
6012 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
/* NOTE(review): this DEBUGOUT presumably sits in an elided 'else' branch */
6014 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
6016 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
6017 if (hw->mac.type <= e1000_ich9lan) {
6018 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
6019 (hw->phy.type == e1000_phy_igp_3)) {
6020 e1000_phy_init_script_igp3(hw);
/* ICH10+/PCH path: a failed bank detect means no usable NVM */
6023 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
6024 /* Maybe we should do a basic PHY config */
6025 DEBUGOUT("EEPROM not present\n");
6026 ret_val = -E1000_ERR_CONFIG;
/* NOTE(review): numbered listing — function braces and the DEBUGFUNC
 * line (if any) are elided.
 */
6034 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
6035 * @hw: pointer to the HW structure
6037 * In the case of a PHY power down to save power, or to turn off link during a
6038 * driver unload, or wake on lan is not enabled, remove the link.
6040 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
6042 /* If the management interface is not enabled, then power down */
/* Skip the power-down when either manageability is active or PHY resets
 * are blocked — both imply firmware still needs the PHY
 */
6043 if (!(hw->mac.ops.check_mng_mode(hw) ||
6044 hw->phy.ops.check_reset_block(hw)))
6045 e1000_power_down_phy_copper(hw);
6051 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
6052 * @hw: pointer to the HW structure
6054 * Clears hardware counters specific to the silicon family and calls
6055 * clear_hw_cntrs_generic to clear all general purpose counters.
6057 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
6062 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
6064 e1000_clear_hw_cntrs_base_generic(hw);
6066 E1000_READ_REG(hw, E1000_ALGNERRC);
6067 E1000_READ_REG(hw, E1000_RXERRC);
6068 E1000_READ_REG(hw, E1000_TNCRS);
6069 E1000_READ_REG(hw, E1000_CEXTERR);
6070 E1000_READ_REG(hw, E1000_TSCTC);
6071 E1000_READ_REG(hw, E1000_TSCTFC);
6073 E1000_READ_REG(hw, E1000_MGTPRC);
6074 E1000_READ_REG(hw, E1000_MGTPDC);
6075 E1000_READ_REG(hw, E1000_MGTPTC);
6077 E1000_READ_REG(hw, E1000_IAC);
6078 E1000_READ_REG(hw, E1000_ICRXOC);
6080 /* Clear PHY statistics registers */
6081 if ((hw->phy.type == e1000_phy_82578) ||
6082 (hw->phy.type == e1000_phy_82579) ||
6083 (hw->phy.type == e1000_phy_i217) ||
6084 (hw->phy.type == e1000_phy_82577)) {
6085 ret_val = hw->phy.ops.acquire(hw);
6088 ret_val = hw->phy.ops.set_page(hw,
6089 HV_STATS_PAGE << IGP_PAGE_SHIFT);
6092 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
6093 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
6094 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
6095 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
6096 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
6097 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
6098 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
6099 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
6100 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
6101 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
6102 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
6103 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
6104 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
6105 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
6107 hw->phy.ops.release(hw);