1 /******************************************************************************
3 Copyright (c) 2001-2013, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 /* 82562G 10/100 Network Connection
36 * 82562G-2 10/100 Network Connection
37 * 82562GT 10/100 Network Connection
38 * 82562GT-2 10/100 Network Connection
39 * 82562V 10/100 Network Connection
40 * 82562V-2 10/100 Network Connection
41 * 82566DC-2 Gigabit Network Connection
42 * 82566DC Gigabit Network Connection
43 * 82566DM-2 Gigabit Network Connection
44 * 82566DM Gigabit Network Connection
45 * 82566MC Gigabit Network Connection
46 * 82566MM Gigabit Network Connection
47 * 82567LM Gigabit Network Connection
48 * 82567LF Gigabit Network Connection
49 * 82567V Gigabit Network Connection
50 * 82567LM-2 Gigabit Network Connection
51 * 82567LF-2 Gigabit Network Connection
52 * 82567V-2 Gigabit Network Connection
53 * 82567LF-3 Gigabit Network Connection
54 * 82567LM-3 Gigabit Network Connection
55 * 82567LM-4 Gigabit Network Connection
56 * 82577LM Gigabit Network Connection
57 * 82577LC Gigabit Network Connection
58 * 82578DM Gigabit Network Connection
59 * 82578DC Gigabit Network Connection
60 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 */
64 #include "e1000_api.h"
66 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
67 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
68 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
69 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
70 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
71 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
72 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
73 static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
74 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
77 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
78 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
79 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
80 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
82 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
84 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
85 u16 words, u16 *data);
86 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
87 u16 words, u16 *data);
88 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
89 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
90 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
92 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
93 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
94 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
95 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
96 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
97 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
98 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
99 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
100 u16 *speed, u16 *duplex);
101 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
102 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
103 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
104 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
105 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
106 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
107 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
108 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
109 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
110 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
111 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
112 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
113 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
114 u32 offset, u8 *data);
115 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
117 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
118 u32 offset, u16 *data);
119 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
120 u32 offset, u8 byte);
121 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
122 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
123 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
124 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
125 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
126 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
127 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	/* NOTE(review): the enclosing bit-field struct header and the union
	 * tail (raw u16 register view) are not visible in this extract.
	 */
	u16 flcdone:1;		/* bit 0 Flash Cycle Done */
	u16 flcerr:1;		/* bit 1 Flash Cycle Error */
	u16 dael:1;		/* bit 2 Direct Access error Log */
	u16 berasesz:2;		/* bits 4:3 Sector Erase Size */
	u16 flcinprog:1;	/* bit 5 Flash Cycle In Progress */
	u16 reserved1:2;	/* bits 7:6 Reserved */
	u16 reserved2:6;	/* bits 13:8 Reserved */
	u16 fldesvalid:1;	/* bit 14 Flash Descriptor Valid */
	u16 flockdn:1;		/* bit 15 Flash Config Lock-Down */
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;		/* bit 0 Flash Cycle Go */
		u16 flcycle:2;		/* bits 2:1 Flash Cycle */
		u16 reserved:5;		/* bits 7:3 Reserved */
		u16 fldbcount:2;	/* bits 9:8 Flash Data Byte Count */
		u16 flockdn:6;		/* bits 15:10 Reserved */
	/* NOTE(review): the struct/union tail (member name and raw u16
	 * register view) is not visible in this extract.
	 */
/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	/* NOTE(review): the enclosing bit-field struct header and the union
	 * tail (raw u32 register view) are not visible in this extract.
	 */
	u32 grra:8;	/* bits 7:0 GbE region Read Access */
	u32 grwa:8;	/* bits 15:8 GbE region Write Access */
	u32 gmrag:8;	/* bits 23:16 GbE Master Read Access Grant */
	u32 gmwag:8;	/* bits 31:24 GbE Master Write Access Grant */
/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with the known
 *  ID, otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 *
 *  NOTE(review): local declarations, some braces and statements of this
 *  function are missing from this extract; comments below annotate only
 *  the surviving lines.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
	/* Try the id read up to twice; 0xFFFF means the PHY/bus did not
	 * respond (e.g. interconnect still in SMBus mode).
	 */
	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
		/* PHY_ID1 supplies the upper 16 bits of the 32-bit PHY id */
		phy_id = (u32)(phy_reg << 16);
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
		/* Lower half of the id, with the revision bits masked off */
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
	/* Resume path: a previously-known id must match the one just read */
	if (hw->phy.id == phy_id)
	/* First access: record the revision from PHY_ID2 */
	hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 * The semaphore is dropped around the mode change and re-acquired.
	 */
	hw->phy.ops.release(hw);
	ret_val = e1000_set_mdio_slow_mode_hv(hw);
	ret_val = e1000_get_phy_id(hw);
	hw->phy.ops.acquire(hw);
/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 *
 *  NOTE(review): local declarations, several braces, switch-case labels
 *  and some statements are missing from this extract; the comments below
 *  annotate only the surviving lines.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
	/* Sample FWSM once up front; its FW_VALID bit is consulted twice */
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);

	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);

	/* All PHY register access below requires the SW/FW/HW semaphore */
	ret_val = hw->phy.ops.acquire(hw);
		DEBUGOUT("Failed to initialize PHY flow\n");

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
		if (e1000_phy_is_accessible_pchlan(hw))

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		if (e1000_phy_is_accessible_pchlan(hw)) {
			if (hw->mac.type == e1000_pch_lpt) {
				/* Unforce SMBus mode in PHY */
				hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL,
				phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
				hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL,

			/* Unforce SMBus mode in MAC */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* On pchlan with a valid firmware image, skip the toggle path */
	if ((hw->mac.type == e1000_pchlan) &&
	    (fwsm & E1000_ICH_FWSM_FW_VALID))

	if (hw->phy.ops.check_reset_block(hw)) {
		DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");

	DEBUGOUT("Toggling LANPHYPC\n");

	/* Set Phy Config Counter to 50msec */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

	if (hw->mac.type == e1000_pch_lpt) {
		/* Toggling LANPHYPC brings the PHY out of SMBus mode,
		 * so ensure that the MAC is also out of SMBus mode.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

	/* Toggle LANPHYPC Value bit: assert OVERRIDE with VALUE low... */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);
	/* ...then release the override */
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);
	if (hw->mac.type < e1000_pch_lpt) {
	/* On LPT, poll CTRL_EXT.LPCD instead of using a fixed delay */
	} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
		   E1000_CTRL_EXT_LPCD) && count--);

	hw->phy.ops.release(hw);

	/* Reset the PHY before any access to it.  Doing so, ensures
	 * that the PHY is in a known good state before we read/write
	 * PHY registers.  The generic reset is sufficient here,
	 * because we haven't determined the PHY type yet.
	 */
	ret_val = e1000_phy_hw_reset_generic(hw);

	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers for
 *  PCH-class parts (82577/82578/82579 and later PHYs).
 *
 *  NOTE(review): local declarations, case labels, braces and the final
 *  return are missing from this extract; comments below annotate only
 *  the surviving lines.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
	struct e1000_phy_info *phy = &hw->phy;

	DEBUGFUNC("e1000_init_phy_params_pchlan");

	/* Delay (usec) required after a PHY reset before register access */
	phy->reset_delay_us = 100;

	/* Family-wide accessors; HV read/write variants handle paged access */
	phy->ops.acquire = e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
	phy->ops.set_page = e1000_set_page_igp;
	phy->ops.read_reg = e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
	phy->ops.release = e1000_release_swflag_ich8lan;
	phy->ops.reset = e1000_phy_hw_reset_ich8lan;
	/* The same LPLU handler covers both D0 and D3 on PCH parts */
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg = e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;

	/* The id is resolved below (the workarounds helper may set it) */
	phy->id = e1000_phy_unknown;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);

	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
			ret_val = e1000_get_phy_id(hw);
			/* Accept any non-zero id that isn't all ones
			 * in the non-revision bits
			 */
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))

			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			ret_val = e1000_get_phy_id(hw);
	phy->type = e1000_get_phy_type_from_id(phy->id);

	case e1000_phy_82577:
	case e1000_phy_82579:
		/* 82577/82579-specific polarity/cable/info handlers */
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
			e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000_phy_sw_reset_generic;
	case e1000_phy_82578:
		/* 82578 uses M88-compatible handlers */
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.get_info = e1000_get_phy_info_m88;
		/* Unrecognized PHY type */
		ret_val = -E1000_ERR_PHY;
/**
 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers for
 *  ICH8-class parts (IGP-3, IFE and BM PHYs).
 *
 *  NOTE(review): local declarations, some braces, the enclosing switch
 *  statement and loop bodies are missing from this extract.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
	struct e1000_phy_info *phy = &hw->phy;

	DEBUGFUNC("e1000_init_phy_params_ich8lan");

	/* Delay (usec) required after a PHY reset before register access */
	phy->reset_delay_us = 100;

	/* Default to IGP-style accessors; overridden per PHY id below */
	phy->ops.acquire = e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
	phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
	phy->ops.read_reg = e1000_read_phy_reg_igp;
	phy->ops.release = e1000_release_swflag_ich8lan;
	phy->ops.reset = e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
	phy->ops.write_reg = e1000_write_phy_reg_igp;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;

	/* We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000_determine_phy_address(hw);
		/* IGP detection failed: switch to BM accessors and retry */
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		ret_val = e1000_determine_phy_address(hw);
			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");

	/* Retry while the id still decodes to an unknown PHY type */
	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
		ret_val = e1000_get_phy_id(hw);

	/* Per-id setup (enclosing switch statement not visible here) */
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
		phy->ops.get_info = e1000_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
	case IFE_PLUS_E_PHY_ID:
		/* IFE parts advertise 10/100 only (E1000_ALL_NOT_GIG) */
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		phy->ops.get_info = e1000_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		/* Unrecognized PHY id */
		return -E1000_ERR_PHY;

	return E1000_SUCCESS;
/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function pointers
 *  (flash accessed through the ICH flash controller, shadowed in RAM).
 *
 *  NOTE(review): some closing braces and declarations (e.g. of 'i') are
 *  missing from this extract.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;

	DEBUGFUNC("e1000_init_nvm_params_ich8lan");

	/* Can't read flash registers if the register set isn't mapped. */
	if (!hw->flash_address) {
		DEBUGOUT("ERROR: Flash registers not mapped\n");
		return -E1000_ERR_CONFIG;

	nvm->type = e1000_nvm_flash_sw;

	/* GFPREG describes the GbE region bounds within the flash part */
	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

	/* sector_X_addr is a "sector"-aligned address (4096 bytes)
	 * Add 1 to sector_end_addr since this sector is included in
	 * the overall size.
	 */
	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

	/* flash_base_addr is byte-aligned */
	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;

	/* find total size of the NVM, then cut in half since the total
	 * size represents two separate NVM banks.
	 */
	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
			       << FLASH_SECTOR_ADDR_SHIFT;
	nvm->flash_bank_size /= 2;
	/* Adjust to word count */
	nvm->flash_bank_size /= sizeof(u16);

	nvm->word_size = E1000_SHADOW_RAM_WORDS;

	/* Clear shadow ram: mark every word unmodified / erased (0xFFFF) */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = FALSE;
		dev_spec->shadow_ram[i].value = 0xFFFF;

	/* Serialize NVM and software-flag accesses */
	E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
	E1000_MUTEX_INIT(&dev_spec->swflag_mutex);

	/* Function Pointers */
	nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
	nvm->ops.release = e1000_release_nvm_ich8lan;
	nvm->ops.read = e1000_read_nvm_ich8lan;
	nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
	nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
	nvm->ops.write = e1000_write_nvm_ich8lan;

	return E1000_SUCCESS;
/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function pointers for
 *  ICH8/9/10 and the PCH/PCH2/LPT parts.
 *
 *  NOTE(review): the per-mac-type switch/case labels and some braces are
 *  missing from this extract - the PCH2-specific assignments below sit in
 *  cases whose labels are not visible.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
	struct e1000_mac_info *mac = &hw->mac;

	DEBUGFUNC("e1000_init_mac_params_ich8lan");

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	/* ICH8 uses one fewer receive-address register */
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = TRUE;
	/* FWSM register present */
	mac->has_fwsm = TRUE;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = FALSE;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = TRUE;

	/* Function pointers */

	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
	/* function id (single-port parts) */
	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
	/* reset */
	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_ich8lan;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_ich8lan;
	/* physical interface setup */
	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
	/* check for link */
	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
	/* link info */
	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

	/* LED and other operations - generic ICH handlers first */
	/* check management mode */
	mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
	/* ID LED init */
	mac->ops.id_led_init = e1000_id_led_init_generic;
	/* blink LED */
	mac->ops.blink_led = e1000_blink_led_generic;
	/* setup LED */
	mac->ops.setup_led = e1000_setup_led_generic;
	/* cleanup LED */
	mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
	/* turn on/off LED */
	mac->ops.led_on = e1000_led_on_ich8lan;
	mac->ops.led_off = e1000_led_off_ich8lan;

	/* PCH2 override: fewer RAR entries and its own rar_set */
	mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
	mac->ops.rar_set = e1000_rar_set_pch2lan;

	/* multicast address update for pch2 */
	mac->ops.update_mc_addr_list =
		e1000_update_mc_addr_list_pch2lan;

	/* PCH-family LED/management overrides */
	/* check management mode */
	mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
	/* ID LED init */
	mac->ops.id_led_init = e1000_id_led_init_pchlan;
	/* setup LED */
	mac->ops.setup_led = e1000_setup_led_pchlan;
	/* cleanup LED */
	mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
	/* turn on/off LED */
	mac->ops.led_on = e1000_led_on_pchlan;
	mac->ops.led_off = e1000_led_off_pchlan;

	/* Lynx Point overrides: more RARs, LPT link setup, OBFF timer */
	if (mac->type == e1000_pch_lpt) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
		mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);

	return E1000_SUCCESS;
/**
 *  __e1000_access_emi_reg_locked - Read/write EMI register
 *  @hw: pointer to the HW structure
 *  @address: EMI address to program
 *  @data: pointer to value to read/write from/to the EMI address
 *  @read: boolean flag to indicate read or write
 *
 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
 *
 *  NOTE(review): the body's braces, error checks and return statement are
 *  missing from this extract.
 **/
static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
					 u16 *data, bool read)
	DEBUGFUNC("__e1000_access_emi_reg_locked");

	/* Latch the target EMI address first... */
	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);

	/* ...then move data through the EMI data register in the
	 * direction selected by @read.
	 */
	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
/**
 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be read from the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
	DEBUGFUNC("e1000_read_emi_reg_locked");

	/* Thin wrapper: TRUE selects the read direction in the helper */
	return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
762 * e1000_write_emi_reg_locked - Write Extended Management Interface register
763 * @hw: pointer to the HW structure
764 * @addr: EMI address to program
765 * @data: value to be written to the EMI address
767 * Assumes the SW/FW/HW Semaphore is already acquired.
769 static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
771 DEBUGFUNC("e1000_read_emi_reg_locked");
773 return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
/**
 *  e1000_set_eee_pchlan - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 *  the link and the EEE capabilities of the link partner.  The LPI Control
 *  register bits will remain set only if/when link is up.
 *
 *  NOTE(review): local declarations, case labels, error-exit branches and
 *  closing braces are missing from this extract.
 **/
static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;

	DEBUGFUNC("e1000_set_eee_pchlan");

	/* EEE handling applies only to 82579 and i217 PHYs */
	if ((hw->phy.type != e1000_phy_82579) &&
	    (hw->phy.type != e1000_phy_i217))
		return E1000_SUCCESS;

	ret_val = hw->phy.ops.acquire(hw);

	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		u16 lpa, pcs_status, data;

		/* Save off link partner's EEE ability */
		switch (hw->phy.type) {
		case e1000_phy_82579:
			lpa = I82579_EEE_LP_ABILITY;
			pcs_status = I82579_EEE_PCS_STATUS;
			/* i217 case (label not visible in this extract) */
			lpa = I217_EEE_LP_ABILITY;
			pcs_status = I217_EEE_PCS_STATUS;
			/* default: unknown PHY type */
			ret_val = -E1000_ERR_PHY;

		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);

		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable.
		 */
		if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

		if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
			if (data & NWAY_LPAR_100TX_FD_CAPS)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;

			/* EEE is not supported in 100Half, so ignore
			 * partner's EEE in 100 ability if full-duplex
			 * is not advertised.
			 */
			dev_spec->eee_lp_ability &=
				~I82579_EEE_100_SUPPORTED;

		/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
		ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);

	hw->phy.ops.release(hw);
/**
 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @hw: pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gpbs mode.
 *
 *  NOTE(review): some statements, braces and the return are missing from
 *  this extract.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
	s32 ret_val = E1000_SUCCESS;

	/* The workaround applies only when linked at 1000 Mbps */
	if (link && (E1000_READ_REG(hw, E1000_STATUS) &
		     E1000_STATUS_SPEED_1000)) {

		ret_val = hw->phy.ops.acquire(hw);

		/* Save the current K1 configuration, then disable K1... */
		e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
		e1000_write_kmrn_reg_locked(hw,
					    E1000_KMRNCTRLSTA_K1_CONFIG,
					    ~E1000_KMRNCTRLSTA_K1_ENABLE);
		/* ...request the PLL clock while reconfiguring... */
		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
		/* ...then restore the saved K1 configuration */
		e1000_write_kmrn_reg_locked(hw,
					    E1000_KMRNCTRLSTA_K1_CONFIG,
		hw->phy.ops.release(hw);
	/* clear FEXTNVM6 bit 8 on link down or 10/100 */
	E1000_WRITE_REG(hw, E1000_FEXTNVM6,
			fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
918 static u64 e1000_ltr2ns(u16 ltr)
922 /* Determine the latency in nsec based on the LTR value & scale */
923 value = ltr & E1000_LTRV_VALUE_MASK;
924 scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;
926 return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR));
/**
 *  e1000_platform_pm_pch_lpt - Set platform power management values
 *  @hw: pointer to the HW structure
 *  @link: bool indicating link status
 *
 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 *  when link is up (which must not exceed the maximum latency supported
 *  by the platform), otherwise specify there is no LTR requirement.
 *  Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop
 *  latencies in the LTR Extended Capability Structure in the PCIe Extended
 *  Capability register set, on this device LTR is set by writing the
 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
 *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
 *  message to the PMC.
 *
 *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
 *  high water mark.
 *
 *  NOTE(review): declarations (e.g. 'rxa', 'value', 'obff_hwm'), several
 *  braces and the 'if (link)' guard are missing from this extract.
 **/
static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
	/* Seed LTRV: request bits for snoop/no-snoop halves plus SEND */
	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
	u16 lat_enc = 0;	/* latency encoded */

	DEBUGFUNC("e1000_platform_pm_pch_lpt");

		u16 speed, duplex, scale = 0;
		u16 max_snoop, max_nosnoop;
		u16 max_ltr_enc;	/* max LTR latency encoded */
		s64 lat_ns;	/* latency (ns) */

		if (!hw->mac.max_frame_size) {
			DEBUGOUT("max_frame_size not set.\n");
			return -E1000_ERR_CONFIG;

		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
			DEBUGOUT("Speed not set.\n");
			return -E1000_ERR_CONFIG;

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;

		/* Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
		lat_ns = ((s64)rxa * 1024 -
			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;

		/* Fold latency into value/scale encoding (steps of 2^5) */
		while (value > E1000_LTRV_VALUE_MASK) {
			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
		if (scale > E1000_LTRV_SCALE_MAX) {
			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
			return -E1000_ERR_CONFIG;
		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);

		/* Determine the maximum latency tolerated by the platform */
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);

		/* Clamp the device latency to the platform maximum */
		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = e1000_ltr2ns(max_ltr_enc);

		/* Convert latency (ns) into a buffer high water mark (KB) */
		lat_ns *= speed * 1000;
		lat_ns /= 1000000000;
		obff_hwm = (s32)(rxa - lat_ns);

		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
			return -E1000_ERR_CONFIG;

	/* Set Snoop and No-Snoop latencies the same */
	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
	E1000_WRITE_REG(hw, E1000_LTRV, reg);

	/* Set OBFF high water mark */
	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
	E1000_WRITE_REG(hw, E1000_SVT, reg);

	/* Enable OBFF */
	reg = E1000_READ_REG(hw, E1000_SVCR);
	reg |= E1000_SVCR_OFF_EN;
	/* Always unblock interrupts to the CPU even when the system is
	 * in OBFF mode.  This ensures that small round-robin traffic
	 * (like ping) does not get dropped or experience long latency.
	 */
	reg |= E1000_SVCR_OFF_MASKINT;
	E1000_WRITE_REG(hw, E1000_SVCR, reg);

	return E1000_SUCCESS;
/**
 *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
 *  @hw: pointer to the HW structure
 *  @itr: interrupt throttling rate
 *
 *  Configure OBFF with the updated interrupt rate.
 *
 *  NOTE(review): local declarations of 'timer' and 'svcr' are missing from
 *  this extract.
 **/
static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
	DEBUGFUNC("e1000_set_obff_timer_pch_lpt");

	/* Convert ITR value into microseconds for OBFF timer */
	timer = itr & E1000_ITR_MASK;
	timer = (timer * E1000_ITR_MULT) / 1000;

	/* Range check.  NOTE(review): if 'timer' is unsigned (declaration
	 * not visible here), the (timer < 0) test is vacuous - verify type.
	 */
	if ((timer < 0) || (timer > E1000_ITR_MASK)) {
		DEBUGOUT1("Invalid OBFF timer %d\n", timer);
		return -E1000_ERR_CONFIG;

	/* Program the new timer value into SVCR */
	svcr = E1000_READ_REG(hw, E1000_SVCR);
	svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
	svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
	E1000_WRITE_REG(hw, E1000_SVCR, svcr);

	return E1000_SUCCESS;
1082 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1083 * @hw: pointer to the HW structure
1085 * Checks to see if the link status of the hardware has changed. If a
1086 * change in link status has been detected, then we read the PHY registers
1087 * to get the current speed/duplex if link exists.
1089 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1091 	struct e1000_mac_info *mac = &hw->mac;
1096 	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1098 	/* We only want to go out to the PHY registers to see if Auto-Neg
1099 	 * has completed and/or if our link status has changed. The
1100 	 * get_link_status flag is set upon receiving a Link Status
1101 	 * Change or Rx Sequence Error interrupt.
1103 	if (!mac->get_link_status)
1104 		return E1000_SUCCESS;
1106 	/* First we want to see if the MII Status Register reports
1107 	 * link. If so, then we want to get the current speed/duplex
1110 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
/* First-generation PCH parts apply the K1 gig workaround on every
 * link-status check, regardless of link state. */
1114 	if (hw->mac.type == e1000_pchlan) {
1115 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
1120 	/* When connected at 10Mbps half-duplex, 82579 parts are excessively
1121 	 * aggressive resulting in many collisions. To avoid this, increase
1122 	 * the IPG and reduce Rx latency in the PHY.
1124 	if ((hw->mac.type == e1000_pch2lan) && link) {
1126 		reg = E1000_READ_REG(hw, E1000_STATUS);
/* Neither the FD bit nor any speed bit set => 10Mbps half-duplex. */
1127 		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1128 			reg = E1000_READ_REG(hw, E1000_TIPG);
1129 			reg &= ~E1000_TIPG_IPGT_MASK;
1131 			E1000_WRITE_REG(hw, E1000_TIPG, reg);
1133 			/* Reduce Rx latency in analog PHY */
1134 			ret_val = hw->phy.ops.acquire(hw);
1138 			ret_val = e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
1140 			hw->phy.ops.release(hw);
1147 	/* Work-around I218 hang issue */
1148 	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1149 	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
1150 		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1155 	if (hw->mac.type == e1000_pch_lpt) {
1156 		/* Set platform power management values for Latency Tolerance
1157 		 * Reporting (LTR) and Optimized Buffer Flush/Fill (OBFF).
1159 		ret_val = e1000_platform_pm_pch_lpt(hw, link);
1164 	/* Clear link partner's EEE ability */
1165 	hw->dev_spec.ich8lan.eee_lp_ability = 0;
1168 		return E1000_SUCCESS; /* No link detected */
/* Link is up - clear the flag so the above work is skipped until the
 * next link-status-change/Rx-sequence-error interrupt sets it again. */
1170 	mac->get_link_status = FALSE;
/* Per-MAC-generation post-link-up workarounds (case labels are elided
 * from this excerpt). */
1172 	switch (hw->mac.type) {
1174 		ret_val = e1000_k1_workaround_lv(hw);
1179 		if (hw->phy.type == e1000_phy_82578) {
1180 			ret_val = e1000_link_stall_workaround_hv(hw);
1185 		/* Workaround for PCHx parts in half-duplex:
1186 		 * Set the number of preambles removed from the packet
1187 		 * when it is passed from the PHY to the MAC to prevent
1188 		 * the MAC from misinterpreting the packet type.
1190 		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1191 		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1193 		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1195 			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1197 		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1203 	/* Check if there was DownShift, must be checked
1204 	 * immediately after link-up
1206 	e1000_check_downshift_generic(hw);
1208 	/* Enable/Disable EEE after link up */
1209 	ret_val = e1000_set_eee_pchlan(hw);
1213 	/* If we are forcing speed/duplex, then we simply return since
1214 	 * we have already determined whether we have link or not.
1217 		return -E1000_ERR_CONFIG;
1219 	/* Auto-Neg is enabled. Auto Speed Detection takes care
1220 	 * of MAC speed/duplex configuration. So we only need to
1221 	 * configure Collision Distance in the MAC.
1223 	mac->ops.config_collision_dist(hw);
1225 	/* Configure Flow Control now that Auto-Neg has completed.
1226 	 * First, we need to restore the desired flow control
1227 	 * settings because we may have had to re-autoneg with a
1228 	 * different link partner.
1230 	ret_val = e1000_config_fc_after_link_up_generic(hw);
1232 		DEBUGOUT("Error configuring flow control\n");
1238 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1239 * @hw: pointer to the HW structure
1241 * Initialize family-specific function pointers for PHY, MAC, and NVM.
1243 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1245 	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
/* MAC and NVM parameter-init hooks are shared by all ICH/PCH parts. */
1247 	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1248 	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
/* PHY init differs by generation: ICH8/9/10 use the ich8lan variant,
 * PCH generations use the pchlan variant (some case labels are elided
 * from this excerpt). */
1249 	switch (hw->mac.type) {
1252 	case e1000_ich10lan:
1253 		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1258 		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1266 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1267 * @hw: pointer to the HW structure
1269 * Acquires the mutex for performing NVM operations.
1271 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1273 	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
/* Serialize NVM access; released by e1000_release_nvm_ich8lan(). */
1275 	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1277 	return E1000_SUCCESS;
1281 * e1000_release_nvm_ich8lan - Release NVM mutex
1282 * @hw: pointer to the HW structure
1284 * Releases the mutex used while performing NVM operations.
1286 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1288 	DEBUGFUNC("e1000_release_nvm_ich8lan");
/* Counterpart of e1000_acquire_nvm_ich8lan(). */
1290 	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1296 * e1000_acquire_swflag_ich8lan - Acquire software control flag
1297 * @hw: pointer to the HW structure
1299 * Acquires the software control flag for performing PHY and select
1302 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1304 	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1305 	s32 ret_val = E1000_SUCCESS;
1307 	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1309 	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
/* Step 1: wait for any previous software owner to drop the SW flag
 * (polling loop lines are elided from this excerpt). */
1312 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1313 		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1321 		DEBUGOUT("SW has already locked the resource.\n");
1322 		ret_val = -E1000_ERR_CONFIG;
/* Step 2: claim the flag ourselves and poll until hardware reflects it. */
1326 	timeout = SW_FLAG_TIMEOUT;
1328 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1329 	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1332 		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1333 		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
/* The flag never stuck: firmware or hardware holds the semaphore.
 * Undo our write attempt and report failure. */
1341 		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1342 			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1343 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1344 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1345 		ret_val = -E1000_ERR_CONFIG;
/* NOTE(review): surrounding control flow is elided in this excerpt -
 * presumably this unlock runs only on the failure path, with success
 * paired with e1000_release_swflag_ich8lan(); verify in full source. */
1351 		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1357 * e1000_release_swflag_ich8lan - Release software control flag
1358 * @hw: pointer to the HW structure
1360 * Releases the software control flag for performing PHY and select
1363 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1367 	DEBUGFUNC("e1000_release_swflag_ich8lan");
1369 	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
/* Only clear the SW flag if it is still set; otherwise some other
 * agent already dropped it out from under us - just log. */
1371 	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1372 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1373 		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1375 		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1378 	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1384 * e1000_check_mng_mode_ich8lan - Checks management mode
1385 * @hw: pointer to the HW structure
1387 * This checks if the adapter has any manageability enabled.
1388 * This is a function pointer entry point only called by read/write
1389 * routines for the PHY and NVM parts.
1391 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1395 	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1397 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* Manageability requires valid firmware AND the mode field equal to
 * the iAMT mode value. */
1399 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1400 	       ((fwsm & E1000_FWSM_MODE_MASK) ==
1401 		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1405 * e1000_check_mng_mode_pchlan - Checks management mode
1406 * @hw: pointer to the HW structure
1408 * This checks if the adapter has iAMT enabled.
1409 * This is a function pointer entry point only called by read/write
1410 * routines for the PHY and NVM parts.
1412 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1416 	DEBUGFUNC("e1000_check_mng_mode_pchlan");
1418 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* PCH variant: tests only the iAMT mode bit rather than comparing the
 * whole mode field (contrast with e1000_check_mng_mode_ich8lan). */
1420 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1421 	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1425 * e1000_rar_set_pch2lan - Set receive address register
1426 * @hw: pointer to the HW structure
1427 * @addr: pointer to the receive address
1428 * @index: receive address array register
1430 * Sets the receive address array register at index to the address passed
1431 * in by addr. For 82579, RAR[0] is the base address register that is to
1432 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1433 * Use SHRA[0-3] in place of those reserved for ME.
1435 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1437 	u32 rar_low, rar_high;
1439 	DEBUGFUNC("e1000_rar_set_pch2lan");
1441 	/* HW expects these in little endian so we reverse the byte order
1442 	 * from network order (big endian) to little endian
1444 	rar_low = ((u32) addr[0] |
1445 		   ((u32) addr[1] << 8) |
1446 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1448 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1450 	/* If MAC address zero, no need to set the AV bit */
1451 	if (rar_low || rar_high)
1452 		rar_high |= E1000_RAH_AV;
/* Program RAL/RAH with a flush between halves so the entry is never
 * observed half-written (branch structure around these writes is
 * elided from this excerpt). */
1455 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1456 		E1000_WRITE_FLUSH(hw);
1457 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1458 		E1000_WRITE_FLUSH(hw);
/* Entries above RAR[0] map to SHRA[index-1] and require ownership of
 * the software control flag while being written. */
1462 	if (index < hw->mac.rar_entry_count) {
1465 		ret_val = e1000_acquire_swflag_ich8lan(hw);
1469 		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1470 		E1000_WRITE_FLUSH(hw);
1471 		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1472 		E1000_WRITE_FLUSH(hw);
1474 		e1000_release_swflag_ich8lan(hw);
1476 		/* verify the register updates */
1477 		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1478 		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
/* Read-back mismatch: the ME may have locked this SHRA entry. */
1481 		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1482 			  (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1486 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
1490 * e1000_rar_set_pch_lpt - Set receive address registers
1491 * @hw: pointer to the HW structure
1492 * @addr: pointer to the receive address
1493 * @index: receive address array register
1495 * Sets the receive address register array at index to the address passed
1496 * in by addr. For LPT, RAR[0] is the base address register that is to
1497 * contain the MAC address. SHRA[0-10] are the shared receive address
1498 * registers that are shared between the Host and manageability engine (ME).
1500 static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1502 	u32 rar_low, rar_high;
1505 	DEBUGFUNC("e1000_rar_set_pch_lpt");
1507 	/* HW expects these in little endian so we reverse the byte order
1508 	 * from network order (big endian) to little endian
1510 	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1511 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1513 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1515 	/* If MAC address zero, no need to set the AV bit */
1516 	if (rar_low || rar_high)
1517 		rar_high |= E1000_RAH_AV;
/* Program RAL/RAH with a flush between halves so the entry is never
 * observed half-written (branch structure around these writes is
 * elided from this excerpt). */
1520 		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1521 		E1000_WRITE_FLUSH(hw);
1522 		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1523 		E1000_WRITE_FLUSH(hw);
1527 	/* The manageability engine (ME) can lock certain SHRAR registers that
1528 	 * it is using - those registers are unavailable for use.
1530 	if (index < hw->mac.rar_entry_count) {
1531 		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
1532 			    E1000_FWSM_WLOCK_MAC_MASK;
1533 		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1535 		/* Check if all SHRAR registers are locked */
/* wlock_mac == 0 means no entries are locked; otherwise only indices
 * up to and including wlock_mac may be written. */
1539 		if ((wlock_mac == 0) || (index <= wlock_mac)) {
1542 			ret_val = e1000_acquire_swflag_ich8lan(hw);
1547 			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1549 			E1000_WRITE_FLUSH(hw);
1550 			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1552 			E1000_WRITE_FLUSH(hw);
1554 			e1000_release_swflag_ich8lan(hw);
1556 			/* verify the register updates */
1557 			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1558 			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1564 	DEBUGOUT1("Failed to write receive address at index %d\n", index);
1568 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1569 * @hw: pointer to the HW structure
1570 * @mc_addr_list: array of multicast addresses to program
1571 * @mc_addr_count: number of multicast addresses to program
1573 * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1574 * The caller must have a packed mc_addr_list of multicast addresses.
1576 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1584 	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
/* Update the MAC's MTA first; mta_shadow[] is read below, so the
 * generic helper presumably fills it - verify against its source. */
1586 	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1588 	ret_val = hw->phy.ops.acquire(hw);
1592 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
/* Mirror each 32-bit MTA entry into the PHY as two 16-bit page writes
 * (low half, then high half at the adjacent register). */
1596 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
1597 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1598 					   (u16)(hw->mac.mta_shadow[i] &
1600 		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1601 					   (u16)((hw->mac.mta_shadow[i] >> 16) &
1605 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1608 	hw->phy.ops.release(hw);
1612 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1613 * @hw: pointer to the HW structure
1615 * Checks if firmware is blocking the reset of the PHY.
1616 * This is a function pointer entry point only called by
1619 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1623 	DEBUGFUNC("e1000_check_reset_block_ich8lan");
1625 	fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* RSPCIPHY set in FWSM means firmware permits a PHY reset; otherwise
 * report that the reset is blocked. */
1627 	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
1628 						: E1000_BLK_PHY_RESET;
1632 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1633 * @hw: pointer to the HW structure
1635 * Assumes semaphore already acquired.
1638 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1641 	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1642 	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1643 		   E1000_STRAP_SMT_FREQ_SHIFT;
1646 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1648 	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
/* Replace the PHY's SMBus address field with the strap value and mark
 * it valid with packet error checking enabled. */
1652 	phy_data &= ~HV_SMB_ADDR_MASK;
1653 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1654 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1656 	if (hw->phy.type == e1000_phy_i217) {
1657 		/* Restore SMBus frequency */
1659 			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
/* Scatter the two strap frequency bits into their (non-adjacent)
 * positions in the PHY register. */
1660 			phy_data |= (freq & (1 << 0)) <<
1661 				    HV_SMB_ADDR_FREQ_LOW_SHIFT;
1662 			phy_data |= (freq & (1 << 1)) <<
1663 				    (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1665 			DEBUGOUT("Unsupported SMB frequency in PHY\n");
1669 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1673 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1674 * @hw: pointer to the HW structure
1676 * SW should configure the LCD from the NVM extended configuration region
1677 * as a workaround for certain parts.
1679 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1681 	struct e1000_phy_info *phy = &hw->phy;
1682 	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1683 	s32 ret_val = E1000_SUCCESS;
1684 	u16 word_addr, reg_data, reg_addr, phy_page = 0;
1686 	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1688 	/* Initialize the PHY from the NVM on ICH platforms. This
1689 	 * is needed due to an issue where the NVM configuration is
1690 	 * not properly autoloaded after power transitions.
1691 	 * Therefore, after each PHY reset, we will load the
1692 	 * configuration data out of the NVM manually.
/* Pick the NVM SW-config mask per device (case labels are elided from
 * this excerpt). */
1694 	switch (hw->mac.type) {
1696 		if (phy->type != e1000_phy_igp_3)
1699 		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1700 		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1701 			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1708 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1714 	ret_val = hw->phy.ops.acquire(hw);
/* Nothing to do unless the NVM requests software configuration. */
1718 	data = E1000_READ_REG(hw, E1000_FEXTNVM);
1719 	if (!(data & sw_cfg_mask))
1722 	/* Make sure HW does not configure LCD from PHY
1723 	 * extended configuration before SW configuration
1725 	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1726 	if ((hw->mac.type < e1000_pch2lan) &&
1727 	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
/* Extract the size and base pointer of the extended config region. */
1730 	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1731 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1732 	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1736 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1737 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1739 	if (((hw->mac.type == e1000_pchlan) &&
1740 	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
1741 	    (hw->mac.type > e1000_pchlan)) {
1742 		/* HW configures the SMBus address and LEDs when the
1743 		 * OEM and LCD Write Enable bits are set in the NVM.
1744 		 * When both NVM bits are cleared, SW will configure
1747 		ret_val = e1000_write_smbus_addr(hw);
/* Mirror the MAC's LED configuration into the PHY. */
1751 		data = E1000_READ_REG(hw, E1000_LEDCTL);
1752 		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1758 	/* Configure LCD from extended configuration region. */
1760 	/* cnf_base_addr is in DWORD */
1761 	word_addr = (u16)(cnf_base_addr << 1);
/* Each table entry is a (data, address) pair of NVM words; apply each
 * to the PHY in turn. */
1763 	for (i = 0; i < cnf_size; i++) {
1764 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1769 		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1774 		/* Save off the PHY page for future writes. */
1775 		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1776 			phy_page = reg_data;
/* Combine the saved page with the in-page register offset. */
1780 		reg_addr &= PHY_REG_MASK;
1781 		reg_addr |= phy_page;
1783 		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1790 	hw->phy.ops.release(hw);
1795 * e1000_k1_gig_workaround_hv - K1 Si workaround
1796 * @hw: pointer to the HW structure
1797 * @link: link up bool flag
1799 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1800 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
1801 * If link is down, the function will restore the default K1 setting located
1804 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1806 	s32 ret_val = E1000_SUCCESS;
1808 	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1810 	DEBUGFUNC("e1000_k1_gig_workaround_hv");
/* Workaround applies only to first-generation PCH parts. */
1812 	if (hw->mac.type != e1000_pchlan)
1813 		return E1000_SUCCESS;
1815 	/* Wrap the whole flow with the sw flag */
1816 	ret_val = hw->phy.ops.acquire(hw);
1820 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
/* 82578: detect resolved 1Gbps link via BM_CS_STATUS. */
1822 		if (hw->phy.type == e1000_phy_82578) {
1823 			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1828 			status_reg &= BM_CS_STATUS_LINK_UP |
1829 				      BM_CS_STATUS_RESOLVED |
1830 				      BM_CS_STATUS_SPEED_MASK;
1832 			if (status_reg == (BM_CS_STATUS_LINK_UP |
1833 					   BM_CS_STATUS_RESOLVED |
1834 					   BM_CS_STATUS_SPEED_1000))
/* 82577: same 1Gbps detection via HV_M_STATUS. */
1838 		if (hw->phy.type == e1000_phy_82577) {
1839 			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1844 			status_reg &= HV_M_STATUS_LINK_UP |
1845 				      HV_M_STATUS_AUTONEG_COMPLETE |
1846 				      HV_M_STATUS_SPEED_MASK;
1848 			if (status_reg == (HV_M_STATUS_LINK_UP |
1849 					   HV_M_STATUS_AUTONEG_COMPLETE |
1850 					   HV_M_STATUS_SPEED_1000))
1854 		/* Link stall fix for link up */
1855 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1861 		/* Link stall fix for link down */
1862 		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
/* Apply the K1 setting decided above (NVM default, or disabled when a
 * 1Gbps link was detected). */
1868 	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1871 	hw->phy.ops.release(hw);
1877 * e1000_configure_k1_ich8lan - Configure K1 power state
1878 * @hw: pointer to the HW structure
1879 * @enable: K1 state to configure
1881 * Configure the K1 power state based on the provided parameter.
1882 * Assumes semaphore already acquired.
1884 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1886 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1894 	DEBUGFUNC("e1000_configure_k1_ich8lan");
/* Toggle the K1 enable bit in the KMRN K1 configuration register
 * (the conditional on k1_enable is elided from this excerpt). */
1896 	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1902 		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1904 		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1906 	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
/* Briefly force speed with the speed-bypass bit set so the K1 change
 * takes effect, then restore the saved CTRL/CTRL_EXT values. */
1912 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1913 	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1915 	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1916 	reg |= E1000_CTRL_FRCSPD;
1917 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
1919 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1920 	E1000_WRITE_FLUSH(hw);
1922 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1923 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1924 	E1000_WRITE_FLUSH(hw);
1927 	return E1000_SUCCESS;
1931 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1932 * @hw: pointer to the HW structure
1933 * @d0_state: boolean if entering d0 or d3 device state
1935 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1936 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1937 * in NVM determines whether HW should configure LPLU and Gbe Disable.
1939 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1945 	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
/* OEM bits exist only on PCH and later parts. */
1947 	if (hw->mac.type < e1000_pchlan)
1950 	ret_val = hw->phy.ops.acquire(hw);
/* First-gen PCH: skip SW configuration when the OEM Write Enable bit
 * delegates it to HW, or when the NVM does not request SW config. */
1954 	if (hw->mac.type == e1000_pchlan) {
1955 		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1956 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1960 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1961 		if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1964 	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1966 	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1970 	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
/* The first pair of checks mirrors the D0 control bits; the second
 * pair covers non-D0 as well - the branch on d0_state between them is
 * elided from this excerpt. */
1973 		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1974 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1976 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1977 			oem_reg |= HV_OEM_BITS_LPLU;
1979 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1980 		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
1981 			oem_reg |= HV_OEM_BITS_GBE_DIS;
1983 		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
1984 		    E1000_PHY_CTRL_NOND0A_LPLU))
1985 			oem_reg |= HV_OEM_BITS_LPLU;
1988 	/* Set Restart auto-neg to activate the bits */
1989 	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1990 	    !hw->phy.ops.check_reset_block(hw))
1991 		oem_reg |= HV_OEM_BITS_RESTART_AN;
1993 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1996 	hw->phy.ops.release(hw);
2003 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2004 * @hw: pointer to the HW structure
2006 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2011 	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
/* Read-modify-write HV_KMRN_MODE_CTRL to set the MDIO slow-mode bit. */
2013 	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2017 	data |= HV_KMRN_MDIO_SLOW;
2019 	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2025 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2026 * done after every PHY reset.
2028 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2030 	s32 ret_val = E1000_SUCCESS;
2033 	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
/* These workarounds target first-generation PCH parts only. */
2035 	if (hw->mac.type != e1000_pchlan)
2036 		return E1000_SUCCESS;
2038 	/* Set MDIO slow mode before any other MDIO access */
2039 	if (hw->phy.type == e1000_phy_82577) {
2040 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
/* Early silicon revisions (82577 rev 1/2, 82578 rev 1) need
 * preamble-related tuning. */
2045 	if (((hw->phy.type == e1000_phy_82577) &&
2046 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2047 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2048 		/* Disable generation of early preamble */
2049 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2053 		/* Preamble tuning for SSC */
2054 		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2060 	if (hw->phy.type == e1000_phy_82578) {
2061 		/* Return registers to default by doing a soft reset then
2062 		 * writing 0x3140 to the control register.
2064 		if (hw->phy.revision < 2) {
2065 			e1000_phy_sw_reset_generic(hw);
2066 			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2072 	ret_val = hw->phy.ops.acquire(hw);
/* Reset the PHY page select to page 0 via a raw MDIC write. */
2077 	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2078 	hw->phy.ops.release(hw);
2082 	/* Configure the K1 Si workaround during phy reset assuming there is
2083 	 * link so that it disables K1 if link is in 1Gbps.
2085 	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2089 	/* Workaround for link disconnects on a busy hub in half duplex */
2090 	ret_val = hw->phy.ops.acquire(hw);
2093 	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2096 	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2101 	/* set MSE higher to enable link to stay up when noise is high */
2102 	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2104 	hw->phy.ops.release(hw);
2110 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2111 * @hw: pointer to the HW structure
2113 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2119 	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2121 	ret_val = hw->phy.ops.acquire(hw);
2124 	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2128 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
2129 	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
/* Low 32 address bits -> two 16-bit PHY wakeup registers. */
2130 		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2131 		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2132 					   (u16)(mac_reg & 0xFFFF));
2133 		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2134 					   (u16)((mac_reg >> 16) & 0xFFFF));
/* High address bits plus the Address-Valid flag into the RAR control
 * register. */
2136 		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2137 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2138 					   (u16)(mac_reg & 0xFFFF));
2139 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2140 					   (u16)((mac_reg & E1000_RAH_AV)
2144 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2147 	hw->phy.ops.release(hw);
2150 static u32 e1000_calc_rx_da_crc(u8 mac[])
2152 	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
2153 	u32 i, j, mask, crc;
2155 	DEBUGFUNC("e1000_calc_rx_da_crc");
/* Bitwise CRC-32 (reflected 802.3 polynomial) over the 6-byte MAC
 * destination address (CRC seed/XOR lines are elided from this
 * excerpt). */
2158 	for (i = 0; i < 6; i++) {
2160 		for (j = 8; j > 0; j--) {
/* mask is all-ones when the low CRC bit is set, else zero - selects
 * whether to XOR in the polynomial after the shift. */
2161 			mask = (crc & 1) * (-1);
2162 			crc = (crc >> 1) ^ (poly & mask);
2169 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2171 * @hw: pointer to the HW structure
2172 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
2174 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2176 	s32 ret_val = E1000_SUCCESS;
2181 	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
/* Applies to PCH2 (82579) and later parts only. */
2183 	if (hw->mac.type < e1000_pch2lan)
2184 		return E1000_SUCCESS;
2186 	/* disable Rx path while enabling/disabling workaround */
2187 	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2188 	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2189 					phy_reg | (1 << 14));
2194 		/* Write Rx addresses (rar_entry_count for RAL/H, +4 for
2195 		 * SHRAL/H) and initial CRC values to the MAC
2197 		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
2198 			u8 mac_addr[ETH_ADDR_LEN] = {0};
2199 			u32 addr_high, addr_low;
/* Skip entries whose Address-Valid bit is clear. */
2201 			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2202 			if (!(addr_high & E1000_RAH_AV))
2204 			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
/* Unpack the 48-bit address byte by byte from the RAL/RAH pair. */
2205 			mac_addr[0] = (addr_low & 0xFF);
2206 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2207 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2208 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2209 			mac_addr[4] = (addr_high & 0xFF);
2210 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
/* Seed the per-entry Rx-address CRC register with the address CRC. */
2212 			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2213 					e1000_calc_rx_da_crc(mac_addr));
2216 		/* Write Rx addresses to the PHY */
2217 		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2219 		/* Enable jumbo frame workaround in the MAC */
2220 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2221 		mac_reg &= ~(1 << 14);
2222 		mac_reg |= (7 << 15);
2223 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
/* Strip the Ethernet CRC in hardware while the workaround is active. */
2225 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2226 		mac_reg |= E1000_RCTL_SECRC;
2227 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2229 		ret_val = e1000_read_kmrn_reg_generic(hw,
2230 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2234 		ret_val = e1000_write_kmrn_reg_generic(hw,
2235 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2239 		ret_val = e1000_read_kmrn_reg_generic(hw,
2240 						E1000_KMRNCTRLSTA_HD_CTRL,
2244 		data &= ~(0xF << 8);
2246 		ret_val = e1000_write_kmrn_reg_generic(hw,
2247 						E1000_KMRNCTRLSTA_HD_CTRL,
2252 		/* Enable jumbo frame workaround in the PHY */
2253 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2254 		data &= ~(0x7F << 5);
2255 		data |= (0x37 << 5);
2256 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2259 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2261 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2264 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2265 		data &= ~(0x3FF << 2);
2266 		data |= (0x1A << 2);
2267 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2270 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2273 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2274 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
/* From here down: presumably the disable branch (the if/else on
 * 'enable' is elided from this excerpt) - restore MAC and PHY
 * registers to hardware defaults. */
2279 		/* Write MAC register values back to h/w defaults */
2280 		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2281 		mac_reg &= ~(0xF << 14);
2282 		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2284 		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2285 		mac_reg &= ~E1000_RCTL_SECRC;
2286 		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2288 		ret_val = e1000_read_kmrn_reg_generic(hw,
2289 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2293 		ret_val = e1000_write_kmrn_reg_generic(hw,
2294 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2298 		ret_val = e1000_read_kmrn_reg_generic(hw,
2299 						E1000_KMRNCTRLSTA_HD_CTRL,
2303 		data &= ~(0xF << 8);
2305 		ret_val = e1000_write_kmrn_reg_generic(hw,
2306 						E1000_KMRNCTRLSTA_HD_CTRL,
2311 		/* Write PHY register values back to h/w defaults */
2312 		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2313 		data &= ~(0x7F << 5);
2314 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2317 		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2319 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2322 		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2323 		data &= ~(0x3FF << 2);
2325 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2328 		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2331 		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2332 		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2338 	/* re-enable Rx path after enabling/disabling workaround */
2339 	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2344 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2345 * done after every PHY reset.
2347 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2349 	s32 ret_val = E1000_SUCCESS;
2351 	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
/* These PHY tunings apply only to PCH2 (82579) parts. */
2353 	if (hw->mac.type != e1000_pch2lan)
2354 		return E1000_SUCCESS;
2356 	/* Set MDIO slow mode before any other MDIO access */
2357 	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2361 	ret_val = hw->phy.ops.acquire(hw);
2364 	/* set MSE higher to enable link to stay up when noise is high */
2365 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2368 	/* drop link after 5 times MSE threshold was reached */
2369 	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2371 	hw->phy.ops.release(hw);
2377 * e1000_k1_workaround_lv - K1 Si workaround
2378 * @hw: pointer to the HW structure
2380 * Workaround to set the K1 beacon duration for 82579 parts
2382 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2384 	s32 ret_val = E1000_SUCCESS;
2389 	DEBUGFUNC("e1000_k1_workaround_lv");
/* 82579 (pch2lan) only. */
2391 	if (hw->mac.type != e1000_pch2lan)
2392 		return E1000_SUCCESS;
2394 	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
2395 	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
/* Only act once link is up AND auto-negotiation has completed. */
2399 	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2400 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2401 		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2402 		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2404 		ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
/* 1Gbps link: 8usec beacon, clear forced PLL lock count, and disable
 * PLL stop in K1 (the 1G packet-drop workaround). */
2408 		if (status_reg & HV_M_STATUS_SPEED_1000) {
2411 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
2412 			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2413 			/* LV 1G Packet drop issue wa  */
2414 			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2418 			pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
2419 			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
/* Other speeds: 16usec beacon with forced PLL lock count (presumably
 * the else branch - elided from this excerpt). */
2424 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2425 			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2427 		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2428 		ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
2435 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2436 * @hw: pointer to the HW structure
2437 * @gate: boolean set to TRUE to gate, FALSE to ungate
2439 * Gate/ungate the automatic PHY configuration via hardware; perform
2440 * the configuration via software instead.
2442 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2446 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2448 if (hw->mac.type < e1000_pch2lan)
2451 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2454 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2456 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2458 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2462 * e1000_lan_init_done_ich8lan - Check for PHY config completion
2463 * @hw: pointer to the HW structure
2465 * Check the appropriate indication the MAC has finished configuring the
2466 * PHY after a software reset.
2468 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2470 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2472 DEBUGFUNC("e1000_lan_init_done_ich8lan");
2474 /* Wait for basic configuration completes before proceeding */
2476 data = E1000_READ_REG(hw, E1000_STATUS);
2477 data &= E1000_STATUS_LAN_INIT_DONE;
2479 } while ((!data) && --loop);
2481 /* If basic configuration is incomplete before the above loop
2482 * count reaches 0, loading the configuration from NVM will
2483 * leave the PHY in a bad state possibly resulting in no link.
2486 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2488 /* Clear the Init Done bit for the next init event */
2489 data = E1000_READ_REG(hw, E1000_STATUS);
2490 data &= ~E1000_STATUS_LAN_INIT_DONE;
2491 E1000_WRITE_REG(hw, E1000_STATUS, data);
2495 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2496 * @hw: pointer to the HW structure
2498 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2500 s32 ret_val = E1000_SUCCESS;
2503 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2505 if (hw->phy.ops.check_reset_block(hw))
2506 return E1000_SUCCESS;
2508 /* Allow time for h/w to get to quiescent state after reset */
2511 /* Perform any necessary post-reset workarounds */
2512 switch (hw->mac.type) {
2514 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2519 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2527 /* Clear the host wakeup bit after lcd reset */
2528 if (hw->mac.type >= e1000_pchlan) {
2529 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, ®);
2530 reg &= ~BM_WUC_HOST_WU_BIT;
2531 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2534 /* Configure the LCD with the extended configuration region in NVM */
2535 ret_val = e1000_sw_lcd_config_ich8lan(hw);
2539 /* Configure the LCD with the OEM bits in NVM */
2540 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
2542 if (hw->mac.type == e1000_pch2lan) {
2543 /* Ungate automatic PHY configuration on non-managed 82579 */
2544 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2545 E1000_ICH_FWSM_FW_VALID)) {
2547 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
2550 /* Set EEE LPI Update Timer to 200usec */
2551 ret_val = hw->phy.ops.acquire(hw);
2554 ret_val = e1000_write_emi_reg_locked(hw,
2555 I82579_LPI_UPDATE_TIMER,
2557 hw->phy.ops.release(hw);
2564 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2565 * @hw: pointer to the HW structure
2568 * This is a function pointer entry point called by drivers
2569 * or other shared routines.
2571 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2573 s32 ret_val = E1000_SUCCESS;
2575 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2577 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2578 if ((hw->mac.type == e1000_pch2lan) &&
2579 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2580 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
2582 ret_val = e1000_phy_hw_reset_generic(hw);
2586 return e1000_post_phy_reset_ich8lan(hw);
2590 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2591 * @hw: pointer to the HW structure
2592 * @active: TRUE to enable LPLU, FALSE to disable
2594 * Sets the LPLU state according to the active flag. For PCH, if OEM write
2595 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2596 * the phy speed. This function will manually set the LPLU bit and restart
2597 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
2598 * since it configures the same bit.
2600 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2605 DEBUGFUNC("e1000_set_lplu_state_pchlan");
2607 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2612 oem_reg |= HV_OEM_BITS_LPLU;
2614 oem_reg &= ~HV_OEM_BITS_LPLU;
2616 if (!hw->phy.ops.check_reset_block(hw))
2617 oem_reg |= HV_OEM_BITS_RESTART_AN;
2619 return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2623 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2624 * @hw: pointer to the HW structure
2625 * @active: TRUE to enable LPLU, FALSE to disable
2627 * Sets the LPLU D0 state according to the active flag. When
2628 * activating LPLU this function also disables smart speed
2629 * and vice versa. LPLU will not be activated unless the
2630 * device autonegotiation advertisement meets standards of
2631 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2632 * This is a function pointer entry point only called by
2633 * PHY setup routines.
2635 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2637 struct e1000_phy_info *phy = &hw->phy;
2639 s32 ret_val = E1000_SUCCESS;
2642 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2644 if (phy->type == e1000_phy_ife)
2645 return E1000_SUCCESS;
2647 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2650 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2651 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2653 if (phy->type != e1000_phy_igp_3)
2654 return E1000_SUCCESS;
2656 /* Call gig speed drop workaround on LPLU before accessing
2659 if (hw->mac.type == e1000_ich8lan)
2660 e1000_gig_downshift_workaround_ich8lan(hw);
2662 /* When LPLU is enabled, we should disable SmartSpeed */
2663 ret_val = phy->ops.read_reg(hw,
2664 IGP01E1000_PHY_PORT_CONFIG,
2668 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2669 ret_val = phy->ops.write_reg(hw,
2670 IGP01E1000_PHY_PORT_CONFIG,
2675 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2676 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2678 if (phy->type != e1000_phy_igp_3)
2679 return E1000_SUCCESS;
2681 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
2682 * during Dx states where the power conservation is most
2683 * important. During driver activity we should enable
2684 * SmartSpeed, so performance is maintained.
2686 if (phy->smart_speed == e1000_smart_speed_on) {
2687 ret_val = phy->ops.read_reg(hw,
2688 IGP01E1000_PHY_PORT_CONFIG,
2693 data |= IGP01E1000_PSCFR_SMART_SPEED;
2694 ret_val = phy->ops.write_reg(hw,
2695 IGP01E1000_PHY_PORT_CONFIG,
2699 } else if (phy->smart_speed == e1000_smart_speed_off) {
2700 ret_val = phy->ops.read_reg(hw,
2701 IGP01E1000_PHY_PORT_CONFIG,
2706 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2707 ret_val = phy->ops.write_reg(hw,
2708 IGP01E1000_PHY_PORT_CONFIG,
2715 return E1000_SUCCESS;
2719 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2720 * @hw: pointer to the HW structure
2721 * @active: TRUE to enable LPLU, FALSE to disable
2723 * Sets the LPLU D3 state according to the active flag. When
2724 * activating LPLU this function also disables smart speed
2725 * and vice versa. LPLU will not be activated unless the
2726 * device autonegotiation advertisement meets standards of
2727 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2728 * This is a function pointer entry point only called by
2729 * PHY setup routines.
2731 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2733 struct e1000_phy_info *phy = &hw->phy;
2735 s32 ret_val = E1000_SUCCESS;
2738 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2740 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2743 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2744 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2746 if (phy->type != e1000_phy_igp_3)
2747 return E1000_SUCCESS;
2749 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
2750 * during Dx states where the power conservation is most
2751 * important. During driver activity we should enable
2752 * SmartSpeed, so performance is maintained.
2754 if (phy->smart_speed == e1000_smart_speed_on) {
2755 ret_val = phy->ops.read_reg(hw,
2756 IGP01E1000_PHY_PORT_CONFIG,
2761 data |= IGP01E1000_PSCFR_SMART_SPEED;
2762 ret_val = phy->ops.write_reg(hw,
2763 IGP01E1000_PHY_PORT_CONFIG,
2767 } else if (phy->smart_speed == e1000_smart_speed_off) {
2768 ret_val = phy->ops.read_reg(hw,
2769 IGP01E1000_PHY_PORT_CONFIG,
2774 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2775 ret_val = phy->ops.write_reg(hw,
2776 IGP01E1000_PHY_PORT_CONFIG,
2781 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2782 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2783 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2784 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2785 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2787 if (phy->type != e1000_phy_igp_3)
2788 return E1000_SUCCESS;
2790 /* Call gig speed drop workaround on LPLU before accessing
2793 if (hw->mac.type == e1000_ich8lan)
2794 e1000_gig_downshift_workaround_ich8lan(hw);
2796 /* When LPLU is enabled, we should disable SmartSpeed */
2797 ret_val = phy->ops.read_reg(hw,
2798 IGP01E1000_PHY_PORT_CONFIG,
2803 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2804 ret_val = phy->ops.write_reg(hw,
2805 IGP01E1000_PHY_PORT_CONFIG,
2813 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2814 * @hw: pointer to the HW structure
2815 * @bank: pointer to the variable that returns the active bank
2817 * Reads signature byte from the NVM using the flash access registers.
2818 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2820 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2823 struct e1000_nvm_info *nvm = &hw->nvm;
2824 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2825 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2829 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
2831 switch (hw->mac.type) {
2834 eecd = E1000_READ_REG(hw, E1000_EECD);
2835 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2836 E1000_EECD_SEC1VAL_VALID_MASK) {
2837 if (eecd & E1000_EECD_SEC1VAL)
2842 return E1000_SUCCESS;
2844 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2847 /* set bank to 0 in case flash read fails */
2851 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2855 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2856 E1000_ICH_NVM_SIG_VALUE) {
2858 return E1000_SUCCESS;
2862 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2867 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2868 E1000_ICH_NVM_SIG_VALUE) {
2870 return E1000_SUCCESS;
2873 DEBUGOUT("ERROR: No valid NVM bank present\n");
2874 return -E1000_ERR_NVM;
2879 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
2880 * @hw: pointer to the HW structure
2881 * @offset: The offset (in bytes) of the word(s) to read.
2882 * @words: Size of data to read in words
2883 * @data: Pointer to the word(s) to read at offset.
2885 * Reads a word(s) from the NVM using the flash access registers.
2887 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2890 struct e1000_nvm_info *nvm = &hw->nvm;
2891 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2893 s32 ret_val = E1000_SUCCESS;
2897 DEBUGFUNC("e1000_read_nvm_ich8lan");
2899 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2901 DEBUGOUT("nvm parameter(s) out of bounds\n");
2902 ret_val = -E1000_ERR_NVM;
2906 nvm->ops.acquire(hw);
2908 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2909 if (ret_val != E1000_SUCCESS) {
2910 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2914 act_offset = (bank) ? nvm->flash_bank_size : 0;
2915 act_offset += offset;
2917 ret_val = E1000_SUCCESS;
2918 for (i = 0; i < words; i++) {
2919 if (dev_spec->shadow_ram[offset+i].modified) {
2920 data[i] = dev_spec->shadow_ram[offset+i].value;
2922 ret_val = e1000_read_flash_word_ich8lan(hw,
2931 nvm->ops.release(hw);
2935 DEBUGOUT1("NVM read error: %d\n", ret_val);
2941 * e1000_flash_cycle_init_ich8lan - Initialize flash
2942 * @hw: pointer to the HW structure
2944 * This function does initial flash setup so that a new read/write/erase cycle
2947 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2949 union ich8_hws_flash_status hsfsts;
2950 s32 ret_val = -E1000_ERR_NVM;
2952 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2954 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2956 /* Check if the flash descriptor is valid */
2957 if (!hsfsts.hsf_status.fldesvalid) {
2958 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
2959 return -E1000_ERR_NVM;
2962 /* Clear FCERR and DAEL in hw status by writing 1 */
2963 hsfsts.hsf_status.flcerr = 1;
2964 hsfsts.hsf_status.dael = 1;
2966 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2968 /* Either we should have a hardware SPI cycle in progress
2969 * bit to check against, in order to start a new cycle or
2970 * FDONE bit should be changed in the hardware so that it
2971 * is 1 after hardware reset, which can then be used as an
2972 * indication whether a cycle is in progress or has been
2976 if (!hsfsts.hsf_status.flcinprog) {
2977 /* There is no cycle running at present,
2978 * so we can start a cycle.
2979 * Begin by setting Flash Cycle Done.
2981 hsfsts.hsf_status.flcdone = 1;
2982 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2983 ret_val = E1000_SUCCESS;
2987 /* Otherwise poll for sometime so the current
2988 * cycle has a chance to end before giving up.
2990 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2991 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2993 if (!hsfsts.hsf_status.flcinprog) {
2994 ret_val = E1000_SUCCESS;
2999 if (ret_val == E1000_SUCCESS) {
3000 /* Successful in waiting for previous cycle to timeout,
3001 * now set the Flash Cycle Done.
3003 hsfsts.hsf_status.flcdone = 1;
3004 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3007 DEBUGOUT("Flash controller busy, cannot get access\n");
3015 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3016 * @hw: pointer to the HW structure
3017 * @timeout: maximum time to wait for completion
3019 * This function starts a flash cycle and waits for its completion.
3021 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3023 union ich8_hws_flash_ctrl hsflctl;
3024 union ich8_hws_flash_status hsfsts;
3027 DEBUGFUNC("e1000_flash_cycle_ich8lan");
3029 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3030 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3031 hsflctl.hsf_ctrl.flcgo = 1;
3032 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3034 /* wait till FDONE bit is set to 1 */
3036 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3037 if (hsfsts.hsf_status.flcdone)
3040 } while (i++ < timeout);
3042 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3043 return E1000_SUCCESS;
3045 return -E1000_ERR_NVM;
3049 * e1000_read_flash_word_ich8lan - Read word from flash
3050 * @hw: pointer to the HW structure
3051 * @offset: offset to data location
3052 * @data: pointer to the location for storing the data
3054 * Reads the flash word at offset into data. Offset is converted
3055 * to bytes before read.
3057 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3060 DEBUGFUNC("e1000_read_flash_word_ich8lan");
3063 return -E1000_ERR_NVM;
3065 /* Must convert offset into bytes. */
3068 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3072 * e1000_read_flash_byte_ich8lan - Read byte from flash
3073 * @hw: pointer to the HW structure
3074 * @offset: The offset of the byte to read.
3075 * @data: Pointer to a byte to store the value read.
3077 * Reads a single byte from the NVM using the flash access registers.
3079 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3085 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3091 return E1000_SUCCESS;
3095 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
3096 * @hw: pointer to the HW structure
3097 * @offset: The offset (in bytes) of the byte or word to read.
3098 * @size: Size of data to read, 1=byte 2=word
3099 * @data: Pointer to the word to store the value read.
3101 * Reads a byte or word from the NVM using the flash access registers.
3103 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3106 union ich8_hws_flash_status hsfsts;
3107 union ich8_hws_flash_ctrl hsflctl;
3108 u32 flash_linear_addr;
3110 s32 ret_val = -E1000_ERR_NVM;
3113 DEBUGFUNC("e1000_read_flash_data_ich8lan");
3115 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3116 return -E1000_ERR_NVM;
3118 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3119 hw->nvm.flash_base_addr;
3124 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3125 if (ret_val != E1000_SUCCESS)
3128 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3129 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3130 hsflctl.hsf_ctrl.fldbcount = size - 1;
3131 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3132 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3134 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3136 ret_val = e1000_flash_cycle_ich8lan(hw,
3137 ICH_FLASH_READ_COMMAND_TIMEOUT);
3139 /* Check if FCERR is set to 1, if set to 1, clear it
3140 * and try the whole sequence a few more times, else
3141 * read in (shift in) the Flash Data0, the order is
3142 * least significant byte first msb to lsb
3144 if (ret_val == E1000_SUCCESS) {
3145 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3147 *data = (u8)(flash_data & 0x000000FF);
3149 *data = (u16)(flash_data & 0x0000FFFF);
3152 /* If we've gotten here, then things are probably
3153 * completely hosed, but if the error condition is
3154 * detected, it won't hurt to give it another try...
3155 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3157 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3159 if (hsfsts.hsf_status.flcerr) {
3160 /* Repeat for some time before giving up. */
3162 } else if (!hsfsts.hsf_status.flcdone) {
3163 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3167 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3173 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
3174 * @hw: pointer to the HW structure
3175 * @offset: The offset (in bytes) of the word(s) to write.
3176 * @words: Size of data to write in words
3177 * @data: Pointer to the word(s) to write at offset.
3179 * Writes a byte or word to the NVM using the flash access registers.
3181 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3184 struct e1000_nvm_info *nvm = &hw->nvm;
3185 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3188 DEBUGFUNC("e1000_write_nvm_ich8lan");
3190 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3192 DEBUGOUT("nvm parameter(s) out of bounds\n");
3193 return -E1000_ERR_NVM;
3196 nvm->ops.acquire(hw);
3198 for (i = 0; i < words; i++) {
3199 dev_spec->shadow_ram[offset+i].modified = TRUE;
3200 dev_spec->shadow_ram[offset+i].value = data[i];
3203 nvm->ops.release(hw);
3205 return E1000_SUCCESS;
3209 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3210 * @hw: pointer to the HW structure
3212 * The NVM checksum is updated by calling the generic update_nvm_checksum,
3213 * which writes the checksum to the shadow ram. The changes in the shadow
3214 * ram are then committed to the EEPROM by processing each bank at a time
3215 * checking for the modified bit and writing only the pending changes.
3216 * After a successful commit, the shadow ram is cleared and is ready for
3219 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3221 struct e1000_nvm_info *nvm = &hw->nvm;
3222 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3223 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3227 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3229 ret_val = e1000_update_nvm_checksum_generic(hw);
3233 if (nvm->type != e1000_nvm_flash_sw)
3236 nvm->ops.acquire(hw);
3238 /* We're writing to the opposite bank so if we're on bank 1,
3239 * write to bank 0 etc. We also need to erase the segment that
3240 * is going to be written
3242 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3243 if (ret_val != E1000_SUCCESS) {
3244 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3249 new_bank_offset = nvm->flash_bank_size;
3250 old_bank_offset = 0;
3251 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3255 old_bank_offset = nvm->flash_bank_size;
3256 new_bank_offset = 0;
3257 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3262 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3263 /* Determine whether to write the value stored
3264 * in the other NVM bank or a modified value stored
3267 if (dev_spec->shadow_ram[i].modified) {
3268 data = dev_spec->shadow_ram[i].value;
3270 ret_val = e1000_read_flash_word_ich8lan(hw, i +
3277 /* If the word is 0x13, then make sure the signature bits
3278 * (15:14) are 11b until the commit has completed.
3279 * This will allow us to write 10b which indicates the
3280 * signature is valid. We want to do this after the write
3281 * has completed so that we don't mark the segment valid
3282 * while the write is still in progress
3284 if (i == E1000_ICH_NVM_SIG_WORD)
3285 data |= E1000_ICH_NVM_SIG_MASK;
3287 /* Convert offset to bytes. */
3288 act_offset = (i + new_bank_offset) << 1;
3291 /* Write the bytes to the new bank. */
3292 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3299 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3306 /* Don't bother writing the segment valid bits if sector
3307 * programming failed.
3310 DEBUGOUT("Flash commit failed.\n");
3314 /* Finally validate the new segment by setting bit 15:14
3315 * to 10b in word 0x13 , this can be done without an
3316 * erase as well since these bits are 11 to start with
3317 * and we need to change bit 14 to 0b
3319 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3320 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3325 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3331 /* And invalidate the previously valid segment by setting
3332 * its signature word (0x13) high_byte to 0b. This can be
3333 * done without an erase because flash erase sets all bits
3334 * to 1's. We can write 1's to 0's without an erase
3336 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3337 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3341 /* Great! Everything worked, we can now clear the cached entries. */
3342 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3343 dev_spec->shadow_ram[i].modified = FALSE;
3344 dev_spec->shadow_ram[i].value = 0xFFFF;
3348 nvm->ops.release(hw);
3350 /* Reload the EEPROM, or else modifications will not appear
3351 * until after the next adapter reset.
3354 nvm->ops.reload(hw);
3360 DEBUGOUT1("NVM update error: %d\n", ret_val);
3366 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3367 * @hw: pointer to the HW structure
3369 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3370 * If the bit is 0, that the EEPROM had been modified, but the checksum was not
3371 * calculated, in which case we need to calculate the checksum and set bit 6.
3373 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3378 u16 valid_csum_mask;
3380 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3382 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
3383 * the checksum needs to be fixed. This bit is an indication that
3384 * the NVM was prepared by OEM software and did not calculate
3385 * the checksum...a likely scenario.
3387 switch (hw->mac.type) {
3390 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3393 word = NVM_FUTURE_INIT_WORD1;
3394 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3398 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3402 if (!(data & valid_csum_mask)) {
3403 data |= valid_csum_mask;
3404 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3407 ret_val = hw->nvm.ops.update(hw);
3412 return e1000_validate_nvm_checksum_generic(hw);
3416 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3417 * @hw: pointer to the HW structure
3418 * @offset: The offset (in bytes) of the byte/word to read.
3419 * @size: Size of data to read, 1=byte 2=word
3420 * @data: The byte(s) to write to the NVM.
3422 * Writes one/two bytes to the NVM using the flash access registers.
3424 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3427 union ich8_hws_flash_status hsfsts;
3428 union ich8_hws_flash_ctrl hsflctl;
3429 u32 flash_linear_addr;
3434 DEBUGFUNC("e1000_write_ich8_data");
3436 if (size < 1 || size > 2 || data > size * 0xff ||
3437 offset > ICH_FLASH_LINEAR_ADDR_MASK)
3438 return -E1000_ERR_NVM;
3440 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3441 hw->nvm.flash_base_addr;
3446 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3447 if (ret_val != E1000_SUCCESS)
3450 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3451 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3452 hsflctl.hsf_ctrl.fldbcount = size - 1;
3453 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3454 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3456 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3459 flash_data = (u32)data & 0x00FF;
3461 flash_data = (u32)data;
3463 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3465 /* check if FCERR is set to 1 , if set to 1, clear it
3466 * and try the whole sequence a few more times else done
3468 ret_val = e1000_flash_cycle_ich8lan(hw,
3469 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3470 if (ret_val == E1000_SUCCESS)
3473 /* If we're here, then things are most likely
3474 * completely hosed, but if the error condition
3475 * is detected, it won't hurt to give it another
3476 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3478 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3479 if (hsfsts.hsf_status.flcerr)
3480 /* Repeat for some time before giving up. */
3482 if (!hsfsts.hsf_status.flcdone) {
3483 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3486 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3492 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3493 * @hw: pointer to the HW structure
3494 * @offset: The index of the byte to read.
3495 * @data: The byte to write to the NVM.
3497 * Writes a single byte to the NVM using the flash access registers.
3499 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3502 u16 word = (u16)data;
3504 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3506 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3510 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3511 * @hw: pointer to the HW structure
3512 * @offset: The offset of the byte to write.
3513 * @byte: The byte to write to the NVM.
3515 * Writes a single byte to the NVM using the flash access registers.
3516 * Goes through a retry algorithm before giving up.
3518 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3519 u32 offset, u8 byte)
3522 u16 program_retries;
3524 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3526 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3530 for (program_retries = 0; program_retries < 100; program_retries++) {
3531 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3533 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3534 if (ret_val == E1000_SUCCESS)
3537 if (program_retries == 100)
3538 return -E1000_ERR_NVM;
3540 return E1000_SUCCESS;
3544 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3545 * @hw: pointer to the HW structure
3546 * @bank: 0 for first bank, 1 for second bank, etc.
3548 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3549 * bank N is 4096 * N + flash_reg_addr.
3551 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3553 struct e1000_nvm_info *nvm = &hw->nvm;
3554 union ich8_hws_flash_status hsfsts;
3555 union ich8_hws_flash_ctrl hsflctl;
3556 u32 flash_linear_addr;
3557 /* bank size is in 16bit words - adjust to bytes */
3558 u32 flash_bank_size = nvm->flash_bank_size * 2;
3561 s32 j, iteration, sector_size;
3563 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3565 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3567 /* Determine HW Sector size: Read BERASE bits of hw flash status
3569 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3570 * consecutive sectors. The start index for the nth Hw sector
3571 * can be calculated as = bank * 4096 + n * 256
3572 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3573 * The start index for the nth Hw sector can be calculated
3575 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3576 * (ich9 only, otherwise error condition)
3577 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3579 switch (hsfsts.hsf_status.berasesz) {
3581 /* Hw sector size 256 */
3582 sector_size = ICH_FLASH_SEG_SIZE_256;
3583 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3586 sector_size = ICH_FLASH_SEG_SIZE_4K;
3590 sector_size = ICH_FLASH_SEG_SIZE_8K;
3594 sector_size = ICH_FLASH_SEG_SIZE_64K;
3598 return -E1000_ERR_NVM;
3601 /* Start with the base address, then add the sector offset. */
3602 flash_linear_addr = hw->nvm.flash_base_addr;
3603 flash_linear_addr += (bank) ? flash_bank_size : 0;
3605 for (j = 0; j < iteration ; j++) {
3608 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3612 /* Write a value 11 (block Erase) in Flash
3613 * Cycle field in hw flash control
3615 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3617 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3618 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3621 /* Write the last 24 bits of an index within the
3622 * block into Flash Linear address field in Flash
3625 flash_linear_addr += (j * sector_size);
3626 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3629 ret_val = e1000_flash_cycle_ich8lan(hw,
3630 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3631 if (ret_val == E1000_SUCCESS)
3634 /* Check if FCERR is set to 1. If 1,
3635 * clear it and try the whole sequence
3636 * a few more times else Done
3638 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3640 if (hsfsts.hsf_status.flcerr)
3641 /* repeat for some time before giving up */
3643 else if (!hsfsts.hsf_status.flcdone)
3645 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3648 return E1000_SUCCESS;
3652 * e1000_valid_led_default_ich8lan - Set the default LED settings
3653 * @hw: pointer to the HW structure
3654 * @data: Pointer to the LED settings
3656 * Reads the LED default settings from the NVM to data. If the NVM LED
3657 * settings is all 0's or F's, set the LED default to a valid LED default
3660 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3664 DEBUGFUNC("e1000_valid_led_default_ich8lan");
3666 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3668 DEBUGOUT("NVM Read Error\n");
3672 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3673 *data = ID_LED_DEFAULT_ICH8LAN;
3675 return E1000_SUCCESS;
3679  * e1000_id_led_init_pchlan - store LED configurations
3680  * @hw: pointer to the HW structure
3682  * PCH does not control LEDs via the LEDCTL register, rather it uses
3683  * the PHY LED configuration register.
3685  * PCH also does not have an "always on" or "always off" mode which
3686  * complicates the ID feature.  Instead of using the "on" mode to indicate
3687  * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3688  * use "link_up" mode.  The LEDs will still ID on request if there is no
3689  * link based on logic in e1000_led_[on|off]_pchlan().
3691 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3693 	struct e1000_mac_info *mac = &hw->mac;
	/* "off" is emulated with link_up mode plus the invert bit, since the
	 * PHY LED register has no true always-off mode.
	 */
3695 	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3696 	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3697 	u16 data, i, temp, shift;
3699 	DEBUGFUNC("e1000_id_led_init_pchlan");
3701 	/* Get default ID LED modes */
3702 	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3706 	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3707 	mac->ledctl_mode1 = mac->ledctl_default;
3708 	mac->ledctl_mode2 = mac->ledctl_default;
	/* Each nibble of the NVM LED word selects a mode for one LED;
	 * translate those into mode1 (LED off) and mode2 (LED on) masks.
	 */
3710 	for (i = 0; i < 4; i++) {
3711 		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3714 		case ID_LED_ON1_DEF2:
3715 		case ID_LED_ON1_ON2:
3716 		case ID_LED_ON1_OFF2:
3717 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3718 			mac->ledctl_mode1 |= (ledctl_on << shift);
3720 		case ID_LED_OFF1_DEF2:
3721 		case ID_LED_OFF1_ON2:
3722 		case ID_LED_OFF1_OFF2:
3723 			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3724 			mac->ledctl_mode1 |= (ledctl_off << shift);
3731 		case ID_LED_DEF1_ON2:
3732 		case ID_LED_ON1_ON2:
3733 		case ID_LED_OFF1_ON2:
3734 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3735 			mac->ledctl_mode2 |= (ledctl_on << shift);
3737 		case ID_LED_DEF1_OFF2:
3738 		case ID_LED_ON1_OFF2:
3739 		case ID_LED_OFF1_OFF2:
3740 			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3741 			mac->ledctl_mode2 |= (ledctl_off << shift);
3749 	return E1000_SUCCESS;
3753  * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3754  * @hw: pointer to the HW structure
3756  * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
3757  * register, so the bus width is hard coded.
3759 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3761 	struct e1000_bus_info *bus = &hw->bus;
3764 	DEBUGFUNC("e1000_get_bus_info_ich8lan");
3766 	ret_val = e1000_get_bus_info_pcie_generic(hw);
3768 	/* ICH devices are "PCI Express"-ish.  They have
3769 	 * a configuration space, but do not contain
3770 	 * PCI Express Capability registers, so bus width
3771 	 * must be hardcoded.
3773 	if (bus->width == e1000_bus_width_unknown)
3774 		bus->width = e1000_bus_width_pcie_x1;
3780  * e1000_reset_hw_ich8lan - Reset the hardware
3781  * @hw: pointer to the HW structure
3783  * Does a full reset of the hardware which includes a reset of the PHY and
3786 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3788 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3793 	DEBUGFUNC("e1000_reset_hw_ich8lan");
3795 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
3796 	 * on the last TLP read/write transaction when MAC is reset.
3798 	ret_val = e1000_disable_pcie_master_generic(hw);
3800 		DEBUGOUT("PCI-E Master disable polling has failed.\n");
3802 	DEBUGOUT("Masking off all interrupts\n");
3803 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3805 	/* Disable the Transmit and Receive units.  Then delay to allow
3806 	 * any pending transactions to complete before we hit the MAC
3807 	 * with the global reset.
3809 	E1000_WRITE_REG(hw, E1000_RCTL, 0);
3810 	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3811 	E1000_WRITE_FLUSH(hw);
3815 	/* Workaround for ICH8 bit corruption issue in FIFO memory */
3816 	if (hw->mac.type == e1000_ich8lan) {
3817 		/* Set Tx and Rx buffer allocation to 8k apiece. */
3818 		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3819 		/* Set Packet Buffer Size to 16k. */
3820 		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
	/* Save the NVM's K1 setting now so it can be re-applied after the
	 * reset wipes the current configuration.
	 */
3823 	if (hw->mac.type == e1000_pchlan) {
3824 		/* Save the NVM K1 bit setting*/
3825 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
3829 		if (kum_cfg & E1000_NVM_K1_ENABLE)
3830 			dev_spec->nvm_k1_enabled = TRUE;
3832 			dev_spec->nvm_k1_enabled = FALSE;
3835 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3837 	if (!hw->phy.ops.check_reset_block(hw)) {
3838 		/* Full-chip reset requires MAC and PHY reset at the same
3839 		 * time to make sure the interface between MAC and the
3840 		 * external PHY is reset.
3842 		ctrl |= E1000_CTRL_PHY_RST;
3844 		/* Gate automatic PHY configuration by hardware on
3847 		if ((hw->mac.type == e1000_pch2lan) &&
3848 		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3849 			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3851 	ret_val = e1000_acquire_swflag_ich8lan(hw);
3852 	DEBUGOUT("Issuing a global reset to ich8lan\n");
3853 	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3854 	/* cannot issue a flush here because it hangs the hardware */
3857 	/* Set Phy Config Counter to 50msec */
3858 	if (hw->mac.type == e1000_pch2lan) {
3859 		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
3860 		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
3861 		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
3862 		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
3866 	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
	/* Only run post-PHY-reset configuration if the PHY was actually
	 * reset along with the MAC (i.e. reset was not blocked above).
	 */
3868 	if (ctrl & E1000_CTRL_PHY_RST) {
3869 		ret_val = hw->phy.ops.get_cfg_done(hw);
3873 		ret_val = e1000_post_phy_reset_ich8lan(hw);
3878 	/* For PCH, this write will make sure that any noise
3879 	 * will be detected as a CRC error and be dropped rather than show up
3880 	 * as a bad packet to the DMA engine.
3882 	if (hw->mac.type == e1000_pchlan)
3883 		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
	/* Mask all interrupts again and clear any that are already pending. */
3885 	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3886 	E1000_READ_REG(hw, E1000_ICR);
3888 	reg = E1000_READ_REG(hw, E1000_KABGTXD);
3889 	reg |= E1000_KABGTXD_BGSQLBIAS;
3890 	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
3892 	return E1000_SUCCESS;
3896  * e1000_init_hw_ich8lan - Initialize the hardware
3897  * @hw: pointer to the HW structure
3899  * Prepares the hardware for transmit and receive by doing the following:
3900  *  - initialize hardware bits
3901  *  - initialize LED identification
3902  *  - setup receive address registers
3903  *  - setup flow control
3904  *  - setup transmit descriptors
3905  *  - clear statistics
3907 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3909 	struct e1000_mac_info *mac = &hw->mac;
3910 	u32 ctrl_ext, txdctl, snoop;
3914 	DEBUGFUNC("e1000_init_hw_ich8lan");
3916 	e1000_initialize_hw_bits_ich8lan(hw);
3918 	/* Initialize identification LED */
3919 	ret_val = mac->ops.id_led_init(hw);
3920 	/* An error is not fatal and we should not stop init due to this */
3922 		DEBUGOUT("Error initializing identification LED\n");
3924 	/* Setup the receive address. */
3925 	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3927 	/* Zero out the Multicast HASH table */
3928 	DEBUGOUT("Zeroing the MTA\n");
3929 	for (i = 0; i < mac->mta_reg_count; i++)
3930 		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3932 	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
3933 	 * the ME.  Disable wakeup by clearing the host wakeup bit.
3934 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3936 	if (hw->phy.type == e1000_phy_82578) {
3937 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
3938 		i &= ~BM_WUC_HOST_WU_BIT;
3939 		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
3940 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
3945 	/* Setup link and flow control */
3946 	ret_val = mac->ops.setup_link(hw);
3948 	/* Set the transmit descriptor write-back policy for both queues */
3949 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3950 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3951 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3952 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3953 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3954 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
3955 	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3956 	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3957 		 E1000_TXDCTL_FULL_TX_DESC_WB;
3958 	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3959 		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3960 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3962 	/* ICH8 has opposite polarity of no_snoop bits.
3963 	 * By default, we should use snoop behavior.
3965 	if (mac->type == e1000_ich8lan)
3966 		snoop = PCIE_ICH8_SNOOP_ALL;
	/* Non-ICH8: clear all no-snoop bits (inverted-polarity mask). */
3968 		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3969 	e1000_set_pcie_no_snoop_generic(hw, snoop);
3971 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3972 	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3973 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3975 	/* Clear all of the statistics registers (clear on read).  It is
3976 	 * important that we do this after we have tried to establish link
3977 	 * because the symbol error count will increment wildly if there
3980 	e1000_clear_hw_cntrs_ich8lan(hw);
3986  * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3987  * @hw: pointer to the HW structure
3989  * Sets/Clears required hardware bits necessary for correctly setting up the
3990  * hardware for transmit and receive.
3992 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3996 	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3998 	/* Extended Device Control */
3999 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4001 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
4002 	if (hw->mac.type >= e1000_pchlan)
4003 		reg |= E1000_CTRL_EXT_PHYPDEN;
4004 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4006 	/* Transmit Descriptor Control 0 */
4007 	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4009 	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4011 	/* Transmit Descriptor Control 1 */
4012 	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4014 	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4016 	/* Transmit Arbitration Control 0 */
	/* NOTE(review): TARC bit positions below are silicon-specific
	 * (per Intel shared code); meanings are not documented here.
	 */
4017 	reg = E1000_READ_REG(hw, E1000_TARC(0));
4018 	if (hw->mac.type == e1000_ich8lan)
4019 		reg |= (1 << 28) | (1 << 29);
4020 	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4021 	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4023 	/* Transmit Arbitration Control 1 */
4024 	reg = E1000_READ_REG(hw, E1000_TARC(1));
4025 	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4029 	reg |= (1 << 24) | (1 << 26) | (1 << 30);
4030 	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4033 	if (hw->mac.type == e1000_ich8lan) {
4034 		reg = E1000_READ_REG(hw, E1000_STATUS);
4036 		E1000_WRITE_REG(hw, E1000_STATUS, reg);
4039 	/* work-around descriptor data corruption issue during nfs v2 udp
4040 	 * traffic, just disable the nfs filtering capability
4042 	reg = E1000_READ_REG(hw, E1000_RFCTL);
4043 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4044 	/* Disable IPv6 extension header parsing because some malformed
4045 	 * IPv6 headers can hang the Rx.
4047 	if (hw->mac.type == e1000_ich8lan)
4048 		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4049 	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4051 	/* Enable ECC on Lynxpoint */
4052 	if (hw->mac.type == e1000_pch_lpt) {
4053 		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4054 		reg |= E1000_PBECCSTS_ECC_ENABLE;
4055 		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
	/* Enable MAC handling of ECC errors (MEHE). */
4057 		reg = E1000_READ_REG(hw, E1000_CTRL);
4058 		reg |= E1000_CTRL_MEHE;
4059 		E1000_WRITE_REG(hw, E1000_CTRL, reg);
4066  * e1000_setup_link_ich8lan - Setup flow control and link settings
4067  * @hw: pointer to the HW structure
4069  * Determines which flow control settings to use, then configures flow
4070  * control.  Calls the appropriate media-specific link configuration
4071  * function.  Assuming the adapter has a valid link partner, a valid link
4072  * should be established.  Assumes the hardware has previously been reset
4073  * and the transmitter and receiver are not enabled.
4075 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4079 	DEBUGFUNC("e1000_setup_link_ich8lan");
	/* If the PHY is reset-blocked (e.g. owned by manageability FW),
	 * leave link configuration alone and report success.
	 */
4081 	if (hw->phy.ops.check_reset_block(hw))
4082 		return E1000_SUCCESS;
4084 	/* ICH parts do not have a word in the NVM to determine
4085 	 * the default flow control setting, so we explicitly
4088 	if (hw->fc.requested_mode == e1000_fc_default)
4089 		hw->fc.requested_mode = e1000_fc_full;
4091 	/* Save off the requested flow control mode for use later.  Depending
4092 	 * on the link partner's capabilities, we may or may not use this mode.
4094 	hw->fc.current_mode = hw->fc.requested_mode;
4096 	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4097 		  hw->fc.current_mode);
4099 	/* Continue to configure the copper link. */
4100 	ret_val = hw->mac.ops.setup_physical_interface(hw);
4104 	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
	/* PCH-class PHYs keep the flow-control refresh timer in a PHY
	 * register rather than only in the MAC.
	 */
4105 	if ((hw->phy.type == e1000_phy_82578) ||
4106 	    (hw->phy.type == e1000_phy_82579) ||
4107 	    (hw->phy.type == e1000_phy_i217) ||
4108 	    (hw->phy.type == e1000_phy_82577)) {
4109 		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4111 		ret_val = hw->phy.ops.write_reg(hw,
4112 					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
4118 	return e1000_set_fc_watermarks_generic(hw);
4122  * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4123  * @hw: pointer to the HW structure
4125  * Configures the kumeran interface to the PHY to wait the appropriate time
4126  * when polling the PHY, then call the generic setup_copper_link to finish
4127  * configuring the copper link.
4129 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4135 	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
	/* Force link up at the MAC and let speed/duplex be negotiated. */
4137 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4138 	ctrl |= E1000_CTRL_SLU;
4139 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4140 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4142 	/* Set the mac to wait the maximum time between each iteration
4143 	 * and increase the max iterations when polling the phy;
4144 	 * this fixes erroneous timeouts at 10Mbps.
4146 	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4150 	ret_val = e1000_read_kmrn_reg_generic(hw,
4151 					      E1000_KMRNCTRLSTA_INBAND_PARAM,
4156 	ret_val = e1000_write_kmrn_reg_generic(hw,
4157 					       E1000_KMRNCTRLSTA_INBAND_PARAM,
	/* PHY-specific setup, selected by the attached PHY type. */
4162 	switch (hw->phy.type) {
4163 	case e1000_phy_igp_3:
4164 		ret_val = e1000_copper_link_setup_igp(hw);
4169 	case e1000_phy_82578:
4170 		ret_val = e1000_copper_link_setup_m88(hw);
4174 	case e1000_phy_82577:
4175 	case e1000_phy_82579:
4176 		ret_val = e1000_copper_link_setup_82577(hw);
	/* IFE PHY: program MDI/MDI-X mode directly in the PHY register. */
4181 		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4186 		reg_data &= ~IFE_PMC_AUTO_MDIX;
4188 		switch (hw->phy.mdix) {
4190 			reg_data &= ~IFE_PMC_FORCE_MDIX;
4193 			reg_data |= IFE_PMC_FORCE_MDIX;
4197 			reg_data |= IFE_PMC_AUTO_MDIX;
4200 		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4209 	return e1000_setup_copper_link_generic(hw);
4213  * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4214  * @hw: pointer to the HW structure
4216  * Calls the PHY specific link setup function and then calls the
4217  * generic setup_copper_link to finish configuring the link for
4218  * Lynxpoint PCH devices
4220 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4225 	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
	/* Force link up at the MAC and clear forced speed/duplex so the
	 * PHY can autonegotiate.
	 */
4227 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4228 	ctrl |= E1000_CTRL_SLU;
4229 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4230 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4232 	ret_val = e1000_copper_link_setup_82577(hw);
4236 	return e1000_setup_copper_link_generic(hw);
4240  * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4241  * @hw: pointer to the HW structure
4242  * @speed: pointer to store current link speed
4243  * @duplex: pointer to store the current link duplex
4245  * Calls the generic get_speed_and_duplex to retrieve the current link
4246  * information and then calls the Kumeran lock loss workaround for links at
4249 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4254 	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4256 	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
	/* Workaround applies only to ICH8 + IGP3 PHY at gigabit speed. */
4260 	if ((hw->mac.type == e1000_ich8lan) &&
4261 	    (hw->phy.type == e1000_phy_igp_3) &&
4262 	    (*speed == SPEED_1000)) {
4263 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4270  * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4271  * @hw: pointer to the HW structure
4273  * Work-around for 82566 Kumeran PCS lock loss:
4274  * On link status change (i.e. PCI reset, speed change) and link is up and
4276  *  0) if workaround is optionally disabled do nothing
4277  *  1) wait 1ms for Kumeran link to come up
4278  *  2) check Kumeran Diagnostic register PCS lock loss bit
4279  *  3) if not set the link is locked (all is good), otherwise...
4281  *  5) repeat up to 10 times
4282  *  Note: this is only called for IGP3 copper when speed is 1gb.
4284 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4286 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4292 	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4294 	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4295 		return E1000_SUCCESS;
4297 	/* Make sure link is up before proceeding.  If not just return.
4298 	 * Attempting this while link is negotiating fouled up link
4301 	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4303 		return E1000_SUCCESS;
4305 	for (i = 0; i < 10; i++) {
4306 		/* read once to clear */
4307 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4310 		/* and again to get new status */
4311 		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4315 		/* check for PCS lock */
4316 		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4317 			return E1000_SUCCESS;
4319 		/* Issue PHY reset */
4320 		hw->phy.ops.reset(hw);
	/* All retries exhausted: give up on PCS lock and drop to
	 * non-gigabit operation by disabling GbE negotiation.
	 */
4323 	/* Disable GigE link negotiation */
4324 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4325 	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4326 		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4327 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4329 	/* Call gig speed drop workaround on Gig disable before accessing
4332 	e1000_gig_downshift_workaround_ich8lan(hw);
4334 	/* unable to acquire PCS lock */
4335 	return -E1000_ERR_PHY;
4339  * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4340  * @hw: pointer to the HW structure
4341  * @state: boolean value used to set the current Kumeran workaround state
4343  * If ICH8, set the current Kumeran workaround state (enabled - TRUE
4344  * /disabled - FALSE).
4346 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4349 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4351 	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
	/* Silently ignore the request on non-ICH8 parts. */
4353 	if (hw->mac.type != e1000_ich8lan) {
4354 		DEBUGOUT("Workaround applies to ICH8 only.\n");
4358 	dev_spec->kmrn_lock_loss_workaround_enabled = state;
4364  * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4365  * @hw: pointer to the HW structure
4367  * Workaround for 82566 power-down on D3 entry:
4368  *   1) disable gigabit link
4369  *   2) write VR power-down enable
4371  *   Continue if successful, else issue LCD reset and repeat
4373 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4379 	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
	/* Workaround is specific to the IGP3 PHY. */
4381 	if (hw->phy.type != e1000_phy_igp_3)
4384 	/* Try the workaround twice (if needed) */
4387 		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4388 		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4389 			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4390 		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4392 		/* Call gig speed drop workaround on Gig disable before
4393 		 * accessing any PHY registers
4395 		if (hw->mac.type == e1000_ich8lan)
4396 			e1000_gig_downshift_workaround_ich8lan(hw);
4398 		/* Write VR power-down enable */
4399 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4400 		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4401 		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4402 				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4404 		/* Read it back and test */
4405 		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4406 		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
	/* Stop when shutdown mode latched, or after the retry attempt. */
4407 		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4410 		/* Issue PHY reset and repeat at most one more time */
4411 		reg = E1000_READ_REG(hw, E1000_CTRL);
4412 		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4418  * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4419  * @hw: pointer to the HW structure
4421  * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
4422  * LPLU, Gig disable, MDIC PHY reset):
4423  *   1) Set Kumeran Near-end loopback
4424  *   2) Clear Kumeran Near-end loopback
4425  * Should only be called for ICH8[m] devices with any 1G Phy.
4427 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4432 	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
	/* Applies only to ICH8 with a gigabit-capable (non-IFE) PHY. */
4434 	if ((hw->mac.type != e1000_ich8lan) ||
4435 	    (hw->phy.type == e1000_phy_ife))
4438 	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
	/* Pulse Kumeran near-end loopback: set it, then clear it. */
4442 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4443 	ret_val = e1000_write_kmrn_reg_generic(hw,
4444 					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
4448 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4449 	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4454  * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4455  * @hw: pointer to the HW structure
4457  * During S0 to Sx transition, it is possible the link remains at gig
4458  * instead of negotiating to a lower speed.  Before going to Sx, set
4459  * 'Gig Disable' to force link speed negotiation to a lower speed based on
4460  * the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4461  * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4462  * needs to be written.
4463  * Parts that support (and are linked to a partner which support) EEE in
4464  * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4465  * than 10Mbps w/o EEE.
4467 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4469 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4473 	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4475 	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4476 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4478 	if (hw->phy.type == e1000_phy_i217) {
4479 		u16 phy_reg, device_id = hw->device_id;
	/* I218 parts: stop requesting the PLL clock before Sx. */
4481 		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4482 		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
4483 			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4485 			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4486 					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4489 		ret_val = hw->phy.ops.acquire(hw);
4493 		if (!dev_spec->eee_disable) {
4497 			e1000_read_emi_reg_locked(hw,
4498 						  I217_EEE_ADVERTISEMENT,
4503 			/* Disable LPLU if both link partners support 100BaseT
4504 			 * EEE and 100Full is advertised on both ends of the
4507 			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4508 			    (dev_spec->eee_lp_ability &
4509 			     I82579_EEE_100_SUPPORTED) &&
4510 			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
4511 				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4512 					      E1000_PHY_CTRL_NOND0A_LPLU);
4515 		/* For i217 Intel Rapid Start Technology support,
4516 		 * when the system is going into Sx and no manageability engine
4517 		 * is present, the driver must configure proxy to reset only on
4518 		 * power good.  LPI (Low Power Idle) state must also reset only
4519 		 * on power good, as well as the MTA (Multicast table array).
4520 		 * The SMBus release must also be disabled on LCD reset.
4522 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
4523 		      E1000_ICH_FWSM_FW_VALID)) {
4524 			/* Enable proxy to reset only on power good. */
4525 			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4527 			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4528 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4531 			/* Set bit enable LPI (EEE) to reset only on
4534 			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4535 			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4536 			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4538 			/* Disable the SMB release on LCD reset. */
4539 			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4540 			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4541 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4544 		/* Enable MTA to reset for Intel Rapid Start Technology
4547 		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4548 		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4549 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4552 		hw->phy.ops.release(hw);
	/* Commit the accumulated Gig-disable/LPLU bits in one write. */
4555 	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4557 	if (hw->mac.type == e1000_ich8lan)
4558 		e1000_gig_downshift_workaround_ich8lan(hw);
4560 	if (hw->mac.type >= e1000_pchlan) {
4561 		e1000_oem_bits_config_ich8lan(hw, FALSE);
4563 		/* Reset PHY to activate OEM bits on 82577/8 */
4564 		if (hw->mac.type == e1000_pchlan)
4565 			e1000_phy_hw_reset_generic(hw);
4567 		ret_val = hw->phy.ops.acquire(hw);
4570 		e1000_write_smbus_addr(hw);
4571 		hw->phy.ops.release(hw);
4578  * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4579  * @hw: pointer to the HW structure
4581  * During Sx to S0 transitions on non-managed devices or managed devices
4582  * on which PHY resets are not blocked, if the PHY registers cannot be
4583  * accessed properly by the s/w toggle the LANPHYPC value to power cycle
4585  * On i217, setup Intel Rapid Start Technology.
4587 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4591 	DEBUGFUNC("e1000_resume_workarounds_pchlan");
	/* Nothing to do on parts older than PCH2. */
4593 	if (hw->mac.type < e1000_pch2lan)
4596 	ret_val = e1000_init_phy_workarounds_pchlan(hw);
4598 		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
4602 	/* For i217 Intel Rapid Start Technology support when the system
4603 	 * is transitioning from Sx and no manageability engine is present
4604 	 * configure SMBus to restore on reset, disable proxy, and enable
4605 	 * the reset on MTA (Multicast table array).
4607 	if (hw->phy.type == e1000_phy_i217) {
4610 		ret_val = hw->phy.ops.acquire(hw);
4612 			DEBUGOUT("Failed to setup iRST\n");
4616 		if (!(E1000_READ_REG(hw, E1000_FWSM) &
4617 		      E1000_ICH_FWSM_FW_VALID)) {
4618 			/* Restore clear on SMB if no manageability engine
4621 			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
4625 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
4626 			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
	/* Disable the Rapid Start proxy entirely on resume. */
4629 			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
4631 		/* Enable reset on MTA */
4632 		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
4636 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
4637 		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4640 			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
4641 		hw->phy.ops.release(hw);
4646  * e1000_cleanup_led_ich8lan - Restore the default LED operation
4647  * @hw: pointer to the HW structure
4649  * Return the LED back to the default configuration.
4651 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
4653 	DEBUGFUNC("e1000_cleanup_led_ich8lan");
	/* IFE PHYs drive LEDs from a PHY register, others from LEDCTL. */
4655 	if (hw->phy.type == e1000_phy_ife)
4656 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4659 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
4660 	return E1000_SUCCESS;
4664  * e1000_led_on_ich8lan - Turn LEDs on
4665  * @hw: pointer to the HW structure
4669 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
4671 	DEBUGFUNC("e1000_led_on_ich8lan");
	/* IFE PHYs: force LEDs on through the PHY probe-mode register. */
4673 	if (hw->phy.type == e1000_phy_ife)
4674 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4675 				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
4677 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
4678 	return E1000_SUCCESS;
4682  * e1000_led_off_ich8lan - Turn LEDs off
4683  * @hw: pointer to the HW structure
4685  * Turn off the LEDs.
4687 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
4689 	DEBUGFUNC("e1000_led_off_ich8lan");
	/* IFE PHYs: force LEDs off through the PHY probe-mode register. */
4691 	if (hw->phy.type == e1000_phy_ife)
4692 		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4693 			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
4695 	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
4696 	return E1000_SUCCESS;
4700  * e1000_setup_led_pchlan - Configures SW controllable LED
4701  * @hw: pointer to the HW structure
4703  * This prepares the SW controllable LED for use.
4705 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
4707 	DEBUGFUNC("e1000_setup_led_pchlan");
	/* PCH LEDs are programmed via the PHY HV_LED_CONFIG register. */
4709 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4710 				     (u16)hw->mac.ledctl_mode1);
4714  * e1000_cleanup_led_pchlan - Restore the default LED operation
4715  * @hw: pointer to the HW structure
4717  * Return the LED back to the default configuration.
4719 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4721 	DEBUGFUNC("e1000_cleanup_led_pchlan");
	/* Write the saved power-on default back to the PHY LED register. */
4723 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4724 				     (u16)hw->mac.ledctl_default);
4728  * e1000_led_on_pchlan - Turn LEDs on
4729  * @hw: pointer to the HW structure
4733 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4735 	u16 data = (u16)hw->mac.ledctl_mode2;
4738 	DEBUGFUNC("e1000_led_on_pchlan");
4740 	/* If no link, then turn LED on by setting the invert bit
4741 	 * for each LED that's mode is "link_up" in ledctl_mode2.
4743 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
	/* Three LEDs, 5 config bits each, packed into the data word. */
4744 		for (i = 0; i < 3; i++) {
4745 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4746 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4747 			    E1000_LEDCTL_MODE_LINK_UP)
4749 			if (led & E1000_PHY_LED0_IVRT)
4750 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4752 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4756 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4760  * e1000_led_off_pchlan - Turn LEDs off
4761  * @hw: pointer to the HW structure
4763  * Turn off the LEDs.
4765 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4767 	u16 data = (u16)hw->mac.ledctl_mode1;
4770 	DEBUGFUNC("e1000_led_off_pchlan");
4772 	/* If no link, then turn LED off by clearing the invert bit
4773 	 * for each LED that's mode is "link_up" in ledctl_mode1.
4775 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
	/* Three LEDs, 5 config bits each, packed into the data word. */
4776 		for (i = 0; i < 3; i++) {
4777 			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4778 			if ((led & E1000_PHY_LED0_MODE_MASK) !=
4779 			    E1000_LEDCTL_MODE_LINK_UP)
4781 			if (led & E1000_PHY_LED0_IVRT)
4782 				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4784 				data |= (E1000_PHY_LED0_IVRT << (i * 5));
4788 	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4792  * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4793  * @hw: pointer to the HW structure
4795  * Read appropriate register for the config done bit for completion status
4796  * and configure the PHY through s/w for EEPROM-less parts.
4798  * NOTE: some silicon which is EEPROM-less will fail trying to read the
4799  * config done bit, so only an error is logged and continues.  If we were
4800  * to return with error, EEPROM-less silicon would not be able to be reset
4803 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4805 	s32 ret_val = E1000_SUCCESS;
4809 	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4811 	e1000_get_cfg_done_generic(hw);
4813 	/* Wait for indication from h/w that it has completed basic config */
4814 	if (hw->mac.type >= e1000_ich10lan) {
4815 		e1000_lan_init_done_ich8lan(hw);
4817 		ret_val = e1000_get_auto_rd_done_generic(hw);
4819 			/* When auto config read does not complete, do not
4820 			 * return with an error.  This can happen in situations
4821 			 * where there is no eeprom and prevents getting link.
4823 			DEBUGOUT("Auto Read Done did not complete\n");
4824 			ret_val = E1000_SUCCESS;
4828 	/* Clear PHY Reset Asserted bit */
4829 	status = E1000_READ_REG(hw, E1000_STATUS);
4830 	if (status & E1000_STATUS_PHYRA)
4831 		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4833 		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4835 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
4836 	if (hw->mac.type <= e1000_ich9lan) {
4837 		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
4838 		    (hw->phy.type == e1000_phy_igp_3)) {
4839 			e1000_phy_init_script_igp3(hw);
	/* ICH10+: no EECD presence bit; infer EEPROM presence from
	 * whether a valid NVM bank can be detected.
	 */
4842 		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4843 			/* Maybe we should do a basic PHY config */
4844 			DEBUGOUT("EEPROM not present\n");
4845 			ret_val = -E1000_ERR_CONFIG;
4853  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4854  * @hw: pointer to the HW structure
4856  * In the case of a PHY power down to save power, or to turn off link during a
4857  * driver unload, or wake on lan is not enabled, remove the link.
4859 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
	/* Keep the PHY powered if manageability is active or the PHY is
	 * reset-blocked; otherwise it is safe to power it down.
	 */
4861 	/* If the management interface is not enabled, then power down */
4862 	if (!(hw->mac.ops.check_mng_mode(hw) ||
4863 	      hw->phy.ops.check_reset_block(hw)))
4864 		e1000_power_down_phy_copper(hw);
/**
 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 * @hw: pointer to the HW structure
 *
 * Clears hardware counters specific to the silicon family and calls
 * clear_hw_cntrs_generic to clear all general purpose counters.
 **/
4876 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4881 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4883 e1000_clear_hw_cntrs_base_generic(hw);
4885 E1000_READ_REG(hw, E1000_ALGNERRC);
4886 E1000_READ_REG(hw, E1000_RXERRC);
4887 E1000_READ_REG(hw, E1000_TNCRS);
4888 E1000_READ_REG(hw, E1000_CEXTERR);
4889 E1000_READ_REG(hw, E1000_TSCTC);
4890 E1000_READ_REG(hw, E1000_TSCTFC);
4892 E1000_READ_REG(hw, E1000_MGTPRC);
4893 E1000_READ_REG(hw, E1000_MGTPDC);
4894 E1000_READ_REG(hw, E1000_MGTPTC);
4896 E1000_READ_REG(hw, E1000_IAC);
4897 E1000_READ_REG(hw, E1000_ICRXOC);
4899 /* Clear PHY statistics registers */
4900 if ((hw->phy.type == e1000_phy_82578) ||
4901 (hw->phy.type == e1000_phy_82579) ||
4902 (hw->phy.type == e1000_phy_i217) ||
4903 (hw->phy.type == e1000_phy_82577)) {
4904 ret_val = hw->phy.ops.acquire(hw);
4907 ret_val = hw->phy.ops.set_page(hw,
4908 HV_STATS_PAGE << IGP_PAGE_SHIFT);
4911 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4912 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4913 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4914 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4915 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4916 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4917 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4918 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4919 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4920 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4921 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4922 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4923 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4924 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4926 hw->phy.ops.release(hw);