/******************************************************************************

  Copyright (c) 2001-2010, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/* Parts supported by this file:
 *
 * 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 */
65 #include "e1000_api.h"
67 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
68 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
69 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
70 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
71 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
72 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
73 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
74 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
75 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
76 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
77 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
78 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
79 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
80 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
81 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
83 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
85 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
86 u16 words, u16 *data);
87 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
88 u16 words, u16 *data);
89 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
90 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
91 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
93 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
94 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
95 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
96 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
97 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
98 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
99 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
100 u16 *speed, u16 *duplex);
101 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
102 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
103 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
104 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
105 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
106 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
107 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
108 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
109 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
110 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
111 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
112 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
113 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
114 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
115 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
116 u32 offset, u8 *data);
117 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
119 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
120 u32 offset, u16 *data);
121 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
122 u32 offset, u8 byte);
123 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
124 u32 offset, u8 data);
125 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
127 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
128 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
129 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
130 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
131 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
132 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
133 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
134 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
136 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
137 /* Offset 04h HSFSTS */
138 union ich8_hws_flash_status {
140 u16 flcdone :1; /* bit 0 Flash Cycle Done */
141 u16 flcerr :1; /* bit 1 Flash Cycle Error */
142 u16 dael :1; /* bit 2 Direct Access error Log */
143 u16 berasesz :2; /* bit 4:3 Sector Erase Size */
144 u16 flcinprog :1; /* bit 5 flash cycle in Progress */
145 u16 reserved1 :2; /* bit 13:6 Reserved */
146 u16 reserved2 :6; /* bit 13:6 Reserved */
147 u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
148 u16 flockdn :1; /* bit 15 Flash Config Lock-Down */
153 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
154 /* Offset 06h FLCTL */
155 union ich8_hws_flash_ctrl {
156 struct ich8_hsflctl {
157 u16 flcgo :1; /* 0 Flash Cycle Go */
158 u16 flcycle :2; /* 2:1 Flash Cycle */
159 u16 reserved :5; /* 7:3 Reserved */
160 u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
161 u16 flockdn :6; /* 15:10 Reserved */
166 /* ICH Flash Region Access Permissions */
167 union ich8_hws_flash_regacc {
169 u32 grra :8; /* 0:7 GbE region Read Access */
170 u32 grwa :8; /* 8:15 GbE region Write Access */
171 u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
172 u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
178 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
179 * @hw: pointer to the HW structure
181 * Initialize family-specific PHY parameters and function pointers.
183 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
185 struct e1000_phy_info *phy = &hw->phy;
187 s32 ret_val = E1000_SUCCESS;
189 DEBUGFUNC("e1000_init_phy_params_pchlan");
192 phy->reset_delay_us = 100;
194 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
195 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
196 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
197 phy->ops.read_reg = e1000_read_phy_reg_hv;
198 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
199 phy->ops.release = e1000_release_swflag_ich8lan;
200 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
201 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
202 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
203 phy->ops.write_reg = e1000_write_phy_reg_hv;
204 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
205 phy->ops.power_up = e1000_power_up_phy_copper;
206 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
207 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
210 * The MAC-PHY interconnect may still be in SMBus mode
211 * after Sx->S0. If the manageability engine (ME) is
212 * disabled, then toggle the LANPHYPC Value bit to force
213 * the interconnect to PCIe mode.
215 fwsm = E1000_READ_REG(hw, E1000_FWSM);
216 if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
217 ctrl = E1000_READ_REG(hw, E1000_CTRL);
218 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
219 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
220 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
222 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
223 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
227 * Gate automatic PHY configuration by hardware on
230 if (hw->mac.type == e1000_pch2lan)
231 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
235 * Reset the PHY before any acccess to it. Doing so, ensures that
236 * the PHY is in a known good state before we read/write PHY registers.
237 * The generic reset is sufficient here, because we haven't determined
240 ret_val = e1000_phy_hw_reset_generic(hw);
244 /* Ungate automatic PHY configuration on non-managed 82579 */
245 if ((hw->mac.type == e1000_pch2lan) &&
246 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
248 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
251 phy->id = e1000_phy_unknown;
252 switch (hw->mac.type) {
254 ret_val = e1000_get_phy_id(hw);
257 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
262 * In case the PHY needs to be in mdio slow mode,
263 * set slow mode and try to get the PHY id again.
265 ret_val = e1000_set_mdio_slow_mode_hv(hw);
268 ret_val = e1000_get_phy_id(hw);
273 phy->type = e1000_get_phy_type_from_id(phy->id);
276 case e1000_phy_82577:
277 case e1000_phy_82579:
278 phy->ops.check_polarity = e1000_check_polarity_82577;
279 phy->ops.force_speed_duplex =
280 e1000_phy_force_speed_duplex_82577;
281 phy->ops.get_cable_length = e1000_get_cable_length_82577;
282 phy->ops.get_info = e1000_get_phy_info_82577;
283 phy->ops.commit = e1000_phy_sw_reset_generic;
285 case e1000_phy_82578:
286 phy->ops.check_polarity = e1000_check_polarity_m88;
287 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
288 phy->ops.get_cable_length = e1000_get_cable_length_m88;
289 phy->ops.get_info = e1000_get_phy_info_m88;
292 ret_val = -E1000_ERR_PHY;
301 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
302 * @hw: pointer to the HW structure
304 * Initialize family-specific PHY parameters and function pointers.
306 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
308 struct e1000_phy_info *phy = &hw->phy;
309 s32 ret_val = E1000_SUCCESS;
312 DEBUGFUNC("e1000_init_phy_params_ich8lan");
315 phy->reset_delay_us = 100;
317 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
318 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
319 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
320 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
321 phy->ops.read_reg = e1000_read_phy_reg_igp;
322 phy->ops.release = e1000_release_swflag_ich8lan;
323 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
324 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
325 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
326 phy->ops.write_reg = e1000_write_phy_reg_igp;
327 phy->ops.power_up = e1000_power_up_phy_copper;
328 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
331 * We may need to do this twice - once for IGP and if that fails,
332 * we'll set BM func pointers and try again
334 ret_val = e1000_determine_phy_address(hw);
336 phy->ops.write_reg = e1000_write_phy_reg_bm;
337 phy->ops.read_reg = e1000_read_phy_reg_bm;
338 ret_val = e1000_determine_phy_address(hw);
340 DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
346 while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
349 ret_val = e1000_get_phy_id(hw);
356 case IGP03E1000_E_PHY_ID:
357 phy->type = e1000_phy_igp_3;
358 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
359 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
360 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
361 phy->ops.get_info = e1000_get_phy_info_igp;
362 phy->ops.check_polarity = e1000_check_polarity_igp;
363 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
366 case IFE_PLUS_E_PHY_ID:
368 phy->type = e1000_phy_ife;
369 phy->autoneg_mask = E1000_ALL_NOT_GIG;
370 phy->ops.get_info = e1000_get_phy_info_ife;
371 phy->ops.check_polarity = e1000_check_polarity_ife;
372 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
374 case BME1000_E_PHY_ID:
375 phy->type = e1000_phy_bm;
376 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
377 phy->ops.read_reg = e1000_read_phy_reg_bm;
378 phy->ops.write_reg = e1000_write_phy_reg_bm;
379 phy->ops.commit = e1000_phy_sw_reset_generic;
380 phy->ops.get_info = e1000_get_phy_info_m88;
381 phy->ops.check_polarity = e1000_check_polarity_m88;
382 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
385 ret_val = -E1000_ERR_PHY;
394 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
395 * @hw: pointer to the HW structure
397 * Initialize family-specific NVM parameters and function
400 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
402 struct e1000_nvm_info *nvm = &hw->nvm;
403 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
404 u32 gfpreg, sector_base_addr, sector_end_addr;
405 s32 ret_val = E1000_SUCCESS;
408 DEBUGFUNC("e1000_init_nvm_params_ich8lan");
410 /* Can't read flash registers if the register set isn't mapped. */
411 if (!hw->flash_address) {
412 DEBUGOUT("ERROR: Flash registers not mapped\n");
413 ret_val = -E1000_ERR_CONFIG;
417 nvm->type = e1000_nvm_flash_sw;
419 gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
422 * sector_X_addr is a "sector"-aligned address (4096 bytes)
423 * Add 1 to sector_end_addr since this sector is included in
426 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
427 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
429 /* flash_base_addr is byte-aligned */
430 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
433 * find total size of the NVM, then cut in half since the total
434 * size represents two separate NVM banks.
436 nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
437 << FLASH_SECTOR_ADDR_SHIFT;
438 nvm->flash_bank_size /= 2;
439 /* Adjust to word count */
440 nvm->flash_bank_size /= sizeof(u16);
442 nvm->word_size = E1000_SHADOW_RAM_WORDS;
444 /* Clear shadow ram */
445 for (i = 0; i < nvm->word_size; i++) {
446 dev_spec->shadow_ram[i].modified = FALSE;
447 dev_spec->shadow_ram[i].value = 0xFFFF;
450 E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
451 E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
453 /* Function Pointers */
454 nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
455 nvm->ops.release = e1000_release_nvm_ich8lan;
456 nvm->ops.read = e1000_read_nvm_ich8lan;
457 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
458 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
459 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
460 nvm->ops.write = e1000_write_nvm_ich8lan;
467 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
468 * @hw: pointer to the HW structure
470 * Initialize family-specific MAC parameters and function
473 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
475 struct e1000_mac_info *mac = &hw->mac;
478 DEBUGFUNC("e1000_init_mac_params_ich8lan");
480 /* Set media type function pointer */
481 hw->phy.media_type = e1000_media_type_copper;
483 /* Set mta register count */
484 mac->mta_reg_count = 32;
485 /* Set rar entry count */
486 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
487 if (mac->type == e1000_ich8lan)
488 mac->rar_entry_count--;
489 /* Set if part includes ASF firmware */
490 mac->asf_firmware_present = TRUE;
492 mac->has_fwsm = TRUE;
493 /* ARC subsystem not supported */
494 mac->arc_subsystem_valid = FALSE;
495 /* Adaptive IFS supported */
496 mac->adaptive_ifs = TRUE;
498 /* Function pointers */
500 /* bus type/speed/width */
501 mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
503 mac->ops.set_lan_id = e1000_set_lan_id_single_port;
505 mac->ops.reset_hw = e1000_reset_hw_ich8lan;
506 /* hw initialization */
507 mac->ops.init_hw = e1000_init_hw_ich8lan;
509 mac->ops.setup_link = e1000_setup_link_ich8lan;
510 /* physical interface setup */
511 mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
513 mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
515 mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
516 /* multicast address update */
517 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
518 /* clear hardware counters */
519 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
526 /* check management mode */
527 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
529 mac->ops.id_led_init = e1000_id_led_init_generic;
531 mac->ops.blink_led = e1000_blink_led_generic;
533 mac->ops.setup_led = e1000_setup_led_generic;
535 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
536 /* turn on/off LED */
537 mac->ops.led_on = e1000_led_on_ich8lan;
538 mac->ops.led_off = e1000_led_off_ich8lan;
541 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
542 mac->ops.rar_set = e1000_rar_set_pch2lan;
545 /* save PCH revision_id */
546 e1000_read_pci_cfg(hw, 0x2, &pci_cfg);
547 hw->revision_id = (u8)(pci_cfg &= 0x000F);
548 /* check management mode */
549 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
551 mac->ops.id_led_init = e1000_id_led_init_pchlan;
553 mac->ops.setup_led = e1000_setup_led_pchlan;
555 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
556 /* turn on/off LED */
557 mac->ops.led_on = e1000_led_on_pchlan;
558 mac->ops.led_off = e1000_led_off_pchlan;
564 /* Enable PCS Lock-loss workaround for ICH8 */
565 if (mac->type == e1000_ich8lan)
566 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
568 /* Gate automatic PHY configuration by hardware on managed 82579 */
569 if ((mac->type == e1000_pch2lan) &&
570 (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
571 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
573 return E1000_SUCCESS;
577 * e1000_set_eee_pchlan - Enable/disable EEE support
578 * @hw: pointer to the HW structure
580 * Enable/disable EEE based on setting in dev_spec structure. The bits in
581 * the LPI Control register will remain set only if/when link is up.
583 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
585 s32 ret_val = E1000_SUCCESS;
588 DEBUGFUNC("e1000_set_eee_pchlan");
590 if (hw->phy.type != e1000_phy_82579)
593 ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
597 if (hw->dev_spec.ich8lan.eee_disable)
598 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
600 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
602 ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
608 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
609 * @hw: pointer to the HW structure
611 * Checks to see of the link status of the hardware has changed. If a
612 * change in link status has been detected, then we read the PHY registers
613 * to get the current speed/duplex if link exists.
615 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
617 struct e1000_mac_info *mac = &hw->mac;
621 DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
624 * We only want to go out to the PHY registers to see if Auto-Neg
625 * has completed and/or if our link status has changed. The
626 * get_link_status flag is set upon receiving a Link Status
627 * Change or Rx Sequence Error interrupt.
629 if (!mac->get_link_status) {
630 ret_val = E1000_SUCCESS;
635 * First we want to see if the MII Status Register reports
636 * link. If so, then we want to get the current speed/duplex
639 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
643 if (hw->mac.type == e1000_pchlan) {
644 ret_val = e1000_k1_gig_workaround_hv(hw, link);
650 goto out; /* No link detected */
652 mac->get_link_status = FALSE;
654 if (hw->phy.type == e1000_phy_82578) {
655 ret_val = e1000_link_stall_workaround_hv(hw);
660 if (hw->mac.type == e1000_pch2lan) {
661 ret_val = e1000_k1_workaround_lv(hw);
667 * Check if there was DownShift, must be checked
668 * immediately after link-up
670 e1000_check_downshift_generic(hw);
672 /* Enable/Disable EEE after link up */
673 ret_val = e1000_set_eee_pchlan(hw);
678 * If we are forcing speed/duplex, then we simply return since
679 * we have already determined whether we have link or not.
682 ret_val = -E1000_ERR_CONFIG;
687 * Auto-Neg is enabled. Auto Speed Detection takes care
688 * of MAC speed/duplex configuration. So we only need to
689 * configure Collision Distance in the MAC.
691 e1000_config_collision_dist_generic(hw);
694 * Configure Flow Control now that Auto-Neg has completed.
695 * First, we need to restore the desired flow control
696 * settings because we may have had to re-autoneg with a
697 * different link partner.
699 ret_val = e1000_config_fc_after_link_up_generic(hw);
701 DEBUGOUT("Error configuring flow control\n");
708 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
709 * @hw: pointer to the HW structure
711 * Initialize family-specific function pointers for PHY, MAC, and NVM.
713 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
715 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
717 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
718 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
719 switch (hw->mac.type) {
723 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
727 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
735 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
736 * @hw: pointer to the HW structure
738 * Acquires the mutex for performing NVM operations.
740 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
742 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
744 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
746 return E1000_SUCCESS;
750 * e1000_release_nvm_ich8lan - Release NVM mutex
751 * @hw: pointer to the HW structure
753 * Releases the mutex used while performing NVM operations.
755 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
757 DEBUGFUNC("e1000_release_nvm_ich8lan");
759 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
765 * e1000_acquire_swflag_ich8lan - Acquire software control flag
766 * @hw: pointer to the HW structure
768 * Acquires the software control flag for performing PHY and select
771 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
773 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
774 s32 ret_val = E1000_SUCCESS;
776 DEBUGFUNC("e1000_acquire_swflag_ich8lan");
778 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
781 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
782 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
790 DEBUGOUT("SW/FW/HW has locked the resource for too long.\n");
791 ret_val = -E1000_ERR_CONFIG;
795 timeout = SW_FLAG_TIMEOUT;
797 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
798 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
801 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
802 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
810 DEBUGOUT("Failed to acquire the semaphore.\n");
811 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
812 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
813 ret_val = -E1000_ERR_CONFIG;
819 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
825 * e1000_release_swflag_ich8lan - Release software control flag
826 * @hw: pointer to the HW structure
828 * Releases the software control flag for performing PHY and select
831 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
835 DEBUGFUNC("e1000_release_swflag_ich8lan");
837 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
838 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
839 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
841 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
847 * e1000_check_mng_mode_ich8lan - Checks management mode
848 * @hw: pointer to the HW structure
850 * This checks if the adapter has any manageability enabled.
851 * This is a function pointer entry point only called by read/write
852 * routines for the PHY and NVM parts.
854 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
858 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
860 fwsm = E1000_READ_REG(hw, E1000_FWSM);
862 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
863 ((fwsm & E1000_FWSM_MODE_MASK) ==
864 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
868 * e1000_check_mng_mode_pchlan - Checks management mode
869 * @hw: pointer to the HW structure
871 * This checks if the adapter has iAMT enabled.
872 * This is a function pointer entry point only called by read/write
873 * routines for the PHY and NVM parts.
875 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
879 DEBUGFUNC("e1000_check_mng_mode_pchlan");
881 fwsm = E1000_READ_REG(hw, E1000_FWSM);
883 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
884 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
888 * e1000_rar_set_pch2lan - Set receive address register
889 * @hw: pointer to the HW structure
890 * @addr: pointer to the receive address
891 * @index: receive address array register
893 * Sets the receive address array register at index to the address passed
894 * in by addr. For 82579, RAR[0] is the base address register that is to
895 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
896 * Use SHRA[0-3] in place of those reserved for ME.
898 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
900 u32 rar_low, rar_high;
902 DEBUGFUNC("e1000_rar_set_pch2lan");
905 * HW expects these in little endian so we reverse the byte order
906 * from network order (big endian) to little endian
908 rar_low = ((u32) addr[0] |
909 ((u32) addr[1] << 8) |
910 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
912 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
914 /* If MAC address zero, no need to set the AV bit */
915 if (rar_low || rar_high)
916 rar_high |= E1000_RAH_AV;
919 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
920 E1000_WRITE_FLUSH(hw);
921 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
922 E1000_WRITE_FLUSH(hw);
926 if (index < hw->mac.rar_entry_count) {
927 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
928 E1000_WRITE_FLUSH(hw);
929 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
930 E1000_WRITE_FLUSH(hw);
932 /* verify the register updates */
933 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
934 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
937 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
938 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
941 DEBUGOUT1("Failed to write receive address at index %d\n", index);
945 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
946 * @hw: pointer to the HW structure
948 * Checks if firmware is blocking the reset of the PHY.
949 * This is a function pointer entry point only called by
952 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
956 DEBUGFUNC("e1000_check_reset_block_ich8lan");
958 if (hw->phy.reset_disable)
959 return E1000_BLK_PHY_RESET;
961 fwsm = E1000_READ_REG(hw, E1000_FWSM);
963 return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
964 : E1000_BLK_PHY_RESET;
968 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
969 * @hw: pointer to the HW structure
971 * Assumes semaphore already acquired.
974 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
977 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
978 s32 ret_val = E1000_SUCCESS;
980 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
982 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
986 phy_data &= ~HV_SMB_ADDR_MASK;
987 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
988 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
989 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
996 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
997 * @hw: pointer to the HW structure
999 * SW should configure the LCD from the NVM extended configuration region
1000 * as a workaround for certain parts.
1002 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1004 struct e1000_phy_info *phy = &hw->phy;
1005 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1006 s32 ret_val = E1000_SUCCESS;
1007 u16 word_addr, reg_data, reg_addr, phy_page = 0;
1009 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1012 * Initialize the PHY from the NVM on ICH platforms. This
1013 * is needed due to an issue where the NVM configuration is
1014 * not properly autoloaded after power transitions.
1015 * Therefore, after each PHY reset, we will load the
1016 * configuration data out of the NVM manually.
1018 switch (hw->mac.type) {
1020 if (phy->type != e1000_phy_igp_3)
1023 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1024 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1025 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1031 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1037 ret_val = hw->phy.ops.acquire(hw);
1041 data = E1000_READ_REG(hw, E1000_FEXTNVM);
1042 if (!(data & sw_cfg_mask))
1046 * Make sure HW does not configure LCD from PHY
1047 * extended configuration before SW configuration
1049 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1050 if (!(hw->mac.type == e1000_pch2lan)) {
1051 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
1055 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1056 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1057 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1061 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1062 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1064 if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
1065 (hw->mac.type == e1000_pchlan)) ||
1066 (hw->mac.type == e1000_pch2lan)) {
1068 * HW configures the SMBus address and LEDs when the
1069 * OEM and LCD Write Enable bits are set in the NVM.
1070 * When both NVM bits are cleared, SW will configure
1073 ret_val = e1000_write_smbus_addr(hw);
1077 data = E1000_READ_REG(hw, E1000_LEDCTL);
1078 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1084 /* Configure LCD from extended configuration region. */
1086 /* cnf_base_addr is in DWORD */
1087 word_addr = (u16)(cnf_base_addr << 1);
1089 for (i = 0; i < cnf_size; i++) {
1090 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1095 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1100 /* Save off the PHY page for future writes. */
1101 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1102 phy_page = reg_data;
1106 reg_addr &= PHY_REG_MASK;
1107 reg_addr |= phy_page;
1109 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1116 hw->phy.ops.release(hw);
1121 * e1000_k1_gig_workaround_hv - K1 Si workaround
1122 * @hw: pointer to the HW structure
1123 * @link: link up bool flag
1125 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1126 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
1127 * If link is down, the function will restore the default K1 setting located
1130 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1132 s32 ret_val = E1000_SUCCESS;
/* Default K1 state comes from what was read out of the NVM at init time. */
1134 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1136 DEBUGFUNC("e1000_k1_gig_workaround_hv");
/* Only the PCH (82577/82578 PHY) parts need this Si workaround. */
1138 if (hw->mac.type != e1000_pchlan)
1141 /* Wrap the whole flow with the sw flag */
1142 ret_val = hw->phy.ops.acquire(hw);
1146 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
/*
 * The 82578 PHY reports link/resolution/speed in BM_CS_STATUS while the
 * 82577 uses HV_M_STATUS; both branches reduce the status to the three
 * relevant fields and compare against "link up at 1000 Mb/s".
 */
1148 if (hw->phy.type == e1000_phy_82578) {
1149 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1154 status_reg &= BM_CS_STATUS_LINK_UP |
1155 BM_CS_STATUS_RESOLVED |
1156 BM_CS_STATUS_SPEED_MASK;
1158 if (status_reg == (BM_CS_STATUS_LINK_UP |
1159 BM_CS_STATUS_RESOLVED |
1160 BM_CS_STATUS_SPEED_1000))
1164 if (hw->phy.type == e1000_phy_82577) {
1165 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1170 status_reg &= HV_M_STATUS_LINK_UP |
1171 HV_M_STATUS_AUTONEG_COMPLETE |
1172 HV_M_STATUS_SPEED_MASK;
1174 if (status_reg == (HV_M_STATUS_LINK_UP |
1175 HV_M_STATUS_AUTONEG_COMPLETE |
1176 HV_M_STATUS_SPEED_1000))
1180 /* Link stall fix for link up */
/* NOTE(review): values written to PHY_REG(770, 19) are not visible in
 * this excerpt — they come from the HW team; do not modify. */
1181 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1187 /* Link stall fix for link down */
1188 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
/* Apply the chosen K1 state; the PHY semaphore acquired above is still held. */
1194 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1197 hw->phy.ops.release(hw);
1203 * e1000_configure_k1_ich8lan - Configure K1 power state
1204 * @hw: pointer to the HW structure
1205 * @enable: K1 state to configure
1207 * Configure the K1 power state based on the provided parameter.
1208 * Assumes semaphore already acquired.
1210 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1212 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1214 s32 ret_val = E1000_SUCCESS;
1220 DEBUGFUNC("e1000_configure_k1_ich8lan");
/* Read-modify-write the K1 enable bit in the KMRN K1 config register. */
1222 ret_val = e1000_read_kmrn_reg_locked(hw,
1223 E1000_KMRNCTRLSTA_K1_CONFIG,
1229 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1231 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1233 ret_val = e1000_write_kmrn_reg_locked(hw,
1234 E1000_KMRNCTRLSTA_K1_CONFIG,
/*
 * Save CTRL/CTRL_EXT, then momentarily force the speed (FRCSPD plus the
 * SPD_BYPS bypass) with the speed-select bits cleared before restoring the
 * saved values — presumably to make the new K1 setting take effect; the
 * exact HW requirement is not visible here (TODO confirm against errata).
 */
1240 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1241 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1243 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1244 reg |= E1000_CTRL_FRCSPD;
1245 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1247 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
/* Restore the original register values. */
1249 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1250 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1258 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1259 * @hw: pointer to the HW structure
1260 * @d0_state: boolean if entering d0 or d3 device state
1262 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1263 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1264 * in NVM determines whether HW should configure LPLU and Gbe Disable.
1266 s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1272 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
/* Only PCH and PCH2 parts have SW-configurable OEM bits. */
1274 if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
1277 ret_val = hw->phy.ops.acquire(hw);
/* On pre-PCH2 parts, bail out if HW (not SW) owns the OEM configuration. */
1281 if (!(hw->mac.type == e1000_pch2lan)) {
1282 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1283 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1287 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1288 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1291 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1293 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1297 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
/* Mirror the MAC's D0 PHY_CTRL bits into the PHY OEM bits. */
1300 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1301 oem_reg |= HV_OEM_BITS_GBE_DIS;
1303 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1304 oem_reg |= HV_OEM_BITS_LPLU;
/* Mirror the MAC's non-D0 (Dx) PHY_CTRL bits into the PHY OEM bits. */
1306 if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
1307 oem_reg |= HV_OEM_BITS_GBE_DIS;
1309 if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
1310 oem_reg |= HV_OEM_BITS_LPLU;
1312 /* Restart auto-neg to activate the bits */
1313 if (!hw->phy.ops.check_reset_block(hw))
1314 oem_reg |= HV_OEM_BITS_RESTART_AN;
1315 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1318 hw->phy.ops.release(hw);
1325 * e1000_hv_phy_powerdown_workaround_ich8lan - Power down workaround on Sx
1326 * @hw: pointer to the HW structure
1328 s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
1330 DEBUGFUNC("e1000_hv_phy_powerdown_workaround_ich8lan");
/* Only early-revision (<= 2) 82577 PHYs need this; everything else no-ops. */
1332 if ((hw->phy.type != e1000_phy_82577) || (hw->revision_id > 2))
1333 return E1000_SUCCESS;
/* Magic value from HW errata; the meaning of 0x0444 is not documented here. */
1335 return hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0444);
1339 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1340 * @hw: pointer to the HW structure
1342 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1347 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
/* Read-modify-write: set only the MDIO-slow bit, preserving the rest. */
1349 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
1353 data |= HV_KMRN_MDIO_SLOW;
1355 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
1361 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1362 * done after every PHY reset.
1364 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1366 s32 ret_val = E1000_SUCCESS;
1369 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
/* These workarounds apply only to PCH (82577/82578 PHY) parts. */
1371 if (hw->mac.type != e1000_pchlan)
1374 /* Set MDIO slow mode before any other MDIO access */
1375 if (hw->phy.type == e1000_phy_82577) {
1376 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1381 /* Hanksville M Phy init for IEEE. */
/*
 * The 0x10/0x11 writes below appear to be address/data register pairs
 * programming an internal PHY table — sequence supplied by the HW team;
 * do not reorder or modify (TODO confirm against the PHY datasheet).
 */
1382 if ((hw->revision_id == 2) &&
1383 (hw->phy.type == e1000_phy_82577) &&
1384 ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
1385 hw->phy.ops.write_reg(hw, 0x10, 0x8823);
1386 hw->phy.ops.write_reg(hw, 0x11, 0x0018);
1387 hw->phy.ops.write_reg(hw, 0x10, 0x8824);
1388 hw->phy.ops.write_reg(hw, 0x11, 0x0016);
1389 hw->phy.ops.write_reg(hw, 0x10, 0x8825);
1390 hw->phy.ops.write_reg(hw, 0x11, 0x001A);
1391 hw->phy.ops.write_reg(hw, 0x10, 0x888C);
1392 hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1393 hw->phy.ops.write_reg(hw, 0x10, 0x888D);
1394 hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1395 hw->phy.ops.write_reg(hw, 0x10, 0x888E);
1396 hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1397 hw->phy.ops.write_reg(hw, 0x10, 0x8827);
1398 hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1399 hw->phy.ops.write_reg(hw, 0x10, 0x8835);
1400 hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1401 hw->phy.ops.write_reg(hw, 0x10, 0x8834);
1402 hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1403 hw->phy.ops.write_reg(hw, 0x10, 0x8833);
1404 hw->phy.ops.write_reg(hw, 0x11, 0x0002);
1407 if (((hw->phy.type == e1000_phy_82577) &&
1408 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1409 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1410 /* Disable generation of early preamble */
1411 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1415 /* Preamble tuning for SSC */
1416 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
1421 if (hw->phy.type == e1000_phy_82578) {
1422 if (hw->revision_id < 3) {
/* (1 << 6) | offset looks like a page-select encoding of the register
 * address — TODO confirm against the BM PHY register map. */
1424 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29,
1430 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E,
1437 * Return registers to default by doing a soft reset then
1438 * writing 0x3140 to the control register.
1440 if (hw->phy.revision < 2) {
1441 e1000_phy_sw_reset_generic(hw);
1442 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
1447 if ((hw->revision_id == 2) &&
1448 (hw->phy.type == e1000_phy_82577) &&
1449 ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
1451 * Workaround for OEM (GbE) not operating after reset -
1452 * restart AN (twice)
1454 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
1457 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
/* Reset the PHY page selector to page 0 before continuing. */
1463 ret_val = hw->phy.ops.acquire(hw);
1468 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1469 hw->phy.ops.release(hw);
1474 * Configure the K1 Si workaround during phy reset assuming there is
1475 * link so that it disables K1 if link is in 1Gbps.
1477 ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
1481 /* Workaround for link disconnects on a busy hub in half duplex */
1482 ret_val = hw->phy.ops.acquire(hw);
1485 ret_val = hw->phy.ops.read_reg_locked(hw,
1486 PHY_REG(BM_PORT_CTRL_PAGE, 17),
1490 ret_val = hw->phy.ops.write_reg_locked(hw,
1491 PHY_REG(BM_PORT_CTRL_PAGE, 17),
1494 hw->phy.ops.release(hw);
1500 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1501 * @hw: pointer to the HW structure
1503 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1508 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
1510 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1511 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
/* RAL holds the low 32 bits of the address: split into two 16-bit PHY regs. */
1512 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
1513 hw->phy.ops.write_reg(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
1514 hw->phy.ops.write_reg(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
1515 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
1516 hw->phy.ops.write_reg(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
/* Only bit 31 of RAH (0x8000 of the high half-word, the Address Valid
 * bit position) is propagated to the PHY RAR control register. */
1517 hw->phy.ops.write_reg(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0x8000));
/*
 * e1000_calc_rx_da_crc - CRC over a 6-byte destination MAC address
 * @mac: 6-byte MAC address buffer
 *
 * Bit-reflected CRC-32 (polynomial 0xEDB88320, i.e. the reversed 802.3
 * polynomial), processing the 6 address bytes LSB-first.
 */
1521 static u32 e1000_calc_rx_da_crc(u8 mac[])
1523 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
1524 u32 i, j, mask, crc;
1526 DEBUGFUNC("e1000_calc_rx_da_crc");
1529 for (i = 0; i < 6; i++) {
1531 for (j = 8; j > 0; j--) {
/* mask is all-ones when the CRC LSB is set, zero otherwise. */
1532 mask = (crc & 1) * (-1);
1533 crc = (crc >> 1) ^ (poly & mask);
1540 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1542 * @hw: pointer to the HW structure
1543 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
1545 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1547 s32 ret_val = E1000_SUCCESS;
1552 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
/* This jumbo workaround only applies to 82579 (PCH2). */
1554 if (hw->mac.type != e1000_pch2lan)
1557 /* disable Rx path while enabling/disabling workaround */
1558 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
1559 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
1565 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1566 * SHRAL/H) and initial CRC values to the MAC
1568 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1569 u8 mac_addr[ETH_ADDR_LEN] = {0};
1570 u32 addr_high, addr_low;
/* Skip receive-address entries that are not marked valid. */
1572 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
1573 if (!(addr_high & E1000_RAH_AV))
1575 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
/* Unpack RAL/RAH into the 6 address bytes, low byte first. */
1576 mac_addr[0] = (addr_low & 0xFF);
1577 mac_addr[1] = ((addr_low >> 8) & 0xFF);
1578 mac_addr[2] = ((addr_low >> 16) & 0xFF);
1579 mac_addr[3] = ((addr_low >> 24) & 0xFF);
1580 mac_addr[4] = (addr_high & 0xFF);
1581 mac_addr[5] = ((addr_high >> 8) & 0xFF);
1583 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
1584 e1000_calc_rx_da_crc(mac_addr));
1587 /* Write Rx addresses to the PHY */
1588 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1590 /* Enable jumbo frame workaround in the MAC */
/* FFLT_DBG bit values supplied by the HW team — assumed correct; the
 * register layout is not visible here (TODO confirm). */
1591 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1592 mac_reg &= ~(1 << 14);
1593 mac_reg |= (7 << 15);
1594 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
/* Strip the Ethernet CRC in the MAC while the workaround is active. */
1596 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1597 mac_reg |= E1000_RCTL_SECRC;
1598 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1600 ret_val = e1000_read_kmrn_reg_generic(hw,
1601 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1605 ret_val = e1000_write_kmrn_reg_generic(hw,
1606 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1610 ret_val = e1000_read_kmrn_reg_generic(hw,
1611 E1000_KMRNCTRLSTA_HD_CTRL,
1615 data &= ~(0xF << 8);
1617 ret_val = e1000_write_kmrn_reg_generic(hw,
1618 E1000_KMRNCTRLSTA_HD_CTRL,
1623 /* Enable jumbo frame workaround in the PHY */
1624 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1625 data &= ~(0x7F << 5);
1626 data |= (0x37 << 5);
1627 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1630 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1632 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1635 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1636 data &= ~(0x3FF << 2);
1637 data |= (0x1A << 2);
1638 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1641 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xFE00);
1644 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1645 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data | (1 << 10));
1649 /* Write MAC register values back to h/w defaults */
1650 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1651 mac_reg &= ~(0xF << 14);
1652 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1654 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1655 mac_reg &= ~E1000_RCTL_SECRC;
1656 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1658 ret_val = e1000_read_kmrn_reg_generic(hw,
1659 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1663 ret_val = e1000_write_kmrn_reg_generic(hw,
1664 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1668 ret_val = e1000_read_kmrn_reg_generic(hw,
1669 E1000_KMRNCTRLSTA_HD_CTRL,
1673 data &= ~(0xF << 8);
1675 ret_val = e1000_write_kmrn_reg_generic(hw,
1676 E1000_KMRNCTRLSTA_HD_CTRL,
1681 /* Write PHY register values back to h/w defaults */
1682 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1683 data &= ~(0x7F << 5);
1684 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1687 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1689 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1692 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1693 data &= ~(0x3FF << 2);
1695 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1698 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
1701 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1702 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data & ~(1 << 10));
1707 /* re-enable Rx path after enabling/disabling workaround */
1708 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
1715 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1716 * done after every PHY reset.
1718 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1720 s32 ret_val = E1000_SUCCESS;
1722 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
/* Only 82579 (PCH2) parts need the post-reset slow-MDIO setup. */
1724 if (hw->mac.type != e1000_pch2lan)
1727 /* Set MDIO slow mode before any other MDIO access */
1728 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1735 * e1000_k1_gig_workaround_lv - K1 Si workaround
1736 * @hw: pointer to the HW structure
1738 * Workaround to set the K1 beacon duration for 82579 parts
1740 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1742 s32 ret_val = E1000_SUCCESS;
1746 DEBUGFUNC("e1000_k1_workaround_lv");
1748 if (hw->mac.type != e1000_pch2lan)
1751 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
1752 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
/* Only adjust the beacon when link is up and auto-negotiation finished. */
1756 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1757 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1758 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1759 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
/* 8 usec beacon at 1 Gb/s, 16 usec at lower speeds. */
1761 if (status_reg & HV_M_STATUS_SPEED_1000)
1762 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1764 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1766 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1774 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1775 * @hw: pointer to the HW structure
1776 * @gate: boolean set to TRUE to gate, FALSE to un-gate
1778 * Gate/ungate the automatic PHY configuration via hardware; perform
1779 * the configuration via software instead.
1781 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1785 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
/* The gate bit only exists on 82579 (PCH2). */
1787 if (hw->mac.type != e1000_pch2lan)
/* Read-modify-write EXTCNF_CTRL, toggling only the gate bit. */
1790 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1793 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1795 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1797 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1802 * e1000_hv_phy_tuning_workaround_ich8lan - This is a Phy tuning work around
1803 * needed for Nahum3 + Hanksville testing, requested by HW team
1805 static s32 e1000_hv_phy_tuning_workaround_ich8lan(struct e1000_hw *hw)
1807 s32 ret_val = E1000_SUCCESS;
1809 DEBUGFUNC("e1000_hv_phy_tuning_workaround_ich8lan");
/* Values below were supplied by the HW team; do not modify. */
1811 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1815 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
1819 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29, 0x66C0);
1823 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E, 0xFFFF);
1830 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1831 * @hw: pointer to the HW structure
1833 * Check the appropriate indication the MAC has finished configuring the
1834 * PHY after a software reset.
1836 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1838 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1840 DEBUGFUNC("e1000_lan_init_done_ich8lan");
1842 /* Wait for basic configuration completes before proceeding */
/* Poll STATUS.LAN_INIT_DONE until set, or until the loop counter expires. */
1844 data = E1000_READ_REG(hw, E1000_STATUS);
1845 data &= E1000_STATUS_LAN_INIT_DONE;
1847 } while ((!data) && --loop);
1850 * If basic configuration is incomplete before the above loop
1851 * count reaches 0, loading the configuration from NVM will
1852 * leave the PHY in a bad state possibly resulting in no link.
1855 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
1857 /* Clear the Init Done bit for the next init event */
1858 data = E1000_READ_REG(hw, E1000_STATUS);
1859 data &= ~E1000_STATUS_LAN_INIT_DONE;
1860 E1000_WRITE_REG(hw, E1000_STATUS, data);
1864 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1865 * @hw: pointer to the HW structure
1867 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1869 s32 ret_val = E1000_SUCCESS;
1872 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
/* Nothing to do while the PHY is held in reset by the manageability FW. */
1874 if (hw->phy.ops.check_reset_block(hw))
1877 /* Allow time for h/w to get to quiescent state after reset */
1880 /* Perform any necessary post-reset workarounds */
1881 switch (hw->mac.type) {
1883 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1888 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1896 if (hw->device_id == E1000_DEV_ID_ICH10_HANKSVILLE) {
1897 ret_val = e1000_hv_phy_tuning_workaround_ich8lan(hw);
1902 /* Dummy read to clear the phy wakeup bit after lcd reset */
/* Fix: "&reg" had been mis-encoded as the single character U+00AE
 * (mojibake of the HTML entity "&reg;"); restored the address-of
 * expression so the read target is the local variable again. */
1903 if (hw->mac.type >= e1000_pchlan)
1904 hw->phy.ops.read_reg(hw, BM_WUC, &reg);
1906 /* Configure the LCD with the extended configuration region in NVM */
1907 ret_val = e1000_sw_lcd_config_ich8lan(hw);
1911 /* Configure the LCD with the OEM bits in NVM */
1912 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
1914 /* Ungate automatic PHY configuration on non-managed 82579 */
1915 if ((hw->mac.type == e1000_pch2lan) &&
1916 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) {
1918 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
1926 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1927 * @hw: pointer to the HW structure
1930 * This is a function pointer entry point called by drivers
1931 * or other shared routines.
1933 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1935 s32 ret_val = E1000_SUCCESS;
1937 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
1939 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
1940 if ((hw->mac.type == e1000_pch2lan) &&
1941 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
1942 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
/* Generic reset, then the ICH/PCH-specific post-reset sequence
 * (which also un-gates automatic PHY configuration when appropriate). */
1944 ret_val = e1000_phy_hw_reset_generic(hw);
1948 ret_val = e1000_post_phy_reset_ich8lan(hw);
1955 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
1956 * @hw: pointer to the HW structure
1957 * @active: TRUE to enable LPLU, FALSE to disable
1959 * Sets the LPLU state according to the active flag. For PCH, if OEM write
1960 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
1961 * the phy speed. This function will manually set the LPLU bit and restart
1962 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
1963 * since it configures the same bit.
1965 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
1967 s32 ret_val = E1000_SUCCESS;
1970 DEBUGFUNC("e1000_set_lplu_state_pchlan");
/* Toggle the LPLU OEM bit directly in the PHY, then restart auto-neg
 * so the new setting takes effect — mirroring what HW would do. */
1972 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
1977 oem_reg |= HV_OEM_BITS_LPLU;
1979 oem_reg &= ~HV_OEM_BITS_LPLU;
1981 oem_reg |= HV_OEM_BITS_RESTART_AN;
1982 ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
1989 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
1990 * @hw: pointer to the HW structure
1991 * @active: TRUE to enable LPLU, FALSE to disable
1993 * Sets the LPLU D0 state according to the active flag. When
1994 * activating LPLU this function also disables smart speed
1995 * and vice versa. LPLU will not be activated unless the
1996 * device autonegotiation advertisement meets standards of
1997 * either 10 or 10/100 or 10/100/1000 at all duplexes.
1998 * This is a function pointer entry point only called by
1999 * PHY setup routines.
2001 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2003 struct e1000_phy_info *phy = &hw->phy;
2005 s32 ret_val = E1000_SUCCESS;
2008 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
/* The IFE (10/100-only) PHY has no D0 LPLU handling here. */
2010 if (phy->type == e1000_phy_ife)
2013 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2016 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2017 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
/* SmartSpeed adjustment below applies only to the IGP3 PHY. */
2019 if (phy->type != e1000_phy_igp_3)
2023 * Call gig speed drop workaround on LPLU before accessing
2026 if (hw->mac.type == e1000_ich8lan)
2027 e1000_gig_downshift_workaround_ich8lan(hw);
2029 /* When LPLU is enabled, we should disable SmartSpeed */
2030 ret_val = phy->ops.read_reg(hw,
2031 IGP01E1000_PHY_PORT_CONFIG,
2033 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2034 ret_val = phy->ops.write_reg(hw,
2035 IGP01E1000_PHY_PORT_CONFIG,
/* LPLU off: clear the D0 LPLU bit, then restore SmartSpeed per config. */
2040 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2041 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2043 if (phy->type != e1000_phy_igp_3)
2047 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2048 * during Dx states where the power conservation is most
2049 * important. During driver activity we should enable
2050 * SmartSpeed, so performance is maintained.
2052 if (phy->smart_speed == e1000_smart_speed_on) {
2053 ret_val = phy->ops.read_reg(hw,
2054 IGP01E1000_PHY_PORT_CONFIG,
2059 data |= IGP01E1000_PSCFR_SMART_SPEED;
2060 ret_val = phy->ops.write_reg(hw,
2061 IGP01E1000_PHY_PORT_CONFIG,
2065 } else if (phy->smart_speed == e1000_smart_speed_off) {
2066 ret_val = phy->ops.read_reg(hw,
2067 IGP01E1000_PHY_PORT_CONFIG,
2072 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2073 ret_val = phy->ops.write_reg(hw,
2074 IGP01E1000_PHY_PORT_CONFIG,
2086 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2087 * @hw: pointer to the HW structure
2088 * @active: TRUE to enable LPLU, FALSE to disable
2090 * Sets the LPLU D3 state according to the active flag. When
2091 * activating LPLU this function also disables smart speed
2092 * and vice versa. LPLU will not be activated unless the
2093 * device autonegotiation advertisement meets standards of
2094 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2095 * This is a function pointer entry point only called by
2096 * PHY setup routines.
2098 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2100 struct e1000_phy_info *phy = &hw->phy;
2102 s32 ret_val = E1000_SUCCESS;
2105 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2107 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
/* LPLU off: clear the non-D0 LPLU bit, then restore SmartSpeed per config. */
2110 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2111 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
/* SmartSpeed adjustment below applies only to the IGP3 PHY. */
2113 if (phy->type != e1000_phy_igp_3)
2117 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2118 * during Dx states where the power conservation is most
2119 * important. During driver activity we should enable
2120 * SmartSpeed, so performance is maintained.
2122 if (phy->smart_speed == e1000_smart_speed_on) {
2123 ret_val = phy->ops.read_reg(hw,
2124 IGP01E1000_PHY_PORT_CONFIG,
2129 data |= IGP01E1000_PSCFR_SMART_SPEED;
2130 ret_val = phy->ops.write_reg(hw,
2131 IGP01E1000_PHY_PORT_CONFIG,
2135 } else if (phy->smart_speed == e1000_smart_speed_off) {
2136 ret_val = phy->ops.read_reg(hw,
2137 IGP01E1000_PHY_PORT_CONFIG,
2142 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2143 ret_val = phy->ops.write_reg(hw,
2144 IGP01E1000_PHY_PORT_CONFIG,
/* LPLU on: only when the advertisement set permits a low-speed link. */
2149 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2150 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2151 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2152 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2153 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2155 if (phy->type != e1000_phy_igp_3)
2159 * Call gig speed drop workaround on LPLU before accessing
2162 if (hw->mac.type == e1000_ich8lan)
2163 e1000_gig_downshift_workaround_ich8lan(hw);
2165 /* When LPLU is enabled, we should disable SmartSpeed */
2166 ret_val = phy->ops.read_reg(hw,
2167 IGP01E1000_PHY_PORT_CONFIG,
2172 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2173 ret_val = phy->ops.write_reg(hw,
2174 IGP01E1000_PHY_PORT_CONFIG,
2183 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2184 * @hw: pointer to the HW structure
2185 * @bank: pointer to the variable that returns the active bank
2187 * Reads signature byte from the NVM using the flash access registers.
2188 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2190 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2193 struct e1000_nvm_info *nvm = &hw->nvm;
/* Byte offset of bank 1 (bank size is in words, hence * sizeof(u16)). */
2194 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
/* Byte offset of the high byte of signature word 0x13 (holds bits 15:14). */
2195 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2197 s32 ret_val = E1000_SUCCESS;
2199 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
/* Newer MACs report the valid bank directly in EECD; fall back to
 * reading the flash signature bytes when EECD cannot tell. */
2201 switch (hw->mac.type) {
2204 eecd = E1000_READ_REG(hw, E1000_EECD);
2205 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2206 E1000_EECD_SEC1VAL_VALID_MASK) {
2207 if (eecd & E1000_EECD_SEC1VAL)
2214 DEBUGOUT("Unable to determine valid NVM bank via EEC - "
2215 "reading flash signature\n");
2218 /* set bank to 0 in case flash read fails */
/* Check bank 0's signature byte first, then bank 1's. */
2222 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2226 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2227 E1000_ICH_NVM_SIG_VALUE) {
2233 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2238 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2239 E1000_ICH_NVM_SIG_VALUE) {
2244 DEBUGOUT("ERROR: No valid NVM bank present\n");
2245 ret_val = -E1000_ERR_NVM;
2253 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
2254 * @hw: pointer to the HW structure
2255 * @offset: The offset (in bytes) of the word(s) to read.
2256 * @words: Size of data to read in words
2257 * @data: Pointer to the word(s) to read at offset.
2259 * Reads a word(s) from the NVM using the flash access registers.
2261 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2264 struct e1000_nvm_info *nvm = &hw->nvm;
2265 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2267 s32 ret_val = E1000_SUCCESS;
2271 DEBUGFUNC("e1000_read_nvm_ich8lan");
/* Reject reads that start or run past the end of the NVM word space. */
2273 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2275 DEBUGOUT("nvm parameter(s) out of bounds\n");
2276 ret_val = -E1000_ERR_NVM;
2280 nvm->ops.acquire(hw);
2282 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2283 if (ret_val != E1000_SUCCESS) {
2284 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
/* Bank 1 starts one flash_bank_size past bank 0. */
2288 act_offset = (bank) ? nvm->flash_bank_size : 0;
2289 act_offset += offset;
2291 ret_val = E1000_SUCCESS;
2292 for (i = 0; i < words; i++) {
/* Prefer words staged in the shadow RAM over the flash contents. */
2293 if ((dev_spec->shadow_ram) &&
2294 (dev_spec->shadow_ram[offset+i].modified)) {
2295 data[i] = dev_spec->shadow_ram[offset+i].value;
2297 ret_val = e1000_read_flash_word_ich8lan(hw,
2306 nvm->ops.release(hw);
2310 DEBUGOUT1("NVM read error: %d\n", ret_val);
2316 * e1000_flash_cycle_init_ich8lan - Initialize flash
2317 * @hw: pointer to the HW structure
2319 * This function does initial flash setup so that a new read/write/erase cycle
2322 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2324 union ich8_hws_flash_status hsfsts;
2325 s32 ret_val = -E1000_ERR_NVM;
2328 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2330 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2332 /* Check if the flash descriptor is valid */
2333 if (hsfsts.hsf_status.fldesvalid == 0) {
2334 DEBUGOUT("Flash descriptor invalid. "
2335 "SW Sequencing must be used.");
2339 /* Clear FCERR and DAEL in hw status by writing 1 */
/* These are write-1-to-clear status bits. */
2340 hsfsts.hsf_status.flcerr = 1;
2341 hsfsts.hsf_status.dael = 1;
2343 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2346 * Either we should have a hardware SPI cycle in progress
2347 * bit to check against, in order to start a new cycle or
2348 * FDONE bit should be changed in the hardware so that it
2349 * is 1 after hardware reset, which can then be used as an
2350 * indication whether a cycle is in progress or has been
2354 if (hsfsts.hsf_status.flcinprog == 0) {
2356 * There is no cycle running at present,
2357 * so we can start a cycle.
2358 * Begin by setting Flash Cycle Done.
2360 hsfsts.hsf_status.flcdone = 1;
2361 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2362 ret_val = E1000_SUCCESS;
2365 * Otherwise poll for sometime so the current
2366 * cycle has a chance to end before giving up.
2368 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2369 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2371 if (hsfsts.hsf_status.flcinprog == 0) {
2372 ret_val = E1000_SUCCESS;
2377 if (ret_val == E1000_SUCCESS) {
2379 * Successful in waiting for previous cycle to timeout,
2380 * now set the Flash Cycle Done.
2382 hsfsts.hsf_status.flcdone = 1;
2383 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
2386 DEBUGOUT("Flash controller busy, cannot get access");
2395 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2396 * @hw: pointer to the HW structure
2397 * @timeout: maximum time to wait for completion
2399 * This function starts a flash cycle and waits for its completion.
2401 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2403 union ich8_hws_flash_ctrl hsflctl;
2404 union ich8_hws_flash_status hsfsts;
2405 s32 ret_val = -E1000_ERR_NVM;
2408 DEBUGFUNC("e1000_flash_cycle_ich8lan");
2410 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2411 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2412 hsflctl.hsf_ctrl.flcgo = 1;
2413 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2415 /* wait till FDONE bit is set to 1 */
2417 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2418 if (hsfsts.hsf_status.flcdone == 1)
2421 } while (i++ < timeout);
/* Success only when the cycle completed AND no flash cycle error was set. */
2423 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
2424 ret_val = E1000_SUCCESS;
2430 * e1000_read_flash_word_ich8lan - Read word from flash
2431 * @hw: pointer to the HW structure
2432 * @offset: offset to data location
2433 * @data: pointer to the location for storing the data
2435 * Reads the flash word at offset into data. Offset is converted
2436 * to bytes before read.
2438 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2443 DEBUGFUNC("e1000_read_flash_word_ich8lan");
2446 ret_val = -E1000_ERR_NVM;
2450 /* Must convert offset into bytes. */
/* Delegate to the byte/word reader with size 2 (one 16-bit word). */
2453 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2460 * e1000_read_flash_byte_ich8lan - Read byte from flash
2461 * @hw: pointer to the HW structure
2462 * @offset: The offset of the byte to read.
2463 * @data: Pointer to a byte to store the value read.
2465 * Reads a single byte from the NVM using the flash access registers.
2467 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2470 s32 ret_val = E1000_SUCCESS;
/* Read through a 16-bit temporary; only the low byte is meaningful. */
2473 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2484 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
2485 * @hw: pointer to the HW structure
2486 * @offset: The offset (in bytes) of the byte or word to read.
2487 * @size: Size of data to read, 1=byte 2=word
2488 * @data: Pointer to the word to store the value read.
2490 * Reads a byte or word from the NVM using the flash access registers.
2492 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2495 union ich8_hws_flash_status hsfsts;
2496 union ich8_hws_flash_ctrl hsflctl;
2497 u32 flash_linear_addr;
2499 s32 ret_val = -E1000_ERR_NVM;
2502 DEBUGFUNC("e1000_read_flash_data_ich8lan");
/* Only 1- or 2-byte reads are supported; offset must fit the FADDR mask. */
2504 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2507 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2508 hw->nvm.flash_base_addr;
/* Each attempt re-initializes the flash cycle before programming HSFCTL. */
2513 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2514 if (ret_val != E1000_SUCCESS)
2517 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2518 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2519 hsflctl.hsf_ctrl.fldbcount = size - 1;
2520 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2521 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2523 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2525 ret_val = e1000_flash_cycle_ich8lan(hw,
2526 ICH_FLASH_READ_COMMAND_TIMEOUT);
2529 * Check if FCERR is set to 1, if set to 1, clear it
2530 * and try the whole sequence a few more times, else
2531 * read in (shift in) the Flash Data0, the order is
2532 * least significant byte first msb to lsb
2534 if (ret_val == E1000_SUCCESS) {
2535 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
2537 *data = (u8)(flash_data & 0x000000FF);
2539 *data = (u16)(flash_data & 0x0000FFFF);
2543 * If we've gotten here, then things are probably
2544 * completely hosed, but if the error condition is
2545 * detected, it won't hurt to give it another try...
2546 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2548 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2550 if (hsfsts.hsf_status.flcerr == 1) {
2551 /* Repeat for some time before giving up. */
2553 } else if (hsfsts.hsf_status.flcdone == 0) {
2554 DEBUGOUT("Timeout error - flash cycle "
2555 "did not complete.");
2559 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2566 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
2567 * @hw: pointer to the HW structure
2568 * @offset: The offset (in bytes) of the word(s) to write.
2569 * @words: Size of data to write in words
2570 * @data: Pointer to the word(s) to write at offset.
2572 * Writes a byte or word to the NVM using the flash access registers.
2574 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2577 struct e1000_nvm_info *nvm = &hw->nvm;
2578 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2579 s32 ret_val = E1000_SUCCESS;
2582 DEBUGFUNC("e1000_write_nvm_ich8lan");
/* Reject writes that start or run past the end of the NVM word space. */
2584 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2586 DEBUGOUT("nvm parameter(s) out of bounds\n");
2587 ret_val = -E1000_ERR_NVM;
2591 nvm->ops.acquire(hw);
/*
 * Words are only staged in the driver's shadow RAM here; the actual
 * flash commit of modified entries happens in
 * e1000_update_nvm_checksum_ich8lan().
 */
2593 for (i = 0; i < words; i++) {
2594 dev_spec->shadow_ram[offset+i].modified = TRUE;
2595 dev_spec->shadow_ram[offset+i].value = data[i];
2598 nvm->ops.release(hw);
2605 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2606 * @hw: pointer to the HW structure
2608 * The NVM checksum is updated by calling the generic update_nvm_checksum,
2609 * which writes the checksum to the shadow ram. The changes in the shadow
2610 * ram are then committed to the EEPROM by processing each bank at a time
2611 * checking for the modified bit and writing only the pending changes.
2612 * After a successful commit, the shadow ram is cleared and is ready for
2615 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2617 struct e1000_nvm_info *nvm = &hw->nvm;
2618 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2619 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2623 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
2625 ret_val = e1000_update_nvm_checksum_generic(hw);
/* Only software-managed flash NVM uses the shadow-RAM commit path below. */
2629 if (nvm->type != e1000_nvm_flash_sw)
2632 nvm->ops.acquire(hw);
2635 * We're writing to the opposite bank so if we're on bank 1,
2636 * write to bank 0 etc. We also need to erase the segment that
2637 * is going to be written
2639 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2640 if (ret_val != E1000_SUCCESS) {
2641 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2646 new_bank_offset = nvm->flash_bank_size;
2647 old_bank_offset = 0;
2648 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2652 old_bank_offset = nvm->flash_bank_size;
2653 new_bank_offset = 0;
2654 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2659 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2661 * Determine whether to write the value stored
2662 * in the other NVM bank or a modified value stored
2665 if (dev_spec->shadow_ram[i].modified) {
2666 data = dev_spec->shadow_ram[i].value;
/* Not modified: carry the word over from the currently-valid bank. */
2668 ret_val = e1000_read_flash_word_ich8lan(hw, i +
2676 * If the word is 0x13, then make sure the signature bits
2677 * (15:14) are 11b until the commit has completed.
2678 * This will allow us to write 10b which indicates the
2679 * signature is valid. We want to do this after the write
2680 * has completed so that we don't mark the segment valid
2681 * while the write is still in progress
2683 if (i == E1000_ICH_NVM_SIG_WORD)
2684 data |= E1000_ICH_NVM_SIG_MASK;
2686 /* Convert offset to bytes. */
2687 act_offset = (i + new_bank_offset) << 1;
2690 /* Write the bytes to the new bank. */
2691 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2698 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2706 * Don't bother writing the segment valid bits if sector
2707 * programming failed.
2710 DEBUGOUT("Flash commit failed.\n");
2715 * Finally validate the new segment by setting bit 15:14
2716 * to 10b in word 0x13 , this can be done without an
2717 * erase as well since these bits are 11 to start with
2718 * and we need to change bit 14 to 0b
2720 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2721 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2726 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2733 * And invalidate the previously valid segment by setting
2734 * its signature word (0x13) high_byte to 0b. This can be
2735 * done without an erase because flash erase sets all bits
2736 * to 1's. We can write 1's to 0's without an erase
2738 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2739 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2743 /* Great! Everything worked, we can now clear the cached entries. */
2744 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2745 dev_spec->shadow_ram[i].modified = FALSE;
2746 dev_spec->shadow_ram[i].value = 0xFFFF;
2750 nvm->ops.release(hw);
2753 * Reload the EEPROM, or else modifications will not appear
2754 * until after the next adapter reset.
2757 nvm->ops.reload(hw);
2763 DEBUGOUT1("NVM update error: %d\n", ret_val);
2769 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2770 * @hw: pointer to the HW structure
2772 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2773 * If the bit is 0, that the EEPROM had been modified, but the checksum was not
2774 * calculated, in which case we need to calculate the checksum and set bit 6.
2776 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2778 s32 ret_val = E1000_SUCCESS;
2781 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2784 * Read 0x19 and check bit 6. If this bit is 0, the checksum
2785 * needs to be fixed. This bit is an indication that the NVM
2786 * was prepared by OEM software and did not calculate the
2787 * checksum...a likely scenario.
2789 ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data);
2793 if ((data & 0x40) == 0) {
/* Write word 0x19 back (with bit 6 set), then commit via nvm update. */
2795 ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data);
2798 ret_val = hw->nvm.ops.update(hw);
/* Delegate the actual checksum verification to the generic routine. */
2803 ret_val = e1000_validate_nvm_checksum_generic(hw);
2810 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2811 * @hw: pointer to the HW structure
2812 * @offset: The offset (in bytes) of the byte/word to read.
2813 * @size: Size of data to read, 1=byte 2=word
2814 * @data: The byte(s) to write to the NVM.
2816 * Writes one/two bytes to the NVM using the flash access registers.
2818 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2821 union ich8_hws_flash_status hsfsts;
2822 union ich8_hws_flash_ctrl hsflctl;
2823 u32 flash_linear_addr;
2825 s32 ret_val = -E1000_ERR_NVM;
2828 DEBUGFUNC("e1000_write_ich8_data");
/*
 * NOTE(review): the data bound is size * 0xff, i.e. 0x1FE (not 0xFFFF)
 * for word writes — this mirrors the historical check; verify intent
 * before tightening it.
 */
2830 if (size < 1 || size > 2 || data > size * 0xff ||
2831 offset > ICH_FLASH_LINEAR_ADDR_MASK)
2834 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2835 hw->nvm.flash_base_addr;
2840 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2841 if (ret_val != E1000_SUCCESS)
2844 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2845 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2846 hsflctl.hsf_ctrl.fldbcount = size - 1;
2847 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2848 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2850 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
/* Mask to one byte for byte writes; words go through unchanged. */
2853 flash_data = (u32)data & 0x00FF;
2855 flash_data = (u32)data;
2857 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
2860 * check if FCERR is set to 1 , if set to 1, clear it
2861 * and try the whole sequence a few more times else done
2863 ret_val = e1000_flash_cycle_ich8lan(hw,
2864 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2865 if (ret_val == E1000_SUCCESS)
2869 * If we're here, then things are most likely
2870 * completely hosed, but if the error condition
2871 * is detected, it won't hurt to give it another
2872 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2874 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2875 if (hsfsts.hsf_status.flcerr == 1)
2876 /* Repeat for some time before giving up. */
2878 if (hsfsts.hsf_status.flcdone == 0) {
2879 DEBUGOUT("Timeout error - flash cycle "
2880 "did not complete.");
2883 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2890 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2891 * @hw: pointer to the HW structure
2892 * @offset: The index of the byte to read.
2893 * @data: The byte to write to the NVM.
2895 * Writes a single byte to the NVM using the flash access registers.
2897 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
/* Widen to u16 because the common write helper takes word-sized data. */
2900 u16 word = (u16)data;
2902 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
2904 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2908 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2909 * @hw: pointer to the HW structure
2910 * @offset: The offset of the byte to write.
2911 * @byte: The byte to write to the NVM.
2913 * Writes a single byte to the NVM using the flash access registers.
2914 * Goes through a retry algorithm before giving up.
2916 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2917 u32 offset, u8 byte)
2920 u16 program_retries;
2922 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
2924 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2925 if (ret_val == E1000_SUCCESS)
/* First attempt failed; retry up to 100 times before reporting an error. */
2928 for (program_retries = 0; program_retries < 100; program_retries++) {
2929 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
2931 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2932 if (ret_val == E1000_SUCCESS)
/* Loop ran to completion without success: report an NVM error. */
2935 if (program_retries == 100) {
2936 ret_val = -E1000_ERR_NVM;
2945 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
2946 * @hw: pointer to the HW structure
2947 * @bank: 0 for first bank, 1 for second bank, etc.
2949 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
2950 * bank N is 4096 * N + flash_reg_addr.
2952 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2954 struct e1000_nvm_info *nvm = &hw->nvm;
2955 union ich8_hws_flash_status hsfsts;
2956 union ich8_hws_flash_ctrl hsflctl;
2957 u32 flash_linear_addr;
2958 /* bank size is in 16bit words - adjust to bytes */
2959 u32 flash_bank_size = nvm->flash_bank_size * 2;
2960 s32 ret_val = E1000_SUCCESS;
2962 s32 j, iteration, sector_size;
2964 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
2966 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2969 * Determine HW Sector size: Read BERASE bits of hw flash status
2971 * 00: The Hw sector is 256 bytes, hence we need to erase 16
2972 * consecutive sectors. The start index for the nth Hw sector
2973 * can be calculated as = bank * 4096 + n * 256
2974 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
2975 * The start index for the nth Hw sector can be calculated
2977 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
2978 * (ich9 only, otherwise error condition)
2979 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
2981 switch (hsfsts.hsf_status.berasesz) {
2983 /* Hw sector size 256 */
2984 sector_size = ICH_FLASH_SEG_SIZE_256;
2985 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
2988 sector_size = ICH_FLASH_SEG_SIZE_4K;
2992 sector_size = ICH_FLASH_SEG_SIZE_8K;
2996 sector_size = ICH_FLASH_SEG_SIZE_64K;
/* Unrecognized BERASE encoding: fail rather than guess a sector size. */
3000 ret_val = -E1000_ERR_NVM;
3004 /* Start with the base address, then add the sector offset. */
3005 flash_linear_addr = hw->nvm.flash_base_addr;
3006 flash_linear_addr += (bank) ? flash_bank_size : 0;
3008 for (j = 0; j < iteration ; j++) {
3011 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3016 * Write a value 11 (block Erase) in Flash
3017 * Cycle field in hw flash control
3019 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3021 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3022 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3026 * Write the last 24 bits of an index within the
3027 * block into Flash Linear address field in Flash
/*
 * NOTE(review): "+=" accumulates the j*sector_size offset across
 * loop iterations — verify this is the intended addressing for
 * multi-sector (256-byte) erases.
 */
3030 flash_linear_addr += (j * sector_size);
3031 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3034 ret_val = e1000_flash_cycle_ich8lan(hw,
3035 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3036 if (ret_val == E1000_SUCCESS)
3040 * Check if FCERR is set to 1. If 1,
3041 * clear it and try the whole sequence
3042 * a few more times else Done
3044 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3046 if (hsfsts.hsf_status.flcerr == 1)
3047 /* repeat for some time before giving up */
3049 else if (hsfsts.hsf_status.flcdone == 0)
3051 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3059 * e1000_valid_led_default_ich8lan - Set the default LED settings
3060 * @hw: pointer to the HW structure
3061 * @data: Pointer to the LED settings
3063 * Reads the LED default settings from the NVM to data. If the NVM LED
3064 * settings is all 0's or F's, set the LED default to a valid LED default
3067 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3071 DEBUGFUNC("e1000_valid_led_default_ich8lan");
3073 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3075 DEBUGOUT("NVM Read Error\n");
/* Reserved all-0s / all-Fs patterns are replaced with the ICH8LAN default. */
3079 if (*data == ID_LED_RESERVED_0000 ||
3080 *data == ID_LED_RESERVED_FFFF)
3081 *data = ID_LED_DEFAULT_ICH8LAN;
3088 * e1000_id_led_init_pchlan - store LED configurations
3089 * @hw: pointer to the HW structure
3091 * PCH does not control LEDs via the LEDCTL register, rather it uses
3092 * the PHY LED configuration register.
3094 * PCH also does not have an "always on" or "always off" mode which
3095 * complicates the ID feature. Instead of using the "on" mode to indicate
3096 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3097 * use "link_up" mode. The LEDs will still ID on request if there is no
3098 * link based on logic in e1000_led_[on|off]_pchlan().
3100 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3102 struct e1000_mac_info *mac = &hw->mac;
3104 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
/* "Off" is link_up mode with the LED invert bit set. */
3105 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3106 u16 data, i, temp, shift;
3108 DEBUGFUNC("e1000_id_led_init_pchlan");
3110 /* Get default ID LED modes */
3111 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3115 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3116 mac->ledctl_mode1 = mac->ledctl_default;
3117 mac->ledctl_mode2 = mac->ledctl_default;
/* Four LEDs; each has a 4-bit mode field in the NVM word (i << 2). */
3119 for (i = 0; i < 4; i++) {
3120 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3123 case ID_LED_ON1_DEF2:
3124 case ID_LED_ON1_ON2:
3125 case ID_LED_ON1_OFF2:
3126 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3127 mac->ledctl_mode1 |= (ledctl_on << shift);
3129 case ID_LED_OFF1_DEF2:
3130 case ID_LED_OFF1_ON2:
3131 case ID_LED_OFF1_OFF2:
3132 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3133 mac->ledctl_mode1 |= (ledctl_off << shift);
/* Second switch: same decode, but for the mode2 (ID) configuration. */
3140 case ID_LED_DEF1_ON2:
3141 case ID_LED_ON1_ON2:
3142 case ID_LED_OFF1_ON2:
3143 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3144 mac->ledctl_mode2 |= (ledctl_on << shift);
3146 case ID_LED_DEF1_OFF2:
3147 case ID_LED_ON1_OFF2:
3148 case ID_LED_OFF1_OFF2:
3149 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3150 mac->ledctl_mode2 |= (ledctl_off << shift);
3163 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3164 * @hw: pointer to the HW structure
3166 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
3167 * register, so the bus width is hard coded.
3169 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3171 struct e1000_bus_info *bus = &hw->bus;
3174 DEBUGFUNC("e1000_get_bus_info_ich8lan");
3176 ret_val = e1000_get_bus_info_pcie_generic(hw);
3179 * ICH devices are "PCI Express"-ish. They have
3180 * a configuration space, but do not contain
3181 * PCI Express Capability registers, so bus width
3182 * must be hardcoded.
3184 if (bus->width == e1000_bus_width_unknown)
3185 bus->width = e1000_bus_width_pcie_x1;
3191 * e1000_reset_hw_ich8lan - Reset the hardware
3192 * @hw: pointer to the HW structure
3194 * Does a full reset of the hardware which includes a reset of the PHY and
3197 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3199 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3204 DEBUGFUNC("e1000_reset_hw_ich8lan");
3207 * Prevent the PCI-E bus from sticking if there is no TLP connection
3208 * on the last TLP read/write transaction when MAC is reset.
3210 ret_val = e1000_disable_pcie_master_generic(hw);
3212 DEBUGOUT("PCI-E Master disable polling has failed.\n");
3214 DEBUGOUT("Masking off all interrupts\n");
3215 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3218 * Disable the Transmit and Receive units. Then delay to allow
3219 * any pending transactions to complete before we hit the MAC
3220 * with the global reset.
3222 E1000_WRITE_REG(hw, E1000_RCTL, 0);
3223 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3224 E1000_WRITE_FLUSH(hw);
3228 /* Workaround for ICH8 bit corruption issue in FIFO memory */
3229 if (hw->mac.type == e1000_ich8lan) {
3230 /* Set Tx and Rx buffer allocation to 8k apiece. */
3231 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3232 /* Set Packet Buffer Size to 16k. */
3233 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3236 if (hw->mac.type == e1000_pchlan) {
3237 /* Save the NVM K1 bit setting*/
/* Fixed: "&reg" had been corrupted into the "(R)" character (HTML-entity
 * mangling of "&reg;"); "reg" is the variable tested just below. */
3238 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
3242 if (reg & E1000_NVM_K1_ENABLE)
3243 dev_spec->nvm_k1_enabled = TRUE;
3245 dev_spec->nvm_k1_enabled = FALSE;
3248 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3250 if (!hw->phy.ops.check_reset_block(hw)) {
3252 * Full-chip reset requires MAC and PHY reset at the same
3253 * time to make sure the interface between MAC and the
3254 * external PHY is reset.
3256 ctrl |= E1000_CTRL_PHY_RST;
3259 * Gate automatic PHY configuration by hardware on
3262 if ((hw->mac.type == e1000_pch2lan) &&
3263 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3264 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3266 ret_val = e1000_acquire_swflag_ich8lan(hw);
3267 DEBUGOUT("Issuing a global reset to ich8lan\n");
3268 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3272 e1000_release_swflag_ich8lan(hw);
3274 if (ctrl & E1000_CTRL_PHY_RST) {
3275 ret_val = hw->phy.ops.get_cfg_done(hw);
3279 ret_val = e1000_post_phy_reset_ich8lan(hw);
3285 * For PCH, this write will make sure that any noise
3286 * will be detected as a CRC error and be dropped rather than show up
3287 * as a bad packet to the DMA engine.
3289 if (hw->mac.type == e1000_pchlan)
3290 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
3292 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
/* NOTE(review): ICR read is presumably a read-to-clear of pending causes;
 * the value is intentionally unused — confirm against register semantics. */
3293 icr = E1000_READ_REG(hw, E1000_ICR);
3295 kab = E1000_READ_REG(hw, E1000_KABGTXD);
3296 kab |= E1000_KABGTXD_BGSQLBIAS;
3297 E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
3304 * e1000_init_hw_ich8lan - Initialize the hardware
3305 * @hw: pointer to the HW structure
3307 * Prepares the hardware for transmit and receive by doing the following:
3308 * - initialize hardware bits
3309 * - initialize LED identification
3310 * - setup receive address registers
3311 * - setup flow control
3312 * - setup transmit descriptors
3313 * - clear statistics
3315 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3317 struct e1000_mac_info *mac = &hw->mac;
3318 u32 ctrl_ext, txdctl, snoop;
3322 DEBUGFUNC("e1000_init_hw_ich8lan");
3324 e1000_initialize_hw_bits_ich8lan(hw);
3326 /* Initialize identification LED */
3327 ret_val = mac->ops.id_led_init(hw);
3329 DEBUGOUT("Error initializing identification LED\n");
3330 /* This is not fatal and we should not stop init due to this */
3332 /* Setup the receive address. */
3333 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3335 /* Zero out the Multicast HASH table */
3336 DEBUGOUT("Zeroing the MTA\n");
3337 for (i = 0; i < mac->mta_reg_count; i++)
3338 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3341 * The 82578 Rx buffer will stall if wakeup is enabled in host and
3342 * the ME. Reading the BM_WUC register will clear the host wakeup bit.
3343 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3345 if (hw->phy.type == e1000_phy_82578) {
3346 hw->phy.ops.read_reg(hw, BM_WUC, &i);
3347 ret_val = e1000_phy_hw_reset_ich8lan(hw);
3352 /* Setup link and flow control */
3353 ret_val = mac->ops.setup_link(hw);
3355 /* Set the transmit descriptor write-back policy for both queues */
3356 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3357 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3358 E1000_TXDCTL_FULL_TX_DESC_WB;
3359 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3360 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3361 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
/* Repeat the identical write-back/prefetch policy for Tx queue 1. */
3362 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3363 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3364 E1000_TXDCTL_FULL_TX_DESC_WB;
3365 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3366 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3367 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3370 * ICH8 has opposite polarity of no_snoop bits.
3371 * By default, we should use snoop behavior.
3373 if (mac->type == e1000_ich8lan)
3374 snoop = PCIE_ICH8_SNOOP_ALL;
3376 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3377 e1000_set_pcie_no_snoop_generic(hw, snoop);
3379 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3380 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3381 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3384 * Clear all of the statistics registers (clear on read). It is
3385 * important that we do this after we have tried to establish link
3386 * because the symbol error count will increment wildly if there
3389 e1000_clear_hw_cntrs_ich8lan(hw);
3394 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3395 * @hw: pointer to the HW structure
3397 * Sets/Clears required hardware bits necessary for correctly setting up the
3398 * hardware for transmit and receive.
3400 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3404 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3406 /* Extended Device Control */
3407 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3409 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3410 if (hw->mac.type >= e1000_pchlan)
3411 reg |= E1000_CTRL_EXT_PHYPDEN;
3412 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3414 /* Transmit Descriptor Control 0 */
3415 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3417 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3419 /* Transmit Descriptor Control 1 */
3420 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3422 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3424 /* Transmit Arbitration Control 0 */
3425 reg = E1000_READ_REG(hw, E1000_TARC(0));
/*
 * NOTE(review): the raw TARC bit positions below are hardware
 * errata/spec values — confirm against the device datasheet before
 * changing any of them.
 */
3426 if (hw->mac.type == e1000_ich8lan)
3427 reg |= (1 << 28) | (1 << 29);
3428 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3429 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3431 /* Transmit Arbitration Control 1 */
3432 reg = E1000_READ_REG(hw, E1000_TARC(1));
3433 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
3437 reg |= (1 << 24) | (1 << 26) | (1 << 30);
3438 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
3441 if (hw->mac.type == e1000_ich8lan) {
3442 reg = E1000_READ_REG(hw, E1000_STATUS);
3444 E1000_WRITE_REG(hw, E1000_STATUS, reg);
3448 * work-around descriptor data corruption issue during nfs v2 udp
3449 * traffic, just disable the nfs filtering capability
3451 reg = E1000_READ_REG(hw, E1000_RFCTL);
3452 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3453 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
3459 * e1000_setup_link_ich8lan - Setup flow control and link settings
3460 * @hw: pointer to the HW structure
3462 * Determines which flow control settings to use, then configures flow
3463 * control. Calls the appropriate media-specific link configuration
3464 * function. Assuming the adapter has a valid link partner, a valid link
3465 * should be established. Assumes the hardware has previously been reset
3466 * and the transmitter and receiver are not enabled.
3468 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3470 s32 ret_val = E1000_SUCCESS;
3472 DEBUGFUNC("e1000_setup_link_ich8lan");
/* NOTE(review): link setup appears to be skipped while a PHY reset
 * block is asserted — confirm the elided branch body. */
3474 if (hw->phy.ops.check_reset_block(hw))
3478 * ICH parts do not have a word in the NVM to determine
3479 * the default flow control setting, so we explicitly
3482 if (hw->fc.requested_mode == e1000_fc_default)
3483 hw->fc.requested_mode = e1000_fc_full;
3486 * Save off the requested flow control mode for use later. Depending
3487 * on the link partner's capabilities, we may or may not use this mode.
3489 hw->fc.current_mode = hw->fc.requested_mode;
3491 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
3492 hw->fc.current_mode);
3494 /* Continue to configure the copper link. */
3495 ret_val = hw->mac.ops.setup_physical_interface(hw);
3499 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
/* 82577/82578/82579 PHYs need the PCH flow-control refresh timer set too. */
3500 if ((hw->phy.type == e1000_phy_82578) ||
3501 (hw->phy.type == e1000_phy_82579) ||
3502 (hw->phy.type == e1000_phy_82577)) {
3503 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
3505 ret_val = hw->phy.ops.write_reg(hw,
3506 PHY_REG(BM_PORT_CTRL_PAGE, 27),
3512 ret_val = e1000_set_fc_watermarks_generic(hw);
3519 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3520 * @hw: pointer to the HW structure
3522 * Configures the kumeran interface to the PHY to wait the appropriate time
3523 * when polling the PHY, then call the generic setup_copper_link to finish
3524 * configuring the copper link.
3526 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3532 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
3534 ctrl = E1000_READ_REG(hw, E1000_CTRL);
/* Set link up at the MAC; clear forced speed/duplex so the PHY decides. */
3535 ctrl |= E1000_CTRL_SLU;
3536 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3537 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3540 * Set the mac to wait the maximum time between each iteration
3541 * and increase the max iterations when polling the phy;
3542 * this fixes erroneous timeouts at 10Mbps.
3544 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
3548 ret_val = e1000_read_kmrn_reg_generic(hw,
3549 E1000_KMRNCTRLSTA_INBAND_PARAM,
3554 ret_val = e1000_write_kmrn_reg_generic(hw,
3555 E1000_KMRNCTRLSTA_INBAND_PARAM,
/* Per-PHY-type setup before handing off to the generic copper-link code. */
3560 switch (hw->phy.type) {
3561 case e1000_phy_igp_3:
3562 ret_val = e1000_copper_link_setup_igp(hw);
3567 case e1000_phy_82578:
3568 ret_val = e1000_copper_link_setup_m88(hw);
3572 case e1000_phy_82577:
3573 case e1000_phy_82579:
3574 ret_val = e1000_copper_link_setup_82577(hw);
3579 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
/* Configure MDI/MDI-X: clear auto, then apply the requested mdix mode. */
3584 reg_data &= ~IFE_PMC_AUTO_MDIX;
3586 switch (hw->phy.mdix) {
3588 reg_data &= ~IFE_PMC_FORCE_MDIX;
3591 reg_data |= IFE_PMC_FORCE_MDIX;
3595 reg_data |= IFE_PMC_AUTO_MDIX;
3598 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
3606 ret_val = e1000_setup_copper_link_generic(hw);
3613 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3614 * @hw: pointer to the HW structure
3615 * @speed: pointer to store current link speed
3616 * @duplex: pointer to store the current link duplex
3618 * Calls the generic get_speed_and_duplex to retrieve the current link
3619 * information and then calls the Kumeran lock loss workaround for links at
3622 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3627 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
3629 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
/* The lock-loss workaround only applies to ICH8 + IGP3 PHY at 1 Gb/s. */
3633 if ((hw->mac.type == e1000_ich8lan) &&
3634 (hw->phy.type == e1000_phy_igp_3) &&
3635 (*speed == SPEED_1000)) {
3636 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3644 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3645 * @hw: pointer to the HW structure
3647 * Work-around for 82566 Kumeran PCS lock loss:
3648 * On link status change (i.e. PCI reset, speed change) and link is up and
3650 * 0) if workaround is optionally disabled do nothing
3651 * 1) wait 1ms for Kumeran link to come up
3652 * 2) check Kumeran Diagnostic register PCS lock loss bit
3653 * 3) if not set the link is locked (all is good), otherwise...
3655 * 5) repeat up to 10 times
3656 * Note: this is only called for IGP3 copper when speed is 1gb.
3658 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3660 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3662 s32 ret_val = E1000_SUCCESS;
3666 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
/* Workaround can be toggled via
 * e1000_set_kmrn_lock_loss_workaround_ich8lan(); bail out if disabled. */
3668 if (!(dev_spec->kmrn_lock_loss_workaround_enabled))
3672 * Make sure link is up before proceeding. If not just return.
3673 * Attempting this while link is negotiating fouled up link
3676 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3678 ret_val = E1000_SUCCESS;
3682 for (i = 0; i < 10; i++) {
3683 /* read once to clear */
3684 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3687 /* and again to get new status */
3688 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3692 /* check for PCS lock */
3693 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
3694 ret_val = E1000_SUCCESS;
3698 /* Issue PHY reset */
3699 hw->phy.ops.reset(hw);
/* All 10 attempts failed: give up on 1 Gb/s and disable GbE negotiation. */
3702 /* Disable GigE link negotiation */
3703 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3704 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3705 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3706 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3709 * Call gig speed drop workaround on Gig disable before accessing
3712 e1000_gig_downshift_workaround_ich8lan(hw);
3714 /* unable to acquire PCS lock */
3715 ret_val = -E1000_ERR_PHY;
3722 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3723 * @hw: pointer to the HW structure
3724 * @state: boolean value used to set the current Kumeran workaround state
3726 * If ICH8, set the current Kumeran workaround state (enabled - TRUE
3727 * /disabled - FALSE).
3729 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3732 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3734 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
/* Non-ICH8 parts do not take this workaround; leave the flag untouched. */
3736 if (hw->mac.type != e1000_ich8lan) {
3737 DEBUGOUT("Workaround applies to ICH8 only.\n");
3741 dev_spec->kmrn_lock_loss_workaround_enabled = state;
3747 * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3748 * @hw: pointer to the HW structure
3750 * Workaround for 82566 power-down on D3 entry:
3751 * 1) disable gigabit link
3752 * 2) write VR power-down enable
3754 * Continue if successful, else issue LCD reset and repeat
3756 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3762 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
/* Only the IGP3 PHY needs this workaround. */
3764 if (hw->phy.type != e1000_phy_igp_3)
3767 /* Try the workaround twice (if needed) */
3770 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3771 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3772 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3773 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3776 * Call gig speed drop workaround on Gig disable before
3777 * accessing any PHY registers
3779 if (hw->mac.type == e1000_ich8lan)
3780 e1000_gig_downshift_workaround_ich8lan(hw);
3782 /* Write VR power-down enable */
3783 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3784 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3785 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3786 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3788 /* Read it back and test */
3789 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3790 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
/* Done when shutdown mode latched, or after the single allowed retry. */
3791 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3794 /* Issue PHY reset and repeat at most one more time */
3795 reg = E1000_READ_REG(hw, E1000_CTRL);
3796 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3805 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3806 * @hw: pointer to the HW structure
3808 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
3809 * LPLU, Gig disable, MDIC PHY reset):
3810 * 1) Set Kumeran Near-end loopback
3811 * 2) Clear Kumeran Near-end loopback
3812 * Should only be called for ICH8[m] devices with IGP_3 Phy.
3814 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3816 s32 ret_val = E1000_SUCCESS;
/* NOTE(review): the "reg_data" local declaration and the error-checked
 * early returns between these calls are elided from this listing. */
3819 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
/* Only applicable to ICH8 MACs paired with the IGP3 PHY. */
3821 if ((hw->mac.type != e1000_ich8lan) ||
3822 (hw->phy.type != e1000_phy_igp_3))
/* Pulse Kumeran near-end loopback: read the diag register, set the
 * NELPBK bit, then clear it again with a second write. */
3825 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3829 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3830 ret_val = e1000_write_kmrn_reg_generic(hw,
3831 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3835 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3836 ret_val = e1000_write_kmrn_reg_generic(hw,
3837 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3844 * e1000_disable_gig_wol_ich8lan - disable gig during WoL
3845 * @hw: pointer to the HW structure
3847 * During S0 to Sx transition, it is possible the link remains at gig
3848 * instead of negotiating to a lower speed. Before going to Sx, set
3849 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
3852 * Should only be called for applicable parts.
3854 void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
/* NOTE(review): locals ("phy_ctrl", "ret_val") and the acquire-failure
 * check between lines 3867 and 3870 are elided from this listing. */
3859 DEBUGFUNC("e1000_disable_gig_wol_ich8lan");
/* Force low-power link-up and disable gigabit before entering Sx. */
3861 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3862 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
3863 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
/* PCH and newer: also clear the OEM bits and program the SMBus address
 * while holding the PHY semaphore. */
3865 if (hw->mac.type >= e1000_pchlan) {
3866 e1000_oem_bits_config_ich8lan(hw, FALSE);
3867 ret_val = hw->phy.ops.acquire(hw);
3870 e1000_write_smbus_addr(hw);
3871 hw->phy.ops.release(hw);
3878 * e1000_cleanup_led_ich8lan - Restore the default LED operation
3879 * @hw: pointer to the HW structure
3881 * Return the LED back to the default configuration.
3883 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
3885 DEBUGFUNC("e1000_cleanup_led_ich8lan");
/* IFE PHYs drive the LEDs through a PHY register (second argument to
 * write_reg is elided in this listing); others use the MAC LEDCTL. */
3887 if (hw->phy.type == e1000_phy_ife)
3888 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
/* Non-IFE: restore the LEDCTL value captured at init. */
3891 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
3892 return E1000_SUCCESS;
3896 * e1000_led_on_ich8lan - Turn LEDs on
3897 * @hw: pointer to the HW structure
/* Turn on the LEDs (kernel-doc body line elided in this listing). */
3901 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
3903 DEBUGFUNC("e1000_led_on_ich8lan");
/* IFE PHY: force LEDs on via the PHY's probe-mode LED control register. */
3905 if (hw->phy.type == e1000_phy_ife)
3906 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3907 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON))
/* Other PHYs: program the MAC's "LED on" pattern (mode2). */;
3909 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
3910 return E1000_SUCCESS;
3914 * e1000_led_off_ich8lan - Turn LEDs off
3915 * @hw: pointer to the HW structure
3917 * Turn off the LEDs.
3919 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3921 DEBUGFUNC("e1000_led_off_ich8lan");
/* IFE PHY: force LEDs off via the PHY's probe-mode LED control register. */
3923 if (hw->phy.type == e1000_phy_ife)
3924 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3925 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
/* Other PHYs: program the MAC's "LED off" pattern (mode1). */
3927 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
3928 return E1000_SUCCESS;
3932 * e1000_setup_led_pchlan - Configures SW controllable LED
3933 * @hw: pointer to the HW structure
3935 * This prepares the SW controllable LED for use.
3937 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
3939 DEBUGFUNC("e1000_setup_led_pchlan");
/* PCH parts drive the LEDs through the PHY's HV_LED_CONFIG register;
 * ledctl_mode1 holds the "LED off" pattern used as the SW-control base. */
3941 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3942 (u16)hw->mac.ledctl_mode1);
3946 * e1000_cleanup_led_pchlan - Restore the default LED operation
3947 * @hw: pointer to the HW structure
3949 * Return the LED back to the default configuration.
3951 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
3953 DEBUGFUNC("e1000_cleanup_led_pchlan");
/* Restore the LED configuration captured at init into HV_LED_CONFIG. */
3955 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3956 (u16)hw->mac.ledctl_default);
3960 * e1000_led_on_pchlan - Turn LEDs on
3961 * @hw: pointer to the HW structure
/* Turn on the LEDs (kernel-doc body line elided in this listing). */
3965 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
3967 u16 data = (u16)hw->mac.ledctl_mode2;
/* NOTE(review): loop/LED locals ("i", "led") are elided here. */
3970 DEBUGFUNC("e1000_led_on_pchlan");
3973 * If no link, then turn LED on by setting the invert bit
3974 * for each LED that's mode is "link_up" in ledctl_mode2.
3976 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
/* Three LEDs, 5 bits of config each, packed into "data". */
3977 for (i = 0; i < 3; i++) {
3978 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
/* Only LEDs in "link up" mode need the invert trick (the "continue"
 * for other modes is elided from this listing). */
3979 if ((led & E1000_PHY_LED0_MODE_MASK) !=
3980 E1000_LEDCTL_MODE_LINK_UP)
/* Toggle the invert bit: with link down, inverting a link-up LED
 * forces it visibly on. */
3982 if (led & E1000_PHY_LED0_IVRT)
3983 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
3985 data |= (E1000_PHY_LED0_IVRT << (i * 5));
3989 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
3993 * e1000_led_off_pchlan - Turn LEDs off
3994 * @hw: pointer to the HW structure
3996 * Turn off the LEDs.
3998 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4000 u16 data = (u16)hw->mac.ledctl_mode1;
/* NOTE(review): loop/LED locals ("i", "led") are elided here. */
4003 DEBUGFUNC("e1000_led_off_pchlan");
4006 * If no link, then turn LED off by clearing the invert bit
4007 * for each LED that's mode is "link_up" in ledctl_mode1.
4009 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
/* Three LEDs, 5 bits of config each, packed into "data". */
4010 for (i = 0; i < 3; i++) {
4011 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
/* Only LEDs in "link up" mode need the invert adjustment (the
 * "continue" for other modes is elided from this listing). */
4012 if ((led & E1000_PHY_LED0_MODE_MASK) !=
4013 E1000_LEDCTL_MODE_LINK_UP)
/* Toggle the invert bit so a link-up LED reads "off" with no link. */
4015 if (led & E1000_PHY_LED0_IVRT)
4016 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4018 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4022 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4026 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4027 * @hw: pointer to the HW structure
4029 * Read appropriate register for the config done bit for completion status
4030 * and configure the PHY through s/w for EEPROM-less parts.
4032 * NOTE: some silicon which is EEPROM-less will fail trying to read the
4033 * config done bit, so only an error is logged and continues. If we were
4034 * to return with error, EEPROM-less silicon would not be able to be reset
4037 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4039 s32 ret_val = E1000_SUCCESS;
/* NOTE(review): locals ("status", "bank") are elided from this listing. */
4043 DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4045 e1000_get_cfg_done_generic(hw);
/* ICH10 and newer use the LAN-init-done flag; older parts poll the
 * auto-read-done bit (the "else" between these branches is elided). */
4047 /* Wait for indication from h/w that it has completed basic config */
4048 if (hw->mac.type >= e1000_ich10lan) {
4049 e1000_lan_init_done_ich8lan(hw);
4051 ret_val = e1000_get_auto_rd_done_generic(hw);
4054 * When auto config read does not complete, do not
4055 * return with an error. This can happen in situations
4056 * where there is no eeprom and prevents getting link.
4058 DEBUGOUT("Auto Read Done did not complete\n");
4059 ret_val = E1000_SUCCESS;
/* PHYRA is write-to-clear here; if it was not set, the PHY reset has
 * not been flagged and a delay may be needed (else-branch elided). */
4063 /* Clear PHY Reset Asserted bit */
4064 status = E1000_READ_REG(hw, E1000_STATUS);
4065 if (status & E1000_STATUS_PHYRA)
4066 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4068 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
/* EEPROM-less ICH8/ICH9 with an IGP3 PHY must be configured by the
 * s/w init script instead of the missing NVM image. */
4070 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
4071 if (hw->mac.type <= e1000_ich9lan) {
4072 if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
4073 (hw->phy.type == e1000_phy_igp_3)) {
4074 e1000_phy_init_script_igp3(hw);
/* Newer parts: a failed NVM bank detect means no usable EEPROM. */
4077 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4078 /* Maybe we should do a basic PHY config */
4079 DEBUGOUT("EEPROM not present\n");
4080 ret_val = -E1000_ERR_CONFIG;
4088 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4089 * @hw: pointer to the HW structure
4091 * In the case of a PHY power down to save power, or to turn off link during a
4092 * driver unload, or wake on lan is not enabled, remove the link.
4094 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
/* Only power the PHY down when neither manageability (MNG mode) nor a
 * reset block requires it to stay up. */
4096 /* If the management interface is not enabled, then power down */
4097 if (!(hw->mac.ops.check_mng_mode(hw) ||
4098 hw->phy.ops.check_reset_block(hw)))
4099 e1000_power_down_phy_copper(hw);
4105 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4106 * @hw: pointer to the HW structure
4108 * Clears hardware counters specific to the silicon family and calls
4109 * clear_hw_cntrs_generic to clear all general purpose counters.
4111 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4115 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4117 e1000_clear_hw_cntrs_base_generic(hw);
4119 E1000_READ_REG(hw, E1000_ALGNERRC);
4120 E1000_READ_REG(hw, E1000_RXERRC);
4121 E1000_READ_REG(hw, E1000_TNCRS);
4122 E1000_READ_REG(hw, E1000_CEXTERR);
4123 E1000_READ_REG(hw, E1000_TSCTC);
4124 E1000_READ_REG(hw, E1000_TSCTFC);
4126 E1000_READ_REG(hw, E1000_MGTPRC);
4127 E1000_READ_REG(hw, E1000_MGTPDC);
4128 E1000_READ_REG(hw, E1000_MGTPTC);
4130 E1000_READ_REG(hw, E1000_IAC);
4131 E1000_READ_REG(hw, E1000_ICRXOC);
4133 /* Clear PHY statistics registers */
4134 if ((hw->phy.type == e1000_phy_82578) ||
4135 (hw->phy.type == e1000_phy_82579) ||
4136 (hw->phy.type == e1000_phy_82577)) {
4137 hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
4138 hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
4139 hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
4140 hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
4141 hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
4142 hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
4143 hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
4144 hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
4145 hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
4146 hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
4147 hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
4148 hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
4149 hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
4150 hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);