/*
 * Copyright 2021 Intel Corp
 * Copyright 2021 Rubicon Communications, LLC (Netgate)
 * SPDX-License-Identifier: BSD-3-Clause
 */
16 * igc_acquire_phy_base - Acquire rights to access PHY
17 * @hw: pointer to the HW structure
19 * Acquire access rights to the correct PHY.
21 s32 igc_acquire_phy_base(struct igc_hw *hw)
23 u16 mask = IGC_SWFW_PHY0_SM;
25 DEBUGFUNC("igc_acquire_phy_base");
27 if (hw->bus.func == IGC_FUNC_1)
28 mask = IGC_SWFW_PHY1_SM;
30 return hw->mac.ops.acquire_swfw_sync(hw, mask);
34 * igc_release_phy_base - Release rights to access PHY
35 * @hw: pointer to the HW structure
37 * A wrapper to release access rights to the correct PHY.
39 void igc_release_phy_base(struct igc_hw *hw)
41 u16 mask = IGC_SWFW_PHY0_SM;
43 DEBUGFUNC("igc_release_phy_base");
45 if (hw->bus.func == IGC_FUNC_1)
46 mask = IGC_SWFW_PHY1_SM;
48 hw->mac.ops.release_swfw_sync(hw, mask);
52 * igc_init_hw_base - Initialize hardware
53 * @hw: pointer to the HW structure
55 * This inits the hardware readying it for operation.
57 s32 igc_init_hw_base(struct igc_hw *hw)
59 struct igc_mac_info *mac = &hw->mac;
61 u16 i, rar_count = mac->rar_entry_count;
63 DEBUGFUNC("igc_init_hw_base");
65 /* Setup the receive address */
66 igc_init_rx_addrs_generic(hw, rar_count);
68 /* Zero out the Multicast HASH table */
69 DEBUGOUT("Zeroing the MTA\n");
70 for (i = 0; i < mac->mta_reg_count; i++)
71 IGC_WRITE_REG_ARRAY(hw, IGC_MTA, i, 0);
73 /* Zero out the Unicast HASH table */
74 DEBUGOUT("Zeroing the UTA\n");
75 for (i = 0; i < mac->uta_reg_count; i++)
76 IGC_WRITE_REG_ARRAY(hw, IGC_UTA, i, 0);
78 /* Setup link and flow control */
79 ret_val = mac->ops.setup_link(hw);
81 * Clear all of the statistics registers (clear on read). It is
82 * important that we do this after we have tried to establish link
83 * because the symbol error count will increment wildly if there
86 igc_clear_hw_cntrs_base_generic(hw);
92 * igc_power_down_phy_copper_base - Remove link during PHY power down
93 * @hw: pointer to the HW structure
95 * In the case of a PHY power down to save power, or to turn off link during a
96 * driver unload, or wake on lan is not enabled, remove the link.
98 void igc_power_down_phy_copper_base(struct igc_hw *hw)
100 struct igc_phy_info *phy = &hw->phy;
102 if (!(phy->ops.check_reset_block))
105 /* If the management interface is not enabled, then power down */
106 if (phy->ops.check_reset_block(hw))
107 igc_power_down_phy_copper(hw);
113 * igc_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
114 * @hw: pointer to the HW structure
116 * After Rx enable, if manageability is enabled then there is likely some
117 * bad data at the start of the FIFO and possibly in the DMA FIFO. This
118 * function clears the FIFOs and flushes any packets that came in as Rx was
121 void igc_rx_fifo_flush_base(struct igc_hw *hw)
123 u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
126 DEBUGFUNC("igc_rx_fifo_flush_base");
128 /* disable IPv6 options as per hardware errata */
129 rfctl = IGC_READ_REG(hw, IGC_RFCTL);
130 rfctl |= IGC_RFCTL_IPV6_EX_DIS;
131 IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);
133 if (!(IGC_READ_REG(hw, IGC_MANC) & IGC_MANC_RCV_TCO_EN))
136 /* Disable all Rx queues */
137 for (i = 0; i < 4; i++) {
138 rxdctl[i] = IGC_READ_REG(hw, IGC_RXDCTL(i));
139 IGC_WRITE_REG(hw, IGC_RXDCTL(i),
140 rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
142 /* Poll all queues to verify they have shut down */
143 for (ms_wait = 0; ms_wait < 10; ms_wait++) {
146 for (i = 0; i < 4; i++)
147 rx_enabled |= IGC_READ_REG(hw, IGC_RXDCTL(i));
148 if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
153 DEBUGOUT("Queue disable timed out after 10ms\n");
155 /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
156 * incoming packets are rejected. Set enable and wait 2ms so that
157 * any packet that was coming in as RCTL.EN was set is flushed
159 IGC_WRITE_REG(hw, IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);
161 rlpml = IGC_READ_REG(hw, IGC_RLPML);
162 IGC_WRITE_REG(hw, IGC_RLPML, 0);
164 rctl = IGC_READ_REG(hw, IGC_RCTL);
165 temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
166 temp_rctl |= IGC_RCTL_LPE;
168 IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl);
169 IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl | IGC_RCTL_EN);
173 /* Enable Rx queues that were previously enabled and restore our
176 for (i = 0; i < 4; i++)
177 IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl[i]);
178 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
181 IGC_WRITE_REG(hw, IGC_RLPML, rlpml);
182 IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);
184 /* Flush receive errors generated by workaround */
185 IGC_READ_REG(hw, IGC_ROC);
186 IGC_READ_REG(hw, IGC_RNBC);
187 IGC_READ_REG(hw, IGC_MPC);